Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/ac.c17
-rw-r--r--drivers/acpi/acpi_pad.c7
-rw-r--r--drivers/acpi/acpica/hwsleep.c22
-rw-r--r--drivers/acpi/acpica/nspredef.c2
-rw-r--r--drivers/acpi/apei/apei-base.c17
-rw-r--r--drivers/acpi/apei/apei-internal.h9
-rw-r--r--drivers/acpi/apei/ghes.c6
-rw-r--r--drivers/acpi/battery.c25
-rw-r--r--drivers/acpi/bgrt.c1
-rw-r--r--drivers/acpi/bus.c88
-rw-r--r--drivers/acpi/button.c9
-rw-r--r--drivers/acpi/fan.c21
-rw-r--r--drivers/acpi/power.c14
-rw-r--r--drivers/acpi/processor_core.c6
-rw-r--r--drivers/acpi/processor_driver.c13
-rw-r--r--drivers/acpi/processor_idle.c21
-rw-r--r--drivers/acpi/processor_perflib.c30
-rw-r--r--drivers/acpi/sbs.c10
-rw-r--r--drivers/acpi/scan.c23
-rw-r--r--drivers/acpi/sleep.c55
-rw-r--r--drivers/acpi/sysfs.c4
-rw-r--r--drivers/acpi/thermal.c17
-rw-r--r--drivers/acpi/video.c35
-rw-r--r--drivers/amba/tegra-ahb.c6
-rw-r--r--drivers/ata/pata_arasan_cf.c4
-rw-r--r--drivers/atm/solos-pci.c4
-rw-r--r--drivers/base/dd.c4
-rw-r--r--drivers/base/devtmpfs.c98
-rw-r--r--drivers/base/power/domain.c342
-rw-r--r--drivers/base/power/main.c32
-rw-r--r--drivers/base/power/qos.c2
-rw-r--r--drivers/base/power/sysfs.c4
-rw-r--r--drivers/base/regmap/internal.h17
-rw-r--r--drivers/base/regmap/regmap-i2c.c2
-rw-r--r--drivers/base/regmap/regmap-irq.c57
-rw-r--r--drivers/base/regmap/regmap-mmio.c30
-rw-r--r--drivers/base/regmap/regmap.c350
-rw-r--r--drivers/base/soc.c2
-rw-r--r--drivers/bcma/Kconfig19
-rw-r--r--drivers/bcma/Makefile3
-rw-r--r--drivers/bcma/bcma_private.h31
-rw-r--r--drivers/bcma/core.c10
-rw-r--r--drivers/bcma/driver_chipcommon.c5
-rw-r--r--drivers/bcma/driver_chipcommon_nflash.c19
-rw-r--r--drivers/bcma/driver_chipcommon_pmu.c371
-rw-r--r--drivers/bcma/driver_chipcommon_sflash.c19
-rw-r--r--drivers/bcma/driver_gmac_cmn.c14
-rw-r--r--drivers/bcma/driver_mips.c33
-rw-r--r--drivers/bcma/driver_pci.c6
-rw-r--r--drivers/bcma/driver_pci_host.c18
-rw-r--r--drivers/bcma/host_pci.c5
-rw-r--r--drivers/bcma/main.c44
-rw-r--r--drivers/bcma/scan.c48
-rw-r--r--drivers/bcma/scan.h2
-rw-r--r--drivers/bcma/sprom.c26
-rw-r--r--drivers/block/drbd/drbd_actlog.c104
-rw-r--r--drivers/block/drbd/drbd_bitmap.c157
-rw-r--r--drivers/block/drbd/drbd_int.h90
-rw-r--r--drivers/block/drbd/drbd_main.c357
-rw-r--r--drivers/block/drbd/drbd_nl.c48
-rw-r--r--drivers/block/drbd/drbd_proc.c2
-rw-r--r--drivers/block/drbd/drbd_receiver.c95
-rw-r--r--drivers/block/drbd/drbd_req.c198
-rw-r--r--drivers/block/drbd/drbd_req.h19
-rw-r--r--drivers/block/drbd/drbd_worker.c31
-rw-r--r--drivers/block/floppy.c162
-rw-r--r--drivers/block/loop.c8
-rw-r--r--drivers/block/mg_disk.c13
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c390
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h53
-rw-r--r--drivers/block/rbd.c76
-rw-r--r--drivers/block/umem.c40
-rw-r--r--drivers/block/xen-blkback/common.h2
-rw-r--r--drivers/block/xen-blkfront.c102
-rw-r--r--drivers/bluetooth/Kconfig12
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/ath3k.c3
-rw-r--r--drivers/bluetooth/bluecard_cs.c16
-rw-r--r--drivers/bluetooth/bpa10x.c2
-rw-r--r--drivers/bluetooth/bt3c_cs.c6
-rw-r--r--drivers/bluetooth/btmrvl_drv.h2
-rw-r--r--drivers/bluetooth/btmrvl_main.c18
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c12
-rw-r--r--drivers/bluetooth/btuart_cs.c6
-rw-r--r--drivers/bluetooth/btusb.c16
-rw-r--r--drivers/bluetooth/dtl1_cs.c22
-rw-r--r--drivers/bluetooth/hci_bcsp.c2
-rw-r--r--drivers/bluetooth/hci_h4.c2
-rw-r--r--drivers/bluetooth/hci_h5.c747
-rw-r--r--drivers/bluetooth/hci_ldisc.c68
-rw-r--r--drivers/bluetooth/hci_ll.c6
-rw-r--r--drivers/bluetooth/hci_uart.h10
-rw-r--r--drivers/char/agp/intel-agp.c1
-rw-r--r--drivers/char/agp/intel-agp.h1
-rw-r--r--drivers/char/hw_random/atmel-rng.c9
-rw-r--r--drivers/char/hw_random/omap-rng.c13
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c16
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c13
-rw-r--r--drivers/char/mem.c11
-rw-r--r--drivers/char/sonypi.c13
-rw-r--r--drivers/char/tpm/tpm.c29
-rw-r--r--drivers/char/tpm/tpm.h2
-rw-r--r--drivers/char/tpm/tpm_atmel.c12
-rw-r--r--drivers/char/tpm/tpm_infineon.c6
-rw-r--r--drivers/char/tpm/tpm_nsc.c13
-rw-r--r--drivers/char/tpm/tpm_tis.c18
-rw-r--r--drivers/clk/Makefile2
-rw-r--r--drivers/clk/clk-nomadik.c47
-rw-r--r--drivers/clk/clk.c38
-rw-r--r--drivers/clk/mxs/clk-imx23.c12
-rw-r--r--drivers/clk/mxs/clk-imx28.c32
-rw-r--r--drivers/clk/socfpga/Makefile1
-rw-r--r--drivers/clk/socfpga/clk.c51
-rw-r--r--drivers/clk/spear/clk-aux-synth.c2
-rw-r--r--drivers/clk/spear/clk-frac-synth.c2
-rw-r--r--drivers/clk/spear/clk-gpt-synth.c2
-rw-r--r--drivers/clk/spear/clk-vco-pll.c2
-rw-r--r--drivers/clk/spear/clk.c2
-rw-r--r--drivers/clk/spear/clk.h2
-rw-r--r--drivers/clk/spear/spear1310_clock.c314
-rw-r--r--drivers/clk/spear/spear1340_clock.c281
-rw-r--r--drivers/clk/spear/spear3xx_clock.c182
-rw-r--r--drivers/clk/spear/spear6xx_clock.c126
-rw-r--r--drivers/clocksource/Kconfig6
-rw-r--r--drivers/clocksource/Makefile5
-rw-r--r--drivers/clocksource/dw_apb_timer_of.c131
-rw-r--r--drivers/clocksource/em_sti.c406
-rw-r--r--drivers/clocksource/sh_cmt.c26
-rw-r--r--drivers/clocksource/sh_mtu2.c6
-rw-r--r--drivers/clocksource/sh_tmu.c16
-rw-r--r--drivers/clocksource/time-armada-370-xp.c226
-rw-r--r--drivers/connector/cn_proc.c36
-rw-r--r--drivers/connector/cn_queue.c12
-rw-r--r--drivers/connector/connector.c30
-rw-r--r--drivers/cpufreq/cpufreq.c35
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c14
-rw-r--r--drivers/cpuidle/cpuidle.c18
-rw-r--r--drivers/cpuidle/driver.c29
-rw-r--r--drivers/cpuidle/governors/menu.c6
-rw-r--r--drivers/cpuidle/sysfs.c21
-rw-r--r--drivers/crypto/tegra-aes.c12
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c39
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c33
-rw-r--r--drivers/dma/dw_dmac.c2
-rw-r--r--drivers/dma/imx-sdma.c6
-rw-r--r--drivers/dma/ipu/ipu_idmac.c8
-rw-r--r--drivers/dma/ipu/ipu_irq.c14
-rw-r--r--drivers/dma/pl330.c30
-rw-r--r--drivers/edac/amd64_edac.c200
-rw-r--r--drivers/edac/amd76x_edac.c42
-rw-r--r--drivers/edac/cell_edac.c42
-rw-r--r--drivers/edac/cpc925_edac.c91
-rw-r--r--drivers/edac/e752x_edac.c116
-rw-r--r--drivers/edac/e7xxx_edac.c86
-rw-r--r--drivers/edac/edac_core.h47
-rw-r--r--drivers/edac/edac_device.c27
-rw-r--r--drivers/edac/edac_mc.c718
-rw-r--r--drivers/edac/edac_mc_sysfs.c70
-rw-r--r--drivers/edac/edac_module.h2
-rw-r--r--drivers/edac/edac_pci.c6
-rw-r--r--drivers/edac/i3000_edac.c49
-rw-r--r--drivers/edac/i3200_edac.c56
-rw-r--r--drivers/edac/i5000_edac.c236
-rw-r--r--drivers/edac/i5100_edac.c106
-rw-r--r--drivers/edac/i5400_edac.c265
-rw-r--r--drivers/edac/i7300_edac.c115
-rw-r--r--drivers/edac/i7core_edac.c285
-rw-r--r--drivers/edac/i82443bxgx_edac.c41
-rw-r--r--drivers/edac/i82860_edac.c55
-rw-r--r--drivers/edac/i82875p_edac.c51
-rw-r--r--drivers/edac/i82975x_edac.c58
-rw-r--r--drivers/edac/mce_amd.h2
-rw-r--r--drivers/edac/mpc85xx_edac.c38
-rw-r--r--drivers/edac/mv64x60_edac.c47
-rw-r--r--drivers/edac/pasemi_edac.c49
-rw-r--r--drivers/edac/ppc4xx_edac.c50
-rw-r--r--drivers/edac/r82600_edac.c40
-rw-r--r--drivers/edac/sb_edac.c222
-rw-r--r--drivers/edac/tile_edac.c33
-rw-r--r--drivers/edac/x38_edac.c52
-rw-r--r--drivers/extcon/extcon-max8997.c5
-rw-r--r--drivers/extcon/extcon_class.c2
-rw-r--r--drivers/extcon/extcon_gpio.c2
-rw-r--r--drivers/gpio/Kconfig20
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/devres.c1
-rw-r--r--drivers/gpio/gpio-mm-lantiq.c158
-rw-r--r--drivers/gpio/gpio-mxc.c66
-rw-r--r--drivers/gpio/gpio-omap.c14
-rw-r--r--drivers/gpio/gpio-samsung.c2
-rw-r--r--drivers/gpio/gpio-sta2x11.c5
-rw-r--r--drivers/gpio/gpio-stp-xway.c301
-rw-r--r--drivers/gpio/gpio-tps65910.c3
-rw-r--r--drivers/gpio/gpio-wm8994.c5
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c19
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c5
-rw-r--r--drivers/gpu/drm/drm_crtc.c8
-rw-r--r--drivers/gpu/drm/drm_edid.c62
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c19
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c12
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c35
-rw-r--r--drivers/gpu/drm/gma500/opregion.c8
-rw-r--r--drivers/gpu/drm/gma500/opregion.h5
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c12
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c1
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c4
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c37
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c13
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h6
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c16
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c61
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c76
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h43
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c5
-rw-r--r--drivers/gpu/drm/i915/intel_display.c93
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c84
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c21
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c64
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c21
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c12
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo_regs.h5
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c53
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c68
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c10
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c4
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c385
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c49
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c3
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h12
-rw-r--r--drivers/gpu/drm/radeon/ni.c388
-rw-r--r--drivers/gpu/drm/radeon/nid.h11
-rw-r--r--drivers/gpu/drm/radeon/r600.c215
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c7
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c42
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c8
-rw-r--r--drivers/gpu/drm/radeon/r600d.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon.h14
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c58
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c32
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c54
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c25
-rw-r--r--drivers/gpu/drm/radeon/rs600.c12
-rw-r--r--drivers/gpu/drm/radeon/rs690.c12
-rw-r--r--drivers/gpu/drm/radeon/rv770.c297
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h7
-rw-r--r--drivers/gpu/drm/radeon/si.c481
-rw-r--r--drivers/gpu/drm/radeon/si_reg.h72
-rw-r--r--drivers/gpu/drm/radeon/sid.h19
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c14
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c15
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c13
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c29
-rw-r--r--drivers/gpu/drm/udl/udl_main.c2
-rw-r--r--drivers/gpu/drm/via/via_map.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c2
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c27
-rw-r--r--drivers/hid/Kconfig80
-rw-r--r--drivers/hid/Makefile6
-rw-r--r--drivers/hid/hid-apple.c6
-rw-r--r--drivers/hid/hid-chicony.c1
-rw-r--r--drivers/hid/hid-core.c29
-rw-r--r--drivers/hid/hid-cypress.c2
-rw-r--r--drivers/hid/hid-holtek-kbd.c183
-rw-r--r--drivers/hid/hid-ids.h27
-rw-r--r--drivers/hid/hid-input.c12
-rw-r--r--drivers/hid/hid-lenovo-tpkbd.c564
-rw-r--r--drivers/hid/hid-logitech-dj.c38
-rw-r--r--drivers/hid/hid-magicmouse.c151
-rw-r--r--drivers/hid/hid-multitouch.c23
-rw-r--r--drivers/hid/hid-picolcd.c6
-rw-r--r--drivers/hid/hid-roccat-arvo.c16
-rw-r--r--drivers/hid/hid-roccat-common.c72
-rw-r--r--drivers/hid/hid-roccat-common.h16
-rw-r--r--drivers/hid/hid-roccat-isku.c52
-rw-r--r--drivers/hid/hid-roccat-isku.h7
-rw-r--r--drivers/hid/hid-roccat-kone.c6
-rw-r--r--drivers/hid/hid-roccat-koneplus.c98
-rw-r--r--drivers/hid/hid-roccat-koneplus.h22
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c71
-rw-r--r--drivers/hid/hid-roccat-kovaplus.h15
-rw-r--r--drivers/hid/hid-roccat-pyra.c59
-rw-r--r--drivers/hid/hid-roccat-pyra.h12
-rw-r--r--drivers/hid/hid-roccat-savu.c316
-rw-r--r--drivers/hid/hid-roccat-savu.h87
-rw-r--r--drivers/hid/hid-wiimote-ext.c2
-rw-r--r--drivers/hid/hidraw.c12
-rw-r--r--drivers/hid/uhid.c572
-rw-r--r--drivers/hid/usbhid/Kconfig8
-rw-r--r--drivers/hid/usbhid/hid-core.c294
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/usbhid/usbhid.h1
-rw-r--r--drivers/hwmon/Kconfig26
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/abituguru.c19
-rw-r--r--drivers/hwmon/abituguru3.c19
-rw-r--r--drivers/hwmon/acpi_power_meter.c21
-rw-r--r--drivers/hwmon/adm1021.c18
-rw-r--r--drivers/hwmon/adm1025.c15
-rw-r--r--drivers/hwmon/adm1026.c15
-rw-r--r--drivers/hwmon/adm1031.c15
-rw-r--r--drivers/hwmon/adm9240.c14
-rw-r--r--drivers/hwmon/adt7475.c7
-rw-r--r--drivers/hwmon/applesmc.c139
-rw-r--r--drivers/hwmon/asc7621.c5
-rw-r--r--drivers/hwmon/atxp1.c16
-rw-r--r--drivers/hwmon/coretemp.c37
-rw-r--r--drivers/hwmon/da9052-hwmon.c344
-rw-r--r--drivers/hwmon/ds1621.c16
-rw-r--r--drivers/hwmon/emc2103.c12
-rw-r--r--drivers/hwmon/emc6w201.c15
-rw-r--r--drivers/hwmon/exynos4_tmu.c20
-rw-r--r--drivers/hwmon/f71805f.c26
-rw-r--r--drivers/hwmon/fam15h_power.c3
-rw-r--r--drivers/hwmon/gl518sm.c15
-rw-r--r--drivers/hwmon/gl520sm.c15
-rw-r--r--drivers/hwmon/gpio-fan.c77
-rw-r--r--drivers/hwmon/hih6130.c293
-rw-r--r--drivers/hwmon/it87.c2
-rw-r--r--drivers/hwmon/jc42.c2
-rw-r--r--drivers/hwmon/k10temp.c5
-rw-r--r--drivers/hwmon/k8temp.c25
-rw-r--r--drivers/hwmon/lineage-pem.c2
-rw-r--r--drivers/hwmon/lm63.c14
-rw-r--r--drivers/hwmon/lm75.c9
-rw-r--r--drivers/hwmon/lm77.c73
-rw-r--r--drivers/hwmon/lm78.c36
-rw-r--r--drivers/hwmon/lm80.c14
-rw-r--r--drivers/hwmon/lm83.c15
-rw-r--r--drivers/hwmon/lm85.c7
-rw-r--r--drivers/hwmon/lm87.c15
-rw-r--r--drivers/hwmon/lm90.c12
-rw-r--r--drivers/hwmon/lm92.c15
-rw-r--r--drivers/hwmon/lm93.c14
-rw-r--r--drivers/hwmon/ltc4261.c2
-rw-r--r--drivers/hwmon/max1111.c9
-rw-r--r--drivers/hwmon/max16065.c2
-rw-r--r--drivers/hwmon/max1619.c15
-rw-r--r--drivers/hwmon/max6639.c17
-rw-r--r--drivers/hwmon/max6642.c15
-rw-r--r--drivers/hwmon/max6650.c10
-rw-r--r--drivers/hwmon/mc13783-adc.c12
-rw-r--r--drivers/hwmon/ntc_thermistor.c2
-rw-r--r--drivers/hwmon/pc87360.c41
-rw-r--r--drivers/hwmon/pc87427.c51
-rw-r--r--drivers/hwmon/pcf8591.c15
-rw-r--r--drivers/hwmon/s3c-hwmon.c7
-rw-r--r--drivers/hwmon/sch5627.c2
-rw-r--r--drivers/hwmon/sch5636.c2
-rw-r--r--drivers/hwmon/sch56xx-common.c406
-rw-r--r--drivers/hwmon/sch56xx-common.h2
-rw-r--r--drivers/hwmon/sis5595.c28
-rw-r--r--drivers/hwmon/smsc47b397.c22
-rw-r--r--drivers/hwmon/smsc47m1.c45
-rw-r--r--drivers/hwmon/smsc47m192.c16
-rw-r--r--drivers/hwmon/thmc50.c17
-rw-r--r--drivers/hwmon/tmp102.c14
-rw-r--r--drivers/hwmon/tmp401.c6
-rw-r--r--drivers/hwmon/tmp421.c13
-rw-r--r--drivers/hwmon/via686a.c23
-rw-r--r--drivers/hwmon/vt1211.c22
-rw-r--r--drivers/hwmon/w83627ehf.c1
-rw-r--r--drivers/hwmon/w83627hf.c46
-rw-r--r--drivers/hwmon/w83781d.c52
-rw-r--r--drivers/hwmon/w83791d.c15
-rw-r--r--drivers/hwmon/w83792d.c18
-rw-r--r--drivers/hwmon/w83795.c11
-rw-r--r--drivers/hwmon/w83l785ts.c34
-rw-r--r--drivers/hwmon/wm831x-hwmon.c9
-rw-r--r--drivers/hwspinlock/hwspinlock_core.c4
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c2
-rw-r--r--drivers/i2c/busses/i2c-imx.c1
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c3
-rw-r--r--drivers/i2c/busses/i2c-nuc900.c3
-rw-r--r--drivers/i2c/busses/i2c-powermac.c157
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c3
-rw-r--r--drivers/i2c/busses/i2c-tegra.c10
-rw-r--r--drivers/i2c/i2c-dev.c30
-rw-r--r--drivers/i2c/muxes/Kconfig12
-rw-r--r--drivers/i2c/muxes/Makefile1
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c279
-rw-r--r--drivers/ide/icside.c17
-rw-r--r--drivers/ide/ide-cs.c3
-rw-r--r--drivers/idle/intel_idle.c41
-rw-r--r--drivers/ieee802154/Kconfig6
-rw-r--r--drivers/ieee802154/Makefile1
-rw-r--r--drivers/ieee802154/at86rf230.c968
-rw-r--r--drivers/iio/Kconfig3
-rw-r--r--drivers/iio/industrialio-core.c16
-rw-r--r--drivers/infiniband/core/cma.c2
-rw-r--r--drivers/infiniband/core/netlink.c17
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c4
-rw-r--r--drivers/infiniband/hw/mlx4/main.c86
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h9
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c22
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_abi.h5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c27
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c64
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c17
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h1
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c35
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c15
-rw-r--r--drivers/input/joystick/as5011.c6
-rw-r--r--drivers/input/joystick/xpad.c6
-rw-r--r--drivers/input/keyboard/mcs_touchkey.c3
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c2
-rw-r--r--drivers/input/keyboard/qt1070.c3
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c3
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c2
-rw-r--r--drivers/input/keyboard/tegra-kbc.c4
-rw-r--r--drivers/input/keyboard/tnetv107x-keypad.c8
-rw-r--r--drivers/input/misc/ad714x.c8
-rw-r--r--drivers/input/misc/dm355evm_keys.c3
-rw-r--r--drivers/input/mouse/bcm5974.c20
-rw-r--r--drivers/input/tablet/wacom_sys.c6
-rw-r--r--drivers/input/touchscreen/ad7879.c2
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c3
-rw-r--r--drivers/input/touchscreen/bu21013_ts.c3
-rw-r--r--drivers/input/touchscreen/cy8ctmg110_ts.c3
-rw-r--r--drivers/input/touchscreen/intel-mid-touch.c2
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c2
-rw-r--r--drivers/input/touchscreen/tnetv107x-ts.c2
-rw-r--r--drivers/input/touchscreen/tsc2005.c3
-rw-r--r--drivers/iommu/amd_iommu.c119
-rw-r--r--drivers/iommu/amd_iommu_init.c19
-rw-r--r--drivers/iommu/amd_iommu_types.h5
-rw-r--r--drivers/iommu/dmar.c194
-rw-r--r--drivers/iommu/intel_irq_remapping.c20
-rw-r--r--drivers/iommu/iommu.c5
-rw-r--r--drivers/iommu/irq_remapping.c5
-rw-r--r--drivers/iommu/irq_remapping.h2
-rw-r--r--drivers/iommu/omap-iommu.c32
-rw-r--r--drivers/iommu/tegra-gart.c20
-rw-r--r--drivers/iommu/tegra-smmu.c6
-rw-r--r--drivers/isdn/gigaset/capi.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c18
-rw-r--r--drivers/isdn/hisax/hfc_usb.c18
-rw-r--r--drivers/isdn/hisax/isurf.c5
-rw-r--r--drivers/isdn/mISDN/stack.c4
-rw-r--r--drivers/leds/Kconfig33
-rw-r--r--drivers/leds/Makefile3
-rw-r--r--drivers/leds/led-class.c23
-rw-r--r--drivers/leds/led-core.c7
-rw-r--r--drivers/leds/leds-da9052.c214
-rw-r--r--drivers/leds/leds-lm3530.c100
-rw-r--r--drivers/leds/leds-lm3533.c785
-rw-r--r--drivers/leds/leds-lp5521.c12
-rw-r--r--drivers/leds/leds-mc13783.c2
-rw-r--r--drivers/leds/leds-pca955x.c95
-rw-r--r--drivers/leds/ledtrig-backlight.c4
-rw-r--r--drivers/leds/ledtrig-gpio.c4
-rw-r--r--drivers/leds/ledtrig-heartbeat.c46
-rw-r--r--drivers/leds/ledtrig-timer.c54
-rw-r--r--drivers/leds/ledtrig-transient.c237
-rw-r--r--drivers/md/dm-mpath.c47
-rw-r--r--drivers/md/dm-raid1.c3
-rw-r--r--drivers/md/dm-region-hash.c5
-rw-r--r--drivers/md/dm-thin-metadata.c136
-rw-r--r--drivers/md/dm-thin-metadata.h13
-rw-r--r--drivers/md/dm-thin.c216
-rw-r--r--drivers/md/md.c45
-rw-r--r--drivers/md/multipath.c3
-rw-r--r--drivers/md/persistent-data/dm-space-map-checker.c54
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c11
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c13
-rw-r--r--drivers/md/raid1.c30
-rw-r--r--drivers/md/raid10.c30
-rw-r--r--drivers/md/raid5.c67
-rw-r--r--drivers/media/common/saa7146_fops.c5
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.c1
-rw-r--r--drivers/media/dvb/frontends/cx24110.c4
-rw-r--r--drivers/media/dvb/frontends/cxd2820r_c.c2
-rw-r--r--drivers/media/dvb/frontends/lg2160.c2
-rw-r--r--drivers/media/dvb/siano/smsusb.c2
-rw-r--r--drivers/media/radio/radio-maxiradio.c2
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c2
-rw-r--r--drivers/media/rc/winbond-cir.c4
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c84
-rw-r--r--drivers/media/video/bt8xx/bttv-driver.c5
-rw-r--r--drivers/media/video/bt8xx/bttv.h1
-rw-r--r--drivers/media/video/bt8xx/bttvp.h1
-rw-r--r--drivers/media/video/bw-qcam.c47
-rw-r--r--drivers/media/video/cx18/cx18-driver.c10
-rw-r--r--drivers/media/video/cx18/cx18-driver.h2
-rw-r--r--drivers/media/video/cx18/cx18-firmware.c9
-rw-r--r--drivers/media/video/cx18/cx18-mailbox.c15
-rw-r--r--drivers/media/video/cx231xx/cx231xx-audio.c4
-rw-r--r--drivers/media/video/cx231xx/cx231xx-vbi.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c89
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c6
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c9
-rw-r--r--drivers/media/video/cx23885/cx23885.h1
-rw-r--r--drivers/media/video/cx25821/cx25821-core.c3
-rw-r--r--drivers/media/video/cx25821/cx25821.h2
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c76
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c2
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c3
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c2
-rw-r--r--drivers/media/video/gspca/gspca.c4
-rw-r--r--drivers/media/video/gspca/ov534.c32
-rw-r--r--drivers/media/video/gspca/ov534_9.c1
-rw-r--r--drivers/media/video/gspca/pac7311.c2
-rw-r--r--drivers/media/video/gspca/sn9c20x.c37
-rw-r--r--drivers/media/video/gspca/sonixj.c2
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c18
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.h2
-rw-r--r--drivers/media/video/mem2mem_testdev.c50
-rw-r--r--drivers/media/video/mx1_camera.c1
-rw-r--r--drivers/media/video/mx2_camera.c37
-rw-r--r--drivers/media/video/omap3isp/isppreview.c6
-rw-r--r--drivers/media/video/pms.c2
-rw-r--r--drivers/media/video/s5p-fimc/fimc-capture.c69
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c21
-rw-r--r--drivers/media/video/s5p-fimc/fimc-lite.c73
-rw-r--r--drivers/media/video/s5p-fimc/fimc-mdevice.c48
-rw-r--r--drivers/media/video/s5p-fimc/fimc-mdevice.h2
-rw-r--r--drivers/media/video/s5p-mfc/regs-mfc.h5
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_dec.c5
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_enc.c13
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_opr.h4
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_shm.h3
-rw-r--r--drivers/media/video/smiapp/Kconfig2
-rw-r--r--drivers/media/video/smiapp/smiapp-core.c2
-rw-r--r--drivers/media/video/tuner-core.c2
-rw-r--r--drivers/media/video/v4l2-dev.c5
-rw-r--r--drivers/media/video/v4l2-ioctl.c1
-rw-r--r--drivers/media/video/vino.c4
-rw-r--r--drivers/media/video/vivi.c6
-rw-r--r--drivers/media/video/zoran/zoran.h4
-rw-r--r--drivers/media/video/zoran/zoran_driver.c4
-rw-r--r--drivers/message/fusion/mptbase.c13
-rw-r--r--drivers/message/fusion/mptctl.c4
-rw-r--r--drivers/mfd/Kconfig1
-rw-r--r--drivers/mfd/ab5500-core.h87
-rw-r--r--drivers/mfd/db8500-prcmu.c1
-rw-r--r--drivers/mfd/mc13xxx-spi.c67
-rw-r--r--drivers/mfd/omap-usb-host.c48
-rw-r--r--drivers/mfd/palmas.c13
-rw-r--r--drivers/mfd/stmpe-i2c.c2
-rw-r--r--drivers/mfd/stmpe-spi.c4
-rw-r--r--drivers/mfd/tps65217.c67
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/iwmc3200top/Kconfig20
-rw-r--r--drivers/misc/iwmc3200top/Makefile29
-rw-r--r--drivers/misc/iwmc3200top/debugfs.c137
-rw-r--r--drivers/misc/iwmc3200top/debugfs.h58
-rw-r--r--drivers/misc/iwmc3200top/fw-download.c358
-rw-r--r--drivers/misc/iwmc3200top/fw-msg.h113
-rw-r--r--drivers/misc/iwmc3200top/iwmc3200top.h205
-rw-r--r--drivers/misc/iwmc3200top/log.c348
-rw-r--r--drivers/misc/iwmc3200top/log.h171
-rw-r--r--drivers/misc/iwmc3200top/main.c662
-rw-r--r--drivers/misc/mei/interrupt.c2
-rw-r--r--drivers/misc/mei/main.c11
-rw-r--r--drivers/misc/mei/wd.c2
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c4
-rw-r--r--drivers/mmc/card/block.c50
-rw-r--r--drivers/mmc/core/Makefile2
-rw-r--r--drivers/mmc/core/cd-gpio.c83
-rw-r--r--drivers/mmc/core/core.c90
-rw-r--r--drivers/mmc/core/host.c6
-rw-r--r--drivers/mmc/core/mmc.c23
-rw-r--r--drivers/mmc/core/mmc_ops.c1
-rw-r--r--drivers/mmc/core/sd.c177
-rw-r--r--drivers/mmc/core/sdio.c13
-rw-r--r--drivers/mmc/core/sdio_cis.c2
-rw-r--r--drivers/mmc/core/slot-gpio.c188
-rw-r--r--drivers/mmc/host/atmel-mci-regs.h14
-rw-r--r--drivers/mmc/host/atmel-mci.c26
-rw-r--r--drivers/mmc/host/dw_mmc.c51
-rw-r--r--drivers/mmc/host/mmci.c19
-rw-r--r--drivers/mmc/host/mxs-mmc.c28
-rw-r--r--drivers/mmc/host/omap.c18
-rw-r--r--drivers/mmc/host/omap_hsmmc.c31
-rw-r--r--drivers/mmc/host/s3cmci.c10
-rw-r--r--drivers/mmc/host/sdhci-dove.c51
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c9
-rw-r--r--drivers/mmc/host/sdhci-pci.c1
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c54
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c52
-rw-r--r--drivers/mmc/host/sdhci-s3c.c2
-rw-r--r--drivers/mmc/host/sdhci-spear.c4
-rw-r--r--drivers/mmc/host/sdhci-tegra.c11
-rw-r--r--drivers/mmc/host/sdhci.c148
-rw-r--r--drivers/mmc/host/sdhci.h1
-rw-r--r--drivers/mmc/host/sh_mmcif.c185
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c66
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c132
-rw-r--r--drivers/mtd/Kconfig2
-rw-r--r--drivers/mtd/bcm63xxpart.c41
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c18
-rw-r--r--drivers/mtd/cmdlinepart.c2
-rw-r--r--drivers/mtd/devices/block2mtd.c7
-rw-r--r--drivers/mtd/devices/docg3.c40
-rw-r--r--drivers/mtd/devices/m25p80.c5
-rw-r--r--drivers/mtd/devices/spear_smi.c14
-rw-r--r--drivers/mtd/lpddr/qinfo_probe.c2
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c13
-rw-r--r--drivers/mtd/maps/lantiq-flash.c76
-rw-r--r--drivers/mtd/maps/pci.c13
-rw-r--r--drivers/mtd/maps/scb2_flash.c15
-rw-r--r--drivers/mtd/maps/wr_sbc82xx_flash.c2
-rw-r--r--drivers/mtd/mtdcore.c57
-rw-r--r--drivers/mtd/mtdoops.c22
-rw-r--r--drivers/mtd/mtdpart.c14
-rw-r--r--drivers/mtd/mtdsuper.c4
-rw-r--r--drivers/mtd/nand/Kconfig42
-rw-r--r--drivers/mtd/nand/alauda.c4
-rw-r--r--drivers/mtd/nand/atmel_nand.c14
-rw-r--r--drivers/mtd/nand/au1550nd.c2
-rw-r--r--drivers/mtd/nand/bcm_umi_bch.c14
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c9
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c4
-rw-r--r--drivers/mtd/nand/cafe_nand.c37
-rw-r--r--drivers/mtd/nand/cs553x_nand.c1
-rw-r--r--drivers/mtd/nand/denali.c38
-rw-r--r--drivers/mtd/nand/docg4.c22
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c37
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c47
-rw-r--r--drivers/mtd/nand/fsmc_nand.c26
-rw-r--r--drivers/mtd/nand/gpmi-nand/bch-regs.h42
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c27
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c176
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h6
-rw-r--r--drivers/mtd/nand/h1910.c1
-rw-r--r--drivers/mtd/nand/jz4740_nand.c6
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c1
-rw-r--r--drivers/mtd/nand/mxc_nand.c645
-rw-r--r--drivers/mtd/nand/nand_base.c240
-rw-r--r--drivers/mtd/nand/nand_bbt.c1
-rw-r--r--drivers/mtd/nand/nand_ids.c6
-rw-r--r--drivers/mtd/nand/nandsim.c40
-rw-r--r--drivers/mtd/nand/omap2.c253
-rw-r--r--drivers/mtd/nand/pasemi_nand.c1
-rw-r--r--drivers/mtd/nand/plat_nand.c28
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c6
-rw-r--r--drivers/mtd/nand/r852.c22
-rw-r--r--drivers/mtd/nand/sh_flctl.c8
-rw-r--r--drivers/mtd/nand/sm_common.c9
-rw-r--r--drivers/mtd/onenand/onenand_base.c6
-rw-r--r--drivers/mtd/ubi/Kconfig2
-rw-r--r--drivers/mtd/ubi/cdev.c2
-rw-r--r--drivers/mtd/ubi/debug.c12
-rw-r--r--drivers/mtd/ubi/misc.c25
-rw-r--r--drivers/mtd/ubi/ubi.h1
-rw-r--r--drivers/mtd/ubi/vmt.c20
-rw-r--r--drivers/mtd/ubi/wl.c17
-rw-r--r--drivers/net/appletalk/cops.c2
-rw-r--r--drivers/net/bonding/bond_3ad.c13
-rw-r--r--drivers/net/bonding/bond_3ad.h4
-rw-r--r--drivers/net/bonding/bond_alb.c26
-rw-r--r--drivers/net/bonding/bond_debugfs.c2
-rw-r--r--drivers/net/bonding/bond_main.c96
-rw-r--r--drivers/net/bonding/bond_procfs.c15
-rw-r--r--drivers/net/bonding/bond_sysfs.c10
-rw-r--r--drivers/net/bonding/bonding.h4
-rw-r--r--drivers/net/caif/caif_hsi.c551
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/bfin_can.c4
-rw-r--r--drivers/net/can/c_can/Kconfig20
-rw-r--r--drivers/net/can/c_can/Makefile1
-rw-r--r--drivers/net/can/c_can/c_can.c140
-rw-r--r--drivers/net/can/c_can/c_can.h164
-rw-r--r--drivers/net/can/c_can/c_can_pci.c221
-rw-r--r--drivers/net/can/c_can/c_can_platform.c76
-rw-r--r--drivers/net/can/cc770/cc770.c4
-rw-r--r--drivers/net/can/cc770/cc770_platform.c2
-rw-r--r--drivers/net/can/dev.c37
-rw-r--r--drivers/net/can/flexcan.c154
-rw-r--r--drivers/net/can/janz-ican3.c241
-rw-r--r--drivers/net/can/mcp251x.c5
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c2
-rw-r--r--drivers/net/can/mscan/mscan.c2
-rw-r--r--drivers/net/can/pch_can.c2
-rw-r--r--drivers/net/can/sja1000/sja1000.c2
-rw-r--r--drivers/net/can/softing/softing_main.c2
-rw-r--r--drivers/net/can/ti_hecc.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c2
-rw-r--r--drivers/net/can/usb/esd_usb2.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h2
-rw-r--r--drivers/net/can/vcan.c27
-rw-r--r--drivers/net/cris/eth_v10.c2
-rw-r--r--drivers/net/dummy.c23
-rw-r--r--drivers/net/ethernet/3com/3c501.c2
-rw-r--r--drivers/net/ethernet/8390/Kconfig14
-rw-r--r--drivers/net/ethernet/8390/Makefile1
-rw-r--r--drivers/net/ethernet/8390/apne.c2
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c480
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c8
-rw-r--r--drivers/net/ethernet/amd/declance.c4
-rw-r--r--drivers/net/ethernet/amd/lance.c5
-rw-r--r--drivers/net/ethernet/apple/macmace.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.c8
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.h5
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c94
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c105
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_param.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c45
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c58
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c10
-rw-r--r--drivers/net/ethernet/broadcom/b44.c100
-rw-r--r--drivers/net/ethernet/broadcom/b44.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c106
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h45
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h212
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c273
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h63
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c585
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h184
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c1286
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h53
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c310
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h168
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h128
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h42
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c68
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h2
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c63
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h13
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c287
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h47
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.c97
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cs.h34
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs.h63
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_cna.h15
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h35
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_status.h3
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c393
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h43
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c48
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_msgq.c4
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi.h81
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_cna.h42
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_enet.h107
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_reg.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/bna.h51
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c15
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_hw_defs.h33
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c17
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h66
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c12
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/cna_fwimg.c4
-rw-r--r--drivers/net/ethernet/cadence/macb.c13
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c35
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c30
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c5
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h31
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c183
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h59
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h9
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c525
-rw-r--r--drivers/net/ethernet/ethoc.c6
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c9
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c11
-rw-r--r--drivers/net/ethernet/freescale/fec.c32
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c29
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c508
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c420
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/hp/hp100.c6
-rw-r--r--drivers/net/ethernet/i825xx/lp486e.c8
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c4
-rw-r--r--drivers/net/ethernet/intel/Kconfig10
-rw-r--r--drivers/net/ethernet/intel/e100.c40
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c1
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c8
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c23
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c17
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c12
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c60
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c124
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c43
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c8
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h25
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c52
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c164
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c12
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c29
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c1
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c5
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.c5
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ids.h5
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h104
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c48
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c78
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c26
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c159
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c395
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c840
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c802
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c23
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c200
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c223
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h46
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c172
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h114
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c1386
-rw-r--r--drivers/net/ethernet/jme.c14
-rw-r--r--drivers/net/ethernet/lantiq_etop.c3
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c19
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/sky2.c28
-rw-r--r--drivers/net/ethernet/marvell/sky2.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c382
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c648
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c270
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c142
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c523
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h39
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h48
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c112
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c285
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c35
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c10
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c24
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c8
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c23
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h3
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c5
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c10
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c27
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c12
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c10
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c42
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c21
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h15
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c37
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h13
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c315
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c100
-rw-r--r--drivers/net/ethernet/rdc/r6040.c29
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c24
-rw-r--r--drivers/net/ethernet/realtek/8139too.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c1012
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c385
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h77
-rw-r--r--drivers/net/ethernet/sfc/efx.c10
-rw-r--r--drivers/net/ethernet/sfc/enum.h8
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon.c35
-rw-r--r--drivers/net/ethernet/sfc/falcon_xmac.c12
-rw-r--r--drivers/net/ethernet/sfc/filter.c2
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c11
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c1
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h3
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h11
-rw-r--r--drivers/net/ethernet/sfc/nic.c11
-rw-r--r--drivers/net/ethernet/sfc/nic.h18
-rw-r--r--drivers/net/ethernet/sfc/rx.c23
-rw-r--r--drivers/net/ethernet/sfc/selftest.c64
-rw-r--r--drivers/net/ethernet/sfc/siena.c37
-rw-r--r--drivers/net/ethernet/sfc/tx.c93
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c4
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c6
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c26
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c101
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h71
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c57
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c234
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c16
-rw-r--r--drivers/net/ethernet/sun/niu.c18
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c1
-rw-r--r--drivers/net/ethernet/sun/sunhme.c3
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c177
-rw-r--r--drivers/net/ethernet/ti/cpsw.c25
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c1
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c208
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c25
-rw-r--r--drivers/net/ethernet/tile/Kconfig2
-rw-r--r--drivers/net/ethernet/tile/Makefile4
-rw-r--r--drivers/net/ethernet/tile/tilegx.c1905
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c6
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c3
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/fddi/defxx.c4
-rw-r--r--drivers/net/fddi/skfp/pmf.c8
-rw-r--r--drivers/net/hamradio/mkiss.c8
-rw-r--r--drivers/net/hyperv/hyperv_net.h2
-rw-r--r--drivers/net/hyperv/netvsc.c14
-rw-r--r--drivers/net/hyperv/netvsc_drv.c30
-rw-r--r--drivers/net/hyperv/rndis_filter.c79
-rw-r--r--drivers/net/irda/ali-ircc.c6
-rw-r--r--drivers/net/irda/au1k_ir.c2
-rw-r--r--drivers/net/loopback.c4
-rw-r--r--drivers/net/macvtap.c8
-rw-r--r--drivers/net/phy/Kconfig5
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/amd.c8
-rw-r--r--drivers/net/phy/bcm63xx.c31
-rw-r--r--drivers/net/phy/bcm87xx.c231
-rw-r--r--drivers/net/phy/broadcom.c119
-rw-r--r--drivers/net/phy/cicada.c35
-rw-r--r--drivers/net/phy/davicom.c41
-rw-r--r--drivers/net/phy/dp83640.c23
-rw-r--r--drivers/net/phy/fixed.c4
-rw-r--r--drivers/net/phy/icplus.c38
-rw-r--r--drivers/net/phy/lxt.c47
-rw-r--r--drivers/net/phy/marvell.c22
-rw-r--r--drivers/net/phy/mdio-mux.c10
-rw-r--r--drivers/net/phy/mdio_bus.c16
-rw-r--r--drivers/net/phy/micrel.c70
-rw-r--r--drivers/net/phy/national.c8
-rw-r--r--drivers/net/phy/phy.c316
-rw-r--r--drivers/net/phy/phy_device.c139
-rw-r--r--drivers/net/phy/realtek.c6
-rw-r--r--drivers/net/phy/smsc.c64
-rw-r--r--drivers/net/phy/spi_ks8995.c4
-rw-r--r--drivers/net/phy/ste10Xp.c21
-rw-r--r--drivers/net/phy/vitesse.c52
-rw-r--r--drivers/net/slip/slip.c4
-rw-r--r--drivers/net/team/Kconfig13
-rw-r--r--drivers/net/team/Makefile1
-rw-r--r--drivers/net/team/team.c777
-rw-r--r--drivers/net/team/team_mode_activebackup.c17
-rw-r--r--drivers/net/team/team_mode_broadcast.c87
-rw-r--r--drivers/net/team/team_mode_loadbalance.c546
-rw-r--r--drivers/net/team/team_mode_roundrobin.c13
-rw-r--r--drivers/net/tun.c153
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/asix.h218
-rw-r--r--drivers/net/usb/asix_common.c631
-rw-r--r--drivers/net/usb/asix_devices.c (renamed from drivers/net/usb/asix.c)665
-rw-r--r--drivers/net/usb/ax88172a.c414
-rw-r--r--drivers/net/usb/cdc-phonet.c4
-rw-r--r--drivers/net/usb/ipheth.c5
-rw-r--r--drivers/net/usb/mcs7830.c25
-rw-r--r--drivers/net/usb/pegasus.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c447
-rw-r--r--drivers/net/usb/sierra_net.c14
-rw-r--r--drivers/net/usb/smsc75xx.c2
-rw-r--r--drivers/net/usb/smsc95xx.c34
-rw-r--r--drivers/net/usb/usbnet.c130
-rw-r--r--drivers/net/virtio_net.c26
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/wan/x25_asy.c2
-rw-r--r--drivers/net/wimax/i2400m/Kconfig22
-rw-r--r--drivers/net/wimax/i2400m/Makefile8
-rw-r--r--drivers/net/wimax/i2400m/control.c4
-rw-r--r--drivers/net/wimax/i2400m/driver.c5
-rw-r--r--drivers/net/wimax/i2400m/fw.c5
-rw-r--r--drivers/net/wimax/i2400m/i2400m-sdio.h157
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h13
-rw-r--r--drivers/net/wimax/i2400m/sdio-debug-levels.h22
-rw-r--r--drivers/net/wimax/i2400m/sdio-fw.c210
-rw-r--r--drivers/net/wimax/i2400m/sdio-rx.c301
-rw-r--r--drivers/net/wimax/i2400m/sdio-tx.c177
-rw-r--r--drivers/net/wimax/i2400m/sdio.c602
-rw-r--r--drivers/net/wimax/i2400m/usb-fw.c2
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/adm8211.c5
-rw-r--r--drivers/net/wireless/airo.c8
-rw-r--r--drivers/net/wireless/ath/ath.h3
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig8
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c40
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c288
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h8
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c8
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h46
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c28
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c28
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h1
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c48
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c158
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h58
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c489
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h32
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c776
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c176
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c164
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_initvals.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c216
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c535
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c734
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.h40
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c124
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h43
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h229
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h882
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9340_initvals.h755
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h1404
-rw-r--r--drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h1284
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h772
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h94
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c528
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h25
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c69
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c122
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c87
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c304
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h107
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c39
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c555
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c1408
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c246
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c780
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h171
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c532
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c212
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h11
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/fwdesc.h3
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c6
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c53
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c69
-rw-r--r--drivers/net/wireless/ath/carl9170/version.h8
-rw-r--r--drivers/net/wireless/ath/key.c4
-rw-r--r--drivers/net/wireless/atmel.c4
-rw-r--r--drivers/net/wireless/b43/b43.h9
-rw-r--r--drivers/net/wireless/b43/main.c45
-rw-r--r--drivers/net/wireless/b43/phy_n.c17
-rw-r--r--drivers/net/wireless/b43/xmit.c9
-rw-r--r--drivers/net/wireless/b43legacy/dma.c4
-rw-r--r--drivers/net/wireless/b43legacy/main.c4
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c29
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c126
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h59
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c9
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c669
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c17
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c7
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c131
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.h18
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.c16
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c1223
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c17
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c21
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c127
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c22
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c142
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.c172
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/utils.c2
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/brcm80211/include/soc.h62
-rw-r--r--drivers/net/wireless/hostap/hostap_proc.c3
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c20
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c27
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c2
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c17
-rw-r--r--drivers/net/wireless/iwlegacy/common.c23
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig13
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile33
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/Makefile13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn.h)113
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-calib.c)24
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn-calib.h)4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h (renamed from drivers/net/wireless/iwlwifi/iwl-commands.h)48
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c (renamed from drivers/net/wireless/iwlwifi/iwl-debugfs.c)43
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h (renamed from drivers/net/wireless/iwlwifi/iwl-dev.h)192
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-devices.c)191
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.c (renamed from drivers/net/wireless/iwlwifi/iwl-led.c)5
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.h (renamed from drivers/net/wireless/iwlwifi/iwl-led.h)0
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-lib.c)24
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c (renamed from drivers/net/wireless/iwlwifi/iwl-mac80211.c)216
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn.c)504
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.c (renamed from drivers/net/wireless/iwlwifi/iwl-power.c)11
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.h (renamed from drivers/net/wireless/iwlwifi/iwl-power.h)2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rs.c)51
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rs.h)3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rx.c)78
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rxon.c)54
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c (renamed from drivers/net/wireless/iwlwifi/iwl-scan.c)195
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-sta.c)64
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/testmode.c471
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-tt.c)13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn-tt.h)2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-tx.c)62
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c (renamed from drivers/net/wireless/iwlwifi/iwl-ucode.c)71
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h30
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h28
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c175
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c903
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h138
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.c463
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.h (renamed from drivers/net/wireless/iwlwifi/iwl-phy-db.h)71
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c1148
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h269
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c53
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c288
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.c856
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.h161
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.c1114
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h82
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/1000.c (renamed from drivers/net/wireless/iwlwifi/iwl-1000.c)19
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/2000.c (renamed from drivers/net/wireless/iwlwifi/iwl-2000.c)28
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/5000.c (renamed from drivers/net/wireless/iwlwifi/iwl-5000.c)20
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/6000.c (renamed from drivers/net/wireless/iwlwifi/iwl-6000.c)54
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/cfg.h (renamed from drivers/net/wireless/iwlwifi/iwl-cfg.h)0
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c (renamed from drivers/net/wireless/iwlwifi/iwl-pci.c)5
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h (renamed from drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h)25
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c (renamed from drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c)106
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c (renamed from drivers/net/wireless/iwlwifi/iwl-trans-pcie.c)392
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c (renamed from drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c)214
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig39
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Makefile10
-rw-r--r--drivers/net/wireless/iwmc3200wifi/bus.h57
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c882
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.h31
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c1002
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h509
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debug.h123
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debugfs.c488
-rw-r--r--drivers/net/wireless/iwmc3200wifi/eeprom.c234
-rw-r--r--drivers/net/wireless/iwmc3200wifi/eeprom.h127
-rw-r--r--drivers/net/wireless/iwmc3200wifi/fw.c416
-rw-r--r--drivers/net/wireless/iwmc3200wifi/fw.h100
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.c470
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.h237
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h367
-rw-r--r--drivers/net/wireless/iwmc3200wifi/lmac.h484
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c847
-rw-r--r--drivers/net/wireless/iwmc3200wifi/netdev.c191
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c1701
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.h60
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c509
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.h64
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.c3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.h283
-rw-r--r--drivers/net/wireless/iwmc3200wifi/tx.c529
-rw-r--r--drivers/net/wireless/iwmc3200wifi/umac.h789
-rw-r--r--drivers/net/wireless/libertas/cfg.c46
-rw-r--r--drivers/net/wireless/libertas/cmd.c25
-rw-r--r--drivers/net/wireless/libertas/cmd.h4
-rw-r--r--drivers/net/wireless/libertas/debugfs.c4
-rw-r--r--drivers/net/wireless/libertas/dev.h2
-rw-r--r--drivers/net/wireless/libertas/firmware.c2
-rw-r--r--drivers/net/wireless/libertas/host.h1
-rw-r--r--drivers/net/wireless/libertas/if_usb.c3
-rw-r--r--drivers/net/wireless/libertas/main.c6
-rw-r--r--drivers/net/wireless/libertas/mesh.c7
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c51
-rw-r--r--drivers/net/wireless/mwifiex/11n.c14
-rw-r--r--drivers/net/wireless/mwifiex/11n.h3
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c23
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.h7
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c445
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c31
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c7
-rw-r--r--drivers/net/wireless/mwifiex/decl.h9
-rw-r--r--drivers/net/wireless/mwifiex/fw.h60
-rw-r--r--drivers/net/wireless/mwifiex/ie.c191
-rw-r--r--drivers/net/wireless/mwifiex/init.c67
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h21
-rw-r--r--drivers/net/wireless/mwifiex/join.c20
-rw-r--r--drivers/net/wireless/mwifiex/main.c11
-rw-r--r--drivers/net/wireless/mwifiex/main.h41
-rw-r--r--drivers/net/wireless/mwifiex/scan.c108
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c6
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c114
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c151
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c11
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c290
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c10
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c322
-rw-r--r--drivers/net/wireless/mwifiex/usb.c28
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c3
-rw-r--r--drivers/net/wireless/mwl8k.c5
-rw-r--r--drivers/net/wireless/orinoco/cfg.c11
-rw-r--r--drivers/net/wireless/p54/eeprom.c4
-rw-r--r--drivers/net/wireless/p54/fwio.c2
-rw-r--r--drivers/net/wireless/p54/txrx.c6
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c2
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c7
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig8
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h181
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c388
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c83
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c15
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c9
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c23
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c20
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/leds.c2
-rw-r--r--drivers/net/wireless/rtlwifi/base.c4
-rw-r--r--drivers/net/wireless/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c7
-rw-r--r--drivers/net/wireless/rtlwifi/core.c14
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c4
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c33
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/fw.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c34
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c46
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c14
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h1
-rw-r--r--drivers/net/wireless/ti/Kconfig1
-rw-r--r--drivers/net/wireless/ti/Makefile1
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c9
-rw-r--r--drivers/net/wireless/ti/wl1251/event.c3
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c67
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c7
-rw-r--r--drivers/net/wireless/ti/wl1251/wl1251.h1
-rw-r--r--drivers/net/wireless/ti/wl12xx/Makefile2
-rw-r--r--drivers/net/wireless/ti/wl12xx/acx.h237
-rw-r--r--drivers/net/wireless/ti/wl12xx/cmd.c58
-rw-r--r--drivers/net/wireless/ti/wl12xx/debugfs.c243
-rw-r--r--drivers/net/wireless/ti/wl12xx/debugfs.h28
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c621
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h22
-rw-r--r--drivers/net/wireless/ti/wl18xx/Kconfig7
-rw-r--r--drivers/net/wireless/ti/wl18xx/Makefile3
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.c111
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.h287
-rw-r--r--drivers/net/wireless/ti/wl18xx/conf.h111
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c403
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.h28
-rw-r--r--drivers/net/wireless/ti/wl18xx/io.c75
-rw-r--r--drivers/net/wireless/ti/wl18xx/io.h28
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c1610
-rw-r--r--drivers/net/wireless/ti/wl18xx/reg.h191
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.c127
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.h46
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h95
-rw-r--r--drivers/net/wireless/ti/wlcore/Kconfig1
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c18
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h263
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.c184
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c173
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h40
-rw-r--r--drivers/net/wireless/ti/wlcore/conf.h99
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c643
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.h87
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c39
-rw-r--r--drivers/net/wireless/ti/wlcore/hw_ops.h90
-rw-r--r--drivers/net/wireless/ti/wlcore/ini.h22
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c62
-rw-r--r--drivers/net/wireless/ti/wlcore/io.c61
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h145
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c921
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c37
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c52
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h15
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c61
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.h19
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c91
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c20
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c112
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c282
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h53
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h119
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h (renamed from drivers/net/wireless/ti/wlcore/wl12xx.h)75
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.h2
-rw-r--r--drivers/net/xen-netback/netback.c7
-rw-r--r--drivers/net/xen-netfront.c8
-rw-r--r--drivers/nfc/nfcwilink.c7
-rw-r--r--drivers/nfc/pn533.c846
-rw-r--r--drivers/nfc/pn544_hci.c49
-rw-r--r--drivers/of/base.c15
-rw-r--r--drivers/of/of_mdio.c16
-rw-r--r--drivers/of/of_pci_irq.c2
-rw-r--r--drivers/of/platform.c8
-rw-r--r--drivers/oprofile/oprofile_perf.c23
-rw-r--r--drivers/pci/pci-driver.c12
-rw-r--r--drivers/pci/pci.c2
-rw-r--r--drivers/pci/quirks.c6
-rw-r--r--drivers/pinctrl/core.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx.c32
-rw-r--r--drivers/pinctrl/pinctrl-imx6q.c2
-rw-r--r--drivers/pinctrl/pinctrl-mxs.c13
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c47
-rw-r--r--drivers/pinctrl/pinctrl-sirf.c491
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.h2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1310.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1340.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear300.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear310.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear320.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.h2
-rw-r--r--drivers/platform/x86/acer-wmi.c34
-rw-r--r--drivers/platform/x86/acerhdf.c2
-rw-r--r--drivers/platform/x86/apple-gmux.c4
-rw-r--r--drivers/platform/x86/classmate-laptop.c13
-rw-r--r--drivers/platform/x86/dell-laptop.c308
-rw-r--r--drivers/platform/x86/fujitsu-tablet.c42
-rw-r--r--drivers/platform/x86/hdaps.c8
-rw-r--r--drivers/platform/x86/hp-wmi.c10
-rw-r--r--drivers/platform/x86/hp_accel.c15
-rw-r--r--drivers/platform/x86/ideapad-laptop.c15
-rw-r--r--drivers/platform/x86/intel_ips.c39
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c16
-rw-r--r--drivers/platform/x86/msi-laptop.c7
-rw-r--r--drivers/platform/x86/panasonic-laptop.c16
-rw-r--r--drivers/platform/x86/sony-laptop.c1588
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c25
-rw-r--r--drivers/platform/x86/toshiba_acpi.c155
-rw-r--r--drivers/platform/x86/toshiba_bluetooth.c10
-rw-r--r--drivers/platform/x86/xo1-rfkill.c13
-rw-r--r--drivers/platform/x86/xo15-ebook.c8
-rw-r--r--drivers/power/Kconfig12
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/ab8500_btemp.c12
-rw-r--r--drivers/power/ab8500_charger.c13
-rw-r--r--drivers/power/ab8500_fg.c12
-rw-r--r--drivers/power/avs/Kconfig12
-rw-r--r--drivers/power/avs/Makefile1
-rw-r--r--drivers/power/avs/smartreflex.c1126
-rw-r--r--drivers/power/charger-manager.c392
-rw-r--r--drivers/power/ds2781_battery.c20
-rw-r--r--drivers/power/isp1704_charger.c2
-rw-r--r--drivers/power/max17042_battery.c148
-rw-r--r--drivers/power/power_supply_sysfs.c1
-rw-r--r--drivers/power/sbs-battery.c2
-rw-r--r--drivers/power/smb347-charger.c712
-rw-r--r--drivers/rapidio/Kconfig14
-rw-r--r--drivers/rapidio/devices/Makefile3
-rw-r--r--drivers/rapidio/devices/tsi721.c211
-rw-r--r--drivers/rapidio/devices/tsi721.h105
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c823
-rw-r--r--drivers/rapidio/rio.c81
-rw-r--r--drivers/regulator/Kconfig37
-rw-r--r--drivers/regulator/Makefile6
-rw-r--r--drivers/regulator/aat2870-regulator.c21
-rw-r--r--drivers/regulator/ab3100.c119
-rw-r--r--drivers/regulator/ab8500.c118
-rw-r--r--drivers/regulator/ad5398.c9
-rw-r--r--drivers/regulator/anatop-regulator.c44
-rw-r--r--drivers/regulator/arizona-ldo1.c138
-rw-r--r--drivers/regulator/arizona-micsupp.c188
-rw-r--r--drivers/regulator/core.c415
-rw-r--r--drivers/regulator/da903x.c6
-rw-r--r--drivers/regulator/da9052-regulator.c4
-rw-r--r--drivers/regulator/db8500-prcmu.c40
-rw-r--r--drivers/regulator/fixed-helper.c19
-rw-r--r--drivers/regulator/fixed.c163
-rw-r--r--drivers/regulator/gpio-regulator.c131
-rw-r--r--drivers/regulator/isl6271a-regulator.c13
-rw-r--r--drivers/regulator/lp3971.c66
-rw-r--r--drivers/regulator/lp3972.c102
-rw-r--r--drivers/regulator/lp872x.c943
-rw-r--r--drivers/regulator/lp8788-buck.c629
-rw-r--r--drivers/regulator/lp8788-ldo.c842
-rw-r--r--drivers/regulator/max1586.c108
-rw-r--r--drivers/regulator/max77686.c389
-rw-r--r--drivers/regulator/max8649.c1
-rw-r--r--drivers/regulator/max8952.c60
-rw-r--r--drivers/regulator/max8997.c40
-rw-r--r--drivers/regulator/max8998.c133
-rw-r--r--drivers/regulator/mc13783-regulator.c38
-rw-r--r--drivers/regulator/mc13892-regulator.c43
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c36
-rw-r--r--drivers/regulator/mc13xxx.h11
-rw-r--r--drivers/regulator/of_regulator.c57
-rw-r--r--drivers/regulator/palmas-regulator.c73
-rw-r--r--drivers/regulator/pcap-regulator.c95
-rw-r--r--drivers/regulator/pcf50633-regulator.c20
-rw-r--r--drivers/regulator/rc5t583-regulator.c24
-rw-r--r--drivers/regulator/s2mps11.c363
-rw-r--r--drivers/regulator/s5m8767.c231
-rw-r--r--drivers/regulator/tps6105x-regulator.c14
-rw-r--r--drivers/regulator/tps62360-regulator.c57
-rw-r--r--drivers/regulator/tps65023-regulator.c201
-rw-r--r--drivers/regulator/tps6507x-regulator.c98
-rw-r--r--drivers/regulator/tps65217-regulator.c140
-rw-r--r--drivers/regulator/tps6524x-regulator.c96
-rw-r--r--drivers/regulator/tps6586x-regulator.c106
-rw-r--r--drivers/regulator/tps65910-regulator.c425
-rw-r--r--drivers/regulator/twl-regulator.c92
-rw-r--r--drivers/regulator/wm831x-dcdc.c78
-rw-r--r--drivers/regulator/wm831x-ldo.c131
-rw-r--r--drivers/regulator/wm8350-regulator.c426
-rw-r--r--drivers/regulator/wm8400-regulator.c25
-rw-r--r--drivers/regulator/wm8994-regulator.c93
-rw-r--r--drivers/remoteproc/Kconfig2
-rw-r--r--drivers/remoteproc/omap_remoteproc.c2
-rw-r--r--drivers/remoteproc/remoteproc_core.c14
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c57
-rw-r--r--drivers/rtc/Kconfig42
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/rtc-ab8500.c10
-rw-r--r--drivers/rtc/rtc-at91rm9200.c1
-rw-r--r--drivers/rtc/rtc-cmos.c10
-rw-r--r--drivers/rtc/rtc-ds1307.c20
-rw-r--r--drivers/rtc/rtc-ep93xx.c24
-rw-r--r--drivers/rtc/rtc-lpc32xx.c12
-rw-r--r--drivers/rtc/rtc-m41t93.c46
-rw-r--r--drivers/rtc/rtc-mxc.c5
-rw-r--r--drivers/rtc/rtc-pcf8563.c44
-rw-r--r--drivers/rtc/rtc-pl031.c14
-rw-r--r--drivers/rtc/rtc-s3c.c2
-rw-r--r--drivers/rtc/rtc-spear.c12
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c8
-rw-r--r--drivers/rtc/rtc-tegra.c50
-rw-r--r--drivers/rtc/rtc-twl.c2
-rw-r--r--drivers/s390/block/dasd.c35
-rw-r--r--drivers/s390/block/dasd_3990_erp.c3
-rw-r--r--drivers/s390/block/dasd_alias.c2
-rw-r--r--drivers/s390/block/dasd_devmap.c3
-rw-r--r--drivers/s390/block/dasd_diag.c3
-rw-r--r--drivers/s390/block/dasd_diag.h3
-rw-r--r--drivers/s390/block/dasd_eckd.c2
-rw-r--r--drivers/s390/block/dasd_eckd.h3
-rw-r--r--drivers/s390/block/dasd_eer.c2
-rw-r--r--drivers/s390/block/dasd_erp.c3
-rw-r--r--drivers/s390/block/dasd_fba.c1
-rw-r--r--drivers/s390/block/dasd_fba.h3
-rw-r--r--drivers/s390/block/dasd_genhd.c3
-rw-r--r--drivers/s390/block/dasd_int.h6
-rw-r--r--drivers/s390/block/dasd_ioctl.c3
-rw-r--r--drivers/s390/block/dasd_proc.c3
-rw-r--r--drivers/s390/char/ctrlchar.c3
-rw-r--r--drivers/s390/char/ctrlchar.h3
-rw-r--r--drivers/s390/char/keyboard.c3
-rw-r--r--drivers/s390/char/keyboard.h3
-rw-r--r--drivers/s390/char/sclp.c10
-rw-r--r--drivers/s390/char/sclp.h10
-rw-r--r--drivers/s390/char/sclp_cmd.c38
-rw-r--r--drivers/s390/char/sclp_config.c2
-rw-r--r--drivers/s390/char/sclp_cpi.c1
-rw-r--r--drivers/s390/char/sclp_cpi_sys.c1
-rw-r--r--drivers/s390/char/sclp_cpi_sys.h1
-rw-r--r--drivers/s390/char/sclp_ocf.c1
-rw-r--r--drivers/s390/char/sclp_quiesce.c3
-rw-r--r--drivers/s390/char/sclp_sdias.c4
-rw-r--r--drivers/s390/char/sclp_tty.c3
-rw-r--r--drivers/s390/char/sclp_tty.h3
-rw-r--r--drivers/s390/char/tape.h1
-rw-r--r--drivers/s390/char/tape_34xx.c1
-rw-r--r--drivers/s390/char/tape_3590.c1
-rw-r--r--drivers/s390/char/tape_3590.h3
-rw-r--r--drivers/s390/char/tape_char.c3
-rw-r--r--drivers/s390/char/tape_class.c5
-rw-r--r--drivers/s390/char/tape_class.h3
-rw-r--r--drivers/s390/char/tape_core.c1
-rw-r--r--drivers/s390/char/tape_proc.c3
-rw-r--r--drivers/s390/char/tape_std.c3
-rw-r--r--drivers/s390/char/tape_std.h3
-rw-r--r--drivers/s390/char/tty3270.c3
-rw-r--r--drivers/s390/char/tty3270.h2
-rw-r--r--drivers/s390/char/vmcp.c2
-rw-r--r--drivers/s390/char/vmcp.h2
-rw-r--r--drivers/s390/char/vmlogrdr.c45
-rw-r--r--drivers/s390/char/vmwatchdog.c2
-rw-r--r--drivers/s390/char/zcore.c2
-rw-r--r--drivers/s390/cio/airq.c3
-rw-r--r--drivers/s390/cio/blacklist.c4
-rw-r--r--drivers/s390/cio/chp.c16
-rw-r--r--drivers/s390/cio/chp.h4
-rw-r--r--drivers/s390/cio/chsc.c3
-rw-r--r--drivers/s390/cio/cio.c3
-rw-r--r--drivers/s390/cio/cmf.c6
-rw-r--r--drivers/s390/cio/crw.c2
-rw-r--r--drivers/s390/cio/device.c3
-rw-r--r--drivers/s390/cio/device_fsm.c3
-rw-r--r--drivers/s390/cio/device_id.c2
-rw-r--r--drivers/s390/cio/device_pgid.c2
-rw-r--r--drivers/s390/cio/device_status.c5
-rw-r--r--drivers/s390/cio/idset.c2
-rw-r--r--drivers/s390/cio/idset.h2
-rw-r--r--drivers/s390/cio/qdio.h4
-rw-r--r--drivers/s390/cio/qdio_debug.c4
-rw-r--r--drivers/s390/cio/qdio_debug.h2
-rw-r--r--drivers/s390/cio/qdio_main.c4
-rw-r--r--drivers/s390/cio/qdio_setup.c4
-rw-r--r--drivers/s390/cio/qdio_thinint.c4
-rw-r--r--drivers/s390/crypto/ap_bus.c12
-rw-r--r--drivers/s390/crypto/ap_bus.h4
-rw-r--r--drivers/s390/crypto/zcrypt_api.c6
-rw-r--r--drivers/s390/crypto/zcrypt_api.h4
-rw-r--r--drivers/s390/crypto/zcrypt_cca_key.h4
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c6
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.h4
-rw-r--r--drivers/s390/crypto/zcrypt_error.h4
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.c6
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.h4
-rw-r--r--drivers/s390/crypto/zcrypt_pcicc.c6
-rw-r--r--drivers/s390/crypto/zcrypt_pcicc.h4
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c6
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.h4
-rw-r--r--drivers/s390/kvm/kvm_virtio.c5
-rw-r--r--drivers/s390/net/claw.c3
-rw-r--r--drivers/s390/net/ctcm_dbug.c2
-rw-r--r--drivers/s390/net/ctcm_dbug.h2
-rw-r--r--drivers/s390/net/ctcm_fsms.c2
-rw-r--r--drivers/s390/net/ctcm_fsms.h2
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/ctcm_main.h2
-rw-r--r--drivers/s390/net/ctcm_mpc.c2
-rw-r--r--drivers/s390/net/ctcm_mpc.h2
-rw-r--r--drivers/s390/net/ctcm_sysfs.c2
-rw-r--r--drivers/s390/net/qeth_core.h2
-rw-r--r--drivers/s390/net/qeth_core_main.c2
-rw-r--r--drivers/s390/net/qeth_core_mpc.c2
-rw-r--r--drivers/s390/net/qeth_core_mpc.h2
-rw-r--r--drivers/s390/net/qeth_core_sys.c2
-rw-r--r--drivers/s390/net/qeth_l2_main.c4
-rw-r--r--drivers/s390/net/qeth_l3.h2
-rw-r--r--drivers/s390/net/qeth_l3_main.c7
-rw-r--r--drivers/s390/net/qeth_l3_sys.c2
-rw-r--r--drivers/s390/net/smsgiucv.h2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c2
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c2
-rw-r--r--drivers/s390/scsi/zfcp_cfdc.c2
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c2
-rw-r--r--drivers/s390/scsi/zfcp_def.h2
-rw-r--r--drivers/s390/scsi/zfcp_erp.c2
-rw-r--r--drivers/s390/scsi/zfcp_ext.h2
-rw-r--r--drivers/s390/scsi/zfcp_fc.c2
-rw-r--r--drivers/s390/scsi/zfcp_fc.h2
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c2
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h2
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c2
-rw-r--r--drivers/s390/scsi/zfcp_qdio.h2
-rw-r--r--drivers/s390/scsi/zfcp_reqlist.h2
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c2
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c2
-rw-r--r--drivers/s390/scsi/zfcp_unit.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c2
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c5
-rw-r--r--drivers/scsi/bfa/bfad_attr.c17
-rw-r--r--drivers/scsi/bfa/bfad_im.c2
-rw-r--r--drivers/scsi/bfa/bfad_im.h1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h13
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c18
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c217
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c39
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c122
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c21
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_hsi.h16
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h59
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c38
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c40
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c21
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c3
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c5
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c12
-rw-r--r--drivers/scsi/fcoe/Makefile2
-rw-r--r--drivers/scsi/fcoe/fcoe.c200
-rw-r--r--drivers/scsi/fcoe/fcoe.h8
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c159
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c832
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c13
-rw-r--r--drivers/scsi/libsas/sas_ata.c12
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c2
-rw-r--r--drivers/scsi/qla2xxx/Kconfig9
-rw-r--r--drivers/scsi/qla2xxx/Makefile3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c22
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c81
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h78
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c199
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c90
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c94
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c615
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c66
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h14
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c173
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c4963
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h1003
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c1904
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h82
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c134
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h22
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h28
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h8
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c95
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c111
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c738
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.h192
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c78
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/scsi_lib.c11
-rw-r--r--drivers/scsi/scsi_netlink.c7
-rw-r--r--drivers/scsi/scsi_pm.c5
-rw-r--r--drivers/scsi/scsi_scan.c7
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c9
-rw-r--r--drivers/scsi/scsi_wait_scan.c7
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/ufs/ufshcd.c5
-rw-r--r--drivers/sh/Kconfig1
-rw-r--r--drivers/sh/Makefile3
-rw-r--r--drivers/sh/clk/cpg.c333
-rw-r--r--drivers/sh/intc/Makefile2
-rw-r--r--drivers/sh/intc/dynamic.c57
-rw-r--r--drivers/sh/intc/virq.c4
-rw-r--r--drivers/sh/pfc.c739
-rw-r--r--drivers/sh/pfc/Kconfig26
-rw-r--r--drivers/sh/pfc/Makefile3
-rw-r--r--drivers/sh/pfc/core.c572
-rw-r--r--drivers/sh/pfc/gpio.c239
-rw-r--r--drivers/sh/pfc/pinctrl.c530
-rw-r--r--drivers/spi/spi-omap2-mcspi.c3
-rw-r--r--drivers/spi/spi-s3c64xx.c542
-rw-r--r--drivers/spi/spi-tegra.c4
-rw-r--r--drivers/ssb/b43_pci_bridge.c1
-rw-r--r--drivers/ssb/scan.c2
-rw-r--r--drivers/staging/comedi/drivers.c5
-rw-r--r--drivers/staging/gdm72xx/netlink_k.c18
-rw-r--r--drivers/staging/iio/Documentation/device.txt2
-rw-r--r--drivers/staging/iio/adc/Kconfig1
-rw-r--r--drivers/staging/iio/adc/ad7606_core.c3
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c6
-rw-r--r--drivers/staging/nvec/nvec.c8
-rw-r--r--drivers/staging/omapdrm/omap_drv.h2
-rw-r--r--drivers/staging/omapdrm/omap_fbdev.c10
-rw-r--r--drivers/staging/omapdrm/omap_priv.h55
-rw-r--r--drivers/staging/ramster/zcache-main.c8
-rw-r--r--drivers/staging/rtl8712/usb_intf.c2
-rw-r--r--drivers/staging/zcache/zcache-main.c10
-rw-r--r--drivers/target/Makefile3
-rw-r--r--drivers/target/iscsi/iscsi_target.c30
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c22
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c66
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c46
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c27
-rw-r--r--drivers/target/loopback/tcm_loop.c11
-rw-r--r--drivers/target/sbp/sbp_target.c47
-rw-r--r--drivers/target/target_core_alua.c5
-rw-r--r--drivers/target/target_core_device.c170
-rw-r--r--drivers/target/target_core_fabric_configfs.c3
-rw-r--r--drivers/target/target_core_file.c93
-rw-r--r--drivers/target/target_core_file.h1
-rw-r--r--drivers/target/target_core_iblock.c142
-rw-r--r--drivers/target/target_core_iblock.h1
-rw-r--r--drivers/target/target_core_internal.h22
-rw-r--r--drivers/target/target_core_pr.c13
-rw-r--r--drivers/target/target_core_pscsi.c84
-rw-r--r--drivers/target/target_core_rd.c17
-rw-r--r--drivers/target/target_core_sbc.c581
-rw-r--r--drivers/target/target_core_spc.c (renamed from drivers/target/target_core_cdb.c)434
-rw-r--r--drivers/target/target_core_tmr.c57
-rw-r--r--drivers/target/target_core_tpg.c14
-rw-r--r--drivers/target/target_core_transport.c2010
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c12
-rw-r--r--drivers/target/tcm_fc/tfc_io.c13
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c3
-rw-r--r--drivers/tty/amiserial.c14
-rw-r--r--drivers/tty/cyclades.c2
-rw-r--r--drivers/tty/hvc/hvc_opal.c2
-rw-r--r--drivers/tty/hvc/hvc_xen.c46
-rw-r--r--drivers/tty/n_r3964.c11
-rw-r--r--drivers/tty/pty.c25
-rw-r--r--drivers/tty/serial/8250/8250.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c45
-rw-r--r--drivers/tty/serial/crisv10.c8
-rw-r--r--drivers/tty/serial/imx.c6
-rw-r--r--drivers/tty/serial/lantiq.c83
-rw-r--r--drivers/tty/serial/mxs-auart.c42
-rw-r--r--drivers/tty/serial/sb1250-duart.c1
-rw-r--r--drivers/tty/serial/serial_txx9.c2
-rw-r--r--drivers/tty/serial/sh-sci.c38
-rw-r--r--drivers/tty/serial/zs.c1
-rw-r--r--drivers/tty/synclink.c4
-rw-r--r--drivers/tty/synclink_gt.c4
-rw-r--r--drivers/tty/synclinkmp.c4
-rw-r--r--drivers/tty/tty_io.c67
-rw-r--r--drivers/tty/tty_ldisc.c67
-rw-r--r--drivers/tty/tty_mutex.c60
-rw-r--r--drivers/tty/tty_port.c6
-rw-r--r--drivers/usb/Makefile2
-rw-r--r--drivers/usb/atm/xusbatm.c4
-rw-r--r--drivers/usb/class/cdc-acm.c8
-rw-r--r--drivers/usb/class/cdc-wdm.c11
-rw-r--r--drivers/usb/core/hcd-pci.c9
-rw-r--r--drivers/usb/core/hub.c20
-rw-r--r--drivers/usb/core/message.c3
-rw-r--r--drivers/usb/dwc3/gadget.c3
-rw-r--r--drivers/usb/gadget/Kconfig2
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c6
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c2
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.h4
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c4
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h4
-rw-r--r--drivers/usb/gadget/goku_udc.c2
-rw-r--r--drivers/usb/gadget/lpc32xx_udc.c1
-rw-r--r--drivers/usb/gadget/mv_udc_core.c2
-rw-r--r--drivers/usb/gadget/omap_udc.c5
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c2
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c2
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c2
-rw-r--r--drivers/usb/gadget/tcm_usb_gadget.c54
-rw-r--r--drivers/usb/gadget/u_ether.c2
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/ehci-hcd.c7
-rw-r--r--drivers/usb/host/ehci-omap.c184
-rw-r--r--drivers/usb/host/ehci-pci.c8
-rw-r--r--drivers/usb/host/ehci-sh.c3
-rw-r--r--drivers/usb/host/ehci-tegra.c20
-rw-r--r--drivers/usb/host/ehci-tilegx.c214
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c4
-rw-r--r--drivers/usb/host/ohci-hcd.c5
-rw-r--r--drivers/usb/host/ohci-hub.c2
-rw-r--r--drivers/usb/host/ohci-omap.c7
-rw-r--r--drivers/usb/host/ohci-tilegx.c203
-rw-r--r--drivers/usb/host/xhci-hub.c44
-rw-r--r--drivers/usb/host/xhci-mem.c74
-rw-r--r--drivers/usb/host/xhci-ring.c11
-rw-r--r--drivers/usb/host/xhci.c10
-rw-r--r--drivers/usb/host/xhci.h6
-rw-r--r--drivers/usb/musb/davinci.c1
-rw-r--r--drivers/usb/musb/davinci.h4
-rw-r--r--drivers/usb/musb/musb_gadget.c1
-rw-r--r--drivers/usb/musb/musb_host.c14
-rw-r--r--drivers/usb/otg/isp1301_omap.c2
-rw-r--r--drivers/usb/otg/twl6030-usb.c15
-rw-r--r--drivers/usb/phy/Kconfig4
-rw-r--r--drivers/usb/serial/cp210x.c12
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1
-rw-r--r--drivers/usb/serial/generic.c10
-rw-r--r--drivers/usb/serial/mct_u232.c13
-rw-r--r--drivers/usb/serial/metro-usb.c8
-rw-r--r--drivers/usb/serial/mos7840.c2
-rw-r--r--drivers/usb/serial/option.c130
-rw-r--r--drivers/usb/serial/qcserial.c6
-rw-r--r--drivers/usb/serial/sierra.c4
-rw-r--r--drivers/usb/serial/usb-serial.c12
-rw-r--r--drivers/usb/storage/scsiglue.c6
-rw-r--r--drivers/vhost/net.c4
-rw-r--r--drivers/vhost/test.c4
-rw-r--r--drivers/vhost/vhost.c8
-rw-r--r--drivers/vhost/vhost.h6
-rw-r--r--drivers/video/Kconfig35
-rw-r--r--drivers/video/Makefile3
-rw-r--r--drivers/video/auo_k1900fb.c198
-rw-r--r--drivers/video/auo_k1901fb.c251
-rw-r--r--drivers/video/auo_k190x.c1046
-rw-r--r--drivers/video/auo_k190x.h129
-rw-r--r--drivers/video/backlight/Kconfig14
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/adp5520_bl.c4
-rw-r--r--drivers/video/backlight/adp8860_bl.c28
-rw-r--r--drivers/video/backlight/adp8870_bl.c28
-rw-r--r--drivers/video/backlight/ams369fg06.c16
-rw-r--r--drivers/video/backlight/apple_bl.c21
-rw-r--r--drivers/video/backlight/backlight.c11
-rw-r--r--drivers/video/backlight/corgi_lcd.c12
-rw-r--r--drivers/video/backlight/cr_bllcd.c9
-rw-r--r--drivers/video/backlight/da903x_bl.c1
-rw-r--r--drivers/video/backlight/generic_bl.c6
-rw-r--r--drivers/video/backlight/ili9320.c11
-rw-r--r--drivers/video/backlight/jornada720_bl.c14
-rw-r--r--drivers/video/backlight/jornada720_lcd.c8
-rw-r--r--drivers/video/backlight/l4f00242t03.c27
-rw-r--r--drivers/video/backlight/lcd.c20
-rw-r--r--drivers/video/backlight/ld9040.c15
-rw-r--r--drivers/video/backlight/lm3533_bl.c423
-rw-r--r--drivers/video/backlight/lms283gf05.c9
-rw-r--r--drivers/video/backlight/ltv350qv.c24
-rw-r--r--drivers/video/backlight/omap1_bl.c4
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c1
-rw-r--r--drivers/video/backlight/progear_bl.c6
-rw-r--r--drivers/video/backlight/s6e63m0.c16
-rw-r--r--drivers/video/backlight/tdo24m.c21
-rw-r--r--drivers/video/backlight/tosa_bl.c11
-rw-r--r--drivers/video/backlight/tosa_lcd.c8
-rw-r--r--drivers/video/backlight/wm831x_bl.c1
-rw-r--r--drivers/video/bfin_adv7393fb.c49
-rw-r--r--drivers/video/broadsheetfb.c2
-rw-r--r--drivers/video/cobalt_lcdfb.c45
-rw-r--r--drivers/video/console/Kconfig14
-rw-r--r--drivers/video/ep93xx-fb.c32
-rw-r--r--drivers/video/exynos/exynos_dp_core.c69
-rw-r--r--drivers/video/exynos/exynos_dp_core.h3
-rw-r--r--drivers/video/exynos/exynos_dp_reg.c45
-rw-r--r--drivers/video/exynos/exynos_dp_reg.h29
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi.c49
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi_common.c36
-rw-r--r--drivers/video/exynos/s6e8ax0.c15
-rw-r--r--drivers/video/fb_defio.c6
-rw-r--r--drivers/video/fbmem.c21
-rw-r--r--drivers/video/fbsysfs.c2
-rw-r--r--drivers/video/fsl-diu-fb.c1
-rw-r--r--drivers/video/intelfb/intelfbdrv.c2
-rw-r--r--drivers/video/matrox/matroxfb_maven.c1
-rw-r--r--drivers/video/mb862xx/mb862xx-i2c.c2
-rw-r--r--drivers/video/mb862xx/mb862xxfbdrv.c2
-rw-r--r--drivers/video/mbx/mbxfb.c4
-rw-r--r--drivers/video/mxsfb.c73
-rw-r--r--drivers/video/omap/Kconfig8
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c8
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c107
-rw-r--r--drivers/video/omap2/displays/panel-n8x0.c8
-rw-r--r--drivers/video/omap2/displays/panel-taal.c90
-rw-r--r--drivers/video/omap2/displays/panel-tfp410.c76
-rw-r--r--drivers/video/omap2/displays/panel-tpo-td043mtea1.c22
-rw-r--r--drivers/video/omap2/dss/Kconfig13
-rw-r--r--drivers/video/omap2/dss/apply.c134
-rw-r--r--drivers/video/omap2/dss/core.c293
-rw-r--r--drivers/video/omap2/dss/dispc.c749
-rw-r--r--drivers/video/omap2/dss/dispc.h72
-rw-r--r--drivers/video/omap2/dss/display.c49
-rw-r--r--drivers/video/omap2/dss/dpi.c75
-rw-r--r--drivers/video/omap2/dss/dsi.c406
-rw-r--r--drivers/video/omap2/dss/dss.c67
-rw-r--r--drivers/video/omap2/dss/dss.h151
-rw-r--r--drivers/video/omap2/dss/dss_features.c30
-rw-r--r--drivers/video/omap2/dss/dss_features.h5
-rw-r--r--drivers/video/omap2/dss/hdmi.c445
-rw-r--r--drivers/video/omap2/dss/hdmi_panel.c236
-rw-r--r--drivers/video/omap2/dss/manager.c19
-rw-r--r--drivers/video/omap2/dss/overlay.c16
-rw-r--r--drivers/video/omap2/dss/rfbi.c86
-rw-r--r--drivers/video/omap2/dss/sdi.c63
-rw-r--r--drivers/video/omap2/dss/ti_hdmi.h32
-rw-r--r--drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c480
-rw-r--r--drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h161
-rw-r--r--drivers/video/omap2/dss/venc.c135
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c17
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c12
-rw-r--r--drivers/video/omap2/omapfb/omapfb.h1
-rw-r--r--drivers/video/omap2/vrfb.c4
-rw-r--r--drivers/video/pxa3xx-gcu.c5
-rw-r--r--drivers/video/s3c-fb.c160
-rw-r--r--drivers/video/savage/savagefb_driver.c10
-rw-r--r--drivers/video/sh_mobile_hdmi.c219
-rw-r--r--drivers/video/sis/init.h45
-rw-r--r--drivers/video/sis/sis_main.c41
-rw-r--r--drivers/video/skeletonfb.c2
-rw-r--r--drivers/video/smscufx.c4
-rw-r--r--drivers/video/udlfb.c2
-rw-r--r--drivers/video/via/viafbdev.c34
-rw-r--r--drivers/virtio/virtio_balloon.c24
-rw-r--r--drivers/w1/masters/Kconfig2
-rw-r--r--drivers/w1/masters/omap_hdq.c86
-rw-r--r--drivers/watchdog/Kconfig23
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c4
-rw-r--r--drivers/watchdog/booke_wdt.c4
-rw-r--r--drivers/watchdog/coh901327_wdt.c7
-rw-r--r--drivers/watchdog/da9052_wdt.c251
-rw-r--r--drivers/watchdog/f71808e_wdt.c4
-rw-r--r--drivers/watchdog/hpwdt.c4
-rw-r--r--drivers/watchdog/iTCO_wdt.c220
-rw-r--r--drivers/watchdog/ie6xx_wdt.c4
-rw-r--r--drivers/watchdog/lantiq_wdt.c56
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c2
-rw-r--r--drivers/watchdog/omap_wdt.c24
-rw-r--r--drivers/watchdog/orion_wdt.c203
-rw-r--r--drivers/watchdog/s3c2410_wdt.c16
-rw-r--r--drivers/watchdog/sch311x_wdt.c10
-rw-r--r--drivers/watchdog/sp805_wdt.c253
-rw-r--r--drivers/watchdog/via_wdt.c2
-rw-r--r--drivers/watchdog/watchdog_core.c74
-rw-r--r--drivers/watchdog/watchdog_core.h (renamed from drivers/watchdog/watchdog_dev.h)8
-rw-r--r--drivers/watchdog/watchdog_dev.c375
-rw-r--r--drivers/xen/Kconfig8
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/events.c9
-rw-r--r--drivers/xen/mcelog.c414
-rw-r--r--drivers/xen/pci.c2
-rw-r--r--drivers/xen/pcpu.c371
-rw-r--r--drivers/xen/platform-pci.c18
-rw-r--r--drivers/xen/tmem.c8
-rw-r--r--drivers/xen/xen-acpi-processor.c10
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c20
2026 files changed, 98168 insertions, 61837 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 47768ff..8099895 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -208,7 +208,7 @@ config ACPI_IPMI
config ACPI_HOTPLUG_CPU
bool
- depends on ACPI_PROCESSOR && HOTPLUG_CPU
+ depends on EXPERIMENTAL && ACPI_PROCESSOR && HOTPLUG_CPU
select ACPI_CONTAINER
default y
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 6512b20..ff9f6bd 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -61,7 +61,6 @@ static int acpi_ac_open_fs(struct inode *inode, struct file *file);
static int acpi_ac_add(struct acpi_device *device);
static int acpi_ac_remove(struct acpi_device *device, int type);
-static int acpi_ac_resume(struct acpi_device *device);
static void acpi_ac_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id ac_device_ids[] = {
@@ -70,6 +69,9 @@ static const struct acpi_device_id ac_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, ac_device_ids);
+static int acpi_ac_resume(struct device *dev);
+static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
+
static struct acpi_driver acpi_ac_driver = {
.name = "ac",
.class = ACPI_AC_CLASS,
@@ -78,9 +80,9 @@ static struct acpi_driver acpi_ac_driver = {
.ops = {
.add = acpi_ac_add,
.remove = acpi_ac_remove,
- .resume = acpi_ac_resume,
.notify = acpi_ac_notify,
},
+ .drv.pm = &acpi_ac_pm,
};
struct acpi_ac {
@@ -309,13 +311,18 @@ static int acpi_ac_add(struct acpi_device *device)
return result;
}
-static int acpi_ac_resume(struct acpi_device *device)
+static int acpi_ac_resume(struct device *dev)
{
struct acpi_ac *ac;
unsigned old_state;
- if (!device || !acpi_driver_data(device))
+
+ if (!dev)
return -EINVAL;
- ac = acpi_driver_data(device);
+
+ ac = acpi_driver_data(to_acpi_device(dev));
+ if (!ac)
+ return -EINVAL;
+
old_state = ac->state;
if (acpi_ac_get_state(ac))
return 0;
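
The ac.c hunks above show the conversion that repeats throughout the ACPI changes below (battery, button, fan, power, processor): the legacy .suspend/.resume callbacks in struct acpi_device_ops are dropped and standard dev_pm_ops callbacks are wired up through .drv.pm instead, so the handlers now receive a struct device * and recover the ACPI device with to_acpi_device(). A minimal sketch of that pattern, not part of the patch, for a hypothetical "foo" driver (all foo_* names are made up; only the macros and helpers visible in the hunks above are assumed):

#include <linux/device.h>
#include <linux/pm.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>

static const struct acpi_device_id foo_device_ids[] = {
	{"FOO0001", 0},			/* hypothetical _HID */
	{"", 0},
};

static int acpi_foo_add(struct acpi_device *adev)
{
	/* allocate driver data and attach it to adev here */
	return 0;
}

static int acpi_foo_remove(struct acpi_device *adev, int type)
{
	return 0;
}

/* PM callback now takes a struct device *, not an acpi_device * */
static int acpi_foo_resume(struct device *dev)
{
	struct acpi_device *adev;

	if (!dev)
		return -EINVAL;

	adev = to_acpi_device(dev);
	if (!acpi_driver_data(adev))
		return -EINVAL;

	/* re-read whatever hardware state may have changed while suspended */
	return 0;
}

/* no suspend handler is needed here, so pass NULL (as ac.c does) */
static SIMPLE_DEV_PM_OPS(acpi_foo_pm, NULL, acpi_foo_resume);

static struct acpi_driver acpi_foo_driver = {
	.name = "foo",
	.ids = foo_device_ids,
	.ops = {
		.add = acpi_foo_add,
		.remove = acpi_foo_remove,
		/* no .suspend/.resume members any more */
	},
	.drv.pm = &acpi_foo_pm,	/* dev_pm_ops picked up by the driver core */
};
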
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index a43fa1a..1502c502 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -36,6 +36,7 @@
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
static DEFINE_MUTEX(isolated_cpus_lock);
+static DEFINE_MUTEX(round_robin_lock);
static unsigned long power_saving_mwait_eax;
@@ -107,7 +108,7 @@ static void round_robin_cpu(unsigned int tsk_index)
if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
return;
- mutex_lock(&isolated_cpus_lock);
+ mutex_lock(&round_robin_lock);
cpumask_clear(tmp);
for_each_cpu(cpu, pad_busy_cpus)
cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
@@ -116,7 +117,7 @@ static void round_robin_cpu(unsigned int tsk_index)
if (cpumask_empty(tmp))
cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
if (cpumask_empty(tmp)) {
- mutex_unlock(&isolated_cpus_lock);
+ mutex_unlock(&round_robin_lock);
return;
}
for_each_cpu(cpu, tmp) {
@@ -131,7 +132,7 @@ static void round_robin_cpu(unsigned int tsk_index)
tsk_in_cpu[tsk_index] = preferred_cpu;
cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
cpu_weight[preferred_cpu]++;
- mutex_unlock(&isolated_cpus_lock);
+ mutex_unlock(&round_robin_lock);
set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
}
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 0ed85ca..615996a 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -95,18 +95,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
return_ACPI_STATUS(status);
}
- if (sleep_state != ACPI_STATE_S5) {
- /*
- * Disable BM arbitration. This feature is contained within an
- * optional register (PM2 Control), so ignore a BAD_ADDRESS
- * exception.
- */
- status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
- if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
- return_ACPI_STATUS(status);
- }
- }
-
/*
* 1) Disable/Clear all GPEs
* 2) Enable all wakeup GPEs
@@ -364,16 +352,6 @@ acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags)
[ACPI_EVENT_POWER_BUTTON].
status_register_id, ACPI_CLEAR_STATUS);
- /*
- * Enable BM arbitration. This feature is contained within an
- * optional register (PM2 Control), so ignore a BAD_ADDRESS
- * exception.
- */
- status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
- if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
- return_ACPI_STATUS(status);
- }
-
acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 23ce096..fe66260 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -638,7 +638,7 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
/* Create the new outer package and populate it */
status =
- acpi_ns_wrap_with_package(data, *elements,
+ acpi_ns_wrap_with_package(data, return_object,
return_object_ptr);
if (ACPI_FAILURE(status)) {
return (status);
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 5577762..6686b1e 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -243,7 +243,7 @@ static int pre_map_gar_callback(struct apei_exec_context *ctx,
u8 ins = entry->instruction;
if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
- return acpi_os_map_generic_address(&entry->register_region);
+ return apei_map_generic_address(&entry->register_region);
return 0;
}
@@ -276,7 +276,7 @@ static int post_unmap_gar_callback(struct apei_exec_context *ctx,
u8 ins = entry->instruction;
if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
- acpi_os_unmap_generic_address(&entry->register_region);
+ apei_unmap_generic_address(&entry->register_region);
return 0;
}
@@ -606,6 +606,19 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
return 0;
}
+int apei_map_generic_address(struct acpi_generic_address *reg)
+{
+ int rc;
+ u32 access_bit_width;
+ u64 address;
+
+ rc = apei_check_gar(reg, &address, &access_bit_width);
+ if (rc)
+ return rc;
+ return acpi_os_map_generic_address(reg);
+}
+EXPORT_SYMBOL_GPL(apei_map_generic_address);
+
/* read GAR in interrupt (including NMI) or process context */
int apei_read(u64 *val, struct acpi_generic_address *reg)
{
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index cca240a..f220d64 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -7,6 +7,8 @@
#define APEI_INTERNAL_H
#include <linux/cper.h>
+#include <linux/acpi.h>
+#include <linux/acpi_io.h>
struct apei_exec_context;
@@ -68,6 +70,13 @@ static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 actio
/* IP has been set in instruction function */
#define APEI_EXEC_SET_IP 1
+int apei_map_generic_address(struct acpi_generic_address *reg);
+
+static inline void apei_unmap_generic_address(struct acpi_generic_address *reg)
+{
+ acpi_os_unmap_generic_address(reg);
+}
+
int apei_read(u64 *val, struct acpi_generic_address *reg);
int apei_write(u64 val, struct acpi_generic_address *reg);
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 9b3cac0..1599566 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -301,7 +301,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
if (!ghes)
return ERR_PTR(-ENOMEM);
ghes->generic = generic;
- rc = acpi_os_map_generic_address(&generic->error_status_address);
+ rc = apei_map_generic_address(&generic->error_status_address);
if (rc)
goto err_free;
error_block_length = generic->error_block_length;
@@ -321,7 +321,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
return ghes;
err_unmap:
- acpi_os_unmap_generic_address(&generic->error_status_address);
+ apei_unmap_generic_address(&generic->error_status_address);
err_free:
kfree(ghes);
return ERR_PTR(rc);
@@ -330,7 +330,7 @@ err_free:
static void ghes_fini(struct ghes *ghes)
{
kfree(ghes->estatus);
- acpi_os_unmap_generic_address(&ghes->generic->error_status_address);
+ apei_unmap_generic_address(&ghes->generic->error_status_address);
}
enum {
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 86933ca..023f9c8 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -643,11 +643,19 @@ static int acpi_battery_update(struct acpi_battery *battery)
static void acpi_battery_refresh(struct acpi_battery *battery)
{
+ int power_unit;
+
if (!battery->bat.dev)
return;
+ power_unit = battery->power_unit;
+
acpi_battery_get_info(battery);
- /* The battery may have changed its reporting units. */
+
+ if (power_unit == battery->power_unit)
+ return;
+
+ /* The battery has changed its reporting units. */
sysfs_remove_battery(battery);
sysfs_add_battery(battery);
}
@@ -1036,17 +1044,24 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
}
/* this is needed to learn about changes made in suspended state */
-static int acpi_battery_resume(struct acpi_device *device)
+static int acpi_battery_resume(struct device *dev)
{
struct acpi_battery *battery;
- if (!device)
+
+ if (!dev)
return -EINVAL;
- battery = acpi_driver_data(device);
+
+ battery = acpi_driver_data(to_acpi_device(dev));
+ if (!battery)
+ return -EINVAL;
+
battery->update_time = 0;
acpi_battery_update(battery);
return 0;
}
+static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
+
static struct acpi_driver acpi_battery_driver = {
.name = "battery",
.class = ACPI_BATTERY_CLASS,
@@ -1054,10 +1069,10 @@ static struct acpi_driver acpi_battery_driver = {
.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = acpi_battery_add,
- .resume = acpi_battery_resume,
.remove = acpi_battery_remove,
.notify = acpi_battery_notify,
},
+ .drv.pm = &acpi_battery_pm,
};
static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
index 8cf6c46..6680df3 100644
--- a/drivers/acpi/bgrt.c
+++ b/drivers/acpi/bgrt.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/sysfs.h>
+#include <linux/io.h>
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 3188da3..adceafd 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -182,41 +182,66 @@ EXPORT_SYMBOL(acpi_bus_get_private_data);
Power Management
-------------------------------------------------------------------------- */
+static const char *state_string(int state)
+{
+ switch (state) {
+ case ACPI_STATE_D0:
+ return "D0";
+ case ACPI_STATE_D1:
+ return "D1";
+ case ACPI_STATE_D2:
+ return "D2";
+ case ACPI_STATE_D3_HOT:
+ return "D3hot";
+ case ACPI_STATE_D3_COLD:
+ return "D3";
+ default:
+ return "(unknown)";
+ }
+}
+
static int __acpi_bus_get_power(struct acpi_device *device, int *state)
{
- int result = 0;
- acpi_status status = 0;
- unsigned long long psc = 0;
+ int result = ACPI_STATE_UNKNOWN;
if (!device || !state)
return -EINVAL;
- *state = ACPI_STATE_UNKNOWN;
-
- if (device->flags.power_manageable) {
- /*
- * Get the device's power state either directly (via _PSC) or
- * indirectly (via power resources).
- */
- if (device->power.flags.power_resources) {
- result = acpi_power_get_inferred_state(device, state);
- if (result)
- return result;
- } else if (device->power.flags.explicit_get) {
- status = acpi_evaluate_integer(device->handle, "_PSC",
- NULL, &psc);
- if (ACPI_FAILURE(status))
- return -ENODEV;
- *state = (int)psc;
- }
- } else {
+ if (!device->flags.power_manageable) {
/* TBD: Non-recursive algorithm for walking up hierarchy. */
*state = device->parent ?
device->parent->power.state : ACPI_STATE_D0;
+ goto out;
+ }
+
+ /*
+ * Get the device's power state either directly (via _PSC) or
+ * indirectly (via power resources).
+ */
+ if (device->power.flags.explicit_get) {
+ unsigned long long psc;
+ acpi_status status = acpi_evaluate_integer(device->handle,
+ "_PSC", NULL, &psc);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ result = psc;
+ }
+ /* The test below covers ACPI_STATE_UNKNOWN too. */
+ if (result <= ACPI_STATE_D2) {
+ ; /* Do nothing. */
+ } else if (device->power.flags.power_resources) {
+ int error = acpi_power_get_inferred_state(device, &result);
+ if (error)
+ return error;
+ } else if (result == ACPI_STATE_D3_HOT) {
+ result = ACPI_STATE_D3;
}
+ *state = result;
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n",
- device->pnp.bus_id, *state));
+ out:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n",
+ device->pnp.bus_id, state_string(*state)));
return 0;
}
@@ -234,13 +259,14 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
/* Make sure this is a valid target state */
if (state == device->power.state) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n",
- state));
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n",
+ state_string(state)));
return 0;
}
if (!device->power.states[state].flags.valid) {
- printk(KERN_WARNING PREFIX "Device does not support D%d\n", state);
+ printk(KERN_WARNING PREFIX "Device does not support %s\n",
+ state_string(state));
return -ENODEV;
}
if (device->parent && (state < device->parent->power.state)) {
@@ -294,13 +320,13 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
end:
if (result)
printk(KERN_WARNING PREFIX
- "Device [%s] failed to transition to D%d\n",
- device->pnp.bus_id, state);
+ "Device [%s] failed to transition to %s\n",
+ device->pnp.bus_id, state_string(state));
else {
device->power.state = state;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Device [%s] transitioned to D%d\n",
- device->pnp.bus_id, state));
+ "Device [%s] transitioned to %s\n",
+ device->pnp.bus_id, state_string(state)));
}
return result;
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index d27d072..79d4c22 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -76,19 +76,21 @@ MODULE_DEVICE_TABLE(acpi, button_device_ids);
static int acpi_button_add(struct acpi_device *device);
static int acpi_button_remove(struct acpi_device *device, int type);
-static int acpi_button_resume(struct acpi_device *device);
static void acpi_button_notify(struct acpi_device *device, u32 event);
+static int acpi_button_resume(struct device *dev);
+static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume);
+
static struct acpi_driver acpi_button_driver = {
.name = "button",
.class = ACPI_BUTTON_CLASS,
.ids = button_device_ids,
.ops = {
.add = acpi_button_add,
- .resume = acpi_button_resume,
.remove = acpi_button_remove,
.notify = acpi_button_notify,
},
+ .drv.pm = &acpi_button_pm,
};
struct acpi_button {
@@ -308,8 +310,9 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
}
}
-static int acpi_button_resume(struct acpi_device *device)
+static int acpi_button_resume(struct device *dev)
{
+ struct acpi_device *device = to_acpi_device(dev);
struct acpi_button *button = acpi_driver_data(device);
if (button->type == ACPI_BUTTON_TYPE_LID)
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 0f0356c..669d9ee 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -46,8 +46,6 @@ MODULE_LICENSE("GPL");
static int acpi_fan_add(struct acpi_device *device);
static int acpi_fan_remove(struct acpi_device *device, int type);
-static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state);
-static int acpi_fan_resume(struct acpi_device *device);
static const struct acpi_device_id fan_device_ids[] = {
{"PNP0C0B", 0},
@@ -55,6 +53,10 @@ static const struct acpi_device_id fan_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, fan_device_ids);
+static int acpi_fan_suspend(struct device *dev);
+static int acpi_fan_resume(struct device *dev);
+static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume);
+
static struct acpi_driver acpi_fan_driver = {
.name = "fan",
.class = ACPI_FAN_CLASS,
@@ -62,9 +64,8 @@ static struct acpi_driver acpi_fan_driver = {
.ops = {
.add = acpi_fan_add,
.remove = acpi_fan_remove,
- .suspend = acpi_fan_suspend,
- .resume = acpi_fan_resume,
},
+ .drv.pm = &acpi_fan_pm,
};
/* thermal cooling device callbacks */
@@ -183,24 +184,24 @@ static int acpi_fan_remove(struct acpi_device *device, int type)
return 0;
}
-static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state)
+static int acpi_fan_suspend(struct device *dev)
{
- if (!device)
+ if (!dev)
return -EINVAL;
- acpi_bus_set_power(device->handle, ACPI_STATE_D0);
+ acpi_bus_set_power(to_acpi_device(dev)->handle, ACPI_STATE_D0);
return AE_OK;
}
-static int acpi_fan_resume(struct acpi_device *device)
+static int acpi_fan_resume(struct device *dev)
{
int result;
- if (!device)
+ if (!dev)
return -EINVAL;
- result = acpi_bus_update_power(device->handle, NULL);
+ result = acpi_bus_update_power(to_acpi_device(dev)->handle, NULL);
if (result)
printk(KERN_ERR PREFIX "Error updating fan power state\n");
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 0500f71..894d45c 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -60,7 +60,6 @@ ACPI_MODULE_NAME("power");
static int acpi_power_add(struct acpi_device *device);
static int acpi_power_remove(struct acpi_device *device, int type);
-static int acpi_power_resume(struct acpi_device *device);
static const struct acpi_device_id power_device_ids[] = {
{ACPI_POWER_HID, 0},
@@ -68,6 +67,9 @@ static const struct acpi_device_id power_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, power_device_ids);
+static int acpi_power_resume(struct device *dev);
+static SIMPLE_DEV_PM_OPS(acpi_power_pm, NULL, acpi_power_resume);
+
static struct acpi_driver acpi_power_driver = {
.name = "power",
.class = ACPI_POWER_CLASS,
@@ -75,8 +77,8 @@ static struct acpi_driver acpi_power_driver = {
.ops = {
.add = acpi_power_add,
.remove = acpi_power_remove,
- .resume = acpi_power_resume,
},
+ .drv.pm = &acpi_power_pm,
};
/*
@@ -631,7 +633,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
* We know a device's inferred power state when all the resources
* required for a given D-state are 'on'.
*/
- for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) {
+ for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
list = &device->power.states[i].resources;
if (list->count < 1)
continue;
@@ -771,14 +773,16 @@ static int acpi_power_remove(struct acpi_device *device, int type)
return 0;
}
-static int acpi_power_resume(struct acpi_device *device)
+static int acpi_power_resume(struct device *dev)
{
int result = 0, state;
+ struct acpi_device *device;
struct acpi_power_resource *resource;
- if (!device)
+ if (!dev)
return -EINVAL;
+ device = to_acpi_device(dev);
resource = acpi_driver_data(device);
if (!resource)
return -EINVAL;
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index c850de4..eff7222 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -189,10 +189,12 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
* Processor (CPU3, 0x03, 0x00000410, 0x06) {}
* }
*
- * Ignores apic_id and always return 0 for CPU0's handle.
+ * Ignores apic_id and always returns 0 for the processor
+ * handle with acpi id 0 if nr_cpu_ids is 1.
+ * This should be the case if SMP tables are not found.
* Return -1 for other CPU's handle.
*/
- if (acpi_id == 0)
+ if (nr_cpu_ids <= 1 && acpi_id == 0)
return acpi_id;
else
return apic_id;
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 0734086..7048b97 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -93,6 +93,9 @@ static const struct acpi_device_id processor_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);
+static SIMPLE_DEV_PM_OPS(acpi_processor_pm,
+ acpi_processor_suspend, acpi_processor_resume);
+
static struct acpi_driver acpi_processor_driver = {
.name = "processor",
.class = ACPI_PROCESSOR_CLASS,
@@ -100,10 +103,9 @@ static struct acpi_driver acpi_processor_driver = {
.ops = {
.add = acpi_processor_add,
.remove = acpi_processor_remove,
- .suspend = acpi_processor_suspend,
- .resume = acpi_processor_resume,
.notify = acpi_processor_notify,
},
+ .drv.pm = &acpi_processor_pm,
};
#define INSTALL_NOTIFY_HANDLER 1
@@ -427,18 +429,11 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
* Initialize missing things
*/
if (pr->flags.need_hotplug_init) {
- struct cpuidle_driver *idle_driver =
- cpuidle_get_driver();
-
printk(KERN_INFO "Will online and init hotplugged "
"CPU: %d\n", pr->id);
WARN(acpi_processor_start(pr), "Failed to start CPU:"
" %d\n", pr->id);
pr->flags.need_hotplug_init = 0;
- if (idle_driver && !strcmp(idle_driver->name,
- "intel_idle")) {
- intel_idle_cpu_init(pr->id);
- }
/* Normal CPU soft online event */
} else {
acpi_processor_ppc_has_changed(pr, 0);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f3decb3..e589c19 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -221,9 +221,6 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
#endif
-/*
- * Suspend / resume control
- */
static u32 saved_bm_rld;
static void acpi_idle_bm_rld_save(void)
@@ -240,13 +237,13 @@ static void acpi_idle_bm_rld_restore(void)
acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}
-int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
+int acpi_processor_suspend(struct device *dev)
{
acpi_idle_bm_rld_save();
return 0;
}
-int acpi_processor_resume(struct acpi_device * device)
+int acpi_processor_resume(struct device *dev)
{
acpi_idle_bm_rld_restore();
return 0;
@@ -586,7 +583,6 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
*/
cx->valid = 1;
- cx->latency_ticks = cx->latency;
/*
* On older chipsets, BM_RLD needs to be set
* in order for Bus Master activity to wake the
@@ -619,7 +615,6 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
if (!cx->address)
break;
cx->valid = 1;
- cx->latency_ticks = cx->latency; /* Normalize latency */
break;
case ACPI_STATE_C3:
@@ -754,6 +749,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
local_irq_disable();
+
lapic_timer_state_broadcast(pr, cx, 1);
kt1 = ktime_get_real();
acpi_idle_do_entry(cx);
@@ -764,7 +760,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
dev->last_residency = (int)idle_time;
local_irq_enable();
- cx->usage++;
lapic_timer_state_broadcast(pr, cx, 0);
return index;
@@ -823,6 +818,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
local_irq_disable();
+
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
@@ -866,10 +862,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
if (cx->entry_method != ACPI_CSTATE_FFH)
current_thread_info()->status |= TS_POLLING;
- cx->usage++;
-
lapic_timer_state_broadcast(pr, cx, 0);
- cx->time += idle_time;
return index;
}
@@ -909,12 +902,13 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
local_irq_disable();
acpi_safe_halt();
local_irq_enable();
- return -EINVAL;
+ return -EBUSY;
}
}
local_irq_disable();
+
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
@@ -986,10 +980,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
if (cx->entry_method != ACPI_CSTATE_FFH)
current_thread_info()->status |= TS_POLLING;
- cx->usage++;
-
lapic_timer_state_broadcast(pr, cx, 0);
- cx->time += idle_time;
return index;
}
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 0af48a8..a093dc1 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -333,6 +333,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
struct acpi_buffer state = { 0, NULL };
union acpi_object *pss = NULL;
int i;
+ int last_invalid = -1;
status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
@@ -394,14 +395,33 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
((u32)(px->core_frequency * 1000) !=
(px->core_frequency * 1000))) {
printk(KERN_ERR FW_BUG PREFIX
- "Invalid BIOS _PSS frequency: 0x%llx MHz\n",
- px->core_frequency);
- result = -EFAULT;
- kfree(pr->performance->states);
- goto end;
+ "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
+ pr->id, px->core_frequency);
+ if (last_invalid == -1)
+ last_invalid = i;
+ } else {
+ if (last_invalid != -1) {
+ /*
+ * Copy this valid entry over last_invalid entry
+ */
+ memcpy(&(pr->performance->states[last_invalid]),
+ px, sizeof(struct acpi_processor_px));
+ ++last_invalid;
+ }
}
}
+ if (last_invalid == 0) {
+ printk(KERN_ERR FW_BUG PREFIX
+ "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
+ result = -EFAULT;
+ kfree(pr->performance->states);
+ pr->performance->states = NULL;
+ }
+
+ if (last_invalid > 0)
+ pr->performance->state_count = last_invalid;
+
end:
kfree(buffer.pointer);
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 6e36d0c..c0b9aa5 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -988,16 +988,18 @@ static void acpi_sbs_rmdirs(void)
#endif
}
-static int acpi_sbs_resume(struct acpi_device *device)
+static int acpi_sbs_resume(struct device *dev)
{
struct acpi_sbs *sbs;
- if (!device)
+ if (!dev)
return -EINVAL;
- sbs = device->driver_data;
+ sbs = to_acpi_device(dev)->driver_data;
acpi_sbs_callback(sbs);
return 0;
}
+static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume);
+
static struct acpi_driver acpi_sbs_driver = {
.name = "sbs",
.class = ACPI_SBS_CLASS,
@@ -1005,8 +1007,8 @@ static struct acpi_driver acpi_sbs_driver = {
.ops = {
.add = acpi_sbs_add,
.remove = acpi_sbs_remove,
- .resume = acpi_sbs_resume,
},
+ .drv.pm = &acpi_sbs_pm,
};
static int __init acpi_sbs_init(void)
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 85cbfdc..fdda493 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -290,26 +290,6 @@ static void acpi_device_release(struct device *dev)
kfree(acpi_dev);
}
-static int acpi_device_suspend(struct device *dev, pm_message_t state)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_driver *acpi_drv = acpi_dev->driver;
-
- if (acpi_drv && acpi_drv->ops.suspend)
- return acpi_drv->ops.suspend(acpi_dev, state);
- return 0;
-}
-
-static int acpi_device_resume(struct device *dev)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_driver *acpi_drv = acpi_dev->driver;
-
- if (acpi_drv && acpi_drv->ops.resume)
- return acpi_drv->ops.resume(acpi_dev);
- return 0;
-}
-
static int acpi_bus_match(struct device *dev, struct device_driver *drv)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
@@ -441,8 +421,6 @@ static int acpi_device_remove(struct device * dev)
struct bus_type acpi_bus_type = {
.name = "acpi",
- .suspend = acpi_device_suspend,
- .resume = acpi_device_resume,
.match = acpi_bus_match,
.probe = acpi_device_probe,
.remove = acpi_device_remove,
@@ -1567,6 +1545,7 @@ static int acpi_bus_scan_fixed(void)
ACPI_BUS_TYPE_POWER_BUTTON,
ACPI_STA_DEFAULT,
&ops);
+ device_init_wakeup(&device->dev, true);
}
if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 06527c5..88561029 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -57,6 +57,7 @@ MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume.");
static u8 sleep_states[ACPI_S_STATE_COUNT];
+static bool pwr_btn_event_pending;
static void acpi_sleep_tts_switch(u32 acpi_state)
{
@@ -93,11 +94,9 @@ static int acpi_sleep_prepare(u32 acpi_state)
#ifdef CONFIG_ACPI_SLEEP
/* do we have a wakeup address for S2 and S3? */
if (acpi_state == ACPI_STATE_S3) {
- if (!acpi_wakeup_address) {
+ if (!acpi_wakeup_address)
return -EFAULT;
- }
- acpi_set_firmware_waking_vector(
- (acpi_physical_address)acpi_wakeup_address);
+ acpi_set_firmware_waking_vector(acpi_wakeup_address);
}
ACPI_FLUSH_CPU_CACHE();
@@ -186,6 +185,14 @@ static int acpi_pm_prepare(void)
return error;
}
+static int find_powerf_dev(struct device *dev, void *data)
+{
+ struct acpi_device *device = to_acpi_device(dev);
+ const char *hid = acpi_device_hid(device);
+
+ return !strcmp(hid, ACPI_BUTTON_HID_POWERF);
+}
+
/**
* acpi_pm_finish - Instruct the platform to leave a sleep state.
*
@@ -194,6 +201,7 @@ static int acpi_pm_prepare(void)
*/
static void acpi_pm_finish(void)
{
+ struct device *pwr_btn_dev;
u32 acpi_state = acpi_target_sleep_state;
acpi_ec_unblock_transactions();
@@ -211,6 +219,23 @@ static void acpi_pm_finish(void)
acpi_set_firmware_waking_vector((acpi_physical_address) 0);
acpi_target_sleep_state = ACPI_STATE_S0;
+
+ /* If we were woken with the fixed power button, provide a small
+ * hint to userspace in the form of a wakeup event on the fixed power
+ * button device (if it can be found).
+ *
+ * We delay the event generation til now, as the PM layer requires
+ * timekeeping to be running before we generate events. */
+ if (!pwr_btn_event_pending)
+ return;
+
+ pwr_btn_event_pending = false;
+ pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL,
+ find_powerf_dev);
+ if (pwr_btn_dev) {
+ pm_wakeup_event(pwr_btn_dev, 0);
+ put_device(pwr_btn_dev);
+ }
}
/**
@@ -300,9 +325,23 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
/* ACPI 3.0 specs (P62) says that it's the responsibility
* of the OSPM to clear the status bit [ implying that the
* POWER_BUTTON event should not reach userspace ]
+ *
+ * However, we do generate a small hint for userspace in the form of
+ * a wakeup event. We flag this condition for now and generate the
+ * event later, as we're currently too early in resume to be able to
+ * generate wakeup events.
*/
- if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3))
- acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+ if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
+ acpi_event_status pwr_btn_status;
+
+ acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);
+
+ if (pwr_btn_status & ACPI_EVENT_FLAG_SET) {
+ acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
+ /* Flag for later */
+ pwr_btn_event_pending = true;
+ }
+ }
/*
* Disable and clear GPE status before interrupt is enabled. Some GPEs
@@ -732,8 +771,8 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
* can wake the system. _S0W may be valid, too.
*/
if (acpi_target_sleep_state == ACPI_STATE_S0 ||
- (device_may_wakeup(dev) &&
- adev->wakeup.sleep_state <= acpi_target_sleep_state)) {
+ (device_may_wakeup(dev) && adev->wakeup.flags.valid &&
+ adev->wakeup.sleep_state >= acpi_target_sleep_state)) {
acpi_status status;
acpi_method[3] = 'W';
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 9f66181..240a244 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
{
int result = 0;
- if (!strncmp(val, "enable", strlen("enable") - 1)) {
+ if (!strncmp(val, "enable", strlen("enable"))) {
result = acpi_debug_trace(trace_method_name, trace_debug_level,
trace_debug_layer, 0);
if (result)
@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
goto exit;
}
- if (!strncmp(val, "disable", strlen("disable") - 1)) {
+ if (!strncmp(val, "disable", strlen("disable"))) {
int name = 0;
result = acpi_debug_trace((char *)&name, trace_debug_level,
trace_debug_layer, 0);
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 7dbebea..21dd4c2 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -98,7 +98,6 @@ MODULE_PARM_DESC(psv, "Disable or override all passive trip points.");
static int acpi_thermal_add(struct acpi_device *device);
static int acpi_thermal_remove(struct acpi_device *device, int type);
-static int acpi_thermal_resume(struct acpi_device *device);
static void acpi_thermal_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id thermal_device_ids[] = {
@@ -107,6 +106,9 @@ static const struct acpi_device_id thermal_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
+static int acpi_thermal_resume(struct device *dev);
+static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume);
+
static struct acpi_driver acpi_thermal_driver = {
.name = "thermal",
.class = ACPI_THERMAL_CLASS,
@@ -114,9 +116,9 @@ static struct acpi_driver acpi_thermal_driver = {
.ops = {
.add = acpi_thermal_add,
.remove = acpi_thermal_remove,
- .resume = acpi_thermal_resume,
.notify = acpi_thermal_notify,
},
+ .drv.pm = &acpi_thermal_pm,
};
struct acpi_thermal_state {
@@ -1041,16 +1043,17 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
return 0;
}
-static int acpi_thermal_resume(struct acpi_device *device)
+static int acpi_thermal_resume(struct device *dev)
{
- struct acpi_thermal *tz = NULL;
+ struct acpi_thermal *tz;
int i, j, power_state, result;
-
- if (!device || !acpi_driver_data(device))
+ if (!dev)
return -EINVAL;
- tz = acpi_driver_data(device);
+ tz = acpi_driver_data(to_acpi_device(dev));
+ if (!tz)
+ return -EINVAL;
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
if (!(&tz->trips.active[i]))
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 9577b6f..1e0a9e1 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -558,6 +558,8 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
+ if (!video->cap._DOS)
+ return 0;
if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1)
return -EINVAL;
@@ -1687,10 +1689,6 @@ static int acpi_video_bus_add(struct acpi_device *device)
set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
set_bit(KEY_DISPLAY_OFF, input->keybit);
- error = input_register_device(input);
- if (error)
- goto err_stop_video;
-
printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n",
ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
video->flags.multihead ? "yes" : "no",
@@ -1701,12 +1699,16 @@ static int acpi_video_bus_add(struct acpi_device *device)
video->pm_nb.priority = 0;
error = register_pm_notifier(&video->pm_nb);
if (error)
- goto err_unregister_input_dev;
+ goto err_stop_video;
+
+ error = input_register_device(input);
+ if (error)
+ goto err_unregister_pm_notifier;
return 0;
- err_unregister_input_dev:
- input_unregister_device(input);
+ err_unregister_pm_notifier:
+ unregister_pm_notifier(&video->pm_nb);
err_stop_video:
acpi_video_bus_stop_devices(video);
err_free_input_dev:
@@ -1743,9 +1745,18 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type)
return 0;
}
+static int __init is_i740(struct pci_dev *dev)
+{
+ if (dev->device == 0x00D1)
+ return 1;
+ if (dev->device == 0x7000)
+ return 1;
+ return 0;
+}
+
static int __init intel_opregion_present(void)
{
-#if defined(CONFIG_DRM_I915) || defined(CONFIG_DRM_I915_MODULE)
+ int opregion = 0;
struct pci_dev *dev = NULL;
u32 address;
@@ -1754,13 +1765,15 @@ static int __init intel_opregion_present(void)
continue;
if (dev->vendor != PCI_VENDOR_ID_INTEL)
continue;
+ /* We don't want to poke around undefined i740 registers */
+ if (is_i740(dev))
+ continue;
pci_read_config_dword(dev, 0xfc, &address);
if (!address)
continue;
- return 1;
+ opregion = 1;
}
-#endif
- return 0;
+ return opregion;
}
int acpi_video_register(void)
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index aa0b1f1..0b6f0b2 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -264,11 +264,6 @@ static int __devinit tegra_ahb_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit tegra_ahb_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static const struct of_device_id tegra_ahb_of_match[] __devinitconst = {
{ .compatible = "nvidia,tegra30-ahb", },
{ .compatible = "nvidia,tegra20-ahb", },
@@ -277,7 +272,6 @@ static const struct of_device_id tegra_ahb_of_match[] __devinitconst = {
static struct platform_driver tegra_ahb_driver = {
.probe = tegra_ahb_probe,
- .remove = __devexit_p(tegra_ahb_remove),
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 3239517..ac6a5be 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -4,7 +4,7 @@
* Arasan Compact Flash host controller source file
*
* Copyright (C) 2011 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -959,7 +959,7 @@ static struct platform_driver arasan_cf_driver = {
module_platform_driver(arasan_cf_driver);
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index e8cd652..9851093 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -984,6 +984,7 @@ static uint32_t fpga_tx(struct solos_card *card)
} else if (skb && card->using_dma) {
SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data,
skb->len, PCI_DMA_TODEVICE);
+ card->tx_skb[port] = skb;
iowrite32(SKB_CB(skb)->dma_addr,
card->config_regs + TX_DMA_ADDR(port));
}
@@ -1152,7 +1153,8 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
db_fpga_upgrade = db_firmware_upgrade = 0;
}
- if (card->fpga_version >= DMA_SUPPORTED){
+ if (card->fpga_version >= DMA_SUPPORTED) {
+ pci_set_master(dev);
card->using_dma = 1;
} else {
card->using_dma = 0;
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 1b1cbb5..4b01ab3 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -24,6 +24,7 @@
#include <linux/wait.h>
#include <linux/async.h>
#include <linux/pm_runtime.h>
+#include <scsi/scsi_scan.h>
#include "base.h"
#include "power/power.h"
@@ -100,7 +101,7 @@ static void driver_deferred_probe_add(struct device *dev)
mutex_lock(&deferred_probe_mutex);
if (list_empty(&dev->p->deferred_probe)) {
dev_dbg(dev, "Added to deferred list\n");
- list_add(&dev->p->deferred_probe, &deferred_probe_pending_list);
+ list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
}
mutex_unlock(&deferred_probe_mutex);
}
@@ -332,6 +333,7 @@ void wait_for_device_probe(void)
/* wait for the known devices to complete their probing */
wait_event(probe_waitqueue, atomic_read(&probe_count) == 0);
async_synchronize_full();
+ scsi_complete_async_scans();
}
EXPORT_SYMBOL_GPL(wait_for_device_probe);
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 765c3a2..d91a3a0 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -227,33 +227,24 @@ static int handle_create(const char *nodename, umode_t mode, struct device *dev)
static int dev_rmdir(const char *name)
{
- struct nameidata nd;
+ struct path parent;
struct dentry *dentry;
int err;
- err = kern_path_parent(name, &nd);
- if (err)
- return err;
-
- mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
- dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
- if (!IS_ERR(dentry)) {
- if (dentry->d_inode) {
- if (dentry->d_inode->i_private == &thread)
- err = vfs_rmdir(nd.path.dentry->d_inode,
- dentry);
- else
- err = -EPERM;
- } else {
- err = -ENOENT;
- }
- dput(dentry);
+ dentry = kern_path_locked(name, &parent);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+ if (dentry->d_inode) {
+ if (dentry->d_inode->i_private == &thread)
+ err = vfs_rmdir(parent.dentry->d_inode, dentry);
+ else
+ err = -EPERM;
} else {
- err = PTR_ERR(dentry);
+ err = -ENOENT;
}
-
- mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
- path_put(&nd.path);
+ dput(dentry);
+ mutex_unlock(&parent.dentry->d_inode->i_mutex);
+ path_put(&parent);
return err;
}
@@ -305,50 +296,43 @@ static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *sta
static int handle_remove(const char *nodename, struct device *dev)
{
- struct nameidata nd;
+ struct path parent;
struct dentry *dentry;
- struct kstat stat;
int deleted = 1;
int err;
- err = kern_path_parent(nodename, &nd);
- if (err)
- return err;
+ dentry = kern_path_locked(nodename, &parent);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
- mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
- dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
- if (!IS_ERR(dentry)) {
- if (dentry->d_inode) {
- err = vfs_getattr(nd.path.mnt, dentry, &stat);
- if (!err && dev_mynode(dev, dentry->d_inode, &stat)) {
- struct iattr newattrs;
- /*
- * before unlinking this node, reset permissions
- * of possible references like hardlinks
- */
- newattrs.ia_uid = 0;
- newattrs.ia_gid = 0;
- newattrs.ia_mode = stat.mode & ~0777;
- newattrs.ia_valid =
- ATTR_UID|ATTR_GID|ATTR_MODE;
- mutex_lock(&dentry->d_inode->i_mutex);
- notify_change(dentry, &newattrs);
- mutex_unlock(&dentry->d_inode->i_mutex);
- err = vfs_unlink(nd.path.dentry->d_inode,
- dentry);
- if (!err || err == -ENOENT)
- deleted = 1;
- }
- } else {
- err = -ENOENT;
+ if (dentry->d_inode) {
+ struct kstat stat;
+ err = vfs_getattr(parent.mnt, dentry, &stat);
+ if (!err && dev_mynode(dev, dentry->d_inode, &stat)) {
+ struct iattr newattrs;
+ /*
+ * before unlinking this node, reset permissions
+ * of possible references like hardlinks
+ */
+ newattrs.ia_uid = 0;
+ newattrs.ia_gid = 0;
+ newattrs.ia_mode = stat.mode & ~0777;
+ newattrs.ia_valid =
+ ATTR_UID|ATTR_GID|ATTR_MODE;
+ mutex_lock(&dentry->d_inode->i_mutex);
+ notify_change(dentry, &newattrs);
+ mutex_unlock(&dentry->d_inode->i_mutex);
+ err = vfs_unlink(parent.dentry->d_inode, dentry);
+ if (!err || err == -ENOENT)
+ deleted = 1;
}
- dput(dentry);
} else {
- err = PTR_ERR(dentry);
+ err = -ENOENT;
}
- mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+ dput(dentry);
+ mutex_unlock(&parent.dentry->d_inode->i_mutex);
- path_put(&nd.path);
+ path_put(&parent);
if (deleted && strchr(nodename, '/'))
delete_path(nodename);
return err;
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 83aa694..ba3487c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -75,19 +75,6 @@ static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
start_latency_ns, "start");
}
-static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
- save_state_latency_ns, "state save");
-}
-
-static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
- restore_state_latency_ns,
- "state restore");
-}
-
static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
bool ret = false;
@@ -139,6 +126,19 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
genpd->status = GPD_STATE_ACTIVE;
}
+static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
+{
+ s64 usecs64;
+
+ if (!genpd->cpu_data)
+ return;
+
+ usecs64 = genpd->power_on_latency_ns;
+ do_div(usecs64, NSEC_PER_USEC);
+ usecs64 += genpd->cpu_data->saved_exit_latency;
+ genpd->cpu_data->idle_state->exit_latency = usecs64;
+}
+
/**
* __pm_genpd_poweron - Restore power to a given PM domain and its masters.
* @genpd: PM domain to power up.
@@ -146,7 +146,7 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
* Restore power to @genpd and all of its masters so that it is possible to
* resume a device belonging to it.
*/
-int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
__releases(&genpd->lock) __acquires(&genpd->lock)
{
struct gpd_link *link;
@@ -176,6 +176,13 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
return 0;
}
+ if (genpd->cpu_data) {
+ cpuidle_pause_and_lock();
+ genpd->cpu_data->idle_state->disabled = true;
+ cpuidle_resume_and_unlock();
+ goto out;
+ }
+
/*
* The list is guaranteed not to change while the loop below is being
* executed, unless one of the masters' .power_on() callbacks fiddles
@@ -215,6 +222,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
if (elapsed_ns > genpd->power_on_latency_ns) {
genpd->power_on_latency_ns = elapsed_ns;
genpd->max_off_time_changed = true;
+ genpd_recalc_cpu_exit_latency(genpd);
if (genpd->name)
pr_warning("%s: Power-on latency exceeded, "
"new value %lld ns\n", genpd->name,
@@ -222,6 +230,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
}
}
+ out:
genpd_set_active(genpd);
return 0;
@@ -251,6 +260,19 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
#ifdef CONFIG_PM_RUNTIME
+static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
+ save_state_latency_ns, "state save");
+}
+
+static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
+ restore_state_latency_ns,
+ "state restore");
+}
+
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
unsigned long val, void *ptr)
{
@@ -275,7 +297,7 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
pdd = dev->power.subsys_data ?
dev->power.subsys_data->domain_data : NULL;
- if (pdd) {
+ if (pdd && pdd->dev) {
to_gpd_data(pdd)->td.constraint_changed = true;
genpd = dev_to_genpd(dev);
} else {
@@ -339,19 +361,16 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
{
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
struct device *dev = pdd->dev;
+ bool need_restore = gpd_data->need_restore;
- if (!gpd_data->need_restore)
- return;
-
+ gpd_data->need_restore = false;
mutex_unlock(&genpd->lock);
genpd_start_dev(genpd, dev);
- genpd_restore_dev(genpd, dev);
- genpd_stop_dev(genpd, dev);
+ if (need_restore)
+ genpd_restore_dev(genpd, dev);
mutex_lock(&genpd->lock);
-
- gpd_data->need_restore = false;
}
/**
@@ -458,6 +477,21 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
}
}
+ if (genpd->cpu_data) {
+ /*
+ * If cpu_data is set, cpuidle should turn the domain off when
+ * the CPU in it is idle. In that case we don't decrement the
+ * subdomain counts of the master domains, so that power is not
+ * removed from the current domain prematurely as a result of
+ * cutting off the masters' power.
+ */
+ genpd->status = GPD_STATE_POWER_OFF;
+ cpuidle_pause_and_lock();
+ genpd->cpu_data->idle_state->disabled = false;
+ cpuidle_resume_and_unlock();
+ goto out;
+ }
+
if (genpd->power_off) {
ktime_t time_start;
s64 elapsed_ns;
@@ -595,7 +629,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
/* If power.irq_safe, the PM domain is never powered off. */
if (dev->power.irq_safe)
- goto out;
+ return genpd_start_dev(genpd, dev);
mutex_lock(&genpd->lock);
ret = __pm_genpd_poweron(genpd);
@@ -628,9 +662,6 @@ static int pm_genpd_runtime_resume(struct device *dev)
wake_up_all(&genpd->status_wait_queue);
mutex_unlock(&genpd->lock);
- out:
- genpd_start_dev(genpd, dev);
-
return 0;
}
@@ -1235,6 +1266,27 @@ static void pm_genpd_complete(struct device *dev)
#endif /* CONFIG_PM_SLEEP */
+static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
+{
+ struct generic_pm_domain_data *gpd_data;
+
+ gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
+ if (!gpd_data)
+ return NULL;
+
+ mutex_init(&gpd_data->lock);
+ gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
+ dev_pm_qos_add_notifier(dev, &gpd_data->nb);
+ return gpd_data;
+}
+
+static void __pm_genpd_free_dev_data(struct device *dev,
+ struct generic_pm_domain_data *gpd_data)
+{
+ dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
+ kfree(gpd_data);
+}
+
/**
* __pm_genpd_add_device - Add a device to an I/O PM domain.
* @genpd: PM domain to add the device to.
@@ -1244,7 +1296,7 @@ static void pm_genpd_complete(struct device *dev)
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
struct gpd_timing_data *td)
{
- struct generic_pm_domain_data *gpd_data;
+ struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
struct pm_domain_data *pdd;
int ret = 0;
@@ -1253,14 +1305,10 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
return -EINVAL;
- gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
- if (!gpd_data)
+ gpd_data_new = __pm_genpd_alloc_dev_data(dev);
+ if (!gpd_data_new)
return -ENOMEM;
- mutex_init(&gpd_data->lock);
- gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
- dev_pm_qos_add_notifier(dev, &gpd_data->nb);
-
genpd_acquire_lock(genpd);
if (genpd->prepared_count > 0) {
@@ -1274,35 +1322,42 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
goto out;
}
+ ret = dev_pm_get_subsys_data(dev);
+ if (ret)
+ goto out;
+
genpd->device_count++;
genpd->max_off_time_changed = true;
- dev_pm_get_subsys_data(dev);
-
- mutex_lock(&gpd_data->lock);
spin_lock_irq(&dev->power.lock);
+
dev->pm_domain = &genpd->domain;
- dev->power.subsys_data->domain_data = &gpd_data->base;
- gpd_data->base.dev = dev;
- list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
- gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
+ if (dev->power.subsys_data->domain_data) {
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ } else {
+ gpd_data = gpd_data_new;
+ dev->power.subsys_data->domain_data = &gpd_data->base;
+ }
+ gpd_data->refcount++;
if (td)
gpd_data->td = *td;
+ spin_unlock_irq(&dev->power.lock);
+
+ mutex_lock(&gpd_data->lock);
+ gpd_data->base.dev = dev;
+ list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+ gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
gpd_data->td.constraint_changed = true;
gpd_data->td.effective_constraint_ns = -1;
- spin_unlock_irq(&dev->power.lock);
mutex_unlock(&gpd_data->lock);
- genpd_release_lock(genpd);
-
- return 0;
-
out:
genpd_release_lock(genpd);
- dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
- kfree(gpd_data);
+ if (gpd_data != gpd_data_new)
+ __pm_genpd_free_dev_data(dev, gpd_data_new);
+
return ret;
}
@@ -1348,6 +1403,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
{
struct generic_pm_domain_data *gpd_data;
struct pm_domain_data *pdd;
+ bool remove = false;
int ret = 0;
dev_dbg(dev, "%s()\n", __func__);
@@ -1368,22 +1424,28 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
genpd->max_off_time_changed = true;
spin_lock_irq(&dev->power.lock);
+
dev->pm_domain = NULL;
pdd = dev->power.subsys_data->domain_data;
list_del_init(&pdd->list_node);
- dev->power.subsys_data->domain_data = NULL;
+ gpd_data = to_gpd_data(pdd);
+ if (--gpd_data->refcount == 0) {
+ dev->power.subsys_data->domain_data = NULL;
+ remove = true;
+ }
+
spin_unlock_irq(&dev->power.lock);
- gpd_data = to_gpd_data(pdd);
mutex_lock(&gpd_data->lock);
pdd->dev = NULL;
mutex_unlock(&gpd_data->lock);
genpd_release_lock(genpd);
- dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
- kfree(gpd_data);
dev_pm_put_subsys_data(dev);
+ if (remove)
+ __pm_genpd_free_dev_data(dev, gpd_data);
+
return 0;
out:
@@ -1541,33 +1603,52 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
* @dev: Device to add the callbacks to.
* @ops: Set of callbacks to add.
* @td: Timing data to add to the device along with the callbacks (optional).
+ *
+ * Every call to this routine should be balanced with a call to
+ * __pm_genpd_remove_callbacks() and they must not be nested.
*/
int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
struct gpd_timing_data *td)
{
- struct pm_domain_data *pdd;
+ struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
int ret = 0;
- if (!(dev && dev->power.subsys_data && ops))
+ if (!(dev && ops))
return -EINVAL;
+ gpd_data_new = __pm_genpd_alloc_dev_data(dev);
+ if (!gpd_data_new)
+ return -ENOMEM;
+
pm_runtime_disable(dev);
device_pm_lock();
- pdd = dev->power.subsys_data->domain_data;
- if (pdd) {
- struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+ ret = dev_pm_get_subsys_data(dev);
+ if (ret)
+ goto out;
- gpd_data->ops = *ops;
- if (td)
- gpd_data->td = *td;
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.subsys_data->domain_data) {
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
} else {
- ret = -EINVAL;
+ gpd_data = gpd_data_new;
+ dev->power.subsys_data->domain_data = &gpd_data->base;
}
+ gpd_data->refcount++;
+ gpd_data->ops = *ops;
+ if (td)
+ gpd_data->td = *td;
+
+ spin_unlock_irq(&dev->power.lock);
+ out:
device_pm_unlock();
pm_runtime_enable(dev);
+ if (gpd_data != gpd_data_new)
+ __pm_genpd_free_dev_data(dev, gpd_data_new);
+
return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
@@ -1576,10 +1657,13 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
* __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
* @dev: Device to remove the callbacks from.
* @clear_td: If set, clear the device's timing data too.
+ *
+ * This routine can only be called after pm_genpd_add_callbacks().
*/
int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
{
- struct pm_domain_data *pdd;
+ struct generic_pm_domain_data *gpd_data = NULL;
+ bool remove = false;
int ret = 0;
if (!(dev && dev->power.subsys_data))
@@ -1588,24 +1672,118 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
pm_runtime_disable(dev);
device_pm_lock();
- pdd = dev->power.subsys_data->domain_data;
- if (pdd) {
- struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+ spin_lock_irq(&dev->power.lock);
- gpd_data->ops = (struct gpd_dev_ops){ 0 };
+ if (dev->power.subsys_data->domain_data) {
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ gpd_data->ops = (struct gpd_dev_ops){ NULL };
if (clear_td)
gpd_data->td = (struct gpd_timing_data){ 0 };
+
+ if (--gpd_data->refcount == 0) {
+ dev->power.subsys_data->domain_data = NULL;
+ remove = true;
+ }
} else {
ret = -EINVAL;
}
+ spin_unlock_irq(&dev->power.lock);
+
device_pm_unlock();
pm_runtime_enable(dev);
- return ret;
+ if (ret)
+ return ret;
+
+ dev_pm_put_subsys_data(dev);
+ if (remove)
+ __pm_genpd_free_dev_data(dev, gpd_data);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
+int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
+{
+ struct cpuidle_driver *cpuidle_drv;
+ struct gpd_cpu_data *cpu_data;
+ struct cpuidle_state *idle_state;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(genpd) || state < 0)
+ return -EINVAL;
+
+ genpd_acquire_lock(genpd);
+
+ if (genpd->cpu_data) {
+ ret = -EEXIST;
+ goto out;
+ }
+ cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
+ if (!cpu_data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ cpuidle_drv = cpuidle_driver_ref();
+ if (!cpuidle_drv) {
+ ret = -ENODEV;
+ goto out;
+ }
+ if (cpuidle_drv->state_count <= state) {
+ ret = -EINVAL;
+ goto err;
+ }
+ idle_state = &cpuidle_drv->states[state];
+ if (!idle_state->disabled) {
+ ret = -EAGAIN;
+ goto err;
+ }
+ cpu_data->idle_state = idle_state;
+ cpu_data->saved_exit_latency = idle_state->exit_latency;
+ genpd->cpu_data = cpu_data;
+ genpd_recalc_cpu_exit_latency(genpd);
+
+ out:
+ genpd_release_lock(genpd);
+ return ret;
+
+ err:
+ cpuidle_driver_unref();
+ goto out;
+}
+
+int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+{
+ struct gpd_cpu_data *cpu_data;
+ struct cpuidle_state *idle_state;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(genpd))
+ return -EINVAL;
+
+ genpd_acquire_lock(genpd);
+
+ cpu_data = genpd->cpu_data;
+ if (!cpu_data) {
+ ret = -ENODEV;
+ goto out;
+ }
+ idle_state = cpu_data->idle_state;
+ if (!idle_state->disabled) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ idle_state->exit_latency = cpu_data->saved_exit_latency;
+ cpuidle_driver_unref();
+ genpd->cpu_data = NULL;
+ kfree(cpu_data);
+
+ out:
+ genpd_release_lock(genpd);
+ return ret;
+}
+
/* Default device callbacks for generic PM domains. */
/**
@@ -1615,16 +1793,24 @@ EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
static int pm_genpd_default_save_state(struct device *dev)
{
int (*cb)(struct device *__dev);
- struct device_driver *drv = dev->driver;
cb = dev_gpd_data(dev)->ops.save_state;
if (cb)
return cb(dev);
- if (drv && drv->pm && drv->pm->runtime_suspend)
- return drv->pm->runtime_suspend(dev);
+ if (dev->type && dev->type->pm)
+ cb = dev->type->pm->runtime_suspend;
+ else if (dev->class && dev->class->pm)
+ cb = dev->class->pm->runtime_suspend;
+ else if (dev->bus && dev->bus->pm)
+ cb = dev->bus->pm->runtime_suspend;
+ else
+ cb = NULL;
- return 0;
+ if (!cb && dev->driver && dev->driver->pm)
+ cb = dev->driver->pm->runtime_suspend;
+
+ return cb ? cb(dev) : 0;
}
/**
@@ -1634,16 +1820,24 @@ static int pm_genpd_default_save_state(struct device *dev)
static int pm_genpd_default_restore_state(struct device *dev)
{
int (*cb)(struct device *__dev);
- struct device_driver *drv = dev->driver;
cb = dev_gpd_data(dev)->ops.restore_state;
if (cb)
return cb(dev);
- if (drv && drv->pm && drv->pm->runtime_resume)
- return drv->pm->runtime_resume(dev);
+ if (dev->type && dev->type->pm)
+ cb = dev->type->pm->runtime_resume;
+ else if (dev->class && dev->class->pm)
+ cb = dev->class->pm->runtime_resume;
+ else if (dev->bus && dev->bus->pm)
+ cb = dev->bus->pm->runtime_resume;
+ else
+ cb = NULL;
- return 0;
+ if (!cb && dev->driver && dev->driver->pm)
+ cb = dev->driver->pm->runtime_resume;
+
+ return cb ? cb(dev) : 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e0fb5b0..0113adc 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -28,7 +28,7 @@
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
-
+#include <linux/cpuidle.h>
#include "../base.h"
#include "power.h"
@@ -45,10 +45,10 @@ typedef int (*pm_callback_t)(struct device *);
*/
LIST_HEAD(dpm_list);
-LIST_HEAD(dpm_prepared_list);
-LIST_HEAD(dpm_suspended_list);
-LIST_HEAD(dpm_late_early_list);
-LIST_HEAD(dpm_noirq_list);
+static LIST_HEAD(dpm_prepared_list);
+static LIST_HEAD(dpm_suspended_list);
+static LIST_HEAD(dpm_late_early_list);
+static LIST_HEAD(dpm_noirq_list);
struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
@@ -166,7 +166,7 @@ static ktime_t initcall_debug_start(struct device *dev)
{
ktime_t calltime = ktime_set(0, 0);
- if (initcall_debug) {
+ if (pm_print_times_enabled) {
pr_info("calling %s+ @ %i, parent: %s\n",
dev_name(dev), task_pid_nr(current),
dev->parent ? dev_name(dev->parent) : "none");
@@ -181,7 +181,7 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
{
ktime_t delta, rettime;
- if (initcall_debug) {
+ if (pm_print_times_enabled) {
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
@@ -467,6 +467,7 @@ static void dpm_resume_noirq(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
dpm_show_time(starttime, state, "noirq");
resume_device_irqs();
+ cpuidle_resume();
}
/**
@@ -867,6 +868,7 @@ static int dpm_suspend_noirq(pm_message_t state)
ktime_t starttime = ktime_get();
int error = 0;
+ cpuidle_pause();
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_late_early_list)) {
@@ -989,8 +991,16 @@ static int dpm_suspend_late(pm_message_t state)
int dpm_suspend_end(pm_message_t state)
{
int error = dpm_suspend_late(state);
+ if (error)
+ return error;
+
+ error = dpm_suspend_noirq(state);
+ if (error) {
+ dpm_resume_early(state);
+ return error;
+ }
- return error ? : dpm_suspend_noirq(state);
+ return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
@@ -1031,7 +1041,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_wait_for_children(dev, async);
if (async_error)
- return 0;
+ goto Complete;
pm_runtime_get_noresume(dev);
if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
@@ -1040,7 +1050,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (pm_wakeup_pending()) {
pm_runtime_put_sync(dev);
async_error = -EBUSY;
- return 0;
+ goto Complete;
}
device_lock(dev);
@@ -1097,6 +1107,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
device_unlock(dev);
+
+ Complete:
complete_all(&dev->power.completion);
if (error) {
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index fd849a2..74a67e0 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -462,7 +462,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
static void __dev_pm_qos_drop_user_request(struct device *dev)
{
dev_pm_qos_remove_request(dev->power.pq_req);
- dev->power.pq_req = 0;
+ dev->power.pq_req = NULL;
}
/**
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 48be2ad..b91dc6f 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -474,6 +474,8 @@ static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
#endif
+#ifdef CONFIG_PM_SLEEP
+
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -500,6 +502,8 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(async, 0644, async_show, async_store);
+
+#endif
#endif /* CONFIG_PM_ADVANCED_DEBUG */
static struct attribute *power_attrs[] = {
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index b986b86..80f9ab9 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -95,6 +95,9 @@ struct regmap {
/* if set, converts bulk rw to single rw */
bool use_single_rw;
+
+ struct rb_root range_tree;
+ void *selector_work_buf; /* Scratch buffer used for selector */
};
struct regcache_ops {
@@ -115,6 +118,20 @@ bool regmap_precious(struct regmap *map, unsigned int reg);
int _regmap_write(struct regmap *map, unsigned int reg,
unsigned int val);
+struct regmap_range_node {
+ struct rb_node node;
+
+ unsigned int range_min;
+ unsigned int range_max;
+
+ unsigned int selector_reg;
+ unsigned int selector_mask;
+ int selector_shift;
+
+ unsigned int window_start;
+ unsigned int window_len;
+};
+
#ifdef CONFIG_DEBUG_FS
extern void regmap_debugfs_initcall(void);
extern void regmap_debugfs_init(struct regmap *map, const char *name);
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 5f6b247..fa6bf52 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -42,7 +42,7 @@ static int regmap_i2c_gather_write(void *context,
/* If the I2C controller can't do a gather tell the core, it
* will substitute in a linear write for us.
*/
- if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_PROTOCOL_MANGLING))
+ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_NOSTART))
return -ENOTSUPP;
xfer[0].addr = i2c->addr;
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 4fac4b9..a897346 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -24,14 +24,18 @@ struct regmap_irq_chip_data {
struct mutex lock;
struct regmap *map;
- struct regmap_irq_chip *chip;
+ const struct regmap_irq_chip *chip;
int irq_base;
struct irq_domain *domain;
+ int irq;
+ int wake_count;
+
unsigned int *status_buf;
unsigned int *mask_buf;
unsigned int *mask_buf_def;
+ unsigned int *wake_buf;
unsigned int irq_reg_stride;
};
@@ -71,6 +75,16 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
d->chip->mask_base + (i * map->reg_stride));
}
+ /* If we've changed our wakeup count propagate it to the parent */
+ if (d->wake_count < 0)
+ for (i = d->wake_count; i < 0; i++)
+ irq_set_irq_wake(d->irq, 0);
+ else if (d->wake_count > 0)
+ for (i = 0; i < d->wake_count; i++)
+ irq_set_irq_wake(d->irq, 1);
+
+ d->wake_count = 0;
+
mutex_unlock(&d->lock);
}
@@ -92,18 +106,41 @@ static void regmap_irq_disable(struct irq_data *data)
d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}
+static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ struct regmap *map = d->map;
+ const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
+
+ if (!d->chip->wake_base)
+ return -EINVAL;
+
+ if (on) {
+ d->wake_buf[irq_data->reg_offset / map->reg_stride]
+ &= ~irq_data->mask;
+ d->wake_count++;
+ } else {
+ d->wake_buf[irq_data->reg_offset / map->reg_stride]
+ |= irq_data->mask;
+ d->wake_count--;
+ }
+
+ return 0;
+}
+
static struct irq_chip regmap_irq_chip = {
.name = "regmap",
.irq_bus_lock = regmap_irq_lock,
.irq_bus_sync_unlock = regmap_irq_sync_unlock,
.irq_disable = regmap_irq_disable,
.irq_enable = regmap_irq_enable,
+ .irq_set_wake = regmap_irq_set_wake,
};
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
struct regmap_irq_chip_data *data = d;
- struct regmap_irq_chip *chip = data->chip;
+ const struct regmap_irq_chip *chip = data->chip;
struct regmap *map = data->map;
int ret, i;
bool handled = false;
@@ -195,7 +232,7 @@ static struct irq_domain_ops regmap_domain_ops = {
* register values used by the IRQ controller over suspend and resume.
*/
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
- int irq_base, struct regmap_irq_chip *chip,
+ int irq_base, const struct regmap_irq_chip *chip,
struct regmap_irq_chip_data **data)
{
struct regmap_irq_chip_data *d;
@@ -240,6 +277,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
if (!d->mask_buf_def)
goto err_alloc;
+ if (chip->wake_base) {
+ d->wake_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
+ GFP_KERNEL);
+ if (!d->wake_buf)
+ goto err_alloc;
+ }
+
+ d->irq = irq;
d->map = map;
d->chip = chip;
d->irq_base = irq_base;
@@ -294,6 +339,7 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
err_domain:
/* Should really dispose of the domain but... */
err_alloc:
+ kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
kfree(d->status_buf);
@@ -315,6 +361,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
free_irq(irq, d);
/* We should unmap the domain but... */
+ kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
kfree(d->status_buf);
@@ -346,6 +393,10 @@ EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
*/
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
+ /* Handle holes in the IRQ list */
+ if (!data->chip->irqs[irq].mask)
+ return -EINVAL;
+
return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index febd6de..f05fc74 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -37,7 +37,7 @@ static int regmap_mmio_gather_write(void *context,
BUG_ON(reg_size != 4);
- offset = be32_to_cpup(reg);
+ offset = *(u32 *)reg;
while (val_size) {
switch (ctx->val_bytes) {
@@ -45,14 +45,14 @@ static int regmap_mmio_gather_write(void *context,
writeb(*(u8 *)val, ctx->regs + offset);
break;
case 2:
- writew(be16_to_cpup(val), ctx->regs + offset);
+ writew(*(u16 *)val, ctx->regs + offset);
break;
case 4:
- writel(be32_to_cpup(val), ctx->regs + offset);
+ writel(*(u32 *)val, ctx->regs + offset);
break;
#ifdef CONFIG_64BIT
case 8:
- writeq(be64_to_cpup(val), ctx->regs + offset);
+ writeq(*(u64 *)val, ctx->regs + offset);
break;
#endif
default:
@@ -83,7 +83,7 @@ static int regmap_mmio_read(void *context,
BUG_ON(reg_size != 4);
- offset = be32_to_cpup(reg);
+ offset = *(u32 *)reg;
while (val_size) {
switch (ctx->val_bytes) {
@@ -91,14 +91,14 @@ static int regmap_mmio_read(void *context,
*(u8 *)val = readb(ctx->regs + offset);
break;
case 2:
- *(u16 *)val = cpu_to_be16(readw(ctx->regs + offset));
+ *(u16 *)val = readw(ctx->regs + offset);
break;
case 4:
- *(u32 *)val = cpu_to_be32(readl(ctx->regs + offset));
+ *(u32 *)val = readl(ctx->regs + offset);
break;
#ifdef CONFIG_64BIT
case 8:
- *(u64 *)val = cpu_to_be32(readq(ctx->regs + offset));
+ *(u64 *)val = readq(ctx->regs + offset);
break;
#endif
default:
@@ -124,9 +124,11 @@ static struct regmap_bus regmap_mmio = {
.gather_write = regmap_mmio_gather_write,
.read = regmap_mmio_read,
.free_context = regmap_mmio_free_context,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};
-struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
+static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
const struct regmap_config *config)
{
struct regmap_mmio_context *ctx;
@@ -162,7 +164,15 @@ struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
if (config->reg_stride < min_stride)
return ERR_PTR(-EINVAL);
- ctx = kzalloc(GFP_KERNEL, sizeof(*ctx));
+ switch (config->reg_format_endian) {
+ case REGMAP_ENDIAN_DEFAULT:
+ case REGMAP_ENDIAN_NATIVE:
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 0bcda48..c241ae2 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -15,12 +15,25 @@
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
+#include <linux/rbtree.h>
#define CREATE_TRACE_POINTS
#include <trace/events/regmap.h>
#include "internal.h"
+/*
+ * Sometimes for failures during very early init the trace
+ * infrastructure isn't available early enough to be used. For this
+ * sort of problem defining LOG_DEVICE will add printks for basic
+ * register I/O on a specific device.
+ */
+#undef LOG_DEVICE
+
+static int _regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change);
+
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
if (map->max_register && reg > map->max_register)
@@ -119,13 +132,19 @@ static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
b[0] = val << shift;
}
-static void regmap_format_16(void *buf, unsigned int val, unsigned int shift)
+static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
__be16 *b = buf;
b[0] = cpu_to_be16(val << shift);
}
+static void regmap_format_16_native(void *buf, unsigned int val,
+ unsigned int shift)
+{
+ *(u16 *)buf = val << shift;
+}
+
static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
u8 *b = buf;
@@ -137,13 +156,19 @@ static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
b[2] = val;
}
-static void regmap_format_32(void *buf, unsigned int val, unsigned int shift)
+static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
__be32 *b = buf;
b[0] = cpu_to_be32(val << shift);
}
+static void regmap_format_32_native(void *buf, unsigned int val,
+ unsigned int shift)
+{
+ *(u32 *)buf = val << shift;
+}
+
static unsigned int regmap_parse_8(void *buf)
{
u8 *b = buf;
@@ -151,7 +176,7 @@ static unsigned int regmap_parse_8(void *buf)
return b[0];
}
-static unsigned int regmap_parse_16(void *buf)
+static unsigned int regmap_parse_16_be(void *buf)
{
__be16 *b = buf;
@@ -160,6 +185,11 @@ static unsigned int regmap_parse_16(void *buf)
return b[0];
}
+static unsigned int regmap_parse_16_native(void *buf)
+{
+ return *(u16 *)buf;
+}
+
static unsigned int regmap_parse_24(void *buf)
{
u8 *b = buf;
@@ -170,7 +200,7 @@ static unsigned int regmap_parse_24(void *buf)
return ret;
}
-static unsigned int regmap_parse_32(void *buf)
+static unsigned int regmap_parse_32_be(void *buf)
{
__be32 *b = buf;
@@ -179,6 +209,11 @@ static unsigned int regmap_parse_32(void *buf)
return b[0];
}
+static unsigned int regmap_parse_32_native(void *buf)
+{
+ return *(u32 *)buf;
+}
+
static void regmap_lock_mutex(struct regmap *map)
{
mutex_lock(&map->mutex);
@@ -208,6 +243,67 @@ static void dev_get_regmap_release(struct device *dev, void *res)
*/
}
+static bool _regmap_range_add(struct regmap *map,
+ struct regmap_range_node *data)
+{
+ struct rb_root *root = &map->range_tree;
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ while (*new) {
+ struct regmap_range_node *this =
+ container_of(*new, struct regmap_range_node, node);
+
+ parent = *new;
+ if (data->range_max < this->range_min)
+ new = &((*new)->rb_left);
+ else if (data->range_min > this->range_max)
+ new = &((*new)->rb_right);
+ else
+ return false;
+ }
+
+ rb_link_node(&data->node, parent, new);
+ rb_insert_color(&data->node, root);
+
+ return true;
+}
+
+static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
+ unsigned int reg)
+{
+ struct rb_node *node = map->range_tree.rb_node;
+
+ while (node) {
+ struct regmap_range_node *this =
+ container_of(node, struct regmap_range_node, node);
+
+ if (reg < this->range_min)
+ node = node->rb_left;
+ else if (reg > this->range_max)
+ node = node->rb_right;
+ else
+ return this;
+ }
+
+ return NULL;
+}
+
+static void regmap_range_exit(struct regmap *map)
+{
+ struct rb_node *next;
+ struct regmap_range_node *range_node;
+
+ next = rb_first(&map->range_tree);
+ while (next) {
+ range_node = rb_entry(next, struct regmap_range_node, node);
+ next = rb_next(&range_node->node);
+ rb_erase(&range_node->node, &map->range_tree);
+ kfree(range_node);
+ }
+
+ kfree(map->selector_work_buf);
+}
+
/**
* regmap_init(): Initialise register map
*
@@ -227,6 +323,8 @@ struct regmap *regmap_init(struct device *dev,
{
struct regmap *map, **m;
int ret = -EINVAL;
+ enum regmap_endian reg_endian, val_endian;
+ int i, j;
if (!bus || !config)
goto err;
@@ -246,11 +344,11 @@ struct regmap *regmap_init(struct device *dev,
map->lock = regmap_lock_mutex;
map->unlock = regmap_unlock_mutex;
}
- map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
- map->format.buf_size += map->format.pad_bytes;
+ map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
+ config->val_bits + config->pad_bits, 8);
map->reg_shift = config->pad_bits % 8;
if (config->reg_stride)
map->reg_stride = config->reg_stride;
@@ -275,6 +373,18 @@ struct regmap *regmap_init(struct device *dev,
map->read_flag_mask = bus->read_flag_mask;
}
+ reg_endian = config->reg_format_endian;
+ if (reg_endian == REGMAP_ENDIAN_DEFAULT)
+ reg_endian = bus->reg_format_endian_default;
+ if (reg_endian == REGMAP_ENDIAN_DEFAULT)
+ reg_endian = REGMAP_ENDIAN_BIG;
+
+ val_endian = config->val_format_endian;
+ if (val_endian == REGMAP_ENDIAN_DEFAULT)
+ val_endian = bus->val_format_endian_default;
+ if (val_endian == REGMAP_ENDIAN_DEFAULT)
+ val_endian = REGMAP_ENDIAN_BIG;
+
switch (config->reg_bits + map->reg_shift) {
case 2:
switch (config->val_bits) {
@@ -321,11 +431,29 @@ struct regmap *regmap_init(struct device *dev,
break;
case 16:
- map->format.format_reg = regmap_format_16;
+ switch (reg_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_reg = regmap_format_16_be;
+ break;
+ case REGMAP_ENDIAN_NATIVE:
+ map->format.format_reg = regmap_format_16_native;
+ break;
+ default:
+ goto err_map;
+ }
break;
case 32:
- map->format.format_reg = regmap_format_32;
+ switch (reg_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_reg = regmap_format_32_be;
+ break;
+ case REGMAP_ENDIAN_NATIVE:
+ map->format.format_reg = regmap_format_32_native;
+ break;
+ default:
+ goto err_map;
+ }
break;
default:
@@ -338,21 +466,47 @@ struct regmap *regmap_init(struct device *dev,
map->format.parse_val = regmap_parse_8;
break;
case 16:
- map->format.format_val = regmap_format_16;
- map->format.parse_val = regmap_parse_16;
+ switch (val_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_val = regmap_format_16_be;
+ map->format.parse_val = regmap_parse_16_be;
+ break;
+ case REGMAP_ENDIAN_NATIVE:
+ map->format.format_val = regmap_format_16_native;
+ map->format.parse_val = regmap_parse_16_native;
+ break;
+ default:
+ goto err_map;
+ }
break;
case 24:
+ if (val_endian != REGMAP_ENDIAN_BIG)
+ goto err_map;
map->format.format_val = regmap_format_24;
map->format.parse_val = regmap_parse_24;
break;
case 32:
- map->format.format_val = regmap_format_32;
- map->format.parse_val = regmap_parse_32;
+ switch (val_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_val = regmap_format_32_be;
+ map->format.parse_val = regmap_parse_32_be;
+ break;
+ case REGMAP_ENDIAN_NATIVE:
+ map->format.format_val = regmap_format_32_native;
+ map->format.parse_val = regmap_parse_32_native;
+ break;
+ default:
+ goto err_map;
+ }
break;
}
- if (map->format.format_write)
+ if (map->format.format_write) {
+ if ((reg_endian != REGMAP_ENDIAN_BIG) ||
+ (val_endian != REGMAP_ENDIAN_BIG))
+ goto err_map;
map->use_single_rw = true;
+ }
if (!map->format.format_write &&
!(map->format.format_reg && map->format.format_val))
@@ -364,26 +518,88 @@ struct regmap *regmap_init(struct device *dev,
goto err_map;
}
- regmap_debugfs_init(map, config->name);
+ map->range_tree = RB_ROOT;
+ for (i = 0; i < config->n_ranges; i++) {
+ const struct regmap_range_cfg *range_cfg = &config->ranges[i];
+ struct regmap_range_node *new;
+
+ /* Sanity check */
+ if (range_cfg->range_max < range_cfg->range_min ||
+ range_cfg->range_max > map->max_register ||
+ range_cfg->selector_reg > map->max_register ||
+ range_cfg->window_len == 0)
+ goto err_range;
+
+ /* Make sure that this register range has no selector
+ or data window within its boundary */
+ for (j = 0; j < config->n_ranges; j++) {
+ unsigned sel_reg = config->ranges[j].selector_reg;
+ unsigned win_min = config->ranges[j].window_start;
+ unsigned win_max = win_min +
+ config->ranges[j].window_len - 1;
+
+ if (range_cfg->range_min <= sel_reg &&
+ sel_reg <= range_cfg->range_max) {
+ goto err_range;
+ }
+
+ if (!(win_max < range_cfg->range_min ||
+ win_min > range_cfg->range_max)) {
+ goto err_range;
+ }
+ }
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (new == NULL) {
+ ret = -ENOMEM;
+ goto err_range;
+ }
+
+ new->range_min = range_cfg->range_min;
+ new->range_max = range_cfg->range_max;
+ new->selector_reg = range_cfg->selector_reg;
+ new->selector_mask = range_cfg->selector_mask;
+ new->selector_shift = range_cfg->selector_shift;
+ new->window_start = range_cfg->window_start;
+ new->window_len = range_cfg->window_len;
+
+ if (_regmap_range_add(map, new) == false) {
+ kfree(new);
+ goto err_range;
+ }
+
+ if (map->selector_work_buf == NULL) {
+ map->selector_work_buf =
+ kzalloc(map->format.buf_size, GFP_KERNEL);
+ if (map->selector_work_buf == NULL) {
+ ret = -ENOMEM;
+ goto err_range;
+ }
+ }
+ }
ret = regcache_init(map, config);
if (ret < 0)
- goto err_free_workbuf;
+ goto err_range;
+
+ regmap_debugfs_init(map, config->name);
/* Add a devres resource for dev_get_regmap() */
m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
if (!m) {
ret = -ENOMEM;
- goto err_cache;
+ goto err_debugfs;
}
*m = map;
devres_add(dev, m);
return map;
-err_cache:
+err_debugfs:
+ regmap_debugfs_exit(map);
regcache_exit(map);
-err_free_workbuf:
+err_range:
+ regmap_range_exit(map);
kfree(map->work_buf);
err_map:
kfree(map);
@@ -471,6 +687,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
return ret;
}
+EXPORT_SYMBOL_GPL(regmap_reinit_cache);
/**
* regmap_exit(): Free a previously allocated register map
@@ -479,6 +696,7 @@ void regmap_exit(struct regmap *map)
{
regcache_exit(map);
regmap_debugfs_exit(map);
+ regmap_range_exit(map);
if (map->bus->free_context)
map->bus->free_context(map->bus_context);
kfree(map->work_buf);
@@ -524,6 +742,57 @@ struct regmap *dev_get_regmap(struct device *dev, const char *name)
}
EXPORT_SYMBOL_GPL(dev_get_regmap);
+static int _regmap_select_page(struct regmap *map, unsigned int *reg,
+ unsigned int val_num)
+{
+ struct regmap_range_node *range;
+ void *orig_work_buf;
+ unsigned int win_offset;
+ unsigned int win_page;
+ bool page_chg;
+ int ret;
+
+ range = _regmap_range_lookup(map, *reg);
+ if (range) {
+ win_offset = (*reg - range->range_min) % range->window_len;
+ win_page = (*reg - range->range_min) / range->window_len;
+
+ if (val_num > 1) {
+ /* Bulk write shouldn't cross range boundary */
+ if (*reg + val_num - 1 > range->range_max)
+ return -EINVAL;
+
+ /* ... or single page boundary */
+ if (val_num > range->window_len - win_offset)
+ return -EINVAL;
+ }
+
+ /* It is possible for the selector register to sit inside the data window.
+ In that case the selector register is present on every page and
+ needs no page switching when accessed alone. */
+ if (val_num > 1 ||
+ range->window_start + win_offset != range->selector_reg) {
+ /* Use separate work_buf during page switching */
+ orig_work_buf = map->work_buf;
+ map->work_buf = map->selector_work_buf;
+
+ ret = _regmap_update_bits(map, range->selector_reg,
+ range->selector_mask,
+ win_page << range->selector_shift,
+ &page_chg);
+
+ map->work_buf = orig_work_buf;
+
+ if (ret < 0)
+ return ret;
+ }
+
+ *reg = range->window_start + win_offset;
+ }
+
+ return 0;
+}
+
static int _regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len)
{
@@ -561,6 +830,10 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
}
}
+ ret = _regmap_select_page(map, &reg, val_len / map->format.val_bytes);
+ if (ret < 0)
+ return ret;
+
map->format.format_reg(map->work_buf, reg, map->reg_shift);
u8[0] |= map->write_flag_mask;
@@ -621,9 +894,18 @@ int _regmap_write(struct regmap *map, unsigned int reg,
}
}
+#ifdef LOG_DEVICE
+ if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+ dev_info(map->dev, "%x <= %x\n", reg, val);
+#endif
+
trace_regmap_reg_write(map->dev, reg, val);
if (map->format.format_write) {
+ ret = _regmap_select_page(map, &reg, 1);
+ if (ret < 0)
+ return ret;
+
map->format.format_write(map, reg, val);
trace_regmap_hw_write_start(map->dev, reg, 1);
@@ -781,6 +1063,10 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
u8 *u8 = map->work_buf;
int ret;
+ ret = _regmap_select_page(map, &reg, val_len / map->format.val_bytes);
+ if (ret < 0)
+ return ret;
+
map->format.format_reg(map->work_buf, reg, map->reg_shift);
/*
@@ -824,6 +1110,12 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
if (ret == 0) {
*val = map->format.parse_val(map->work_buf);
+
+#ifdef LOG_DEVICE
+ if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+ dev_info(map->dev, "%x => %x\n", reg, *val);
+#endif
+
trace_regmap_reg_read(map->dev, reg, *val);
}
@@ -980,11 +1272,9 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
int ret;
unsigned int tmp, orig;
- map->lock(map);
-
ret = _regmap_read(map, reg, &orig);
if (ret != 0)
- goto out;
+ return ret;
tmp = orig & ~mask;
tmp |= val & mask;
@@ -996,9 +1286,6 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
*change = false;
}
-out:
- map->unlock(map);
-
return ret;
}
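/*
 * Editorial note on the two hunks above (a reading of the patch, not part
 * of it): the map lock moves out of _regmap_update_bits() and into the
 * exported regmap_update_bits*() wrappers below, so that the new
 * _regmap_select_page() helper -- which already runs under the map lock
 * from _regmap_raw_write()/_regmap_raw_read() -- can call
 * _regmap_update_bits() to flip the page selector without deadlocking on
 * the non-recursive map lock.
 */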
@@ -1016,7 +1303,13 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val)
{
bool change;
- return _regmap_update_bits(map, reg, mask, val, &change);
+ int ret;
+
+ map->lock(map);
+ ret = _regmap_update_bits(map, reg, mask, val, &change);
+ map->unlock(map);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits);
@@ -1036,7 +1329,12 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change)
{
- return _regmap_update_bits(map, reg, mask, val, change);
+ int ret;
+
+ map->lock(map);
+ ret = _regmap_update_bits(map, reg, mask, val, change);
+ map->unlock(map);
+ return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_check);
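As a hedged illustration of the two regmap features added above (explicit
register/value endianness and indirect "paged" register ranges), the sketch
below shows how a client driver might fill in the new regmap_config fields.
Everything named foo_* is hypothetical; the regmap_range_cfg members,
ranges/n_ranges and the REGMAP_ENDIAN_* values are taken from the hunks,
while reg_bits, val_bits and max_register are assumed from context.

#include <linux/regmap.h>

/* One selector-driven window: virtual registers 0x100..0x2ff are reached
 * through a 0x80-register data window at 0x10, with the page number
 * written to bits 1:0 of register 0x00. */
static const struct regmap_range_cfg foo_range = {
	.range_min	= 0x100,
	.range_max	= 0x2ff,
	.selector_reg	= 0x00,
	.selector_mask	= 0x03,
	.selector_shift	= 0,
	.window_start	= 0x10,
	.window_len	= 0x80,
};

static const struct regmap_config foo_regmap_config = {
	.reg_bits		= 8,
	.val_bits		= 16,
	.max_register		= 0x2ff,
	/* leaving this at REGMAP_ENDIAN_DEFAULT would fall back to the
	 * bus default and finally to REGMAP_ENDIAN_BIG */
	.val_format_endian	= REGMAP_ENDIAN_NATIVE,
	.ranges			= &foo_range,
	.n_ranges		= 1,
};

With such a config, regmap_init() builds one regmap_range_node per entry and
routes any access that falls in 0x100..0x2ff through _regmap_select_page(),
which programs the selector and redirects the access into the 0x10..0x8f
window.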
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index ba29b2e..72b5e72 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -42,7 +42,7 @@ struct device *soc_device_to_device(struct soc_device *soc_dev)
return &soc_dev->dev;
}
-static mode_t soc_attribute_mode(struct kobject *kobj,
+static umode_t soc_attribute_mode(struct kobject *kobj,
struct attribute *attr,
int index)
{
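The soc.c hunk is only a type fix, but it reflects the sysfs convention that
attribute-visibility callbacks return umode_t rather than the old mode_t. A
minimal, hypothetical example of such a callback (foo_has_feature() is
invented purely for illustration):

static umode_t foo_attr_is_visible(struct kobject *kobj,
				   struct attribute *attr, int index)
{
	/* expose the attribute only when the hardware supports it */
	return foo_has_feature(kobj, index) ? attr->mode : 0;
}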
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index fb7c80f..06b3207a 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -46,6 +46,25 @@ config BCMA_DRIVER_MIPS
If unsure, say N
+config BCMA_SFLASH
+ bool
+ depends on BCMA_DRIVER_MIPS && BROKEN
+ default y
+
+config BCMA_NFLASH
+ bool
+ depends on BCMA_DRIVER_MIPS && BROKEN
+ default y
+
+config BCMA_DRIVER_GMAC_CMN
+ bool "BCMA Broadcom GBIT MAC COMMON core driver"
+ depends on BCMA
+ help
+ Driver for the Broadcom GBIT MAC COMMON core attached to Broadcom
+ specific Advanced Microcontroller Bus.
+
+ If unsure, say N
+
config BCMA_DEBUG
bool "BCMA debugging"
depends on BCMA
diff --git a/drivers/bcma/Makefile b/drivers/bcma/Makefile
index 82de24e..8ad42d4 100644
--- a/drivers/bcma/Makefile
+++ b/drivers/bcma/Makefile
@@ -1,8 +1,11 @@
bcma-y += main.o scan.o core.o sprom.o
bcma-y += driver_chipcommon.o driver_chipcommon_pmu.o
+bcma-$(CONFIG_BCMA_SFLASH) += driver_chipcommon_sflash.o
+bcma-$(CONFIG_BCMA_NFLASH) += driver_chipcommon_nflash.o
bcma-y += driver_pci.o
bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE) += driver_pci_host.o
bcma-$(CONFIG_BCMA_DRIVER_MIPS) += driver_mips.o
+bcma-$(CONFIG_BCMA_DRIVER_GMAC_CMN) += driver_gmac_cmn.o
bcma-$(CONFIG_BCMA_HOST_PCI) += host_pci.o
bcma-$(CONFIG_BCMA_HOST_SOC) += host_soc.o
obj-$(CONFIG_BCMA) += bcma.o
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index b81755b..3cf9cc9 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -10,6 +10,15 @@
#define BCMA_CORE_SIZE 0x1000
+#define bcma_err(bus, fmt, ...) \
+ pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+#define bcma_warn(bus, fmt, ...) \
+ pr_warn("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+#define bcma_info(bus, fmt, ...) \
+ pr_info("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+#define bcma_debug(bus, fmt, ...) \
+ pr_debug("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+
struct bcma_bus;
/* main.c */
@@ -42,6 +51,28 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
u32 bcma_pmu_alp_clock(struct bcma_drv_cc *cc);
u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc);
+#ifdef CONFIG_BCMA_SFLASH
+/* driver_chipcommon_sflash.c */
+int bcma_sflash_init(struct bcma_drv_cc *cc);
+#else
+static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
+{
+ bcma_err(cc->core->bus, "Serial flash not supported\n");
+ return 0;
+}
+#endif /* CONFIG_BCMA_SFLASH */
+
+#ifdef CONFIG_BCMA_NFLASH
+/* driver_chipcommon_nflash.c */
+int bcma_nflash_init(struct bcma_drv_cc *cc);
+#else
+static inline int bcma_nflash_init(struct bcma_drv_cc *cc)
+{
+ bcma_err(cc->core->bus, "NAND flash not supported\n");
+ return 0;
+}
+#endif /* CONFIG_BCMA_NFLASH */
+
#ifdef CONFIG_BCMA_HOST_PCI
/* host_pci.c */
extern int __init bcma_host_pci_init(void);
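A small illustration of the new bus-aware logging wrappers above (editorial,
not part of the patch): because each macro prepends "bus%d: " and passes
(bus)->num as the first argument, a call such as

	bcma_err(bus, "Failed to scan: %d\n", err);

on bus 0 with err == -5 expands to
pr_err("bus%d: Failed to scan: %d\n", 0, -5) and logs
"bus0: Failed to scan: -5", so messages from multiple BCMA buses in one
system can be told apart.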
diff --git a/drivers/bcma/core.c b/drivers/bcma/core.c
index bc6e892..63c8b47 100644
--- a/drivers/bcma/core.c
+++ b/drivers/bcma/core.c
@@ -75,7 +75,7 @@ void bcma_core_set_clockmode(struct bcma_device *core,
udelay(10);
}
if (i)
- pr_err("HT force timeout\n");
+ bcma_err(core->bus, "HT force timeout\n");
break;
case BCMA_CLKMODE_DYNAMIC:
bcma_set32(core, BCMA_CLKCTLST, ~BCMA_CLKCTLST_FORCEHT);
@@ -102,9 +102,9 @@ void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, bool on)
udelay(10);
}
if (i)
- pr_err("PLL enable timeout\n");
+ bcma_err(core->bus, "PLL enable timeout\n");
} else {
- pr_warn("Disabling PLL not supported yet!\n");
+ bcma_warn(core->bus, "Disabling PLL not supported yet!\n");
}
}
EXPORT_SYMBOL_GPL(bcma_core_pll_ctl);
@@ -120,8 +120,8 @@ u32 bcma_core_dma_translation(struct bcma_device *core)
else
return BCMA_DMA_TRANSLATION_DMA32_CMT;
default:
- pr_err("DMA translation unknown for host %d\n",
- core->bus->hosttype);
+ bcma_err(core->bus, "DMA translation unknown for host %d\n",
+ core->bus->hosttype);
}
return BCMA_DMA_TRANSLATION_NONE;
}
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index e9f1b3f..a4c3ebc 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -44,7 +44,7 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
if (cc->capabilities & BCMA_CC_CAP_PMU)
bcma_pmu_init(cc);
if (cc->capabilities & BCMA_CC_CAP_PCTL)
- pr_err("Power control not implemented!\n");
+ bcma_err(cc->core->bus, "Power control not implemented!\n");
if (cc->core->id.rev >= 16) {
if (cc->core->bus->sprom.leddc_on_time &&
@@ -137,8 +137,7 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
| BCMA_CC_CORECTL_UARTCLKEN);
}
} else {
- pr_err("serial not supported on this device ccrev: 0x%x\n",
- ccrev);
+ bcma_err(cc->core->bus, "serial not supported on this device ccrev: 0x%x\n", ccrev);
return;
}
diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c
new file mode 100644
index 0000000..574d624
--- /dev/null
+++ b/drivers/bcma/driver_chipcommon_nflash.c
@@ -0,0 +1,19 @@
+/*
+ * Broadcom specific AMBA
+ * ChipCommon NAND flash interface
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_driver_chipcommon.h>
+#include <linux/delay.h>
+
+#include "bcma_private.h"
+
+/* Initialize NAND flash access */
+int bcma_nflash_init(struct bcma_drv_cc *cc)
+{
+ bcma_err(cc->core->bus, "NAND flash support is broken\n");
+ return 0;
+}
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index a058842..4432617 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -3,7 +3,8 @@
* ChipCommon Power Management Unit driver
*
* Copyright 2009, Michael Buesch <m@bues.ch>
- * Copyright 2007, Broadcom Corporation
+ * Copyright 2007, 2011, Broadcom Corporation
+ * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
@@ -54,39 +55,19 @@ void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
}
EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset);
-static void bcma_pmu_pll_init(struct bcma_drv_cc *cc)
-{
- struct bcma_bus *bus = cc->core->bus;
-
- switch (bus->chipinfo.id) {
- case 0x4313:
- case 0x4331:
- case 43224:
- case 43225:
- break;
- default:
- pr_err("PLL init unknown for device 0x%04X\n",
- bus->chipinfo.id);
- }
-}
-
static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
u32 min_msk = 0, max_msk = 0;
switch (bus->chipinfo.id) {
- case 0x4313:
+ case BCMA_CHIP_ID_BCM4313:
min_msk = 0x200D;
max_msk = 0xFFFF;
break;
- case 0x4331:
- case 43224:
- case 43225:
- break;
default:
- pr_err("PMU resource config unknown for device 0x%04X\n",
- bus->chipinfo.id);
+ bcma_debug(bus, "PMU resource config unknown or not needed for device 0x%04X\n",
+ bus->chipinfo.id);
}
/* Set the resource masks. */
@@ -94,22 +75,9 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
bcma_cc_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk);
if (max_msk)
bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
-}
-
-void bcma_pmu_swreg_init(struct bcma_drv_cc *cc)
-{
- struct bcma_bus *bus = cc->core->bus;
- switch (bus->chipinfo.id) {
- case 0x4313:
- case 0x4331:
- case 43224:
- case 43225:
- break;
- default:
- pr_err("PMU switch/regulators init unknown for device "
- "0x%04X\n", bus->chipinfo.id);
- }
+ /* Add some delay; allow resources to come up and settle. */
+ mdelay(2);
}
/* Disable to allow reading SPROM. Don't know the advantages of enabling it. */
@@ -123,8 +91,11 @@ void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable)
val |= BCMA_CHIPCTL_4331_EXTPA_EN;
if (bus->chipinfo.pkg == 9 || bus->chipinfo.pkg == 11)
val |= BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
+ else if (bus->chipinfo.rev > 0)
+ val |= BCMA_CHIPCTL_4331_EXTPA_EN2;
} else {
val &= ~BCMA_CHIPCTL_4331_EXTPA_EN;
+ val &= ~BCMA_CHIPCTL_4331_EXTPA_EN2;
val &= ~BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
}
bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val);
@@ -135,26 +106,38 @@ void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
struct bcma_bus *bus = cc->core->bus;
switch (bus->chipinfo.id) {
- case 0x4313:
- bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7);
+ case BCMA_CHIP_ID_BCM4313:
+ /* enable 12 mA drive strength for 4313 and set chipControl
+ register bit 1 */
+ bcma_chipco_chipctl_maskset(cc, 0,
+ BCMA_CCTRL_4313_12MA_LED_DRIVE,
+ BCMA_CCTRL_4313_12MA_LED_DRIVE);
break;
- case 0x4331:
- /* BCM4331 workaround is SPROM-related, we put it in sprom.c */
+ case BCMA_CHIP_ID_BCM4331:
+ case BCMA_CHIP_ID_BCM43431:
+ /* Ext PA lines must be enabled for tx on BCM4331 */
+ bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true);
break;
- case 43224:
+ case BCMA_CHIP_ID_BCM43224:
+ case BCMA_CHIP_ID_BCM43421:
+ /* enable 12 mA drive strength for 43224 and set chipControl
+ register bit 15 */
if (bus->chipinfo.rev == 0) {
- pr_err("Workarounds for 43224 rev 0 not fully "
- "implemented\n");
- bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x00F000F0);
+ bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL,
+ BCMA_CCTRL_43224_GPIO_TOGGLE,
+ BCMA_CCTRL_43224_GPIO_TOGGLE);
+ bcma_chipco_chipctl_maskset(cc, 0,
+ BCMA_CCTRL_43224A0_12MA_LED_DRIVE,
+ BCMA_CCTRL_43224A0_12MA_LED_DRIVE);
} else {
- bcma_chipco_chipctl_maskset(cc, 0, ~0, 0xF0);
+ bcma_chipco_chipctl_maskset(cc, 0,
+ BCMA_CCTRL_43224B0_12MA_LED_DRIVE,
+ BCMA_CCTRL_43224B0_12MA_LED_DRIVE);
}
break;
- case 43225:
- break;
default:
- pr_err("Workarounds unknown for device 0x%04X\n",
- bus->chipinfo.id);
+ bcma_debug(bus, "Workarounds unknown or not needed for device 0x%04X\n",
+ bus->chipinfo.id);
}
}
@@ -165,8 +148,8 @@ void bcma_pmu_init(struct bcma_drv_cc *cc)
pmucap = bcma_cc_read32(cc, BCMA_CC_PMU_CAP);
cc->pmu.rev = (pmucap & BCMA_CC_PMU_CAP_REVISION);
- pr_debug("Found rev %u PMU (capabilities 0x%08X)\n", cc->pmu.rev,
- pmucap);
+ bcma_debug(cc->core->bus, "Found rev %u PMU (capabilities 0x%08X)\n",
+ cc->pmu.rev, pmucap);
if (cc->pmu.rev == 1)
bcma_cc_mask32(cc, BCMA_CC_PMU_CTL,
@@ -175,12 +158,7 @@ void bcma_pmu_init(struct bcma_drv_cc *cc)
bcma_cc_set32(cc, BCMA_CC_PMU_CTL,
BCMA_CC_PMU_CTL_NOILPONW);
- if (cc->core->id.id == 0x4329 && cc->core->id.rev == 2)
- pr_err("Fix for 4329b0 bad LPOM state not implemented!\n");
-
- bcma_pmu_pll_init(cc);
bcma_pmu_resources_init(cc);
- bcma_pmu_swreg_init(cc);
bcma_pmu_workarounds(cc);
}
@@ -189,23 +167,22 @@ u32 bcma_pmu_alp_clock(struct bcma_drv_cc *cc)
struct bcma_bus *bus = cc->core->bus;
switch (bus->chipinfo.id) {
- case 0x4716:
- case 0x4748:
- case 47162:
- case 0x4313:
- case 0x5357:
- case 0x4749:
- case 53572:
+ case BCMA_CHIP_ID_BCM4716:
+ case BCMA_CHIP_ID_BCM4748:
+ case BCMA_CHIP_ID_BCM47162:
+ case BCMA_CHIP_ID_BCM4313:
+ case BCMA_CHIP_ID_BCM5357:
+ case BCMA_CHIP_ID_BCM4749:
+ case BCMA_CHIP_ID_BCM53572:
/* always 20 MHz */
return 20000 * 1000;
- case 0x5356:
- case 0x5300:
+ case BCMA_CHIP_ID_BCM5356:
+ case BCMA_CHIP_ID_BCM4706:
/* always 25 MHz */
return 25000 * 1000;
default:
- pr_warn("No ALP clock specified for %04X device, "
- "pmu rev. %d, using default %d Hz\n",
- bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK);
+ bcma_warn(bus, "No ALP clock specified for %04X device, pmu rev. %d, using default %d Hz\n",
+ bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK);
}
return BCMA_CC_PMU_ALP_CLOCK;
}
@@ -222,7 +199,8 @@ static u32 bcma_pmu_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m)
BUG_ON(!m || m > 4);
- if (bus->chipinfo.id == 0x5357 || bus->chipinfo.id == 0x4749) {
+ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
+ bus->chipinfo.id == BCMA_CHIP_ID_BCM4749) {
/* Detect failure in clock setting */
tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
if (tmp & 0x40000)
@@ -248,33 +226,62 @@ static u32 bcma_pmu_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m)
return (fc / div) * 1000000;
}
+static u32 bcma_pmu_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m)
+{
+ u32 tmp, ndiv, p1div, p2div;
+ u32 clock;
+
+ BUG_ON(!m || m > 4);
+
+ /* Get N, P1 and P2 dividers to determine CPU clock */
+ tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PMU6_4706_PROCPLL_OFF);
+ ndiv = (tmp & BCMA_CC_PMU6_4706_PROC_NDIV_INT_MASK)
+ >> BCMA_CC_PMU6_4706_PROC_NDIV_INT_SHIFT;
+ p1div = (tmp & BCMA_CC_PMU6_4706_PROC_P1DIV_MASK)
+ >> BCMA_CC_PMU6_4706_PROC_P1DIV_SHIFT;
+ p2div = (tmp & BCMA_CC_PMU6_4706_PROC_P2DIV_MASK)
+ >> BCMA_CC_PMU6_4706_PROC_P2DIV_SHIFT;
+
+ tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
+ if (tmp & BCMA_CC_CHIPST_4706_PKG_OPTION)
+ /* Low cost bonding: Fixed reference clock 25 MHz and m = 4 */
+ clock = (25000000 / 4) * ndiv * p2div / p1div;
+ else
+ /* Fixed reference clock 25 MHz and m = 2 */
+ clock = (25000000 / 2) * ndiv * p2div / p1div;
+
+ if (m == BCMA_CC_PMU5_MAINPLL_SSB)
+ clock = clock / 4;
+
+ return clock;
+}
+
/* query bus clock frequency for PMU-enabled chipcommon */
u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
switch (bus->chipinfo.id) {
- case 0x4716:
- case 0x4748:
- case 47162:
+ case BCMA_CHIP_ID_BCM4716:
+ case BCMA_CHIP_ID_BCM4748:
+ case BCMA_CHIP_ID_BCM47162:
return bcma_pmu_clock(cc, BCMA_CC_PMU4716_MAINPLL_PLL0,
BCMA_CC_PMU5_MAINPLL_SSB);
- case 0x5356:
+ case BCMA_CHIP_ID_BCM5356:
return bcma_pmu_clock(cc, BCMA_CC_PMU5356_MAINPLL_PLL0,
BCMA_CC_PMU5_MAINPLL_SSB);
- case 0x5357:
- case 0x4749:
+ case BCMA_CHIP_ID_BCM5357:
+ case BCMA_CHIP_ID_BCM4749:
return bcma_pmu_clock(cc, BCMA_CC_PMU5357_MAINPLL_PLL0,
BCMA_CC_PMU5_MAINPLL_SSB);
- case 0x5300:
- return bcma_pmu_clock(cc, BCMA_CC_PMU4706_MAINPLL_PLL0,
- BCMA_CC_PMU5_MAINPLL_SSB);
- case 53572:
+ case BCMA_CHIP_ID_BCM4706:
+ return bcma_pmu_clock_bcm4706(cc, BCMA_CC_PMU4706_MAINPLL_PLL0,
+ BCMA_CC_PMU5_MAINPLL_SSB);
+ case BCMA_CHIP_ID_BCM53572:
return 75000000;
default:
- pr_warn("No backplane clock specified for %04X device, "
- "pmu rev. %d, using default %d Hz\n",
- bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK);
+ bcma_warn(bus, "No backplane clock specified for %04X device, pmu rev. %d, using default %d Hz\n",
+ bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK);
}
return BCMA_CC_PMU_HT_CLOCK;
}
@@ -284,17 +291,21 @@ u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
- if (bus->chipinfo.id == 53572)
+ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53572)
return 300000000;
if (cc->pmu.rev >= 5) {
u32 pll;
switch (bus->chipinfo.id) {
- case 0x5356:
+ case BCMA_CHIP_ID_BCM4706:
+ return bcma_pmu_clock_bcm4706(cc,
+ BCMA_CC_PMU4706_MAINPLL_PLL0,
+ BCMA_CC_PMU5_MAINPLL_CPU);
+ case BCMA_CHIP_ID_BCM5356:
pll = BCMA_CC_PMU5356_MAINPLL_PLL0;
break;
- case 0x5357:
- case 0x4749:
+ case BCMA_CHIP_ID_BCM5357:
+ case BCMA_CHIP_ID_BCM4749:
pll = BCMA_CC_PMU5357_MAINPLL_PLL0;
break;
default:
@@ -302,10 +313,188 @@ u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc)
break;
}
- /* TODO: if (bus->chipinfo.id == 0x5300)
- return si_4706_pmu_clock(sih, osh, cc, PMU4706_MAINPLL_PLL0, PMU5_MAINPLL_CPU); */
return bcma_pmu_clock(cc, pll, BCMA_CC_PMU5_MAINPLL_CPU);
}
return bcma_pmu_get_clockcontrol(cc);
}
+
+static void bcma_pmu_spuravoid_pll_write(struct bcma_drv_cc *cc, u32 offset,
+ u32 value)
+{
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value);
+}
+
+void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
+{
+ u32 tmp = 0;
+ u8 phypll_offset = 0;
+ u8 bcm5357_bcm43236_p1div[] = {0x1, 0x5, 0x5};
+ u8 bcm5357_bcm43236_ndiv[] = {0x30, 0xf6, 0xfc};
+ struct bcma_bus *bus = cc->core->bus;
+
+ switch (bus->chipinfo.id) {
+ case BCMA_CHIP_ID_BCM5357:
+ case BCMA_CHIP_ID_BCM4749:
+ case BCMA_CHIP_ID_BCM53572:
+ /* 5357[ab]0, 43236[ab]0, and 6362b0 */
+
+ /* BCM5357 needs to touch PLL1_PLLCTL[02],
+ so offset PLL0_PLLCTL[02] by 6 */
+ phypll_offset = (bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
+ bus->chipinfo.id == BCMA_CHIP_ID_BCM4749 ||
+ bus->chipinfo.id == BCMA_CHIP_ID_BCM53572) ? 6 : 0;
+
+ /* RMW only the P1 divider */
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR,
+ BCMA_CC_PMU_PLL_CTL0 + phypll_offset);
+ tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
+ tmp &= (~(BCMA_CC_PMU1_PLL0_PC0_P1DIV_MASK));
+ tmp |= (bcm5357_bcm43236_p1div[spuravoid] << BCMA_CC_PMU1_PLL0_PC0_P1DIV_SHIFT);
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
+
+ /* RMW only the int feedback divider */
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR,
+ BCMA_CC_PMU_PLL_CTL2 + phypll_offset);
+ tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
+ tmp &= ~(BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK);
+ tmp |= (bcm5357_bcm43236_ndiv[spuravoid]) << BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT;
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
+
+ tmp = 1 << 10;
+ break;
+
+ case BCMA_CHIP_ID_BCM4331:
+ case BCMA_CHIP_ID_BCM43431:
+ if (spuravoid == 2) {
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+ 0x11500014);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+ 0x0FC00a08);
+ } else if (spuravoid == 1) {
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+ 0x11500014);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+ 0x0F600a08);
+ } else {
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+ 0x11100014);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+ 0x03000a08);
+ }
+ tmp = 1 << 10;
+ break;
+
+ case BCMA_CHIP_ID_BCM43224:
+ case BCMA_CHIP_ID_BCM43225:
+ case BCMA_CHIP_ID_BCM43421:
+ if (spuravoid == 1) {
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+ 0x11500010);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
+ 0x000C0C06);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+ 0x0F600a08);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
+ 0x00000000);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
+ 0x2001E920);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
+ 0x88888815);
+ } else {
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+ 0x11100010);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
+ 0x000c0c06);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+ 0x03000a08);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
+ 0x00000000);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
+ 0x200005c0);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
+ 0x88888815);
+ }
+ tmp = 1 << 10;
+ break;
+
+ case BCMA_CHIP_ID_BCM4716:
+ case BCMA_CHIP_ID_BCM4748:
+ case BCMA_CHIP_ID_BCM47162:
+ if (spuravoid == 1) {
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+ 0x11500060);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
+ 0x080C0C06);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+ 0x0F600000);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
+ 0x00000000);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
+ 0x2001E924);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
+ 0x88888815);
+ } else {
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+ 0x11100060);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
+ 0x080c0c06);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+ 0x03000000);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
+ 0x00000000);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
+ 0x200005c0);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
+ 0x88888815);
+ }
+
+ tmp = 3 << 9;
+ break;
+
+ case BCMA_CHIP_ID_BCM43227:
+ case BCMA_CHIP_ID_BCM43228:
+ case BCMA_CHIP_ID_BCM43428:
+ /* LCNXN */
+ /* PLL Settings for spur avoidance on/off mode,
+ no on2 support for 43228A0 */
+ if (spuravoid == 1) {
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+ 0x01100014);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
+ 0x040C0C06);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+ 0x03140A08);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
+ 0x00333333);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
+ 0x202C2820);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
+ 0x88888815);
+ } else {
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+ 0x11100014);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
+ 0x040c0c06);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+ 0x03000a08);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
+ 0x00000000);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
+ 0x200005c0);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
+ 0x88888815);
+ }
+ tmp = 1 << 10;
+ break;
+ default:
+ bcma_err(bus, "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
+ bus->chipinfo.id);
+ break;
+ }
+
+ tmp |= bcma_cc_read32(cc, BCMA_CC_PMU_CTL);
+ bcma_cc_write32(cc, BCMA_CC_PMU_CTL, tmp);
+}
+EXPORT_SYMBOL_GPL(bcma_pmu_spuravoid_pllupdate);
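To make the BCM4706 clock path above concrete, here is a worked example with
purely illustrative divider values (assumed, not read from any real board):

/*
 * With ndiv = 40, p1div = 2, p2div = 4 on a standard (non "low cost
 * bonding") package, bcma_pmu_clock_bcm4706() computes
 *
 *	clock = (25000000 / 2) * 40 * 4 / 2 = 1000000000 Hz (1 GHz)
 *
 * A backplane query (m == BCMA_CC_PMU5_MAINPLL_SSB) then returns
 * clock / 4 = 250 MHz, while the CPU query (BCMA_CC_PMU5_MAINPLL_CPU)
 * returns the full 1 GHz.
 */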
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
new file mode 100644
index 0000000..6e157a5
--- /dev/null
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -0,0 +1,19 @@
+/*
+ * Broadcom specific AMBA
+ * ChipCommon serial flash interface
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_driver_chipcommon.h>
+#include <linux/delay.h>
+
+#include "bcma_private.h"
+
+/* Initialize serial flash access */
+int bcma_sflash_init(struct bcma_drv_cc *cc)
+{
+ bcma_err(cc->core->bus, "Serial flash support is broken\n");
+ return 0;
+}
diff --git a/drivers/bcma/driver_gmac_cmn.c b/drivers/bcma/driver_gmac_cmn.c
new file mode 100644
index 0000000..834225f
--- /dev/null
+++ b/drivers/bcma/driver_gmac_cmn.c
@@ -0,0 +1,14 @@
+/*
+ * Broadcom specific AMBA
+ * GBIT MAC COMMON Core
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+#include <linux/bcma/bcma.h>
+
+void __devinit bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc)
+{
+ mutex_init(&gc->phy_mutex);
+}
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index c3e9dff..b013b04 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -22,15 +22,15 @@
/* The 47162a0 hangs when reading MIPS DMP registers */
static inline bool bcma_core_mips_bcm47162a0_quirk(struct bcma_device *dev)
{
- return dev->bus->chipinfo.id == 47162 && dev->bus->chipinfo.rev == 0 &&
- dev->id.id == BCMA_CORE_MIPS_74K;
+ return dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM47162 &&
+ dev->bus->chipinfo.rev == 0 && dev->id.id == BCMA_CORE_MIPS_74K;
}
/* The 5357b0 hangs when reading USB20H DMP registers */
static inline bool bcma_core_mips_bcm5357b0_quirk(struct bcma_device *dev)
{
- return (dev->bus->chipinfo.id == 0x5357 ||
- dev->bus->chipinfo.id == 0x4749) &&
+ return (dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
+ dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM4749) &&
dev->bus->chipinfo.pkg == 11 &&
dev->id.id == BCMA_CORE_USB20_HOST;
}
@@ -143,8 +143,8 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
1 << irqflag);
}
- pr_info("set_irq: core 0x%04x, irq %d => %d\n",
- dev->id.id, oldirq + 2, irq + 2);
+ bcma_info(bus, "set_irq: core 0x%04x, irq %d => %d\n",
+ dev->id.id, oldirq + 2, irq + 2);
}
static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
@@ -173,7 +173,7 @@ u32 bcma_cpu_clock(struct bcma_drv_mips *mcore)
if (bus->drv_cc.capabilities & BCMA_CC_CAP_PMU)
return bcma_pmu_get_clockcpu(&bus->drv_cc);
- pr_err("No PMU available, need this to get the cpu clock\n");
+ bcma_err(bus, "No PMU available, need this to get the cpu clock\n");
return 0;
}
EXPORT_SYMBOL(bcma_cpu_clock);
@@ -185,10 +185,11 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
switch (bus->drv_cc.capabilities & BCMA_CC_CAP_FLASHT) {
case BCMA_CC_FLASHT_STSER:
case BCMA_CC_FLASHT_ATSER:
- pr_err("Serial flash not supported.\n");
+ bcma_debug(bus, "Found serial flash\n");
+ bcma_sflash_init(&bus->drv_cc);
break;
case BCMA_CC_FLASHT_PARA:
- pr_info("found parallel flash.\n");
+ bcma_debug(bus, "Found parallel flash\n");
bus->drv_cc.pflash.window = 0x1c000000;
bus->drv_cc.pflash.window_size = 0x02000000;
@@ -199,7 +200,15 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
bus->drv_cc.pflash.buswidth = 2;
break;
default:
- pr_err("flash not supported.\n");
+ bcma_err(bus, "Flash type not supported\n");
+ }
+
+ if (bus->drv_cc.core->id.rev == 38 ||
+ bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
+ if (bus->drv_cc.capabilities & BCMA_CC_CAP_NFLASH) {
+ bcma_debug(bus, "Found NAND flash\n");
+ bcma_nflash_init(&bus->drv_cc);
+ }
}
}
@@ -209,7 +218,7 @@ void bcma_core_mips_init(struct bcma_drv_mips *mcore)
struct bcma_device *core;
bus = mcore->core->bus;
- pr_info("Initializing MIPS core...\n");
+ bcma_info(bus, "Initializing MIPS core...\n");
if (!mcore->setup_done)
mcore->assigned_irqs = 1;
@@ -244,7 +253,7 @@ void bcma_core_mips_init(struct bcma_drv_mips *mcore)
break;
}
}
- pr_info("IRQ reconfiguration done\n");
+ bcma_info(bus, "IRQ reconfiguration done\n");
bcma_core_mips_dump_irq(bus);
if (mcore->setup_done)
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index 9a96f14..c32ebd5 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -232,17 +232,19 @@ void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc)
int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
bool enable)
{
- struct pci_dev *pdev = pc->core->bus->host_pci;
+ struct pci_dev *pdev;
u32 coremask, tmp;
int err = 0;
- if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
+ if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
/* This bcma device is not on a PCI host-bus. So the IRQs are
* not routed through the PCI core.
* So we must not enable routing through the PCI core. */
goto out;
}
+ pdev = pc->core->bus->host_pci;
+
err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
if (err)
goto out;
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index b9a86ed..cbae2c2 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -36,7 +36,7 @@ bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
return false;
if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
- pr_info("This PCI core is disabled and not working\n");
+ bcma_info(bus, "This PCI core is disabled and not working\n");
return false;
}
@@ -215,7 +215,8 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
} else {
writel(val, mmio);
- if (chipid == 0x4716 || chipid == 0x4748)
+ if (chipid == BCMA_CHIP_ID_BCM4716 ||
+ chipid == BCMA_CHIP_ID_BCM4748)
readl(mmio);
}
@@ -340,6 +341,7 @@ static u8 __devinit bcma_find_pci_capability(struct bcma_drv_pci *pc,
*/
static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
{
+ struct bcma_bus *bus = pc->core->bus;
u8 cap_ptr, root_ctrl, root_cap, dev;
u16 val16;
int i;
@@ -378,7 +380,8 @@ static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
udelay(10);
}
if (val16 == 0x1)
- pr_err("PCI: Broken device in slot %d\n", dev);
+ bcma_err(bus, "PCI: Broken device in slot %d\n",
+ dev);
}
}
}
@@ -391,11 +394,11 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
u32 pci_membase_1G;
unsigned long io_map_base;
- pr_info("PCIEcore in host mode found\n");
+ bcma_info(bus, "PCIEcore in host mode found\n");
pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL);
if (!pc_host) {
- pr_err("can not allocate memory");
+ bcma_err(bus, "can not allocate memory");
return;
}
@@ -434,13 +437,14 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
* as mips can't generate 64-bit address on the
* backplane.
*/
- if (bus->chipinfo.id == 0x4716 || bus->chipinfo.id == 0x4748) {
+ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4716 ||
+ bus->chipinfo.id == BCMA_CHIP_ID_BCM4748) {
pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
BCMA_SOC_PCI_MEM_SZ - 1;
pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
BCMA_CORE_PCI_SBTOPCI_MEM | BCMA_SOC_PCI_MEM);
- } else if (bus->chipinfo.id == 0x5300) {
+ } else if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
tmp = BCMA_CORE_PCI_SBTOPCI_MEM;
tmp |= BCMA_CORE_PCI_SBTOPCI_PREF;
tmp |= BCMA_CORE_PCI_SBTOPCI_BURST;
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 6c05cf4..11b32d2 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -18,7 +18,7 @@ static void bcma_host_pci_switch_core(struct bcma_device *core)
pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN2,
core->wrap);
core->bus->mapped_core = core;
- pr_debug("Switched to core: 0x%X\n", core->id.id);
+ bcma_debug(core->bus, "Switched to core: 0x%X\n", core->id.id);
}
/* Provides access to the requested core. Returns base offset that has to be
@@ -188,7 +188,7 @@ static int __devinit bcma_host_pci_probe(struct pci_dev *dev,
/* SSB needed additional powering up, do we have any AMBA PCI cards? */
if (!pci_is_pcie(dev))
- pr_err("PCI card detected, report problems.\n");
+ bcma_err(bus, "PCI card detected, report problems.\n");
/* Map MMIO */
err = -ENOMEM;
@@ -268,6 +268,7 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 7e138ec..758af9c 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -61,6 +61,13 @@ static struct bus_type bcma_bus_type = {
.dev_attrs = bcma_device_attrs,
};
+static u16 bcma_cc_core_id(struct bcma_bus *bus)
+{
+ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
+ return BCMA_CORE_4706_CHIPCOMMON;
+ return BCMA_CORE_CHIPCOMMON;
+}
+
struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid)
{
struct bcma_device *core;
@@ -91,10 +98,12 @@ static int bcma_register_cores(struct bcma_bus *bus)
list_for_each_entry(core, &bus->cores, list) {
/* We support these cores ourselves */
switch (core->id.id) {
+ case BCMA_CORE_4706_CHIPCOMMON:
case BCMA_CORE_CHIPCOMMON:
case BCMA_CORE_PCI:
case BCMA_CORE_PCIE:
case BCMA_CORE_MIPS_74K:
+ case BCMA_CORE_4706_MAC_GBIT_COMMON:
continue;
}
@@ -118,8 +127,9 @@ static int bcma_register_cores(struct bcma_bus *bus)
err = device_register(&core->dev);
if (err) {
- pr_err("Could not register dev for core 0x%03X\n",
- core->id.id);
+ bcma_err(bus,
+ "Could not register dev for core 0x%03X\n",
+ core->id.id);
continue;
}
core->dev_registered = true;
@@ -151,12 +161,12 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
/* Scan for devices (cores) */
err = bcma_bus_scan(bus);
if (err) {
- pr_err("Failed to scan: %d\n", err);
+ bcma_err(bus, "Failed to scan: %d\n", err);
return -1;
}
/* Init CC core */
- core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
+ core = bcma_find_core(bus, bcma_cc_core_id(bus));
if (core) {
bus->drv_cc.core = core;
bcma_core_chipcommon_init(&bus->drv_cc);
@@ -176,17 +186,24 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
bcma_core_pci_init(&bus->drv_pci);
}
+ /* Init GBIT MAC COMMON core */
+ core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
+ if (core) {
+ bus->drv_gmac_cmn.core = core;
+ bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
+ }
+
/* Try to get SPROM */
err = bcma_sprom_get(bus);
if (err == -ENOENT) {
- pr_err("No SPROM available\n");
+ bcma_err(bus, "No SPROM available\n");
} else if (err)
- pr_err("Failed to get SPROM: %d\n", err);
+ bcma_err(bus, "Failed to get SPROM: %d\n", err);
/* Register found cores */
bcma_register_cores(bus);
- pr_info("Bus registered\n");
+ bcma_info(bus, "Bus registered\n");
return 0;
}
@@ -207,14 +224,14 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
bcma_init_bus(bus);
match.manuf = BCMA_MANUF_BCM;
- match.id = BCMA_CORE_CHIPCOMMON;
+ match.id = bcma_cc_core_id(bus);
match.class = BCMA_CL_SIM;
match.rev = BCMA_ANY_REV;
/* Scan for chip common core */
err = bcma_bus_scan_early(bus, &match, core_cc);
if (err) {
- pr_err("Failed to scan for common core: %d\n", err);
+ bcma_err(bus, "Failed to scan for common core: %d\n", err);
return -1;
}
@@ -226,12 +243,12 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
/* Scan for mips core */
err = bcma_bus_scan_early(bus, &match, core_mips);
if (err) {
- pr_err("Failed to scan for mips core: %d\n", err);
+ bcma_err(bus, "Failed to scan for mips core: %d\n", err);
return -1;
}
/* Init CC core */
- core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
+ core = bcma_find_core(bus, bcma_cc_core_id(bus));
if (core) {
bus->drv_cc.core = core;
bcma_core_chipcommon_init(&bus->drv_cc);
@@ -244,7 +261,7 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
bcma_core_mips_init(&bus->drv_mips);
}
- pr_info("Early bus registered\n");
+ bcma_info(bus, "Early bus registered\n");
return 0;
}
@@ -270,8 +287,7 @@ int bcma_bus_resume(struct bcma_bus *bus)
struct bcma_device *core;
/* Init CC core */
- core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
- if (core) {
+ if (bus->drv_cc.core) {
bus->drv_cc.setup_done = false;
bcma_core_chipcommon_init(&bus->drv_cc);
}
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index 5ed0718..5672b13 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -21,6 +21,7 @@ struct bcma_device_id_name {
};
static const struct bcma_device_id_name bcma_arm_device_names[] = {
+ { BCMA_CORE_4706_MAC_GBIT_COMMON, "BCM4706 GBit MAC Common" },
{ BCMA_CORE_ARM_1176, "ARM 1176" },
{ BCMA_CORE_ARM_7TDMI, "ARM 7TDMI" },
{ BCMA_CORE_ARM_CM3, "ARM CM3" },
@@ -28,6 +29,11 @@ static const struct bcma_device_id_name bcma_arm_device_names[] = {
static const struct bcma_device_id_name bcma_bcm_device_names[] = {
{ BCMA_CORE_OOB_ROUTER, "OOB Router" },
+ { BCMA_CORE_4706_CHIPCOMMON, "BCM4706 ChipCommon" },
+ { BCMA_CORE_4706_SOC_RAM, "BCM4706 SOC RAM" },
+ { BCMA_CORE_4706_MAC_GBIT, "BCM4706 GBit MAC" },
+ { BCMA_CORE_AMEMC, "AMEMC (DDR)" },
+ { BCMA_CORE_ALTA, "ALTA (I2S)" },
{ BCMA_CORE_INVALID, "Invalid" },
{ BCMA_CORE_CHIPCOMMON, "ChipCommon" },
{ BCMA_CORE_ILINE20, "ILine 20" },
@@ -289,11 +295,15 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
/* check if component is a core at all */
if (wrappers[0] + wrappers[1] == 0) {
- /* we could save addrl of the router
- if (cid == BCMA_CORE_OOB_ROUTER)
- */
- bcma_erom_skip_component(bus, eromptr);
- return -ENXIO;
+ /* Some specific cores don't need wrappers */
+ switch (core->id.id) {
+ case BCMA_CORE_4706_MAC_GBIT_COMMON:
+ /* Not used yet: case BCMA_CORE_OOB_ROUTER: */
+ break;
+ default:
+ bcma_erom_skip_component(bus, eromptr);
+ return -ENXIO;
+ }
}
if (bcma_erom_is_bridge(bus, eromptr)) {
@@ -334,7 +344,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
if (tmp <= 0) {
return -EILSEQ;
} else {
- pr_info("Bridge found\n");
+ bcma_info(bus, "Bridge found\n");
return -ENXIO;
}
}
@@ -421,8 +431,8 @@ void bcma_init_bus(struct bcma_bus *bus)
chipinfo->id = (tmp & BCMA_CC_ID_ID) >> BCMA_CC_ID_ID_SHIFT;
chipinfo->rev = (tmp & BCMA_CC_ID_REV) >> BCMA_CC_ID_REV_SHIFT;
chipinfo->pkg = (tmp & BCMA_CC_ID_PKG) >> BCMA_CC_ID_PKG_SHIFT;
- pr_info("Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n",
- chipinfo->id, chipinfo->rev, chipinfo->pkg);
+ bcma_info(bus, "Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n",
+ chipinfo->id, chipinfo->rev, chipinfo->pkg);
bus->init_done = true;
}
@@ -476,13 +486,12 @@ int bcma_bus_scan(struct bcma_bus *bus)
other_core = bcma_find_core_reverse(bus, core->id.id);
core->core_unit = (other_core == NULL) ? 0 : other_core->core_unit + 1;
- pr_info("Core %d found: %s "
- "(manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
- core->core_index, bcma_device_name(&core->id),
- core->id.manuf, core->id.id, core->id.rev,
- core->id.class);
+ bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
+ core->core_index, bcma_device_name(&core->id),
+ core->id.manuf, core->id.id, core->id.rev,
+ core->id.class);
- list_add(&core->list, &bus->cores);
+ list_add_tail(&core->list, &bus->cores);
}
if (bus->hosttype == BCMA_HOSTTYPE_SOC)
@@ -532,13 +541,12 @@ int __init bcma_bus_scan_early(struct bcma_bus *bus,
core->core_index = core_num++;
bus->nr_cores++;
- pr_info("Core %d found: %s "
- "(manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
- core->core_index, bcma_device_name(&core->id),
- core->id.manuf, core->id.id, core->id.rev,
- core->id.class);
+ bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
+ core->core_index, bcma_device_name(&core->id),
+ core->id.manuf, core->id.id, core->id.rev,
+ core->id.class);
- list_add(&core->list, &bus->cores);
+ list_add_tail(&core->list, &bus->cores);
err = 0;
break;
}
diff --git a/drivers/bcma/scan.h b/drivers/bcma/scan.h
index 113e6a6..30eb475 100644
--- a/drivers/bcma/scan.h
+++ b/drivers/bcma/scan.h
@@ -27,7 +27,7 @@
#define SCAN_CIB_NMW 0x0007C000
#define SCAN_CIB_NMW_SHIFT 14
#define SCAN_CIB_NSW 0x00F80000
-#define SCAN_CIB_NSW_SHIFT 17
+#define SCAN_CIB_NSW_SHIFT 19
#define SCAN_CIB_REV 0xFF000000
#define SCAN_CIB_REV_SHIFT 24
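A note on the corrected shift (editorial, not part of the patch):
SCAN_CIB_NSW = 0x00F80000 occupies bits 19..23, so with cib holding the CIB
erom word,

	nsw = (cib & SCAN_CIB_NSW) >> SCAN_CIB_NSW_SHIFT;

only extracts the 0..31 field correctly with the new shift of 19; the old
value of 17 left the (presumably slave-wrapper) count multiplied by four.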
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index c7f9335..26823d9 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -60,11 +60,11 @@ static int bcma_fill_sprom_with_fallback(struct bcma_bus *bus,
if (err)
goto fail;
- pr_debug("Using SPROM revision %d provided by"
- " platform.\n", bus->sprom.revision);
+ bcma_debug(bus, "Using SPROM revision %d provided by platform.\n",
+ bus->sprom.revision);
return 0;
fail:
- pr_warn("Using fallback SPROM failed (err %d)\n", err);
+ bcma_warn(bus, "Using fallback SPROM failed (err %d)\n", err);
return err;
}
@@ -468,11 +468,11 @@ static bool bcma_sprom_ext_available(struct bcma_bus *bus)
/* older chipcommon revisions use chip status register */
chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT);
switch (bus->chipinfo.id) {
- case 0x4313:
+ case BCMA_CHIP_ID_BCM4313:
present_mask = BCMA_CC_CHIPST_4313_SPROM_PRESENT;
break;
- case 0x4331:
+ case BCMA_CHIP_ID_BCM4331:
present_mask = BCMA_CC_CHIPST_4331_SPROM_PRESENT;
break;
@@ -494,16 +494,16 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT);
switch (bus->chipinfo.id) {
- case 0x4313:
+ case BCMA_CHIP_ID_BCM4313:
present = chip_status & BCMA_CC_CHIPST_4313_OTP_PRESENT;
break;
- case 0x4331:
+ case BCMA_CHIP_ID_BCM4331:
present = chip_status & BCMA_CC_CHIPST_4331_OTP_PRESENT;
break;
- case 43224:
- case 43225:
+ case BCMA_CHIP_ID_BCM43224:
+ case BCMA_CHIP_ID_BCM43225:
/* for these chips OTP is always available */
present = true;
break;
@@ -579,13 +579,15 @@ int bcma_sprom_get(struct bcma_bus *bus)
if (!sprom)
return -ENOMEM;
- if (bus->chipinfo.id == 0x4331)
+ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 ||
+ bus->chipinfo.id == BCMA_CHIP_ID_BCM43431)
bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false);
- pr_debug("SPROM offset 0x%x\n", offset);
+ bcma_debug(bus, "SPROM offset 0x%x\n", offset);
bcma_sprom_read(bus, offset, sprom);
- if (bus->chipinfo.id == 0x4331)
+ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 ||
+ bus->chipinfo.id == BCMA_CHIP_ID_BCM43431)
bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
err = bcma_sprom_valid(sprom);
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index cf0e63d..e54e31b 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -65,39 +65,80 @@ struct drbd_atodb_wait {
int w_al_write_transaction(struct drbd_conf *, struct drbd_work *, int);
+void *drbd_md_get_buffer(struct drbd_conf *mdev)
+{
+ int r;
+
+ wait_event(mdev->misc_wait,
+ (r = atomic_cmpxchg(&mdev->md_io_in_use, 0, 1)) == 0 ||
+ mdev->state.disk <= D_FAILED);
+
+ return r ? NULL : page_address(mdev->md_io_page);
+}
+
+void drbd_md_put_buffer(struct drbd_conf *mdev)
+{
+ if (atomic_dec_and_test(&mdev->md_io_in_use))
+ wake_up(&mdev->misc_wait);
+}
+
+static bool md_io_allowed(struct drbd_conf *mdev)
+{
+ enum drbd_disk_state ds = mdev->state.disk;
+ return ds >= D_NEGOTIATING || ds == D_ATTACHING;
+}
+
+void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+ unsigned int *done)
+{
+ long dt = bdev->dc.disk_timeout * HZ / 10;
+ if (dt == 0)
+ dt = MAX_SCHEDULE_TIMEOUT;
+
+ dt = wait_event_timeout(mdev->misc_wait, *done || !md_io_allowed(mdev), dt);
+ if (dt == 0)
+ dev_err(DEV, "meta-data IO operation timed out\n");
+}
+
static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
struct drbd_backing_dev *bdev,
struct page *page, sector_t sector,
int rw, int size)
{
struct bio *bio;
- struct drbd_md_io md_io;
int ok;
- md_io.mdev = mdev;
- init_completion(&md_io.event);
- md_io.error = 0;
+ mdev->md_io.done = 0;
+ mdev->md_io.error = -ENODEV;
if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
rw |= REQ_FUA | REQ_FLUSH;
rw |= REQ_SYNC;
- bio = bio_alloc(GFP_NOIO, 1);
+ bio = bio_alloc_drbd(GFP_NOIO);
bio->bi_bdev = bdev->md_bdev;
bio->bi_sector = sector;
ok = (bio_add_page(bio, page, size, 0) == size);
if (!ok)
goto out;
- bio->bi_private = &md_io;
+ bio->bi_private = &mdev->md_io;
bio->bi_end_io = drbd_md_io_complete;
bio->bi_rw = rw;
+ if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* Corresponding put_ldev in drbd_md_io_complete() */
+ dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
+ ok = 0;
+ goto out;
+ }
+
+ bio_get(bio); /* one bio_put() is in the completion handler */
+ atomic_inc(&mdev->md_io_in_use); /* drbd_md_put_buffer() is in the completion handler */
if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
bio_endio(bio, -EIO);
else
submit_bio(rw, bio);
- wait_for_completion(&md_io.event);
- ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;
+ wait_until_done_or_disk_failure(mdev, bdev, &mdev->md_io.done);
+ ok = bio_flagged(bio, BIO_UPTODATE) && mdev->md_io.error == 0;
out:
bio_put(bio);
@@ -111,7 +152,7 @@ int drbd_md_sync_page_io(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
int offset = 0;
struct page *iop = mdev->md_io_page;
- D_ASSERT(mutex_is_locked(&mdev->md_io_mutex));
+ D_ASSERT(atomic_read(&mdev->md_io_in_use) == 1);
BUG_ON(!bdev->md_bdev);
@@ -328,8 +369,13 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
return 1;
}
- mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
- buffer = (struct al_transaction *)page_address(mdev->md_io_page);
+ buffer = drbd_md_get_buffer(mdev); /* protects md_io_buffer, al_tr_cycle, ... */
+ if (!buffer) {
+ dev_err(DEV, "disk failed while waiting for md_io buffer\n");
+ complete(&((struct update_al_work *)w)->event);
+ put_ldev(mdev);
+ return 1;
+ }
buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
buffer->tr_number = cpu_to_be32(mdev->al_tr_number);
@@ -374,7 +420,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
D_ASSERT(mdev->al_tr_pos < MD_AL_MAX_SIZE);
mdev->al_tr_number++;
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
complete(&((struct update_al_work *)w)->event);
put_ldev(mdev);
@@ -443,8 +489,9 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
/* lock out all other meta data io for now,
* and make sure the page is mapped.
*/
- mutex_lock(&mdev->md_io_mutex);
- buffer = page_address(mdev->md_io_page);
+ buffer = drbd_md_get_buffer(mdev);
+ if (!buffer)
+ return 0;
/* Find the valid transaction in the log */
for (i = 0; i <= mx; i++) {
@@ -452,7 +499,7 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
if (rv == 0)
continue;
if (rv == -1) {
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
return 0;
}
cnr = be32_to_cpu(buffer->tr_number);
@@ -478,7 +525,7 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
if (!found_valid) {
dev_warn(DEV, "No usable activity log found.\n");
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
return 1;
}
@@ -493,7 +540,7 @@ int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
rv = drbd_al_read_tr(mdev, bdev, buffer, i);
ERR_IF(rv == 0) goto cancel;
if (rv == -1) {
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
return 0;
}
@@ -534,7 +581,7 @@ cancel:
mdev->al_tr_pos = 0;
/* ok, we are done with it */
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
dev_info(DEV, "Found %d transactions (%d active extents) in activity log.\n",
transactions, active_extents);
@@ -671,16 +718,20 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
else
ext->rs_failed += count;
if (ext->rs_left < ext->rs_failed) {
- dev_err(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
- "rs_failed=%d count=%d\n",
+ dev_warn(DEV, "BAD! sector=%llus enr=%u rs_left=%d "
+ "rs_failed=%d count=%d cstate=%s\n",
(unsigned long long)sector,
ext->lce.lc_number, ext->rs_left,
- ext->rs_failed, count);
- dump_stack();
-
- lc_put(mdev->resync, &ext->lce);
- drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
- return;
+ ext->rs_failed, count,
+ drbd_conn_str(mdev->state.conn));
+
+ /* We don't expect to be able to clear more bits
+ * than have been set when we originally counted
+ * the set bits to cache that value in ext->rs_left.
+ * Whatever the reason (disconnect during resync,
+ * delayed local completion of an application write),
+ * try to fix it up by recounting here. */
+ ext->rs_left = drbd_bm_e_weight(mdev, enr);
}
} else {
/* Normally this element should be in the cache,
@@ -1192,6 +1243,7 @@ int drbd_rs_del_all(struct drbd_conf *mdev)
put_ldev(mdev);
}
spin_unlock_irq(&mdev->al_lock);
+ wake_up(&mdev->al_wait);
return 0;
}
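Hedged usage sketch of the md_io buffer protocol introduced above (the
replacement for md_io_mutex); foo_update_md() is hypothetical, only
drbd_md_get_buffer() and drbd_md_put_buffer() come from the patch:

static int foo_update_md(struct drbd_conf *mdev)
{
	void *buffer;

	/* Blocks until the single meta-data page is free, but stops
	 * waiting and returns NULL if the disk drops to D_FAILED or
	 * below while the page is busy. */
	buffer = drbd_md_get_buffer(mdev);
	if (!buffer)
		return 0;

	/* ... prepare the page and issue drbd_md_sync_page_io() here ... */

	drbd_md_put_buffer(mdev);	/* wakes the next waiter */
	return 1;
}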
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 3030201..fcb956b 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -205,7 +205,7 @@ void drbd_bm_unlock(struct drbd_conf *mdev)
static void bm_store_page_idx(struct page *page, unsigned long idx)
{
BUG_ON(0 != (idx & ~BM_PAGE_IDX_MASK));
- page_private(page) |= idx;
+ set_page_private(page, idx);
}
static unsigned long bm_page_to_idx(struct page *page)
@@ -886,12 +886,21 @@ void drbd_bm_clear_all(struct drbd_conf *mdev)
struct bm_aio_ctx {
struct drbd_conf *mdev;
atomic_t in_flight;
- struct completion done;
+ unsigned int done;
unsigned flags;
#define BM_AIO_COPY_PAGES 1
int error;
+ struct kref kref;
};
+static void bm_aio_ctx_destroy(struct kref *kref)
+{
+ struct bm_aio_ctx *ctx = container_of(kref, struct bm_aio_ctx, kref);
+
+ put_ldev(ctx->mdev);
+ kfree(ctx);
+}
+
/* bv_page may be a copy, or may be the original */
static void bm_async_io_complete(struct bio *bio, int error)
{
@@ -930,20 +939,21 @@ static void bm_async_io_complete(struct bio *bio, int error)
bm_page_unlock_io(mdev, idx);
- /* FIXME give back to page pool */
if (ctx->flags & BM_AIO_COPY_PAGES)
- put_page(bio->bi_io_vec[0].bv_page);
+ mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);
bio_put(bio);
- if (atomic_dec_and_test(&ctx->in_flight))
- complete(&ctx->done);
+ if (atomic_dec_and_test(&ctx->in_flight)) {
+ ctx->done = 1;
+ wake_up(&mdev->misc_wait);
+ kref_put(&ctx->kref, &bm_aio_ctx_destroy);
+ }
}
static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
- /* we are process context. we always get a bio */
- struct bio *bio = bio_alloc(GFP_KERNEL, 1);
+ struct bio *bio = bio_alloc_drbd(GFP_NOIO);
struct drbd_conf *mdev = ctx->mdev;
struct drbd_bitmap *b = mdev->bitmap;
struct page *page;
@@ -966,10 +976,8 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
bm_set_page_unchanged(b->bm_pages[page_nr]);
if (ctx->flags & BM_AIO_COPY_PAGES) {
- /* FIXME alloc_page is good enough for now, but actually needs
- * to use pre-allocated page pool */
void *src, *dest;
- page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT);
+ page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
dest = kmap_atomic(page);
src = kmap_atomic(b->bm_pages[page_nr]);
memcpy(dest, src, PAGE_SIZE);
@@ -981,6 +989,8 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
bio->bi_bdev = mdev->ldev->md_bdev;
bio->bi_sector = on_disk_sector;
+ /* bio_add_page of a single page to an empty bio will always succeed,
+ * according to the API. Do we want to assert that? */
bio_add_page(bio, page, len, 0);
bio->bi_private = ctx;
bio->bi_end_io = bm_async_io_complete;
@@ -999,14 +1009,9 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
/*
* bm_rw: read/write the whole bitmap from/to its on disk location.
*/
-static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_idx) __must_hold(local)
+static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
- struct bm_aio_ctx ctx = {
- .mdev = mdev,
- .in_flight = ATOMIC_INIT(1),
- .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
- .flags = lazy_writeout_upper_idx ? BM_AIO_COPY_PAGES : 0,
- };
+ struct bm_aio_ctx *ctx;
struct drbd_bitmap *b = mdev->bitmap;
int num_pages, i, count = 0;
unsigned long now;
@@ -1021,7 +1026,27 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id
* For lazy writeout, we don't care for ongoing changes to the bitmap,
* as we submit copies of pages anyways.
*/
- if (!ctx.flags)
+
+ ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
+ if (!ctx)
+ return -ENOMEM;
+
+ *ctx = (struct bm_aio_ctx) {
+ .mdev = mdev,
+ .in_flight = ATOMIC_INIT(1),
+ .done = 0,
+ .flags = flags,
+ .error = 0,
+ .kref = { ATOMIC_INIT(2) },
+ };
+
+ if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
+ dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
+ kfree(ctx);
+ return -ENODEV;
+ }
+
+ if (!ctx->flags)
WARN_ON(!(BM_LOCKED_MASK & b->bm_flags));
num_pages = b->bm_number_of_pages;
@@ -1046,29 +1071,38 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id
continue;
}
}
- atomic_inc(&ctx.in_flight);
- bm_page_io_async(&ctx, i, rw);
+ atomic_inc(&ctx->in_flight);
+ bm_page_io_async(ctx, i, rw);
++count;
cond_resched();
}
/*
- * We initialize ctx.in_flight to one to make sure bm_async_io_complete
- * will not complete() early, and decrement / test it here. If there
+ * We initialize ctx->in_flight to one to make sure bm_async_io_complete
+ * will not set ctx->done early, and decrement / test it here. If there
* are still some bios in flight, we need to wait for them here.
+ * If all IO is done already (or nothing had been submitted), there is
+ * no need to wait. Still, we need to put the kref associated with the
+ * "in_flight reached zero, all done" event.
*/
- if (!atomic_dec_and_test(&ctx.in_flight))
- wait_for_completion(&ctx.done);
+ if (!atomic_dec_and_test(&ctx->in_flight))
+ wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
+ else
+ kref_put(&ctx->kref, &bm_aio_ctx_destroy);
+
dev_info(DEV, "bitmap %s of %u pages took %lu jiffies\n",
rw == WRITE ? "WRITE" : "READ",
count, jiffies - now);
- if (ctx.error) {
+ if (ctx->error) {
dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
drbd_chk_io_error(mdev, 1, true);
- err = -EIO; /* ctx.error ? */
+ err = -EIO; /* ctx->error ? */
}
+ if (atomic_read(&ctx->in_flight))
+ err = -EIO; /* Disk failed during IO... */
+
now = jiffies;
if (rw == WRITE) {
drbd_md_flush(mdev);
@@ -1082,6 +1116,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id
dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);
+ kref_put(&ctx->kref, &bm_aio_ctx_destroy);
return err;
}
@@ -1091,7 +1126,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned lazy_writeout_upper_id
*/
int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
{
- return bm_rw(mdev, READ, 0);
+ return bm_rw(mdev, READ, 0, 0);
}
/**
@@ -1102,7 +1137,7 @@ int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
*/
int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
{
- return bm_rw(mdev, WRITE, 0);
+ return bm_rw(mdev, WRITE, 0, 0);
}
/**
@@ -1112,7 +1147,23 @@ int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
*/
int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(local)
{
- return bm_rw(mdev, WRITE, upper_idx);
+ return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, upper_idx);
+}
+
+/**
+ * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
+ * @mdev: DRBD device.
+ *
+ * Will only write pages that have changed since last IO.
+ * In contrast to drbd_bm_write(), this will copy the bitmap pages
+ * to temporary writeout pages. It is intended to trigger a full write-out
+ * while still allowing the bitmap to change, for example if a resync or online
+ * verify is aborted due to a failed peer disk, while local IO continues, or
+ * pending resync acks are still being processed.
+ */
+int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local)
+{
+ return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, 0);
}
@@ -1130,28 +1181,45 @@ int drbd_bm_write_lazy(struct drbd_conf *mdev, unsigned upper_idx) __must_hold(l
*/
int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local)
{
- struct bm_aio_ctx ctx = {
+ struct bm_aio_ctx *ctx;
+ int err;
+
+ if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
+ dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
+ return 0;
+ }
+
+ ctx = kmalloc(sizeof(struct bm_aio_ctx), GFP_NOIO);
+ if (!ctx)
+ return -ENOMEM;
+
+ *ctx = (struct bm_aio_ctx) {
.mdev = mdev,
.in_flight = ATOMIC_INIT(1),
- .done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
+ .done = 0,
.flags = BM_AIO_COPY_PAGES,
+ .error = 0,
+ .kref = { ATOMIC_INIT(2) },
};
- if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
- dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
- return 0;
+ if (!get_ldev_if_state(mdev, D_ATTACHING)) { /* put is in bm_aio_ctx_destroy() */
+ dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n");
+ kfree(ctx);
+ return -ENODEV;
}
- bm_page_io_async(&ctx, idx, WRITE_SYNC);
- wait_for_completion(&ctx.done);
+ bm_page_io_async(ctx, idx, WRITE_SYNC);
+ wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
- if (ctx.error)
+ if (ctx->error)
drbd_chk_io_error(mdev, 1, true);
/* that should force detach, so the in memory bitmap will be
* gone in a moment as well. */
mdev->bm_writ_cnt++;
- return ctx.error;
+ err = atomic_read(&ctx->in_flight) ? -EIO : ctx->error;
+ kref_put(&ctx->kref, &bm_aio_ctx_destroy);
+ return err;
}
/* NOTE
@@ -1407,10 +1475,17 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
first_word = 0;
spin_lock_irq(&b->bm_lock);
}
-
/* last page (respectively only page, for first page == last page) */
last_word = MLPP(el >> LN2_BPL);
- bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
+
+ /* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
+ * ==> e = 32767, el = 32768, last_page = 2,
+ * and now last_word = 0.
+ * We do not want to touch last_page in this case,
+ * as we did not allocate it, it is not present in bitmap->bm_pages.
+ */
+ if (last_word)
+ bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
/* possibly trailing bits.
* example: (e & 63) == 63, el will be e+1.
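Across the drbd_bitmap.c hunks above, the on-stack completion inside bm_aio_ctx is replaced by a kmalloc'd context whose lifetime is governed by a kref that effectively starts at 2: one reference for the submitter, one for the "in_flight reached zero" event. A bio completion that arrives after the submitter has already given up (for instance on disk failure) therefore still finds valid memory. A reduced sketch of that ownership rule, with hypothetical names:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/atomic.h>

/* Hypothetical names; reduced to the reference counting only. */
struct io_ctx {
	struct kref kref;
	atomic_t in_flight;
	unsigned int done;
	int error;
};

static void io_ctx_destroy(struct kref *kref)
{
	kfree(container_of(kref, struct io_ctx, kref));
}

static struct io_ctx *io_ctx_new(void)
{
	struct io_ctx *ctx = kzalloc(sizeof(*ctx), GFP_NOIO);

	if (!ctx)
		return NULL;
	kref_init(&ctx->kref);		/* reference owned by the submitter */
	kref_get(&ctx->kref);		/* reference owned by the "all IO done" event */
	atomic_set(&ctx->in_flight, 1);	/* guard; each submitted request does atomic_inc() */
	return ctx;
}

/* completion side: the last completion sets done and drops the event reference */
static void io_ctx_complete_one(struct io_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->in_flight)) {
		ctx->done = 1;
		kref_put(&ctx->kref, io_ctx_destroy);
	}
}

/* submit side: may stop waiting early; completions still find a valid ctx */
static int io_ctx_finish(struct io_ctx *ctx)
{
	int err;

	if (atomic_dec_and_test(&ctx->in_flight))
		kref_put(&ctx->kref, io_ctx_destroy);	/* all IO already done (or none submitted):
							 * drop the event reference ourselves */
	/* else: wait here for ctx->done, or for disk failure (omitted); only
	 * then is ctx->error meaningful, and the completion side drops the
	 * event reference instead */

	err = ctx->error;
	kref_put(&ctx->kref, io_ctx_destroy);		/* submitter's reference */
	return err;
}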
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 8d68056..02f013a 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -712,7 +712,6 @@ struct drbd_request {
struct list_head tl_requests; /* ring list in the transfer log */
struct bio *master_bio; /* master bio pointer */
unsigned long rq_state; /* see comments above _req_mod() */
- int seq_num;
unsigned long start_time;
};
@@ -851,6 +850,7 @@ enum {
NEW_CUR_UUID, /* Create new current UUID when thawing IO */
AL_SUSPENDED, /* Activity logging is currently suspended. */
AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */
+ STATE_SENT, /* Do not change state/UUIDs while this is set */
};
struct drbd_bitmap; /* opaque for drbd_conf */
@@ -862,31 +862,30 @@ enum bm_flag {
BM_P_VMALLOCED = 0x10000, /* internal use only, will be masked out */
/* currently locked for bulk operation */
- BM_LOCKED_MASK = 0x7,
+ BM_LOCKED_MASK = 0xf,
/* in detail, that is: */
BM_DONT_CLEAR = 0x1,
BM_DONT_SET = 0x2,
BM_DONT_TEST = 0x4,
+ /* so we can mark it locked for bulk operation,
+ * and still allow all non-bulk operations */
+ BM_IS_LOCKED = 0x8,
+
/* (test bit, count bit) allowed (common case) */
- BM_LOCKED_TEST_ALLOWED = 0x3,
+ BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,
/* testing bits, as well as setting new bits allowed, but clearing bits
* would be unexpected. Used during bitmap receive. Setting new bits
* requires sending of "out-of-sync" information, though. */
- BM_LOCKED_SET_ALLOWED = 0x1,
+ BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,
- /* clear is not expected while bitmap is locked for bulk operation */
+ /* for drbd_bm_write_copy_pages, everything is allowed,
+ * only concurrent bulk operations are locked out. */
+ BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
};
-
-/* TODO sort members for performance
- * MAYBE group them further */
-
-/* THINK maybe we actually want to use the default "event/%s" worker threads
- * or similar in linux 2.6, which uses per cpu data and threads.
- */
struct drbd_work_queue {
struct list_head q;
struct semaphore s; /* producers up it, worker down()s it */
@@ -938,8 +937,7 @@ struct drbd_backing_dev {
};
struct drbd_md_io {
- struct drbd_conf *mdev;
- struct completion event;
+ unsigned int done;
int error;
};
@@ -1022,6 +1020,7 @@ struct drbd_conf {
struct drbd_tl_epoch *newest_tle;
struct drbd_tl_epoch *oldest_tle;
struct list_head out_of_sequence_requests;
+ struct list_head barrier_acked_requests;
struct hlist_head *tl_hash;
unsigned int tl_hash_s;
@@ -1056,6 +1055,8 @@ struct drbd_conf {
struct crypto_hash *csums_tfm;
struct crypto_hash *verify_tfm;
+ unsigned long last_reattach_jif;
+ unsigned long last_reconnect_jif;
struct drbd_thread receiver;
struct drbd_thread worker;
struct drbd_thread asender;
@@ -1094,7 +1095,8 @@ struct drbd_conf {
wait_queue_head_t ee_wait;
struct page *md_io_page; /* one page buffer for md_io */
struct page *md_io_tmpp; /* for logical_block_size != 512 */
- struct mutex md_io_mutex; /* protects the md_io_buffer */
+ struct drbd_md_io md_io;
+ atomic_t md_io_in_use; /* protects the md_io, md_io_page and md_io_tmpp */
spinlock_t al_lock;
wait_queue_head_t al_wait;
struct lru_cache *act_log; /* activity log */
@@ -1228,8 +1230,8 @@ extern int drbd_send_uuids(struct drbd_conf *mdev);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
extern int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev);
extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
-extern int _drbd_send_state(struct drbd_conf *mdev);
-extern int drbd_send_state(struct drbd_conf *mdev);
+extern int drbd_send_state(struct drbd_conf *mdev, union drbd_state s);
+extern int drbd_send_current_state(struct drbd_conf *mdev);
extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
enum drbd_packets cmd, struct p_header80 *h,
size_t size, unsigned msg_flags);
@@ -1461,6 +1463,7 @@ extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local);
extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
+extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local);
extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
unsigned long al_enr);
extern size_t drbd_bm_words(struct drbd_conf *mdev);
@@ -1493,11 +1496,38 @@ extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
extern mempool_t *drbd_request_mempool;
extern mempool_t *drbd_ee_mempool;
-extern struct page *drbd_pp_pool; /* drbd's page pool */
+/* drbd's page pool, used to buffer data received from the peer,
+ * or data requested by the peer.
+ *
+ * This does not have an emergency reserve.
+ *
+ * When allocating from this pool, it first takes pages from the pool.
+ * Only if the pool is depleted will it try to allocate from the system.
+ *
+ * The assumption is that pages taken from this pool will be processed,
+ * and given back, "quickly", and then can be recycled, so we can avoid
+ * frequent calls to alloc_page(), and still will be able to make progress even
+ * under memory pressure.
+ */
+extern struct page *drbd_pp_pool;
extern spinlock_t drbd_pp_lock;
extern int drbd_pp_vacant;
extern wait_queue_head_t drbd_pp_wait;
+/* We also need a standard (emergency-reserve backed) page pool
+ * for meta data IO (activity log, bitmap).
+ * We can keep it global, as long as it is used as "N pages at a time".
+ * 128 should be plenty, currently we probably can get away with as few as 1.
+ */
+#define DRBD_MIN_POOL_PAGES 128
+extern mempool_t *drbd_md_io_page_pool;
+
+/* We also need to make sure we get a bio
+ * when we need it for housekeeping purposes */
+extern struct bio_set *drbd_md_io_bio_set;
+/* to allocate from that set */
+extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
+
extern rwlock_t global_state_lock;
extern struct drbd_conf *drbd_new_device(unsigned int minor);
@@ -1536,8 +1566,12 @@ extern void resume_next_sg(struct drbd_conf *mdev);
extern void suspend_other_sg(struct drbd_conf *mdev);
extern int drbd_resync_finished(struct drbd_conf *mdev);
/* maybe rather drbd_main.c ? */
+extern void *drbd_md_get_buffer(struct drbd_conf *mdev);
+extern void drbd_md_put_buffer(struct drbd_conf *mdev);
extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
- struct drbd_backing_dev *bdev, sector_t sector, int rw);
+ struct drbd_backing_dev *bdev, sector_t sector, int rw);
+extern void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+ unsigned int *done);
extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int);
extern void drbd_rs_controller_reset(struct drbd_conf *mdev);
@@ -1754,19 +1788,6 @@ static inline struct page *page_chain_next(struct page *page)
#define page_chain_for_each_safe(page, n) \
for (; page && ({ n = page_chain_next(page); 1; }); page = n)
-static inline int drbd_bio_has_active_page(struct bio *bio)
-{
- struct bio_vec *bvec;
- int i;
-
- __bio_for_each_segment(bvec, bio, i, 0) {
- if (page_count(bvec->bv_page) > 1)
- return 1;
- }
-
- return 0;
-}
-
static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
{
struct page *page = e->pages;
@@ -1777,7 +1798,6 @@ static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
return 0;
}
-
static inline void drbd_state_lock(struct drbd_conf *mdev)
{
wait_event(mdev->misc_wait,
@@ -2230,7 +2250,7 @@ static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
* Note: currently we don't support such large bitmaps on 32bit
* arch anyways, but no harm done to be prepared for it here.
*/
- unsigned int shift = mdev->rs_total >= (1ULL << 32) ? 16 : 10;
+ unsigned int shift = mdev->rs_total > UINT_MAX ? 16 : 10;
unsigned long left = *bits_left >> shift;
unsigned long total = 1UL + (mdev->rs_total >> shift);
unsigned long tmp = 1000UL - left * 1000UL/total;
@@ -2306,12 +2326,12 @@ static inline int drbd_state_is_stable(struct drbd_conf *mdev)
case D_OUTDATED:
case D_CONSISTENT:
case D_UP_TO_DATE:
+ case D_FAILED:
/* disk state is stable as well. */
break;
/* no new io accepted during tansitional states */
case D_ATTACHING:
- case D_FAILED:
case D_NEGOTIATING:
case D_UNKNOWN:
case D_MASK:
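The reworked bm_flag values split "a bulk operation currently owns the bitmap" (BM_IS_LOCKED) from the individual prohibitions (BM_DONT_*). That is what makes BM_LOCKED_CHANGE_ALLOWED possible: concurrent bulk operations are still excluded, while single-bit set/clear/test go through. Schematically, assuming the enum above is in scope (these helpers are illustrative, not the actual checks in drbd_bitmap.c):

static bool bulk_op_may_start(enum bm_flag flags)
{
	/* any ongoing bulk operation excludes starting another one */
	return !(flags & BM_IS_LOCKED);
}

static bool single_bit_set_allowed(enum bm_flag flags)
{
	/* true under BM_LOCKED_CHANGE_ALLOWED: a bulk writeout is running,
	 * but the bitmap is still allowed to change underneath it */
	return !(flags & BM_DONT_SET);
}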
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 211fc44..920ede2 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -139,6 +139,8 @@ struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
+mempool_t *drbd_md_io_page_pool;
+struct bio_set *drbd_md_io_bio_set;
/* I do not use a standard mempool, because:
1) I want to hand out the pre-allocated objects first.
@@ -159,7 +161,24 @@ static const struct block_device_operations drbd_ops = {
.release = drbd_release,
};
-#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
+static void bio_destructor_drbd(struct bio *bio)
+{
+ bio_free(bio, drbd_md_io_bio_set);
+}
+
+struct bio *bio_alloc_drbd(gfp_t gfp_mask)
+{
+ struct bio *bio;
+
+ if (!drbd_md_io_bio_set)
+ return bio_alloc(gfp_mask, 1);
+
+ bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
+ if (!bio)
+ return NULL;
+ bio->bi_destructor = bio_destructor_drbd;
+ return bio;
+}
#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
@@ -208,6 +227,7 @@ static int tl_init(struct drbd_conf *mdev)
mdev->oldest_tle = b;
mdev->newest_tle = b;
INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
+ INIT_LIST_HEAD(&mdev->barrier_acked_requests);
mdev->tl_hash = NULL;
mdev->tl_hash_s = 0;
@@ -246,9 +266,7 @@ void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
new->n_writes = 0;
newest_before = mdev->newest_tle;
- /* never send a barrier number == 0, because that is special-cased
- * when using TCQ for our write ordering code */
- new->br_number = (newest_before->br_number+1) ?: 1;
+ new->br_number = newest_before->br_number+1;
if (mdev->newest_tle != new) {
mdev->newest_tle->next = new;
mdev->newest_tle = new;
@@ -311,7 +329,7 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
These have been list_move'd to the out_of_sequence_requests list in
_req_mod(, barrier_acked) above.
*/
- list_del_init(&b->requests);
+ list_splice_init(&b->requests, &mdev->barrier_acked_requests);
nob = b->next;
if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
@@ -411,6 +429,23 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
b = tmp;
list_splice(&carry_reads, &b->requests);
}
+
+ /* Actions operating on the disk state also want to work on
+ requests that got barrier acked. */
+ switch (what) {
+ case fail_frozen_disk_io:
+ case restart_frozen_disk_io:
+ list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
+ req = list_entry(le, struct drbd_request, tl_requests);
+ _req_mod(req, what);
+ }
+
+ case connection_lost_while_pending:
+ case resend:
+ break;
+ default:
+ dev_err(DEV, "what = %d in _tl_restart()\n", what);
+ }
}
@@ -458,6 +493,38 @@ void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
}
/**
+ * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
+ * @mdev: DRBD device.
+ */
+void tl_abort_disk_io(struct drbd_conf *mdev)
+{
+ struct drbd_tl_epoch *b;
+ struct list_head *le, *tle;
+ struct drbd_request *req;
+
+ spin_lock_irq(&mdev->req_lock);
+ b = mdev->oldest_tle;
+ while (b) {
+ list_for_each_safe(le, tle, &b->requests) {
+ req = list_entry(le, struct drbd_request, tl_requests);
+ if (!(req->rq_state & RQ_LOCAL_PENDING))
+ continue;
+ _req_mod(req, abort_disk_io);
+ }
+ b = b->next;
+ }
+
+ list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
+ req = list_entry(le, struct drbd_request, tl_requests);
+ if (!(req->rq_state & RQ_LOCAL_PENDING))
+ continue;
+ _req_mod(req, abort_disk_io);
+ }
+
+ spin_unlock_irq(&mdev->req_lock);
+}
+
+/**
* cl_wide_st_chg() - true if the state change is a cluster wide one
* @mdev: DRBD device.
* @os: old (current) state.
@@ -470,7 +537,7 @@ static int cl_wide_st_chg(struct drbd_conf *mdev,
((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
(os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
(os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
- (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
+ (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
}
@@ -509,8 +576,16 @@ static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
union drbd_state,
union drbd_state);
+enum sanitize_state_warnings {
+ NO_WARNING,
+ ABORTED_ONLINE_VERIFY,
+ ABORTED_RESYNC,
+ CONNECTION_LOST_NEGOTIATING,
+ IMPLICITLY_UPGRADED_DISK,
+ IMPLICITLY_UPGRADED_PDSK,
+};
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, const char **warn_sync_abort);
+ union drbd_state ns, enum sanitize_state_warnings *warn);
int drbd_send_state_req(struct drbd_conf *,
union drbd_state, union drbd_state);
@@ -785,6 +860,13 @@ is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
rv = SS_IN_TRANSIENT_STATE;
+ /* While establishing a connection, only allow cstate to change.
+ Delay/refuse role changes, detach, attach etc. */
+ if (test_bit(STATE_SENT, &mdev->flags) &&
+ !(os.conn == C_WF_REPORT_PARAMS ||
+ (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
+ rv = SS_IN_TRANSIENT_STATE;
+
if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
rv = SS_NEED_CONNECTION;
@@ -803,6 +885,21 @@ is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
return rv;
}
+static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
+{
+ static const char *msg_table[] = {
+ [NO_WARNING] = "",
+ [ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
+ [ABORTED_RESYNC] = "Resync aborted.",
+ [CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
+ [IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
+ [IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
+ };
+
+ if (warn != NO_WARNING)
+ dev_warn(DEV, "%s\n", msg_table[warn]);
+}
+
/**
* sanitize_state() - Resolves implicitly necessary additional changes to a state transition
* @mdev: DRBD device.
@@ -814,11 +911,14 @@ is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
* to D_UNKNOWN. This rule and many more along those lines are in this function.
*/
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
- union drbd_state ns, const char **warn_sync_abort)
+ union drbd_state ns, enum sanitize_state_warnings *warn)
{
enum drbd_fencing_p fp;
enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
+ if (warn)
+ *warn = NO_WARNING;
+
fp = FP_DONT_CARE;
if (get_ldev(mdev)) {
fp = mdev->ldev->dc.fencing;
@@ -833,18 +933,13 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
/* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
* If you try to go into some Sync* state, that shall fail (elsewhere). */
if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
- ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
+ ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_CONNECTED)
ns.conn = os.conn;
/* we cannot fail (again) if we already detached */
if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
ns.disk = D_DISKLESS;
- /* if we are only D_ATTACHING yet,
- * we can (and should) go directly to D_DISKLESS. */
- if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
- ns.disk = D_DISKLESS;
-
/* After C_DISCONNECTING only C_STANDALONE may follow */
if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
ns.conn = os.conn;
@@ -863,10 +958,9 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
/* Abort resync if a disk fails/detaches */
if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
(ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
- if (warn_sync_abort)
- *warn_sync_abort =
- os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
- "Online-verify" : "Resync";
+ if (warn)
+ *warn = os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
+ ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
ns.conn = C_CONNECTED;
}
@@ -877,7 +971,8 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
ns.disk = mdev->new_state_tmp.disk;
ns.pdsk = mdev->new_state_tmp.pdsk;
} else {
- dev_alert(DEV, "Connection lost while negotiating, no data!\n");
+ if (warn)
+ *warn = CONNECTION_LOST_NEGOTIATING;
ns.disk = D_DISKLESS;
ns.pdsk = D_UNKNOWN;
}
@@ -959,16 +1054,16 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
ns.disk = disk_max;
if (ns.disk < disk_min) {
- dev_warn(DEV, "Implicitly set disk from %s to %s\n",
- drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
+ if (warn)
+ *warn = IMPLICITLY_UPGRADED_DISK;
ns.disk = disk_min;
}
if (ns.pdsk > pdsk_max)
ns.pdsk = pdsk_max;
if (ns.pdsk < pdsk_min) {
- dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
- drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
+ if (warn)
+ *warn = IMPLICITLY_UPGRADED_PDSK;
ns.pdsk = pdsk_min;
}
@@ -1045,12 +1140,12 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
{
union drbd_state os;
enum drbd_state_rv rv = SS_SUCCESS;
- const char *warn_sync_abort = NULL;
+ enum sanitize_state_warnings ssw;
struct after_state_chg_work *ascw;
os = mdev->state;
- ns = sanitize_state(mdev, os, ns, &warn_sync_abort);
+ ns = sanitize_state(mdev, os, ns, &ssw);
if (ns.i == os.i)
return SS_NOTHING_TO_DO;
@@ -1076,8 +1171,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
return rv;
}
- if (warn_sync_abort)
- dev_warn(DEV, "%s aborted.\n", warn_sync_abort);
+ print_sanitize_warnings(mdev, ssw);
{
char *pbp, pb[300];
@@ -1243,7 +1337,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
drbd_thread_stop_nowait(&mdev->receiver);
/* Upon network failure, we need to restart the receiver. */
- if (os.conn > C_TEAR_DOWN &&
+ if (os.conn > C_WF_CONNECTION &&
ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
drbd_thread_restart_nowait(&mdev->receiver);
@@ -1251,6 +1345,15 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
drbd_resume_al(mdev);
+ /* remember last connect and attach times so request_timer_fn() won't
+ * kill newly established sessions while we are still trying to thaw
+ * previously frozen IO */
+ if (os.conn != C_WF_REPORT_PARAMS && ns.conn == C_WF_REPORT_PARAMS)
+ mdev->last_reconnect_jif = jiffies;
+ if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
+ ns.disk > D_NEGOTIATING)
+ mdev->last_reattach_jif = jiffies;
+
ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
if (ascw) {
ascw->os = os;
@@ -1354,12 +1457,16 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
/* Here we have the actions that are performed after a
state change. This function might sleep */
+ if (os.disk <= D_NEGOTIATING && ns.disk > D_NEGOTIATING)
+ mod_timer(&mdev->request_timer, jiffies + HZ);
+
nsm.i = -1;
if (ns.susp_nod) {
if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
what = resend;
- if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
+ if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
+ ns.disk > D_NEGOTIATING)
what = restart_frozen_disk_io;
if (what != nothing)
@@ -1408,7 +1515,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
/* Do not change the order of the if above and the two below... */
if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
}
/* No point in queuing send_bitmap if we don't have a connection
* anymore, so check also the _current_ state, not only the new state
@@ -1441,11 +1548,11 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
}
if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
- if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
+ if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
+ mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
drbd_uuid_new_current(mdev);
drbd_send_uuids(mdev);
}
-
/* D_DISKLESS Peer becomes secondary */
if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
/* We may still be Primary ourselves.
@@ -1473,14 +1580,14 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
drbd_send_sizes(mdev, 0, 0); /* to start sync... */
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
}
/* We want to pause/continue resync, tell peer. */
if (ns.conn >= C_CONNECTED &&
((os.aftr_isp != ns.aftr_isp) ||
(os.user_isp != ns.user_isp)))
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
/* In case one of the isp bits got set, suspend other devices. */
if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
@@ -1490,10 +1597,10 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
/* Make sure the peer gets informed about eventual state
changes (ISP bits) while we were in WFReportParams. */
if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
/* We are in the progress to start a full sync... */
if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
@@ -1513,33 +1620,38 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
/* first half of local IO error, failure to attach,
* or administrative detach */
if (os.disk != D_FAILED && ns.disk == D_FAILED) {
- enum drbd_io_error_p eh;
- int was_io_error;
+ enum drbd_io_error_p eh = EP_PASS_ON;
+ int was_io_error = 0;
/* corresponding get_ldev was in __drbd_set_state, to serialize
- * our cleanup here with the transition to D_DISKLESS,
- * so it is safe to dreference ldev here. */
- eh = mdev->ldev->dc.on_io_error;
- was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
-
- /* current state still has to be D_FAILED,
- * there is only one way out: to D_DISKLESS,
- * and that may only happen after our put_ldev below. */
- if (mdev->state.disk != D_FAILED)
- dev_err(DEV,
- "ASSERT FAILED: disk is %s during detach\n",
- drbd_disk_str(mdev->state.disk));
-
- if (drbd_send_state(mdev))
- dev_warn(DEV, "Notified peer that I am detaching my disk\n");
- else
- dev_err(DEV, "Sending state for detaching disk failed\n");
-
- drbd_rs_cancel_all(mdev);
-
- /* In case we want to get something to stable storage still,
- * this may be the last chance.
- * Following put_ldev may transition to D_DISKLESS. */
- drbd_md_sync(mdev);
+ * our cleanup here with the transition to D_DISKLESS.
+ * But it is still not safe to dereference ldev here, since
+ * we might come from a failed Attach before ldev was set. */
+ if (mdev->ldev) {
+ eh = mdev->ldev->dc.on_io_error;
+ was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+
+ /* Immediately allow completion of all application IO that waits
+ for completion from the local disk. */
+ tl_abort_disk_io(mdev);
+
+ /* current state still has to be D_FAILED,
+ * there is only one way out: to D_DISKLESS,
+ * and that may only happen after our put_ldev below. */
+ if (mdev->state.disk != D_FAILED)
+ dev_err(DEV,
+ "ASSERT FAILED: disk is %s during detach\n",
+ drbd_disk_str(mdev->state.disk));
+
+ if (ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
+
+ drbd_rs_cancel_all(mdev);
+
+ /* In case we want to get something to stable storage still,
+ * this may be the last chance.
+ * Following put_ldev may transition to D_DISKLESS. */
+ drbd_md_sync(mdev);
+ }
put_ldev(mdev);
if (was_io_error && eh == EP_CALL_HELPER)
@@ -1561,16 +1673,17 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
mdev->rs_failed = 0;
atomic_set(&mdev->rs_pending_cnt, 0);
- if (drbd_send_state(mdev))
- dev_warn(DEV, "Notified peer that I'm now diskless.\n");
+ if (ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
+
/* corresponding get_ldev in __drbd_set_state
* this may finally trigger drbd_ldev_destroy. */
put_ldev(mdev);
}
/* Notify peer that I had a local IO error, and did not detached.. */
- if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT)
- drbd_send_state(mdev);
+ if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
+ drbd_send_state(mdev, ns);
/* Disks got bigger while they were detached */
if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
@@ -1588,7 +1701,13 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
/* sync target done with resync. Explicitly notify peer, even though
* it should (at least for non-empty resyncs) already know itself. */
if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
- drbd_send_state(mdev);
+ drbd_send_state(mdev, ns);
+
+ /* Wake up role changes that were delayed because of connection establishment */
+ if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS) {
+ clear_bit(STATE_SENT, &mdev->flags);
+ wake_up(&mdev->state_wait);
+ }
/* This triggers bitmap writeout of potentially still unwritten pages
* if the resync finished cleanly, or aborted because of peer disk
@@ -1598,8 +1717,8 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
* No harm done if some bits change during this phase.
*/
if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
- drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
- "write from resync_finished", BM_LOCKED_SET_ALLOWED);
+ drbd_queue_bitmap_io(mdev, &drbd_bm_write_copy_pages, NULL,
+ "write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
put_ldev(mdev);
}
@@ -2057,7 +2176,11 @@ int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
- uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
+ uuid = mdev->ldev->md.uuid[UI_BITMAP];
+ if (uuid && uuid != UUID_JUST_CREATED)
+ uuid = uuid + UUID_NEW_BM_OFFSET;
+ else
+ get_random_bytes(&uuid, sizeof(u64));
drbd_uuid_set(mdev, UI_BITMAP, uuid);
drbd_print_uuids(mdev, "updated sync UUID");
drbd_md_sync(mdev);
@@ -2089,6 +2212,10 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
}
+ /* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
+ if (mdev->agreed_pro_version <= 94)
+ max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+
p.d_size = cpu_to_be64(d_size);
p.u_size = cpu_to_be64(u_size);
p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
@@ -2102,10 +2229,10 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
}
/**
- * drbd_send_state() - Sends the drbd state to the peer
+ * drbd_send_current_state() - Sends the drbd state to the peer
* @mdev: DRBD device.
*/
-int drbd_send_state(struct drbd_conf *mdev)
+int drbd_send_current_state(struct drbd_conf *mdev)
{
struct socket *sock;
struct p_state p;
@@ -2131,6 +2258,37 @@ int drbd_send_state(struct drbd_conf *mdev)
return ok;
}
+/**
+ * drbd_send_state() - After a state change, sends the new state to the peer
+ * @mdev: DRBD device.
+ * @state: the state to send, not necessarily the current state.
+ *
+ * Each state change queues an "after_state_ch" work, which will eventually
+ * send the resulting new state to the peer. If more state changes happen
+ * between queuing and processing of the after_state_ch work, we still
+ * want to send each intermediary state in the order it occurred.
+ */
+int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
+{
+ struct socket *sock;
+ struct p_state p;
+ int ok = 0;
+
+ mutex_lock(&mdev->data.mutex);
+
+ p.state = cpu_to_be32(state.i);
+ sock = mdev->data.socket;
+
+ if (likely(sock != NULL)) {
+ ok = _drbd_send_cmd(mdev, sock, P_STATE,
+ (struct p_header80 *)&p, sizeof(p), 0);
+ }
+
+ mutex_unlock(&mdev->data.mutex);
+
+ return ok;
+}
+
int drbd_send_state_req(struct drbd_conf *mdev,
union drbd_state mask, union drbd_state val)
{
@@ -2615,7 +2773,7 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
struct bio_vec *bvec;
int i;
/* hint all but last page with MSG_MORE */
- __bio_for_each_segment(bvec, bio, i, 0) {
+ bio_for_each_segment(bvec, bio, i) {
if (!_drbd_no_send_page(mdev, bvec->bv_page,
bvec->bv_offset, bvec->bv_len,
i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
@@ -2629,7 +2787,7 @@ static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
struct bio_vec *bvec;
int i;
/* hint all but last page with MSG_MORE */
- __bio_for_each_segment(bvec, bio, i, 0) {
+ bio_for_each_segment(bvec, bio, i) {
if (!_drbd_send_page(mdev, bvec->bv_page,
bvec->bv_offset, bvec->bv_len,
i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
@@ -2695,8 +2853,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
p.sector = cpu_to_be64(req->sector);
p.block_id = (unsigned long)req;
- p.seq_num = cpu_to_be32(req->seq_num =
- atomic_add_return(1, &mdev->packet_seq));
+ p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
@@ -2987,8 +3144,8 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
atomic_set(&mdev->rs_sect_in, 0);
atomic_set(&mdev->rs_sect_ev, 0);
atomic_set(&mdev->ap_in_flight, 0);
+ atomic_set(&mdev->md_io_in_use, 0);
- mutex_init(&mdev->md_io_mutex);
mutex_init(&mdev->data.mutex);
mutex_init(&mdev->meta.mutex);
sema_init(&mdev->data.work.s, 0);
@@ -3126,6 +3283,10 @@ static void drbd_destroy_mempools(void)
/* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
+ if (drbd_md_io_bio_set)
+ bioset_free(drbd_md_io_bio_set);
+ if (drbd_md_io_page_pool)
+ mempool_destroy(drbd_md_io_page_pool);
if (drbd_ee_mempool)
mempool_destroy(drbd_ee_mempool);
if (drbd_request_mempool)
@@ -3139,6 +3300,8 @@ static void drbd_destroy_mempools(void)
if (drbd_al_ext_cache)
kmem_cache_destroy(drbd_al_ext_cache);
+ drbd_md_io_bio_set = NULL;
+ drbd_md_io_page_pool = NULL;
drbd_ee_mempool = NULL;
drbd_request_mempool = NULL;
drbd_ee_cache = NULL;
@@ -3162,6 +3325,8 @@ static int drbd_create_mempools(void)
drbd_bm_ext_cache = NULL;
drbd_al_ext_cache = NULL;
drbd_pp_pool = NULL;
+ drbd_md_io_page_pool = NULL;
+ drbd_md_io_bio_set = NULL;
/* caches */
drbd_request_cache = kmem_cache_create(
@@ -3185,6 +3350,16 @@ static int drbd_create_mempools(void)
goto Enomem;
/* mempools */
+#ifdef COMPAT_HAVE_BIOSET_CREATE
+ drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
+ if (drbd_md_io_bio_set == NULL)
+ goto Enomem;
+#endif
+
+ drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
+ if (drbd_md_io_page_pool == NULL)
+ goto Enomem;
+
drbd_request_mempool = mempool_create(number,
mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
if (drbd_request_mempool == NULL)
@@ -3262,6 +3437,8 @@ static void drbd_delete_device(unsigned int minor)
if (!mdev)
return;
+ del_timer_sync(&mdev->request_timer);
+
/* paranoia asserts */
if (mdev->open_cnt != 0)
dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
@@ -3666,8 +3843,10 @@ void drbd_md_sync(struct drbd_conf *mdev)
if (!get_ldev_if_state(mdev, D_FAILED))
return;
- mutex_lock(&mdev->md_io_mutex);
- buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
+ buffer = drbd_md_get_buffer(mdev);
+ if (!buffer)
+ goto out;
+
memset(buffer, 0, 512);
buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
@@ -3698,7 +3877,8 @@ void drbd_md_sync(struct drbd_conf *mdev)
* since we updated it on metadata. */
mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
+out:
put_ldev(mdev);
}
@@ -3718,8 +3898,9 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
if (!get_ldev_if_state(mdev, D_ATTACHING))
return ERR_IO_MD_DISK;
- mutex_lock(&mdev->md_io_mutex);
- buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
+ buffer = drbd_md_get_buffer(mdev);
+ if (!buffer)
+ goto out;
if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
/* NOTE: can't do normal error processing here as this is
@@ -3780,7 +3961,8 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
mdev->sync_conf.al_extents = 127;
err:
- mutex_unlock(&mdev->md_io_mutex);
+ drbd_md_put_buffer(mdev);
+ out:
put_ldev(mdev);
return rv;
@@ -4183,12 +4365,11 @@ const char *drbd_buildtag(void)
static char buildtag[38] = "\0uilt-in";
if (buildtag[0] == 0) {
-#ifdef CONFIG_MODULES
- if (THIS_MODULE != NULL)
- sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
- else
+#ifdef MODULE
+ sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
+#else
+ buildtag[0] = 'b';
#endif
- buildtag[0] = 'b';
}
return buildtag;
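In the drbd_main.c hunks above, md_io_mutex is gone: meta-data IO now claims the single md_io buffer through drbd_md_get_buffer()/drbd_md_put_buffer() and the atomic md_io_in_use counter, and callers such as drbd_md_sync() and drbd_md_read() bail out on a NULL buffer instead of blocking. The helpers themselves are not part of these hunks; purely as an assumption about their shape (names suffixed _sketch to make clear this is not the driver's code), they could look roughly like:

/* Assumption only -- the real drbd_md_get_buffer()/drbd_md_put_buffer()
 * live elsewhere in the driver and may differ in detail. */
void *drbd_md_get_buffer_sketch(struct drbd_conf *mdev)
{
	int r;

	/* one user of md_io_page at a time; return NULL instead of
	 * sleeping forever once the disk has failed */
	wait_event(mdev->misc_wait,
		   (r = atomic_cmpxchg(&mdev->md_io_in_use, 0, 1)) == 0 ||
		   mdev->state.disk <= D_FAILED);

	return r ? NULL : page_address(mdev->md_io_page);
}

void drbd_md_put_buffer_sketch(struct drbd_conf *mdev)
{
	if (atomic_dec_and_test(&mdev->md_io_in_use))
		wake_up(&mdev->misc_wait);
}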
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 946166e..6d4de6a 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -289,7 +289,7 @@ static int _try_outdate_peer_async(void *data)
*/
spin_lock_irq(&mdev->req_lock);
ns = mdev->state;
- if (ns.conn < C_WF_REPORT_PARAMS) {
+ if (ns.conn < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &mdev->flags)) {
ns.pdsk = nps;
_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
}
@@ -432,7 +432,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
/* if this was forced, we should consider sync */
if (forced)
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_current_state(mdev);
}
drbd_md_sync(mdev);
@@ -845,9 +845,10 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
Because new from 8.3.8 onwards the peer can use multiple
BIOs for a single peer_request */
if (mdev->state.conn >= C_CONNECTED) {
- if (mdev->agreed_pro_version < 94)
- peer = mdev->peer_max_bio_size;
- else if (mdev->agreed_pro_version == 94)
+ if (mdev->agreed_pro_version < 94) {
+ peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+ /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
+ } else if (mdev->agreed_pro_version == 94)
peer = DRBD_MAX_SIZE_H80_PACKET;
else /* drbd 8.3.8 onwards */
peer = DRBD_MAX_BIO_SIZE;
@@ -1032,7 +1033,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
(unsigned long long) drbd_get_max_capacity(nbc),
(unsigned long long) nbc->dc.disk_size);
- retcode = ERR_DISK_TO_SMALL;
+ retcode = ERR_DISK_TOO_SMALL;
goto fail;
}
@@ -1046,7 +1047,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
}
if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
- retcode = ERR_MD_DISK_TO_SMALL;
+ retcode = ERR_MD_DISK_TOO_SMALL;
dev_warn(DEV, "refusing attach: md-device too small, "
"at least %llu sectors needed for this meta-disk type\n",
(unsigned long long) min_md_device_sectors);
@@ -1057,7 +1058,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
* (we may currently be R_PRIMARY with no local disk...) */
if (drbd_get_max_capacity(nbc) <
drbd_get_capacity(mdev->this_bdev)) {
- retcode = ERR_DISK_TO_SMALL;
+ retcode = ERR_DISK_TOO_SMALL;
goto fail;
}
@@ -1138,7 +1139,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
dev_warn(DEV, "refusing to truncate a consistent device\n");
- retcode = ERR_DISK_TO_SMALL;
+ retcode = ERR_DISK_TOO_SMALL;
goto force_diskless_dec;
}
@@ -1336,17 +1337,34 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
{
enum drbd_ret_code retcode;
int ret;
+ struct detach dt = {};
+
+ if (!detach_from_tags(mdev, nlp->tag_list, &dt)) {
+ reply->ret_code = ERR_MANDATORY_TAG;
+ goto out;
+ }
+
+ if (dt.detach_force) {
+ drbd_force_state(mdev, NS(disk, D_FAILED));
+ reply->ret_code = SS_SUCCESS;
+ goto out;
+ }
+
drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
+ drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
+ drbd_md_put_buffer(mdev);
/* D_FAILED will transition to DISKLESS. */
ret = wait_event_interruptible(mdev->misc_wait,
mdev->state.disk != D_FAILED);
drbd_resume_io(mdev);
+
if ((int)retcode == (int)SS_IS_DISKLESS)
retcode = SS_NOTHING_TO_DO;
if (ret)
retcode = ERR_INTR;
reply->ret_code = retcode;
+out:
return 0;
}
@@ -1711,7 +1729,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
if (rs.no_resync && mdev->agreed_pro_version < 93) {
retcode = ERR_NEED_APV_93;
- goto fail;
+ goto fail_ldev;
}
if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
@@ -1738,6 +1756,10 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
fail:
reply->ret_code = retcode;
return 0;
+
+ fail_ldev:
+ put_ldev(mdev);
+ goto fail;
}
static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
@@ -1941,6 +1963,7 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync. */
+ drbd_suspend_io(mdev);
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
@@ -1959,6 +1982,7 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
}
+ drbd_resume_io(mdev);
reply->ret_code = retcode;
return 0;
@@ -1980,6 +2004,7 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
/* If there is still bitmap IO pending, probably because of a previous
* resync just being finished, wait for it before requesting a new resync. */
+ drbd_suspend_io(mdev);
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
@@ -1998,6 +2023,7 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
} else
retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
}
+ drbd_resume_io(mdev);
reply->ret_code = retcode;
return 0;
@@ -2170,11 +2196,13 @@ static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
/* If there is still bitmap IO pending, e.g. previous resync or verify
* just being finished, wait for it before requesting a new resync. */
+ drbd_suspend_io(mdev);
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
/* w_make_ov_request expects position to be aligned */
mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT;
reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
+ drbd_resume_io(mdev);
return 0;
}
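Three handlers above (invalidate, invalidate-peer, start-ov) gain the same drbd_suspend_io()/drbd_resume_io() bracket around the wait for pending bitmap IO, presumably so that new application IO cannot keep the bitmap busy while that wait drains and the resync/verify state change is carried out. Reduced to its skeleton (schematic, not the literal code):

static int start_resync_quiesced(struct drbd_conf *mdev)
{
	int retcode;

	drbd_suspend_io(mdev);			/* hold off new application IO */
	wait_event(mdev->misc_wait,		/* let pending bitmap IO finish */
		   !test_bit(BITMAP_IO, &mdev->flags));
	retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	drbd_resume_io(mdev);			/* resume on every exit path */

	return retcode;
}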
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 2959cdf..869bada 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -52,7 +52,7 @@ void seq_printf_with_thousands_grouping(struct seq_file *seq, long v)
if (unlikely(v >= 1000000)) {
/* cool: > GiByte/s */
seq_printf(seq, "%ld,", v / 1000000);
- v /= 1000000;
+ v %= 1000000;
seq_printf(seq, "%03ld,%03ld", v/1000, v % 1000);
} else if (likely(v >= 1000))
seq_printf(seq, "%ld,%03ld", v/1000, v % 1000);
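The one-character fix above matters for values of one million and up: after v /= 1000000, v held only the count of millions, so the remaining six digits were printed as a zero-padded copy of that count instead of the actual digits; v %= 1000000 keeps them. A stand-alone user-space version to see the difference:

#include <stdio.h>

/* User-space rewrite of seq_printf_with_thousands_grouping() after the fix. */
static void print_with_thousands_grouping(long v)
{
	if (v >= 1000000) {
		printf("%ld,", v / 1000000);
		v %= 1000000;	/* the old 'v /= 1000000' dropped these six digits */
		printf("%03ld,%03ld", v / 1000, v % 1000);
	} else if (v >= 1000) {
		printf("%ld,%03ld", v / 1000, v % 1000);
	} else {
		printf("%ld", v);
	}
}

int main(void)
{
	print_with_thousands_grouping(1234567);	/* 1,234,567 -- the old code printed 1,000,001 */
	printf("\n");
	return 0;
}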
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 436f519..ea4836e 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -466,6 +466,7 @@ static int drbd_accept(struct drbd_conf *mdev, const char **what,
goto out;
}
(*newsock)->ops = sock->ops;
+ __module_get((*newsock)->ops->owner);
out:
return err;
@@ -750,6 +751,7 @@ static int drbd_connect(struct drbd_conf *mdev)
{
struct socket *s, *sock, *msock;
int try, h, ok;
+ enum drbd_state_rv rv;
D_ASSERT(!mdev->data.socket);
@@ -888,25 +890,32 @@ retry:
}
}
- if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
- return 0;
-
sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
atomic_set(&mdev->packet_seq, 0);
mdev->peer_seq = 0;
- drbd_thread_start(&mdev->asender);
-
if (drbd_send_protocol(mdev) == -1)
return -1;
+ set_bit(STATE_SENT, &mdev->flags);
drbd_send_sync_param(mdev, &mdev->sync_conf);
drbd_send_sizes(mdev, 0, 0);
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_current_state(mdev);
clear_bit(USE_DEGR_WFC_T, &mdev->flags);
clear_bit(RESIZE_PENDING, &mdev->flags);
+
+ spin_lock_irq(&mdev->req_lock);
+ rv = _drbd_set_state(_NS(mdev, conn, C_WF_REPORT_PARAMS), CS_VERBOSE, NULL);
+ if (mdev->state.conn != C_WF_REPORT_PARAMS)
+ clear_bit(STATE_SENT, &mdev->flags);
+ spin_unlock_irq(&mdev->req_lock);
+
+ if (rv < SS_SUCCESS)
+ return 0;
+
+ drbd_thread_start(&mdev->asender);
mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
return 1;
@@ -957,7 +966,7 @@ static void drbd_flush(struct drbd_conf *mdev)
rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
NULL);
if (rv) {
- dev_err(DEV, "local disk flush failed with status %d\n", rv);
+ dev_info(DEV, "local disk flush failed with status %d\n", rv);
/* would rather check on EOPNOTSUPP, but that is not reliable.
* don't try again for ANY return value != 0
* if (rv == -EOPNOTSUPP) */
@@ -1001,13 +1010,14 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
if (epoch_size != 0 &&
atomic_read(&epoch->active) == 0 &&
- test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
+ (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
if (!(ev & EV_CLEANUP)) {
spin_unlock(&mdev->epoch_lock);
drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
spin_lock(&mdev->epoch_lock);
}
- dec_unacked(mdev);
+ if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
+ dec_unacked(mdev);
if (mdev->current_epoch != epoch) {
next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
@@ -1096,7 +1106,11 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
/* In most cases, we will only need one bio. But in case the lower
* level restrictions happen to be different at this offset on this
* side than those of the sending peer, we may need to submit the
- * request in more than one bio. */
+ * request in more than one bio.
+ *
+ * Plain bio_alloc is good enough here; this is not a DRBD-internally
+ * generated bio, but a bio allocated on behalf of the peer.
+ */
next_bio:
bio = bio_alloc(GFP_NOIO, nr_pages);
if (!bio) {
@@ -1583,6 +1597,24 @@ static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int u
return ok;
}
+static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_epoch_entry *data_e)
+{
+
+ struct drbd_epoch_entry *rs_e;
+ bool rv = 0;
+
+ spin_lock_irq(&mdev->req_lock);
+ list_for_each_entry(rs_e, &mdev->sync_ee, w.list) {
+ if (overlaps(data_e->sector, data_e->size, rs_e->sector, rs_e->size)) {
+ rv = 1;
+ break;
+ }
+ }
+ spin_unlock_irq(&mdev->req_lock);
+
+ return rv;
+}
+
/* Called from receive_Data.
* Synchronize packets on sock with packets on msock.
*
@@ -1826,6 +1858,9 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
list_add(&e->w.list, &mdev->active_ee);
spin_unlock_irq(&mdev->req_lock);
+ if (mdev->state.conn == C_SYNC_TARGET)
+ wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, e));
+
switch (mdev->net_conf->wire_protocol) {
case DRBD_PROT_C:
inc_unacked(mdev);
@@ -2420,7 +2455,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
- dev_info(DEV, "Did not got last syncUUID packet, corrected:\n");
+ dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
return -1;
@@ -2806,10 +2841,10 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
if (apv >= 88) {
if (apv == 88) {
- if (data_size > SHARED_SECRET_MAX) {
- dev_err(DEV, "verify-alg too long, "
- "peer wants %u, accepting only %u byte\n",
- data_size, SHARED_SECRET_MAX);
+ if (data_size > SHARED_SECRET_MAX || data_size == 0) {
+ dev_err(DEV, "verify-alg of wrong size, "
+ "peer wants %u, accepting only up to %u byte\n",
+ data_size, SHARED_SECRET_MAX);
return false;
}
@@ -3168,9 +3203,20 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
os = ns = mdev->state;
spin_unlock_irq(&mdev->req_lock);
- /* peer says his disk is uptodate, while we think it is inconsistent,
- * and this happens while we think we have a sync going on. */
- if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
+ /* If some other part of the code (asender thread, timeout)
+ * already decided to close the connection again,
+ * we must not "re-establish" it here. */
+ if (os.conn <= C_TEAR_DOWN)
+ return false;
+
+ /* If this is the "end of sync" confirmation, usually the peer disk
+ * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
+ * set) resync started in PausedSyncT, or if the timing of pause-/
+ * unpause-sync events has been "just right", the peer disk may
+ * transition from D_CONSISTENT to D_UP_TO_DATE as well.
+ */
+ if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
+ real_peer_disk == D_UP_TO_DATE &&
os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
/* If we are (becoming) SyncSource, but peer is still in sync
* preparation, ignore its uptodate-ness to avoid flapping, it
@@ -3288,7 +3334,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
/* Nowadays only used when forcing a node into primary role and
setting its disk to UpToDate with that */
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_current_state(mdev);
}
}
@@ -3776,6 +3822,13 @@ static void drbd_disconnect(struct drbd_conf *mdev)
if (mdev->state.conn == C_STANDALONE)
return;
+ /* We are about to start the cleanup after connection loss.
+ * Make sure drbd_make_request knows about that.
+ * Usually we should be in some network failure state already,
+ * but just in case we are not, we fix it up here.
+ */
+ drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
+
/* asender does not clean up anything. it must not interfere, either */
drbd_thread_stop(&mdev->asender);
drbd_free_sock(mdev);
@@ -3803,8 +3856,6 @@ static void drbd_disconnect(struct drbd_conf *mdev)
atomic_set(&mdev->rs_pending_cnt, 0);
wake_up(&mdev->misc_wait);
- del_timer(&mdev->request_timer);
-
/* make sure syncer is stopped and w_resume_next_sg queued */
del_timer_sync(&mdev->resync_timer);
resync_timer_fn((unsigned long)mdev);
@@ -4433,7 +4484,7 @@ static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
if (mdev->state.conn == C_AHEAD &&
atomic_read(&mdev->ap_in_flight) == 0 &&
- !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
+ !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
mdev->start_resync_timer.expires = jiffies + HZ;
add_timer(&mdev->start_resync_timer);
}
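receive_Data() above now makes an incoming application write wait, while this node is SyncTarget, until no resync write in mdev->sync_ee overlaps it; the check uses the driver's existing overlaps() helper, which is not shown in these hunks. A plausible equivalent, under the assumption that 'size' is in bytes and a sector is 512 bytes:

/* Illustrative stand-in for overlaps(): two half-open sector ranges
 * intersect iff each one starts before the other ends. */
static bool ranges_overlap(sector_t s1, unsigned int size1,
			   sector_t s2, unsigned int size2)
{
	sector_t e1 = s1 + (size1 >> 9);	/* end sector, exclusive */
	sector_t e2 = s2 + (size2 >> 9);

	return s1 < e2 && s2 < e1;
}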
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 4a0f314..8e93a6a 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -37,6 +37,7 @@ static void _drbd_start_io_acct(struct drbd_conf *mdev, struct drbd_request *req
const int rw = bio_data_dir(bio);
int cpu;
cpu = part_stat_lock();
+ part_round_stats(cpu, &mdev->vdisk->part0);
part_stat_inc(cpu, &mdev->vdisk->part0, ios[rw]);
part_stat_add(cpu, &mdev->vdisk->part0, sectors[rw], bio_sectors(bio));
part_inc_in_flight(&mdev->vdisk->part0, rw);
@@ -214,8 +215,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
{
const unsigned long s = req->rq_state;
struct drbd_conf *mdev = req->mdev;
- /* only WRITES may end up here without a master bio (on barrier ack) */
- int rw = req->master_bio ? bio_data_dir(req->master_bio) : WRITE;
+ int rw = req->rq_state & RQ_WRITE ? WRITE : READ;
/* we must not complete the master bio, while it is
* still being processed by _drbd_send_zc_bio (drbd_send_dblock)
@@ -230,7 +230,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
return;
if (s & RQ_NET_PENDING)
return;
- if (s & RQ_LOCAL_PENDING)
+ if (s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED))
return;
if (req->master_bio) {
@@ -277,6 +277,9 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
req->master_bio = NULL;
}
+ if (s & RQ_LOCAL_PENDING)
+ return;
+
if ((s & RQ_NET_MASK) == 0 || (s & RQ_NET_DONE)) {
/* this is disconnected (local only) operation,
* or protocol C P_WRITE_ACK,
@@ -429,7 +432,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
break;
case completed_ok:
- if (bio_data_dir(req->master_bio) == WRITE)
+ if (req->rq_state & RQ_WRITE)
mdev->writ_cnt += req->size>>9;
else
mdev->read_cnt += req->size>>9;
@@ -438,7 +441,14 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state &= ~RQ_LOCAL_PENDING;
_req_may_be_done_not_susp(req, m);
- put_ldev(mdev);
+ break;
+
+ case abort_disk_io:
+ req->rq_state |= RQ_LOCAL_ABORTED;
+ if (req->rq_state & RQ_WRITE)
+ _req_may_be_done_not_susp(req, m);
+ else
+ goto goto_queue_for_net_read;
break;
case write_completed_with_error:
@@ -447,7 +457,6 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
__drbd_chk_io_error(mdev, false);
_req_may_be_done_not_susp(req, m);
- put_ldev(mdev);
break;
case read_ahead_completed_with_error:
@@ -455,7 +464,6 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state |= RQ_LOCAL_COMPLETED;
req->rq_state &= ~RQ_LOCAL_PENDING;
_req_may_be_done_not_susp(req, m);
- put_ldev(mdev);
break;
case read_completed_with_error:
@@ -464,10 +472,16 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state |= RQ_LOCAL_COMPLETED;
req->rq_state &= ~RQ_LOCAL_PENDING;
- D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+ if (req->rq_state & RQ_LOCAL_ABORTED) {
+ _req_may_be_done(req, m);
+ break;
+ }
__drbd_chk_io_error(mdev, false);
- put_ldev(mdev);
+
+ goto_queue_for_net_read:
+
+ D_ASSERT(!(req->rq_state & RQ_NET_MASK));
/* no point in retrying if there is no good remote data,
* or we have no connection. */
@@ -556,10 +570,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
drbd_queue_work(&mdev->data.work, &req->w);
break;
- case oos_handed_to_network:
- /* actually the same */
+ case read_retry_remote_canceled:
case send_canceled:
- /* treat it the same */
case send_failed:
/* real cleanup will be done from tl_clear. just update flags
* so it is no longer marked as on the worker queue */
@@ -589,17 +601,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
}
req->rq_state &= ~RQ_NET_QUEUED;
req->rq_state |= RQ_NET_SENT;
- /* because _drbd_send_zc_bio could sleep, and may want to
- * dereference the bio even after the "write_acked_by_peer" and
- * "completed_ok" events came in, once we return from
- * _drbd_send_zc_bio (drbd_send_dblock), we have to check
- * whether it is done already, and end it. */
_req_may_be_done_not_susp(req, m);
break;
- case read_retry_remote_canceled:
+ case oos_handed_to_network:
+ /* Was not set PENDING, no longer QUEUED, so is now DONE
+ * as far as this connection is concerned. */
req->rq_state &= ~RQ_NET_QUEUED;
- /* fall through, in case we raced with drbd_disconnect */
+ req->rq_state |= RQ_NET_DONE;
+ _req_may_be_done_not_susp(req, m);
+ break;
+
case connection_lost_while_pending:
/* transfer log cleanup after connection loss */
/* assert something? */
@@ -616,8 +628,6 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
_req_may_be_done(req, m); /* Allowed while state.susp */
break;
- case write_acked_by_peer_and_sis:
- req->rq_state |= RQ_NET_SIS;
case conflict_discarded_by_peer:
/* for discarded conflicting writes of multiple primaries,
* there is no need to keep anything in the tl, potential
@@ -628,18 +638,15 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
(unsigned long long)req->sector, req->size);
req->rq_state |= RQ_NET_DONE;
/* fall through */
+ case write_acked_by_peer_and_sis:
case write_acked_by_peer:
+ if (what == write_acked_by_peer_and_sis)
+ req->rq_state |= RQ_NET_SIS;
/* protocol C; successfully written on peer.
- * Nothing to do here.
+ * Nothing more to do here.
* We want to keep the tl in place for all protocols, to cater
- * for volatile write-back caches on lower level devices.
- *
- * A barrier request is expected to have forced all prior
- * requests onto stable storage, so completion of a barrier
- * request could set NET_DONE right here, and not wait for the
- * P_BARRIER_ACK, but that is an unnecessary optimization. */
+ * for volatile write-back caches on lower level devices. */
- /* this makes it effectively the same as for: */
case recv_acked_by_peer:
/* protocol B; pretends to be successfully written on peer.
* see also notes above in handed_over_to_network about
@@ -763,6 +770,40 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s
return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
}
+static void maybe_pull_ahead(struct drbd_conf *mdev)
+{
+ int congested = 0;
+
+ /* If I don't even have good local storage, we cannot reasonably try
+ * to pull ahead of the peer. We also need the local reference to make
+ * sure mdev->act_log is there.
+ * Note: caller has to make sure that net_conf is there.
+ */
+ if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+ return;
+
+ if (mdev->net_conf->cong_fill &&
+ atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
+ dev_info(DEV, "Congestion-fill threshold reached\n");
+ congested = 1;
+ }
+
+ if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
+ dev_info(DEV, "Congestion-extents threshold reached\n");
+ congested = 1;
+ }
+
+ if (congested) {
+ queue_barrier(mdev); /* last barrier, after mirrored writes */
+
+ if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
+ _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
+ else /*mdev->net_conf->on_congestion == OC_DISCONNECT */
+ _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
+ }
+ put_ldev(mdev);
+}
+
static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
{
const int rw = bio_rw(bio);
@@ -773,6 +814,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
int local, remote, send_oos = 0;
int err = -EIO;
int ret = 0;
+ union drbd_state s;
/* allocate outside of all locks; */
req = drbd_req_new(mdev, bio);
@@ -834,8 +876,9 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
drbd_al_begin_io(mdev, sector);
}
- remote = remote && drbd_should_do_remote(mdev->state);
- send_oos = rw == WRITE && drbd_should_send_oos(mdev->state);
+ s = mdev->state;
+ remote = remote && drbd_should_do_remote(s);
+ send_oos = rw == WRITE && drbd_should_send_oos(s);
D_ASSERT(!(remote && send_oos));
if (!(local || remote) && !is_susp(mdev->state)) {
@@ -867,7 +910,7 @@ allocate_barrier:
if (is_susp(mdev->state)) {
/* If we got suspended, use the retry mechanism of
- generic_make_request() to restart processing of this
+ drbd_make_request() to restart processing of this
bio. In the next call to drbd_make_request
we sleep in inc_ap_bio() */
ret = 1;
@@ -968,29 +1011,8 @@ allocate_barrier:
_req_mod(req, queue_for_send_oos);
if (remote &&
- mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
- int congested = 0;
-
- if (mdev->net_conf->cong_fill &&
- atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
- dev_info(DEV, "Congestion-fill threshold reached\n");
- congested = 1;
- }
-
- if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
- dev_info(DEV, "Congestion-extents threshold reached\n");
- congested = 1;
- }
-
- if (congested) {
- queue_barrier(mdev); /* last barrier, after mirrored writes */
-
- if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
- _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
- else /*mdev->net_conf->on_congestion == OC_DISCONNECT */
- _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
- }
- }
+ mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
+ maybe_pull_ahead(mdev);
spin_unlock_irq(&mdev->req_lock);
kfree(b); /* if someone else has beaten us to it... */
@@ -1091,7 +1113,6 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
*/
D_ASSERT(bio->bi_size > 0);
D_ASSERT((bio->bi_size & 0x1ff) == 0);
- D_ASSERT(bio->bi_idx == 0);
/* to make some things easier, force alignment of requests within the
* granularity of our hash tables */
@@ -1099,8 +1120,9 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
e_enr = (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT;
if (likely(s_enr == e_enr)) {
- inc_ap_bio(mdev, 1);
- drbd_make_request_common(mdev, bio, start_time);
+ do {
+ inc_ap_bio(mdev, 1);
+ } while (drbd_make_request_common(mdev, bio, start_time));
return;
}
@@ -1196,36 +1218,66 @@ void request_timer_fn(unsigned long data)
struct drbd_conf *mdev = (struct drbd_conf *) data;
struct drbd_request *req; /* oldest request */
struct list_head *le;
- unsigned long et = 0; /* effective timeout = ko_count * timeout */
+ unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
+ unsigned long now;
if (get_net_conf(mdev)) {
- et = mdev->net_conf->timeout*HZ/10 * mdev->net_conf->ko_count;
+ if (mdev->state.conn >= C_WF_REPORT_PARAMS)
+ ent = mdev->net_conf->timeout*HZ/10
+ * mdev->net_conf->ko_count;
put_net_conf(mdev);
}
- if (!et || mdev->state.conn < C_WF_REPORT_PARAMS)
+ if (get_ldev(mdev)) { /* implicit state.disk >= D_INCONSISTENT */
+ dt = mdev->ldev->dc.disk_timeout * HZ / 10;
+ put_ldev(mdev);
+ }
+ et = min_not_zero(dt, ent);
+
+ if (!et)
return; /* Recurring timer stopped */
+ now = jiffies;
+
spin_lock_irq(&mdev->req_lock);
le = &mdev->oldest_tle->requests;
if (list_empty(le)) {
spin_unlock_irq(&mdev->req_lock);
- mod_timer(&mdev->request_timer, jiffies + et);
+ mod_timer(&mdev->request_timer, now + et);
return;
}
le = le->prev;
req = list_entry(le, struct drbd_request, tl_requests);
- if (time_is_before_eq_jiffies(req->start_time + et)) {
- if (req->rq_state & RQ_NET_PENDING) {
- dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
- _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE, NULL);
- } else {
- dev_warn(DEV, "Local backing block device frozen?\n");
- mod_timer(&mdev->request_timer, jiffies + et);
- }
- } else {
- mod_timer(&mdev->request_timer, req->start_time + et);
- }
+ /* The request is considered timed out, if
+ * - we have some effective timeout from the configuration,
+ * with above state restrictions applied,
+ * - the oldest request is waiting for a response from the network
+ *   or the local disk, respectively,
+ * - the oldest request is in fact older than the effective timeout,
+ * - the connection was established (or the disk was attached, respectively)
+ *   for longer than the timeout already.
+ * Note that for 32bit jiffies and very stable connections/disks,
+ * we may have a wrap around, which is caught by
+ * !time_in_range(now, last_..._jif, last_..._jif + timeout).
+ *
+ * Side effect: once per 32bit wrap-around interval, which means every
+ * ~198 days with 250 HZ, we have a window where the timeout would need
+ * to expire twice (worst case) to become effective. Good enough.
+ */
+ if (ent && req->rq_state & RQ_NET_PENDING &&
+ time_after(now, req->start_time + ent) &&
+ !time_in_range(now, mdev->last_reconnect_jif, mdev->last_reconnect_jif + ent)) {
+ dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
+ _drbd_set_state(_NS(mdev, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
+ }
+ if (dt && req->rq_state & RQ_LOCAL_PENDING &&
+ time_after(now, req->start_time + dt) &&
+ !time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
+ dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
+ __drbd_chk_io_error(mdev, 1);
+ }
+ nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
spin_unlock_irq(&mdev->req_lock);
+ mod_timer(&mdev->request_timer, nt);
}
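request_timer_fn() above now derives separate network (ent) and disk (dt) timeouts, keeps the smaller non-zero one, and relies on wrap-safe jiffies comparisons exactly as the new comment describes. Below is a minimal userspace sketch of those comparisons, assuming 32-bit tick counters; after() and in_range() mimic time_after()/time_in_range(), and the tick values are invented for illustration only.

#include <stdint.h>
#include <stdio.h>

/* Unsigned subtraction keeps comparisons correct across a 32-bit counter
 * wrap as long as the two values are less than half the range apart. */
static int after(uint32_t a, uint32_t b)	/* a is later than b */
{
	return (int32_t)(b - a) < 0;
}

static int in_range(uint32_t t, uint32_t lo, uint32_t hi)
{
	return !after(lo, t) && !after(t, hi);	/* lo <= t <= hi */
}

int main(void)
{
	uint32_t start = 0xFFFFFF00u;	/* request started just before the wrap */
	uint32_t ent   = 0x80u;		/* effective timeout in ticks */
	uint32_t now   = 0x00000100u;	/* current ticks, already wrapped */

	printf("request timed out: %d\n", after(now, start + ent));
	printf("now within [start, start+ent]: %d\n",
	       in_range(now, start, start + ent));
	return 0;
}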
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 68a234a..3d21119 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -105,6 +105,7 @@ enum drbd_req_event {
read_completed_with_error,
read_ahead_completed_with_error,
write_completed_with_error,
+ abort_disk_io,
completed_ok,
resend,
fail_frozen_disk_io,
@@ -118,18 +119,21 @@ enum drbd_req_event {
* same time, so we should hold the request lock anyways.
*/
enum drbd_req_state_bits {
- /* 210
- * 000: no local possible
- * 001: to be submitted
+ /* 3210
+ * 0000: no local possible
+ * 0001: to be submitted
* UNUSED, we could map: 011: submitted, completion still pending
- * 110: completed ok
- * 010: completed with error
+ * 0110: completed ok
+ * 0010: completed with error
+ * 1001: Aborted (before completion)
+ * 1x10: Aborted and completed -> free
*/
__RQ_LOCAL_PENDING,
__RQ_LOCAL_COMPLETED,
__RQ_LOCAL_OK,
+ __RQ_LOCAL_ABORTED,
- /* 76543
+ /* 87654
* 00000: no network possible
* 00001: to be sent
* 00011: to be sent, on worker queue
@@ -199,8 +203,9 @@ enum drbd_req_state_bits {
#define RQ_LOCAL_PENDING (1UL << __RQ_LOCAL_PENDING)
#define RQ_LOCAL_COMPLETED (1UL << __RQ_LOCAL_COMPLETED)
#define RQ_LOCAL_OK (1UL << __RQ_LOCAL_OK)
+#define RQ_LOCAL_ABORTED (1UL << __RQ_LOCAL_ABORTED)
-#define RQ_LOCAL_MASK ((RQ_LOCAL_OK << 1)-1) /* 0x07 */
+#define RQ_LOCAL_MASK ((RQ_LOCAL_ABORTED << 1)-1)
#define RQ_NET_PENDING (1UL << __RQ_NET_PENDING)
#define RQ_NET_QUEUED (1UL << __RQ_NET_QUEUED)
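With __RQ_LOCAL_ABORTED added above, RQ_LOCAL_MASK is rebuilt from the highest local bit rather than the old hard-coded 0x07. Here is a small compilable sketch of that mask construction and of testing the "aborted before completion" state; the constants are illustrative copies of the header, not the header itself.

#include <stdio.h>

#define RQ_LOCAL_PENDING	(1UL << 0)
#define RQ_LOCAL_COMPLETED	(1UL << 1)
#define RQ_LOCAL_OK		(1UL << 2)
#define RQ_LOCAL_ABORTED	(1UL << 3)
/* Mask of all local bits: one past the highest bit, minus one. */
#define RQ_LOCAL_MASK		((RQ_LOCAL_ABORTED << 1) - 1)	/* 0x0f */

int main(void)
{
	unsigned long s = RQ_LOCAL_PENDING | RQ_LOCAL_ABORTED;	/* "1001" above */

	printf("mask=0x%02lx aborted_before_completion=%d\n",
	       RQ_LOCAL_MASK,
	       (s & RQ_LOCAL_ABORTED) && !(s & RQ_LOCAL_COMPLETED));
	return 0;
}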
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 4d3e6f6..620c70f 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -70,11 +70,29 @@ rwlock_t global_state_lock;
void drbd_md_io_complete(struct bio *bio, int error)
{
struct drbd_md_io *md_io;
+ struct drbd_conf *mdev;
md_io = (struct drbd_md_io *)bio->bi_private;
+ mdev = container_of(md_io, struct drbd_conf, md_io);
+
md_io->error = error;
- complete(&md_io->event);
+ /* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
+ * to time out on the lower-level device, and eventually detach from it.
+ * If this io completion runs after that timeout expired, this
+ * drbd_md_put_buffer() may allow us to finally try and re-attach.
+ * During normal operation, this only puts that extra reference
+ * down to 1 again.
+ * Make sure we first drop the reference, and only then signal
+ * completion, or we may (in drbd_al_read_log()) cycle so fast into the
+ * next drbd_md_sync_page_io(), that we trigger the
+ * ASSERT(atomic_read(&mdev->md_io_in_use) == 1) there.
+ */
+ drbd_md_put_buffer(mdev);
+ md_io->done = 1;
+ wake_up(&mdev->misc_wait);
+ bio_put(bio);
+ put_ldev(mdev);
}
/* reads on behalf of the partner,
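drbd_md_io_complete() above is careful to drop the extra buffer reference before signalling completion, for the reason spelled out in its comment. Below is a toy userspace model of that ordering, assuming C11 atomics; the counters stand in for md_io_in_use and are not the driver's real fields.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int md_io_in_use = 2;	/* submitter's ref + extra ref taken at submit */
static atomic_int md_io_done;

static void completion_path(void)
{
	atomic_fetch_sub(&md_io_in_use, 1);	/* drop the extra reference first */
	atomic_store(&md_io_done, 1);		/* ...only then signal completion */
}

static void waiter_resubmits(void)
{
	if (atomic_load(&md_io_done))
		printf("in_use seen by resubmitter: %d (expects 1)\n",
		       atomic_load(&md_io_in_use));
}

int main(void)
{
	completion_path();
	waiter_resubmits();
	return 0;
}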
@@ -226,6 +244,7 @@ void drbd_endio_pri(struct bio *bio, int error)
spin_lock_irqsave(&mdev->req_lock, flags);
__req_mod(req, what, &m);
spin_unlock_irqrestore(&mdev->req_lock, flags);
+ put_ldev(mdev);
if (m.bio)
complete_master_bio(mdev, &m);
@@ -290,7 +309,7 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
sg_init_table(&sg, 1);
crypto_hash_init(&desc);
- __bio_for_each_segment(bvec, bio, i, 0) {
+ bio_for_each_segment(bvec, bio, i) {
sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
crypto_hash_update(&desc, &sg, sg.length);
}
@@ -728,7 +747,7 @@ int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
}
drbd_start_resync(mdev, C_SYNC_SOURCE);
- clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags);
+ clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
return 1;
}
@@ -1519,14 +1538,14 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
}
drbd_state_lock(mdev);
-
+ write_lock_irq(&global_state_lock);
if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
+ write_unlock_irq(&global_state_lock);
drbd_state_unlock(mdev);
return;
}
- write_lock_irq(&global_state_lock);
- ns = mdev->state;
+ ns.i = mdev->state.i;
ns.aftr_isp = !_drbd_may_sync_now(mdev);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b0b00d7..553f43a 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -551,7 +551,7 @@ static void floppy_ready(void);
static void floppy_start(void);
static void process_fd_request(void);
static void recalibrate_floppy(void);
-static void floppy_shutdown(unsigned long);
+static void floppy_shutdown(struct work_struct *);
static int floppy_request_regions(int);
static void floppy_release_regions(int);
@@ -588,6 +588,8 @@ static int buffer_max = -1;
static struct floppy_fdc_state fdc_state[N_FDC];
static int fdc; /* current fdc */
+static struct workqueue_struct *floppy_wq;
+
static struct floppy_struct *_floppy = floppy_type;
static unsigned char current_drive;
static long current_count_sectors;
@@ -629,16 +631,15 @@ static inline void set_debugt(void) { }
static inline void debugt(const char *func, const char *msg) { }
#endif /* DEBUGT */
-typedef void (*timeout_fn)(unsigned long);
-static DEFINE_TIMER(fd_timeout, floppy_shutdown, 0, 0);
+static DECLARE_DELAYED_WORK(fd_timeout, floppy_shutdown);
static const char *timeout_message;
static void is_alive(const char *func, const char *message)
{
/* this routine checks whether the floppy driver is "alive" */
if (test_bit(0, &fdc_busy) && command_status < 2 &&
- !timer_pending(&fd_timeout)) {
+ !delayed_work_pending(&fd_timeout)) {
DPRINT("%s: timeout handler died. %s\n", func, message);
}
}
@@ -666,15 +667,19 @@ static int output_log_pos;
static void __reschedule_timeout(int drive, const char *message)
{
+ unsigned long delay;
+
if (drive == current_reqD)
drive = current_drive;
- del_timer(&fd_timeout);
+ __cancel_delayed_work(&fd_timeout);
+
if (drive < 0 || drive >= N_DRIVE) {
- fd_timeout.expires = jiffies + 20UL * HZ;
+ delay = 20UL * HZ;
drive = 0;
} else
- fd_timeout.expires = jiffies + UDP->timeout;
- add_timer(&fd_timeout);
+ delay = UDP->timeout;
+
+ queue_delayed_work(floppy_wq, &fd_timeout, delay);
if (UDP->flags & FD_DEBUG)
DPRINT("reschedule timeout %s\n", message);
timeout_message = message;
@@ -872,7 +877,7 @@ static int lock_fdc(int drive, bool interruptible)
command_status = FD_COMMAND_NONE;
- __reschedule_timeout(drive, "lock fdc");
+ reschedule_timeout(drive, "lock fdc");
set_fdc(drive);
return 0;
}
@@ -880,23 +885,15 @@ static int lock_fdc(int drive, bool interruptible)
/* unlocks the driver */
static void unlock_fdc(void)
{
- unsigned long flags;
-
- raw_cmd = NULL;
if (!test_bit(0, &fdc_busy))
DPRINT("FDC access conflict!\n");
- if (do_floppy)
- DPRINT("device interrupt still active at FDC release: %pf!\n",
- do_floppy);
+ raw_cmd = NULL;
command_status = FD_COMMAND_NONE;
- spin_lock_irqsave(&floppy_lock, flags);
- del_timer(&fd_timeout);
+ __cancel_delayed_work(&fd_timeout);
+ do_floppy = NULL;
cont = NULL;
clear_bit(0, &fdc_busy);
- if (current_req || set_next_request())
- do_fd_request(current_req->q);
- spin_unlock_irqrestore(&floppy_lock, flags);
wake_up(&fdc_wait);
}
@@ -968,26 +965,24 @@ static DECLARE_WORK(floppy_work, NULL);
static void schedule_bh(void (*handler)(void))
{
+ WARN_ON(work_pending(&floppy_work));
+
PREPARE_WORK(&floppy_work, (work_func_t)handler);
- schedule_work(&floppy_work);
+ queue_work(floppy_wq, &floppy_work);
}
-static DEFINE_TIMER(fd_timer, NULL, 0, 0);
+static DECLARE_DELAYED_WORK(fd_timer, NULL);
static void cancel_activity(void)
{
- unsigned long flags;
-
- spin_lock_irqsave(&floppy_lock, flags);
do_floppy = NULL;
- PREPARE_WORK(&floppy_work, (work_func_t)empty);
- del_timer(&fd_timer);
- spin_unlock_irqrestore(&floppy_lock, flags);
+ cancel_delayed_work_sync(&fd_timer);
+ cancel_work_sync(&floppy_work);
}
/* this function makes sure that the disk stays in the drive during the
* transfer */
-static void fd_watchdog(void)
+static void fd_watchdog(struct work_struct *arg)
{
debug_dcl(DP->flags, "calling disk change from watchdog\n");
@@ -997,21 +992,20 @@ static void fd_watchdog(void)
cont->done(0);
reset_fdc();
} else {
- del_timer(&fd_timer);
- fd_timer.function = (timeout_fn)fd_watchdog;
- fd_timer.expires = jiffies + HZ / 10;
- add_timer(&fd_timer);
+ cancel_delayed_work(&fd_timer);
+ PREPARE_DELAYED_WORK(&fd_timer, fd_watchdog);
+ queue_delayed_work(floppy_wq, &fd_timer, HZ / 10);
}
}
static void main_command_interrupt(void)
{
- del_timer(&fd_timer);
+ cancel_delayed_work(&fd_timer);
cont->interrupt();
}
/* waits for a delay (spinup or select) to pass */
-static int fd_wait_for_completion(unsigned long delay, timeout_fn function)
+static int fd_wait_for_completion(unsigned long expires, work_func_t function)
{
if (FDCS->reset) {
reset_fdc(); /* do the reset during sleep to win time
@@ -1020,11 +1014,10 @@ static int fd_wait_for_completion(unsigned long delay, timeout_fn function)
return 1;
}
- if (time_before(jiffies, delay)) {
- del_timer(&fd_timer);
- fd_timer.function = function;
- fd_timer.expires = delay;
- add_timer(&fd_timer);
+ if (time_before(jiffies, expires)) {
+ cancel_delayed_work(&fd_timer);
+ PREPARE_DELAYED_WORK(&fd_timer, function);
+ queue_delayed_work(floppy_wq, &fd_timer, expires - jiffies);
return 1;
}
return 0;
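The floppy conversion above repeatedly translates an absolute timer expiry into the relative delay that queue_delayed_work() expects, guarded by time_before(). Below is a minimal userspace model of that conversion, assuming 32-bit tick counters; before() mirrors time_before(), and the tick values and printed messages are invented for illustration.

#include <stdint.h>
#include <stdio.h>

static int before(uint32_t a, uint32_t b)	/* a is earlier than b */
{
	return (int32_t)(a - b) < 0;
}

/* Only queue if the expiry is still in the future, as in the patched
 * fd_wait_for_completion(); otherwise fall through and run now. */
static void queue_if_pending(uint32_t now, uint32_t expires)
{
	if (before(now, expires))
		printf("queue delayed work, delay=%u ticks\n", expires - now);
	else
		printf("expiry already passed, run immediately\n");
}

int main(void)
{
	queue_if_pending(1000, 1025);	/* 25 ticks in the future */
	queue_if_pending(1000, 990);	/* already expired */
	return 0;
}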
@@ -1342,7 +1335,7 @@ static int fdc_dtr(void)
*/
FDCS->dtr = raw_cmd->rate & 3;
return fd_wait_for_completion(jiffies + 2UL * HZ / 100,
- (timeout_fn)floppy_ready);
+ (work_func_t)floppy_ready);
} /* fdc_dtr */
static void tell_sector(void)
@@ -1447,7 +1440,7 @@ static void setup_rw_floppy(void)
int flags;
int dflags;
unsigned long ready_date;
- timeout_fn function;
+ work_func_t function;
flags = raw_cmd->flags;
if (flags & (FD_RAW_READ | FD_RAW_WRITE))
@@ -1461,9 +1454,9 @@ static void setup_rw_floppy(void)
*/
if (time_after(ready_date, jiffies + DP->select_delay)) {
ready_date -= DP->select_delay;
- function = (timeout_fn)floppy_start;
+ function = (work_func_t)floppy_start;
} else
- function = (timeout_fn)setup_rw_floppy;
+ function = (work_func_t)setup_rw_floppy;
/* wait until the floppy is spinning fast enough */
if (fd_wait_for_completion(ready_date, function))
@@ -1493,7 +1486,7 @@ static void setup_rw_floppy(void)
inr = result();
cont->interrupt();
} else if (flags & FD_RAW_NEED_DISK)
- fd_watchdog();
+ fd_watchdog(NULL);
}
static int blind_seek;
@@ -1802,20 +1795,22 @@ static void show_floppy(void)
pr_info("do_floppy=%pf\n", do_floppy);
if (work_pending(&floppy_work))
pr_info("floppy_work.func=%pf\n", floppy_work.func);
- if (timer_pending(&fd_timer))
- pr_info("fd_timer.function=%pf\n", fd_timer.function);
- if (timer_pending(&fd_timeout)) {
- pr_info("timer_function=%pf\n", fd_timeout.function);
- pr_info("expires=%lu\n", fd_timeout.expires - jiffies);
- pr_info("now=%lu\n", jiffies);
- }
+ if (delayed_work_pending(&fd_timer))
+ pr_info("delayed work.function=%p expires=%ld\n",
+ fd_timer.work.func,
+ fd_timer.timer.expires - jiffies);
+ if (delayed_work_pending(&fd_timeout))
+ pr_info("timer_function=%p expires=%ld\n",
+ fd_timeout.work.func,
+ fd_timeout.timer.expires - jiffies);
+
pr_info("cont=%p\n", cont);
pr_info("current_req=%p\n", current_req);
pr_info("command_status=%d\n", command_status);
pr_info("\n");
}
-static void floppy_shutdown(unsigned long data)
+static void floppy_shutdown(struct work_struct *arg)
{
unsigned long flags;
@@ -1868,7 +1863,7 @@ static int start_motor(void (*function)(void))
/* wait_for_completion also schedules reset if needed. */
return fd_wait_for_completion(DRS->select_date + DP->select_delay,
- (timeout_fn)function);
+ (work_func_t)function);
}
static void floppy_ready(void)
@@ -2821,7 +2816,6 @@ do_request:
spin_lock_irq(&floppy_lock);
pending = set_next_request();
spin_unlock_irq(&floppy_lock);
-
if (!pending) {
do_floppy = NULL;
unlock_fdc();
@@ -2898,13 +2892,15 @@ static void do_fd_request(struct request_queue *q)
current_req->cmd_flags))
return;
- if (test_bit(0, &fdc_busy)) {
+ if (test_and_set_bit(0, &fdc_busy)) {
/* fdc busy, this new request will be treated when the
current one is done */
is_alive(__func__, "old request running");
return;
}
- lock_fdc(MAXTIMEOUT, false);
+ command_status = FD_COMMAND_NONE;
+ __reschedule_timeout(MAXTIMEOUT, "fd_request");
+ set_fdc(0);
process_fd_request();
is_alive(__func__, "");
}
@@ -3612,9 +3608,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
mutex_lock(&floppy_mutex);
mutex_lock(&open_lock);
- if (UDRS->fd_ref < 0)
- UDRS->fd_ref = 0;
- else if (!UDRS->fd_ref--) {
+ if (!UDRS->fd_ref--) {
DPRINT("floppy_release with fd_ref == 0");
UDRS->fd_ref = 0;
}
@@ -3650,13 +3644,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
set_bit(FD_VERIFY_BIT, &UDRS->flags);
}
- if (UDRS->fd_ref == -1 || (UDRS->fd_ref && (mode & FMODE_EXCL)))
- goto out2;
-
- if (mode & FMODE_EXCL)
- UDRS->fd_ref = -1;
- else
- UDRS->fd_ref++;
+ UDRS->fd_ref++;
opened_bdev[drive] = bdev;
@@ -3719,10 +3707,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
mutex_unlock(&floppy_mutex);
return 0;
out:
- if (UDRS->fd_ref < 0)
- UDRS->fd_ref = 0;
- else
- UDRS->fd_ref--;
+ UDRS->fd_ref--;
+
if (!UDRS->fd_ref)
opened_bdev[drive] = NULL;
out2:
@@ -4159,10 +4145,16 @@ static int __init floppy_init(void)
goto out_put_disk;
}
+ floppy_wq = alloc_ordered_workqueue("floppy", 0);
+ if (!floppy_wq) {
+ err = -ENOMEM;
+ goto out_put_disk;
+ }
+
disks[dr]->queue = blk_init_queue(do_fd_request, &floppy_lock);
if (!disks[dr]->queue) {
err = -ENOMEM;
- goto out_put_disk;
+ goto out_destroy_workq;
}
blk_queue_max_hw_sectors(disks[dr]->queue, 64);
@@ -4213,7 +4205,7 @@ static int __init floppy_init(void)
use_virtual_dma = can_use_virtual_dma & 1;
fdc_state[0].address = FDC1;
if (fdc_state[0].address == -1) {
- del_timer_sync(&fd_timeout);
+ cancel_delayed_work(&fd_timeout);
err = -ENODEV;
goto out_unreg_region;
}
@@ -4224,7 +4216,7 @@ static int __init floppy_init(void)
fdc = 0; /* reset fdc in case of unexpected interrupt */
err = floppy_grab_irq_and_dma();
if (err) {
- del_timer_sync(&fd_timeout);
+ cancel_delayed_work(&fd_timeout);
err = -EBUSY;
goto out_unreg_region;
}
@@ -4281,13 +4273,13 @@ static int __init floppy_init(void)
user_reset_fdc(-1, FD_RESET_ALWAYS, false);
}
fdc = 0;
- del_timer_sync(&fd_timeout);
+ cancel_delayed_work(&fd_timeout);
current_drive = 0;
initialized = true;
if (have_no_fdc) {
DPRINT("no floppy controllers found\n");
err = have_no_fdc;
- goto out_flush_work;
+ goto out_release_dma;
}
for (drive = 0; drive < N_DRIVE; drive++) {
@@ -4302,7 +4294,7 @@ static int __init floppy_init(void)
err = platform_device_register(&floppy_device[drive]);
if (err)
- goto out_flush_work;
+ goto out_release_dma;
err = device_create_file(&floppy_device[drive].dev,
&dev_attr_cmos);
@@ -4320,13 +4312,14 @@ static int __init floppy_init(void)
out_unreg_platform_dev:
platform_device_unregister(&floppy_device[drive]);
-out_flush_work:
- flush_work_sync(&floppy_work);
+out_release_dma:
if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
out_unreg_region:
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
platform_driver_unregister(&floppy_driver);
+out_destroy_workq:
+ destroy_workqueue(floppy_wq);
out_unreg_blkdev:
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_put_disk:
@@ -4397,7 +4390,7 @@ static int floppy_grab_irq_and_dma(void)
* We might have scheduled a free_irq(), wait it to
* drain first:
*/
- flush_work_sync(&floppy_work);
+ flush_workqueue(floppy_wq);
if (fd_request_irq()) {
DPRINT("Unable to grab IRQ%d for the floppy driver\n",
@@ -4488,9 +4481,9 @@ static void floppy_release_irq_and_dma(void)
pr_info("motor off timer %d still active\n", drive);
#endif
- if (timer_pending(&fd_timeout))
+ if (delayed_work_pending(&fd_timeout))
pr_info("floppy timer still active:%s\n", timeout_message);
- if (timer_pending(&fd_timer))
+ if (delayed_work_pending(&fd_timer))
pr_info("auxiliary floppy timer still active\n");
if (work_pending(&floppy_work))
pr_info("work still pending\n");
@@ -4560,8 +4553,9 @@ static void __exit floppy_module_exit(void)
put_disk(disks[drive]);
}
- del_timer_sync(&fd_timeout);
- del_timer_sync(&fd_timer);
+ cancel_delayed_work_sync(&fd_timeout);
+ cancel_delayed_work_sync(&fd_timer);
+ destroy_workqueue(floppy_wq);
if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index bbca966..3bba655 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1597,14 +1597,12 @@ static int loop_add(struct loop_device **l, int i)
struct gendisk *disk;
int err;
+ err = -ENOMEM;
lo = kzalloc(sizeof(*lo), GFP_KERNEL);
- if (!lo) {
- err = -ENOMEM;
+ if (!lo)
goto out;
- }
- err = idr_pre_get(&loop_index_idr, GFP_KERNEL);
- if (err < 0)
+ if (!idr_pre_get(&loop_index_idr, GFP_KERNEL))
goto out_free_dev;
if (i >= 0) {
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 76fa3de..1788f491 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -780,9 +780,9 @@ static const struct block_device_operations mg_disk_ops = {
.getgeo = mg_getgeo
};
-static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
+static int mg_suspend(struct device *dev)
{
- struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
+ struct mg_drv_data *prv_data = dev->platform_data;
struct mg_host *host = prv_data->host;
if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
@@ -804,9 +804,9 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
return 0;
}
-static int mg_resume(struct platform_device *plat_dev)
+static int mg_resume(struct device *dev)
{
- struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
+ struct mg_drv_data *prv_data = dev->platform_data;
struct mg_host *host = prv_data->host;
if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
@@ -825,6 +825,8 @@ static int mg_resume(struct platform_device *plat_dev)
return 0;
}
+static SIMPLE_DEV_PM_OPS(mg_pm, mg_suspend, mg_resume);
+
static int mg_probe(struct platform_device *plat_dev)
{
struct mg_host *host;
@@ -1074,11 +1076,10 @@ static int mg_remove(struct platform_device *plat_dev)
static struct platform_driver mg_disk_driver = {
.probe = mg_probe,
.remove = mg_remove,
- .suspend = mg_suspend,
- .resume = mg_resume,
.driver = {
.name = MG_DEV_NAME,
.owner = THIS_MODULE,
+ .pm = &mg_pm,
}
};
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 304000c..a8fddeb 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -37,6 +37,7 @@
#include <linux/kthread.h>
#include <../drivers/ata/ahci.h>
#include <linux/export.h>
+#include <linux/debugfs.h>
#include "mtip32xx.h"
#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -85,6 +86,7 @@ static int instance;
* allocated in mtip_init().
*/
static int mtip_major;
+static struct dentry *dfs_parent;
static DEFINE_SPINLOCK(rssd_index_lock);
static DEFINE_IDA(rssd_index_ida);
@@ -294,18 +296,16 @@ static int hba_reset_nosleep(struct driver_data *dd)
*/
static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
{
- unsigned long flags = 0;
-
atomic_set(&port->commands[tag].active, 1);
- spin_lock_irqsave(&port->cmd_issue_lock, flags);
+ spin_lock(&port->cmd_issue_lock);
writel((1 << MTIP_TAG_BIT(tag)),
port->s_active[MTIP_TAG_INDEX(tag)]);
writel((1 << MTIP_TAG_BIT(tag)),
port->cmd_issue[MTIP_TAG_INDEX(tag)]);
- spin_unlock_irqrestore(&port->cmd_issue_lock, flags);
+ spin_unlock(&port->cmd_issue_lock);
/* Set the command's timeout value.*/
port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
@@ -436,8 +436,7 @@ static void mtip_init_port(struct mtip_port *port)
writel(0xFFFFFFFF, port->completed[i]);
/* Clear any pending interrupts for this port */
- writel(readl(port->dd->mmio + PORT_IRQ_STAT),
- port->dd->mmio + PORT_IRQ_STAT);
+ writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
/* Clear any pending interrupts on the HBA. */
writel(readl(port->dd->mmio + HOST_IRQ_STAT),
@@ -782,13 +781,24 @@ static void mtip_handle_tfe(struct driver_data *dd)
/* Stop the timer to prevent command timeouts. */
del_timer(&port->cmd_timer);
+ set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
+
+ if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
+ test_bit(MTIP_TAG_INTERNAL, port->allocated)) {
+ cmd = &port->commands[MTIP_TAG_INTERNAL];
+ dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
+
+ atomic_inc(&cmd->active); /* active > 1 indicates error */
+ if (cmd->comp_data && cmd->comp_func) {
+ cmd->comp_func(port, MTIP_TAG_INTERNAL,
+ cmd->comp_data, PORT_IRQ_TF_ERR);
+ }
+ goto handle_tfe_exit;
+ }
/* clear the tag accumulator */
memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
- /* Set eh_active */
- set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
-
/* Loop through all the groups */
for (group = 0; group < dd->slot_groups; group++) {
completed = readl(port->completed[group]);
@@ -940,6 +950,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
}
print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
+handle_tfe_exit:
/* clear eh_active */
clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
wake_up_interruptible(&port->svc_wait);
@@ -961,6 +972,8 @@ static inline void mtip_process_sdbf(struct driver_data *dd)
/* walk all bits in all slot groups */
for (group = 0; group < dd->slot_groups; group++) {
completed = readl(port->completed[group]);
+ if (!completed)
+ continue;
/* clear completed status register in the hardware.*/
writel(completed, port->completed[group]);
@@ -1329,22 +1342,6 @@ static int mtip_exec_internal_command(struct mtip_port *port,
}
rv = -EAGAIN;
}
-
- if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
- & (1 << MTIP_TAG_INTERNAL)) {
- dev_warn(&port->dd->pdev->dev,
- "Retiring internal command but CI is 1.\n");
- if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
- &port->dd->dd_flag)) {
- hba_reset_nosleep(port->dd);
- rv = -ENXIO;
- } else {
- mtip_restart_port(port);
- rv = -EAGAIN;
- }
- goto exec_ic_exit;
- }
-
} else {
/* Spin for <timeout> checking if command still outstanding */
timeout = jiffies + msecs_to_jiffies(timeout);
@@ -1361,21 +1358,25 @@ static int mtip_exec_internal_command(struct mtip_port *port,
rv = -ENXIO;
goto exec_ic_exit;
}
+ if (readl(port->mmio + PORT_IRQ_STAT) & PORT_IRQ_ERR) {
+ atomic_inc(&int_cmd->active); /* error */
+ break;
+ }
}
+ }
- if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
+ if (atomic_read(&int_cmd->active) > 1) {
+ dev_err(&port->dd->pdev->dev,
+ "Internal command [%02X] failed\n", fis->command);
+ rv = -EIO;
+ }
+ if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
& (1 << MTIP_TAG_INTERNAL)) {
- dev_err(&port->dd->pdev->dev,
- "Internal command did not complete [atomic]\n");
+ rv = -ENXIO;
+ if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &port->dd->dd_flag)) {
+ mtip_restart_port(port);
rv = -EAGAIN;
- if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
- &port->dd->dd_flag)) {
- hba_reset_nosleep(port->dd);
- rv = -ENXIO;
- } else {
- mtip_restart_port(port);
- rv = -EAGAIN;
- }
}
}
exec_ic_exit:
@@ -1893,13 +1894,33 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
void __user *user_buffer)
{
struct host_to_dev_fis fis;
- struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
+ struct host_to_dev_fis *reply;
+ u8 *buf = NULL;
+ dma_addr_t dma_addr = 0;
+ int rv = 0, xfer_sz = command[3];
+
+ if (xfer_sz) {
+ if (user_buffer)
+ return -EFAULT;
+
+ buf = dmam_alloc_coherent(&port->dd->pdev->dev,
+ ATA_SECT_SIZE * xfer_sz,
+ &dma_addr,
+ GFP_KERNEL);
+ if (!buf) {
+ dev_err(&port->dd->pdev->dev,
+ "Memory allocation failed (%d bytes)\n",
+ ATA_SECT_SIZE * xfer_sz);
+ return -ENOMEM;
+ }
+ memset(buf, 0, ATA_SECT_SIZE * xfer_sz);
+ }
/* Build the FIS. */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
- fis.type = 0x27;
- fis.opts = 1 << 7;
- fis.command = command[0];
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = command[0];
fis.features = command[2];
fis.sect_count = command[3];
if (fis.command == ATA_CMD_SMART) {
@@ -1908,6 +1929,11 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
fis.cyl_hi = 0xC2;
}
+ if (xfer_sz)
+ reply = (port->rxfis + RX_FIS_PIO_SETUP);
+ else
+ reply = (port->rxfis + RX_FIS_D2H_REG);
+
dbg_printk(MTIP_DRV_NAME
" %s: User Command: cmd %x, sect %x, "
"feat %x, sectcnt %x\n",
@@ -1917,43 +1943,46 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
command[2],
command[3]);
- memset(port->sector_buffer, 0x00, ATA_SECT_SIZE);
-
/* Execute the command. */
if (mtip_exec_internal_command(port,
&fis,
5,
- port->sector_buffer_dma,
- (command[3] != 0) ? ATA_SECT_SIZE : 0,
+ (xfer_sz ? dma_addr : 0),
+ (xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0),
0,
GFP_KERNEL,
MTIP_IOCTL_COMMAND_TIMEOUT_MS)
< 0) {
- return -1;
+ rv = -EFAULT;
+ goto exit_drive_command;
}
/* Collect the completion status. */
command[0] = reply->command; /* Status*/
command[1] = reply->features; /* Error*/
- command[2] = command[3];
+ command[2] = reply->sect_count;
dbg_printk(MTIP_DRV_NAME
" %s: Completion Status: stat %x, "
- "err %x, cmd %x\n",
+ "err %x, nsect %x\n",
__func__,
command[0],
command[1],
command[2]);
- if (user_buffer && command[3]) {
+ if (xfer_sz) {
if (copy_to_user(user_buffer,
- port->sector_buffer,
+ buf,
ATA_SECT_SIZE * command[3])) {
- return -EFAULT;
+ rv = -EFAULT;
+ goto exit_drive_command;
}
}
-
- return 0;
+exit_drive_command:
+ if (buf)
+ dmam_free_coherent(&port->dd->pdev->dev,
+ ATA_SECT_SIZE * xfer_sz, buf, dma_addr);
+ return rv;
}
/*
@@ -2003,6 +2032,32 @@ static unsigned int implicit_sector(unsigned char command,
return rv;
}
+static void mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout)
+{
+ switch (fis->command) {
+ case ATA_CMD_DOWNLOAD_MICRO:
+ *timeout = 120000; /* 2 minutes */
+ break;
+ case ATA_CMD_SEC_ERASE_UNIT:
+ case 0xFC:
+ *timeout = 240000; /* 4 minutes */
+ break;
+ case ATA_CMD_STANDBYNOW1:
+ *timeout = 10000; /* 10 seconds */
+ break;
+ case 0xF7:
+ case 0xFA:
+ *timeout = 60000; /* 60 seconds */
+ break;
+ case ATA_CMD_SMART:
+ *timeout = 15000; /* 15 seconds */
+ break;
+ default:
+ *timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
+ break;
+ }
+}
+
/*
* Executes a taskfile
* See ide_taskfile_ioctl() for derivation
@@ -2023,7 +2078,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
unsigned int taskin = 0;
unsigned int taskout = 0;
u8 nsect = 0;
- unsigned int timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
+ unsigned int timeout;
unsigned int force_single_sector;
unsigned int transfer_size;
unsigned long task_file_data;
@@ -2153,32 +2208,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
fis.lba_hi,
fis.device);
- switch (fis.command) {
- case ATA_CMD_DOWNLOAD_MICRO:
- /* Change timeout for Download Microcode to 2 minutes */
- timeout = 120000;
- break;
- case ATA_CMD_SEC_ERASE_UNIT:
- /* Change timeout for Security Erase Unit to 4 minutes.*/
- timeout = 240000;
- break;
- case ATA_CMD_STANDBYNOW1:
- /* Change timeout for standby immediate to 10 seconds.*/
- timeout = 10000;
- break;
- case 0xF7:
- case 0xFA:
- /* Change timeout for vendor unique command to 10 secs */
- timeout = 10000;
- break;
- case ATA_CMD_SMART:
- /* Change timeout for vendor unique command to 15 secs */
- timeout = 15000;
- break;
- default:
- timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
- break;
- }
+ mtip_set_timeout(&fis, &timeout);
/* Determine the correct transfer size.*/
if (force_single_sector)
@@ -2295,13 +2325,12 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
{
switch (cmd) {
case HDIO_GET_IDENTITY:
- if (mtip_get_identify(dd->port, (void __user *) arg) < 0) {
- dev_warn(&dd->pdev->dev,
- "Unable to read identity\n");
- return -EIO;
- }
-
+ {
+ if (copy_to_user((void __user *)arg, dd->port->identify,
+ sizeof(u16) * ATA_ID_WORDS))
+ return -EFAULT;
break;
+ }
case HDIO_DRIVE_CMD:
{
u8 drive_command[4];
@@ -2519,7 +2548,7 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
}
/*
- * Sysfs register/status dump.
+ * Sysfs status dump.
*
* @dev Pointer to the device structure, passed by the kernel.
* @attr Pointer to the device_attribute structure passed by the kernel.
@@ -2528,72 +2557,138 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
* return value
* The size, in bytes, of the data copied into buf.
*/
-static ssize_t mtip_hw_show_registers(struct device *dev,
+static ssize_t mtip_hw_show_status(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- u32 group_allocated;
struct driver_data *dd = dev_to_disk(dev)->private_data;
int size = 0;
+
+ if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
+ size += sprintf(buf, "%s", "thermal_shutdown\n");
+ else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
+ size += sprintf(buf, "%s", "write_protect\n");
+ else
+ size += sprintf(buf, "%s", "online\n");
+
+ return size;
+}
+
+static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
+
+static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
+ size_t len, loff_t *offset)
+{
+ struct driver_data *dd = (struct driver_data *)f->private_data;
+ char buf[MTIP_DFS_MAX_BUF_SIZE];
+ u32 group_allocated;
+ int size = *offset;
int n;
- size += sprintf(&buf[size], "S ACTive:\n");
+ if (!len || size)
+ return 0;
+
+ if (size < 0)
+ return -EINVAL;
+
+ size += sprintf(&buf[size], "H/ S ACTive : [ 0x");
- for (n = 0; n < dd->slot_groups; n++)
- size += sprintf(&buf[size], "0x%08x\n",
+ for (n = dd->slot_groups-1; n >= 0; n--)
+ size += sprintf(&buf[size], "%08X ",
readl(dd->port->s_active[n]));
- size += sprintf(&buf[size], "Command Issue:\n");
+ size += sprintf(&buf[size], "]\n");
+ size += sprintf(&buf[size], "H/ Command Issue : [ 0x");
- for (n = 0; n < dd->slot_groups; n++)
- size += sprintf(&buf[size], "0x%08x\n",
+ for (n = dd->slot_groups-1; n >= 0; n--)
+ size += sprintf(&buf[size], "%08X ",
readl(dd->port->cmd_issue[n]));
- size += sprintf(&buf[size], "Allocated:\n");
+ size += sprintf(&buf[size], "]\n");
+ size += sprintf(&buf[size], "H/ Completed : [ 0x");
+
+ for (n = dd->slot_groups-1; n >= 0; n--)
+ size += sprintf(&buf[size], "%08X ",
+ readl(dd->port->completed[n]));
- for (n = 0; n < dd->slot_groups; n++) {
+ size += sprintf(&buf[size], "]\n");
+ size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
+ readl(dd->port->mmio + PORT_IRQ_STAT));
+ size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
+ readl(dd->mmio + HOST_IRQ_STAT));
+ size += sprintf(&buf[size], "\n");
+
+ size += sprintf(&buf[size], "L/ Allocated : [ 0x");
+
+ for (n = dd->slot_groups-1; n >= 0; n--) {
if (sizeof(long) > sizeof(u32))
group_allocated =
dd->port->allocated[n/2] >> (32*(n&1));
else
group_allocated = dd->port->allocated[n];
- size += sprintf(&buf[size], "0x%08x\n",
- group_allocated);
+ size += sprintf(&buf[size], "%08X ", group_allocated);
}
+ size += sprintf(&buf[size], "]\n");
- size += sprintf(&buf[size], "Completed:\n");
+ size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");
- for (n = 0; n < dd->slot_groups; n++)
- size += sprintf(&buf[size], "0x%08x\n",
- readl(dd->port->completed[n]));
+ for (n = dd->slot_groups-1; n >= 0; n--) {
+ if (sizeof(long) > sizeof(u32))
+ group_allocated =
+ dd->port->cmds_to_issue[n/2] >> (32*(n&1));
+ else
+ group_allocated = dd->port->cmds_to_issue[n];
+ size += sprintf(&buf[size], "%08X ", group_allocated);
+ }
+ size += sprintf(&buf[size], "]\n");
- size += sprintf(&buf[size], "PORT IRQ STAT : 0x%08x\n",
- readl(dd->port->mmio + PORT_IRQ_STAT));
- size += sprintf(&buf[size], "HOST IRQ STAT : 0x%08x\n",
- readl(dd->mmio + HOST_IRQ_STAT));
+ *offset = size <= len ? size : len;
+ size = copy_to_user(ubuf, buf, *offset);
+ if (size)
+ return -EFAULT;
- return size;
+ return *offset;
}
-static ssize_t mtip_hw_show_status(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
+ size_t len, loff_t *offset)
{
- struct driver_data *dd = dev_to_disk(dev)->private_data;
- int size = 0;
+ struct driver_data *dd = (struct driver_data *)f->private_data;
+ char buf[MTIP_DFS_MAX_BUF_SIZE];
+ int size = *offset;
- if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
- size += sprintf(buf, "%s", "thermal_shutdown\n");
- else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
- size += sprintf(buf, "%s", "write_protect\n");
- else
- size += sprintf(buf, "%s", "online\n");
+ if (!len || size)
+ return 0;
- return size;
+ if (size < 0)
+ return -EINVAL;
+
+ size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
+ dd->port->flags);
+ size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n",
+ dd->dd_flag);
+
+ *offset = size <= len ? size : len;
+ size = copy_to_user(ubuf, buf, *offset);
+ if (size)
+ return -EFAULT;
+
+ return *offset;
}
-static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
-static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
+static const struct file_operations mtip_regs_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = mtip_hw_read_registers,
+ .llseek = no_llseek,
+};
+
+static const struct file_operations mtip_flags_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = mtip_hw_read_flags,
+ .llseek = no_llseek,
+};
/*
* Create the sysfs related attributes.
@@ -2610,9 +2705,6 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
if (!kobj || !dd)
return -EINVAL;
- if (sysfs_create_file(kobj, &dev_attr_registers.attr))
- dev_warn(&dd->pdev->dev,
- "Error creating 'registers' sysfs entry\n");
if (sysfs_create_file(kobj, &dev_attr_status.attr))
dev_warn(&dd->pdev->dev,
"Error creating 'status' sysfs entry\n");
@@ -2634,12 +2726,39 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
if (!kobj || !dd)
return -EINVAL;
- sysfs_remove_file(kobj, &dev_attr_registers.attr);
sysfs_remove_file(kobj, &dev_attr_status.attr);
return 0;
}
+static int mtip_hw_debugfs_init(struct driver_data *dd)
+{
+ if (!dfs_parent)
+ return -1;
+
+ dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
+ if (IS_ERR_OR_NULL(dd->dfs_node)) {
+ dev_warn(&dd->pdev->dev,
+ "Error creating node %s under debugfs\n",
+ dd->disk->disk_name);
+ dd->dfs_node = NULL;
+ return -1;
+ }
+
+ debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd,
+ &mtip_flags_fops);
+ debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd,
+ &mtip_regs_fops);
+
+ return 0;
+}
+
+static void mtip_hw_debugfs_exit(struct driver_data *dd)
+{
+ debugfs_remove_recursive(dd->dfs_node);
+}
+
+
/*
* Perform any init/resume time hardware setup
*
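Once mtip_hw_debugfs_init() above has created the per-disk nodes, they can be read like ordinary files. A hypothetical userspace reader follows, assuming debugfs is mounted at /sys/kernel/debug and a disk named rssd0; the exact path depends on the actual disk name on your system.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/rssd/rssd0/registers";
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* dump the formatted register view */
	fclose(f);
	return 0;
}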
@@ -3634,7 +3753,10 @@ skip_create_disk:
set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
blk_queue_physical_block_size(dd->queue, 4096);
+ blk_queue_max_hw_sectors(dd->queue, 0xffff);
+ blk_queue_max_segment_size(dd->queue, 0x400000);
blk_queue_io_min(dd->queue, 4096);
+
/*
* write back cache is not supported in the device. FUA depends on
* write back cache support, hence setting flush support to zero.
@@ -3662,6 +3784,7 @@ skip_create_disk:
mtip_hw_sysfs_init(dd, kobj);
kobject_put(kobj);
}
+ mtip_hw_debugfs_init(dd);
if (dd->mtip_svc_handler) {
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@@ -3687,6 +3810,8 @@ start_service_thread:
return rv;
kthread_run_error:
+ mtip_hw_debugfs_exit(dd);
+
/* Delete our gendisk. This also removes the device from /dev */
del_gendisk(dd->disk);
@@ -3737,6 +3862,7 @@ static int mtip_block_remove(struct driver_data *dd)
kobject_put(kobj);
}
}
+ mtip_hw_debugfs_exit(dd);
/*
* Delete our gendisk structure. This also removes the device
@@ -4084,10 +4210,20 @@ static int __init mtip_init(void)
}
mtip_major = error;
+ if (!dfs_parent) {
+ dfs_parent = debugfs_create_dir("rssd", NULL);
+ if (IS_ERR_OR_NULL(dfs_parent)) {
+ printk(KERN_WARNING "Error creating debugfs parent\n");
+ dfs_parent = NULL;
+ }
+ }
+
/* Register our PCI operations. */
error = pci_register_driver(&mtip_pci_driver);
- if (error)
+ if (error) {
+ debugfs_remove(dfs_parent);
unregister_blkdev(mtip_major, MTIP_DRV_NAME);
+ }
return error;
}
@@ -4104,6 +4240,8 @@ static int __init mtip_init(void)
*/
static void __exit mtip_exit(void)
{
+ debugfs_remove_recursive(dfs_parent);
+
/* Release the allocated major block device number. */
unregister_blkdev(mtip_major, MTIP_DRV_NAME);
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 4ef5833..f51fc23 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -26,7 +26,6 @@
#include <linux/ata.h>
#include <linux/interrupt.h>
#include <linux/genhd.h>
-#include <linux/version.h>
/* Offset of Subsystem Device ID in pci configuration space */
#define PCI_SUBSYSTEM_DEVICEID 0x2E
@@ -111,35 +110,39 @@
#define dbg_printk(format, arg...)
#endif
+#define MTIP_DFS_MAX_BUF_SIZE 1024
+
#define __force_bit2int (unsigned int __force)
-/* below are bit numbers in 'flags' defined in mtip_port */
-#define MTIP_PF_IC_ACTIVE_BIT 0 /* pio/ioctl */
-#define MTIP_PF_EH_ACTIVE_BIT 1 /* error handling */
-#define MTIP_PF_SE_ACTIVE_BIT 2 /* secure erase */
-#define MTIP_PF_DM_ACTIVE_BIT 3 /* download microcde */
-#define MTIP_PF_PAUSE_IO ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
+enum {
+ /* below are bit numbers in 'flags' defined in mtip_port */
+ MTIP_PF_IC_ACTIVE_BIT = 0, /* pio/ioctl */
+ MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */
+ MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */
+ MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcode */
+ MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
(1 << MTIP_PF_EH_ACTIVE_BIT) | \
(1 << MTIP_PF_SE_ACTIVE_BIT) | \
- (1 << MTIP_PF_DM_ACTIVE_BIT))
-
-#define MTIP_PF_SVC_THD_ACTIVE_BIT 4
-#define MTIP_PF_ISSUE_CMDS_BIT 5
-#define MTIP_PF_REBUILD_BIT 6
-#define MTIP_PF_SVC_THD_STOP_BIT 8
-
-/* below are bit numbers in 'dd_flag' defined in driver_data */
-#define MTIP_DDF_REMOVE_PENDING_BIT 1
-#define MTIP_DDF_OVER_TEMP_BIT 2
-#define MTIP_DDF_WRITE_PROTECT_BIT 3
-#define MTIP_DDF_STOP_IO ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
+ (1 << MTIP_PF_DM_ACTIVE_BIT)),
+
+ MTIP_PF_SVC_THD_ACTIVE_BIT = 4,
+ MTIP_PF_ISSUE_CMDS_BIT = 5,
+ MTIP_PF_REBUILD_BIT = 6,
+ MTIP_PF_SVC_THD_STOP_BIT = 8,
+
+ /* below are bit numbers in 'dd_flag' defined in driver_data */
+ MTIP_DDF_REMOVE_PENDING_BIT = 1,
+ MTIP_DDF_OVER_TEMP_BIT = 2,
+ MTIP_DDF_WRITE_PROTECT_BIT = 3,
+ MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
(1 << MTIP_DDF_OVER_TEMP_BIT) | \
- (1 << MTIP_DDF_WRITE_PROTECT_BIT))
+ (1 << MTIP_DDF_WRITE_PROTECT_BIT)),
-#define MTIP_DDF_CLEANUP_BIT 5
-#define MTIP_DDF_RESUME_BIT 6
-#define MTIP_DDF_INIT_DONE_BIT 7
-#define MTIP_DDF_REBUILD_FAILED_BIT 8
+ MTIP_DDF_CLEANUP_BIT = 5,
+ MTIP_DDF_RESUME_BIT = 6,
+ MTIP_DDF_INIT_DONE_BIT = 7,
+ MTIP_DDF_REBUILD_FAILED_BIT = 8,
+};
__packed struct smart_attr{
u8 attr_id;
@@ -445,6 +448,8 @@ struct driver_data {
unsigned long dd_flag; /* NOTE: use atomic bit operations on this */
struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
+
+ struct dentry *dfs_node;
};
#endif
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 013c7a5..8f428a8 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -141,7 +141,7 @@ struct rbd_request {
struct rbd_snap {
struct device dev;
const char *name;
- size_t size;
+ u64 size;
struct list_head node;
u64 id;
};
@@ -175,8 +175,7 @@ struct rbd_device {
/* protects updating the header */
struct rw_semaphore header_rwsem;
char snap_name[RBD_MAX_SNAP_NAME_LEN];
- u32 cur_snap; /* index+1 of current snapshot within snap context
- 0 - for the head */
+ u64 snap_id; /* current snapshot id */
int read_only;
struct list_head node;
@@ -241,7 +240,7 @@ static void rbd_put_dev(struct rbd_device *rbd_dev)
put_device(&rbd_dev->dev);
}
-static int __rbd_update_snaps(struct rbd_device *rbd_dev);
+static int __rbd_refresh_header(struct rbd_device *rbd_dev);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
@@ -450,7 +449,9 @@ static void rbd_client_release(struct kref *kref)
struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
dout("rbd_release_client %p\n", rbdc);
+ spin_lock(&rbd_client_list_lock);
list_del(&rbdc->node);
+ spin_unlock(&rbd_client_list_lock);
ceph_destroy_client(rbdc->client);
kfree(rbdc->rbd_opts);
@@ -463,9 +464,7 @@ static void rbd_client_release(struct kref *kref)
*/
static void rbd_put_client(struct rbd_device *rbd_dev)
{
- spin_lock(&rbd_client_list_lock);
kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
- spin_unlock(&rbd_client_list_lock);
rbd_dev->rbd_client = NULL;
}
@@ -487,18 +486,20 @@ static void rbd_coll_release(struct kref *kref)
*/
static int rbd_header_from_disk(struct rbd_image_header *header,
struct rbd_image_header_ondisk *ondisk,
- int allocated_snaps,
+ u32 allocated_snaps,
gfp_t gfp_flags)
{
- int i;
- u32 snap_count;
+ u32 i, snap_count;
if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT)))
return -ENXIO;
snap_count = le32_to_cpu(ondisk->snap_count);
+ if (snap_count > (UINT_MAX - sizeof(struct ceph_snap_context))
+ / sizeof (*ondisk))
+ return -EINVAL;
header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
- snap_count * sizeof (*ondisk),
+ snap_count * sizeof(u64),
gfp_flags);
if (!header->snapc)
return -ENOMEM;
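The new snap_count check above rejects values whose snapshot-context allocation size would overflow before the kmalloc() is attempted. Here is a small sketch of that overflow-guard pattern, assuming illustrative base/element sizes rather than the real rbd on-disk layout.

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Refuse counts for which base + count * elem would wrap. */
static void *alloc_snap_context(size_t base, size_t elem, unsigned int count)
{
	if (count > (UINT_MAX - base) / elem)
		return NULL;			/* would overflow, reject */
	return malloc(base + (size_t)count * elem);
}

int main(void)
{
	void *ok  = alloc_snap_context(64, 8, 16);
	void *bad = alloc_snap_context(64, 8, UINT_MAX);

	printf("sane count allocated: %s\n", ok ? "yes" : "no");
	printf("bogus count rejected: %s\n", bad ? "no" : "yes");
	free(ok);
	return 0;
}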
@@ -506,11 +507,11 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
if (snap_count) {
header->snap_names = kmalloc(header->snap_names_len,
- GFP_KERNEL);
+ gfp_flags);
if (!header->snap_names)
goto err_snapc;
header->snap_sizes = kmalloc(snap_count * sizeof(u64),
- GFP_KERNEL);
+ gfp_flags);
if (!header->snap_sizes)
goto err_names;
} else {
@@ -552,21 +553,6 @@ err_snapc:
return -ENOMEM;
}
-static int snap_index(struct rbd_image_header *header, int snap_num)
-{
- return header->total_snaps - snap_num;
-}
-
-static u64 cur_snap_id(struct rbd_device *rbd_dev)
-{
- struct rbd_image_header *header = &rbd_dev->header;
-
- if (!rbd_dev->cur_snap)
- return 0;
-
- return header->snapc->snaps[snap_index(header, rbd_dev->cur_snap)];
-}
-
static int snap_by_name(struct rbd_image_header *header, const char *snap_name,
u64 *seq, u64 *size)
{
@@ -605,7 +591,7 @@ static int rbd_header_set_snap(struct rbd_device *dev, u64 *size)
snapc->seq = header->snap_seq;
else
snapc->seq = 0;
- dev->cur_snap = 0;
+ dev->snap_id = CEPH_NOSNAP;
dev->read_only = 0;
if (size)
*size = header->image_size;
@@ -613,8 +599,7 @@ static int rbd_header_set_snap(struct rbd_device *dev, u64 *size)
ret = snap_by_name(header, dev->snap_name, &snapc->seq, size);
if (ret < 0)
goto done;
-
- dev->cur_snap = header->total_snaps - ret;
+ dev->snap_id = snapc->seq;
dev->read_only = 1;
}
@@ -935,7 +920,6 @@ static int rbd_do_request(struct request *rq,
layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
layout->fl_stripe_count = cpu_to_le32(1);
layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
- layout->fl_pg_preferred = cpu_to_le32(-1);
layout->fl_pg_pool = cpu_to_le32(dev->poolid);
ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
req, ops);
@@ -993,7 +977,7 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
op = (void *)(replyhead + 1);
rc = le32_to_cpu(replyhead->result);
bytes = le64_to_cpu(op->extent.length);
- read_op = (le32_to_cpu(op->op) == CEPH_OSD_OP_READ);
+ read_op = (le16_to_cpu(op->op) == CEPH_OSD_OP_READ);
dout("rbd_req_cb bytes=%lld readop=%d rc=%d\n", bytes, read_op, rc);
@@ -1168,7 +1152,7 @@ static int rbd_req_read(struct request *rq,
int coll_index)
{
return rbd_do_op(rq, rbd_dev, NULL,
- (snapid ? snapid : CEPH_NOSNAP),
+ snapid,
CEPH_OSD_OP_READ,
CEPH_OSD_FLAG_READ,
2,
@@ -1187,7 +1171,7 @@ static int rbd_req_sync_read(struct rbd_device *dev,
u64 *ver)
{
return rbd_req_sync_op(dev, NULL,
- (snapid ? snapid : CEPH_NOSNAP),
+ snapid,
CEPH_OSD_OP_READ,
CEPH_OSD_FLAG_READ,
NULL,
@@ -1238,7 +1222,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
dout("rbd_watch_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name,
notify_id, (int)opcode);
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rc = __rbd_update_snaps(dev);
+ rc = __rbd_refresh_header(dev);
mutex_unlock(&ctl_mutex);
if (rc)
pr_warning(RBD_DRV_NAME "%d got notification but failed to "
@@ -1521,7 +1505,7 @@ static void rbd_rq_fn(struct request_queue *q)
coll, cur_seg);
else
rbd_req_read(rq, rbd_dev,
- cur_snap_id(rbd_dev),
+ rbd_dev->snap_id,
ofs,
op_size, bio,
coll, cur_seg);
@@ -1592,7 +1576,7 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
{
ssize_t rc;
struct rbd_image_header_ondisk *dh;
- int snap_count = 0;
+ u32 snap_count = 0;
u64 ver;
size_t len;
@@ -1656,7 +1640,7 @@ static int rbd_header_add_snap(struct rbd_device *dev,
struct ceph_mon_client *monc;
/* we should create a snapshot only if we're pointing at the head */
- if (dev->cur_snap)
+ if (dev->snap_id != CEPH_NOSNAP)
return -EINVAL;
monc = &dev->rbd_client->client->monc;
@@ -1683,7 +1667,9 @@ static int rbd_header_add_snap(struct rbd_device *dev,
if (ret < 0)
return ret;
- dev->header.snapc->seq = new_snapid;
+ down_write(&dev->header_rwsem);
+ dev->header.snapc->seq = new_snapid;
+ up_write(&dev->header_rwsem);
return 0;
bad:
@@ -1703,7 +1689,7 @@ static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev)
/*
* only read the first part of the ondisk header, without the snaps info
*/
-static int __rbd_update_snaps(struct rbd_device *rbd_dev)
+static int __rbd_refresh_header(struct rbd_device *rbd_dev)
{
int ret;
struct rbd_image_header h;
@@ -1890,7 +1876,7 @@ static ssize_t rbd_image_refresh(struct device *dev,
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rc = __rbd_update_snaps(rbd_dev);
+ rc = __rbd_refresh_header(rbd_dev);
if (rc < 0)
ret = rc;
@@ -1949,7 +1935,7 @@ static ssize_t rbd_snap_size_show(struct device *dev,
{
struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
- return sprintf(buf, "%zd\n", snap->size);
+ return sprintf(buf, "%llu\n", (unsigned long long)snap->size);
}
static ssize_t rbd_snap_id_show(struct device *dev,
@@ -1958,7 +1944,7 @@ static ssize_t rbd_snap_id_show(struct device *dev,
{
struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
- return sprintf(buf, "%llu\n", (unsigned long long) snap->id);
+ return sprintf(buf, "%llu\n", (unsigned long long)snap->id);
}
static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
@@ -2173,7 +2159,7 @@ static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
rbd_dev->header.obj_version);
if (ret == -ERANGE) {
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
- rc = __rbd_update_snaps(rbd_dev);
+ rc = __rbd_refresh_header(rbd_dev);
mutex_unlock(&ctl_mutex);
if (rc < 0)
return rc;
@@ -2558,7 +2544,7 @@ static ssize_t rbd_snap_add(struct device *dev,
if (ret < 0)
goto err_unlock;
- ret = __rbd_update_snaps(rbd_dev);
+ ret = __rbd_refresh_header(rbd_dev);
if (ret < 0)
goto err_unlock;
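The rbd_snap_size_show() change above switches the format from "%zd" to "%llu" with an explicit (unsigned long long) cast: snap->size is a u64, whose underlying C type differs between 32-bit and 64-bit builds, so the cast keeps the format string portable. A minimal userspace sketch of the same printing idiom (the value below is made up for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t size = 1ULL << 33;	/* e.g. an 8 GiB snapshot size */

	/* u64/uint64_t may be 'long' or 'long long' depending on the arch,
	 * so cast explicitly and always print with %llu. */
	printf("%llu\n", (unsigned long long)size);
	return 0;
}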
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index aa27120..9a72277 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -513,6 +513,44 @@ static void process_page(unsigned long data)
}
}
+struct mm_plug_cb {
+ struct blk_plug_cb cb;
+ struct cardinfo *card;
+};
+
+static void mm_unplug(struct blk_plug_cb *cb)
+{
+ struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb);
+
+ spin_lock_irq(&mmcb->card->lock);
+ activate(mmcb->card);
+ spin_unlock_irq(&mmcb->card->lock);
+ kfree(mmcb);
+}
+
+static int mm_check_plugged(struct cardinfo *card)
+{
+ struct blk_plug *plug = current->plug;
+ struct mm_plug_cb *mmcb;
+
+ if (!plug)
+ return 0;
+
+ list_for_each_entry(mmcb, &plug->cb_list, cb.list) {
+ if (mmcb->cb.callback == mm_unplug && mmcb->card == card)
+ return 1;
+ }
+ /* Not currently on the callback list */
+ mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC);
+ if (!mmcb)
+ return 0;
+
+ mmcb->card = card;
+ mmcb->cb.callback = mm_unplug;
+ list_add(&mmcb->cb.list, &plug->cb_list);
+ return 1;
+}
+
static void mm_make_request(struct request_queue *q, struct bio *bio)
{
struct cardinfo *card = q->queuedata;
@@ -523,6 +561,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
*card->biotail = bio;
bio->bi_next = NULL;
card->biotail = &bio->bi_next;
+ if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+ activate(card);
spin_unlock_irq(&card->lock);
return;
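The umem hunk above defers activate() to a per-plug callback: mm_check_plugged() registers mm_unplug() on the current task's blk_plug once per plug, and mm_make_request() only kicks the hardware immediately for REQ_SYNC bios or when no plug callback could be registered. A sketch of the same pattern for a hypothetical driver context (struct mydev, mydev_fire() and the other mydev_* names are illustrative, not part of this patch):

#include <linux/blkdev.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>

struct mydev {
	/* driver state */
};

static void mydev_fire(struct mydev *dev)
{
	/* start I/O on the device (stub for the sketch) */
}

struct mydev_plug_cb {
	struct blk_plug_cb cb;
	struct mydev *dev;
};

static void mydev_unplug(struct blk_plug_cb *cb)
{
	struct mydev_plug_cb *mcb = container_of(cb, struct mydev_plug_cb, cb);

	mydev_fire(mcb->dev);		/* kick the hardware once, at unplug time */
	kfree(mcb);
}

/* Returns 1 if work may be deferred to the unplug callback, 0 otherwise. */
static int mydev_check_plugged(struct mydev *dev)
{
	struct blk_plug *plug = current->plug;
	struct mydev_plug_cb *mcb;

	if (!plug)
		return 0;

	list_for_each_entry(mcb, &plug->cb_list, cb.list)
		if (mcb->cb.callback == mydev_unplug && mcb->dev == dev)
			return 1;	/* already registered for this plug */

	mcb = kmalloc(sizeof(*mcb), GFP_ATOMIC);
	if (!mcb)
		return 0;		/* fall back to immediate submission */

	mcb->dev = dev;
	mcb->cb.callback = mydev_unplug;
	list_add(&mcb->cb.list, &plug->cb_list);
	return 1;
}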
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 773cf27..9ad3b5e 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -257,6 +257,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
break;
case BLKIF_OP_DISCARD:
dst->u.discard.flag = src->u.discard.flag;
+ dst->u.discard.id = src->u.discard.id;
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
@@ -287,6 +288,7 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
break;
case BLKIF_OP_DISCARD:
dst->u.discard.flag = src->u.discard.flag;
+ dst->u.discard.id = src->u.discard.id;
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 4e86393..e4fb337 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -141,14 +141,36 @@ static int get_id_from_freelist(struct blkfront_info *info)
return free;
}
-static void add_id_to_freelist(struct blkfront_info *info,
+static int add_id_to_freelist(struct blkfront_info *info,
unsigned long id)
{
+ if (info->shadow[id].req.u.rw.id != id)
+ return -EINVAL;
+ if (info->shadow[id].request == NULL)
+ return -EINVAL;
info->shadow[id].req.u.rw.id = info->shadow_free;
info->shadow[id].request = NULL;
info->shadow_free = id;
+ return 0;
}
+static const char *op_name(int op)
+{
+ static const char *const names[] = {
+ [BLKIF_OP_READ] = "read",
+ [BLKIF_OP_WRITE] = "write",
+ [BLKIF_OP_WRITE_BARRIER] = "barrier",
+ [BLKIF_OP_FLUSH_DISKCACHE] = "flush",
+ [BLKIF_OP_DISCARD] = "discard" };
+
+ if (op < 0 || op >= ARRAY_SIZE(names))
+ return "unknown";
+
+ if (!names[op])
+ return "reserved";
+
+ return names[op];
+}
static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
unsigned int end = minor + nr;
@@ -526,6 +548,14 @@ static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
return 0;
}
+static char *encode_disk_name(char *ptr, unsigned int n)
+{
+ if (n >= 26)
+ ptr = encode_disk_name(ptr, n / 26 - 1);
+ *ptr = 'a' + n % 26;
+ return ptr + 1;
+}
+
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
struct blkfront_info *info,
u16 vdisk_info, u16 sector_size)
@@ -536,6 +566,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
unsigned int offset;
int minor;
int nr_parts;
+ char *ptr;
BUG_ON(info->gd != NULL);
BUG_ON(info->rq != NULL);
@@ -560,7 +591,11 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
"emulated IDE disks,\n\t choose an xvd device name"
"from xvde on\n", info->vdevice);
}
- err = -ENODEV;
+ if (minor >> MINORBITS) {
+ pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
+ info->vdevice, minor);
+ return -ENODEV;
+ }
if ((minor % nr_parts) == 0)
nr_minors = nr_parts;
@@ -574,23 +609,14 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
if (gd == NULL)
goto release;
- if (nr_minors > 1) {
- if (offset < 26)
- sprintf(gd->disk_name, "%s%c", DEV_NAME, 'a' + offset);
- else
- sprintf(gd->disk_name, "%s%c%c", DEV_NAME,
- 'a' + ((offset / 26)-1), 'a' + (offset % 26));
- } else {
- if (offset < 26)
- sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
- 'a' + offset,
- minor & (nr_parts - 1));
- else
- sprintf(gd->disk_name, "%s%c%c%d", DEV_NAME,
- 'a' + ((offset / 26) - 1),
- 'a' + (offset % 26),
- minor & (nr_parts - 1));
- }
+ strcpy(gd->disk_name, DEV_NAME);
+ ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
+ BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
+ if (nr_minors > 1)
+ *ptr = 0;
+ else
+ snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
+ "%d", minor & (nr_parts - 1));
gd->major = XENVBD_MAJOR;
gd->first_minor = minor;
@@ -742,20 +768,36 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
bret = RING_GET_RESPONSE(&info->ring, i);
id = bret->id;
+ /*
+ * The backend has messed up and given us an id that we would
+ * never have given to it (we stamp it up to BLK_RING_SIZE;
+ * look in get_id_from_freelist).
+ */
+ if (id >= BLK_RING_SIZE) {
+ WARN(1, "%s: response to %s has incorrect id (%ld)\n",
+ info->gd->disk_name, op_name(bret->operation), id);
+ /* We can't safely get the 'struct request' as
+ * the id is busted. */
+ continue;
+ }
req = info->shadow[id].request;
if (bret->operation != BLKIF_OP_DISCARD)
blkif_completion(&info->shadow[id]);
- add_id_to_freelist(info, id);
+ if (add_id_to_freelist(info, id)) {
+ WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
+ info->gd->disk_name, op_name(bret->operation), id);
+ continue;
+ }
error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
switch (bret->operation) {
case BLKIF_OP_DISCARD:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
struct request_queue *rq = info->rq;
- printk(KERN_WARNING "blkfront: %s: discard op failed\n",
- info->gd->disk_name);
+ printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+ info->gd->disk_name, op_name(bret->operation));
error = -EOPNOTSUPP;
info->feature_discard = 0;
info->feature_secdiscard = 0;
@@ -767,18 +809,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
case BLKIF_OP_FLUSH_DISKCACHE:
case BLKIF_OP_WRITE_BARRIER:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
- printk(KERN_WARNING "blkfront: %s: write %s op failed\n",
- info->flush_op == BLKIF_OP_WRITE_BARRIER ?
- "barrier" : "flush disk cache",
- info->gd->disk_name);
+ printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+ info->gd->disk_name, op_name(bret->operation));
error = -EOPNOTSUPP;
}
if (unlikely(bret->status == BLKIF_RSP_ERROR &&
info->shadow[id].req.u.rw.nr_segments == 0)) {
- printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
- info->flush_op == BLKIF_OP_WRITE_BARRIER ?
- "barrier" : "flush disk cache",
- info->gd->disk_name);
+ printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
+ info->gd->disk_name, op_name(bret->operation));
error = -EOPNOTSUPP;
}
if (unlikely(error)) {
@@ -1496,7 +1534,9 @@ module_init(xlblk_init);
static void __exit xlblk_exit(void)
{
- return xenbus_unregister_driver(&blkfront_driver);
+ xenbus_unregister_driver(&blkfront_driver);
+ unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
+ kfree(minors);
}
module_exit(xlblk_exit);
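encode_disk_name() above replaces the nested sprintf() branches with a recursive helper that emits spreadsheet-style suffixes after DEV_NAME ("xvd" in blkfront). A standalone userspace sketch that reuses the helper to show the naming (illustrative only):

#include <stdio.h>
#include <string.h>

/* Same recursive helper as added to xen-blkfront above. */
static char *encode_disk_name(char *ptr, unsigned int n)
{
	if (n >= 26)
		ptr = encode_disk_name(ptr, n / 26 - 1);
	*ptr = 'a' + n % 26;
	return ptr + 1;
}

int main(void)
{
	unsigned int offsets[] = { 0, 25, 26, 27, 701, 702 };
	unsigned int i;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
		char name[32] = "xvd";	/* DEV_NAME in blkfront */
		char *end = encode_disk_name(name + 3, offsets[i]);

		*end = '\0';
		printf("offset %u -> %s\n", offsets[i], name);
	}
	return 0;
}

Offsets 0, 25, 26, 27, 701 and 702 come out as xvda, xvdz, xvdaa, xvdab, xvdzz and xvdaaa.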
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 5ccf142..e9f203e 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -81,6 +81,18 @@ config BT_HCIUART_LL
Say Y here to compile support for HCILL protocol.
+config BT_HCIUART_3WIRE
+ bool "Three-wire UART (H5) protocol support"
+ depends on BT_HCIUART
+ help
+ The HCI Three-wire UART Transport Layer makes it possible to
+ use the Bluetooth HCI over a serial port interface. The HCI
+ Three-wire UART Transport Layer assumes that the UART
+ communication may have bit errors, overrun errors or burst
+ errors, thereby making CTS/RTS lines unnecessary.
+
+ Say Y here to compile support for Three-wire UART protocol.
+
config BT_HCIBCM203X
tristate "HCI BCM203x USB driver"
depends on USB
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index f4460f4..4afae20 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -28,4 +28,5 @@ hci_uart-$(CONFIG_BT_HCIUART_H4) += hci_h4.o
hci_uart-$(CONFIG_BT_HCIUART_BCSP) += hci_bcsp.o
hci_uart-$(CONFIG_BT_HCIUART_LL) += hci_ll.o
hci_uart-$(CONFIG_BT_HCIUART_ATH3K) += hci_ath.o
+hci_uart-$(CONFIG_BT_HCIUART_3WIRE) += hci_h5.o
hci_uart-objs := $(hci_uart-y)
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index ad591bd..10308cd 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -63,6 +63,7 @@ static struct usb_device_id ath3k_table[] = {
/* Atheros AR3011 with sflash firmware*/
{ USB_DEVICE(0x0CF3, 0x3002) },
+ { USB_DEVICE(0x0CF3, 0xE019) },
{ USB_DEVICE(0x13d3, 0x3304) },
{ USB_DEVICE(0x0930, 0x0215) },
{ USB_DEVICE(0x0489, 0xE03D) },
@@ -77,6 +78,7 @@ static struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x04CA, 0x3005) },
{ USB_DEVICE(0x13d3, 0x3362) },
{ USB_DEVICE(0x0CF3, 0xE004) },
+ { USB_DEVICE(0x0930, 0x0219) },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
@@ -101,6 +103,7 @@ static struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 1fcd923..66c3a67 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -231,12 +231,12 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
}
do {
- register unsigned int iobase = info->p_dev->resource[0]->start;
- register unsigned int offset;
- register unsigned char command;
- register unsigned long ready_bit;
+ unsigned int iobase = info->p_dev->resource[0]->start;
+ unsigned int offset;
+ unsigned char command;
+ unsigned long ready_bit;
register struct sk_buff *skb;
- register int len;
+ int len;
clear_bit(XMIT_WAKEUP, &(info->tx_state));
@@ -621,7 +621,6 @@ static int bluecard_hci_flush(struct hci_dev *hdev)
static int bluecard_hci_open(struct hci_dev *hdev)
{
bluecard_info_t *info = hci_get_drvdata(hdev);
- unsigned int iobase = info->p_dev->resource[0]->start;
if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE);
@@ -630,6 +629,8 @@ static int bluecard_hci_open(struct hci_dev *hdev)
return 0;
if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) {
+ unsigned int iobase = info->p_dev->resource[0]->start;
+
/* Enable LED */
outb(0x08 | 0x20, iobase + 0x30);
}
@@ -641,7 +642,6 @@ static int bluecard_hci_open(struct hci_dev *hdev)
static int bluecard_hci_close(struct hci_dev *hdev)
{
bluecard_info_t *info = hci_get_drvdata(hdev);
- unsigned int iobase = info->p_dev->resource[0]->start;
if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
return 0;
@@ -649,6 +649,8 @@ static int bluecard_hci_close(struct hci_dev *hdev)
bluecard_hci_flush(hdev);
if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) {
+ unsigned int iobase = info->p_dev->resource[0]->start;
+
/* Disable LED */
outb(0x00, iobase + 0x30);
}
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 609861a..29caaed 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -470,7 +470,7 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
hdev->flush = bpa10x_flush;
hdev->send = bpa10x_send_frame;
- set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
err = hci_register_dev(hdev);
if (err < 0) {
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 308c859..8925b6d 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -186,9 +186,9 @@ static void bt3c_write_wakeup(bt3c_info_t *info)
return;
do {
- register unsigned int iobase = info->p_dev->resource[0]->start;
+ unsigned int iobase = info->p_dev->resource[0]->start;
register struct sk_buff *skb;
- register int len;
+ int len;
if (!pcmcia_dev_present(info->p_dev))
break;
@@ -664,7 +664,7 @@ static int bt3c_check_config(struct pcmcia_device *p_dev, void *priv_data)
{
int *try = priv_data;
- if (try == 0)
+ if (!try)
p_dev->io_lines = 16;
if ((p_dev->resource[0]->end != 8) || (p_dev->resource[0]->start == 0))
diff --git a/drivers/bluetooth/btmrvl_drv.h b/drivers/bluetooth/btmrvl_drv.h
index 94f2d65..27068d1 100644
--- a/drivers/bluetooth/btmrvl_drv.h
+++ b/drivers/bluetooth/btmrvl_drv.h
@@ -136,7 +136,7 @@ int btmrvl_remove_card(struct btmrvl_private *priv);
void btmrvl_interrupt(struct btmrvl_private *priv);
-void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
+bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb);
int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb);
int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, int subcmd);
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 681ca9d..3a4343b 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -44,23 +44,35 @@ void btmrvl_interrupt(struct btmrvl_private *priv)
}
EXPORT_SYMBOL_GPL(btmrvl_interrupt);
-void btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
+bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
{
struct hci_event_hdr *hdr = (void *) skb->data;
- struct hci_ev_cmd_complete *ec;
- u16 opcode, ocf;
if (hdr->evt == HCI_EV_CMD_COMPLETE) {
+ struct hci_ev_cmd_complete *ec;
+ u16 opcode, ocf, ogf;
+
ec = (void *) (skb->data + HCI_EVENT_HDR_SIZE);
opcode = __le16_to_cpu(ec->opcode);
ocf = hci_opcode_ocf(opcode);
+ ogf = hci_opcode_ogf(opcode);
+
if (ocf == BT_CMD_MODULE_CFG_REQ &&
priv->btmrvl_dev.sendcmdflag) {
priv->btmrvl_dev.sendcmdflag = false;
priv->adapter->cmd_complete = true;
wake_up_interruptible(&priv->adapter->cmd_wait_q);
}
+
+ if (ogf == OGF) {
+ BT_DBG("vendor event skipped: ogf 0x%4.4x ocf 0x%4.4x",
+ ogf, ocf);
+ kfree_skb(skb);
+ return false;
+ }
}
+
+ return true;
}
EXPORT_SYMBOL_GPL(btmrvl_check_evtpkt);
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index a853244..6a9e971 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -110,6 +110,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8787 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
+ /* Marvell SD8787 Bluetooth AMP device */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911B),
+ .driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
/* Marvell SD8797 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
@@ -562,10 +565,13 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
skb_put(skb, buf_len);
skb_pull(skb, SDIO_HEADER_LEN);
- if (type == HCI_EVENT_PKT)
- btmrvl_check_evtpkt(priv, skb);
+ if (type == HCI_EVENT_PKT) {
+ if (btmrvl_check_evtpkt(priv, skb))
+ hci_recv_frame(skb);
+ } else {
+ hci_recv_frame(skb);
+ }
- hci_recv_frame(skb);
hdev->stat.byte_rx += buf_len;
break;
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index c4fc2f3..21e803a 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -140,9 +140,9 @@ static void btuart_write_wakeup(btuart_info_t *info)
}
do {
- register unsigned int iobase = info->p_dev->resource[0]->start;
+ unsigned int iobase = info->p_dev->resource[0]->start;
register struct sk_buff *skb;
- register int len;
+ int len;
clear_bit(XMIT_WAKEUP, &(info->tx_state));
@@ -593,7 +593,7 @@ static int btuart_check_config(struct pcmcia_device *p_dev, void *priv_data)
{
int *try = priv_data;
- if (try == 0)
+ if (!try)
p_dev->io_lines = 16;
if ((p_dev->resource[0]->end != 8) || (p_dev->resource[0]->start == 0))
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index c9463af..e272214 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -21,15 +21,7 @@
*
*/
-#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/skbuff.h>
-
#include <linux/usb.h>
#include <net/bluetooth/bluetooth.h>
@@ -125,6 +117,7 @@ static struct usb_device_id blacklist_table[] = {
/* Atheros 3011 with sflash firmware */
{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
@@ -139,6 +132,7 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@ -1026,7 +1020,7 @@ static int btusb_probe(struct usb_interface *intf,
data->isoc = usb_ifnum_to_if(data->udev, 1);
if (!reset)
- set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) {
if (!disable_scofix)
@@ -1038,7 +1032,7 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_DIGIANSWER) {
data->cmdreq_type = USB_TYPE_VENDOR;
- set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
}
if (id->driver_info & BTUSB_CSR) {
@@ -1046,7 +1040,7 @@ static int btusb_probe(struct usb_interface *intf,
/* Old firmware would otherwise execute USB reset */
if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117)
- set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
}
if (id->driver_info & BTUSB_SNIFFER) {
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 6e8d961..97a7784 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -144,9 +144,9 @@ static void dtl1_write_wakeup(dtl1_info_t *info)
}
do {
- register unsigned int iobase = info->p_dev->resource[0]->start;
+ unsigned int iobase = info->p_dev->resource[0]->start;
register struct sk_buff *skb;
- register int len;
+ int len;
clear_bit(XMIT_WAKEUP, &(info->tx_state));
@@ -586,29 +586,31 @@ static int dtl1_confcheck(struct pcmcia_device *p_dev, void *priv_data)
static int dtl1_config(struct pcmcia_device *link)
{
dtl1_info_t *info = link->priv;
- int i;
+ int ret;
/* Look for a generic full-sized window */
link->resource[0]->end = 8;
- if (pcmcia_loop_config(link, dtl1_confcheck, NULL) < 0)
+ ret = pcmcia_loop_config(link, dtl1_confcheck, NULL);
+ if (ret)
goto failed;
- i = pcmcia_request_irq(link, dtl1_interrupt);
- if (i != 0)
+ ret = pcmcia_request_irq(link, dtl1_interrupt);
+ if (ret)
goto failed;
- i = pcmcia_enable_device(link);
- if (i != 0)
+ ret = pcmcia_enable_device(link);
+ if (ret)
goto failed;
- if (dtl1_open(info) != 0)
+ ret = dtl1_open(info);
+ if (ret)
goto failed;
return 0;
failed:
dtl1_detach(link);
- return -ENODEV;
+ return ret;
}
static const struct pcmcia_device_id dtl1_ids[] = {
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 661a8dc..57e502e 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -552,7 +552,7 @@ static u16 bscp_get_crc(struct bcsp_struct *bcsp)
static int bcsp_recv(struct hci_uart *hu, void *data, int count)
{
struct bcsp_struct *bcsp = hu->priv;
- register unsigned char *ptr;
+ unsigned char *ptr;
BT_DBG("hu %p count %d rx_state %d rx_count %ld",
hu, count, bcsp->rx_state, bcsp->rx_count);
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 7483294..c60623f 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -126,7 +126,7 @@ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
static inline int h4_check_data_len(struct h4_struct *h4, int len)
{
- register int room = skb_tailroom(h4->rx_skb);
+ int room = skb_tailroom(h4->rx_skb);
BT_DBG("len %d room %d", len, room);
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
new file mode 100644
index 0000000..b6154d5
--- /dev/null
+++ b/drivers/bluetooth/hci_h5.c
@@ -0,0 +1,747 @@
+/*
+ *
+ * Bluetooth HCI Three-wire UART driver
+ *
+ * Copyright (C) 2012 Intel Corporation
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_uart.h"
+
+#define HCI_3WIRE_ACK_PKT 0
+#define HCI_3WIRE_LINK_PKT 15
+
+/* Sliding window size */
+#define H5_TX_WIN_MAX 4
+
+#define H5_ACK_TIMEOUT msecs_to_jiffies(250)
+#define H5_SYNC_TIMEOUT msecs_to_jiffies(100)
+
+/*
+ * Maximum Three-wire packet:
+ * 4 byte header + max value for 12-bit length + 2 bytes for CRC
+ */
+#define H5_MAX_LEN (4 + 0xfff + 2)
+
+/* Convenience macros for reading Three-wire header values */
+#define H5_HDR_SEQ(hdr) ((hdr)[0] & 0x07)
+#define H5_HDR_ACK(hdr) (((hdr)[0] >> 3) & 0x07)
+#define H5_HDR_CRC(hdr) (((hdr)[0] >> 6) & 0x01)
+#define H5_HDR_RELIABLE(hdr) (((hdr)[0] >> 7) & 0x01)
+#define H5_HDR_PKT_TYPE(hdr) ((hdr)[1] & 0x0f)
+#define H5_HDR_LEN(hdr) ((((hdr)[1] >> 4) & 0xff) + ((hdr)[2] << 4))
+
+#define SLIP_DELIMITER 0xc0
+#define SLIP_ESC 0xdb
+#define SLIP_ESC_DELIM 0xdc
+#define SLIP_ESC_ESC 0xdd
+
+/* H5 state flags */
+enum {
+ H5_RX_ESC, /* SLIP escape mode */
+ H5_TX_ACK_REQ, /* Pending ack to send */
+};
+
+struct h5 {
+ struct sk_buff_head unack; /* Unack'ed packets queue */
+ struct sk_buff_head rel; /* Reliable packets queue */
+ struct sk_buff_head unrel; /* Unreliable packets queue */
+
+ unsigned long flags;
+
+ struct sk_buff *rx_skb; /* Receive buffer */
+ size_t rx_pending; /* Expecting more bytes */
+ u8 rx_ack; /* Last ack number received */
+
+ int (*rx_func) (struct hci_uart *hu, u8 c);
+
+ struct timer_list timer; /* Retransmission timer */
+
+ u8 tx_seq; /* Next seq number to send */
+ u8 tx_ack; /* Next ack number to send */
+ u8 tx_win; /* Sliding window size */
+
+ enum {
+ H5_UNINITIALIZED,
+ H5_INITIALIZED,
+ H5_ACTIVE,
+ } state;
+
+ enum {
+ H5_AWAKE,
+ H5_SLEEPING,
+ H5_WAKING_UP,
+ } sleep;
+};
+
+static void h5_reset_rx(struct h5 *h5);
+
+static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
+{
+ struct h5 *h5 = hu->priv;
+ struct sk_buff *nskb;
+
+ nskb = alloc_skb(3, GFP_ATOMIC);
+ if (!nskb)
+ return;
+
+ bt_cb(nskb)->pkt_type = HCI_3WIRE_LINK_PKT;
+
+ memcpy(skb_put(nskb, len), data, len);
+
+ skb_queue_tail(&h5->unrel, nskb);
+}
+
+static u8 h5_cfg_field(struct h5 *h5)
+{
+ u8 field = 0;
+
+ /* Sliding window size (first 3 bits) */
+ field |= (h5->tx_win & 7);
+
+ return field;
+}
+
+static void h5_timed_event(unsigned long arg)
+{
+ const unsigned char sync_req[] = { 0x01, 0x7e };
+ unsigned char conf_req[] = { 0x03, 0xfc, 0x01 };
+ struct hci_uart *hu = (struct hci_uart *) arg;
+ struct h5 *h5 = hu->priv;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ BT_DBG("%s", hu->hdev->name);
+
+ if (h5->state == H5_UNINITIALIZED)
+ h5_link_control(hu, sync_req, sizeof(sync_req));
+
+ if (h5->state == H5_INITIALIZED) {
+ conf_req[2] = h5_cfg_field(h5);
+ h5_link_control(hu, conf_req, sizeof(conf_req));
+ }
+
+ if (h5->state != H5_ACTIVE) {
+ mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
+ goto wakeup;
+ }
+
+ if (h5->sleep != H5_AWAKE) {
+ h5->sleep = H5_SLEEPING;
+ goto wakeup;
+ }
+
+ BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);
+
+ spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);
+
+ while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
+ h5->tx_seq = (h5->tx_seq - 1) & 0x07;
+ skb_queue_head(&h5->rel, skb);
+ }
+
+ spin_unlock_irqrestore(&h5->unack.lock, flags);
+
+wakeup:
+ hci_uart_tx_wakeup(hu);
+}
+
+static int h5_open(struct hci_uart *hu)
+{
+ struct h5 *h5;
+ const unsigned char sync[] = { 0x01, 0x7e };
+
+ BT_DBG("hu %p", hu);
+
+ h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
+ if (!h5)
+ return -ENOMEM;
+
+ hu->priv = h5;
+
+ skb_queue_head_init(&h5->unack);
+ skb_queue_head_init(&h5->rel);
+ skb_queue_head_init(&h5->unrel);
+
+ h5_reset_rx(h5);
+
+ init_timer(&h5->timer);
+ h5->timer.function = h5_timed_event;
+ h5->timer.data = (unsigned long) hu;
+
+ h5->tx_win = H5_TX_WIN_MAX;
+
+ set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);
+
+ /* Send initial sync request */
+ h5_link_control(hu, sync, sizeof(sync));
+ mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
+
+ return 0;
+}
+
+static int h5_close(struct hci_uart *hu)
+{
+ struct h5 *h5 = hu->priv;
+
+ skb_queue_purge(&h5->unack);
+ skb_queue_purge(&h5->rel);
+ skb_queue_purge(&h5->unrel);
+
+ del_timer(&h5->timer);
+
+ kfree(h5);
+
+ return 0;
+}
+
+static void h5_pkt_cull(struct h5 *h5)
+{
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+ int i, to_remove;
+ u8 seq;
+
+ spin_lock_irqsave(&h5->unack.lock, flags);
+
+ to_remove = skb_queue_len(&h5->unack);
+ if (to_remove == 0)
+ goto unlock;
+
+ seq = h5->tx_seq;
+
+ while (to_remove > 0) {
+ if (h5->rx_ack == seq)
+ break;
+
+ to_remove--;
+ seq = (seq - 1) % 8;
+ }
+
+ if (seq != h5->rx_ack)
+ BT_ERR("Controller acked invalid packet");
+
+ i = 0;
+ skb_queue_walk_safe(&h5->unack, skb, tmp) {
+ if (i++ >= to_remove)
+ break;
+
+ __skb_unlink(skb, &h5->unack);
+ kfree_skb(skb);
+ }
+
+ if (skb_queue_empty(&h5->unack))
+ del_timer(&h5->timer);
+
+unlock:
+ spin_unlock_irqrestore(&h5->unack.lock, flags);
+}
+
+static void h5_handle_internal_rx(struct hci_uart *hu)
+{
+ struct h5 *h5 = hu->priv;
+ const unsigned char sync_req[] = { 0x01, 0x7e };
+ const unsigned char sync_rsp[] = { 0x02, 0x7d };
+ unsigned char conf_req[] = { 0x03, 0xfc, 0x01 };
+ const unsigned char conf_rsp[] = { 0x04, 0x7b };
+ const unsigned char wakeup_req[] = { 0x05, 0xfa };
+ const unsigned char woken_req[] = { 0x06, 0xf9 };
+ const unsigned char sleep_req[] = { 0x07, 0x78 };
+ const unsigned char *hdr = h5->rx_skb->data;
+ const unsigned char *data = &h5->rx_skb->data[4];
+
+ BT_DBG("%s", hu->hdev->name);
+
+ if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
+ return;
+
+ if (H5_HDR_LEN(hdr) < 2)
+ return;
+
+ conf_req[2] = h5_cfg_field(h5);
+
+ if (memcmp(data, sync_req, 2) == 0) {
+ h5_link_control(hu, sync_rsp, 2);
+ } else if (memcmp(data, sync_rsp, 2) == 0) {
+ h5->state = H5_INITIALIZED;
+ h5_link_control(hu, conf_req, 3);
+ } else if (memcmp(data, conf_req, 2) == 0) {
+ h5_link_control(hu, conf_rsp, 2);
+ h5_link_control(hu, conf_req, 3);
+ } else if (memcmp(data, conf_rsp, 2) == 0) {
+ if (H5_HDR_LEN(hdr) > 2)
+ h5->tx_win = (data[2] & 7);
+ BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
+ h5->state = H5_ACTIVE;
+ hci_uart_init_ready(hu);
+ return;
+ } else if (memcmp(data, sleep_req, 2) == 0) {
+ BT_DBG("Peer went to sleep");
+ h5->sleep = H5_SLEEPING;
+ return;
+ } else if (memcmp(data, woken_req, 2) == 0) {
+ BT_DBG("Peer woke up");
+ h5->sleep = H5_AWAKE;
+ } else if (memcmp(data, wakeup_req, 2) == 0) {
+ BT_DBG("Peer requested wakeup");
+ h5_link_control(hu, woken_req, 2);
+ h5->sleep = H5_AWAKE;
+ } else {
+ BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
+ return;
+ }
+
+ hci_uart_tx_wakeup(hu);
+}
+
+static void h5_complete_rx_pkt(struct hci_uart *hu)
+{
+ struct h5 *h5 = hu->priv;
+ const unsigned char *hdr = h5->rx_skb->data;
+
+ if (H5_HDR_RELIABLE(hdr)) {
+ h5->tx_ack = (h5->tx_ack + 1) % 8;
+ set_bit(H5_TX_ACK_REQ, &h5->flags);
+ hci_uart_tx_wakeup(hu);
+ }
+
+ h5->rx_ack = H5_HDR_ACK(hdr);
+
+ h5_pkt_cull(h5);
+
+ switch (H5_HDR_PKT_TYPE(hdr)) {
+ case HCI_EVENT_PKT:
+ case HCI_ACLDATA_PKT:
+ case HCI_SCODATA_PKT:
+ bt_cb(h5->rx_skb)->pkt_type = H5_HDR_PKT_TYPE(hdr);
+
+ /* Remove Three-wire header */
+ skb_pull(h5->rx_skb, 4);
+
+ hci_recv_frame(h5->rx_skb);
+ h5->rx_skb = NULL;
+
+ break;
+
+ default:
+ h5_handle_internal_rx(hu);
+ break;
+ }
+
+ h5_reset_rx(h5);
+}
+
+static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
+{
+ struct h5 *h5 = hu->priv;
+
+ h5_complete_rx_pkt(hu);
+ h5_reset_rx(h5);
+
+ return 0;
+}
+
+static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
+{
+ struct h5 *h5 = hu->priv;
+ const unsigned char *hdr = h5->rx_skb->data;
+
+ if (H5_HDR_CRC(hdr)) {
+ h5->rx_func = h5_rx_crc;
+ h5->rx_pending = 2;
+ } else {
+ h5_complete_rx_pkt(hu);
+ h5_reset_rx(h5);
+ }
+
+ return 0;
+}
+
+static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
+{
+ struct h5 *h5 = hu->priv;
+ const unsigned char *hdr = h5->rx_skb->data;
+
+ BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
+ hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
+ H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
+ H5_HDR_LEN(hdr));
+
+ if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
+ BT_ERR("Invalid header checksum");
+ h5_reset_rx(h5);
+ return 0;
+ }
+
+ if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
+ BT_ERR("Out-of-order packet arrived (%u != %u)",
+ H5_HDR_SEQ(hdr), h5->tx_ack);
+ h5_reset_rx(h5);
+ return 0;
+ }
+
+ if (h5->state != H5_ACTIVE &&
+ H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
+ BT_ERR("Non-link packet received in non-active state");
+ h5_reset_rx(h5);
+ }
+
+ h5->rx_func = h5_rx_payload;
+ h5->rx_pending = H5_HDR_LEN(hdr);
+
+ return 0;
+}
+
+static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
+{
+ struct h5 *h5 = hu->priv;
+
+ if (c == SLIP_DELIMITER)
+ return 1;
+
+ h5->rx_func = h5_rx_3wire_hdr;
+ h5->rx_pending = 4;
+
+ h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
+ if (!h5->rx_skb) {
+ BT_ERR("Can't allocate mem for new packet");
+ h5_reset_rx(h5);
+ return -ENOMEM;
+ }
+
+ h5->rx_skb->dev = (void *) hu->hdev;
+
+ return 0;
+}
+
+static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
+{
+ struct h5 *h5 = hu->priv;
+
+ if (c == SLIP_DELIMITER)
+ h5->rx_func = h5_rx_pkt_start;
+
+ return 1;
+}
+
+static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
+{
+ const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
+ const u8 *byte = &c;
+
+ if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
+ set_bit(H5_RX_ESC, &h5->flags);
+ return;
+ }
+
+ if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
+ switch (c) {
+ case SLIP_ESC_DELIM:
+ byte = &delim;
+ break;
+ case SLIP_ESC_ESC:
+ byte = &esc;
+ break;
+ default:
+ BT_ERR("Invalid esc byte 0x%02hhx", c);
+ h5_reset_rx(h5);
+ return;
+ }
+ }
+
+ memcpy(skb_put(h5->rx_skb, 1), byte, 1);
+ h5->rx_pending--;
+
+ BT_DBG("unsliped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
+}
+
+static void h5_reset_rx(struct h5 *h5)
+{
+ if (h5->rx_skb) {
+ kfree_skb(h5->rx_skb);
+ h5->rx_skb = NULL;
+ }
+
+ h5->rx_func = h5_rx_delimiter;
+ h5->rx_pending = 0;
+ clear_bit(H5_RX_ESC, &h5->flags);
+}
+
+static int h5_recv(struct hci_uart *hu, void *data, int count)
+{
+ struct h5 *h5 = hu->priv;
+ unsigned char *ptr = data;
+
+ BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
+ count);
+
+ while (count > 0) {
+ int processed;
+
+ if (h5->rx_pending > 0) {
+ if (*ptr == SLIP_DELIMITER) {
+ BT_ERR("Too short H5 packet");
+ h5_reset_rx(h5);
+ continue;
+ }
+
+ h5_unslip_one_byte(h5, *ptr);
+
+ ptr++; count--;
+ continue;
+ }
+
+ processed = h5->rx_func(hu, *ptr);
+ if (processed < 0)
+ return processed;
+
+ ptr += processed;
+ count -= processed;
+ }
+
+ return 0;
+}
+
+static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+ struct h5 *h5 = hu->priv;
+
+ if (skb->len > 0xfff) {
+ BT_ERR("Packet too long (%u bytes)", skb->len);
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (h5->state != H5_ACTIVE) {
+ BT_ERR("Ignoring HCI data in non-active state");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ switch (bt_cb(skb)->pkt_type) {
+ case HCI_ACLDATA_PKT:
+ case HCI_COMMAND_PKT:
+ skb_queue_tail(&h5->rel, skb);
+ break;
+
+ case HCI_SCODATA_PKT:
+ skb_queue_tail(&h5->unrel, skb);
+ break;
+
+ default:
+ BT_ERR("Unknown packet type %u", bt_cb(skb)->pkt_type);
+ kfree_skb(skb);
+ break;
+ }
+
+ return 0;
+}
+
+static void h5_slip_delim(struct sk_buff *skb)
+{
+ const char delim = SLIP_DELIMITER;
+
+ memcpy(skb_put(skb, 1), &delim, 1);
+}
+
+static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
+{
+ const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
+ const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };
+
+ switch (c) {
+ case SLIP_DELIMITER:
+ memcpy(skb_put(skb, 2), &esc_delim, 2);
+ break;
+ case SLIP_ESC:
+ memcpy(skb_put(skb, 2), &esc_esc, 2);
+ break;
+ default:
+ memcpy(skb_put(skb, 1), &c, 1);
+ }
+}
+
+static bool valid_packet_type(u8 type)
+{
+ switch (type) {
+ case HCI_ACLDATA_PKT:
+ case HCI_COMMAND_PKT:
+ case HCI_SCODATA_PKT:
+ case HCI_3WIRE_LINK_PKT:
+ case HCI_3WIRE_ACK_PKT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
+ const u8 *data, size_t len)
+{
+ struct h5 *h5 = hu->priv;
+ struct sk_buff *nskb;
+ u8 hdr[4];
+ int i;
+
+ if (!valid_packet_type(pkt_type)) {
+ BT_ERR("Unknown packet type %u", pkt_type);
+ return NULL;
+ }
+
+ /*
+ * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
+ * (because bytes 0xc0 and 0xdb are escaped, worst case is when
+ * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
+ * delimiters at start and end).
+ */
+ nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
+ if (!nskb)
+ return NULL;
+
+ bt_cb(nskb)->pkt_type = pkt_type;
+
+ h5_slip_delim(nskb);
+
+ hdr[0] = h5->tx_ack << 3;
+ clear_bit(H5_TX_ACK_REQ, &h5->flags);
+
+ /* Reliable packet? */
+ if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
+ hdr[0] |= 1 << 7;
+ hdr[0] |= h5->tx_seq;
+ h5->tx_seq = (h5->tx_seq + 1) % 8;
+ }
+
+ hdr[1] = pkt_type | ((len & 0x0f) << 4);
+ hdr[2] = len >> 4;
+ hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);
+
+ BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
+ hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
+ H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
+ H5_HDR_LEN(hdr));
+
+ for (i = 0; i < 4; i++)
+ h5_slip_one_byte(nskb, hdr[i]);
+
+ for (i = 0; i < len; i++)
+ h5_slip_one_byte(nskb, data[i]);
+
+ h5_slip_delim(nskb);
+
+ return nskb;
+}
+
+static struct sk_buff *h5_dequeue(struct hci_uart *hu)
+{
+ struct h5 *h5 = hu->priv;
+ unsigned long flags;
+ struct sk_buff *skb, *nskb;
+
+ if (h5->sleep != H5_AWAKE) {
+ const unsigned char wakeup_req[] = { 0x05, 0xfa };
+
+ if (h5->sleep == H5_WAKING_UP)
+ return NULL;
+
+ h5->sleep = H5_WAKING_UP;
+ BT_DBG("Sending wakeup request");
+
+ mod_timer(&h5->timer, jiffies + HZ / 100);
+ return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
+ }
+
+ if ((skb = skb_dequeue(&h5->unrel)) != NULL) {
+ nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
+ skb->data, skb->len);
+ if (nskb) {
+ kfree_skb(skb);
+ return nskb;
+ }
+
+ skb_queue_head(&h5->unrel, skb);
+ BT_ERR("Could not dequeue pkt because alloc_skb failed");
+ }
+
+ spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);
+
+ if (h5->unack.qlen >= h5->tx_win)
+ goto unlock;
+
+ if ((skb = skb_dequeue(&h5->rel)) != NULL) {
+ nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
+ skb->data, skb->len);
+ if (nskb) {
+ __skb_queue_tail(&h5->unack, skb);
+ mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
+ spin_unlock_irqrestore(&h5->unack.lock, flags);
+ return nskb;
+ }
+
+ skb_queue_head(&h5->rel, skb);
+ BT_ERR("Could not dequeue pkt because alloc_skb failed");
+ }
+
+unlock:
+ spin_unlock_irqrestore(&h5->unack.lock, flags);
+
+ if (test_bit(H5_TX_ACK_REQ, &h5->flags))
+ return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);
+
+ return NULL;
+}
+
+static int h5_flush(struct hci_uart *hu)
+{
+ BT_DBG("hu %p", hu);
+ return 0;
+}
+
+static struct hci_uart_proto h5p = {
+ .id = HCI_UART_3WIRE,
+ .open = h5_open,
+ .close = h5_close,
+ .recv = h5_recv,
+ .enqueue = h5_enqueue,
+ .dequeue = h5_dequeue,
+ .flush = h5_flush,
+};
+
+int __init h5_init(void)
+{
+ int err = hci_uart_register_proto(&h5p);
+
+ if (!err)
+ BT_INFO("HCI Three-wire UART (H5) protocol initialized");
+ else
+ BT_ERR("HCI Three-wire UART (H5) protocol init failed");
+
+ return err;
+}
+
+int __exit h5_deinit(void)
+{
+ return hci_uart_unregister_proto(&h5p);
+}
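The new driver packs a 4-byte Three-wire header: seq and ack in byte 0 together with the CRC-present and reliable flags, the packet type and low length nibble in byte 1, the upper eight bits of the 12-bit length in byte 2, and a checksum in byte 3 chosen so that the four bytes sum to 0xff modulo 256 (see h5_prepare_pkt() and h5_rx_3wire_hdr() above). A small standalone check of that layout, reusing the H5_HDR_* accessors (illustrative; the example values are made up):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same field accessors as defined in hci_h5.c above. */
#define H5_HDR_SEQ(hdr)		((hdr)[0] & 0x07)
#define H5_HDR_ACK(hdr)		(((hdr)[0] >> 3) & 0x07)
#define H5_HDR_CRC(hdr)		(((hdr)[0] >> 6) & 0x01)
#define H5_HDR_RELIABLE(hdr)	(((hdr)[0] >> 7) & 0x01)
#define H5_HDR_PKT_TYPE(hdr)	((hdr)[1] & 0x0f)
#define H5_HDR_LEN(hdr)		((((hdr)[1] >> 4) & 0xff) + ((hdr)[2] << 4))

int main(void)
{
	uint8_t hdr[4];
	unsigned int seq = 3, ack = 5, type = 2 /* HCI ACL data */, len = 0x123;

	/* Pack the header the same way h5_prepare_pkt() does for a
	 * reliable packet without a payload CRC. */
	hdr[0] = (ack << 3) | (1 << 7) | seq;
	hdr[1] = type | ((len & 0x0f) << 4);
	hdr[2] = len >> 4;
	hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

	/* Receiver-side checks from h5_rx_3wire_hdr() and the macros. */
	assert(((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) == 0xff);
	assert(H5_HDR_SEQ(hdr) == seq && H5_HDR_ACK(hdr) == ack);
	assert(H5_HDR_RELIABLE(hdr) && !H5_HDR_CRC(hdr));
	assert(H5_HDR_PKT_TYPE(hdr) == type && H5_HDR_LEN(hdr) == len);

	printf("header %02x %02x %02x %02x ok\n",
	       hdr[0], hdr[1], hdr[2], hdr[3]);
	return 0;
}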
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index e564579..74e0966 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -156,6 +156,35 @@ restart:
return 0;
}
+static void hci_uart_init_work(struct work_struct *work)
+{
+ struct hci_uart *hu = container_of(work, struct hci_uart, init_ready);
+ int err;
+
+ if (!test_and_clear_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
+ return;
+
+ err = hci_register_dev(hu->hdev);
+ if (err < 0) {
+ BT_ERR("Can't register HCI device");
+ hci_free_dev(hu->hdev);
+ hu->hdev = NULL;
+ hu->proto->close(hu);
+ }
+
+ set_bit(HCI_UART_REGISTERED, &hu->flags);
+}
+
+int hci_uart_init_ready(struct hci_uart *hu)
+{
+ if (!test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
+ return -EALREADY;
+
+ schedule_work(&hu->init_ready);
+
+ return 0;
+}
+
/* ------- Interface to HCI layer ------ */
/* Initialize device */
static int hci_uart_open(struct hci_dev *hdev)
@@ -264,6 +293,8 @@ static int hci_uart_tty_open(struct tty_struct *tty)
hu->tty = tty;
tty->receive_room = 65536;
+ INIT_WORK(&hu->init_ready, hci_uart_init_work);
+
spin_lock_init(&hu->rx_lock);
/* Flush any pending characters in the driver and line discipline. */
@@ -286,28 +317,30 @@ static int hci_uart_tty_open(struct tty_struct *tty)
static void hci_uart_tty_close(struct tty_struct *tty)
{
struct hci_uart *hu = (void *)tty->disc_data;
+ struct hci_dev *hdev;
BT_DBG("tty %p", tty);
/* Detach from the tty */
tty->disc_data = NULL;
- if (hu) {
- struct hci_dev *hdev = hu->hdev;
+ if (!hu)
+ return;
- if (hdev)
- hci_uart_close(hdev);
+ hdev = hu->hdev;
+ if (hdev)
+ hci_uart_close(hdev);
- if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
- if (hdev) {
+ if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
+ if (hdev) {
+ if (test_bit(HCI_UART_REGISTERED, &hu->flags))
hci_unregister_dev(hdev);
- hci_free_dev(hdev);
- }
- hu->proto->close(hu);
+ hci_free_dev(hdev);
}
-
- kfree(hu);
+ hu->proto->close(hu);
}
+
+ kfree(hu);
}
/* hci_uart_tty_wakeup()
@@ -394,19 +427,24 @@ static int hci_uart_register_dev(struct hci_uart *hu)
set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
- set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
hdev->dev_type = HCI_AMP;
else
hdev->dev_type = HCI_BREDR;
+ if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
+ return 0;
+
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
hci_free_dev(hdev);
return -ENODEV;
}
+ set_bit(HCI_UART_REGISTERED, &hu->flags);
+
return 0;
}
@@ -558,6 +596,9 @@ static int __init hci_uart_init(void)
#ifdef CONFIG_BT_HCIUART_ATH3K
ath_init();
#endif
+#ifdef CONFIG_BT_HCIUART_3WIRE
+ h5_init();
+#endif
return 0;
}
@@ -578,6 +619,9 @@ static void __exit hci_uart_exit(void)
#ifdef CONFIG_BT_HCIUART_ATH3K
ath_deinit();
#endif
+#ifdef CONFIG_BT_HCIUART_3WIRE
+ h5_deinit();
+#endif
/* Release tty registration of line discipline */
if ((err = tty_unregister_ldisc(N_HCI)))
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index b874c0e..ff6d589 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -348,7 +348,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
static inline int ll_check_data_len(struct ll_struct *ll, int len)
{
- register int room = skb_tailroom(ll->rx_skb);
+ int room = skb_tailroom(ll->rx_skb);
BT_DBG("len %d room %d", len, room);
@@ -374,11 +374,11 @@ static inline int ll_check_data_len(struct ll_struct *ll, int len)
static int ll_recv(struct hci_uart *hu, void *data, int count)
{
struct ll_struct *ll = hu->priv;
- register char *ptr;
+ char *ptr;
struct hci_event_hdr *eh;
struct hci_acl_hdr *ah;
struct hci_sco_hdr *sh;
- register int len, type, dlen;
+ int len, type, dlen;
BT_DBG("hu %p count %d rx_state %ld rx_count %ld", hu, count, ll->rx_state, ll->rx_count);
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 6cf6ab22..fffa61f 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -47,6 +47,7 @@
#define HCI_UART_RAW_DEVICE 0
#define HCI_UART_RESET_ON_INIT 1
#define HCI_UART_CREATE_AMP 2
+#define HCI_UART_INIT_PENDING 3
struct hci_uart;
@@ -66,6 +67,8 @@ struct hci_uart {
unsigned long flags;
unsigned long hdev_flags;
+ struct work_struct init_ready;
+
struct hci_uart_proto *proto;
void *priv;
@@ -76,6 +79,7 @@ struct hci_uart {
/* HCI_UART proto flag bits */
#define HCI_UART_PROTO_SET 0
+#define HCI_UART_REGISTERED 1
/* TX states */
#define HCI_UART_SENDING 1
@@ -84,6 +88,7 @@ struct hci_uart {
int hci_uart_register_proto(struct hci_uart_proto *p);
int hci_uart_unregister_proto(struct hci_uart_proto *p);
int hci_uart_tx_wakeup(struct hci_uart *hu);
+int hci_uart_init_ready(struct hci_uart *hu);
#ifdef CONFIG_BT_HCIUART_H4
int h4_init(void);
@@ -104,3 +109,8 @@ int ll_deinit(void);
int ath_init(void);
int ath_deinit(void);
#endif
+
+#ifdef CONFIG_BT_HCIUART_3WIRE
+int h5_init(void);
+int h5_deinit(void);
+#endif
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 764f70c..0a41852 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -898,6 +898,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_B43_HB),
ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
+ ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index c009175..8e2d914 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -212,6 +212,7 @@
#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB 0x0069
#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index f518b99..731c904 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -34,8 +34,15 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
u32 *data = buf;
/* data ready? */
- if (readl(trng->base + TRNG_ODATA) & 1) {
+ if (readl(trng->base + TRNG_ISR) & 1) {
*data = readl(trng->base + TRNG_ODATA);
+ /*
+ * Ensure the data-ready bit is only set again AFTER the next
+ * data word is ready, in case it got set between checking ISR
+ * and reading ODATA, so we don't risk re-reading the
+ * same word.
+ */
+ readl(trng->base + TRNG_ISR);
return 4;
} else
return 0;
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 1412565..d706bd0e 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -162,22 +162,24 @@ static int __exit omap_rng_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
-static int omap_rng_suspend(struct platform_device *pdev, pm_message_t message)
+static int omap_rng_suspend(struct device *dev)
{
omap_rng_write_reg(RNG_MASK_REG, 0x0);
return 0;
}
-static int omap_rng_resume(struct platform_device *pdev)
+static int omap_rng_resume(struct device *dev)
{
omap_rng_write_reg(RNG_MASK_REG, 0x1);
return 0;
}
+static SIMPLE_DEV_PM_OPS(omap_rng_pm, omap_rng_suspend, omap_rng_resume);
+#define OMAP_RNG_PM (&omap_rng_pm)
+
#else
-#define omap_rng_suspend NULL
-#define omap_rng_resume NULL
+#define OMAP_RNG_PM NULL
#endif
@@ -188,11 +190,10 @@ static struct platform_driver omap_rng_driver = {
.driver = {
.name = "omap_rng",
.owner = THIS_MODULE,
+ .pm = OMAP_RNG_PM,
},
.probe = omap_rng_probe,
.remove = __exit_p(omap_rng_remove),
- .suspend = omap_rng_suspend,
- .resume = omap_rng_resume
};
static int __init omap_rng_init(void)
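Several conversions in this series (omap-rng here, and sonypi, tpm_atmel, tpm_nsc and tpm_tis below) drop the legacy platform_driver .suspend/.resume hooks in favour of dev_pm_ops built with SIMPLE_DEV_PM_OPS(), whose callbacks take only a struct device *. A minimal sketch of the conversion for a hypothetical "foo" platform driver (all foo_* names are illustrative):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware here */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* re-enable the hardware here */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm, foo_suspend, foo_resume);
#define FOO_PM	(&foo_pm)
#else
#define FOO_PM	NULL
#endif

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= FOO_PM,
	},
	/* .probe/.remove as before; no .suspend/.resume here any more */
};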
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 1e638ff..83f85cf 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2503,18 +2503,6 @@ static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
cleanup_one_si(info);
}
-#ifdef CONFIG_PM
-static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- return 0;
-}
-
-static int ipmi_pci_resume(struct pci_dev *pdev)
-{
- return 0;
-}
-#endif
-
static struct pci_device_id ipmi_pci_devices[] = {
{ PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
{ PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
@@ -2527,10 +2515,6 @@ static struct pci_driver ipmi_pci_driver = {
.id_table = ipmi_pci_devices,
.probe = ipmi_pci_probe,
.remove = __devexit_p(ipmi_pci_remove),
-#ifdef CONFIG_PM
- .suspend = ipmi_pci_suspend,
- .resume = ipmi_pci_resume,
-#endif
};
#endif /* CONFIG_PCI */
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 7ed356e..37b8be7 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -141,17 +141,6 @@
#define IPMI_WDOG_TIMER_NOT_INIT_RESP 0x80
-/* These are here until the real ones get into the watchdog.h interface. */
-#ifndef WDIOC_GETTIMEOUT
-#define WDIOC_GETTIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 20, int)
-#endif
-#ifndef WDIOC_SET_PRETIMEOUT
-#define WDIOC_SET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 21, int)
-#endif
-#ifndef WDIOC_GET_PRETIMEOUT
-#define WDIOC_GET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 22, int)
-#endif
-
static DEFINE_MUTEX(ipmi_watchdog_mutex);
static bool nowayout = WATCHDOG_NOWAYOUT;
@@ -732,7 +721,6 @@ static int ipmi_ioctl(struct file *file,
return -EFAULT;
return 0;
- case WDIOC_SET_PRETIMEOUT:
case WDIOC_SETPRETIMEOUT:
i = copy_from_user(&val, argp, sizeof(int));
if (i)
@@ -740,7 +728,6 @@ static int ipmi_ioctl(struct file *file,
pretimeout = val;
return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
- case WDIOC_GET_PRETIMEOUT:
case WDIOC_GETPRETIMEOUT:
i = copy_to_user(argp, &pretimeout, sizeof(pretimeout));
if (i)
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 67c3371..e5eedfa 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -27,14 +27,16 @@
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
+#include <linux/io.h>
#include <asm/uaccess.h>
-#include <asm/io.h>
#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif
+#define DEVPORT_MINOR 4
+
static inline unsigned long size_inside_page(unsigned long start,
unsigned long size)
{
@@ -894,6 +896,13 @@ static int __init chr_dev_init(void)
for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
if (!devlist[minor].name)
continue;
+
+ /*
+ * Create /dev/port?
+ */
+ if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
+ continue;
+
device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
NULL, devlist[minor].name);
}
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 45713f0..f877805 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -1459,7 +1459,7 @@ static int __devexit sonypi_remove(struct platform_device *dev)
#ifdef CONFIG_PM
static int old_camera_power;
-static int sonypi_suspend(struct platform_device *dev, pm_message_t state)
+static int sonypi_suspend(struct device *dev)
{
old_camera_power = sonypi_device.camera_power;
sonypi_disable();
@@ -1467,14 +1467,16 @@ static int sonypi_suspend(struct platform_device *dev, pm_message_t state)
return 0;
}
-static int sonypi_resume(struct platform_device *dev)
+static int sonypi_resume(struct device *dev)
{
sonypi_enable(old_camera_power);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(sonypi_pm, sonypi_suspend, sonypi_resume);
+#define SONYPI_PM (&sonypi_pm)
#else
-#define sonypi_suspend NULL
-#define sonypi_resume NULL
+#define SONYPI_PM NULL
#endif
static void sonypi_shutdown(struct platform_device *dev)
@@ -1486,12 +1488,11 @@ static struct platform_driver sonypi_driver = {
.driver = {
.name = "sonypi",
.owner = THIS_MODULE,
+ .pm = SONYPI_PM,
},
.probe = sonypi_probe,
.remove = __devexit_p(sonypi_remove),
.shutdown = sonypi_shutdown,
- .suspend = sonypi_suspend,
- .resume = sonypi_resume,
};
static struct platform_device *sonypi_platform_device;
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index ad7c732..817f0ee 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -827,10 +827,10 @@ EXPORT_SYMBOL_GPL(tpm_pcr_extend);
int tpm_do_selftest(struct tpm_chip *chip)
{
int rc;
- u8 digest[TPM_DIGEST_SIZE];
unsigned int loops;
unsigned int delay_msec = 1000;
unsigned long duration;
+ struct tpm_cmd_t cmd;
duration = tpm_calc_ordinal_duration(chip,
TPM_ORD_CONTINUE_SELFTEST);
@@ -845,7 +845,15 @@ int tpm_do_selftest(struct tpm_chip *chip)
return rc;
do {
- rc = __tpm_pcr_read(chip, 0, digest);
+ /* Attempt to read a PCR value */
+ cmd.header.in = pcrread_header;
+ cmd.params.pcrread_in.pcr_idx = cpu_to_be32(0);
+ rc = tpm_transmit(chip, (u8 *) &cmd, READ_PCR_RESULT_SIZE);
+
+ if (rc < TPM_HEADER_SIZE)
+ return -EFAULT;
+
+ rc = be32_to_cpu(cmd.header.out.return_code);
if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
dev_info(chip->dev,
"TPM is disabled/deactivated (0x%X)\n", rc);
@@ -1274,7 +1282,7 @@ static struct tpm_input_header savestate_header = {
* We are about to suspend. Save the TPM state
* so that it can be restored.
*/
-int tpm_pm_suspend(struct device *dev, pm_message_t pm_state)
+int tpm_pm_suspend(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
struct tpm_cmd_t cmd;
@@ -1322,6 +1330,9 @@ EXPORT_SYMBOL_GPL(tpm_pm_resume);
void tpm_dev_vendor_release(struct tpm_chip *chip)
{
+ if (!chip)
+ return;
+
if (chip->vendor.release)
chip->vendor.release(chip->dev);
@@ -1339,6 +1350,9 @@ void tpm_dev_release(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
+ if (!chip)
+ return;
+
tpm_dev_vendor_release(chip);
chip->release(dev);
@@ -1405,15 +1419,12 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
"unable to misc_register %s, minor %d\n",
chip->vendor.miscdev.name,
chip->vendor.miscdev.minor);
- put_device(chip->dev);
- return NULL;
+ goto put_device;
}
if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) {
misc_deregister(&chip->vendor.miscdev);
- put_device(chip->dev);
-
- return NULL;
+ goto put_device;
}
chip->bios_dir = tpm_bios_log_setup(devname);
@@ -1425,6 +1436,8 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
return chip;
+put_device:
+ put_device(chip->dev);
out_free:
kfree(chip);
kfree(devname);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index b1c5280..917f727 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -299,7 +299,7 @@ extern ssize_t tpm_write(struct file *, const char __user *, size_t,
loff_t *);
extern ssize_t tpm_read(struct file *, char __user *, size_t, loff_t *);
extern void tpm_remove_hardware(struct device *);
-extern int tpm_pm_suspend(struct device *, pm_message_t);
+extern int tpm_pm_suspend(struct device *);
extern int tpm_pm_resume(struct device *);
extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long,
wait_queue_head_t *);
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index c64a1bc..678d570 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -168,22 +168,14 @@ static void atml_plat_remove(void)
}
}
-static int tpm_atml_suspend(struct platform_device *dev, pm_message_t msg)
-{
- return tpm_pm_suspend(&dev->dev, msg);
-}
+static SIMPLE_DEV_PM_OPS(tpm_atml_pm, tpm_pm_suspend, tpm_pm_resume);
-static int tpm_atml_resume(struct platform_device *dev)
-{
- return tpm_pm_resume(&dev->dev);
-}
static struct platform_driver atml_drv = {
.driver = {
.name = "tpm_atmel",
.owner = THIS_MODULE,
+ .pm = &tpm_atml_pm,
},
- .suspend = tpm_atml_suspend,
- .resume = tpm_atml_resume,
};
static int __init init_atmel(void)
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 76da32e..3251a44 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -4,8 +4,8 @@
* SLD 9630 TT 1.1 and SLB 9635 TT 1.2 Trusted Platform Module
* Specifications at www.trustedcomputinggroup.org
*
- * Copyright (C) 2005, Marcel Selhorst <m.selhorst@sirrix.com>
- * Sirrix AG - security technologies, http://www.sirrix.com and
+ * Copyright (C) 2005, Marcel Selhorst <tpmdd@selhorst.net>
+ * Sirrix AG - security technologies <tpmdd@sirrix.com> and
* Applied Data Security Group, Ruhr-University Bochum, Germany
* Project-Homepage: http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/
*
@@ -671,7 +671,7 @@ static void __exit cleanup_inf(void)
module_init(init_inf);
module_exit(cleanup_inf);
-MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>");
+MODULE_AUTHOR("Marcel Selhorst <tpmdd@sirrix.com>");
MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
MODULE_VERSION("1.9.2");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 4d24648..640c9a4 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -274,22 +274,13 @@ static void tpm_nsc_remove(struct device *dev)
}
}
-static int tpm_nsc_suspend(struct platform_device *dev, pm_message_t msg)
-{
- return tpm_pm_suspend(&dev->dev, msg);
-}
-
-static int tpm_nsc_resume(struct platform_device *dev)
-{
- return tpm_pm_resume(&dev->dev);
-}
+static SIMPLE_DEV_PM_OPS(tpm_nsc_pm, tpm_pm_suspend, tpm_pm_resume);
static struct platform_driver nsc_drv = {
- .suspend = tpm_nsc_suspend,
- .resume = tpm_nsc_resume,
.driver = {
.name = "tpm_nsc",
.owner = THIS_MODULE,
+ .pm = &tpm_nsc_pm,
},
};
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index d2a70ca..89682fa 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -750,7 +750,7 @@ static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
{
- return tpm_pm_suspend(&dev->dev, msg);
+ return tpm_pm_suspend(&dev->dev);
}
static int tpm_tis_pnp_resume(struct pnp_dev *dev)
@@ -806,27 +806,25 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
#endif
-static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
-{
- return tpm_pm_suspend(&dev->dev, msg);
-}
-static int tpm_tis_resume(struct platform_device *dev)
+static int tpm_tis_resume(struct device *dev)
{
- struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
+ struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip->vendor.irq)
tpm_tis_reenable_interrupts(chip);
- return tpm_pm_resume(&dev->dev);
+ return tpm_pm_resume(dev);
}
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+
static struct platform_driver tis_drv = {
.driver = {
.name = "tpm_tis",
.owner = THIS_MODULE,
+ .pm = &tpm_tis_pm,
},
- .suspend = tpm_tis_suspend,
- .resume = tpm_tis_resume,
};
static struct platform_device *pdev;
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index b9a5158..3669761 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -3,5 +3,7 @@ obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o
obj-$(CONFIG_COMMON_CLK) += clk.o clk-fixed-rate.o clk-gate.o \
clk-mux.o clk-divider.o clk-fixed-factor.o
# SoCs specific
+obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
obj-$(CONFIG_ARCH_MXS) += mxs/
+obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
obj-$(CONFIG_PLAT_SPEAR) += spear/
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
new file mode 100644
index 0000000..517a8ff
--- /dev/null
+++ b/drivers/clk/clk-nomadik.c
@@ -0,0 +1,47 @@
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk-provider.h>
+
+/*
+ * The Nomadik clock tree is described in the STN8815A12 DB V4.2
+ * reference manual for the chip, page 94 ff.
+ */
+
+void __init nomadik_clk_init(void)
+{
+ struct clk *clk;
+
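+ /*
+ * Dummy fixed-rate root clock: registered at rate 0 and handed out
+ * under the "apb_pclk" name and to the GPIO and RNG blocks below.
+ */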
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+ clk_register_clkdev(clk, "apb_pclk", NULL);
+ clk_register_clkdev(clk, NULL, "gpio.0");
+ clk_register_clkdev(clk, NULL, "gpio.1");
+ clk_register_clkdev(clk, NULL, "gpio.2");
+ clk_register_clkdev(clk, NULL, "gpio.3");
+ clk_register_clkdev(clk, NULL, "rng");
+
+ /*
+ * The 2.4 MHz TIMCLK reference clock is active at boot time; it is
+ * actually the MXTALCLK @19.2 MHz divided by 8. This clock is used
+ * by the timers and watchdog. See page 105 ff.
+ */
+ clk = clk_register_fixed_rate(NULL, "TIMCLK", NULL, CLK_IS_ROOT,
+ 2400000);
+ clk_register_clkdev(clk, NULL, "mtu0");
+ clk_register_clkdev(clk, NULL, "mtu1");
+
+ /*
+ * At boot time, PLL2 is set to generate a set of fixed clocks,
+ * one of which is CLK48, the 48 MHz clock routed to the UART, MMC/SD,
+ * I2C, IrDA, USB and SSP blocks.
+ */
+ clk = clk_register_fixed_rate(NULL, "CLK48", NULL, CLK_IS_ROOT,
+ 48000000);
+ clk_register_clkdev(clk, NULL, "uart0");
+ clk_register_clkdev(clk, NULL, "uart1");
+ clk_register_clkdev(clk, NULL, "mmci");
+ clk_register_clkdev(clk, NULL, "ssp");
+ clk_register_clkdev(clk, NULL, "nmk-i2c.0");
+ clk_register_clkdev(clk, NULL, "nmk-i2c.1");
+}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 687b00d..9a1eb0c 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -850,18 +850,21 @@ static void clk_change_rate(struct clk *clk)
{
struct clk *child;
unsigned long old_rate;
+ unsigned long best_parent_rate = 0;
struct hlist_node *tmp;
old_rate = clk->rate;
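+ /* Root clocks have no parent; leave best_parent_rate at 0 for them. */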
+ if (clk->parent)
+ best_parent_rate = clk->parent->rate;
+
if (clk->ops->set_rate)
- clk->ops->set_rate(clk->hw, clk->new_rate, clk->parent->rate);
+ clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
if (clk->ops->recalc_rate)
- clk->rate = clk->ops->recalc_rate(clk->hw,
- clk->parent->rate);
+ clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
else
- clk->rate = clk->parent->rate;
+ clk->rate = best_parent_rate;
if (clk->notifier_count && old_rate != clk->rate)
__clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
@@ -999,7 +1002,7 @@ static struct clk *__clk_init_parent(struct clk *clk)
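+ /* Use kzalloc so that uncached parent slots read back as NULL. */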
if (!clk->parents)
clk->parents =
- kmalloc((sizeof(struct clk*) * clk->num_parents),
+ kzalloc((sizeof(struct clk*) * clk->num_parents),
GFP_KERNEL);
if (!clk->parents)
@@ -1064,21 +1067,24 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
old_parent = clk->parent;
- /* find index of new parent clock using cached parent ptrs */
- for (i = 0; i < clk->num_parents; i++)
- if (clk->parents[i] == parent)
- break;
+ if (!clk->parents)
+ clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
+ GFP_KERNEL);
/*
- * find index of new parent clock using string name comparison
- * also try to cache the parent to avoid future calls to __clk_lookup
+ * find index of new parent clock using cached parent ptrs,
+ * or if not yet cached, use string name comparison and cache
+ * them now to avoid future calls to __clk_lookup.
*/
- if (i == clk->num_parents)
- for (i = 0; i < clk->num_parents; i++)
- if (!strcmp(clk->parent_names[i], parent->name)) {
+ for (i = 0; i < clk->num_parents; i++) {
+ if (clk->parents && clk->parents[i] == parent)
+ break;
+ else if (!strcmp(clk->parent_names[i], parent->name)) {
+ if (clk->parents)
clk->parents[i] = __clk_lookup(parent->name);
- break;
- }
+ break;
+ }
+ }
if (i == clk->num_parents) {
pr_debug("%s: clock %s is not a possible parent of clock %s\n",
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
index f7be225..db2391c 100644
--- a/drivers/clk/mxs/clk-imx23.c
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -71,7 +71,7 @@ static void __init clk_misc_init(void)
__mxs_setl(30 << BP_FRAC_IOFRAC, FRAC);
}
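+/*
+ * The clk_lookup tables below are handed to clkdev and stay referenced
+ * after boot, so they can no longer live in __initdata.
+ */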
-static struct clk_lookup uart_lookups[] __initdata = {
+static struct clk_lookup uart_lookups[] = {
{ .dev_id = "duart", },
{ .dev_id = "mxs-auart.0", },
{ .dev_id = "mxs-auart.1", },
@@ -80,31 +80,31 @@ static struct clk_lookup uart_lookups[] __initdata = {
{ .dev_id = "80070000.serial", },
};
-static struct clk_lookup hbus_lookups[] __initdata = {
+static struct clk_lookup hbus_lookups[] = {
{ .dev_id = "imx23-dma-apbh", },
{ .dev_id = "80004000.dma-apbh", },
};
-static struct clk_lookup xbus_lookups[] __initdata = {
+static struct clk_lookup xbus_lookups[] = {
{ .dev_id = "duart", .con_id = "apb_pclk"},
{ .dev_id = "80070000.serial", .con_id = "apb_pclk"},
{ .dev_id = "imx23-dma-apbx", },
{ .dev_id = "80024000.dma-apbx", },
};
-static struct clk_lookup ssp_lookups[] __initdata = {
+static struct clk_lookup ssp_lookups[] = {
{ .dev_id = "imx23-mmc.0", },
{ .dev_id = "imx23-mmc.1", },
{ .dev_id = "80010000.ssp", },
{ .dev_id = "80034000.ssp", },
};
-static struct clk_lookup lcdif_lookups[] __initdata = {
+static struct clk_lookup lcdif_lookups[] = {
{ .dev_id = "imx23-fb", },
{ .dev_id = "80030000.lcdif", },
};
-static struct clk_lookup gpmi_lookups[] __initdata = {
+static struct clk_lookup gpmi_lookups[] = {
{ .dev_id = "imx23-gpmi-nand", },
{ .dev_id = "8000c000.gpmi", },
};
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
index 2826a26..7fad6c8 100644
--- a/drivers/clk/mxs/clk-imx28.c
+++ b/drivers/clk/mxs/clk-imx28.c
@@ -120,7 +120,7 @@ static void __init clk_misc_init(void)
writel_relaxed(val, FRAC0);
}
-static struct clk_lookup uart_lookups[] __initdata = {
+static struct clk_lookup uart_lookups[] = {
{ .dev_id = "duart", },
{ .dev_id = "mxs-auart.0", },
{ .dev_id = "mxs-auart.1", },
@@ -135,71 +135,71 @@ static struct clk_lookup uart_lookups[] __initdata = {
{ .dev_id = "80074000.serial", },
};
-static struct clk_lookup hbus_lookups[] __initdata = {
+static struct clk_lookup hbus_lookups[] = {
{ .dev_id = "imx28-dma-apbh", },
{ .dev_id = "80004000.dma-apbh", },
};
-static struct clk_lookup xbus_lookups[] __initdata = {
+static struct clk_lookup xbus_lookups[] = {
{ .dev_id = "duart", .con_id = "apb_pclk"},
{ .dev_id = "80074000.serial", .con_id = "apb_pclk"},
{ .dev_id = "imx28-dma-apbx", },
{ .dev_id = "80024000.dma-apbx", },
};
-static struct clk_lookup ssp0_lookups[] __initdata = {
+static struct clk_lookup ssp0_lookups[] = {
{ .dev_id = "imx28-mmc.0", },
{ .dev_id = "80010000.ssp", },
};
-static struct clk_lookup ssp1_lookups[] __initdata = {
+static struct clk_lookup ssp1_lookups[] = {
{ .dev_id = "imx28-mmc.1", },
{ .dev_id = "80012000.ssp", },
};
-static struct clk_lookup ssp2_lookups[] __initdata = {
+static struct clk_lookup ssp2_lookups[] = {
{ .dev_id = "imx28-mmc.2", },
{ .dev_id = "80014000.ssp", },
};
-static struct clk_lookup ssp3_lookups[] __initdata = {
+static struct clk_lookup ssp3_lookups[] = {
{ .dev_id = "imx28-mmc.3", },
{ .dev_id = "80016000.ssp", },
};
-static struct clk_lookup lcdif_lookups[] __initdata = {
+static struct clk_lookup lcdif_lookups[] = {
{ .dev_id = "imx28-fb", },
{ .dev_id = "80030000.lcdif", },
};
-static struct clk_lookup gpmi_lookups[] __initdata = {
+static struct clk_lookup gpmi_lookups[] = {
{ .dev_id = "imx28-gpmi-nand", },
{ .dev_id = "8000c000.gpmi", },
};
-static struct clk_lookup fec_lookups[] __initdata = {
+static struct clk_lookup fec_lookups[] = {
{ .dev_id = "imx28-fec.0", },
{ .dev_id = "imx28-fec.1", },
{ .dev_id = "800f0000.ethernet", },
{ .dev_id = "800f4000.ethernet", },
};
-static struct clk_lookup can0_lookups[] __initdata = {
+static struct clk_lookup can0_lookups[] = {
{ .dev_id = "flexcan.0", },
{ .dev_id = "80032000.can", },
};
-static struct clk_lookup can1_lookups[] __initdata = {
+static struct clk_lookup can1_lookups[] = {
{ .dev_id = "flexcan.1", },
{ .dev_id = "80034000.can", },
};
-static struct clk_lookup saif0_lookups[] __initdata = {
+static struct clk_lookup saif0_lookups[] = {
{ .dev_id = "mxs-saif.0", },
{ .dev_id = "80042000.saif", },
};
-static struct clk_lookup saif1_lookups[] __initdata = {
+static struct clk_lookup saif1_lookups[] = {
{ .dev_id = "mxs-saif.1", },
{ .dev_id = "80046000.saif", },
};
@@ -245,8 +245,8 @@ int __init mx28_clocks_init(void)
clks[pll2] = mxs_clk_pll("pll2", "ref_xtal", PLL2CTRL0, 23, 50000000);
clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll0", FRAC0, 0);
clks[ref_emi] = mxs_clk_ref("ref_emi", "pll0", FRAC0, 1);
- clks[ref_io0] = mxs_clk_ref("ref_io0", "pll0", FRAC0, 2);
- clks[ref_io1] = mxs_clk_ref("ref_io1", "pll0", FRAC0, 3);
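+ /* Note: FRAC0 slot 2 drives ref_io1 and slot 3 drives ref_io0. */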
+ clks[ref_io1] = mxs_clk_ref("ref_io1", "pll0", FRAC0, 2);
+ clks[ref_io0] = mxs_clk_ref("ref_io0", "pll0", FRAC0, 3);
clks[ref_pix] = mxs_clk_ref("ref_pix", "pll0", FRAC1, 0);
clks[ref_hsadc] = mxs_clk_ref("ref_hsadc", "pll0", FRAC1, 1);
clks[ref_gpmi] = mxs_clk_ref("ref_gpmi", "pll0", FRAC1, 2);
diff --git a/drivers/clk/socfpga/Makefile b/drivers/clk/socfpga/Makefile
new file mode 100644
index 0000000..0303c0b
--- /dev/null
+++ b/drivers/clk/socfpga/Makefile
@@ -0,0 +1 @@
+obj-y += clk.o
diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
new file mode 100644
index 0000000..2c855a6
--- /dev/null
+++ b/drivers/clk/socfpga/clk.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2012 Altera Corporation <www.altera.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+
+#define SOCFPGA_OSC1_CLK 10000000
+#define SOCFPGA_MPU_CLK 800000000
+#define SOCFPGA_MAIN_QSPI_CLK 432000000
+#define SOCFPGA_MAIN_NAND_SDMMC_CLK 250000000
+#define SOCFPGA_S2F_USR_CLK 125000000
+
+void __init socfpga_init_clocks(void)
+{
+ struct clk *clk;
+
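+ /*
+ * Everything is modelled as a fixed-rate clock using the default
+ * frequencies defined above; the PLLs themselves are not programmed here.
+ */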
+ clk = clk_register_fixed_rate(NULL, "osc1_clk", NULL, CLK_IS_ROOT, SOCFPGA_OSC1_CLK);
+ clk_register_clkdev(clk, "osc1_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "mpu_clk", NULL, CLK_IS_ROOT, SOCFPGA_MPU_CLK);
+ clk_register_clkdev(clk, "mpu_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "main_clk", NULL, CLK_IS_ROOT, SOCFPGA_MPU_CLK/2);
+ clk_register_clkdev(clk, "main_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "dbg_base_clk", NULL, CLK_IS_ROOT, SOCFPGA_MPU_CLK/2);
+ clk_register_clkdev(clk, "dbg_base_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "main_qspi_clk", NULL, CLK_IS_ROOT, SOCFPGA_MAIN_QSPI_CLK);
+ clk_register_clkdev(clk, "main_qspi_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "main_nand_sdmmc_clk", NULL, CLK_IS_ROOT, SOCFPGA_MAIN_NAND_SDMMC_CLK);
+ clk_register_clkdev(clk, "main_nand_sdmmc_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "s2f_usr_clk", NULL, CLK_IS_ROOT, SOCFPGA_S2F_USR_CLK);
+ clk_register_clkdev(clk, "s2f_usr_clk", NULL);
+}
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
index af34074..6756e7c 100644
--- a/drivers/clk/spear/clk-aux-synth.c
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
index 4dbdb3f..958aa3a 100644
--- a/drivers/clk/spear/clk-frac-synth.c
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
index b471c97..1afc18c 100644
--- a/drivers/clk/spear/clk-gpt-synth.c
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
index dcd4bdf..5f1b6ba 100644
--- a/drivers/clk/spear/clk-vco-pll.c
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk.c b/drivers/clk/spear/clk.c
index 376d4e5..7cd6378 100644
--- a/drivers/clk/spear/clk.c
+++ b/drivers/clk/spear/clk.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h
index 3321c46..9317376 100644
--- a/drivers/clk/spear/clk.h
+++ b/drivers/clk/spear/clk.h
@@ -2,7 +2,7 @@
* Clock framework definitions for SPEAr platform
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 42b68df..0fcec2a 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -4,7 +4,7 @@
* SPEAr1310 machine clock framework source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -345,31 +345,30 @@ static struct frac_rate_tbl gen_rtbl[] = {
/* clock parents */
static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
-static const char *uart0_parents[] = { "pll5_clk", "uart_synth_gate_clk", };
-static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", };
-static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk",
+static const char *uart0_parents[] = { "pll5_clk", "uart_syn_gclk", };
+static const char *c3_parents[] = { "pll5_clk", "c3_syn_gclk", };
+static const char *gmac_phy_input_parents[] = { "gmii_pad_clk", "pll2_clk",
"osc_25m_clk", };
-static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk",
- "gmac_phy_synth_gate_clk", };
+static const char *gmac_phy_parents[] = { "phy_input_mclk", "phy_syn_gclk", };
static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
-static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", };
+static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_syn_clk", };
static const char *i2s_src_parents[] = { "vco1div2_clk", "none", "pll3_clk",
"i2s_src_pad_clk", };
-static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", };
+static const char *i2s_ref_parents[] = { "i2s_src_mclk", "i2s_prs1_clk", };
static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
"pll3_clk", };
static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco3div2_clk",
"pll2_clk", };
static const char *rmii_phy_parents[] = { "ras_tx50_clk", "none",
- "ras_pll2_clk", "ras_synth0_clk", };
+ "ras_pll2_clk", "ras_syn0_clk", };
static const char *smii_rgmii_phy_parents[] = { "none", "ras_tx125_clk",
- "ras_pll2_clk", "ras_synth0_clk", };
-static const char *uart_parents[] = { "ras_apb_clk", "gen_synth3_clk", };
-static const char *i2c_parents[] = { "ras_apb_clk", "gen_synth1_clk", };
-static const char *ssp1_parents[] = { "ras_apb_clk", "gen_synth1_clk",
+ "ras_pll2_clk", "ras_syn0_clk", };
+static const char *uart_parents[] = { "ras_apb_clk", "gen_syn3_clk", };
+static const char *i2c_parents[] = { "ras_apb_clk", "gen_syn1_clk", };
+static const char *ssp1_parents[] = { "ras_apb_clk", "gen_syn1_clk",
"ras_plclk0_clk", };
-static const char *pci_parents[] = { "ras_pll3_clk", "gen_synth2_clk", };
-static const char *tdm_parents[] = { "ras_pll3_clk", "gen_synth1_clk", };
+static const char *pci_parents[] = { "ras_pll3_clk", "gen_syn2_clk", };
+static const char *tdm_parents[] = { "ras_pll3_clk", "gen_syn1_clk", };
void __init spear1310_clk_init(void)
{
@@ -390,9 +389,9 @@ void __init spear1310_clk_init(void)
25000000);
clk_register_clkdev(clk, "osc_25m_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL,
- CLK_IS_ROOT, 125000000);
- clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL);
+ clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, CLK_IS_ROOT,
+ 125000000);
+ clk_register_clkdev(clk, "gmii_pad_clk", NULL);
clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
CLK_IS_ROOT, 12288000);
@@ -406,34 +405,34 @@ void __init spear1310_clk_init(void)
/* clock derived from 24 or 25 MHz osc clk */
/* vco-pll */
- clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents,
+ clk = clk_register_mux(NULL, "vco1_mclk", vco_parents,
ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
SPEAR1310_PLL1_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "vco1_mux_clk", NULL);
- clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk",
+ clk_register_clkdev(clk, "vco1_mclk", NULL);
+ clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk",
0, SPEAR1310_PLL1_CTR, SPEAR1310_PLL1_FRQ, pll_rtbl,
ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
clk_register_clkdev(clk, "vco1_clk", NULL);
clk_register_clkdev(clk1, "pll1_clk", NULL);
- clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents,
+ clk = clk_register_mux(NULL, "vco2_mclk", vco_parents,
ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
SPEAR1310_PLL2_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "vco2_mux_clk", NULL);
- clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk",
+ clk_register_clkdev(clk, "vco2_mclk", NULL);
+ clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk",
0, SPEAR1310_PLL2_CTR, SPEAR1310_PLL2_FRQ, pll_rtbl,
ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
clk_register_clkdev(clk, "vco2_clk", NULL);
clk_register_clkdev(clk1, "pll2_clk", NULL);
- clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents,
+ clk = clk_register_mux(NULL, "vco3_mclk", vco_parents,
ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
SPEAR1310_PLL3_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "vco3_mux_clk", NULL);
- clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk",
+ clk_register_clkdev(clk, "vco3_mclk", NULL);
+ clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk",
0, SPEAR1310_PLL3_CTR, SPEAR1310_PLL3_FRQ, pll_rtbl,
ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
clk_register_clkdev(clk, "vco3_clk", NULL);
@@ -473,7 +472,7 @@ void __init spear1310_clk_init(void)
/* peripherals */
clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
128);
- clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0,
+ clk = clk_register_gate(NULL, "thermal_gclk", "thermal_clk", 0,
SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_THSENS_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "spear_thermal");
@@ -500,177 +499,176 @@ void __init spear1310_clk_init(void)
clk_register_clkdev(clk, "apb_clk", NULL);
/* gpt clocks */
- clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents,
+ clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents,
ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
SPEAR1310_GPT0_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "gpt0_mux_clk", NULL);
- clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0,
+ clk_register_clkdev(clk, "gpt0_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0,
SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT0_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "gpt0");
- clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents,
+ clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents,
ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
SPEAR1310_GPT1_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
- clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+ clk_register_clkdev(clk, "gpt1_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT1_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "gpt1");
- clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents,
+ clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents,
ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
SPEAR1310_GPT2_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
- clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+ clk_register_clkdev(clk, "gpt2_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT2_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "gpt2");
- clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents,
+ clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents,
ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
SPEAR1310_GPT3_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
- clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+ clk_register_clkdev(clk, "gpt3_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT3_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "gpt3");
/* others */
- clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
- "vco1div2_clk", 0, SPEAR1310_UART_CLK_SYNT, NULL,
- aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "uart_synth_clk", NULL);
- clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+ clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "vco1div2_clk",
+ 0, SPEAR1310_UART_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "uart_syn_clk", NULL);
+ clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+ clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
ARRAY_SIZE(uart0_parents), 0, SPEAR1310_PERIP_CLK_CFG,
SPEAR1310_UART_CLK_SHIFT, SPEAR1310_UART_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+ clk_register_clkdev(clk, "uart0_mclk", NULL);
- clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0,
+ clk = clk_register_gate(NULL, "uart0_clk", "uart0_mclk", 0,
SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UART_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "e0000000.serial");
- clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk",
+ clk = clk_register_aux("sdhci_syn_clk", "sdhci_syn_gclk",
"vco1div2_clk", 0, SPEAR1310_SDHCI_CLK_SYNT, NULL,
aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "sdhci_synth_clk", NULL);
- clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL);
+ clk_register_clkdev(clk, "sdhci_syn_clk", NULL);
+ clk_register_clkdev(clk1, "sdhci_syn_gclk", NULL);
- clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0,
+ clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_syn_gclk", 0,
SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SDHCI_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "b3000000.sdhci");
- clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk",
- "vco1div2_clk", 0, SPEAR1310_CFXD_CLK_SYNT, NULL,
- aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "cfxd_synth_clk", NULL);
- clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL);
+ clk = clk_register_aux("cfxd_syn_clk", "cfxd_syn_gclk", "vco1div2_clk",
+ 0, SPEAR1310_CFXD_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "cfxd_syn_clk", NULL);
+ clk_register_clkdev(clk1, "cfxd_syn_gclk", NULL);
- clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0,
+ clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_syn_gclk", 0,
SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CFXD_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "b2800000.cf");
clk_register_clkdev(clk, NULL, "arasan_xd");
- clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk",
- "vco1div2_clk", 0, SPEAR1310_C3_CLK_SYNT, NULL,
- aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "c3_synth_clk", NULL);
- clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL);
+ clk = clk_register_aux("c3_syn_clk", "c3_syn_gclk", "vco1div2_clk",
+ 0, SPEAR1310_C3_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "c3_syn_clk", NULL);
+ clk_register_clkdev(clk1, "c3_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents,
+ clk = clk_register_mux(NULL, "c3_mclk", c3_parents,
ARRAY_SIZE(c3_parents), 0, SPEAR1310_PERIP_CLK_CFG,
SPEAR1310_C3_CLK_SHIFT, SPEAR1310_C3_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "c3_mux_clk", NULL);
+ clk_register_clkdev(clk, "c3_mclk", NULL);
- clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0,
+ clk = clk_register_gate(NULL, "c3_clk", "c3_mclk", 0,
SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_C3_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "c3");
/* gmac */
- clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk",
- gmac_phy_input_parents,
+ clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents,
ARRAY_SIZE(gmac_phy_input_parents), 0,
SPEAR1310_GMAC_CLK_CFG,
SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT,
SPEAR1310_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL);
+ clk_register_clkdev(clk, "phy_input_mclk", NULL);
- clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk",
- "gmac_phy_input_mux_clk", 0, SPEAR1310_GMAC_CLK_SYNT,
- NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL);
- clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL);
+ clk = clk_register_aux("phy_syn_clk", "phy_syn_gclk", "phy_input_mclk",
+ 0, SPEAR1310_GMAC_CLK_SYNT, NULL, gmac_rtbl,
+ ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "phy_syn_clk", NULL);
+ clk_register_clkdev(clk1, "phy_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents,
+ clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents,
ARRAY_SIZE(gmac_phy_parents), 0,
SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GMAC_PHY_CLK_SHIFT,
SPEAR1310_GMAC_PHY_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "stmmacphy.0");
/* clcd */
- clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents,
+ clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents,
ARRAY_SIZE(clcd_synth_parents), 0,
SPEAR1310_CLCD_CLK_SYNT, SPEAR1310_CLCD_SYNT_CLK_SHIFT,
SPEAR1310_CLCD_SYNT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL);
+ clk_register_clkdev(clk, "clcd_syn_mclk", NULL);
- clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0,
+ clk = clk_register_frac("clcd_syn_clk", "clcd_syn_mclk", 0,
SPEAR1310_CLCD_CLK_SYNT, clcd_rtbl,
ARRAY_SIZE(clcd_rtbl), &_lock);
- clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+ clk_register_clkdev(clk, "clcd_syn_clk", NULL);
- clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents,
+ clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents,
ARRAY_SIZE(clcd_pixel_parents), 0,
SPEAR1310_PERIP_CLK_CFG, SPEAR1310_CLCD_CLK_SHIFT,
SPEAR1310_CLCD_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
- clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0,
+ clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mclk", 0,
SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CLCD_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, "clcd_clk", NULL);
/* i2s */
- clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents,
+ clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents,
ARRAY_SIZE(i2s_src_parents), 0, SPEAR1310_I2S_CLK_CFG,
SPEAR1310_I2S_SRC_CLK_SHIFT, SPEAR1310_I2S_SRC_CLK_MASK,
0, &_lock);
clk_register_clkdev(clk, "i2s_src_clk", NULL);
- clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0,
+ clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk", 0,
SPEAR1310_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
- clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents,
+ clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents,
ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1310_I2S_CLK_CFG,
SPEAR1310_I2S_REF_SHIFT, SPEAR1310_I2S_REF_SEL_MASK, 0,
&_lock);
clk_register_clkdev(clk, "i2s_ref_clk", NULL);
- clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0,
+ clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mclk", 0,
SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_I2S_REF_PAD_CLK_ENB,
0, &_lock);
clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
- clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk",
+ clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gclk",
"i2s_ref_pad_clk", 0, SPEAR1310_I2S_CLK_CFG,
&i2s_sclk_masks, i2s_sclk_rtbl,
ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
- clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL);
+ clk_register_clkdev(clk1, "i2s_sclk_gclk", NULL);
/* clock derived from ahb clk */
clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
@@ -747,13 +745,13 @@ void __init spear1310_clk_init(void)
&_lock);
clk_register_clkdev(clk, "sysram1_clk", NULL);
- clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk",
+ clk = clk_register_aux("adc_syn_clk", "adc_syn_gclk", "ahb_clk",
0, SPEAR1310_ADC_CLK_SYNT, NULL, adc_rtbl,
ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "adc_synth_clk", NULL);
- clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL);
+ clk_register_clkdev(clk, "adc_syn_clk", NULL);
+ clk_register_clkdev(clk1, "adc_syn_gclk", NULL);
- clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0,
+ clk = clk_register_gate(NULL, "adc_clk", "adc_syn_gclk", 0,
SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_ADC_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "adc_clk");
@@ -790,37 +788,37 @@ void __init spear1310_clk_init(void)
clk_register_clkdev(clk, NULL, "e0300000.kbd");
/* RAS clks */
- clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk",
- gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents),
- 0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT0_1_CLK_SHIFT,
+ clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents,
+ ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1310_PLL_CFG,
+ SPEAR1310_RAS_SYNT0_1_CLK_SHIFT,
SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gen_synth0_1_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn0_1_clk", NULL);
- clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk",
- gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents),
- 0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT2_3_CLK_SHIFT,
+ clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents,
+ ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1310_PLL_CFG,
+ SPEAR1310_RAS_SYNT2_3_CLK_SHIFT,
SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gen_synth2_3_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn2_3_clk", NULL);
- clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0,
+ clk = clk_register_frac("gen_syn0_clk", "gen_syn0_1_clk", 0,
SPEAR1310_RAS_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
&_lock);
- clk_register_clkdev(clk, "gen_synth0_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn0_clk", NULL);
- clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0,
+ clk = clk_register_frac("gen_syn1_clk", "gen_syn0_1_clk", 0,
SPEAR1310_RAS_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
&_lock);
- clk_register_clkdev(clk, "gen_synth1_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn1_clk", NULL);
- clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0,
+ clk = clk_register_frac("gen_syn2_clk", "gen_syn2_3_clk", 0,
SPEAR1310_RAS_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
&_lock);
- clk_register_clkdev(clk, "gen_synth2_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn2_clk", NULL);
- clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0,
+ clk = clk_register_frac("gen_syn3_clk", "gen_syn2_3_clk", 0,
SPEAR1310_RAS_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
&_lock);
- clk_register_clkdev(clk, "gen_synth3_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn3_clk", NULL);
clk = clk_register_gate(NULL, "ras_osc_24m_clk", "osc_24m_clk", 0,
SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_24M_CLK_ENB, 0,
@@ -847,7 +845,7 @@ void __init spear1310_clk_init(void)
&_lock);
clk_register_clkdev(clk, "ras_pll3_clk", NULL);
- clk = clk_register_gate(NULL, "ras_tx125_clk", "gmii_125m_pad_clk", 0,
+ clk = clk_register_gate(NULL, "ras_tx125_clk", "gmii_pad_clk", 0,
SPEAR1310_RAS_CLK_ENB, SPEAR1310_C125M_PAD_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, "ras_tx125_clk", NULL);
@@ -912,7 +910,7 @@ void __init spear1310_clk_init(void)
&_lock);
clk_register_clkdev(clk, NULL, "5c700000.eth");
- clk = clk_register_mux(NULL, "smii_rgmii_phy_mux_clk",
+ clk = clk_register_mux(NULL, "smii_rgmii_phy_mclk",
smii_rgmii_phy_parents,
ARRAY_SIZE(smii_rgmii_phy_parents), 0,
SPEAR1310_RAS_CTRL_REG1,
@@ -922,184 +920,184 @@ void __init spear1310_clk_init(void)
clk_register_clkdev(clk, NULL, "stmmacphy.2");
clk_register_clkdev(clk, NULL, "stmmacphy.4");
- clk = clk_register_mux(NULL, "rmii_phy_mux_clk", rmii_phy_parents,
+ clk = clk_register_mux(NULL, "rmii_phy_mclk", rmii_phy_parents,
ARRAY_SIZE(rmii_phy_parents), 0,
SPEAR1310_RAS_CTRL_REG1, SPEAR1310_RMII_PHY_CLK_SHIFT,
SPEAR1310_PHY_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "stmmacphy.3");
- clk = clk_register_mux(NULL, "uart1_mux_clk", uart_parents,
+ clk = clk_register_mux(NULL, "uart1_mclk", uart_parents,
ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_UART1_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
0, &_lock);
- clk_register_clkdev(clk, "uart1_mux_clk", NULL);
+ clk_register_clkdev(clk, "uart1_mclk", NULL);
- clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0,
+ clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART1_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5c800000.serial");
- clk = clk_register_mux(NULL, "uart2_mux_clk", uart_parents,
+ clk = clk_register_mux(NULL, "uart2_mclk", uart_parents,
ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_UART2_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
0, &_lock);
- clk_register_clkdev(clk, "uart2_mux_clk", NULL);
+ clk_register_clkdev(clk, "uart2_mclk", NULL);
- clk = clk_register_gate(NULL, "uart2_clk", "uart2_mux_clk", 0,
+ clk = clk_register_gate(NULL, "uart2_clk", "uart2_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART2_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5c900000.serial");
- clk = clk_register_mux(NULL, "uart3_mux_clk", uart_parents,
+ clk = clk_register_mux(NULL, "uart3_mclk", uart_parents,
ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_UART3_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
0, &_lock);
- clk_register_clkdev(clk, "uart3_mux_clk", NULL);
+ clk_register_clkdev(clk, "uart3_mclk", NULL);
- clk = clk_register_gate(NULL, "uart3_clk", "uart3_mux_clk", 0,
+ clk = clk_register_gate(NULL, "uart3_clk", "uart3_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART3_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5ca00000.serial");
- clk = clk_register_mux(NULL, "uart4_mux_clk", uart_parents,
+ clk = clk_register_mux(NULL, "uart4_mclk", uart_parents,
ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_UART4_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
0, &_lock);
- clk_register_clkdev(clk, "uart4_mux_clk", NULL);
+ clk_register_clkdev(clk, "uart4_mclk", NULL);
- clk = clk_register_gate(NULL, "uart4_clk", "uart4_mux_clk", 0,
+ clk = clk_register_gate(NULL, "uart4_clk", "uart4_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART4_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5cb00000.serial");
- clk = clk_register_mux(NULL, "uart5_mux_clk", uart_parents,
+ clk = clk_register_mux(NULL, "uart5_mclk", uart_parents,
ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_UART5_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
0, &_lock);
- clk_register_clkdev(clk, "uart5_mux_clk", NULL);
+ clk_register_clkdev(clk, "uart5_mclk", NULL);
- clk = clk_register_gate(NULL, "uart5_clk", "uart5_mux_clk", 0,
+ clk = clk_register_gate(NULL, "uart5_clk", "uart5_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART5_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5cc00000.serial");
- clk = clk_register_mux(NULL, "i2c1_mux_clk", i2c_parents,
+ clk = clk_register_mux(NULL, "i2c1_mclk", i2c_parents,
ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_I2C1_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "i2c1_mux_clk", NULL);
+ clk_register_clkdev(clk, "i2c1_mclk", NULL);
- clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mux_clk", 0,
+ clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C1_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5cd00000.i2c");
- clk = clk_register_mux(NULL, "i2c2_mux_clk", i2c_parents,
+ clk = clk_register_mux(NULL, "i2c2_mclk", i2c_parents,
ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_I2C2_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "i2c2_mux_clk", NULL);
+ clk_register_clkdev(clk, "i2c2_mclk", NULL);
- clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mux_clk", 0,
+ clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C2_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5ce00000.i2c");
- clk = clk_register_mux(NULL, "i2c3_mux_clk", i2c_parents,
+ clk = clk_register_mux(NULL, "i2c3_mclk", i2c_parents,
ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_I2C3_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "i2c3_mux_clk", NULL);
+ clk_register_clkdev(clk, "i2c3_mclk", NULL);
- clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mux_clk", 0,
+ clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C3_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5cf00000.i2c");
- clk = clk_register_mux(NULL, "i2c4_mux_clk", i2c_parents,
+ clk = clk_register_mux(NULL, "i2c4_mclk", i2c_parents,
ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_I2C4_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "i2c4_mux_clk", NULL);
+ clk_register_clkdev(clk, "i2c4_mclk", NULL);
- clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mux_clk", 0,
+ clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C4_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5d000000.i2c");
- clk = clk_register_mux(NULL, "i2c5_mux_clk", i2c_parents,
+ clk = clk_register_mux(NULL, "i2c5_mclk", i2c_parents,
ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_I2C5_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "i2c5_mux_clk", NULL);
+ clk_register_clkdev(clk, "i2c5_mclk", NULL);
- clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mux_clk", 0,
+ clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C5_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5d100000.i2c");
- clk = clk_register_mux(NULL, "i2c6_mux_clk", i2c_parents,
+ clk = clk_register_mux(NULL, "i2c6_mclk", i2c_parents,
ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_I2C6_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "i2c6_mux_clk", NULL);
+ clk_register_clkdev(clk, "i2c6_mclk", NULL);
- clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mux_clk", 0,
+ clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C6_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5d200000.i2c");
- clk = clk_register_mux(NULL, "i2c7_mux_clk", i2c_parents,
+ clk = clk_register_mux(NULL, "i2c7_mclk", i2c_parents,
ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_I2C7_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "i2c7_mux_clk", NULL);
+ clk_register_clkdev(clk, "i2c7_mclk", NULL);
- clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mux_clk", 0,
+ clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C7_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5d300000.i2c");
- clk = clk_register_mux(NULL, "ssp1_mux_clk", ssp1_parents,
+ clk = clk_register_mux(NULL, "ssp1_mclk", ssp1_parents,
ARRAY_SIZE(ssp1_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_SSP1_CLK_SHIFT, SPEAR1310_SSP1_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "ssp1_mux_clk", NULL);
+ clk_register_clkdev(clk, "ssp1_mclk", NULL);
- clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mux_clk", 0,
+ clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_SSP1_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5d400000.spi");
- clk = clk_register_mux(NULL, "pci_mux_clk", pci_parents,
+ clk = clk_register_mux(NULL, "pci_mclk", pci_parents,
ARRAY_SIZE(pci_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_PCI_CLK_SHIFT, SPEAR1310_PCI_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "pci_mux_clk", NULL);
+ clk_register_clkdev(clk, "pci_mclk", NULL);
- clk = clk_register_gate(NULL, "pci_clk", "pci_mux_clk", 0,
+ clk = clk_register_gate(NULL, "pci_clk", "pci_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_PCI_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "pci");
- clk = clk_register_mux(NULL, "tdm1_mux_clk", tdm_parents,
+ clk = clk_register_mux(NULL, "tdm1_mclk", tdm_parents,
ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_TDM1_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "tdm1_mux_clk", NULL);
+ clk_register_clkdev(clk, "tdm1_mclk", NULL);
- clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mux_clk", 0,
+ clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM1_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "tdm_hdlc.0");
- clk = clk_register_mux(NULL, "tdm2_mux_clk", tdm_parents,
+ clk = clk_register_mux(NULL, "tdm2_mclk", tdm_parents,
ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_TDM2_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "tdm2_mux_clk", NULL);
+ clk_register_clkdev(clk, "tdm2_mclk", NULL);
- clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mux_clk", 0,
+ clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM2_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "tdm_hdlc.1");
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index f130919..2352cee 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -4,7 +4,7 @@
* SPEAr1340 machine clock framework source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -369,27 +369,25 @@ static struct frac_rate_tbl gen_rtbl[] = {
/* clock parents */
static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
-static const char *sys_parents[] = { "none", "pll1_clk", "none", "none",
- "sys_synth_clk", "none", "pll2_clk", "pll3_clk", };
-static const char *ahb_parents[] = { "cpu_div3_clk", "amba_synth_clk", };
+static const char *sys_parents[] = { "pll1_clk", "pll1_clk", "pll1_clk",
+ "pll1_clk", "sys_synth_clk", "sys_synth_clk", "pll2_clk", "pll3_clk", };
+static const char *ahb_parents[] = { "cpu_div3_clk", "amba_syn_clk", };
static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
static const char *uart0_parents[] = { "pll5_clk", "osc_24m_clk",
- "uart0_synth_gate_clk", };
+ "uart0_syn_gclk", };
static const char *uart1_parents[] = { "pll5_clk", "osc_24m_clk",
- "uart1_synth_gate_clk", };
-static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", };
-static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk",
+ "uart1_syn_gclk", };
+static const char *c3_parents[] = { "pll5_clk", "c3_syn_gclk", };
+static const char *gmac_phy_input_parents[] = { "gmii_pad_clk", "pll2_clk",
"osc_25m_clk", };
-static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk",
- "gmac_phy_synth_gate_clk", };
+static const char *gmac_phy_parents[] = { "phy_input_mclk", "phy_syn_gclk", };
static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
-static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", };
+static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_syn_clk", };
static const char *i2s_src_parents[] = { "vco1div2_clk", "pll2_clk", "pll3_clk",
"i2s_src_pad_clk", };
-static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", };
-static const char *spdif_out_parents[] = { "i2s_src_pad_clk", "gen_synth2_clk",
-};
-static const char *spdif_in_parents[] = { "pll2_clk", "gen_synth3_clk", };
+static const char *i2s_ref_parents[] = { "i2s_src_mclk", "i2s_prs1_clk", };
+static const char *spdif_out_parents[] = { "i2s_src_pad_clk", "gen_syn2_clk", };
+static const char *spdif_in_parents[] = { "pll2_clk", "gen_syn3_clk", };
static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
"pll3_clk", };
@@ -415,9 +413,9 @@ void __init spear1340_clk_init(void)
25000000);
clk_register_clkdev(clk, "osc_25m_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL,
- CLK_IS_ROOT, 125000000);
- clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL);
+ clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, CLK_IS_ROOT,
+ 125000000);
+ clk_register_clkdev(clk, "gmii_pad_clk", NULL);
clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
CLK_IS_ROOT, 12288000);
@@ -431,35 +429,35 @@ void __init spear1340_clk_init(void)
/* clock derived from 24 or 25 MHz osc clk */
/* vco-pll */
- clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents,
+ clk = clk_register_mux(NULL, "vco1_mclk", vco_parents,
ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
SPEAR1340_PLL1_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "vco1_mux_clk", NULL);
- clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk",
- 0, SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl,
+ clk_register_clkdev(clk, "vco1_mclk", NULL);
+ clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk", 0,
+ SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl,
ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
clk_register_clkdev(clk, "vco1_clk", NULL);
clk_register_clkdev(clk1, "pll1_clk", NULL);
- clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents,
+ clk = clk_register_mux(NULL, "vco2_mclk", vco_parents,
ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
SPEAR1340_PLL2_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "vco2_mux_clk", NULL);
- clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk",
- 0, SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl,
+ clk_register_clkdev(clk, "vco2_mclk", NULL);
+ clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk", 0,
+ SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl,
ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
clk_register_clkdev(clk, "vco2_clk", NULL);
clk_register_clkdev(clk1, "pll2_clk", NULL);
- clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents,
+ clk = clk_register_mux(NULL, "vco3_mclk", vco_parents,
ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
SPEAR1340_PLL3_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "vco3_mux_clk", NULL);
- clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk",
- 0, SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl,
+ clk_register_clkdev(clk, "vco3_mclk", NULL);
+ clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk", 0,
+ SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl,
ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
clk_register_clkdev(clk, "vco3_clk", NULL);
clk_register_clkdev(clk1, "pll3_clk", NULL);
@@ -498,7 +496,7 @@ void __init spear1340_clk_init(void)
/* peripherals */
clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
128);
- clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0,
+ clk = clk_register_gate(NULL, "thermal_gclk", "thermal_clk", 0,
SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_THSENS_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "spear_thermal");
@@ -509,23 +507,23 @@ void __init spear1340_clk_init(void)
clk_register_clkdev(clk, "ddr_clk", NULL);
/* clock derived from pll1 clk */
- clk = clk_register_frac("sys_synth_clk", "vco1div2_clk", 0,
+ clk = clk_register_frac("sys_syn_clk", "vco1div2_clk", 0,
SPEAR1340_SYS_CLK_SYNT, sys_synth_rtbl,
ARRAY_SIZE(sys_synth_rtbl), &_lock);
- clk_register_clkdev(clk, "sys_synth_clk", NULL);
+ clk_register_clkdev(clk, "sys_syn_clk", NULL);
- clk = clk_register_frac("amba_synth_clk", "vco1div2_clk", 0,
+ clk = clk_register_frac("amba_syn_clk", "vco1div2_clk", 0,
SPEAR1340_AMBA_CLK_SYNT, amba_synth_rtbl,
ARRAY_SIZE(amba_synth_rtbl), &_lock);
- clk_register_clkdev(clk, "amba_synth_clk", NULL);
+ clk_register_clkdev(clk, "amba_syn_clk", NULL);
- clk = clk_register_mux(NULL, "sys_mux_clk", sys_parents,
+ clk = clk_register_mux(NULL, "sys_mclk", sys_parents,
ARRAY_SIZE(sys_parents), 0, SPEAR1340_SYS_CLK_CTRL,
SPEAR1340_SCLK_SRC_SEL_SHIFT,
SPEAR1340_SCLK_SRC_SEL_MASK, 0, &_lock);
clk_register_clkdev(clk, "sys_clk", NULL);
- clk = clk_register_fixed_factor(NULL, "cpu_clk", "sys_mux_clk", 0, 1,
+ clk = clk_register_fixed_factor(NULL, "cpu_clk", "sys_mclk", 0, 1,
2);
clk_register_clkdev(clk, "cpu_clk", NULL);
@@ -548,194 +546,193 @@ void __init spear1340_clk_init(void)
clk_register_clkdev(clk, "apb_clk", NULL);
/* gpt clocks */
- clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents,
+ clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents,
ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
SPEAR1340_GPT0_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "gpt0_mux_clk", NULL);
- clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0,
+ clk_register_clkdev(clk, "gpt0_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0,
SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT0_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "gpt0");
- clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents,
+ clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents,
ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
SPEAR1340_GPT1_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
- clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+ clk_register_clkdev(clk, "gpt1_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT1_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "gpt1");
- clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents,
+ clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents,
ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
SPEAR1340_GPT2_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
- clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+ clk_register_clkdev(clk, "gpt2_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT2_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "gpt2");
- clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents,
+ clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents,
ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
SPEAR1340_GPT3_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
- clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+ clk_register_clkdev(clk, "gpt3_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT3_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "gpt3");
/* others */
- clk = clk_register_aux("uart0_synth_clk", "uart0_synth_gate_clk",
+ clk = clk_register_aux("uart0_syn_clk", "uart0_syn_gclk",
"vco1div2_clk", 0, SPEAR1340_UART0_CLK_SYNT, NULL,
aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "uart0_synth_clk", NULL);
- clk_register_clkdev(clk1, "uart0_synth_gate_clk", NULL);
+ clk_register_clkdev(clk, "uart0_syn_clk", NULL);
+ clk_register_clkdev(clk1, "uart0_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+ clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
ARRAY_SIZE(uart0_parents), 0, SPEAR1340_PERIP_CLK_CFG,
SPEAR1340_UART0_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+ clk_register_clkdev(clk, "uart0_mclk", NULL);
- clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0,
+ clk = clk_register_gate(NULL, "uart0_clk", "uart0_mclk", 0,
SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART0_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "e0000000.serial");
- clk = clk_register_aux("uart1_synth_clk", "uart1_synth_gate_clk",
+ clk = clk_register_aux("uart1_syn_clk", "uart1_syn_gclk",
"vco1div2_clk", 0, SPEAR1340_UART1_CLK_SYNT, NULL,
aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "uart1_synth_clk", NULL);
- clk_register_clkdev(clk1, "uart1_synth_gate_clk", NULL);
+ clk_register_clkdev(clk, "uart1_syn_clk", NULL);
+ clk_register_clkdev(clk1, "uart1_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "uart1_mux_clk", uart1_parents,
+ clk = clk_register_mux(NULL, "uart1_mclk", uart1_parents,
ARRAY_SIZE(uart1_parents), 0, SPEAR1340_PERIP_CLK_CFG,
SPEAR1340_UART1_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "uart1_mux_clk", NULL);
+ clk_register_clkdev(clk, "uart1_mclk", NULL);
- clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0,
- SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART1_CLK_ENB, 0,
+ clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_UART1_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "b4100000.serial");
- clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk",
+ clk = clk_register_aux("sdhci_syn_clk", "sdhci_syn_gclk",
"vco1div2_clk", 0, SPEAR1340_SDHCI_CLK_SYNT, NULL,
aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "sdhci_synth_clk", NULL);
- clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL);
+ clk_register_clkdev(clk, "sdhci_syn_clk", NULL);
+ clk_register_clkdev(clk1, "sdhci_syn_gclk", NULL);
- clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0,
+ clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_syn_gclk", 0,
SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SDHCI_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "b3000000.sdhci");
- clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk",
- "vco1div2_clk", 0, SPEAR1340_CFXD_CLK_SYNT, NULL,
- aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "cfxd_synth_clk", NULL);
- clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL);
+ clk = clk_register_aux("cfxd_syn_clk", "cfxd_syn_gclk", "vco1div2_clk",
+ 0, SPEAR1340_CFXD_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "cfxd_syn_clk", NULL);
+ clk_register_clkdev(clk1, "cfxd_syn_gclk", NULL);
- clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0,
+ clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_syn_gclk", 0,
SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CFXD_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "b2800000.cf");
clk_register_clkdev(clk, NULL, "arasan_xd");
- clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk",
- "vco1div2_clk", 0, SPEAR1340_C3_CLK_SYNT, NULL,
- aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "c3_synth_clk", NULL);
- clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL);
+ clk = clk_register_aux("c3_syn_clk", "c3_syn_gclk", "vco1div2_clk", 0,
+ SPEAR1340_C3_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "c3_syn_clk", NULL);
+ clk_register_clkdev(clk1, "c3_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents,
+ clk = clk_register_mux(NULL, "c3_mclk", c3_parents,
ARRAY_SIZE(c3_parents), 0, SPEAR1340_PERIP_CLK_CFG,
SPEAR1340_C3_CLK_SHIFT, SPEAR1340_C3_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "c3_mux_clk", NULL);
+ clk_register_clkdev(clk, "c3_mclk", NULL);
- clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0,
+ clk = clk_register_gate(NULL, "c3_clk", "c3_mclk", 0,
SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_C3_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "c3");
/* gmac */
- clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk",
- gmac_phy_input_parents,
+ clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents,
ARRAY_SIZE(gmac_phy_input_parents), 0,
SPEAR1340_GMAC_CLK_CFG,
SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT,
SPEAR1340_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL);
+ clk_register_clkdev(clk, "phy_input_mclk", NULL);
- clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk",
- "gmac_phy_input_mux_clk", 0, SPEAR1340_GMAC_CLK_SYNT,
- NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL);
- clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL);
+ clk = clk_register_aux("phy_syn_clk", "phy_syn_gclk", "phy_input_mclk",
+ 0, SPEAR1340_GMAC_CLK_SYNT, NULL, gmac_rtbl,
+ ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "phy_syn_clk", NULL);
+ clk_register_clkdev(clk1, "phy_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents,
+ clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents,
ARRAY_SIZE(gmac_phy_parents), 0,
SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GMAC_PHY_CLK_SHIFT,
SPEAR1340_GMAC_PHY_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "stmmacphy.0");
/* clcd */
- clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents,
+ clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents,
ARRAY_SIZE(clcd_synth_parents), 0,
SPEAR1340_CLCD_CLK_SYNT, SPEAR1340_CLCD_SYNT_CLK_SHIFT,
SPEAR1340_CLCD_SYNT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL);
+ clk_register_clkdev(clk, "clcd_syn_mclk", NULL);
- clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0,
+ clk = clk_register_frac("clcd_syn_clk", "clcd_syn_mclk", 0,
SPEAR1340_CLCD_CLK_SYNT, clcd_rtbl,
ARRAY_SIZE(clcd_rtbl), &_lock);
- clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+ clk_register_clkdev(clk, "clcd_syn_clk", NULL);
- clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents,
+ clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents,
ARRAY_SIZE(clcd_pixel_parents), 0,
SPEAR1340_PERIP_CLK_CFG, SPEAR1340_CLCD_CLK_SHIFT,
SPEAR1340_CLCD_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
- clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0,
+ clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mclk", 0,
SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CLCD_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, "clcd_clk", NULL);
/* i2s */
- clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents,
+ clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents,
ARRAY_SIZE(i2s_src_parents), 0, SPEAR1340_I2S_CLK_CFG,
SPEAR1340_I2S_SRC_CLK_SHIFT, SPEAR1340_I2S_SRC_CLK_MASK,
0, &_lock);
clk_register_clkdev(clk, "i2s_src_clk", NULL);
- clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0,
+ clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk", 0,
SPEAR1340_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
- clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents,
+ clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents,
ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1340_I2S_CLK_CFG,
SPEAR1340_I2S_REF_SHIFT, SPEAR1340_I2S_REF_SEL_MASK, 0,
&_lock);
clk_register_clkdev(clk, "i2s_ref_clk", NULL);
- clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0,
+ clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mclk", 0,
SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_I2S_REF_PAD_CLK_ENB,
0, &_lock);
clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
- clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk",
- "i2s_ref_mux_clk", 0, SPEAR1340_I2S_CLK_CFG,
- &i2s_sclk_masks, i2s_sclk_rtbl,
- ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
+ clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gclk", "i2s_ref_mclk",
+ 0, SPEAR1340_I2S_CLK_CFG, &i2s_sclk_masks,
+ i2s_sclk_rtbl, ARRAY_SIZE(i2s_sclk_rtbl), &_lock,
+ &clk1);
clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
- clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL);
+ clk_register_clkdev(clk1, "i2s_sclk_gclk", NULL);
/* clock derived from ahb clk */
clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
@@ -744,7 +741,7 @@ void __init spear1340_clk_init(void)
clk_register_clkdev(clk, NULL, "e0280000.i2c");
clk = clk_register_gate(NULL, "i2c1_clk", "ahb_clk", 0,
- SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2C1_CLK_ENB, 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_I2C1_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "b4000000.i2c");
@@ -800,13 +797,13 @@ void __init spear1340_clk_init(void)
&_lock);
clk_register_clkdev(clk, "sysram1_clk", NULL);
- clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk",
+ clk = clk_register_aux("adc_syn_clk", "adc_syn_gclk", "ahb_clk",
0, SPEAR1340_ADC_CLK_SYNT, NULL, adc_rtbl,
ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "adc_synth_clk", NULL);
- clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL);
+ clk_register_clkdev(clk, "adc_syn_clk", NULL);
+ clk_register_clkdev(clk1, "adc_syn_gclk", NULL);
- clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0,
+ clk = clk_register_gate(NULL, "adc_clk", "adc_syn_gclk", 0,
SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_ADC_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "adc_clk");
@@ -843,39 +840,39 @@ void __init spear1340_clk_init(void)
clk_register_clkdev(clk, NULL, "e0300000.kbd");
/* RAS clks */
- clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk",
- gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents),
- 0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT0_1_CLK_SHIFT,
+ clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents,
+ ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1340_PLL_CFG,
+ SPEAR1340_GEN_SYNT0_1_CLK_SHIFT,
SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gen_synth0_1_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn0_1_clk", NULL);
- clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk",
- gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents),
- 0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT2_3_CLK_SHIFT,
+ clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents,
+ ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1340_PLL_CFG,
+ SPEAR1340_GEN_SYNT2_3_CLK_SHIFT,
SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gen_synth2_3_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn2_3_clk", NULL);
- clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0,
+ clk = clk_register_frac("gen_syn0_clk", "gen_syn0_1_clk", 0,
SPEAR1340_GEN_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
&_lock);
- clk_register_clkdev(clk, "gen_synth0_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn0_clk", NULL);
- clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0,
+ clk = clk_register_frac("gen_syn1_clk", "gen_syn0_1_clk", 0,
SPEAR1340_GEN_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
&_lock);
- clk_register_clkdev(clk, "gen_synth1_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn1_clk", NULL);
- clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0,
+ clk = clk_register_frac("gen_syn2_clk", "gen_syn2_3_clk", 0,
SPEAR1340_GEN_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
&_lock);
- clk_register_clkdev(clk, "gen_synth2_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn2_clk", NULL);
- clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0,
+ clk = clk_register_frac("gen_syn3_clk", "gen_syn2_3_clk", 0,
SPEAR1340_GEN_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
&_lock);
- clk_register_clkdev(clk, "gen_synth3_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn3_clk", NULL);
- clk = clk_register_gate(NULL, "mali_clk", "gen_synth3_clk", 0,
+ clk = clk_register_gate(NULL, "mali_clk", "gen_syn3_clk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_MALI_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "mali");
@@ -890,74 +887,74 @@ void __init spear1340_clk_init(void)
&_lock);
clk_register_clkdev(clk, NULL, "spear_cec.1");
- clk = clk_register_mux(NULL, "spdif_out_mux_clk", spdif_out_parents,
+ clk = clk_register_mux(NULL, "spdif_out_mclk", spdif_out_parents,
ARRAY_SIZE(spdif_out_parents), 0,
SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_OUT_CLK_SHIFT,
SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "spdif_out_mux_clk", NULL);
+ clk_register_clkdev(clk, "spdif_out_mclk", NULL);
- clk = clk_register_gate(NULL, "spdif_out_clk", "spdif_out_mux_clk", 0,
+ clk = clk_register_gate(NULL, "spdif_out_clk", "spdif_out_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_OUT_CLK_ENB,
0, &_lock);
clk_register_clkdev(clk, NULL, "spdif-out");
- clk = clk_register_mux(NULL, "spdif_in_mux_clk", spdif_in_parents,
+ clk = clk_register_mux(NULL, "spdif_in_mclk", spdif_in_parents,
ARRAY_SIZE(spdif_in_parents), 0,
SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_IN_CLK_SHIFT,
SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "spdif_in_mux_clk", NULL);
+ clk_register_clkdev(clk, "spdif_in_mclk", NULL);
- clk = clk_register_gate(NULL, "spdif_in_clk", "spdif_in_mux_clk", 0,
+ clk = clk_register_gate(NULL, "spdif_in_clk", "spdif_in_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_IN_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "spdif-in");
- clk = clk_register_gate(NULL, "acp_clk", "acp_mux_clk", 0,
+ clk = clk_register_gate(NULL, "acp_clk", "acp_mclk", 0,
SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_ACP_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "acp_clk");
- clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mux_clk", 0,
+ clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PLGPIO_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "plgpio");
- clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mux_clk", 0,
+ clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_DEC_CLK_ENB,
0, &_lock);
clk_register_clkdev(clk, NULL, "video_dec");
- clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mux_clk", 0,
+ clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_ENC_CLK_ENB,
0, &_lock);
clk_register_clkdev(clk, NULL, "video_enc");
- clk = clk_register_gate(NULL, "video_in_clk", "video_in_mux_clk", 0,
+ clk = clk_register_gate(NULL, "video_in_clk", "video_in_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_IN_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "spear_vip");
- clk = clk_register_gate(NULL, "cam0_clk", "cam0_mux_clk", 0,
+ clk = clk_register_gate(NULL, "cam0_clk", "cam0_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM0_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "spear_camif.0");
- clk = clk_register_gate(NULL, "cam1_clk", "cam1_mux_clk", 0,
+ clk = clk_register_gate(NULL, "cam1_clk", "cam1_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM1_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "spear_camif.1");
- clk = clk_register_gate(NULL, "cam2_clk", "cam2_mux_clk", 0,
+ clk = clk_register_gate(NULL, "cam2_clk", "cam2_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM2_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "spear_camif.2");
- clk = clk_register_gate(NULL, "cam3_clk", "cam3_mux_clk", 0,
+ clk = clk_register_gate(NULL, "cam3_clk", "cam3_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM3_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "spear_camif.3");
- clk = clk_register_gate(NULL, "pwm_clk", "pwm_mux_clk", 0,
+ clk = clk_register_gate(NULL, "pwm_clk", "pwm_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PWM_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "pwm");
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index 440bb3e..c315745 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -2,7 +2,7 @@
* SPEAr3xx machines clock framework source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -122,12 +122,12 @@ static struct gpt_rate_tbl gpt_rtbl[] = {
};
/* clock parents */
-static const char *uart0_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", };
-static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk",
+static const char *uart0_parents[] = { "pll3_clk", "uart_syn_gclk", };
+static const char *firda_parents[] = { "pll3_clk", "firda_syn_gclk",
};
-static const char *gpt0_parents[] = { "pll3_48m_clk", "gpt0_synth_clk", };
-static const char *gpt1_parents[] = { "pll3_48m_clk", "gpt1_synth_clk", };
-static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", };
+static const char *gpt0_parents[] = { "pll3_clk", "gpt0_syn_clk", };
+static const char *gpt1_parents[] = { "pll3_clk", "gpt1_syn_clk", };
+static const char *gpt2_parents[] = { "pll3_clk", "gpt2_syn_clk", };
static const char *gen2_3_parents[] = { "pll1_clk", "pll2_clk", };
static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
"pll2_clk", };
@@ -137,7 +137,7 @@ static void __init spear300_clk_init(void)
{
struct clk *clk;
- clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0,
+ clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0,
1, 1);
clk_register_clkdev(clk, NULL, "60000000.clcd");
@@ -219,15 +219,11 @@ static void __init spear310_clk_init(void)
#define SPEAR320_UARTX_PCLK_VAL_SYNTH1 0x0
#define SPEAR320_UARTX_PCLK_VAL_APB 0x1
-static const char *i2s_ref_parents[] = { "ras_pll2_clk",
- "ras_gen2_synth_gate_clk", };
-static const char *sdhci_parents[] = { "ras_pll3_48m_clk",
- "ras_gen3_synth_gate_clk",
-};
+static const char *i2s_ref_parents[] = { "ras_pll2_clk", "ras_syn2_gclk", };
+static const char *sdhci_parents[] = { "ras_pll3_clk", "ras_syn3_gclk", };
static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk",
- "ras_gen0_synth_gate_clk", };
-static const char *uartx_parents[] = { "ras_gen1_synth_gate_clk", "ras_apb_clk",
-};
+ "ras_syn0_gclk", };
+static const char *uartx_parents[] = { "ras_syn1_gclk", "ras_apb_clk", };
static void __init spear320_clk_init(void)
{
@@ -237,7 +233,7 @@ static void __init spear320_clk_init(void)
CLK_IS_ROOT, 125000000);
clk_register_clkdev(clk, "smii_125m_pad", NULL);
- clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0,
+ clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0,
1, 1);
clk_register_clkdev(clk, NULL, "90000000.clcd");
@@ -363,9 +359,9 @@ void __init spear3xx_clk_init(void)
clk_register_clkdev(clk, NULL, "fc900000.rtc");
/* clock derived from 24 MHz osc clk */
- clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0,
+ clk = clk_register_fixed_rate(NULL, "pll3_clk", "osc_24m_clk", 0,
48000000);
- clk_register_clkdev(clk, "pll3_48m_clk", NULL);
+ clk_register_clkdev(clk, "pll3_clk", NULL);
clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_24m_clk", 0, 1,
1);
@@ -392,98 +388,98 @@ void __init spear3xx_clk_init(void)
HCLK_RATIO_MASK, 0, &_lock);
clk_register_clkdev(clk, "ahb_clk", NULL);
- clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
- "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl,
- ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "uart_synth_clk", NULL);
- clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+ clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "pll1_clk", 0,
+ UART_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "uart_syn_clk", NULL);
+ clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+ clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
ARRAY_SIZE(uart0_parents), 0, PERIP_CLK_CFG,
UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+ clk_register_clkdev(clk, "uart0_mclk", NULL);
- clk = clk_register_gate(NULL, "uart0", "uart0_mux_clk", 0,
- PERIP1_CLK_ENB, UART_CLK_ENB, 0, &_lock);
+ clk = clk_register_gate(NULL, "uart0", "uart0_mclk", 0, PERIP1_CLK_ENB,
+ UART_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "d0000000.serial");
- clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk",
- "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl,
- ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "firda_synth_clk", NULL);
- clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL);
+ clk = clk_register_aux("firda_syn_clk", "firda_syn_gclk", "pll1_clk", 0,
+ FIRDA_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "firda_syn_clk", NULL);
+ clk_register_clkdev(clk1, "firda_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents,
+ clk = clk_register_mux(NULL, "firda_mclk", firda_parents,
ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "firda_mux_clk", NULL);
+ clk_register_clkdev(clk, "firda_mclk", NULL);
- clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0,
+ clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", 0,
PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "firda");
/* gpt clocks */
- clk_register_gpt("gpt0_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
- gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk_register_gpt("gpt0_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG, gpt_rtbl,
+ ARRAY_SIZE(gpt_rtbl), &_lock);
clk = clk_register_mux(NULL, "gpt0_clk", gpt0_parents,
ARRAY_SIZE(gpt0_parents), 0, PERIP_CLK_CFG,
GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "gpt0");
- clk_register_gpt("gpt1_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
- gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
- clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt1_parents,
+ clk_register_gpt("gpt1_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG, gpt_rtbl,
+ ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk = clk_register_mux(NULL, "gpt1_mclk", gpt1_parents,
ARRAY_SIZE(gpt1_parents), 0, PERIP_CLK_CFG,
GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
- clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+ clk_register_clkdev(clk, "gpt1_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "gpt1");
- clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
- gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
- clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents,
+ clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG, gpt_rtbl,
+ ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents,
ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
- clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+ clk_register_clkdev(clk, "gpt2_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "gpt2");
/* general synths clocks */
- clk = clk_register_aux("gen0_synth_clk", "gen0_synth_gate_clk",
- "pll1_clk", 0, GEN0_CLK_SYNT, NULL, aux_rtbl,
- ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "gen0_synth_clk", NULL);
- clk_register_clkdev(clk1, "gen0_synth_gate_clk", NULL);
-
- clk = clk_register_aux("gen1_synth_clk", "gen1_synth_gate_clk",
- "pll1_clk", 0, GEN1_CLK_SYNT, NULL, aux_rtbl,
- ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "gen1_synth_clk", NULL);
- clk_register_clkdev(clk1, "gen1_synth_gate_clk", NULL);
-
- clk = clk_register_mux(NULL, "gen2_3_parent_clk", gen2_3_parents,
+ clk = clk_register_aux("gen0_syn_clk", "gen0_syn_gclk", "pll1_clk",
+ 0, GEN0_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "gen0_syn_clk", NULL);
+ clk_register_clkdev(clk1, "gen0_syn_gclk", NULL);
+
+ clk = clk_register_aux("gen1_syn_clk", "gen1_syn_gclk", "pll1_clk",
+ 0, GEN1_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "gen1_syn_clk", NULL);
+ clk_register_clkdev(clk1, "gen1_syn_gclk", NULL);
+
+ clk = clk_register_mux(NULL, "gen2_3_par_clk", gen2_3_parents,
ARRAY_SIZE(gen2_3_parents), 0, CORE_CLK_CFG,
GEN_SYNTH2_3_CLK_SHIFT, GEN_SYNTH2_3_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "gen2_3_parent_clk", NULL);
+ clk_register_clkdev(clk, "gen2_3_par_clk", NULL);
- clk = clk_register_aux("gen2_synth_clk", "gen2_synth_gate_clk",
- "gen2_3_parent_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl,
+ clk = clk_register_aux("gen2_syn_clk", "gen2_syn_gclk",
+ "gen2_3_par_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl,
ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "gen2_synth_clk", NULL);
- clk_register_clkdev(clk1, "gen2_synth_gate_clk", NULL);
+ clk_register_clkdev(clk, "gen2_syn_clk", NULL);
+ clk_register_clkdev(clk1, "gen2_syn_gclk", NULL);
- clk = clk_register_aux("gen3_synth_clk", "gen3_synth_gate_clk",
- "gen2_3_parent_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl,
+ clk = clk_register_aux("gen3_syn_clk", "gen3_syn_gclk",
+ "gen2_3_par_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl,
ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "gen3_synth_clk", NULL);
- clk_register_clkdev(clk1, "gen3_synth_gate_clk", NULL);
+ clk_register_clkdev(clk, "gen3_syn_clk", NULL);
+ clk_register_clkdev(clk1, "gen3_syn_gclk", NULL);
/* clock derived from pll3 clk */
- clk = clk_register_gate(NULL, "usbh_clk", "pll3_48m_clk", 0,
- PERIP1_CLK_ENB, USBH_CLK_ENB, 0, &_lock);
+ clk = clk_register_gate(NULL, "usbh_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
+ USBH_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, "usbh_clk", NULL);
clk = clk_register_fixed_factor(NULL, "usbh.0_clk", "usbh_clk", 0, 1,
@@ -494,8 +490,8 @@ void __init spear3xx_clk_init(void)
1);
clk_register_clkdev(clk, "usbh.1_clk", NULL);
- clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0,
- PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock);
+ clk = clk_register_gate(NULL, "usbd_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
+ USBD_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "designware_udc");
/* clock derived from ahb clk */
@@ -579,29 +575,25 @@ void __init spear3xx_clk_init(void)
RAS_CLK_ENB, RAS_PLL2_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, "ras_pll2_clk", NULL);
- clk = clk_register_gate(NULL, "ras_pll3_48m_clk", "pll3_48m_clk", 0,
+ clk = clk_register_gate(NULL, "ras_pll3_clk", "pll3_clk", 0,
RAS_CLK_ENB, RAS_48M_CLK_ENB, 0, &_lock);
- clk_register_clkdev(clk, "ras_pll3_48m_clk", NULL);
-
- clk = clk_register_gate(NULL, "ras_gen0_synth_gate_clk",
- "gen0_synth_gate_clk", 0, RAS_CLK_ENB,
- RAS_SYNT0_CLK_ENB, 0, &_lock);
- clk_register_clkdev(clk, "ras_gen0_synth_gate_clk", NULL);
-
- clk = clk_register_gate(NULL, "ras_gen1_synth_gate_clk",
- "gen1_synth_gate_clk", 0, RAS_CLK_ENB,
- RAS_SYNT1_CLK_ENB, 0, &_lock);
- clk_register_clkdev(clk, "ras_gen1_synth_gate_clk", NULL);
-
- clk = clk_register_gate(NULL, "ras_gen2_synth_gate_clk",
- "gen2_synth_gate_clk", 0, RAS_CLK_ENB,
- RAS_SYNT2_CLK_ENB, 0, &_lock);
- clk_register_clkdev(clk, "ras_gen2_synth_gate_clk", NULL);
-
- clk = clk_register_gate(NULL, "ras_gen3_synth_gate_clk",
- "gen3_synth_gate_clk", 0, RAS_CLK_ENB,
- RAS_SYNT3_CLK_ENB, 0, &_lock);
- clk_register_clkdev(clk, "ras_gen3_synth_gate_clk", NULL);
+ clk_register_clkdev(clk, "ras_pll3_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_syn0_gclk", "gen0_syn_gclk", 0,
+ RAS_CLK_ENB, RAS_SYNT0_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_syn0_gclk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_syn1_gclk", "gen1_syn_gclk", 0,
+ RAS_CLK_ENB, RAS_SYNT1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_syn1_gclk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_syn2_gclk", "gen2_syn_gclk", 0,
+ RAS_CLK_ENB, RAS_SYNT2_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_syn2_gclk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_syn3_gclk", "gen3_syn_gclk", 0,
+ RAS_CLK_ENB, RAS_SYNT3_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_syn3_gclk", NULL);
if (of_machine_is_compatible("st,spear300"))
spear300_clk_init();
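Editor's note: in the SPEAr3xx chains above, each peripheral clock is a mux (renamed to *_mclk) feeding a gate, and parent selection still goes through the common clk API. A hedged sketch, assuming the lookup strings registered above and omitting reference cleanup for brevity:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/errno.h>

/* Hypothetical helper: route the UART0 mux to the synthesizer gate output. */
static int example_select_uart0_synth_parent(void)
{
	struct clk *mux = clk_get(NULL, "uart0_mclk");
	struct clk *parent = clk_get(NULL, "uart_syn_gclk");

	if (IS_ERR(mux) || IS_ERR(parent))
		return -ENODEV;

	return clk_set_parent(mux, parent);
}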
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index f9a20b3..a98d086 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -2,7 +2,7 @@
* SPEAr6xx machines clock framework source file
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -97,13 +97,12 @@ static struct aux_rate_tbl aux_rtbl[] = {
{.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
};
-static const char *clcd_parents[] = { "pll3_48m_clk", "clcd_synth_gate_clk", };
-static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk",
-};
-static const char *uart_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", };
-static const char *gpt0_1_parents[] = { "pll3_48m_clk", "gpt0_1_synth_clk", };
-static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", };
-static const char *gpt3_parents[] = { "pll3_48m_clk", "gpt3_synth_clk", };
+static const char *clcd_parents[] = { "pll3_clk", "clcd_syn_gclk", };
+static const char *firda_parents[] = { "pll3_clk", "firda_syn_gclk", };
+static const char *uart_parents[] = { "pll3_clk", "uart_syn_gclk", };
+static const char *gpt0_1_parents[] = { "pll3_clk", "gpt0_1_syn_clk", };
+static const char *gpt2_parents[] = { "pll3_clk", "gpt2_syn_clk", };
+static const char *gpt3_parents[] = { "pll3_clk", "gpt3_syn_clk", };
static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
"pll2_clk", };
@@ -136,9 +135,9 @@ void __init spear6xx_clk_init(void)
clk_register_clkdev(clk, NULL, "rtc-spear");
/* clock derived from 30 MHz osc clk */
- clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0,
+ clk = clk_register_fixed_rate(NULL, "pll3_clk", "osc_24m_clk", 0,
48000000);
- clk_register_clkdev(clk, "pll3_48m_clk", NULL);
+ clk_register_clkdev(clk, "pll3_clk", NULL);
clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "osc_30m_clk",
0, PLL1_CTR, PLL1_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl),
@@ -146,9 +145,9 @@ void __init spear6xx_clk_init(void)
clk_register_clkdev(clk, "vco1_clk", NULL);
clk_register_clkdev(clk1, "pll1_clk", NULL);
- clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL,
- "osc_30m_clk", 0, PLL2_CTR, PLL2_FRQ, pll_rtbl,
- ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "osc_30m_clk",
+ 0, PLL2_CTR, PLL2_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl),
+ &_lock, &clk1, NULL);
clk_register_clkdev(clk, "vco2_clk", NULL);
clk_register_clkdev(clk1, "pll2_clk", NULL);
@@ -165,111 +164,111 @@ void __init spear6xx_clk_init(void)
HCLK_RATIO_MASK, 0, &_lock);
clk_register_clkdev(clk, "ahb_clk", NULL);
- clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
- "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl,
- ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "uart_synth_clk", NULL);
- clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+ clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "pll1_clk", 0,
+ UART_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "uart_syn_clk", NULL);
+ clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "uart_mux_clk", uart_parents,
+ clk = clk_register_mux(NULL, "uart_mclk", uart_parents,
ARRAY_SIZE(uart_parents), 0, PERIP_CLK_CFG,
UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "uart_mux_clk", NULL);
+ clk_register_clkdev(clk, "uart_mclk", NULL);
- clk = clk_register_gate(NULL, "uart0", "uart_mux_clk", 0,
- PERIP1_CLK_ENB, UART0_CLK_ENB, 0, &_lock);
+ clk = clk_register_gate(NULL, "uart0", "uart_mclk", 0, PERIP1_CLK_ENB,
+ UART0_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "d0000000.serial");
- clk = clk_register_gate(NULL, "uart1", "uart_mux_clk", 0,
- PERIP1_CLK_ENB, UART1_CLK_ENB, 0, &_lock);
+ clk = clk_register_gate(NULL, "uart1", "uart_mclk", 0, PERIP1_CLK_ENB,
+ UART1_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "d0080000.serial");
- clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk",
- "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl,
- ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "firda_synth_clk", NULL);
- clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL);
+ clk = clk_register_aux("firda_syn_clk", "firda_syn_gclk", "pll1_clk",
+ 0, FIRDA_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "firda_syn_clk", NULL);
+ clk_register_clkdev(clk1, "firda_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents,
+ clk = clk_register_mux(NULL, "firda_mclk", firda_parents,
ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "firda_mux_clk", NULL);
+ clk_register_clkdev(clk, "firda_mclk", NULL);
- clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0,
+ clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", 0,
PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "firda");
- clk = clk_register_aux("clcd_synth_clk", "clcd_synth_gate_clk",
- "pll1_clk", 0, CLCD_CLK_SYNT, NULL, aux_rtbl,
- ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "clcd_synth_clk", NULL);
- clk_register_clkdev(clk1, "clcd_synth_gate_clk", NULL);
+ clk = clk_register_aux("clcd_syn_clk", "clcd_syn_gclk", "pll1_clk",
+ 0, CLCD_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "clcd_syn_clk", NULL);
+ clk_register_clkdev(clk1, "clcd_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "clcd_mux_clk", clcd_parents,
+ clk = clk_register_mux(NULL, "clcd_mclk", clcd_parents,
ARRAY_SIZE(clcd_parents), 0, PERIP_CLK_CFG,
CLCD_CLK_SHIFT, CLCD_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "clcd_mux_clk", NULL);
+ clk_register_clkdev(clk, "clcd_mclk", NULL);
- clk = clk_register_gate(NULL, "clcd_clk", "clcd_mux_clk", 0,
+ clk = clk_register_gate(NULL, "clcd_clk", "clcd_mclk", 0,
PERIP1_CLK_ENB, CLCD_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "clcd");
/* gpt clocks */
- clk = clk_register_gpt("gpt0_1_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
+ clk = clk_register_gpt("gpt0_1_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
- clk_register_clkdev(clk, "gpt0_1_synth_clk", NULL);
+ clk_register_clkdev(clk, "gpt0_1_syn_clk", NULL);
- clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt0_1_parents,
+ clk = clk_register_mux(NULL, "gpt0_mclk", gpt0_1_parents,
ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "gpt0");
- clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt0_1_parents,
+ clk = clk_register_mux(NULL, "gpt1_mclk", gpt0_1_parents,
ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+ clk_register_clkdev(clk, "gpt1_mclk", NULL);
- clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+ clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "gpt1");
- clk = clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
+ clk = clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
- clk_register_clkdev(clk, "gpt2_synth_clk", NULL);
+ clk_register_clkdev(clk, "gpt2_syn_clk", NULL);
- clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents,
+ clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents,
ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+ clk_register_clkdev(clk, "gpt2_mclk", NULL);
- clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+ clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "gpt2");
- clk = clk_register_gpt("gpt3_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
+ clk = clk_register_gpt("gpt3_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
- clk_register_clkdev(clk, "gpt3_synth_clk", NULL);
+ clk_register_clkdev(clk, "gpt3_syn_clk", NULL);
- clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt3_parents,
+ clk = clk_register_mux(NULL, "gpt3_mclk", gpt3_parents,
ARRAY_SIZE(gpt3_parents), 0, PERIP_CLK_CFG,
GPT3_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
+ clk_register_clkdev(clk, "gpt3_mclk", NULL);
- clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+ clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
PERIP1_CLK_ENB, GPT3_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "gpt3");
/* clock derived from pll3 clk */
- clk = clk_register_gate(NULL, "usbh0_clk", "pll3_48m_clk", 0,
+ clk = clk_register_gate(NULL, "usbh0_clk", "pll3_clk", 0,
PERIP1_CLK_ENB, USBH0_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "usbh.0_clk");
- clk = clk_register_gate(NULL, "usbh1_clk", "pll3_48m_clk", 0,
+ clk = clk_register_gate(NULL, "usbh1_clk", "pll3_clk", 0,
PERIP1_CLK_ENB, USBH1_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "usbh.1_clk");
- clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0,
- PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock);
+ clk = clk_register_gate(NULL, "usbd_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
+ USBD_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "designware_udc");
/* clock derived from ahb clk */
@@ -278,9 +277,8 @@ void __init spear6xx_clk_init(void)
clk_register_clkdev(clk, "ahbmult2_clk", NULL);
clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
- ARRAY_SIZE(ddr_parents),
- 0, PLL_CLK_CFG, MCTR_CLK_SHIFT, MCTR_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT,
+ MCTR_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "ddr_clk", NULL);
clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
@@ -298,7 +296,7 @@ void __init spear6xx_clk_init(void)
clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
GMAC_CLK_ENB, 0, &_lock);
- clk_register_clkdev(clk, NULL, "gmac");
+ clk_register_clkdev(clk, NULL, "e0800000.ethernet");
clk = clk_register_gate(NULL, "i2c_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
I2C_CLK_ENB, 0, &_lock);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 99c6b20..d53cd0a 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -16,6 +16,12 @@ config CLKSRC_MMIO
config DW_APB_TIMER
bool
+config DW_APB_TIMER_OF
+ bool
+
+config ARMADA_370_XP_TIMER
+ bool
+
config CLKSRC_DBX500_PRCMU
bool "Clocksource PRCMU Timer"
depends on UX500_SOC_DB8500
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 8d81a1d..b65d0c5 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -6,7 +6,10 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
+obj-$(CONFIG_EM_TIMER_STI) += em_sti.o
obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
-obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
\ No newline at end of file
+obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
+obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
+obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
new file mode 100644
index 0000000..f7dba5b
--- /dev/null
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2012 Altera Corporation
+ * Copyright (c) 2011 Picochip Ltd., Jamie Iles
+ *
+ * Modified from mach-picoxcell/time.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/dw_apb_timer.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/mach/time.h>
+#include <asm/sched_clock.h>
+
+static void timer_get_base_and_rate(struct device_node *np,
+ void __iomem **base, u32 *rate)
+{
+ *base = of_iomap(np, 0);
+
+ if (!*base)
+ panic("Unable to map regs for %s", np->name);
+
+ if (of_property_read_u32(np, "clock-freq", rate) &&
+ of_property_read_u32(np, "clock-frequency", rate))
+ panic("No clock-frequency property for %s", np->name);
+}
+
+static void add_clockevent(struct device_node *event_timer)
+{
+ void __iomem *iobase;
+ struct dw_apb_clock_event_device *ced;
+ u32 irq, rate;
+
+ irq = irq_of_parse_and_map(event_timer, 0);
+ if (irq == NO_IRQ)
+ panic("No IRQ for clock event timer");
+
+ timer_get_base_and_rate(event_timer, &iobase, &rate);
+
+ ced = dw_apb_clockevent_init(0, event_timer->name, 300, iobase, irq,
+ rate);
+ if (!ced)
+ panic("Unable to initialise clockevent device");
+
+ dw_apb_clockevent_register(ced);
+}
+
+static void add_clocksource(struct device_node *source_timer)
+{
+ void __iomem *iobase;
+ struct dw_apb_clocksource *cs;
+ u32 rate;
+
+ timer_get_base_and_rate(source_timer, &iobase, &rate);
+
+ cs = dw_apb_clocksource_init(300, source_timer->name, iobase, rate);
+ if (!cs)
+ panic("Unable to initialise clocksource device");
+
+ dw_apb_clocksource_start(cs);
+ dw_apb_clocksource_register(cs);
+}
+
+static void __iomem *sched_io_base;
+
+static u32 read_sched_clock(void)
+{
+ return __raw_readl(sched_io_base);
+}
+
+static const struct of_device_id sptimer_ids[] __initconst = {
+ { .compatible = "picochip,pc3x2-rtc" },
+ { .compatible = "snps,dw-apb-timer-sp" },
+ { /* Sentinel */ },
+};
+
+static void init_sched_clock(void)
+{
+ struct device_node *sched_timer;
+ u32 rate;
+
+ sched_timer = of_find_matching_node(NULL, sptimer_ids);
+ if (!sched_timer)
+ panic("No RTC for sched clock to use");
+
+ timer_get_base_and_rate(sched_timer, &sched_io_base, &rate);
+ of_node_put(sched_timer);
+
+ setup_sched_clock(read_sched_clock, 32, rate);
+}
+
+static const struct of_device_id osctimer_ids[] __initconst = {
+ { .compatible = "picochip,pc3x2-timer" },
+ { .compatible = "snps,dw-apb-timer-osc" },
+ {},
+};
+
+static void __init timer_init(void)
+{
+ struct device_node *event_timer, *source_timer;
+
+ event_timer = of_find_matching_node(NULL, osctimer_ids);
+ if (!event_timer)
+ panic("No timer for clockevent");
+ add_clockevent(event_timer);
+
+ source_timer = of_find_matching_node(event_timer, osctimer_ids);
+ if (!source_timer)
+ panic("No timer for clocksource");
+ add_clocksource(source_timer);
+
+ of_node_put(source_timer);
+
+ init_sched_clock();
+}
+
+struct sys_timer dw_apb_timer = {
+ .init = timer_init,
+};
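Editor's note: the new file exports dw_apb_timer as a struct sys_timer, so a DT platform would point its machine descriptor at it. A hedged sketch only; the machine name and compatible string below are invented, and the real users live under arch/arm:

#include <linux/init.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>

extern struct sys_timer dw_apb_timer;

static const char * const example_dt_compat[] __initconst = {
	"example,dw-apb-board",	/* invented compatible for illustration */
	NULL,
};

DT_MACHINE_START(EXAMPLE_DT, "Example DW APB platform (DT)")
	.timer		= &dw_apb_timer,
	.dt_compat	= example_dt_compat,
MACHINE_END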
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
new file mode 100644
index 0000000..372051d
--- /dev/null
+++ b/drivers/clocksource/em_sti.c
@@ -0,0 +1,406 @@
+/*
+ * Emma Mobile Timer Support - STI
+ *
+ * Copyright (C) 2012 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+enum { USER_CLOCKSOURCE, USER_CLOCKEVENT, USER_NR };
+
+struct em_sti_priv {
+ void __iomem *base;
+ struct clk *clk;
+ struct platform_device *pdev;
+ unsigned int active[USER_NR];
+ unsigned long rate;
+ raw_spinlock_t lock;
+ struct clock_event_device ced;
+ struct clocksource cs;
+};
+
+#define STI_CONTROL 0x00
+#define STI_COMPA_H 0x10
+#define STI_COMPA_L 0x14
+#define STI_COMPB_H 0x18
+#define STI_COMPB_L 0x1c
+#define STI_COUNT_H 0x20
+#define STI_COUNT_L 0x24
+#define STI_COUNT_RAW_H 0x28
+#define STI_COUNT_RAW_L 0x2c
+#define STI_SET_H 0x30
+#define STI_SET_L 0x34
+#define STI_INTSTATUS 0x40
+#define STI_INTRAWSTATUS 0x44
+#define STI_INTENSET 0x48
+#define STI_INTENCLR 0x4c
+#define STI_INTFFCLR 0x50
+
+static inline unsigned long em_sti_read(struct em_sti_priv *p, int offs)
+{
+ return ioread32(p->base + offs);
+}
+
+static inline void em_sti_write(struct em_sti_priv *p, int offs,
+ unsigned long value)
+{
+ iowrite32(value, p->base + offs);
+}
+
+static int em_sti_enable(struct em_sti_priv *p)
+{
+ int ret;
+
+ /* enable clock */
+ ret = clk_enable(p->clk);
+ if (ret) {
+ dev_err(&p->pdev->dev, "cannot enable clock\n");
+ return ret;
+ }
+
+ /* configure channel, periodic mode and maximum timeout */
+ p->rate = clk_get_rate(p->clk);
+
+ /* reset the counter */
+ em_sti_write(p, STI_SET_H, 0x40000000);
+ em_sti_write(p, STI_SET_L, 0x00000000);
+
+ /* mask and clear pending interrupts */
+ em_sti_write(p, STI_INTENCLR, 3);
+ em_sti_write(p, STI_INTFFCLR, 3);
+
+ /* enable updates of counter registers */
+ em_sti_write(p, STI_CONTROL, 1);
+
+ return 0;
+}
+
+static void em_sti_disable(struct em_sti_priv *p)
+{
+ /* mask interrupts */
+ em_sti_write(p, STI_INTENCLR, 3);
+
+ /* stop clock */
+ clk_disable(p->clk);
+}
+
+static cycle_t em_sti_count(struct em_sti_priv *p)
+{
+ cycle_t ticks;
+ unsigned long flags;
+
+ /* the STI hardware buffers the 48-bit count, but to
+ * break it out into two 32-bit accesses, the registers
+ * must be accessed in a certain order.
+ * Always read STI_COUNT_H before STI_COUNT_L.
+ */
+ raw_spin_lock_irqsave(&p->lock, flags);
+ ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32;
+ ticks |= em_sti_read(p, STI_COUNT_L);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
+
+ return ticks;
+}
+
+static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&p->lock, flags);
+
+ /* mask compare A interrupt */
+ em_sti_write(p, STI_INTENCLR, 1);
+
+ /* update compare A value */
+ em_sti_write(p, STI_COMPA_H, next >> 32);
+ em_sti_write(p, STI_COMPA_L, next & 0xffffffff);
+
+ /* clear compare A interrupt source */
+ em_sti_write(p, STI_INTFFCLR, 1);
+
+ /* unmask compare A interrupt */
+ em_sti_write(p, STI_INTENSET, 1);
+
+ raw_spin_unlock_irqrestore(&p->lock, flags);
+
+ return next;
+}
+
+static irqreturn_t em_sti_interrupt(int irq, void *dev_id)
+{
+ struct em_sti_priv *p = dev_id;
+
+ p->ced.event_handler(&p->ced);
+ return IRQ_HANDLED;
+}
+
+static int em_sti_start(struct em_sti_priv *p, unsigned int user)
+{
+ unsigned long flags;
+ int used_before;
+ int ret = 0;
+
+ raw_spin_lock_irqsave(&p->lock, flags);
+ used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+ if (!used_before)
+ ret = em_sti_enable(p);
+
+ if (!ret)
+ p->active[user] = 1;
+ raw_spin_unlock_irqrestore(&p->lock, flags);
+
+ return ret;
+}
+
+static void em_sti_stop(struct em_sti_priv *p, unsigned int user)
+{
+ unsigned long flags;
+ int used_before, used_after;
+
+ raw_spin_lock_irqsave(&p->lock, flags);
+ used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+ p->active[user] = 0;
+ used_after = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
+
+ if (used_before && !used_after)
+ em_sti_disable(p);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs)
+{
+ return container_of(cs, struct em_sti_priv, cs);
+}
+
+static cycle_t em_sti_clocksource_read(struct clocksource *cs)
+{
+ return em_sti_count(cs_to_em_sti(cs));
+}
+
+static int em_sti_clocksource_enable(struct clocksource *cs)
+{
+ int ret;
+ struct em_sti_priv *p = cs_to_em_sti(cs);
+
+ ret = em_sti_start(p, USER_CLOCKSOURCE);
+ if (!ret)
+ __clocksource_updatefreq_hz(cs, p->rate);
+ return ret;
+}
+
+static void em_sti_clocksource_disable(struct clocksource *cs)
+{
+ em_sti_stop(cs_to_em_sti(cs), USER_CLOCKSOURCE);
+}
+
+static void em_sti_clocksource_resume(struct clocksource *cs)
+{
+ em_sti_clocksource_enable(cs);
+}
+
+static int em_sti_register_clocksource(struct em_sti_priv *p)
+{
+ struct clocksource *cs = &p->cs;
+
+ memset(cs, 0, sizeof(*cs));
+ cs->name = dev_name(&p->pdev->dev);
+ cs->rating = 200;
+ cs->read = em_sti_clocksource_read;
+ cs->enable = em_sti_clocksource_enable;
+ cs->disable = em_sti_clocksource_disable;
+ cs->suspend = em_sti_clocksource_disable;
+ cs->resume = em_sti_clocksource_resume;
+ cs->mask = CLOCKSOURCE_MASK(48);
+ cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+
+ dev_info(&p->pdev->dev, "used as clock source\n");
+
+ /* Register with dummy 1 Hz value, gets updated in ->enable() */
+ clocksource_register_hz(cs, 1);
+ return 0;
+}
+
+static struct em_sti_priv *ced_to_em_sti(struct clock_event_device *ced)
+{
+ return container_of(ced, struct em_sti_priv, ced);
+}
+
+static void em_sti_clock_event_mode(enum clock_event_mode mode,
+ struct clock_event_device *ced)
+{
+ struct em_sti_priv *p = ced_to_em_sti(ced);
+
+ /* deal with old setting first */
+ switch (ced->mode) {
+ case CLOCK_EVT_MODE_ONESHOT:
+ em_sti_stop(p, USER_CLOCKEVENT);
+ break;
+ default:
+ break;
+ }
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_ONESHOT:
+ dev_info(&p->pdev->dev, "used for oneshot clock events\n");
+ em_sti_start(p, USER_CLOCKEVENT);
+ clockevents_config(&p->ced, p->rate);
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
+ em_sti_stop(p, USER_CLOCKEVENT);
+ break;
+ default:
+ break;
+ }
+}
+
+static int em_sti_clock_event_next(unsigned long delta,
+ struct clock_event_device *ced)
+{
+ struct em_sti_priv *p = ced_to_em_sti(ced);
+ cycle_t next;
+ int safe;
+
+ next = em_sti_set_next(p, em_sti_count(p) + delta);
+ safe = em_sti_count(p) < (next - 1);
+
+ return !safe;
+}
+
+static void em_sti_register_clockevent(struct em_sti_priv *p)
+{
+ struct clock_event_device *ced = &p->ced;
+
+ memset(ced, 0, sizeof(*ced));
+ ced->name = dev_name(&p->pdev->dev);
+ ced->features = CLOCK_EVT_FEAT_ONESHOT;
+ ced->rating = 200;
+ ced->cpumask = cpumask_of(0);
+ ced->set_next_event = em_sti_clock_event_next;
+ ced->set_mode = em_sti_clock_event_mode;
+
+ dev_info(&p->pdev->dev, "used for clock events\n");
+
+ /* Register with dummy 1 Hz value, gets updated in ->set_mode() */
+ clockevents_config_and_register(ced, 1, 2, 0xffffffff);
+}
+
+static int __devinit em_sti_probe(struct platform_device *pdev)
+{
+ struct em_sti_priv *p;
+ struct resource *res;
+ int irq, ret;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL) {
+ dev_err(&pdev->dev, "failed to allocate driver data\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ p->pdev = pdev;
+ platform_set_drvdata(pdev, p);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get I/O memory\n");
+ ret = -EINVAL;
+ goto err0;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ ret = -EINVAL;
+ goto err0;
+ }
+
+ /* map memory, let base point to the STI instance */
+ p->base = ioremap_nocache(res->start, resource_size(res));
+ if (p->base == NULL) {
+ dev_err(&pdev->dev, "failed to remap I/O memory\n");
+ ret = -ENXIO;
+ goto err0;
+ }
+
+ /* get hold of clock */
+ p->clk = clk_get(&pdev->dev, "sclk");
+ if (IS_ERR(p->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
+
+ if (request_irq(irq, em_sti_interrupt,
+ IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+ dev_name(&pdev->dev), p)) {
+ dev_err(&pdev->dev, "failed to request low IRQ\n");
+ ret = -ENOENT;
+ goto err2;
+ }
+
+ raw_spin_lock_init(&p->lock);
+ em_sti_register_clockevent(p);
+ em_sti_register_clocksource(p);
+ return 0;
+
+err2:
+ clk_put(p->clk);
+err1:
+ iounmap(p->base);
+err0:
+ kfree(p);
+ return ret;
+}
+
+static int __devexit em_sti_remove(struct platform_device *pdev)
+{
+ return -EBUSY; /* cannot unregister clockevent and clocksource */
+}
+
+static const struct of_device_id em_sti_dt_ids[] __devinitconst = {
+ { .compatible = "renesas,em-sti", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, em_sti_dt_ids);
+
+static struct platform_driver em_sti_device_driver = {
+ .probe = em_sti_probe,
+ .remove = __devexit_p(em_sti_remove),
+ .driver = {
+ .name = "em_sti",
+ .of_match_table = em_sti_dt_ids,
+ }
+};
+
+module_platform_driver(em_sti_device_driver);
+
+MODULE_AUTHOR("Magnus Damm");
+MODULE_DESCRIPTION("Renesas Emma Mobile STI Timer Driver");
+MODULE_LICENSE("GPL v2");
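Editor's note: the driver binds either by the "em_sti" platform device name or the "renesas,em-sti" DT compatible, and it expects one memory resource, one interrupt, and a clock it can clk_get() as "sclk". A hedged board-code sketch; the register base, IRQ number and clock alias below are invented for illustration:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource example_sti_resources[] = {
	DEFINE_RES_MEM(0xe0180000, 0x54),	/* STI register block (assumed) */
	DEFINE_RES_IRQ(125),			/* STI interrupt (assumed) */
};

static struct platform_device example_sti_device = {
	.name		= "em_sti",
	.id		= -1,
	.resource	= example_sti_resources,
	.num_resources	= ARRAY_SIZE(example_sti_resources),
};

/* board code would also register a clock alias, for example:
 * clk_add_alias("sclk", "em_sti", "some_parent_clk", NULL);
 */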
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 32fe9ef..98b06ba 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -48,13 +48,13 @@ struct sh_cmt_priv {
unsigned long next_match_value;
unsigned long max_match_value;
unsigned long rate;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct clock_event_device ced;
struct clocksource cs;
unsigned long total_cycles;
};
-static DEFINE_SPINLOCK(sh_cmt_lock);
+static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
#define CMSTR -1 /* shared register */
#define CMCSR 0 /* channel register */
@@ -139,7 +139,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
- spin_lock_irqsave(&sh_cmt_lock, flags);
+ raw_spin_lock_irqsave(&sh_cmt_lock, flags);
value = sh_cmt_read(p, CMSTR);
if (start)
@@ -148,7 +148,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
value &= ~(1 << cfg->timer_bit);
sh_cmt_write(p, CMSTR, value);
- spin_unlock_irqrestore(&sh_cmt_lock, flags);
+ raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
}
static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
@@ -328,9 +328,9 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
{
unsigned long flags;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
__sh_cmt_set_next(p, delta);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
@@ -385,7 +385,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
int ret = 0;
unsigned long flags;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
ret = sh_cmt_enable(p, &p->rate);
@@ -398,7 +398,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
__sh_cmt_set_next(p, p->max_match_value);
out:
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
return ret;
}
@@ -408,7 +408,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
unsigned long flags;
unsigned long f;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
p->flags &= ~flag;
@@ -420,7 +420,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
__sh_cmt_set_next(p, p->max_match_value);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
@@ -435,13 +435,13 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
unsigned long value;
int has_wrapped;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
value = p->total_cycles;
raw = sh_cmt_get_counter(p, &has_wrapped);
if (unlikely(has_wrapped))
raw += p->match_value + 1;
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
return value + raw;
}
@@ -591,7 +591,7 @@ static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
p->max_match_value = (1 << p->width) - 1;
p->match_value = p->max_match_value;
- spin_lock_init(&p->lock);
+ raw_spin_lock_init(&p->lock);
if (clockevent_rating)
sh_cmt_register_clockevent(p, name, clockevent_rating);
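Editor's note: the sh_cmt hunks above (and the sh_mtu2/sh_tmu ones that follow) switch the shared start/stop lock and the per-channel lock to raw_spinlock_t so they remain true spinning locks on PREEMPT_RT. A minimal sketch of the pattern, with invented names:

#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_timer_lock);

/* Atomically set or clear one channel bit in a shared start/stop register. */
static void example_start_stop(void __iomem *reg, int bit, int start)
{
	unsigned long flags, value;

	raw_spin_lock_irqsave(&example_timer_lock, flags);
	value = readl(reg);
	if (start)
		value |= 1 << bit;
	else
		value &= ~(1 << bit);
	writel(value, reg);
	raw_spin_unlock_irqrestore(&example_timer_lock, flags);
}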
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index a2172f6..d9b76ca 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -43,7 +43,7 @@ struct sh_mtu2_priv {
struct clock_event_device ced;
};
-static DEFINE_SPINLOCK(sh_mtu2_lock);
+static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);
#define TSTR -1 /* shared register */
#define TCR 0 /* channel register */
@@ -107,7 +107,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
- spin_lock_irqsave(&sh_mtu2_lock, flags);
+ raw_spin_lock_irqsave(&sh_mtu2_lock, flags);
value = sh_mtu2_read(p, TSTR);
if (start)
@@ -116,7 +116,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
value &= ~(1 << cfg->timer_bit);
sh_mtu2_write(p, TSTR, value);
- spin_unlock_irqrestore(&sh_mtu2_lock, flags);
+ raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);
}
static int sh_mtu2_enable(struct sh_mtu2_priv *p)
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 97f54b6..c1b51d4 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -45,7 +45,7 @@ struct sh_tmu_priv {
struct clocksource cs;
};
-static DEFINE_SPINLOCK(sh_tmu_lock);
+static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
#define TSTR -1 /* shared register */
#define TCOR 0 /* channel register */
@@ -95,7 +95,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
- spin_lock_irqsave(&sh_tmu_lock, flags);
+ raw_spin_lock_irqsave(&sh_tmu_lock, flags);
value = sh_tmu_read(p, TSTR);
if (start)
@@ -104,7 +104,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
value &= ~(1 << cfg->timer_bit);
sh_tmu_write(p, TSTR, value);
- spin_unlock_irqrestore(&sh_tmu_lock, flags);
+ raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
}
static int sh_tmu_enable(struct sh_tmu_priv *p)
@@ -245,12 +245,7 @@ static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
sh_tmu_enable(p);
- /* TODO: calculate good shift from rate and counter bit width */
-
- ced->shift = 32;
- ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
- ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced);
- ced->min_delta_ns = 5000;
+ clockevents_config(ced, p->rate);
if (periodic) {
p->periodic = (p->rate + HZ/2) / HZ;
@@ -323,7 +318,8 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
ced->set_mode = sh_tmu_clock_event_mode;
dev_info(&p->pdev->dev, "used for clock events\n");
- clockevents_register_device(ced);
+
+ clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);
ret = setup_irq(p->irqaction.irq, &p->irqaction);
if (ret) {
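For readers unfamiliar with the API adopted above (and in the new Armada 370/XP driver below): clockevents_config_and_register() takes the timer's input clock rate plus the minimum and maximum programmable delta, and derives mult, shift, min_delta_ns and max_delta_ns internally, replacing the open-coded div_sc()/clockevent_delta2ns() arithmetic that the patch removes. The following is a minimal sketch of that idiom, not code from either driver; names such as my_evt and my_timer_rate are hypothetical.

/*
 * Illustrative sketch only -- not part of the patch above.
 */
#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/init.h>

static int my_evt_set_next_event(unsigned long delta,
				 struct clock_event_device *ced)
{
	/* program the hardware comparator with "delta" ticks here */
	return 0;
}

static void my_evt_set_mode(enum clock_event_mode mode,
			    struct clock_event_device *ced)
{
	/* switch the hardware between periodic/oneshot/shutdown here */
}

static struct clock_event_device my_evt = {
	.name		= "my_evt",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 200,
	.set_next_event	= my_evt_set_next_event,
	.set_mode	= my_evt_set_mode,
};

static void __init my_evt_init(u32 my_timer_rate)
{
	my_evt.cpumask = cpumask_of(0);
	/* min delta of 1 tick, max delta of a full 32-bit counter */
	clockevents_config_and_register(&my_evt, my_timer_rate, 1, 0xffffffff);
}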
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
new file mode 100644
index 0000000..4674f94
--- /dev/null
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -0,0 +1,226 @@
+/*
+ * Marvell Armada 370/XP SoC timer handling.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Timer 0 is used as the free-running clocksource, while timer 1 is
+ * used as the clock_event_device.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <asm/sched_clock.h>
+
+/*
+ * Timer block registers.
+ */
+#define TIMER_CTRL_OFF 0x0000
+#define TIMER0_EN 0x0001
+#define TIMER0_RELOAD_EN 0x0002
+#define TIMER0_25MHZ 0x0800
+#define TIMER0_DIV(div) ((div) << 19)
+#define TIMER1_EN 0x0004
+#define TIMER1_RELOAD_EN 0x0008
+#define TIMER1_25MHZ 0x1000
+#define TIMER1_DIV(div) ((div) << 22)
+#define TIMER_EVENTS_STATUS 0x0004
+#define TIMER0_CLR_MASK (~0x1)
+#define TIMER1_CLR_MASK (~0x100)
+#define TIMER0_RELOAD_OFF 0x0010
+#define TIMER0_VAL_OFF 0x0014
+#define TIMER1_RELOAD_OFF 0x0018
+#define TIMER1_VAL_OFF 0x001c
+
+/* Global timers are connected to the coherency fabric clock, and the
+ below divider reduces their incrementing frequency. */
+#define TIMER_DIVIDER_SHIFT 5
+#define TIMER_DIVIDER (1 << TIMER_DIVIDER_SHIFT)
+
+/*
+ * SoC-specific data.
+ */
+static void __iomem *timer_base;
+static int timer_irq;
+
+/*
+ * Number of timer ticks per jiffy.
+ */
+static u32 ticks_per_jiffy;
+
+static u32 notrace armada_370_xp_read_sched_clock(void)
+{
+ return ~readl(timer_base + TIMER0_VAL_OFF);
+}
+
+/*
+ * Clockevent handling.
+ */
+static int
+armada_370_xp_clkevt_next_event(unsigned long delta,
+ struct clock_event_device *dev)
+{
+ u32 u;
+
+ /*
+ * Clear clockevent timer interrupt.
+ */
+ writel(TIMER1_CLR_MASK, timer_base + TIMER_EVENTS_STATUS);
+
+ /*
+ * Setup new clockevent timer value.
+ */
+ writel(delta, timer_base + TIMER1_VAL_OFF);
+
+ /*
+ * Enable the timer.
+ */
+ u = readl(timer_base + TIMER_CTRL_OFF);
+ u = ((u & ~TIMER1_RELOAD_EN) | TIMER1_EN |
+ TIMER1_DIV(TIMER_DIVIDER_SHIFT));
+ writel(u, timer_base + TIMER_CTRL_OFF);
+
+ return 0;
+}
+
+static void
+armada_370_xp_clkevt_mode(enum clock_event_mode mode,
+ struct clock_event_device *dev)
+{
+ u32 u;
+
+ if (mode == CLOCK_EVT_MODE_PERIODIC) {
+ /*
+ * Setup timer to fire at 1/HZ intervals.
+ */
+ writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD_OFF);
+ writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL_OFF);
+
+ /*
+ * Enable timer.
+ */
+ u = readl(timer_base + TIMER_CTRL_OFF);
+
+ writel((u | TIMER1_EN | TIMER1_RELOAD_EN |
+ TIMER1_DIV(TIMER_DIVIDER_SHIFT)),
+ timer_base + TIMER_CTRL_OFF);
+ } else {
+ /*
+ * Disable timer.
+ */
+ u = readl(timer_base + TIMER_CTRL_OFF);
+ writel(u & ~TIMER1_EN, timer_base + TIMER_CTRL_OFF);
+
+ /*
+ * ACK pending timer interrupt.
+ */
+ writel(TIMER1_CLR_MASK, timer_base + TIMER_EVENTS_STATUS);
+
+ }
+}
+
+static struct clock_event_device armada_370_xp_clkevt = {
+ .name = "armada_370_xp_tick",
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+ .shift = 32,
+ .rating = 300,
+ .set_next_event = armada_370_xp_clkevt_next_event,
+ .set_mode = armada_370_xp_clkevt_mode,
+};
+
+static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
+{
+ /*
+ * ACK timer interrupt and call event handler.
+ */
+
+ writel(TIMER1_CLR_MASK, timer_base + TIMER_EVENTS_STATUS);
+ armada_370_xp_clkevt.event_handler(&armada_370_xp_clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction armada_370_xp_timer_irq = {
+ .name = "armada_370_xp_tick",
+ .flags = IRQF_DISABLED | IRQF_TIMER,
+ .handler = armada_370_xp_timer_interrupt
+};
+
+void __init armada_370_xp_timer_init(void)
+{
+ u32 u;
+ struct device_node *np;
+ unsigned int timer_clk;
+ int ret;
+ np = of_find_compatible_node(NULL, NULL, "marvell,armada-370-xp-timer");
+ timer_base = of_iomap(np, 0);
+ WARN_ON(!timer_base);
+
+ if (of_find_property(np, "marvell,timer-25Mhz", NULL)) {
+ /* The fixed 25MHz timer is available so let's use it */
+ u = readl(timer_base + TIMER_CTRL_OFF);
+ writel(u | TIMER0_25MHZ | TIMER1_25MHZ,
+ timer_base + TIMER_CTRL_OFF);
+ timer_clk = 25000000;
+ } else {
+ u32 clk = 0;
+ ret = of_property_read_u32(np, "clock-frequency", &clk);
+ WARN_ON(!clk || ret < 0);
+ u = readl(timer_base + TIMER_CTRL_OFF);
+ writel(u & ~(TIMER0_25MHZ | TIMER1_25MHZ),
+ timer_base + TIMER_CTRL_OFF);
+ timer_clk = clk / TIMER_DIVIDER;
+ }
+
+ /* We use timer 0 as clocksource, and timer 1 for
+ clockevents */
+ timer_irq = irq_of_parse_and_map(np, 1);
+
+ ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
+
+ /*
+ * Set scale and timer for sched_clock.
+ */
+ setup_sched_clock(armada_370_xp_read_sched_clock, 32, timer_clk);
+
+ /*
+ * Setup free-running clocksource timer (interrupts
+ * disabled).
+ */
+ writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
+ writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
+
+ u = readl(timer_base + TIMER_CTRL_OFF);
+
+ writel((u | TIMER0_EN | TIMER0_RELOAD_EN |
+ TIMER0_DIV(TIMER_DIVIDER_SHIFT)), timer_base + TIMER_CTRL_OFF);
+
+ clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
+ "armada_370_xp_clocksource",
+ timer_clk, 300, 32, clocksource_mmio_readl_down);
+
+ /*
+ * Setup clockevent timer (interrupt-driven).
+ */
+ setup_irq(timer_irq, &armada_370_xp_timer_irq);
+ armada_370_xp_clkevt.cpumask = cpumask_of(0);
+ clockevents_config_and_register(&armada_370_xp_clkevt,
+ timer_clk, 1, 0xfffffffe);
+}
+
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 77e1e6c..3e92b7d 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -46,7 +46,7 @@ static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
static inline void get_seq(__u32 *ts, int *cpu)
{
preempt_disable();
- *ts = __this_cpu_inc_return(proc_event_counts) -1;
+ *ts = __this_cpu_inc_return(proc_event_counts) - 1;
*cpu = smp_processor_id();
preempt_enable();
}
@@ -62,8 +62,8 @@ void proc_fork_connector(struct task_struct *task)
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg*)buffer;
- ev = (struct proc_event*)msg->data;
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -93,8 +93,8 @@ void proc_exec_connector(struct task_struct *task)
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg*)buffer;
- ev = (struct proc_event*)msg->data;
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -119,8 +119,8 @@ void proc_id_connector(struct task_struct *task, int which_id)
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg*)buffer;
- ev = (struct proc_event*)msg->data;
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
ev->what = which_id;
ev->event_data.id.process_pid = task->pid;
ev->event_data.id.process_tgid = task->tgid;
@@ -134,7 +134,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
ev->event_data.id.e.egid = cred->egid;
} else {
rcu_read_unlock();
- return;
+ return;
}
rcu_read_unlock();
get_seq(&msg->seq, &ev->cpu);
@@ -241,8 +241,8 @@ void proc_exit_connector(struct task_struct *task)
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg*)buffer;
- ev = (struct proc_event*)msg->data;
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -276,8 +276,8 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg*)buffer;
- ev = (struct proc_event*)msg->data;
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
msg->seq = rcvd_seq;
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -303,7 +303,7 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
if (msg->len != sizeof(*mc_op))
return;
- mc_op = (enum proc_cn_mcast_op*)msg->data;
+ mc_op = (enum proc_cn_mcast_op *)msg->data;
switch (*mc_op) {
case PROC_CN_MCAST_LISTEN:
atomic_inc(&proc_event_num_listeners);
@@ -325,11 +325,11 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
*/
static int __init cn_proc_init(void)
{
- int err;
-
- if ((err = cn_add_callback(&cn_proc_event_id, "cn_proc",
- &cn_proc_mcast_ctl))) {
- printk(KERN_WARNING "cn_proc failed to register\n");
+ int err = cn_add_callback(&cn_proc_event_id,
+ "cn_proc",
+ &cn_proc_mcast_ctl);
+ if (err) {
+ pr_warn("cn_proc failed to register\n");
return err;
}
return 0;
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index c42c9d5..1f8bf05 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -1,5 +1,5 @@
/*
- * cn_queue.c
+ * cn_queue.c
*
* 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
* All rights reserved.
@@ -34,13 +34,14 @@
static struct cn_callback_entry *
cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
struct cb_id *id,
- void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
+ void (*callback)(struct cn_msg *,
+ struct netlink_skb_parms *))
{
struct cn_callback_entry *cbq;
cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
if (!cbq) {
- printk(KERN_ERR "Failed to create new callback queue.\n");
+ pr_err("Failed to create new callback queue.\n");
return NULL;
}
@@ -71,7 +72,8 @@ int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
struct cb_id *id,
- void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
+ void (*callback)(struct cn_msg *,
+ struct netlink_skb_parms *))
{
struct cn_callback_entry *cbq, *__cbq;
int found = 0;
@@ -149,7 +151,7 @@ void cn_queue_free_dev(struct cn_queue_dev *dev)
spin_unlock_bh(&dev->queue_lock);
while (atomic_read(&dev->refcnt)) {
- printk(KERN_INFO "Waiting for %s to become free: refcnt=%d.\n",
+ pr_info("Waiting for %s to become free: refcnt=%d.\n",
dev->name, atomic_read(&dev->refcnt));
msleep(1000);
}
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index dde6a0f..82fa4f0 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -1,5 +1,5 @@
/*
- * connector.c
+ * connector.c
*
* 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
* All rights reserved.
@@ -101,19 +101,19 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
if (!skb)
return -ENOMEM;
- nlh = NLMSG_PUT(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh));
+ nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh), 0);
+ if (!nlh) {
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
- data = NLMSG_DATA(nlh);
+ data = nlmsg_data(nlh);
memcpy(data, msg, sizeof(*data) + msg->len);
NETLINK_CB(skb).dst_group = group;
return netlink_broadcast(dev->nls, skb, 0, group, gfp_mask);
-
-nlmsg_failure:
- kfree_skb(skb);
- return -EINVAL;
}
EXPORT_SYMBOL_GPL(cn_netlink_send);
@@ -185,7 +185,8 @@ static void cn_rx_skb(struct sk_buff *__skb)
* May sleep.
*/
int cn_add_callback(struct cb_id *id, const char *name,
- void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
+ void (*callback)(struct cn_msg *,
+ struct netlink_skb_parms *))
{
int err;
struct cn_dev *dev = &cdev;
@@ -251,15 +252,20 @@ static const struct file_operations cn_file_ops = {
.release = single_release
};
+static struct cn_dev cdev = {
+ .input = cn_rx_skb,
+};
+
static int __devinit cn_init(void)
{
struct cn_dev *dev = &cdev;
-
- dev->input = cn_rx_skb;
+ struct netlink_kernel_cfg cfg = {
+ .groups = CN_NETLINK_USERS + 0xf,
+ .input = dev->input,
+ };
dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR,
- CN_NETLINK_USERS + 0xf,
- dev->input, NULL, THIS_MODULE);
+ THIS_MODULE, &cfg);
if (!dev->nls)
return -EIO;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 7f2f149..fb8a527 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -138,7 +138,7 @@ void disable_cpufreq(void)
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
-struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
+static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
struct cpufreq_policy *data;
unsigned long flags;
@@ -162,7 +162,7 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
if (!data)
goto err_out_put_module;
- if (!kobject_get(&data->kobj))
+ if (!sysfs && !kobject_get(&data->kobj))
goto err_out_put_module;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -175,16 +175,35 @@ err_out_unlock:
err_out:
return NULL;
}
+
+struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
+{
+ return __cpufreq_cpu_get(cpu, false);
+}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
+static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
+{
+ return __cpufreq_cpu_get(cpu, true);
+}
-void cpufreq_cpu_put(struct cpufreq_policy *data)
+static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
- kobject_put(&data->kobj);
+ if (!sysfs)
+ kobject_put(&data->kobj);
module_put(cpufreq_driver->owner);
}
+
+void cpufreq_cpu_put(struct cpufreq_policy *data)
+{
+ __cpufreq_cpu_put(data, false);
+}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
+static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
+{
+ __cpufreq_cpu_put(data, true);
+}
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
@@ -617,7 +636,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
- policy = cpufreq_cpu_get(policy->cpu);
+ policy = cpufreq_cpu_get_sysfs(policy->cpu);
if (!policy)
goto no_policy;
@@ -631,7 +650,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
unlock_policy_rwsem_read(policy->cpu);
fail:
- cpufreq_cpu_put(policy);
+ cpufreq_cpu_put_sysfs(policy);
no_policy:
return ret;
}
@@ -642,7 +661,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
- policy = cpufreq_cpu_get(policy->cpu);
+ policy = cpufreq_cpu_get_sysfs(policy->cpu);
if (!policy)
goto no_policy;
@@ -656,7 +675,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
unlock_policy_rwsem_write(policy->cpu);
fail:
- cpufreq_cpu_put(policy);
+ cpufreq_cpu_put_sysfs(policy);
no_policy:
return ret;
}
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index b243a7e..af2d81e 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -62,8 +62,18 @@ static int exynos_target(struct cpufreq_policy *policy,
goto out;
}
- if (cpufreq_frequency_table_target(policy, freq_table,
- freqs.old, relation, &old_index)) {
+ /*
+ * The policy max may have been changed, so we cannot get a proper
+ * old_index with cpufreq_frequency_table_target(). Thus, ignore the
+ * policy and get the index from the raw frequency table.
+ */
+ for (old_index = 0;
+ freq_table[old_index].frequency != CPUFREQ_TABLE_END;
+ old_index++)
+ if (freq_table[old_index].frequency == freqs.old)
+ break;
+
+ if (freq_table[old_index].frequency == CPUFREQ_TABLE_END) {
ret = -EINVAL;
goto out;
}
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index d90519c..d6a533e 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -201,6 +201,22 @@ void cpuidle_resume_and_unlock(void)
EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
+/* Currently used in suspend/resume path to suspend cpuidle */
+void cpuidle_pause(void)
+{
+ mutex_lock(&cpuidle_lock);
+ cpuidle_uninstall_idle_handler();
+ mutex_unlock(&cpuidle_lock);
+}
+
+/* Currently used in suspend/resume path to resume cpuidle */
+void cpuidle_resume(void)
+{
+ mutex_lock(&cpuidle_lock);
+ cpuidle_install_idle_handler();
+ mutex_unlock(&cpuidle_lock);
+}
+
/**
* cpuidle_wrap_enter - performs timekeeping and irqen around enter function
* @dev: pointer to a valid cpuidle_device object
@@ -265,7 +281,7 @@ static void poll_idle_init(struct cpuidle_driver *drv)
state->power_usage = -1;
state->flags = 0;
state->enter = poll_idle;
- state->disable = 0;
+ state->disabled = false;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 40cd3f3..58bf3b1 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -16,6 +16,7 @@
static struct cpuidle_driver *cpuidle_curr_driver;
DEFINE_SPINLOCK(cpuidle_driver_lock);
+int cpuidle_driver_refcount;
static void __cpuidle_register_driver(struct cpuidle_driver *drv)
{
@@ -89,8 +90,34 @@ void cpuidle_unregister_driver(struct cpuidle_driver *drv)
}
spin_lock(&cpuidle_driver_lock);
- cpuidle_curr_driver = NULL;
+
+ if (!WARN_ON(cpuidle_driver_refcount > 0))
+ cpuidle_curr_driver = NULL;
+
spin_unlock(&cpuidle_driver_lock);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
+
+struct cpuidle_driver *cpuidle_driver_ref(void)
+{
+ struct cpuidle_driver *drv;
+
+ spin_lock(&cpuidle_driver_lock);
+
+ drv = cpuidle_curr_driver;
+ cpuidle_driver_refcount++;
+
+ spin_unlock(&cpuidle_driver_lock);
+ return drv;
+}
+
+void cpuidle_driver_unref(void)
+{
+ spin_lock(&cpuidle_driver_lock);
+
+ if (!WARN_ON(cpuidle_driver_refcount <= 0))
+ cpuidle_driver_refcount--;
+
+ spin_unlock(&cpuidle_driver_lock);
+}
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 0633575..5b1f2c3 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -281,7 +281,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* unless the timer is happening really really soon.
*/
if (data->expected_us > 5 &&
- drv->states[CPUIDLE_DRIVER_STATE_START].disable == 0)
+ !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
+ dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
/*
@@ -290,8 +291,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
*/
for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
+ struct cpuidle_state_usage *su = &dev->states_usage[i];
- if (s->disable)
+ if (s->disabled || su->disable)
continue;
if (s->target_residency > data->predicted_us)
continue;
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 88032b4..5f809e3 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -217,7 +217,8 @@ struct cpuidle_state_attr {
struct attribute attr;
ssize_t (*show)(struct cpuidle_state *, \
struct cpuidle_state_usage *, char *);
- ssize_t (*store)(struct cpuidle_state *, const char *, size_t);
+ ssize_t (*store)(struct cpuidle_state *, \
+ struct cpuidle_state_usage *, const char *, size_t);
};
#define define_one_state_ro(_name, show) \
@@ -233,21 +234,22 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
return sprintf(buf, "%u\n", state->_name);\
}
-#define define_store_state_function(_name) \
+#define define_store_state_ull_function(_name) \
static ssize_t store_state_##_name(struct cpuidle_state *state, \
+ struct cpuidle_state_usage *state_usage, \
const char *buf, size_t size) \
{ \
- long value; \
+ unsigned long long value; \
int err; \
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
- err = kstrtol(buf, 0, &value); \
+ err = kstrtoull(buf, 0, &value); \
if (err) \
return err; \
if (value) \
- state->disable = 1; \
+ state_usage->_name = 1; \
else \
- state->disable = 0; \
+ state_usage->_name = 0; \
return size; \
}
@@ -273,8 +275,8 @@ define_show_state_ull_function(usage)
define_show_state_ull_function(time)
define_show_state_str_function(name)
define_show_state_str_function(desc)
-define_show_state_function(disable)
-define_store_state_function(disable)
+define_show_state_ull_function(disable)
+define_store_state_ull_function(disable)
define_one_state_ro(name, show_state_name);
define_one_state_ro(desc, show_state_desc);
@@ -318,10 +320,11 @@ static ssize_t cpuidle_state_store(struct kobject *kobj,
{
int ret = -EIO;
struct cpuidle_state *state = kobj_to_state(kobj);
+ struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj);
struct cpuidle_state_attr *cattr = attr_to_stateattr(attr);
if (cattr->store)
- ret = cattr->store(state, buf, size);
+ ret = cattr->store(state, state_usage, buf, size);
return ret;
}
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
index 422a976..ac236f6 100644
--- a/drivers/crypto/tegra-aes.c
+++ b/drivers/crypto/tegra-aes.c
@@ -572,7 +572,7 @@ static void aes_workqueue_handler(struct work_struct *work)
struct tegra_aes_dev *dd = aes_dev;
int ret;
- ret = clk_enable(dd->aes_clk);
+ ret = clk_prepare_enable(dd->aes_clk);
if (ret)
BUG_ON("clock enable failed");
@@ -581,7 +581,7 @@ static void aes_workqueue_handler(struct work_struct *work)
ret = tegra_aes_handle_req(dd);
} while (!ret);
- clk_disable(dd->aes_clk);
+ clk_disable_unprepare(dd->aes_clk);
}
static irqreturn_t aes_irq(int irq, void *dev_id)
@@ -673,7 +673,7 @@ static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
/* take mutex to access the aes hw */
mutex_lock(&aes_lock);
- ret = clk_enable(dd->aes_clk);
+ ret = clk_prepare_enable(dd->aes_clk);
if (ret)
return ret;
@@ -700,7 +700,7 @@ static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
}
out:
- clk_disable(dd->aes_clk);
+ clk_disable_unprepare(dd->aes_clk);
mutex_unlock(&aes_lock);
dev_dbg(dd->dev, "%s: done\n", __func__);
@@ -758,7 +758,7 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
- ret = clk_enable(dd->aes_clk);
+ ret = clk_prepare_enable(dd->aes_clk);
if (ret)
return ret;
@@ -788,7 +788,7 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);
out:
- clk_disable(dd->aes_clk);
+ clk_disable_unprepare(dd->aes_clk);
mutex_unlock(&aes_lock);
dev_dbg(dd->dev, "%s: done\n", __func__);
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 7cac127..1c307e1 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1661,27 +1661,26 @@ static void ux500_cryp_shutdown(struct platform_device *pdev)
}
-static int ux500_cryp_suspend(struct platform_device *pdev, pm_message_t state)
+static int ux500_cryp_suspend(struct device *dev)
{
int ret;
+ struct platform_device *pdev = to_platform_device(dev);
struct cryp_device_data *device_data;
struct resource *res_irq;
struct cryp_ctx *temp_ctx = NULL;
- dev_dbg(&pdev->dev, "[%s]", __func__);
+ dev_dbg(dev, "[%s]", __func__);
/* Handle state? */
device_data = platform_get_drvdata(pdev);
if (!device_data) {
- dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
- __func__);
+ dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__);
return -ENOMEM;
}
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res_irq)
- dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable",
- __func__);
+ dev_err(dev, "[%s]: IORESOURCE_IRQ, unavailable", __func__);
else
disable_irq(res_irq->start);
@@ -1692,32 +1691,32 @@ static int ux500_cryp_suspend(struct platform_device *pdev, pm_message_t state)
if (device_data->current_ctx == ++temp_ctx) {
if (down_interruptible(&driver_data.device_allocation))
- dev_dbg(&pdev->dev, "[%s]: down_interruptible() "
- "failed", __func__);
- ret = cryp_disable_power(&pdev->dev, device_data, false);
+ dev_dbg(dev, "[%s]: down_interruptible() failed",
+ __func__);
+ ret = cryp_disable_power(dev, device_data, false);
} else
- ret = cryp_disable_power(&pdev->dev, device_data, true);
+ ret = cryp_disable_power(dev, device_data, true);
if (ret)
- dev_err(&pdev->dev, "[%s]: cryp_disable_power()", __func__);
+ dev_err(dev, "[%s]: cryp_disable_power()", __func__);
return ret;
}
-static int ux500_cryp_resume(struct platform_device *pdev)
+static int ux500_cryp_resume(struct device *dev)
{
int ret = 0;
+ struct platform_device *pdev = to_platform_device(dev);
struct cryp_device_data *device_data;
struct resource *res_irq;
struct cryp_ctx *temp_ctx = NULL;
- dev_dbg(&pdev->dev, "[%s]", __func__);
+ dev_dbg(dev, "[%s]", __func__);
device_data = platform_get_drvdata(pdev);
if (!device_data) {
- dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
- __func__);
+ dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__);
return -ENOMEM;
}
@@ -1730,11 +1729,10 @@ static int ux500_cryp_resume(struct platform_device *pdev)
if (!device_data->current_ctx)
up(&driver_data.device_allocation);
else
- ret = cryp_enable_power(&pdev->dev, device_data, true);
+ ret = cryp_enable_power(dev, device_data, true);
if (ret)
- dev_err(&pdev->dev, "[%s]: cryp_enable_power() failed!",
- __func__);
+ dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
else {
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res_irq)
@@ -1744,15 +1742,16 @@ static int ux500_cryp_resume(struct platform_device *pdev)
return ret;
}
+static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume);
+
static struct platform_driver cryp_driver = {
.probe = ux500_cryp_probe,
.remove = ux500_cryp_remove,
.shutdown = ux500_cryp_shutdown,
- .suspend = ux500_cryp_suspend,
- .resume = ux500_cryp_resume,
.driver = {
.owner = THIS_MODULE,
- .name = "cryp1"
+ .name = "cryp1",
+ .pm = &ux500_cryp_pm,
}
};
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 6dbb9ec..08d5032 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1894,19 +1894,17 @@ static void ux500_hash_shutdown(struct platform_device *pdev)
/**
* ux500_hash_suspend - Function that suspends the hash device.
- * @pdev: The platform device.
- * @state: -
+ * @dev: Device to suspend.
*/
-static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state)
+static int ux500_hash_suspend(struct device *dev)
{
int ret;
struct hash_device_data *device_data;
struct hash_ctx *temp_ctx = NULL;
- device_data = platform_get_drvdata(pdev);
+ device_data = dev_get_drvdata(dev);
if (!device_data) {
- dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
- __func__);
+ dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__);
return -ENOMEM;
}
@@ -1917,33 +1915,32 @@ static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state)
if (device_data->current_ctx == ++temp_ctx) {
if (down_interruptible(&driver_data.device_allocation))
- dev_dbg(&pdev->dev, "[%s]: down_interruptible() "
- "failed", __func__);
+ dev_dbg(dev, "[%s]: down_interruptible() failed",
+ __func__);
ret = hash_disable_power(device_data, false);
} else
ret = hash_disable_power(device_data, true);
if (ret)
- dev_err(&pdev->dev, "[%s]: hash_disable_power()", __func__);
+ dev_err(dev, "[%s]: hash_disable_power()", __func__);
return ret;
}
/**
* ux500_hash_resume - Function that resume the hash device.
- * @pdev: The platform device.
+ * @dev: Device to resume.
*/
-static int ux500_hash_resume(struct platform_device *pdev)
+static int ux500_hash_resume(struct device *dev)
{
int ret = 0;
struct hash_device_data *device_data;
struct hash_ctx *temp_ctx = NULL;
- device_data = platform_get_drvdata(pdev);
+ device_data = dev_get_drvdata(dev);
if (!device_data) {
- dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
- __func__);
+ dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__);
return -ENOMEM;
}
@@ -1958,21 +1955,21 @@ static int ux500_hash_resume(struct platform_device *pdev)
ret = hash_enable_power(device_data, true);
if (ret)
- dev_err(&pdev->dev, "[%s]: hash_enable_power() failed!",
- __func__);
+ dev_err(dev, "[%s]: hash_enable_power() failed!", __func__);
return ret;
}
+static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
+
static struct platform_driver hash_driver = {
.probe = ux500_hash_probe,
.remove = ux500_hash_remove,
.shutdown = ux500_hash_shutdown,
- .suspend = ux500_hash_suspend,
- .resume = ux500_hash_resume,
.driver = {
.owner = THIS_MODULE,
.name = "hash1",
+ .pm = &ux500_hash_pm,
}
};
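The two ux500 conversions above follow the standard dev_pm_ops pattern: the suspend/resume callbacks now take a struct device *, are bundled into a dev_pm_ops with SIMPLE_DEV_PM_OPS(), and are wired up through .driver.pm instead of the legacy platform_driver .suspend/.resume entries. Below is a minimal sketch of that pattern for a hypothetical "foo" platform driver; none of the names come from these patches.

/*
 * Illustrative sketch only -- not part of the patches above.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	return 0;
}

static int foo_suspend(struct device *dev)
{
	dev_dbg(dev, "suspending\n");
	/* quiesce the hardware here */
	return 0;
}

static int foo_resume(struct device *dev)
{
	dev_dbg(dev, "resuming\n");
	/* re-enable the hardware here */
	return 0;
}

/* Bundles the callbacks into a const struct dev_pm_ops named foo_pm. */
static SIMPLE_DEV_PM_OPS(foo_pm, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= {
		.owner	= THIS_MODULE,
		.name	= "foo",
		.pm	= &foo_pm,
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");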
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index e23dc82..7212961 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1626,4 +1626,4 @@ module_exit(dw_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index fb4f499..1dc2a4a 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -815,8 +815,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac)
init_completion(&sdmac->done);
- sdmac->buf_tail = 0;
-
return 0;
out:
@@ -927,6 +925,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
sdmac->flags = 0;
+ sdmac->buf_tail = 0;
+
dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
sg_len, channel);
@@ -1027,6 +1027,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
sdmac->status = DMA_IN_PROGRESS;
+ sdmac->buf_tail = 0;
+
sdmac->flags |= IMX_DMA_SG_LOOP;
sdmac->direction = direction;
ret = sdma_load_context(sdmac);
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 5ec7204..c7573e5 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1663,7 +1663,6 @@ static void __exit ipu_idmac_exit(struct ipu *ipu)
static int __init ipu_probe(struct platform_device *pdev)
{
- struct ipu_platform_data *pdata = pdev->dev.platform_data;
struct resource *mem_ipu, *mem_ic;
int ret;
@@ -1671,7 +1670,7 @@ static int __init ipu_probe(struct platform_device *pdev)
mem_ipu = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mem_ic = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!pdata || !mem_ipu || !mem_ic)
+ if (!mem_ipu || !mem_ic)
return -EINVAL;
ipu_data.dev = &pdev->dev;
@@ -1688,10 +1687,9 @@ static int __init ipu_probe(struct platform_device *pdev)
goto err_noirq;
ipu_data.irq_err = ret;
- ipu_data.irq_base = pdata->irq_base;
- dev_dbg(&pdev->dev, "fn irq %u, err irq %u, irq-base %u\n",
- ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base);
+ dev_dbg(&pdev->dev, "fn irq %u, err irq %u\n",
+ ipu_data.irq_fn, ipu_data.irq_err);
/* Remap IPU common registers */
ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu));
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index a71f55e..fa95bcc 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <mach/ipu.h>
@@ -354,10 +355,12 @@ static struct irq_chip ipu_irq_chip = {
/* Install the IRQ handler */
int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
{
- struct ipu_platform_data *pdata = dev->dev.platform_data;
- unsigned int irq, irq_base, i;
+ unsigned int irq, i;
+ int irq_base = irq_alloc_descs(-1, 0, CONFIG_MX3_IPU_IRQS,
+ numa_node_id());
- irq_base = pdata->irq_base;
+ if (irq_base < 0)
+ return irq_base;
for (i = 0; i < IPU_IRQ_NR_BANKS; i++)
irq_bank[i].ipu = ipu;
@@ -387,15 +390,16 @@ int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
irq_set_handler_data(ipu->irq_err, ipu);
irq_set_chained_handler(ipu->irq_err, ipu_irq_err);
+ ipu->irq_base = irq_base;
+
return 0;
}
void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev)
{
- struct ipu_platform_data *pdata = dev->dev.platform_data;
unsigned int irq, irq_base;
- irq_base = pdata->irq_base;
+ irq_base = ipu->irq_base;
irq_set_chained_handler(ipu->irq_fn, NULL);
irq_set_handler_data(ipu->irq_fn, NULL);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index cbcc28e..e4feba6 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -392,6 +392,8 @@ struct pl330_req {
struct pl330_reqcfg *cfg;
/* Pointer to first xfer in the request. */
struct pl330_xfer *x;
+ /* Hook to attach to DMAC's list of reqs with due callback */
+ struct list_head rqd;
};
/*
@@ -461,8 +463,6 @@ struct _pl330_req {
/* Number of bytes taken to setup MC for the req */
u32 mc_len;
struct pl330_req *r;
- /* Hook to attach to DMAC's list of reqs with due callback */
- struct list_head rqd;
};
/* ToBeDone for tasklet */
@@ -1683,7 +1683,7 @@ static void pl330_dotask(unsigned long data)
/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(const struct pl330_info *pi)
{
- struct _pl330_req *rqdone;
+ struct pl330_req *rqdone, *tmp;
struct pl330_dmac *pl330;
unsigned long flags;
void __iomem *regs;
@@ -1750,7 +1750,10 @@ static int pl330_update(const struct pl330_info *pi)
if (active == -1) /* Aborted */
continue;
- rqdone = &thrd->req[active];
+ /* Detach the req */
+ rqdone = thrd->req[active].r;
+ thrd->req[active].r = NULL;
+
mark_free(thrd, active);
/* Get going again ASAP */
@@ -1762,20 +1765,11 @@ static int pl330_update(const struct pl330_info *pi)
}
/* Now that we are in no hurry, do the callbacks */
- while (!list_empty(&pl330->req_done)) {
- struct pl330_req *r;
-
- rqdone = container_of(pl330->req_done.next,
- struct _pl330_req, rqd);
-
- list_del_init(&rqdone->rqd);
-
- /* Detach the req */
- r = rqdone->r;
- rqdone->r = NULL;
+ list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
+ list_del(&rqdone->rqd);
spin_unlock_irqrestore(&pl330->lock, flags);
- _callback(r, PL330_ERR_NONE);
+ _callback(rqdone, PL330_ERR_NONE);
spin_lock_irqsave(&pl330->lock, flags);
}
@@ -2321,7 +2315,7 @@ static void pl330_tasklet(unsigned long data)
/* Pick up ripe tomatoes */
list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
if (desc->status == DONE) {
- if (pch->cyclic)
+ if (!pch->cyclic)
dma_cookie_complete(&desc->txd);
list_move_tail(&desc->node, &list);
}
@@ -2539,7 +2533,7 @@ static inline void _init_desc(struct dma_pl330_desc *desc)
}
/* Returns the number of descriptors added to the DMAC pool */
-int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
+static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
struct dma_pl330_desc *desc;
unsigned long flags;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 7ef73c9..7be9b72 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -715,25 +715,6 @@ static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
input_addr_to_dram_addr(mci, input_addr));
}
-/*
- * Find the minimum and maximum InputAddr values that map to the given @csrow.
- * Pass back these values in *input_addr_min and *input_addr_max.
- */
-static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
- u64 *input_addr_min, u64 *input_addr_max)
-{
- struct amd64_pvt *pvt;
- u64 base, mask;
-
- pvt = mci->pvt_info;
- BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
-
- get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
-
- *input_addr_min = base & ~mask;
- *input_addr_max = base | mask;
-}
-
/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
u32 *page, u32 *offset)
@@ -1058,6 +1039,37 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
int channel, csrow;
u32 page, offset;
+ error_address_to_page_and_offset(sys_addr, &page, &offset);
+
+ /*
+ * Find out which node the error address belongs to. This may be
+ * different from the node that detected the error.
+ */
+ src_mci = find_mc_by_sys_addr(mci, sys_addr);
+ if (!src_mci) {
+ amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
+ (unsigned long)sys_addr);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset, syndrome,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "failed to map error addr to a node",
+ NULL);
+ return;
+ }
+
+ /* Now map the sys_addr to a CSROW */
+ csrow = sys_addr_to_csrow(src_mci, sys_addr);
+ if (csrow < 0) {
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset, syndrome,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "failed to map error addr to a csrow",
+ NULL);
+ return;
+ }
+
/* CHIPKILL enabled */
if (pvt->nbcfg & NBCFG_CHIPKILL) {
channel = get_channel_from_ecc_syndrome(mci, syndrome);
@@ -1067,9 +1079,15 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
* 2 DIMMs is in error. So we need to ID 'both' of them
* as suspect.
*/
- amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible "
- "error reporting race\n", syndrome);
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - "
+ "possible error reporting race\n",
+ syndrome);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset, syndrome,
+ csrow, -1, -1,
+ EDAC_MOD_STR,
+ "unknown syndrome - possible error reporting race",
+ NULL);
return;
}
} else {
@@ -1084,28 +1102,10 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
channel = ((sys_addr & BIT(3)) != 0);
}
- /*
- * Find out which node the error address belongs to. This may be
- * different from the node that detected the error.
- */
- src_mci = find_mc_by_sys_addr(mci, sys_addr);
- if (!src_mci) {
- amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
- (unsigned long)sys_addr);
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
- return;
- }
-
- /* Now map the sys_addr to a CSROW */
- csrow = sys_addr_to_csrow(src_mci, sys_addr);
- if (csrow < 0) {
- edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
- } else {
- error_address_to_page_and_offset(sys_addr, &page, &offset);
-
- edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
- channel, EDAC_MOD_STR);
- }
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci,
+ page, offset, syndrome,
+ csrow, channel, -1,
+ EDAC_MOD_STR, "", NULL);
}
static int ddr2_cs_size(unsigned i, bool dct_width)
@@ -1611,15 +1611,20 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
u32 page, offset;
int nid, csrow, chan = 0;
+ error_address_to_page_and_offset(sys_addr, &page, &offset);
+
csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
if (csrow < 0) {
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset, syndrome,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "failed to map error addr to a csrow",
+ NULL);
return;
}
- error_address_to_page_and_offset(sys_addr, &page, &offset);
-
/*
* We need the syndromes for channel detection only when we're
* ganged. Otherwise @chan should already contain the channel at
@@ -1628,16 +1633,10 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
if (dct_ganging_enabled(pvt))
chan = get_channel_from_ecc_syndrome(mci, syndrome);
- if (chan >= 0)
- edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
- EDAC_MOD_STR);
- else
- /*
- * Channel unknown, report all channels on this CSROW as failed.
- */
- for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
- edac_mc_handle_ce(mci, page, offset, syndrome,
- csrow, chan, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset, syndrome,
+ csrow, chan, -1,
+ EDAC_MOD_STR, "", NULL);
}
/*
@@ -1918,7 +1917,12 @@ static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
/* Ensure that the Error Address is VALID */
if (!(m->status & MCI_STATUS_ADDRV)) {
amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
- edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, 0,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "HW has no ERROR_ADDRESS available",
+ NULL);
return;
}
@@ -1942,11 +1946,17 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
if (!(m->status & MCI_STATUS_ADDRV)) {
amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
- edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 0, 0, 0,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "HW has no ERROR_ADDRESS available",
+ NULL);
return;
}
sys_addr = get_error_address(m);
+ error_address_to_page_and_offset(sys_addr, &page, &offset);
/*
* Find out which node the error address belongs to. This may be
@@ -1956,7 +1966,11 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
if (!src_mci) {
amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
(unsigned long)sys_addr);
- edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, offset, 0,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "ERROR ADDRESS NOT mapped to a MC", NULL);
return;
}
@@ -1966,10 +1980,17 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
if (csrow < 0) {
amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
(unsigned long)sys_addr);
- edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, offset, 0,
+ -1, -1, -1,
+ EDAC_MOD_STR,
+ "ERROR ADDRESS NOT mapped to CS",
+ NULL);
} else {
- error_address_to_page_and_offset(sys_addr, &page, &offset);
- edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, offset, 0,
+ csrow, -1, -1,
+ EDAC_MOD_STR, "", NULL);
}
}
@@ -2171,7 +2192,7 @@ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
- debugf0(" nr_pages= %u channel-count = %d\n",
+ debugf0(" nr_pages/channel= %u channel-count = %d\n",
nr_pages, pvt->channel_count);
return nr_pages;
@@ -2185,9 +2206,12 @@ static int init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow;
struct amd64_pvt *pvt = mci->pvt_info;
- u64 input_addr_min, input_addr_max, sys_addr, base, mask;
+ u64 base, mask;
u32 val;
- int i, empty = 1;
+ int i, j, empty = 1;
+ enum mem_type mtype;
+ enum edac_type edac_mode;
+ int nr_pages = 0;
amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
@@ -2211,41 +2235,32 @@ static int init_csrows(struct mem_ctl_info *mci)
empty = 0;
if (csrow_enabled(i, 0, pvt))
- csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
+ nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
if (csrow_enabled(i, 1, pvt))
- csrow->nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
- find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
- sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
- csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
- sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
- csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
+ nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
get_cs_base_and_mask(pvt, i, 0, &base, &mask);
- csrow->page_mask = ~mask;
/* 8 bytes of resolution */
- csrow->mtype = amd64_determine_memory_type(pvt, i);
+ mtype = amd64_determine_memory_type(pvt, i);
debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
- debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n",
- (unsigned long)input_addr_min,
- (unsigned long)input_addr_max);
- debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n",
- (unsigned long)sys_addr, csrow->page_mask);
- debugf1(" nr_pages: %u first_page: 0x%lx "
- "last_page: 0x%lx\n",
- (unsigned)csrow->nr_pages,
- csrow->first_page, csrow->last_page);
+ debugf1(" nr_pages: %u\n", nr_pages * pvt->channel_count);
/*
* determine whether CHIPKILL or JUST ECC or NO ECC is operating
*/
if (pvt->nbcfg & NBCFG_ECC_ENABLE)
- csrow->edac_mode =
- (pvt->nbcfg & NBCFG_CHIPKILL) ?
- EDAC_S4ECD4ED : EDAC_SECDED;
+ edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
+ EDAC_S4ECD4ED : EDAC_SECDED;
else
- csrow->edac_mode = EDAC_NONE;
+ edac_mode = EDAC_NONE;
+
+ for (j = 0; j < pvt->channel_count; j++) {
+ csrow->channels[j].dimm->mtype = mtype;
+ csrow->channels[j].dimm->edac_mode = edac_mode;
+ csrow->channels[j].dimm->nr_pages = nr_pages;
+ }
}
return empty;
@@ -2540,6 +2555,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
struct amd64_pvt *pvt = NULL;
struct amd64_family_type *fam_type = NULL;
struct mem_ctl_info *mci = NULL;
+ struct edac_mc_layer layers[2];
int err = 0, ret;
u8 nid = get_node_id(F2);
@@ -2574,7 +2590,13 @@ static int amd64_init_one_instance(struct pci_dev *F2)
goto err_siblings;
ret = -ENOMEM;
- mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = pvt->csels[0].b_cnt;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = pvt->channel_count;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
if (!mci)
goto err_siblings;
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index f8fd3c8..9774d44 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -29,7 +29,6 @@
edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg)
#define AMD76X_NR_CSROWS 8
-#define AMD76X_NR_CHANS 1
#define AMD76X_NR_DIMMS 4
/* AMD 76x register addresses - device 0 function 0 - PCI bridge */
@@ -146,8 +145,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
if (handle_errors) {
row = (info->ecc_mode_status >> 4) & 0xf;
- edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0,
- row, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ mci->csrows[row].first_page, 0, 0,
+ row, 0, -1,
+ mci->ctl_name, "", NULL);
}
}
@@ -159,8 +160,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
if (handle_errors) {
row = info->ecc_mode_status & 0xf;
- edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0,
- 0, row, 0, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ mci->csrows[row].first_page, 0, 0,
+ row, 0, -1,
+ mci->ctl_name, "", NULL);
}
}
@@ -186,11 +189,13 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
enum edac_type edac_mode)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
u32 mba, mba_base, mba_mask, dms;
int index;
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
/* find the DRAM Chip Select Base address and mask */
pci_read_config_dword(pdev,
@@ -203,13 +208,13 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms);
csrow->first_page = mba_base >> PAGE_SHIFT;
- csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ dimm->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + dimm->nr_pages - 1;
csrow->page_mask = mba_mask >> PAGE_SHIFT;
- csrow->grain = csrow->nr_pages << PAGE_SHIFT;
- csrow->mtype = MEM_RDDR;
- csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
- csrow->edac_mode = edac_mode;
+ dimm->grain = dimm->nr_pages << PAGE_SHIFT;
+ dimm->mtype = MEM_RDDR;
+ dimm->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
+ dimm->edac_mode = edac_mode;
}
}
@@ -230,7 +235,8 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
EDAC_SECDED,
EDAC_SECDED
};
- struct mem_ctl_info *mci = NULL;
+ struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
u32 ems;
u32 ems_mode;
struct amd76x_error_info discard;
@@ -238,11 +244,17 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
debugf0("%s()\n", __func__);
pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
ems_mode = (ems >> 10) & 0x3;
- mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS, 0);
- if (mci == NULL) {
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = AMD76X_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
+
+ if (mci == NULL)
return -ENOMEM;
- }
debugf0("%s(): mci = %p\n", __func__, mci);
mci->dev = &pdev->dev;
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index 9a6a274..69ee6aa 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -48,8 +48,9 @@ static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
syndrome = (ar & 0x000000001fe00000ul) >> 21;
/* TODO: Decoding of the error address */
- edac_mc_handle_ce(mci, csrow->first_page + pfn, offset,
- syndrome, 0, chan, "");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ csrow->first_page + pfn, offset, syndrome,
+ 0, chan, -1, "", "", NULL);
}
static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
@@ -69,7 +70,9 @@ static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
offset = address & ~PAGE_MASK;
/* TODO: Decoding of the error address */
- edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, "");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ csrow->first_page + pfn, offset, 0,
+ 0, chan, -1, "", "", NULL);
}
static void cell_edac_check(struct mem_ctl_info *mci)
@@ -124,8 +127,11 @@ static void cell_edac_check(struct mem_ctl_info *mci)
static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
{
struct csrow_info *csrow = &mci->csrows[0];
+ struct dimm_info *dimm;
struct cell_edac_priv *priv = mci->pvt_info;
struct device_node *np;
+ int j;
+ u32 nr_pages;
for (np = NULL;
(np = of_find_node_by_name(np, "memory")) != NULL;) {
@@ -140,15 +146,20 @@ static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
if (of_node_to_nid(np) != priv->node)
continue;
csrow->first_page = r.start >> PAGE_SHIFT;
- csrow->nr_pages = resource_size(&r) >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- csrow->mtype = MEM_XDR;
- csrow->edac_mode = EDAC_SECDED;
+ nr_pages = resource_size(&r) >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + nr_pages - 1;
+
+ for (j = 0; j < csrow->nr_channels; j++) {
+ dimm = csrow->channels[j].dimm;
+ dimm->mtype = MEM_XDR;
+ dimm->edac_mode = EDAC_SECDED;
+ dimm->nr_pages = nr_pages / csrow->nr_channels;
+ }
dev_dbg(mci->dev,
"Initialized on node %d, chanmask=0x%x,"
" first_page=0x%lx, nr_pages=0x%x\n",
priv->node, priv->chanmask,
- csrow->first_page, csrow->nr_pages);
+ csrow->first_page, nr_pages);
break;
}
}
@@ -157,9 +168,10 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
{
struct cbe_mic_tm_regs __iomem *regs;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct cell_edac_priv *priv;
u64 reg;
- int rc, chanmask;
+ int rc, chanmask, num_chans;
regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id));
if (regs == NULL)
@@ -184,8 +196,16 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
in_be64(&regs->mic_fir));
/* Allocate & init EDAC MC data structure */
- mci = edac_mc_alloc(sizeof(struct cell_edac_priv), 1,
- chanmask == 3 ? 2 : 1, pdev->id);
+ num_chans = chanmask == 3 ? 2 : 1;
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = 1;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = num_chans;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
+ sizeof(struct cell_edac_priv));
if (mci == NULL)
return -ENOMEM;
priv = mci->pvt_info;
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index a774c0d..e22030a 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -329,9 +329,10 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
{
struct cpc925_mc_pdata *pdata = mci->pvt_info;
struct csrow_info *csrow;
- int index;
+ struct dimm_info *dimm;
+ int index, j;
u32 mbmr, mbbar, bba;
- unsigned long row_size, last_nr_pages = 0;
+ unsigned long row_size, nr_pages, last_nr_pages = 0;
get_total_mem(pdata);
@@ -350,36 +351,41 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
row_size = bba * (1UL << 28); /* 256M */
csrow->first_page = last_nr_pages;
- csrow->nr_pages = row_size >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
+ nr_pages = row_size >> PAGE_SHIFT;
+ csrow->last_page = csrow->first_page + nr_pages - 1;
last_nr_pages = csrow->last_page + 1;
- csrow->mtype = MEM_RDDR;
- csrow->edac_mode = EDAC_SECDED;
-
- switch (csrow->nr_channels) {
- case 1: /* Single channel */
- csrow->grain = 32; /* four-beat burst of 32 bytes */
- break;
- case 2: /* Dual channel */
- default:
- csrow->grain = 64; /* four-beat burst of 64 bytes */
- break;
- }
-
- switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
- case 6: /* 0110, no way to differentiate X8 VS X16 */
- case 5: /* 0101 */
- case 8: /* 1000 */
- csrow->dtype = DEV_X16;
- break;
- case 7: /* 0111 */
- case 9: /* 1001 */
- csrow->dtype = DEV_X8;
- break;
- default:
- csrow->dtype = DEV_UNKNOWN;
- break;
+ for (j = 0; j < csrow->nr_channels; j++) {
+ dimm = csrow->channels[j].dimm;
+
+ dimm->nr_pages = nr_pages / csrow->nr_channels;
+ dimm->mtype = MEM_RDDR;
+ dimm->edac_mode = EDAC_SECDED;
+
+ switch (csrow->nr_channels) {
+ case 1: /* Single channel */
+ dimm->grain = 32; /* four-beat burst of 32 bytes */
+ break;
+ case 2: /* Dual channel */
+ default:
+ dimm->grain = 64; /* four-beat burst of 64 bytes */
+ break;
+ }
+
+ switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
+ case 6: /* 0110, no way to differentiate X8 VS X16 */
+ case 5: /* 0101 */
+ case 8: /* 1000 */
+ dimm->dtype = DEV_X16;
+ break;
+ case 7: /* 0111 */
+ case 9: /* 1001 */
+ dimm->dtype = DEV_X8;
+ break;
+ default:
+ dimm->dtype = DEV_UNKNOWN;
+ break;
+ }
}
}
}
@@ -549,13 +555,18 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
if (apiexcp & CECC_EXCP_DETECTED) {
cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
channel = cpc925_mc_find_channel(mci, syndrome);
- edac_mc_handle_ce(mci, pfn, offset, syndrome,
- csrow, channel, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ pfn, offset, syndrome,
+ csrow, channel, -1,
+ mci->ctl_name, "", NULL);
}
if (apiexcp & UECC_EXCP_DETECTED) {
cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
- edac_mc_handle_ue(mci, pfn, offset, csrow, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ pfn, offset, 0,
+ csrow, -1, -1,
+ mci->ctl_name, "", NULL);
}
cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
@@ -927,6 +938,7 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
{
static int edac_mc_idx;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
void __iomem *vbase;
struct cpc925_mc_pdata *pdata;
struct resource *r;
@@ -962,9 +974,16 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
goto err2;
}
- nr_channels = cpc925_mc_get_channels(vbase);
- mci = edac_mc_alloc(sizeof(struct cpc925_mc_pdata),
- CPC925_NR_CSROWS, nr_channels + 1, edac_mc_idx);
+ nr_channels = cpc925_mc_get_channels(vbase) + 1;
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = CPC925_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = nr_channels;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+ sizeof(struct cpc925_mc_pdata));
if (!mci) {
cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n");
res = -ENOMEM;
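The error-reporting conversions replace the old handle_ce/handle_ue entry points with the single edac_mc_handle_error() call. A hedged sketch of its use with this patch's signature, where any layer whose position is unknown is passed as -1 (pfn, offset, syndrome, csrow and channel are placeholder variables):

	/* Corrected error with a known csrow/channel location */
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
			     pfn, offset, syndrome,
			     csrow, channel, -1,
			     "example CE", "", NULL);

	/* Uncorrected error with no location information at all */
	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
			     -1, -1, -1,
			     "example UE overflow", "", NULL);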
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 4122326..3186512 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -4,7 +4,11 @@
* This file may be distributed under the terms of the
* GNU General Public License.
*
- * See "enum e752x_chips" below for supported chipsets
+ * Implement support for the e7520, E7525, e7320 and i3100 memory controllers.
+ *
+ * Datasheets:
+ * http://www.intel.in/content/www/in/en/chipsets/e7525-memory-controller-hub-datasheet.html
+ * ftp://download.intel.com/design/intarch/datashts/31345803.pdf
*
* Written by Tom Zimmerman
*
@@ -13,8 +17,6 @@
* Wang Zhenyu at intel.com
* Dave Jiang at mvista.com
*
- * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
- *
*/
#include <linux/module.h>
@@ -187,6 +189,25 @@ enum e752x_chips {
I3100 = 3
};
+/*
+ * These chips support single-rank and dual-rank memories only.
+ *
+ * On e752x chips, the odd rows are present only on dual-rank memories.
+ * Dividing the rank number by two gives the dimm#.
+ *
+ * i3100 MC has a different mapping: it supports only 4 ranks.
+ *
+ * The mapping is (from 1 to n):
+ * slot single-ranked double-ranked
+ * dimm #1 -> rank #4 NA
+ * dimm #2 -> rank #3 NA
+ * dimm #3 -> rank #2 Ranks 2 and 3
+ * dimm #4 -> rank #1 Ranks 1 and 4
+ *
+ * FIXME: The current mapping for i3100 assumes that it supports up to 8
+ * ranks/channel, but the datasheet says that the MC supports only 4 ranks.
+ */
+
struct e752x_pvt {
struct pci_dev *bridge_ck;
struct pci_dev *dev_d0f0;
@@ -350,8 +371,10 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
channel = !(error_one & 1);
/* e752x mc reads 34:6 of the DRAM linear address */
- edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4),
- sec1_syndrome, row, channel, "e752x CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offset_in_page(sec1_add << 4), sec1_syndrome,
+ row, channel, -1,
+ "e752x CE", "", NULL);
}
static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
@@ -385,9 +408,12 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
edac_mc_find_csrow_by_page(mci, block_page);
/* e752x mc reads 34:6 of the DRAM linear address */
- edac_mc_handle_ue(mci, block_page,
- offset_in_page(error_2b << 4),
- row, "e752x UE from Read");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ block_page,
+ offset_in_page(error_2b << 4), 0,
+ row, -1, -1,
+ "e752x UE from Read", "", NULL);
+
}
if (error_one & 0x0404) {
error_2b = scrb_add;
@@ -401,9 +427,11 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
edac_mc_find_csrow_by_page(mci, block_page);
/* e752x mc reads 34:6 of the DRAM linear address */
- edac_mc_handle_ue(mci, block_page,
- offset_in_page(error_2b << 4),
- row, "e752x UE from Scruber");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ block_page,
+ offset_in_page(error_2b << 4), 0,
+ row, -1, -1,
+ "e752x UE from Scruber", "", NULL);
}
}
@@ -426,7 +454,9 @@ static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
return;
debugf3("%s()\n", __func__);
- edac_mc_handle_ue_no_info(mci, "e752x UE log memory write");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1,
+ "e752x UE log memory write", "", NULL);
}
static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
@@ -1044,7 +1074,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
u8 value;
- u32 dra, drc, cumul_size;
+ u32 dra, drc, cumul_size, i, nr_pages;
dra = 0;
for (index = 0; index < 4; index++) {
@@ -1053,7 +1083,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
dra |= dra_reg << (index * 8);
}
pci_read_config_dword(pdev, E752X_DRC, &drc);
- drc_chan = dual_channel_active(ddrcsr);
+ drc_chan = dual_channel_active(ddrcsr) ? 1 : 0;
drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
drc_ddim = (drc >> 20) & 0x3;
@@ -1078,26 +1108,33 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
+ nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
- csrow->mtype = MEM_RDDR; /* only one type supported */
- csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
-
- /*
- * if single channel or x8 devices then SECDED
- * if dual channel and x4 then S4ECD4ED
- */
- if (drc_ddim) {
- if (drc_chan && mem_dev) {
- csrow->edac_mode = EDAC_S4ECD4ED;
- mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
- } else {
- csrow->edac_mode = EDAC_SECDED;
- mci->edac_cap |= EDAC_FLAG_SECDED;
- }
- } else
- csrow->edac_mode = EDAC_NONE;
+
+ for (i = 0; i < csrow->nr_channels; i++) {
+ struct dimm_info *dimm = csrow->channels[i].dimm;
+
+ debugf3("Initializing rank at (%i,%i)\n", index, i);
+ dimm->nr_pages = nr_pages / csrow->nr_channels;
+ dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
+ dimm->mtype = MEM_RDDR; /* only one type supported */
+ dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+ /*
+ * if single channel or x8 devices then SECDED
+ * if dual channel and x4 then S4ECD4ED
+ */
+ if (drc_ddim) {
+ if (drc_chan && mem_dev) {
+ dimm->edac_mode = EDAC_S4ECD4ED;
+ mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+ } else {
+ dimm->edac_mode = EDAC_SECDED;
+ mci->edac_cap |= EDAC_FLAG_SECDED;
+ }
+ } else
+ dimm->edac_mode = EDAC_NONE;
+ }
}
}
@@ -1226,6 +1263,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
u16 pci_data;
u8 stat8;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct e752x_pvt *pvt;
u16 ddrcsr;
int drc_chan; /* Number of channels 0=1chan,1=2chan */
@@ -1252,11 +1290,15 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
/* Dual channel = 1, Single channel = 0 */
drc_chan = dual_channel_active(ddrcsr);
- mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0);
-
- if (mci == NULL) {
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = E752X_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = drc_chan + 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
+ if (mci == NULL)
return -ENOMEM;
- }
debugf3("%s(): init mci\n", __func__);
mci->mtype_cap = MEM_FLAG_RDDR;
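The csrow init loops converted in these drivers move the per-rank attributes from struct csrow_info to struct dimm_info; the common pattern, sketched with placeholder values, is:

	for (j = 0; j < csrow->nr_channels; j++) {
		struct dimm_info *dimm = csrow->channels[j].dimm;

		/* split the row's pages evenly across the channels */
		dimm->nr_pages  = nr_pages / csrow->nr_channels;
		dimm->grain     = 1 << 12;	/* placeholder grain */
		dimm->mtype     = MEM_RDDR;
		dimm->dtype     = DEV_X8;
		dimm->edac_mode = EDAC_SECDED;
	}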
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 68dea87..9a9c1a5 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -10,6 +10,9 @@
* Based on work by Dan Hollis <goemon at anime dot net> and others.
* http://www.anime.net/~goemon/linux-ecc/
*
+ * Datasheet:
+ * http://www.intel.com/content/www/us/en/chipsets/e7501-chipset-memory-controller-hub-datasheet.html
+ *
* Contributors:
* Eric Biederman (Linux Networx)
* Tom Zimmerman (Linux Networx)
@@ -71,7 +74,7 @@
#endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */
#define E7XXX_NR_CSROWS 8 /* number of csrows */
-#define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */
+#define E7XXX_NR_DIMMS 8 /* 2 channels, 4 dimms/channel */
/* E7XXX register addresses - device 0 function 0 */
#define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */
@@ -216,13 +219,15 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
row = edac_mc_find_csrow_by_page(mci, page);
/* convert syndrome to channel */
channel = e7xxx_find_channel(syndrome);
- edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, page, 0, syndrome,
+ row, channel, -1, "e7xxx CE", "", NULL);
}
static void process_ce_no_info(struct mem_ctl_info *mci)
{
debugf3("%s()\n", __func__);
- edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0, -1, -1, -1,
+ "e7xxx CE log register overflow", "", NULL);
}
static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
@@ -236,13 +241,17 @@ static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
/* FIXME - should use PAGE_SHIFT */
block_page = error_2b >> 6; /* convert to 4k address */
row = edac_mc_find_csrow_by_page(mci, block_page);
- edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE");
+
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, block_page, 0, 0,
+ row, -1, -1, "e7xxx UE", "", NULL);
}
static void process_ue_no_info(struct mem_ctl_info *mci)
{
debugf3("%s()\n", __func__);
- edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow");
+
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1,
+ "e7xxx UE log register overflow", "", NULL);
}
static void e7xxx_get_error_info(struct mem_ctl_info *mci,
@@ -347,11 +356,12 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
int dev_idx, u32 drc)
{
unsigned long last_cumul_size;
- int index;
+ int index, j;
u8 value;
- u32 dra, cumul_size;
+ u32 dra, cumul_size, nr_pages;
int drc_chan, drc_drbg, drc_ddim, mem_dev;
struct csrow_info *csrow;
+ struct dimm_info *dimm;
pci_read_config_dword(pdev, E7XXX_DRA, &dra);
drc_chan = dual_channel_active(drc, dev_idx);
@@ -379,26 +389,32 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
+ nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */
- csrow->mtype = MEM_RDDR; /* only one type supported */
- csrow->dtype = mem_dev ? DEV_X4 : DEV_X8;
-
- /*
- * if single channel or x8 devices then SECDED
- * if dual channel and x4 then S4ECD4ED
- */
- if (drc_ddim) {
- if (drc_chan && mem_dev) {
- csrow->edac_mode = EDAC_S4ECD4ED;
- mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
- } else {
- csrow->edac_mode = EDAC_SECDED;
- mci->edac_cap |= EDAC_FLAG_SECDED;
- }
- } else
- csrow->edac_mode = EDAC_NONE;
+
+ for (j = 0; j < drc_chan + 1; j++) {
+ dimm = csrow->channels[j].dimm;
+
+ dimm->nr_pages = nr_pages / (drc_chan + 1);
+ dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
+ dimm->mtype = MEM_RDDR; /* only one type supported */
+ dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
+
+ /*
+ * if single channel or x8 devices then SECDED
+ * if dual channel and x4 then S4ECD4ED
+ */
+ if (drc_ddim) {
+ if (drc_chan && mem_dev) {
+ dimm->edac_mode = EDAC_S4ECD4ED;
+ mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
+ } else {
+ dimm->edac_mode = EDAC_SECDED;
+ mci->edac_cap |= EDAC_FLAG_SECDED;
+ }
+ } else
+ dimm->edac_mode = EDAC_NONE;
+ }
}
}
@@ -406,6 +422,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
{
u16 pci_data;
struct mem_ctl_info *mci = NULL;
+ struct edac_mc_layer layers[2];
struct e7xxx_pvt *pvt = NULL;
u32 drc;
int drc_chan;
@@ -416,8 +433,21 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
pci_read_config_dword(pdev, E7XXX_DRC, &drc);
drc_chan = dual_channel_active(drc, dev_idx);
- mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1, 0);
-
+ /*
+ * According to the datasheet, this device has a maximum of
+ * 4 DIMMs per channel, either single-rank or dual-rank. So, the
+ * total number of DIMMs is 8 (E7XXX_NR_DIMMS).
+ * That means that the DIMMs are mapped as csrows, and the channel
+ * maps the rank. So, an error on either channel should be
+ * attributed to the same DIMM.
+ */
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = E7XXX_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = drc_chan + 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 5b73941..117490d 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -447,8 +447,10 @@ static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
#endif /* CONFIG_PCI */
-extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
- unsigned nr_chans, int edac_index);
+struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
+ unsigned n_layers,
+ struct edac_mc_layer *layers,
+ unsigned sz_pvt);
extern int edac_mc_add_mc(struct mem_ctl_info *mci);
extern void edac_mc_free(struct mem_ctl_info *mci);
extern struct mem_ctl_info *edac_mc_find(int idx);
@@ -456,35 +458,17 @@ extern struct mem_ctl_info *find_mci_by_dev(struct device *dev);
extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
unsigned long page);
-
-/*
- * The no info errors are used when error overflows are reported.
- * There are a limited number of error logging registers that can
- * be exausted. When all registers are exhausted and an additional
- * error occurs then an error overflow register records that an
- * error occurred and the type of error, but doesn't have any
- * further information. The ce/ue versions make for cleaner
- * reporting logic and function interface - reduces conditional
- * statement clutter and extra function arguments.
- */
-extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page,
- unsigned long syndrome, int row, int channel,
- const char *msg);
-extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
- const char *msg);
-extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page, int row,
- const char *msg);
-extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
- const char *msg);
-extern void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, unsigned int csrow,
- unsigned int channel0, unsigned int channel1,
- char *msg);
-extern void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, unsigned int csrow,
- unsigned int channel, char *msg);
+void edac_mc_handle_error(const enum hw_event_mc_err_type type,
+ struct mem_ctl_info *mci,
+ const unsigned long page_frame_number,
+ const unsigned long offset_in_page,
+ const unsigned long syndrome,
+ const int layer0,
+ const int layer1,
+ const int layer2,
+ const char *msg,
+ const char *other_detail,
+ const void *mcelog);
/*
* edac_device APIs
@@ -496,6 +480,7 @@ extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
int inst_nr, int block_nr, const char *msg);
extern int edac_device_alloc_index(void);
+extern const char *edac_layer_name[];
/*
* edac_pci APIs
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 45b8f4b..ee3f1f8 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -79,7 +79,7 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
unsigned total_size;
unsigned count;
unsigned instance, block, attr;
- void *pvt;
+ void *pvt, *p;
int err;
debugf4("%s() instances=%d blocks=%d\n",
@@ -92,35 +92,30 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
* to be at least as stringent as what the compiler would
* provide if we could simply hardcode everything into a single struct.
*/
- dev_ctl = (struct edac_device_ctl_info *)NULL;
+ p = NULL;
+ dev_ctl = edac_align_ptr(&p, sizeof(*dev_ctl), 1);
/* Calc the 'end' offset past end of ONE ctl_info structure
* which will become the start of the 'instance' array
*/
- dev_inst = edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst));
+ dev_inst = edac_align_ptr(&p, sizeof(*dev_inst), nr_instances);
/* Calc the 'end' offset past the instance array within the ctl_info
* which will become the start of the block array
*/
- dev_blk = edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk));
+ count = nr_instances * nr_blocks;
+ dev_blk = edac_align_ptr(&p, sizeof(*dev_blk), count);
/* Calc the 'end' offset past the dev_blk array
* which will become the start of the attrib array, if any.
*/
- count = nr_instances * nr_blocks;
- dev_attrib = edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib));
-
- /* Check for case of when an attribute array is specified */
- if (nr_attrib > 0) {
- /* calc how many nr_attrib we need */
+ /* calc how many nr_attrib we need */
+ if (nr_attrib > 0)
count *= nr_attrib;
+ dev_attrib = edac_align_ptr(&p, sizeof(*dev_attrib), count);
- /* Calc the 'end' offset past the attributes array */
- pvt = edac_align_ptr(&dev_attrib[count], sz_private);
- } else {
- /* no attribute array specified */
- pvt = edac_align_ptr(dev_attrib, sz_private);
- }
+ /* Calc the 'end' offset past the attributes array */
+ pvt = edac_align_ptr(&p, sz_private, 1);
/* 'pvt' now points to where the private data area is.
* At this point 'pvt' (like dev_inst,dev_blk and dev_attrib)
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index feef773..de5ba86e 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -43,9 +43,26 @@ static void edac_mc_dump_channel(struct rank_info *chan)
{
debugf4("\tchannel = %p\n", chan);
debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
- debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
- debugf4("\tchannel->label = '%s'\n", chan->label);
debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
+ debugf4("\tchannel->dimm = %p\n", chan->dimm);
+}
+
+static void edac_mc_dump_dimm(struct dimm_info *dimm)
+{
+ int i;
+
+ debugf4("\tdimm = %p\n", dimm);
+ debugf4("\tdimm->label = '%s'\n", dimm->label);
+ debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
+ debugf4("\tdimm location ");
+ for (i = 0; i < dimm->mci->n_layers; i++) {
+ printk(KERN_CONT "%d", dimm->location[i]);
+ if (i < dimm->mci->n_layers - 1)
+ printk(KERN_CONT ".");
+ }
+ printk(KERN_CONT "\n");
+ debugf4("\tdimm->grain = %d\n", dimm->grain);
+ debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
}
static void edac_mc_dump_csrow(struct csrow_info *csrow)
@@ -55,7 +72,6 @@ static void edac_mc_dump_csrow(struct csrow_info *csrow)
debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
- debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
debugf4("\tcsrow->channels = %p\n", csrow->channels);
debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
@@ -70,6 +86,8 @@ static void edac_mc_dump_mci(struct mem_ctl_info *mci)
debugf4("\tmci->edac_check = %p\n", mci->edac_check);
debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
mci->nr_csrows, mci->csrows);
+ debugf3("\tmci->nr_dimms = %d, dimms = %p\n",
+ mci->tot_dimms, mci->dimms);
debugf3("\tdev = %p\n", mci->dev);
debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
@@ -101,18 +119,37 @@ const char *edac_mem_types[] = {
};
EXPORT_SYMBOL_GPL(edac_mem_types);
-/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
- * Adjust 'ptr' so that its alignment is at least as stringent as what the
- * compiler would provide for X and return the aligned result.
+/**
+ * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
+ * @p: pointer to a pointer with the memory offset to be used. At
+ * return, this will be incremented to point to the next offset
+ * @size: Size of the data structure to be reserved
+ * @n_elems: Number of elements that should be reserved
*
* If 'size' is a constant, the compiler will optimize this whole function
- * down to either a no-op or the addition of a constant to the value of 'ptr'.
+ * down to either a no-op or the addition of a constant to the value of '*p'.
+ *
+ * The 'p' pointer is needed to keep advancing to the proper offsets in
+ * memory when allocating the struct along with its embedded structs,
+ * as edac_device_alloc_ctl_info() above does, for example.
+ *
+ * At return, the pointer 'p' will be incremented to be used on a next call
+ * to this function.
*/
-void *edac_align_ptr(void *ptr, unsigned size)
+void *edac_align_ptr(void **p, unsigned size, int n_elems)
{
unsigned align, r;
+ void *ptr = *p;
+
+ *p += size * n_elems;
- /* Here we assume that the alignment of a "long long" is the most
+ /*
+ * 'p' can possibly be an unaligned item X such that sizeof(X) is
+ * 'size'. Adjust 'p' so that its alignment is at least as
+ * stringent as what the compiler would provide for X and return
+ * the aligned result.
+ * Here we assume that the alignment of a "long long" is the most
* stringent alignment that the compiler will ever provide by default.
* As far as I know, this is a reasonable assumption.
*/
@@ -127,19 +164,23 @@ void *edac_align_ptr(void *ptr, unsigned size)
else
return (char *)ptr;
- r = size % align;
+ r = (unsigned long)ptr % align;
if (r == 0)
return (char *)ptr;
+ *p += align - r;
+
return (void *)(((unsigned long)ptr) + align - r);
}
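A sketch of the single-shot allocation pattern that edac_align_ptr() serves, with struct foo/struct bar, n_items and sz_pvt as placeholders: offsets are first computed against a NULL base, then rebased onto the single kzalloc()'d chunk.

	void *ptr = NULL, *base, *pvt;
	struct foo *a;
	struct bar *b;
	unsigned size;

	a   = edac_align_ptr(&ptr, sizeof(*a), 1);	/* offsets from 0 */
	b   = edac_align_ptr(&ptr, sizeof(*b), n_items);
	pvt = edac_align_ptr(&ptr, sz_pvt, 1);
	size = ((unsigned long)pvt) + sz_pvt;

	base = kzalloc(size, GFP_KERNEL);		/* one chunk for everything */
	if (!base)
		return -ENOMEM;
	a   = (struct foo *)((char *)base + (unsigned long)a);
	b   = (struct bar *)((char *)base + (unsigned long)b);
	pvt = sz_pvt ? (char *)base + (unsigned long)pvt : NULL;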
/**
- * edac_mc_alloc: Allocate a struct mem_ctl_info structure
- * @size_pvt: size of private storage needed
- * @nr_csrows: Number of CWROWS needed for this MC
- * @nr_chans: Number of channels for the MC
+ * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure
+ * @mc_num: Memory controller number
+ * @n_layers: Number of MC hierarchy layers
+ * @layers: Describes each layer as seen by the Memory Controller
+ * @sz_pvt: size of private storage needed
+ *
*
* Everything is kmalloc'ed as one big chunk - more efficient.
* Only can be used if all structures have the same lifetime - otherwise
@@ -147,32 +188,77 @@ void *edac_align_ptr(void *ptr, unsigned size)
*
* Use edac_mc_free() to free mc structures allocated by this function.
*
+ * NOTE: drivers handle multi-rank memories in different ways: in some
+ * drivers, one multi-rank memory stick is mapped as one entry, while, in
+ * others, a single multi-rank memory stick would be mapped into several
+ * entries. Currently, this function allocates multiple struct dimm_info
+ * in such scenarios, as grouping the multiple ranks would require driver changes.
+ *
* Returns:
- * NULL allocation failed
- * struct mem_ctl_info pointer
+ * On failure: NULL
+ * On success: struct mem_ctl_info pointer
*/
-struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
- unsigned nr_chans, int edac_index)
+struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
+ unsigned n_layers,
+ struct edac_mc_layer *layers,
+ unsigned sz_pvt)
{
struct mem_ctl_info *mci;
- struct csrow_info *csi, *csrow;
+ struct edac_mc_layer *layer;
+ struct csrow_info *csi, *csr;
struct rank_info *chi, *chp, *chan;
- void *pvt;
- unsigned size;
- int row, chn;
- int err;
+ struct dimm_info *dimm;
+ u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
+ unsigned pos[EDAC_MAX_LAYERS];
+ unsigned size, tot_dimms = 1, count = 1;
+ unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
+ void *pvt, *p, *ptr = NULL;
+ int i, j, err, row, chn, n, len;
+ bool per_rank = false;
+
+ BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
+ /*
+ * Calculate the total number of DIMMs and csrows/cschannels while
+ * in the old API emulation mode
+ */
+ for (i = 0; i < n_layers; i++) {
+ tot_dimms *= layers[i].size;
+ if (layers[i].is_virt_csrow)
+ tot_csrows *= layers[i].size;
+ else
+ tot_channels *= layers[i].size;
+
+ if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
+ per_rank = true;
+ }
/* Figure out the offsets of the various items from the start of an mc
* structure. We want the alignment of each item to be at least as
* stringent as what the compiler would provide if we could simply
* hardcode everything into a single struct.
*/
- mci = (struct mem_ctl_info *)0;
- csi = edac_align_ptr(&mci[1], sizeof(*csi));
- chi = edac_align_ptr(&csi[nr_csrows], sizeof(*chi));
- pvt = edac_align_ptr(&chi[nr_chans * nr_csrows], sz_pvt);
+ mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
+ layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
+ csi = edac_align_ptr(&ptr, sizeof(*csi), tot_csrows);
+ chi = edac_align_ptr(&ptr, sizeof(*chi), tot_csrows * tot_channels);
+ dimm = edac_align_ptr(&ptr, sizeof(*dimm), tot_dimms);
+ for (i = 0; i < n_layers; i++) {
+ count *= layers[i].size;
+ debugf4("%s: errcount layer %d size %d\n", __func__, i, count);
+ ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
+ ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
+ tot_errcount += 2 * count;
+ }
+
+ debugf4("%s: allocating %d error counters\n", __func__, tot_errcount);
+ pvt = edac_align_ptr(&ptr, sz_pvt, 1);
size = ((unsigned long)pvt) + sz_pvt;
+ debugf1("%s(): allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
+ __func__, size,
+ tot_dimms,
+ per_rank ? "ranks" : "dimms",
+ tot_csrows * tot_channels);
mci = kzalloc(size, GFP_KERNEL);
if (mci == NULL)
return NULL;
@@ -180,28 +266,103 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
/* Adjust pointers so they point within the memory we just allocated
* rather than an imaginary chunk of memory located at address 0.
*/
+ layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
chi = (struct rank_info *)(((char *)mci) + ((unsigned long)chi));
+ dimm = (struct dimm_info *)(((char *)mci) + ((unsigned long)dimm));
+ for (i = 0; i < n_layers; i++) {
+ mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
+ mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
+ }
pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;
/* setup index and various internal pointers */
- mci->mc_idx = edac_index;
+ mci->mc_idx = mc_num;
mci->csrows = csi;
+ mci->dimms = dimm;
+ mci->tot_dimms = tot_dimms;
mci->pvt_info = pvt;
- mci->nr_csrows = nr_csrows;
-
- for (row = 0; row < nr_csrows; row++) {
- csrow = &csi[row];
- csrow->csrow_idx = row;
- csrow->mci = mci;
- csrow->nr_channels = nr_chans;
- chp = &chi[row * nr_chans];
- csrow->channels = chp;
+ mci->n_layers = n_layers;
+ mci->layers = layer;
+ memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
+ mci->nr_csrows = tot_csrows;
+ mci->num_cschannel = tot_channels;
+ mci->mem_is_per_rank = per_rank;
- for (chn = 0; chn < nr_chans; chn++) {
+ /*
+ * Fill the csrow struct
+ */
+ for (row = 0; row < tot_csrows; row++) {
+ csr = &csi[row];
+ csr->csrow_idx = row;
+ csr->mci = mci;
+ csr->nr_channels = tot_channels;
+ chp = &chi[row * tot_channels];
+ csr->channels = chp;
+
+ for (chn = 0; chn < tot_channels; chn++) {
chan = &chp[chn];
chan->chan_idx = chn;
- chan->csrow = csrow;
+ chan->csrow = csr;
+ }
+ }
+
+ /*
+ * Fill the dimm struct
+ */
+ memset(&pos, 0, sizeof(pos));
+ row = 0;
+ chn = 0;
+ debugf4("%s: initializing %d %s\n", __func__, tot_dimms,
+ per_rank ? "ranks" : "dimms");
+ for (i = 0; i < tot_dimms; i++) {
+ chan = &csi[row].channels[chn];
+ dimm = EDAC_DIMM_PTR(layer, mci->dimms, n_layers,
+ pos[0], pos[1], pos[2]);
+ dimm->mci = mci;
+
+ debugf2("%s: %d: %s%zd (%d:%d:%d): row %d, chan %d\n", __func__,
+ i, per_rank ? "rank" : "dimm", (dimm - mci->dimms),
+ pos[0], pos[1], pos[2], row, chn);
+
+ /*
+ * Copy DIMM location and initialize it.
+ */
+ len = sizeof(dimm->label);
+ p = dimm->label;
+ n = snprintf(p, len, "mc#%u", mc_num);
+ p += n;
+ len -= n;
+ for (j = 0; j < n_layers; j++) {
+ n = snprintf(p, len, "%s#%u",
+ edac_layer_name[layers[j].type],
+ pos[j]);
+ p += n;
+ len -= n;
+ dimm->location[j] = pos[j];
+
+ if (len <= 0)
+ break;
+ }
+
+ /* Link it to the csrows old API data */
+ chan->dimm = dimm;
+ dimm->csrow = row;
+ dimm->cschannel = chn;
+
+ /* Increment csrow location */
+ row++;
+ if (row == tot_csrows) {
+ row = 0;
+ chn++;
+ }
+
+ /* Increment dimm location */
+ for (j = n_layers - 1; j >= 0; j--) {
+ pos[j]++;
+ if (pos[j] < layers[j].size)
+ break;
+ pos[j] = 0;
}
}
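For a hypothetical two-layer controller (4 csrows x 2 channels, mc_num 0), the loop above creates 8 dimm_info entries; per the label/location code above, the entry at position (2,1) can be fetched with EDAC_DIMM_PTR() and ends up with location {2,1} and label "mc#0csrow#2channel#1":

	struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
					       mci->n_layers, 2, 1, 0);
	/* dimm->location[0] == 2, dimm->location[1] == 1 */
	/* dimm->label == "mc#0csrow#2channel#1" */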
@@ -490,7 +651,6 @@ EXPORT_SYMBOL(edac_mc_find);
* edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
* create sysfs entries associated with mci structure
* @mci: pointer to the mci structure to be added to the list
- * @mc_idx: A unique numeric identifier to be assigned to the 'mci' structure.
*
* Return:
* 0 Success
@@ -517,6 +677,8 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
edac_mc_dump_channel(&mci->csrows[i].
channels[j]);
}
+ for (i = 0; i < mci->tot_dimms; i++)
+ edac_mc_dump_dimm(&mci->dimms[i]);
}
#endif
mutex_lock(&mem_ctls_mutex);
@@ -636,15 +798,19 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
struct csrow_info *csrows = mci->csrows;
- int row, i;
+ int row, i, j, n;
debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
row = -1;
for (i = 0; i < mci->nr_csrows; i++) {
struct csrow_info *csrow = &csrows[i];
-
- if (csrow->nr_pages == 0)
+ n = 0;
+ for (j = 0; j < csrow->nr_channels; j++) {
+ struct dimm_info *dimm = csrow->channels[j].dimm;
+ n += dimm->nr_pages;
+ }
+ if (n == 0)
continue;
debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
@@ -670,249 +836,307 @@ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
}
EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
-/* FIXME - setable log (warning/emerg) levels */
-/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */
-void edac_mc_handle_ce(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page, unsigned long syndrome,
- int row, int channel, const char *msg)
-{
- unsigned long remapped_page;
+const char *edac_layer_name[] = {
+ [EDAC_MC_LAYER_BRANCH] = "branch",
+ [EDAC_MC_LAYER_CHANNEL] = "channel",
+ [EDAC_MC_LAYER_SLOT] = "slot",
+ [EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
+};
+EXPORT_SYMBOL_GPL(edac_layer_name);
- debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
+static void edac_inc_ce_error(struct mem_ctl_info *mci,
+ bool enable_per_layer_report,
+ const int pos[EDAC_MAX_LAYERS])
+{
+ int i, index = 0;
- /* FIXME - maybe make panic on INTERNAL ERROR an option */
- if (row >= mci->nr_csrows || row < 0) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range "
- "(%d >= %d)\n", row, mci->nr_csrows);
- edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
- return;
- }
+ mci->ce_mc++;
- if (channel >= mci->csrows[row].nr_channels || channel < 0) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel out of range "
- "(%d >= %d)\n", channel,
- mci->csrows[row].nr_channels);
- edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
+ if (!enable_per_layer_report) {
+ mci->ce_noinfo_count++;
return;
}
- if (edac_mc_get_log_ce())
- /* FIXME - put in DIMM location */
- edac_mc_printk(mci, KERN_WARNING,
- "CE page 0x%lx, offset 0x%lx, grain %d, syndrome "
- "0x%lx, row %d, channel %d, label \"%s\": %s\n",
- page_frame_number, offset_in_page,
- mci->csrows[row].grain, syndrome, row, channel,
- mci->csrows[row].channels[channel].label, msg);
-
- mci->ce_count++;
- mci->csrows[row].ce_count++;
- mci->csrows[row].channels[channel].ce_count++;
-
- if (mci->scrub_mode & SCRUB_SW_SRC) {
- /*
- * Some MC's can remap memory so that it is still available
- * at a different address when PCI devices map into memory.
- * MC's that can't do this lose the memory where PCI devices
- * are mapped. This mapping is MC dependent and so we call
- * back into the MC driver for it to map the MC page to
- * a physical (CPU) page which can then be mapped to a virtual
- * page - which can then be scrubbed.
- */
- remapped_page = mci->ctl_page_to_phys ?
- mci->ctl_page_to_phys(mci, page_frame_number) :
- page_frame_number;
+ for (i = 0; i < mci->n_layers; i++) {
+ if (pos[i] < 0)
+ break;
+ index += pos[i];
+ mci->ce_per_layer[i][index]++;
- edac_mc_scrub_block(remapped_page, offset_in_page,
- mci->csrows[row].grain);
+ if (i < mci->n_layers - 1)
+ index *= mci->layers[i + 1].size;
}
}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
-void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg)
+static void edac_inc_ue_error(struct mem_ctl_info *mci,
+ bool enable_per_layer_report,
+ const int pos[EDAC_MAX_LAYERS])
{
- if (edac_mc_get_log_ce())
- edac_mc_printk(mci, KERN_WARNING,
- "CE - no information available: %s\n", msg);
+ int i, index = 0;
- mci->ce_noinfo_count++;
- mci->ce_count++;
-}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
+ mci->ue_mc++;
-void edac_mc_handle_ue(struct mem_ctl_info *mci,
- unsigned long page_frame_number,
- unsigned long offset_in_page, int row, const char *msg)
-{
- int len = EDAC_MC_LABEL_LEN * 4;
- char labels[len + 1];
- char *pos = labels;
- int chan;
- int chars;
-
- debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
-
- /* FIXME - maybe make panic on INTERNAL ERROR an option */
- if (row >= mci->nr_csrows || row < 0) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range "
- "(%d >= %d)\n", row, mci->nr_csrows);
- edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
+ if (!enable_per_layer_report) {
+ mci->ue_noinfo_count++;
return;
}
- chars = snprintf(pos, len + 1, "%s",
- mci->csrows[row].channels[0].label);
- len -= chars;
- pos += chars;
+ for (i = 0; i < mci->n_layers; i++) {
+ if (pos[i] < 0)
+ break;
+ index += pos[i];
+ mci->ue_per_layer[i][index]++;
- for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0);
- chan++) {
- chars = snprintf(pos, len + 1, ":%s",
- mci->csrows[row].channels[chan].label);
- len -= chars;
- pos += chars;
+ if (i < mci->n_layers - 1)
+ index *= mci->layers[i + 1].size;
}
+}
- if (edac_mc_get_log_ue())
- edac_mc_printk(mci, KERN_EMERG,
- "UE page 0x%lx, offset 0x%lx, grain %d, row %d, "
- "labels \"%s\": %s\n", page_frame_number,
- offset_in_page, mci->csrows[row].grain, row,
- labels, msg);
+static void edac_ce_error(struct mem_ctl_info *mci,
+ const int pos[EDAC_MAX_LAYERS],
+ const char *msg,
+ const char *location,
+ const char *label,
+ const char *detail,
+ const char *other_detail,
+ const bool enable_per_layer_report,
+ const unsigned long page_frame_number,
+ const unsigned long offset_in_page,
+ u32 grain)
+{
+ unsigned long remapped_page;
- if (edac_mc_get_panic_on_ue())
- panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, "
- "row %d, labels \"%s\": %s\n", mci->mc_idx,
- page_frame_number, offset_in_page,
- mci->csrows[row].grain, row, labels, msg);
+ if (edac_mc_get_log_ce()) {
+ if (other_detail && *other_detail)
+ edac_mc_printk(mci, KERN_WARNING,
+ "CE %s on %s (%s%s - %s)\n",
+ msg, label, location,
+ detail, other_detail);
+ else
+ edac_mc_printk(mci, KERN_WARNING,
+ "CE %s on %s (%s%s)\n",
+ msg, label, location,
+ detail);
+ }
+ edac_inc_ce_error(mci, enable_per_layer_report, pos);
- mci->ue_count++;
- mci->csrows[row].ue_count++;
+ if (mci->scrub_mode & SCRUB_SW_SRC) {
+ /*
+ * Some memory controllers (called MCs below) can remap
+ * memory so that it is still available at a different
+ * address when PCI devices map into memory.
+ * MC's that can't do this, lose the memory where PCI
+ * devices are mapped. This mapping is MC-dependent
+ * and so we call back into the MC driver for it to
+ * map the MC page to a physical (CPU) page which can
+ * then be mapped to a virtual page - which can then
+ * be scrubbed.
+ */
+ remapped_page = mci->ctl_page_to_phys ?
+ mci->ctl_page_to_phys(mci, page_frame_number) :
+ page_frame_number;
+
+ edac_mc_scrub_block(remapped_page,
+ offset_in_page, grain);
+ }
}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ue);
-void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg)
+static void edac_ue_error(struct mem_ctl_info *mci,
+ const int pos[EDAC_MAX_LAYERS],
+ const char *msg,
+ const char *location,
+ const char *label,
+ const char *detail,
+ const char *other_detail,
+ const bool enable_per_layer_report)
{
- if (edac_mc_get_panic_on_ue())
- panic("EDAC MC%d: Uncorrected Error", mci->mc_idx);
+ if (edac_mc_get_log_ue()) {
+ if (other_detail && *other_detail)
+ edac_mc_printk(mci, KERN_WARNING,
+ "UE %s on %s (%s%s - %s)\n",
+ msg, label, location, detail,
+ other_detail);
+ else
+ edac_mc_printk(mci, KERN_WARNING,
+ "UE %s on %s (%s%s)\n",
+ msg, label, location, detail);
+ }
- if (edac_mc_get_log_ue())
- edac_mc_printk(mci, KERN_WARNING,
- "UE - no information available: %s\n", msg);
- mci->ue_noinfo_count++;
- mci->ue_count++;
+ if (edac_mc_get_panic_on_ue()) {
+ if (other_detail && *other_detail)
+ panic("UE %s on %s (%s%s - %s)\n",
+ msg, label, location, detail, other_detail);
+ else
+ panic("UE %s on %s (%s%s)\n",
+ msg, label, location, detail);
+ }
+
+ edac_inc_ue_error(mci, enable_per_layer_report, pos);
}
-EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
-/*************************************************************
- * On Fully Buffered DIMM modules, this help function is
- * called to process UE events
- */
-void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci,
- unsigned int csrow,
- unsigned int channela,
- unsigned int channelb, char *msg)
+#define OTHER_LABEL " or "
+void edac_mc_handle_error(const enum hw_event_mc_err_type type,
+ struct mem_ctl_info *mci,
+ const unsigned long page_frame_number,
+ const unsigned long offset_in_page,
+ const unsigned long syndrome,
+ const int layer0,
+ const int layer1,
+ const int layer2,
+ const char *msg,
+ const char *other_detail,
+ const void *mcelog)
{
- int len = EDAC_MC_LABEL_LEN * 4;
- char labels[len + 1];
- char *pos = labels;
- int chars;
+ /* FIXME: too much for stack: move it to some pre-allocated area */
+ char detail[80], location[80];
+ char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
+ char *p;
+ int row = -1, chan = -1;
+ int pos[EDAC_MAX_LAYERS] = { layer0, layer1, layer2 };
+ int i;
+ u32 grain;
+ bool enable_per_layer_report = false;
- if (csrow >= mci->nr_csrows) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range (%d >= %d)\n",
- csrow, mci->nr_csrows);
- edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
- return;
- }
+ debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
- if (channela >= mci->csrows[csrow].nr_channels) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel-a out of range "
- "(%d >= %d)\n",
- channela, mci->csrows[csrow].nr_channels);
- edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
- return;
+ /*
+ * Check if the event report is consistent and if the memory
+ * location is known. If it is known, enable_per_layer_report will be
+ * true, the DIMM(s) label info will be filled and the per-layer
+ * error counters will be incremented.
+ */
+ for (i = 0; i < mci->n_layers; i++) {
+ if (pos[i] >= (int)mci->layers[i].size) {
+ if (type == HW_EVENT_ERR_CORRECTED)
+ p = "CE";
+ else
+ p = "UE";
+
+ edac_mc_printk(mci, KERN_ERR,
+ "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
+ edac_layer_name[mci->layers[i].type],
+ pos[i], mci->layers[i].size);
+ /*
+ * Instead of just returning it, let's use what's
+ * known about the error. The increment routines and
+ * the DIMM filter logic will do the right thing by
+ * pointing to the likely damaged DIMMs.
+ */
+ pos[i] = -1;
+ }
+ if (pos[i] >= 0)
+ enable_per_layer_report = true;
}
- if (channelb >= mci->csrows[csrow].nr_channels) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel-b out of range "
- "(%d >= %d)\n",
- channelb, mci->csrows[csrow].nr_channels);
- edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
- return;
- }
+ /*
+ * Get the dimm label/grain that applies to the match criteria.
+ * As the error algorithm may not be able to point to just one memory
+ * stick, the logic here will get all possible labels that could
+ * potentially be affected by the error.
+ * On FB-DIMM memory controllers, for uncorrected errors, it is common
+ * to have only the MC channel and the MC dimm (also called "branch")
+ * but the channel is not known, as the memory is arranged in pairs,
+ * where each memory belongs to a separate channel within the same
+ * branch.
+ */
+ grain = 0;
+ p = label;
+ *p = '\0';
+ for (i = 0; i < mci->tot_dimms; i++) {
+ struct dimm_info *dimm = &mci->dimms[i];
- mci->ue_count++;
- mci->csrows[csrow].ue_count++;
+ if (layer0 >= 0 && layer0 != dimm->location[0])
+ continue;
+ if (layer1 >= 0 && layer1 != dimm->location[1])
+ continue;
+ if (layer2 >= 0 && layer2 != dimm->location[2])
+ continue;
- /* Generate the DIMM labels from the specified channels */
- chars = snprintf(pos, len + 1, "%s",
- mci->csrows[csrow].channels[channela].label);
- len -= chars;
- pos += chars;
- chars = snprintf(pos, len + 1, "-%s",
- mci->csrows[csrow].channels[channelb].label);
+ /* get the max grain, over the error match range */
+ if (dimm->grain > grain)
+ grain = dimm->grain;
- if (edac_mc_get_log_ue())
- edac_mc_printk(mci, KERN_EMERG,
- "UE row %d, channel-a= %d channel-b= %d "
- "labels \"%s\": %s\n", csrow, channela, channelb,
- labels, msg);
+ /*
+ * If the error is memory-controller wide, there's no need to
+ * look for the affected DIMMs because the whole
+ * channel/memory controller/... may be affected.
+ * Also, don't show errors for empty DIMM slots.
+ */
+ if (enable_per_layer_report && dimm->nr_pages) {
+ if (p != label) {
+ strcpy(p, OTHER_LABEL);
+ p += strlen(OTHER_LABEL);
+ }
+ strcpy(p, dimm->label);
+ p += strlen(p);
+ *p = '\0';
+
+ /*
+ * get csrow/channel of the DIMM, in order to allow
+ * incrementing the compat API counters
+ */
+ debugf4("%s: %s csrows map: (%d,%d)\n",
+ __func__,
+ mci->mem_is_per_rank ? "rank" : "dimm",
+ dimm->csrow, dimm->cschannel);
+
+ if (row == -1)
+ row = dimm->csrow;
+ else if (row >= 0 && row != dimm->csrow)
+ row = -2;
+
+ if (chan == -1)
+ chan = dimm->cschannel;
+ else if (chan >= 0 && chan != dimm->cschannel)
+ chan = -2;
+ }
+ }
- if (edac_mc_get_panic_on_ue())
- panic("UE row %d, channel-a= %d channel-b= %d "
- "labels \"%s\": %s\n", csrow, channela,
- channelb, labels, msg);
-}
-EXPORT_SYMBOL(edac_mc_handle_fbd_ue);
+ if (!enable_per_layer_report) {
+ strcpy(label, "any memory");
+ } else {
+ debugf4("%s: csrow/channel to increment: (%d,%d)\n",
+ __func__, row, chan);
+ if (p == label)
+ strcpy(label, "unknown memory");
+ if (type == HW_EVENT_ERR_CORRECTED) {
+ if (row >= 0) {
+ mci->csrows[row].ce_count++;
+ if (chan >= 0)
+ mci->csrows[row].channels[chan].ce_count++;
+ }
+ } else
+ if (row >= 0)
+ mci->csrows[row].ue_count++;
+ }
-/*************************************************************
- * On Fully Buffered DIMM modules, this help function is
- * called to process CE events
- */
-void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
- unsigned int csrow, unsigned int channel, char *msg)
-{
+ /* Fill the RAM location data */
+ p = location;
+ for (i = 0; i < mci->n_layers; i++) {
+ if (pos[i] < 0)
+ continue;
- /* Ensure boundary values */
- if (csrow >= mci->nr_csrows) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: row out of range (%d >= %d)\n",
- csrow, mci->nr_csrows);
- edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
- return;
- }
- if (channel >= mci->csrows[csrow].nr_channels) {
- /* something is wrong */
- edac_mc_printk(mci, KERN_ERR,
- "INTERNAL ERROR: channel out of range (%d >= %d)\n",
- channel, mci->csrows[csrow].nr_channels);
- edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
- return;
+ p += sprintf(p, "%s:%d ",
+ edac_layer_name[mci->layers[i].type],
+ pos[i]);
}
- if (edac_mc_get_log_ce())
- /* FIXME - put in DIMM location */
- edac_mc_printk(mci, KERN_WARNING,
- "CE row %d, channel %d, label \"%s\": %s\n",
- csrow, channel,
- mci->csrows[csrow].channels[channel].label, msg);
+ /* Memory type dependent details about the error */
+ if (type == HW_EVENT_ERR_CORRECTED) {
+ snprintf(detail, sizeof(detail),
+ "page:0x%lx offset:0x%lx grain:%d syndrome:0x%lx",
+ page_frame_number, offset_in_page,
+ grain, syndrome);
+ edac_ce_error(mci, pos, msg, location, label, detail,
+ other_detail, enable_per_layer_report,
+ page_frame_number, offset_in_page, grain);
+ } else {
+ snprintf(detail, sizeof(detail),
+ "page:0x%lx offset:0x%lx grain:%d",
+ page_frame_number, offset_in_page, grain);
- mci->ce_count++;
- mci->csrows[csrow].ce_count++;
- mci->csrows[csrow].channels[channel].ce_count++;
+ edac_ue_error(mci, pos, msg, location, label, detail,
+ other_detail, enable_per_layer_report);
+ }
}
-EXPORT_SYMBOL(edac_mc_handle_fbd_ce);
+EXPORT_SYMBOL_GPL(edac_mc_handle_error);
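The per-layer counter indexing in edac_inc_ce_error()/edac_inc_ue_error() flattens the reported position the same way the allocation does; worked out here for a hypothetical 3-layer controller of 2 branches x 2 channels x 4 slots and pos = {1, 0, 3}:

	int pos[3] = { 1, 0, 3 };
	int index;

	index = pos[0];			/* 1  -> ce_per_layer[0][1]++  */
	index = index * 2 + pos[1];	/* 2  -> ce_per_layer[1][2]++  */
	index = index * 4 + pos[2];	/* 11 -> ce_per_layer[2][11]++ */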
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index e9a28f5..f6a29b0 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -144,25 +144,31 @@ static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data,
static ssize_t csrow_size_show(struct csrow_info *csrow, char *data,
int private)
{
- return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages));
+ int i;
+ u32 nr_pages = 0;
+
+ for (i = 0; i < csrow->nr_channels; i++)
+ nr_pages += csrow->channels[i].dimm->nr_pages;
+
+ return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
}
static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data,
int private)
{
- return sprintf(data, "%s\n", mem_types[csrow->mtype]);
+ return sprintf(data, "%s\n", mem_types[csrow->channels[0].dimm->mtype]);
}
static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data,
int private)
{
- return sprintf(data, "%s\n", dev_types[csrow->dtype]);
+ return sprintf(data, "%s\n", dev_types[csrow->channels[0].dimm->dtype]);
}
static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data,
int private)
{
- return sprintf(data, "%s\n", edac_caps[csrow->edac_mode]);
+ return sprintf(data, "%s\n", edac_caps[csrow->channels[0].dimm->edac_mode]);
}
/* show/store functions for DIMM Label attributes */
@@ -170,11 +176,11 @@ static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
char *data, int channel)
{
/* if field has not been initialized, there is nothing to send */
- if (!csrow->channels[channel].label[0])
+ if (!csrow->channels[channel].dimm->label[0])
return 0;
return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
- csrow->channels[channel].label);
+ csrow->channels[channel].dimm->label);
}
static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
@@ -184,8 +190,8 @@ static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
ssize_t max_size = 0;
max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
- strncpy(csrow->channels[channel].label, data, max_size);
- csrow->channels[channel].label[max_size] = '\0';
+ strncpy(csrow->channels[channel].dimm->label, data, max_size);
+ csrow->channels[channel].dimm->label[max_size] = '\0';
return max_size;
}
@@ -419,8 +425,8 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
mci->ue_noinfo_count = 0;
mci->ce_noinfo_count = 0;
- mci->ue_count = 0;
- mci->ce_count = 0;
+ mci->ue_mc = 0;
+ mci->ce_mc = 0;
for (row = 0; row < mci->nr_csrows; row++) {
struct csrow_info *ri = &mci->csrows[row];
@@ -489,12 +495,12 @@ static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
/* default attribute files for the MCI object */
static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
{
- return sprintf(data, "%d\n", mci->ue_count);
+ return sprintf(data, "%d\n", mci->ue_mc);
}
static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
{
- return sprintf(data, "%d\n", mci->ce_count);
+ return sprintf(data, "%d\n", mci->ce_mc);
}
static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
@@ -519,16 +525,16 @@ static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
{
- int total_pages, csrow_idx;
+ int total_pages = 0, csrow_idx, j;
- for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows;
- csrow_idx++) {
+ for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
struct csrow_info *csrow = &mci->csrows[csrow_idx];
- if (!csrow->nr_pages)
- continue;
+ for (j = 0; j < csrow->nr_channels; j++) {
+ struct dimm_info *dimm = csrow->channels[j].dimm;
- total_pages += csrow->nr_pages;
+ total_pages += dimm->nr_pages;
+ }
}
return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
@@ -900,7 +906,7 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
*/
int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
{
- int i;
+ int i, j;
int err;
struct csrow_info *csrow;
struct kobject *kobj_mci = &mci->edac_mci_kobj;
@@ -934,10 +940,13 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
/* Make directories for each CSROW object under the mc<id> kobject
*/
for (i = 0; i < mci->nr_csrows; i++) {
+ int nr_pages = 0;
+
csrow = &mci->csrows[i];
+ for (j = 0; j < csrow->nr_channels; j++)
+ nr_pages += csrow->channels[j].dimm->nr_pages;
- /* Only expose populated CSROWs */
- if (csrow->nr_pages > 0) {
+ if (nr_pages > 0) {
err = edac_create_csrow_object(mci, csrow, i);
if (err) {
debugf1("%s() failure: create csrow %d obj\n",
@@ -949,12 +958,15 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
return 0;
- /* CSROW error: backout what has already been registered, */
fail1:
for (i--; i >= 0; i--) {
- if (csrow->nr_pages > 0) {
+ int nr_pages = 0;
+
+ csrow = &mci->csrows[i];
+ for (j = 0; j < csrow->nr_channels; j++)
+ nr_pages += csrow->channels[j].dimm->nr_pages;
+ if (nr_pages > 0)
kobject_put(&mci->csrows[i].kobj);
- }
}
/* remove the mci instance's attributes, if any */
@@ -973,14 +985,20 @@ fail0:
*/
void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
{
- int i;
+ struct csrow_info *csrow;
+ int i, j;
debugf0("%s()\n", __func__);
/* remove all csrow kobjects */
debugf4("%s() unregister this mci kobj\n", __func__);
for (i = 0; i < mci->nr_csrows; i++) {
- if (mci->csrows[i].nr_pages > 0) {
+ int nr_pages = 0;
+
+ csrow = &mci->csrows[i];
+ for (j = 0; j < csrow->nr_channels; j++)
+ nr_pages += csrow->channels[j].dimm->nr_pages;
+ if (nr_pages > 0) {
debugf0("%s() unreg csrow-%d\n", __func__, i);
kobject_put(&mci->csrows[i].kobj);
}
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 00f81b4..0ea7d14 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -50,7 +50,7 @@ extern void edac_device_reset_delay_period(struct edac_device_ctl_info
*edac_dev, unsigned long value);
extern void edac_mc_reset_delay_period(int value);
-extern void *edac_align_ptr(void *ptr, unsigned size);
+extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
/*
* EDAC PCI functions
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 63af1c5..f1ac866 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -42,13 +42,13 @@ struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
const char *edac_pci_name)
{
struct edac_pci_ctl_info *pci;
- void *pvt;
+ void *p = NULL, *pvt;
unsigned int size;
debugf1("%s()\n", __func__);
- pci = (struct edac_pci_ctl_info *)0;
- pvt = edac_align_ptr(&pci[1], sz_pvt);
+ pci = edac_align_ptr(&p, sizeof(*pci), 1);
+ pvt = edac_align_ptr(&p, 1, sz_pvt);
size = ((unsigned long)pvt) + sz_pvt;
/* Alloc the needed control struct memory */
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 277689a..8ad1744 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -245,7 +245,9 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
return 1;
if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1,
+ "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
@@ -256,10 +258,15 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
row = edac_mc_find_csrow_by_page(mci, pfn);
if (info->errsts & I3000_ERRSTS_UE)
- edac_mc_handle_ue(mci, pfn, offset, row, "i3000 UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ pfn, offset, 0,
+ row, -1, -1,
+ "i3000 UE", "", NULL);
else
- edac_mc_handle_ce(mci, pfn, offset, info->derrsyn, row,
- multi_chan ? channel : 0, "i3000 CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ pfn, offset, info->derrsyn,
+ row, multi_chan ? channel : 0, -1,
+ "i3000 CE", "", NULL);
return 1;
}
@@ -304,9 +311,10 @@ static int i3000_is_interleaved(const unsigned char *c0dra,
static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc;
- int i;
+ int i, j;
struct mem_ctl_info *mci = NULL;
- unsigned long last_cumul_size;
+ struct edac_mc_layer layers[2];
+ unsigned long last_cumul_size, nr_pages;
int interleaved, nr_channels;
unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS];
unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2];
@@ -347,7 +355,14 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
*/
interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb);
nr_channels = interleaved ? 2 : 1;
- mci = edac_mc_alloc(0, I3000_RANKS / nr_channels, nr_channels, 0);
+
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = I3000_RANKS / nr_channels;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = nr_channels;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (!mci)
return -ENOMEM;
@@ -386,19 +401,23 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
cumul_size <<= 1;
debugf3("MC: %s(): (%d) cumul_size 0x%x\n",
__func__, i, cumul_size);
- if (cumul_size == last_cumul_size) {
- csrow->mtype = MEM_EMPTY;
+ if (cumul_size == last_cumul_size)
continue;
- }
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
+ nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = I3000_DEAP_GRAIN;
- csrow->mtype = MEM_DDR2;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = EDAC_UNKNOWN;
+
+ for (j = 0; j < nr_channels; j++) {
+ struct dimm_info *dimm = csrow->channels[j].dimm;
+
+ dimm->nr_pages = nr_pages / nr_channels;
+ dimm->grain = I3000_DEAP_GRAIN;
+ dimm->mtype = MEM_DDR2;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = EDAC_UNKNOWN;
+ }
}
/*
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 046808c..bbe43ef 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -23,6 +23,7 @@
#define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0
+#define I3200_DIMMS 4
#define I3200_RANKS 8
#define I3200_RANKS_PER_CHANNEL 4
#define I3200_CHANNELS 2
@@ -217,21 +218,25 @@ static void i3200_process_error_info(struct mem_ctl_info *mci,
return;
if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1, "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
for (channel = 0; channel < nr_channels; channel++) {
log = info->eccerrlog[channel];
if (log & I3200_ECCERRLOG_UE) {
- edac_mc_handle_ue(mci, 0, 0,
- eccerrlog_row(channel, log),
- "i3200 UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 0, 0, 0,
+ eccerrlog_row(channel, log),
+ -1, -1,
+ "i3000 UE", "", NULL);
} else if (log & I3200_ECCERRLOG_CE) {
- edac_mc_handle_ce(mci, 0, 0,
- eccerrlog_syndrome(log),
- eccerrlog_row(channel, log), 0,
- "i3200 CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, eccerrlog_syndrome(log),
+ eccerrlog_row(channel, log),
+ -1, -1,
+ "i3200 CE", "", NULL);
}
}
}
@@ -319,9 +324,9 @@ static unsigned long drb_to_nr_pages(
static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc;
- int i;
+ int i, j;
struct mem_ctl_info *mci = NULL;
- unsigned long last_page;
+ struct edac_mc_layer layers[2];
u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL];
bool stacked;
void __iomem *window;
@@ -336,8 +341,14 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
i3200_get_drbs(window, drbs);
nr_channels = how_many_channels(pdev);
- mci = edac_mc_alloc(sizeof(struct i3200_priv), I3200_RANKS,
- nr_channels, 0);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = I3200_DIMMS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = nr_channels;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ sizeof(struct i3200_priv));
if (!mci)
return -ENOMEM;
@@ -366,7 +377,6 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
* cumulative; the last one will contain the total memory
* contained in all ranks.
*/
- last_page = -1UL;
for (i = 0; i < mci->nr_csrows; i++) {
unsigned long nr_pages;
struct csrow_info *csrow = &mci->csrows[i];
@@ -375,20 +385,18 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
i / I3200_RANKS_PER_CHANNEL,
i % I3200_RANKS_PER_CHANNEL);
- if (nr_pages == 0) {
- csrow->mtype = MEM_EMPTY;
+ if (nr_pages == 0)
continue;
- }
- csrow->first_page = last_page + 1;
- last_page += nr_pages;
- csrow->last_page = last_page;
- csrow->nr_pages = nr_pages;
+ for (j = 0; j < nr_channels; j++) {
+ struct dimm_info *dimm = csrow->channels[j].dimm;
- csrow->grain = nr_pages << PAGE_SHIFT;
- csrow->mtype = MEM_DDR2;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = EDAC_UNKNOWN;
+ dimm->nr_pages = nr_pages / nr_channels;
+ dimm->grain = nr_pages << PAGE_SHIFT;
+ dimm->mtype = MEM_DDR2;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = EDAC_UNKNOWN;
+ }
}
i3200_clear_error_info(mci);
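The error paths in the i3200 hunks above, and in the drivers that follow, all funnel into the same edac_mc_handle_error() call that replaces the old edac_mc_handle_ce()/_ue()/_ce_no_info() helpers. A hedged sketch of how that entry point is used, with the argument meanings inferred from the calls in this series (example_report_ce and its parameters are illustrative only):

#include <linux/edac.h>

/*
 * Sketch: the single reporting entry point used throughout these hunks.
 * The three position arguments are the per-layer coordinates
 * (top/mid/low); -1 means "unknown at this layer".  Here a corrected
 * error is logged against csrow 'row', channel unknown.
 */
static void example_report_ce(struct mem_ctl_info *mci, int row,
			      unsigned long syndrome)
{
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
			     0, 0, syndrome,	/* page, offset, syndrome */
			     row, -1, -1,	/* layer coordinates */
			     "example CE", "", NULL);
}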
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index a2680d8..11ea835 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -270,7 +270,8 @@
#define MTR3 0x8C
#define NUM_MTRS 4
-#define CHANNELS_PER_BRANCH (2)
+#define CHANNELS_PER_BRANCH 2
+#define MAX_BRANCHES 2
/* Defines to extract the vaious fields from the
* MTRx - Memory Technology Registers
@@ -473,7 +474,6 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
char msg[EDAC_MC_LABEL_LEN + 1 + 160];
char *specific = NULL;
u32 allErrors;
- int branch;
int channel;
int bank;
int rank;
@@ -485,8 +485,7 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
if (!allErrors)
return; /* if no error, return now */
- branch = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
- channel = branch;
+ channel = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
/* Use the NON-Recoverable macros to extract data */
bank = NREC_BANK(info->nrecmema);
@@ -495,9 +494,9 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
ras = NREC_RAS(info->nrecmemb);
cas = NREC_CAS(info->nrecmemb);
- debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
- "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
- rank, channel, channel + 1, branch >> 1, bank,
+ debugf0("\t\tCSROW= %d Channel= %d "
+ "(DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
+ rank, channel, bank,
rdwr ? "Write" : "Read", ras, cas);
/* Only 1 bit will be on */
@@ -533,13 +532,14 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
/* Form out message */
snprintf(msg, sizeof(msg),
- "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d CAS=%d "
- "FATAL Err=0x%x (%s))",
- branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
- allErrors, specific);
+ "Bank=%d RAS=%d CAS=%d FATAL Err=0x%x (%s)",
+ bank, ras, cas, allErrors, specific);
/* Call the helper to output message */
- edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+ edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0,
+ channel >> 1, channel & 1, rank,
+ rdwr ? "Write error" : "Read error",
+ msg, NULL);
}
/*
@@ -633,13 +633,14 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
/* Form out message */
snprintf(msg, sizeof(msg),
- "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
- "CAS=%d, UE Err=0x%x (%s))",
- branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
- ue_errors, specific);
+ "Rank=%d Bank=%d RAS=%d CAS=%d, UE Err=0x%x (%s)",
+ rank, bank, ras, cas, ue_errors, specific);
/* Call the helper to output message */
- edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ channel >> 1, -1, rank,
+ rdwr ? "Write error" : "Read error",
+ msg, NULL);
}
/* Check correctable errors */
@@ -685,13 +686,16 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
/* Form out message */
snprintf(msg, sizeof(msg),
- "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d "
+ "Rank=%d Bank=%d RDWR=%s RAS=%d "
"CAS=%d, CE Err=0x%x (%s))", branch >> 1, bank,
rdwr ? "Write" : "Read", ras, cas, ce_errors,
specific);
/* Call the helper to output message */
- edac_mc_handle_fbd_ce(mci, rank, channel, msg);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+ channel >> 1, channel % 2, rank,
+ rdwr ? "Write error" : "Read error",
+ msg, NULL);
}
if (!misc_messages)
@@ -731,11 +735,12 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
/* Form out message */
snprintf(msg, sizeof(msg),
- "(Branch=%d Err=%#x (%s))", branch >> 1,
- misc_errors, specific);
+ "Err=%#x (%s)", misc_errors, specific);
/* Call the helper to output message */
- edac_mc_handle_fbd_ce(mci, 0, 0, msg);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+ branch >> 1, -1, -1,
+ "Misc error", msg, NULL);
}
}
@@ -956,14 +961,14 @@ static int determine_amb_present_reg(struct i5000_pvt *pvt, int channel)
*
* return the proper MTR register as determine by the csrow and channel desired
*/
-static int determine_mtr(struct i5000_pvt *pvt, int csrow, int channel)
+static int determine_mtr(struct i5000_pvt *pvt, int slot, int channel)
{
int mtr;
if (channel < CHANNELS_PER_BRANCH)
- mtr = pvt->b0_mtr[csrow >> 1];
+ mtr = pvt->b0_mtr[slot];
else
- mtr = pvt->b1_mtr[csrow >> 1];
+ mtr = pvt->b1_mtr[slot];
return mtr;
}
@@ -988,37 +993,34 @@ static void decode_mtr(int slot_row, u16 mtr)
debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
}
-static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel,
+static void handle_channel(struct i5000_pvt *pvt, int slot, int channel,
struct i5000_dimm_info *dinfo)
{
int mtr;
int amb_present_reg;
int addrBits;
- mtr = determine_mtr(pvt, csrow, channel);
+ mtr = determine_mtr(pvt, slot, channel);
if (MTR_DIMMS_PRESENT(mtr)) {
amb_present_reg = determine_amb_present_reg(pvt, channel);
- /* Determine if there is a DIMM present in this DIMM slot */
- if (amb_present_reg & (1 << (csrow >> 1))) {
+ /* Determine if there is a DIMM present in this DIMM slot */
+ if (amb_present_reg) {
dinfo->dual_rank = MTR_DIMM_RANK(mtr);
- if (!((dinfo->dual_rank == 0) &&
- ((csrow & 0x1) == 0x1))) {
- /* Start with the number of bits for a Bank
- * on the DRAM */
- addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
- /* Add thenumber of ROW bits */
- addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
- /* add the number of COLUMN bits */
- addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
-
- addrBits += 6; /* add 64 bits per DIMM */
- addrBits -= 20; /* divide by 2^^20 */
- addrBits -= 3; /* 8 bits per bytes */
-
- dinfo->megabytes = 1 << addrBits;
- }
+ /* Start with the number of bits for a Bank
+ * on the DRAM */
+ addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
+ /* Add the number of ROW bits */
+ addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
+ /* add the number of COLUMN bits */
+ addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
+
+ addrBits += 6; /* add 64 bits per DIMM */
+ addrBits -= 20; /* divide by 2^20 */
+ addrBits -= 3; /* 8 bits per byte */
+
+ dinfo->megabytes = 1 << addrBits;
}
}
}
@@ -1032,10 +1034,9 @@ static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel,
static void calculate_dimm_size(struct i5000_pvt *pvt)
{
struct i5000_dimm_info *dinfo;
- int csrow, max_csrows;
+ int slot, channel, branch;
char *p, *mem_buffer;
int space, n;
- int channel;
/* ================= Generate some debug output ================= */
space = PAGE_SIZE;
@@ -1046,22 +1047,17 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
return;
}
- n = snprintf(p, space, "\n");
- p += n;
- space -= n;
-
- /* Scan all the actual CSROWS (which is # of DIMMS * 2)
+ /* Scan all the actual slots
* and calculate the information for each DIMM
- * Start with the highest csrow first, to display it first
- * and work toward the 0th csrow
+ * Start with the highest slot first, to display it first
+ * and work toward the 0th slot
*/
- max_csrows = pvt->maxdimmperch * 2;
- for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
+ for (slot = pvt->maxdimmperch - 1; slot >= 0; slot--) {
- /* on an odd csrow, first output a 'boundary' marker,
+ /* on an odd slot, first output a 'boundary' marker,
* then reset the message buffer */
- if (csrow & 0x1) {
- n = snprintf(p, space, "---------------------------"
+ if (slot & 0x1) {
+ n = snprintf(p, space, "--------------------------"
"--------------------------------");
p += n;
space -= n;
@@ -1069,30 +1065,39 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
p = mem_buffer;
space = PAGE_SIZE;
}
- n = snprintf(p, space, "csrow %2d ", csrow);
+ n = snprintf(p, space, "slot %2d ", slot);
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
- dinfo = &pvt->dimm_info[csrow][channel];
- handle_channel(pvt, csrow, channel, dinfo);
- n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
+ dinfo = &pvt->dimm_info[slot][channel];
+ handle_channel(pvt, slot, channel, dinfo);
+ if (dinfo->megabytes)
+ n = snprintf(p, space, "%4d MB %dR| ",
+ dinfo->megabytes, dinfo->dual_rank + 1);
+ else
+ n = snprintf(p, space, "%4d MB | ", 0);
p += n;
space -= n;
}
- n = snprintf(p, space, "\n");
p += n;
space -= n;
+ debugf2("%s\n", mem_buffer);
+ p = mem_buffer;
+ space = PAGE_SIZE;
}
/* Output the last bottom 'boundary' marker */
- n = snprintf(p, space, "---------------------------"
- "--------------------------------\n");
+ n = snprintf(p, space, "--------------------------"
+ "--------------------------------");
p += n;
space -= n;
+ debugf2("%s\n", mem_buffer);
+ p = mem_buffer;
+ space = PAGE_SIZE;
/* now output the 'channel' labels */
- n = snprintf(p, space, " ");
+ n = snprintf(p, space, " ");
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
@@ -1100,9 +1105,17 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
p += n;
space -= n;
}
- n = snprintf(p, space, "\n");
+ debugf2("%s\n", mem_buffer);
+ p = mem_buffer;
+ space = PAGE_SIZE;
+
+ n = snprintf(p, space, " ");
p += n;
- space -= n;
+ for (branch = 0; branch < MAX_BRANCHES; branch++) {
+ n = snprintf(p, space, " branch %d | ", branch);
+ p += n;
+ space -= n;
+ }
/* output the last message and free buffer */
debugf2("%s\n", mem_buffer);
@@ -1235,13 +1248,13 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
static int i5000_init_csrows(struct mem_ctl_info *mci)
{
struct i5000_pvt *pvt;
- struct csrow_info *p_csrow;
+ struct dimm_info *dimm;
int empty, channel_count;
int max_csrows;
- int mtr, mtr1;
+ int mtr;
int csrow_megs;
int channel;
- int csrow;
+ int slot;
pvt = mci->pvt_info;
@@ -1250,43 +1263,40 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
empty = 1; /* Assume NO memory */
- for (csrow = 0; csrow < max_csrows; csrow++) {
- p_csrow = &mci->csrows[csrow];
-
- p_csrow->csrow_idx = csrow;
-
- /* use branch 0 for the basis */
- mtr = pvt->b0_mtr[csrow >> 1];
- mtr1 = pvt->b1_mtr[csrow >> 1];
-
- /* if no DIMMS on this row, continue */
- if (!MTR_DIMMS_PRESENT(mtr) && !MTR_DIMMS_PRESENT(mtr1))
- continue;
+ /*
+ * FIXME: The memory layout used to map slot/channel into the
+ * real memory architecture is weird: branch+slot are "csrows"
+ * and channel is channel. That required an extra array (dimm_info)
+ * to map the dimms. A good cleanup would be to remove this array,
+ * and do a loop here with branch, channel, slot
+ */
+ for (slot = 0; slot < max_csrows; slot++) {
+ for (channel = 0; channel < pvt->maxch; channel++) {
- /* FAKE OUT VALUES, FIXME */
- p_csrow->first_page = 0 + csrow * 20;
- p_csrow->last_page = 9 + csrow * 20;
- p_csrow->page_mask = 0xFFF;
+ mtr = determine_mtr(pvt, slot, channel);
- p_csrow->grain = 8;
+ if (!MTR_DIMMS_PRESENT(mtr))
+ continue;
- csrow_megs = 0;
- for (channel = 0; channel < pvt->maxch; channel++) {
- csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
- }
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+ channel / MAX_BRANCHES,
+ channel % MAX_BRANCHES, slot);
- p_csrow->nr_pages = csrow_megs << 8;
+ csrow_megs = pvt->dimm_info[slot][channel].megabytes;
+ dimm->grain = 8;
- /* Assume DDR2 for now */
- p_csrow->mtype = MEM_FB_DDR2;
+ /* Assume DDR2 for now */
+ dimm->mtype = MEM_FB_DDR2;
- /* ask what device type on this row */
- if (MTR_DRAM_WIDTH(mtr))
- p_csrow->dtype = DEV_X8;
- else
- p_csrow->dtype = DEV_X4;
+ /* ask what device type on this row */
+ if (MTR_DRAM_WIDTH(mtr))
+ dimm->dtype = DEV_X8;
+ else
+ dimm->dtype = DEV_X4;
- p_csrow->edac_mode = EDAC_S8ECD8ED;
+ dimm->edac_mode = EDAC_S8ECD8ED;
+ dimm->nr_pages = csrow_megs << 8;
+ }
empty = 0;
}
@@ -1317,7 +1327,7 @@ static void i5000_enable_error_reporting(struct mem_ctl_info *mci)
}
/*
- * i5000_get_dimm_and_channel_counts(pdev, &num_csrows, &num_channels)
+ * i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel, &num_channels)
*
* ask the device how many channels are present and how many CSROWS
* as well
@@ -1332,7 +1342,7 @@ static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
* supported on this memory controller
*/
pci_read_config_byte(pdev, MAXDIMMPERCH, &value);
- *num_dimms_per_channel = (int)value *2;
+ *num_dimms_per_channel = (int)value;
pci_read_config_byte(pdev, MAXCH, &value);
*num_channels = (int)value;
@@ -1348,10 +1358,10 @@ static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[3];
struct i5000_pvt *pvt;
int num_channels;
int num_dimms_per_channel;
- int num_csrows;
debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
__FILE__, __func__,
@@ -1377,14 +1387,22 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
*/
i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
&num_channels);
- num_csrows = num_dimms_per_channel * 2;
- debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
- __func__, num_channels, num_dimms_per_channel, num_csrows);
+ debugf0("MC: %s(): Number of Branches=2 Channels= %d DIMMS= %d\n",
+ __func__, num_channels, num_dimms_per_channel);
/* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
+ layers[0].type = EDAC_MC_LAYER_BRANCH;
+ layers[0].size = MAX_BRANCHES;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = num_channels / MAX_BRANCHES;
+ layers[1].is_virt_csrow = false;
+ layers[2].type = EDAC_MC_LAYER_SLOT;
+ layers[2].size = num_dimms_per_channel;
+ layers[2].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
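The i5000 probe above is the first of several FB-DIMM controllers in this series that describe themselves with three layers rather than a csrow grid. Roughly, and with placeholder names (example_alloc_fbd_mci and its parameters are illustrative only):

#include <linux/kernel.h>
#include <linux/edac.h>

/*
 * Sketch: the three-layer description used by the FB-DIMM drivers in
 * this series (i5000/i5400/i7300): branch -> channel-within-branch ->
 * DIMM slot.  Only the slot layer is treated as a virtual csrow.
 */
static struct mem_ctl_info *example_alloc_fbd_mci(unsigned branches,
						  unsigned chans_per_branch,
						  unsigned dimms_per_chan,
						  unsigned sz_pvt)
{
	struct edac_mc_layer layers[3];

	layers[0].type = EDAC_MC_LAYER_BRANCH;
	layers[0].size = branches;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = chans_per_branch;
	layers[1].is_virt_csrow = false;
	layers[2].type = EDAC_MC_LAYER_SLOT;
	layers[2].size = dimms_per_chan;
	layers[2].is_virt_csrow = true;

	return edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sz_pvt);
}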
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index d500749..e9e7c2a 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -14,6 +14,11 @@
* rows for each respective channel are laid out one after another,
* the first half belonging to channel 0, the second half belonging
* to channel 1.
+ *
+ * This driver is for DDR2 DIMMs, and it uses chip select to select among the
+ * several ranks. However, instead of showing memories as ranks, it outputs
+ * them as DIMMs. An internal table creates the association between ranks
+ * and DIMMs.
*/
#include <linux/module.h>
#include <linux/init.h>
@@ -410,14 +415,6 @@ static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
return csrow / priv->ranksperchan;
}
-static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
- int chan, int rank)
-{
- const struct i5100_priv *priv = mci->pvt_info;
-
- return chan * priv->ranksperchan + rank;
-}
-
static void i5100_handle_ce(struct mem_ctl_info *mci,
int chan,
unsigned bank,
@@ -427,17 +424,17 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
unsigned ras,
const char *msg)
{
- const int csrow = i5100_rank_to_csrow(mci, chan, rank);
+ char detail[80];
- printk(KERN_ERR
- "CE chan %d, bank %u, rank %u, syndrome 0x%lx, "
- "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
- chan, bank, rank, syndrome, cas, ras,
- csrow, mci->csrows[csrow].channels[0].label, msg);
+ /* Form out message */
+ snprintf(detail, sizeof(detail),
+ "bank %u, cas %u, ras %u\n",
+ bank, cas, ras);
- mci->ce_count++;
- mci->csrows[csrow].ce_count++;
- mci->csrows[csrow].channels[0].ce_count++;
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, syndrome,
+ chan, rank, -1,
+ msg, detail, NULL);
}
static void i5100_handle_ue(struct mem_ctl_info *mci,
@@ -449,16 +446,17 @@ static void i5100_handle_ue(struct mem_ctl_info *mci,
unsigned ras,
const char *msg)
{
- const int csrow = i5100_rank_to_csrow(mci, chan, rank);
+ char detail[80];
- printk(KERN_ERR
- "UE chan %d, bank %u, rank %u, syndrome 0x%lx, "
- "cas %u, ras %u, csrow %u, label \"%s\": %s\n",
- chan, bank, rank, syndrome, cas, ras,
- csrow, mci->csrows[csrow].channels[0].label, msg);
+ /* Form out message */
+ snprintf(detail, sizeof(detail),
+ "bank %u, cas %u, ras %u\n",
+ bank, cas, ras);
- mci->ue_count++;
- mci->csrows[csrow].ue_count++;
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 0, 0, syndrome,
+ chan, rank, -1,
+ msg, detail, NULL);
}
static void i5100_read_log(struct mem_ctl_info *mci, int chan,
@@ -835,10 +833,10 @@ static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
{
int i;
- unsigned long total_pages = 0UL;
struct i5100_priv *priv = mci->pvt_info;
- for (i = 0; i < mci->nr_csrows; i++) {
+ for (i = 0; i < mci->tot_dimms; i++) {
+ struct dimm_info *dimm;
const unsigned long npages = i5100_npages(mci, i);
const unsigned chan = i5100_csrow_to_chan(mci, i);
const unsigned rank = i5100_csrow_to_rank(mci, i);
@@ -846,33 +844,23 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
if (!npages)
continue;
- /*
- * FIXME: these two are totally bogus -- I don't see how to
- * map them correctly to this structure...
- */
- mci->csrows[i].first_page = total_pages;
- mci->csrows[i].last_page = total_pages + npages - 1;
- mci->csrows[i].page_mask = 0UL;
-
- mci->csrows[i].nr_pages = npages;
- mci->csrows[i].grain = 32;
- mci->csrows[i].csrow_idx = i;
- mci->csrows[i].dtype =
- (priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8;
- mci->csrows[i].ue_count = 0;
- mci->csrows[i].ce_count = 0;
- mci->csrows[i].mtype = MEM_RDDR2;
- mci->csrows[i].edac_mode = EDAC_SECDED;
- mci->csrows[i].mci = mci;
- mci->csrows[i].nr_channels = 1;
- mci->csrows[i].channels[0].chan_idx = 0;
- mci->csrows[i].channels[0].ce_count = 0;
- mci->csrows[i].channels[0].csrow = mci->csrows + i;
- snprintf(mci->csrows[i].channels[0].label,
- sizeof(mci->csrows[i].channels[0].label),
- "DIMM%u", i5100_rank_to_slot(mci, chan, rank));
-
- total_pages += npages;
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+ chan, rank, 0);
+
+ dimm->nr_pages = npages;
+ if (npages) {
+ dimm->grain = 32;
+ dimm->dtype = (priv->mtr[chan][rank].width == 4) ?
+ DEV_X4 : DEV_X8;
+ dimm->mtype = MEM_RDDR2;
+ dimm->edac_mode = EDAC_SECDED;
+ snprintf(dimm->label, sizeof(dimm->label),
+ "DIMM%u",
+ i5100_rank_to_slot(mci, chan, rank));
+ }
+
+ debugf2("dimm channel %d, rank %d, size %ld\n",
+ chan, rank, (long)PAGES_TO_MiB(npages));
}
}
@@ -881,6 +869,7 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
{
int rc;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct i5100_priv *priv;
struct pci_dev *ch0mm, *ch1mm;
int ret = 0;
@@ -941,7 +930,14 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
goto bail_ch1;
}
- mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0);
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = 2;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = ranksperch;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+ sizeof(*priv));
if (!mci) {
ret = -ENOMEM;
goto bail_disable_ch1;
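The i5100 conversion above shows the other half of the new scheme: once the layers are declared, each populated module is located with EDAC_DIMM_PTR() and its attributes are written into struct dimm_info. A minimal sketch, assuming the two-layer channel/slot layout used by i5100 (example_fill_dimm and its arguments are placeholders):

#include <linux/kernel.h>
#include <linux/edac.h>

/*
 * Sketch: look up the per-DIMM descriptor by its layer coordinates
 * (channel, slot) and fill in the fields the EDAC core now keeps
 * per DIMM instead of per csrow.
 */
static void example_fill_dimm(struct mem_ctl_info *mci, int chan, int slot,
			      unsigned long nr_pages)
{
	struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
					       mci->n_layers, chan, slot, 0);

	dimm->nr_pages = nr_pages;
	dimm->grain = 32;		/* reporting granularity, in bytes */
	dimm->mtype = MEM_RDDR2;
	dimm->edac_mode = EDAC_SECDED;
	snprintf(dimm->label, sizeof(dimm->label), "DIMM%d", slot);
}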
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 1869a10..6640c29 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -18,6 +18,10 @@
* Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet
* http://developer.intel.com/design/chipsets/datashts/313070.htm
*
+ * This Memory Controller manages DDR2 FB-DIMMs. It has 2 branches, each with
+ * 2 channels operating in lockstep no-mirror mode. Each channel can have up to
+ * 4 DIMMs, each with up to 8 GB.
+ *
*/
#include <linux/module.h>
@@ -44,12 +48,10 @@
edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg)
/* Limits for i5400 */
-#define NUM_MTRS_PER_BRANCH 4
+#define MAX_BRANCHES 2
#define CHANNELS_PER_BRANCH 2
-#define MAX_DIMMS_PER_CHANNEL NUM_MTRS_PER_BRANCH
-#define MAX_CHANNELS 4
-/* max possible csrows per channel */
-#define MAX_CSROWS (MAX_DIMMS_PER_CHANNEL)
+#define DIMMS_PER_CHANNEL 4
+#define MAX_CHANNELS (MAX_BRANCHES * CHANNELS_PER_BRANCH)
/* Device 16,
* Function 0: System Address
@@ -347,16 +349,16 @@ struct i5400_pvt {
u16 mir0, mir1;
- u16 b0_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technlogy Reg */
+ u16 b0_mtr[DIMMS_PER_CHANNEL]; /* Memory Technology Reg */
u16 b0_ambpresent0; /* Branch 0, Channel 0 */
u16 b0_ambpresent1; /* Brnach 0, Channel 1 */
- u16 b1_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technlogy Reg */
+ u16 b1_mtr[DIMMS_PER_CHANNEL]; /* Memory Technology Reg */
u16 b1_ambpresent0; /* Branch 1, Channel 8 */
u16 b1_ambpresent1; /* Branch 1, Channel 1 */
/* DIMM information matrix, allocating architecture maximums */
- struct i5400_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS];
+ struct i5400_dimm_info dimm_info[DIMMS_PER_CHANNEL][MAX_CHANNELS];
/* Actual values for this controller */
int maxch; /* Max channels */
@@ -532,13 +534,15 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
int ras, cas;
int errnum;
char *type = NULL;
+ enum hw_event_mc_err_type tp_event = HW_EVENT_ERR_UNCORRECTED;
if (!allErrors)
return; /* if no error, return now */
- if (allErrors & ERROR_FAT_MASK)
+ if (allErrors & ERROR_FAT_MASK) {
type = "FATAL";
- else if (allErrors & FERR_NF_UNCORRECTABLE)
+ tp_event = HW_EVENT_ERR_FATAL;
+ } else if (allErrors & FERR_NF_UNCORRECTABLE)
type = "NON-FATAL uncorrected";
else
type = "NON-FATAL recoverable";
@@ -556,7 +560,7 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
ras = nrec_ras(info);
cas = nrec_cas(info);
- debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d "
+ debugf0("\t\tDIMM= %d Channels= %d,%d (Branch= %d "
"DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
rank, channel, channel + 1, branch >> 1, bank,
buf_id, rdwr_str(rdwr), ras, cas);
@@ -566,13 +570,13 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
/* Form out message */
snprintf(msg, sizeof(msg),
- "%s (Branch=%d DRAM-Bank=%d Buffer ID = %d RDWR=%s "
- "RAS=%d CAS=%d %s Err=0x%lx (%s))",
- type, branch >> 1, bank, buf_id, rdwr_str(rdwr), ras, cas,
- type, allErrors, error_name[errnum]);
+ "Bank=%d Buffer ID = %d RAS=%d CAS=%d Err=0x%lx (%s)",
+ bank, buf_id, ras, cas, allErrors, error_name[errnum]);
- /* Call the helper to output message */
- edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg);
+ edac_mc_handle_error(tp_event, mci, 0, 0, 0,
+ branch >> 1, -1, rank,
+ rdwr ? "Write error" : "Read error",
+ msg, NULL);
}
/*
@@ -630,7 +634,7 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
/* Only 1 bit will be on */
errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
- debugf0("\t\tCSROW= %d Channel= %d (Branch %d "
+ debugf0("\t\tDIMM= %d Channel= %d (Branch %d "
"DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
rank, channel, branch >> 1, bank,
rdwr_str(rdwr), ras, cas);
@@ -642,8 +646,10 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
branch >> 1, bank, rdwr_str(rdwr), ras, cas,
allErrors, error_name[errnum]);
- /* Call the helper to output message */
- edac_mc_handle_fbd_ce(mci, rank, channel, msg);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+ branch >> 1, channel % 2, rank,
+ rdwr ? "Write error" : "Read error",
+ msg, NULL);
return;
}
@@ -831,8 +837,8 @@ static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx)
/*
* determine_amb_present
*
- * the information is contained in NUM_MTRS_PER_BRANCH different
- * registers determining which of the NUM_MTRS_PER_BRANCH requires
+ * the information is contained in DIMMS_PER_CHANNEL different
+ * registers determining which of the DIMMS_PER_CHANNEL requires
* knowing which channel is in question
*
* 2 branches, each with 2 channels
@@ -861,11 +867,11 @@ static int determine_amb_present_reg(struct i5400_pvt *pvt, int channel)
}
/*
- * determine_mtr(pvt, csrow, channel)
+ * determine_mtr(pvt, dimm, channel)
*
- * return the proper MTR register as determine by the csrow and desired channel
+ * return the proper MTR register as determine by the dimm and desired channel
*/
-static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel)
+static int determine_mtr(struct i5400_pvt *pvt, int dimm, int channel)
{
int mtr;
int n;
@@ -873,11 +879,11 @@ static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel)
/* There is one MTR for each slot pair of FB-DIMMs,
Each slot pair may be at branch 0 or branch 1.
*/
- n = csrow;
+ n = dimm;
- if (n >= NUM_MTRS_PER_BRANCH) {
- debugf0("ERROR: trying to access an invalid csrow: %d\n",
- csrow);
+ if (n >= DIMMS_PER_CHANNEL) {
+ debugf0("ERROR: trying to access an invalid dimm: %d\n",
+ dimm);
return 0;
}
@@ -913,19 +919,19 @@ static void decode_mtr(int slot_row, u16 mtr)
debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
}
-static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel,
+static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel,
struct i5400_dimm_info *dinfo)
{
int mtr;
int amb_present_reg;
int addrBits;
- mtr = determine_mtr(pvt, csrow, channel);
+ mtr = determine_mtr(pvt, dimm, channel);
if (MTR_DIMMS_PRESENT(mtr)) {
amb_present_reg = determine_amb_present_reg(pvt, channel);
/* Determine if there is a DIMM present in this DIMM slot */
- if (amb_present_reg & (1 << csrow)) {
+ if (amb_present_reg & (1 << dimm)) {
/* Start with the number of bits for a Bank
* on the DRAM */
addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
@@ -954,10 +960,10 @@ static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel,
static void calculate_dimm_size(struct i5400_pvt *pvt)
{
struct i5400_dimm_info *dinfo;
- int csrow, max_csrows;
+ int dimm, max_dimms;
char *p, *mem_buffer;
int space, n;
- int channel;
+ int channel, branch;
/* ================= Generate some debug output ================= */
space = PAGE_SIZE;
@@ -968,32 +974,32 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
return;
}
- /* Scan all the actual CSROWS
+ /* Scan all the actual DIMMS
* and calculate the information for each DIMM
- * Start with the highest csrow first, to display it first
- * and work toward the 0th csrow
+ * Start with the highest dimm first, to display it first
+ * and work toward the 0th dimm
*/
- max_csrows = pvt->maxdimmperch;
- for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
+ max_dimms = pvt->maxdimmperch;
+ for (dimm = max_dimms - 1; dimm >= 0; dimm--) {
- /* on an odd csrow, first output a 'boundary' marker,
+ /* on an odd dimm, first output a 'boundary' marker,
* then reset the message buffer */
- if (csrow & 0x1) {
+ if (dimm & 0x1) {
n = snprintf(p, space, "---------------------------"
- "--------------------------------");
+ "-------------------------------");
p += n;
space -= n;
debugf2("%s\n", mem_buffer);
p = mem_buffer;
space = PAGE_SIZE;
}
- n = snprintf(p, space, "csrow %2d ", csrow);
+ n = snprintf(p, space, "dimm %2d ", dimm);
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
- dinfo = &pvt->dimm_info[csrow][channel];
- handle_channel(pvt, csrow, channel, dinfo);
+ dinfo = &pvt->dimm_info[dimm][channel];
+ handle_channel(pvt, dimm, channel, dinfo);
n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
p += n;
space -= n;
@@ -1005,7 +1011,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
/* Output the last bottom 'boundary' marker */
n = snprintf(p, space, "---------------------------"
- "--------------------------------");
+ "-------------------------------");
p += n;
space -= n;
debugf2("%s\n", mem_buffer);
@@ -1013,7 +1019,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
space = PAGE_SIZE;
/* now output the 'channel' labels */
- n = snprintf(p, space, " ");
+ n = snprintf(p, space, " ");
p += n;
space -= n;
for (channel = 0; channel < pvt->maxch; channel++) {
@@ -1022,6 +1028,19 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
space -= n;
}
+ space -= n;
+ debugf2("%s\n", mem_buffer);
+ p = mem_buffer;
+ space = PAGE_SIZE;
+
+ n = snprintf(p, space, " ");
+ p += n;
+ for (branch = 0; branch < MAX_BRANCHES; branch++) {
+ n = snprintf(p, space, " branch %d | ", branch);
+ p += n;
+ space -= n;
+ }
+
/* output the last message and free buffer */
debugf2("%s\n", mem_buffer);
kfree(mem_buffer);
@@ -1080,7 +1099,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
/* Get the set of MTR[0-3] regs by each branch */
- for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) {
+ for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) {
int where = MTR0 + (slot_row * sizeof(u16));
/* Branch 0 set of MTR registers */
@@ -1105,7 +1124,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
/* Read and dump branch 0's MTRs */
debugf2("\nMemory Technology Registers:\n");
debugf2(" Branch 0:\n");
- for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
+ for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
pci_read_config_word(pvt->branch_0, AMBPRESENT_0,
@@ -1122,7 +1141,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
} else {
/* Read and dump branch 1's MTRs */
debugf2(" Branch 1:\n");
- for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++)
+ for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
pci_read_config_word(pvt->branch_1, AMBPRESENT_0,
@@ -1141,7 +1160,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
}
/*
- * i5400_init_csrows Initialize the 'csrows' table within
+ * i5400_init_dimms Initialize the 'dimms' table within
* the mci control structure with the
* addressing of memory.
*
@@ -1149,64 +1168,68 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
* 0 success
* 1 no actual memory found on this MC
*/
-static int i5400_init_csrows(struct mem_ctl_info *mci)
+static int i5400_init_dimms(struct mem_ctl_info *mci)
{
struct i5400_pvt *pvt;
- struct csrow_info *p_csrow;
- int empty, channel_count;
- int max_csrows;
+ struct dimm_info *dimm;
+ int ndimms, channel_count;
+ int max_dimms;
int mtr;
- int csrow_megs;
- int channel;
- int csrow;
+ int size_mb;
+ int channel, slot;
pvt = mci->pvt_info;
channel_count = pvt->maxch;
- max_csrows = pvt->maxdimmperch;
+ max_dimms = pvt->maxdimmperch;
- empty = 1; /* Assume NO memory */
+ ndimms = 0;
- for (csrow = 0; csrow < max_csrows; csrow++) {
- p_csrow = &mci->csrows[csrow];
-
- p_csrow->csrow_idx = csrow;
-
- /* use branch 0 for the basis */
- mtr = determine_mtr(pvt, csrow, 0);
-
- /* if no DIMMS on this row, continue */
- if (!MTR_DIMMS_PRESENT(mtr))
- continue;
-
- /* FAKE OUT VALUES, FIXME */
- p_csrow->first_page = 0 + csrow * 20;
- p_csrow->last_page = 9 + csrow * 20;
- p_csrow->page_mask = 0xFFF;
-
- p_csrow->grain = 8;
-
- csrow_megs = 0;
- for (channel = 0; channel < pvt->maxch; channel++)
- csrow_megs += pvt->dimm_info[csrow][channel].megabytes;
-
- p_csrow->nr_pages = csrow_megs << 8;
-
- /* Assume DDR2 for now */
- p_csrow->mtype = MEM_FB_DDR2;
-
- /* ask what device type on this row */
- if (MTR_DRAM_WIDTH(mtr))
- p_csrow->dtype = DEV_X8;
- else
- p_csrow->dtype = DEV_X4;
-
- p_csrow->edac_mode = EDAC_S8ECD8ED;
-
- empty = 0;
+ /*
+ * FIXME: remove pvt->dimm_info[slot][channel] and use the 3
+ * layers here.
+ */
+ for (channel = 0; channel < mci->layers[0].size * mci->layers[1].size;
+ channel++) {
+ for (slot = 0; slot < mci->layers[2].size; slot++) {
+ mtr = determine_mtr(pvt, slot, channel);
+
+ /* if no DIMMS on this slot, continue */
+ if (!MTR_DIMMS_PRESENT(mtr))
+ continue;
+
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+ channel / 2, channel % 2, slot);
+
+ size_mb = pvt->dimm_info[slot][channel].megabytes;
+
+ debugf2("%s: dimm%zd (branch %d channel %d slot %d): %d.%03d GB\n",
+ __func__, dimm - mci->dimms,
+ channel / 2, channel % 2, slot,
+ size_mb / 1000, size_mb % 1000);
+
+ dimm->nr_pages = size_mb << 8;
+ dimm->grain = 8;
+ dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4;
+ dimm->mtype = MEM_FB_DDR2;
+ /*
+ * The ECC mechanism is SDDC (aka SECC), which
+ * is similar to Chipkill.
+ */
+ dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ?
+ EDAC_S8ECD8ED : EDAC_S4ECD4ED;
+ ndimms++;
+ }
}
- return empty;
+ /*
+ * When just one DIMM is present, it should be at location (0,0,0).
+ * In such single-DIMM mode, the SDDC algorithm degrades to SECDED.
+ */
+ if (ndimms == 1)
+ mci->dimms[0].edac_mode = EDAC_SECDED;
+
+ return (ndimms == 0);
}
/*
@@ -1242,9 +1265,7 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
struct i5400_pvt *pvt;
- int num_channels;
- int num_dimms_per_channel;
- int num_csrows;
+ struct edac_mc_layer layers[3];
if (dev_idx >= ARRAY_SIZE(i5400_devs))
return -EINVAL;
@@ -1258,23 +1279,21 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
if (PCI_FUNC(pdev->devfn) != 0)
return -ENODEV;
- /* As we don't have a motherboard identification routine to determine
- * actual number of slots/dimms per channel, we thus utilize the
- * resource as specified by the chipset. Thus, we might have
- * have more DIMMs per channel than actually on the mobo, but this
- * allows the driver to support up to the chipset max, without
- * some fancy mobo determination.
+ /*
+ * allocate a new MC control structure
+ *
+ * This driver uses the DIMM slot as "csrow" and the rest as "channel".
*/
- num_dimms_per_channel = MAX_DIMMS_PER_CHANNEL;
- num_channels = MAX_CHANNELS;
- num_csrows = num_dimms_per_channel;
-
- debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
- __func__, num_channels, num_dimms_per_channel, num_csrows);
-
- /* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
-
+ layers[0].type = EDAC_MC_LAYER_BRANCH;
+ layers[0].size = MAX_BRANCHES;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = CHANNELS_PER_BRANCH;
+ layers[1].is_virt_csrow = false;
+ layers[2].type = EDAC_MC_LAYER_SLOT;
+ layers[2].size = DIMMS_PER_CHANNEL;
+ layers[2].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
@@ -1284,8 +1303,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
pvt = mci->pvt_info;
pvt->system_address = pdev; /* Record this device in our private */
- pvt->maxch = num_channels;
- pvt->maxdimmperch = num_dimms_per_channel;
+ pvt->maxch = MAX_CHANNELS;
+ pvt->maxdimmperch = DIMMS_PER_CHANNEL;
/* 'get' the pci devices we want to reserve for our use */
if (i5400_get_devices(mci, dev_idx))
@@ -1307,13 +1326,13 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
/* Set the function pointer to an actual operation function */
mci->edac_check = i5400_check_error;
- /* initialize the MC control structure 'csrows' table
+ /* initialize the MC control structure 'dimms' table
* with the mapping and control information */
- if (i5400_init_csrows(mci)) {
+ if (i5400_init_dimms(mci)) {
debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
- " because i5400_init_csrows() returned nonzero "
+ " because i5400_init_dimms() returned nonzero "
"value\n");
- mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */
+ mci->edac_cap = EDAC_FLAG_NONE; /* no dimms found */
} else {
debugf1("MC: Enable error reporting now\n");
i5400_enable_error_reporting(mci);
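The per-DIMM size computed in handle_channel() above follows the same address-bit arithmetic as i5000: bank, row and column bits give the number of 64-bit accesses, and the result is then scaled down to MiB. A sketch with plain bit-count parameters (the driver itself derives them from the MTR_* register fields; example_dimm_megabytes is illustrative only):

/*
 * Sketch of the size arithmetic used by handle_channel():
 * 2^(bank+row+col) accesses of 64 bits each, converted to bytes and
 * then to MiB.
 */
static unsigned int example_dimm_megabytes(unsigned int bank_bits,
					   unsigned int row_bits,
					   unsigned int col_bits)
{
	unsigned int addr_bits = bank_bits + row_bits + col_bits;

	addr_bits += 6;		/* 64 bits (8 bytes) per access */
	addr_bits -= 3;		/* bits -> bytes */
	addr_bits -= 20;	/* bytes -> MiB */

	return 1U << addr_bits;
}

For example, 2 bank bits, 14 row bits and 10 column bits give 1U << 9, i.e. a 512 MB DIMM.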
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 3bafa3b..97c22fd 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -464,17 +464,14 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
FERR_FAT_FBD, error_reg);
snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
- "FATAL (Branch=%d DRAM-Bank=%d %s "
- "RAS=%d CAS=%d Err=0x%lx (%s))",
- branch, bank,
- is_wr ? "RDWR" : "RD",
- ras, cas,
- errors, specific);
-
- /* Call the helper to output message */
- edac_mc_handle_fbd_ue(mci, rank, branch << 1,
- (branch << 1) + 1,
- pvt->tmp_prt_buffer);
+ "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))",
+ bank, ras, cas, errors, specific);
+
+ edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0,
+ branch, -1, rank,
+ is_wr ? "Write error" : "Read error",
+ pvt->tmp_prt_buffer, NULL);
+
}
/* read in the 1st NON-FATAL error register */
@@ -513,23 +510,14 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
/* Form out message */
snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
- "Corrected error (Branch=%d, Channel %d), "
- " DRAM-Bank=%d %s "
- "RAS=%d CAS=%d, CE Err=0x%lx, Syndrome=0x%08x(%s))",
- branch, channel,
- bank,
- is_wr ? "RDWR" : "RD",
- ras, cas,
- errors, syndrome, specific);
-
- /*
- * Call the helper to output message
- * NOTE: Errors are reported per-branch, and not per-channel
- * Currently, we don't know how to identify the right
- * channel.
- */
- edac_mc_handle_fbd_ce(mci, rank, channel,
- pvt->tmp_prt_buffer);
+ "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))",
+ bank, ras, cas, errors, specific);
+
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0,
+ syndrome,
+ branch >> 1, channel % 2, rank,
+ is_wr ? "Write error" : "Read error",
+ pvt->tmp_prt_buffer, NULL);
}
return;
}
@@ -617,8 +605,7 @@ static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
static int decode_mtr(struct i7300_pvt *pvt,
int slot, int ch, int branch,
struct i7300_dimm_info *dinfo,
- struct csrow_info *p_csrow,
- u32 *nr_pages)
+ struct dimm_info *dimm)
{
int mtr, ans, addrBits, channel;
@@ -650,7 +637,6 @@ static int decode_mtr(struct i7300_pvt *pvt,
addrBits -= 3; /* 8 bits per bytes */
dinfo->megabytes = 1 << addrBits;
- *nr_pages = dinfo->megabytes << 8;
debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
@@ -663,11 +649,6 @@ static int decode_mtr(struct i7300_pvt *pvt,
debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
debugf2("\t\tSIZE: %d MB\n", dinfo->megabytes);
- p_csrow->grain = 8;
- p_csrow->mtype = MEM_FB_DDR2;
- p_csrow->csrow_idx = slot;
- p_csrow->page_mask = 0;
-
/*
* The type of error detection actually depends of the
* mode of operation. When it is just one single memory chip, at
@@ -677,15 +658,18 @@ static int decode_mtr(struct i7300_pvt *pvt,
* See datasheet Sections 7.3.6 to 7.3.8
*/
+ dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes);
+ dimm->grain = 8;
+ dimm->mtype = MEM_FB_DDR2;
if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
- p_csrow->edac_mode = EDAC_SECDED;
+ dimm->edac_mode = EDAC_SECDED;
debugf2("\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
} else {
debugf2("\t\tECC code is on Lockstep mode\n");
if (MTR_DRAM_WIDTH(mtr) == 8)
- p_csrow->edac_mode = EDAC_S8ECD8ED;
+ dimm->edac_mode = EDAC_S8ECD8ED;
else
- p_csrow->edac_mode = EDAC_S4ECD4ED;
+ dimm->edac_mode = EDAC_S4ECD4ED;
}
/* ask what device type on this row */
@@ -694,9 +678,9 @@ static int decode_mtr(struct i7300_pvt *pvt,
IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
"enhanced" : "normal");
- p_csrow->dtype = DEV_X8;
+ dimm->dtype = DEV_X8;
} else
- p_csrow->dtype = DEV_X4;
+ dimm->dtype = DEV_X4;
return mtr;
}
@@ -774,11 +758,10 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
{
struct i7300_pvt *pvt;
struct i7300_dimm_info *dinfo;
- struct csrow_info *p_csrow;
int rc = -ENODEV;
int mtr;
int ch, branch, slot, channel;
- u32 last_page = 0, nr_pages;
+ struct dimm_info *dimm;
pvt = mci->pvt_info;
@@ -809,25 +792,23 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
where,
&pvt->mtr[slot][branch]);
- for (ch = 0; ch < MAX_BRANCHES; ch++) {
+ for (ch = 0; ch < MAX_CH_PER_BRANCH; ch++) {
int channel = to_channel(ch, branch);
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+ mci->n_layers, branch, ch, slot);
+
dinfo = &pvt->dimm_info[slot][channel];
- p_csrow = &mci->csrows[slot];
mtr = decode_mtr(pvt, slot, ch, branch,
- dinfo, p_csrow, &nr_pages);
+ dinfo, dimm);
+
/* if no DIMMS on this row, continue */
if (!MTR_DIMMS_PRESENT(mtr))
continue;
- /* Update per_csrow memory count */
- p_csrow->nr_pages += nr_pages;
- p_csrow->first_page = last_page;
- last_page += nr_pages;
- p_csrow->last_page = last_page;
-
rc = 0;
+
}
}
}
@@ -1042,10 +1023,8 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[3];
struct i7300_pvt *pvt;
- int num_channels;
- int num_dimms_per_channel;
- int num_csrows;
int rc;
/* wake up device */
@@ -1062,23 +1041,17 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
if (PCI_FUNC(pdev->devfn) != 0)
return -ENODEV;
- /* As we don't have a motherboard identification routine to determine
- * actual number of slots/dimms per channel, we thus utilize the
- * resource as specified by the chipset. Thus, we might have
- * have more DIMMs per channel than actually on the mobo, but this
- * allows the driver to support up to the chipset max, without
- * some fancy mobo determination.
- */
- num_dimms_per_channel = MAX_SLOTS;
- num_channels = MAX_CHANNELS;
- num_csrows = MAX_SLOTS * MAX_CHANNELS;
-
- debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
- __func__, num_channels, num_dimms_per_channel, num_csrows);
-
/* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
-
+ layers[0].type = EDAC_MC_LAYER_BRANCH;
+ layers[0].size = MAX_BRANCHES;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = MAX_CH_PER_BRANCH;
+ layers[1].is_virt_csrow = true;
+ layers[2].type = EDAC_MC_LAYER_SLOT;
+ layers[2].size = MAX_SLOTS;
+ layers[2].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (mci == NULL)
return -ENOMEM;
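In the i7300 hunks above, decode_mtr() now fills struct dimm_info directly instead of a fake csrow. The fields involved, and the MiB_TO_PAGES() conversion the driver uses for nr_pages, look roughly like this sketch (example_fill_fbd_dimm and its parameters are illustrative; the real driver reads them from the MTR and MC-settings registers):

#include <linux/edac.h>

/*
 * Sketch: what decode_mtr() fills in for each populated slot.
 * MiB_TO_PAGES(mb) is mb << (20 - PAGE_SHIFT), i.e. 256 pages per MiB
 * with 4 KiB pages; the mode/width choices mirror the hunk above.
 */
static void example_fill_fbd_dimm(struct dimm_info *dimm,
				  unsigned int megabytes, bool x8,
				  bool single_mode)
{
	dimm->nr_pages = MiB_TO_PAGES(megabytes);
	dimm->grain = 8;
	dimm->mtype = MEM_FB_DDR2;
	dimm->dtype = x8 ? DEV_X8 : DEV_X4;
	dimm->edac_mode = single_mode ? EDAC_SECDED :
			  (x8 ? EDAC_S8ECD8ED : EDAC_S4ECD4ED);
}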
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 7f1dfcc..a499c7e 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -221,7 +221,9 @@ struct i7core_inject {
};
struct i7core_channel {
- u32 ranks;
+ bool is_3dimms_present;
+ bool is_single_4rank;
+ bool has_4rank;
u32 dimms;
};
@@ -257,7 +259,6 @@ struct i7core_pvt {
struct i7core_channel channel[NUM_CHANS];
int ce_count_available;
- int csrow_map[NUM_CHANS][MAX_DIMMS];
/* ECC corrected errors counts per udimm */
unsigned long udimm_ce_count[MAX_DIMMS];
@@ -492,116 +493,15 @@ static void free_i7core_dev(struct i7core_dev *i7core_dev)
/****************************************************************************
Memory check routines
****************************************************************************/
-static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
- unsigned func)
-{
- struct i7core_dev *i7core_dev = get_i7core_dev(socket);
- int i;
-
- if (!i7core_dev)
- return NULL;
-
- for (i = 0; i < i7core_dev->n_devs; i++) {
- if (!i7core_dev->pdev[i])
- continue;
-
- if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
- PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
- return i7core_dev->pdev[i];
- }
- }
-
- return NULL;
-}
-
-/**
- * i7core_get_active_channels() - gets the number of channels and csrows
- * @socket: Quick Path Interconnect socket
- * @channels: Number of channels that will be returned
- * @csrows: Number of csrows found
- *
- * Since EDAC core needs to know in advance the number of available channels
- * and csrows, in order to allocate memory for csrows/channels, it is needed
- * to run two similar steps. At the first step, implemented on this function,
- * it checks the number of csrows/channels present at one socket.
- * this is used in order to properly allocate the size of mci components.
- *
- * It should be noticed that none of the current available datasheets explain
- * or even mention how csrows are seen by the memory controller. So, we need
- * to add a fake description for csrows.
- * So, this driver is attributing one DIMM memory for one csrow.
- */
-static int i7core_get_active_channels(const u8 socket, unsigned *channels,
- unsigned *csrows)
-{
- struct pci_dev *pdev = NULL;
- int i, j;
- u32 status, control;
-
- *channels = 0;
- *csrows = 0;
-
- pdev = get_pdev_slot_func(socket, 3, 0);
- if (!pdev) {
- i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
- socket);
- return -ENODEV;
- }
-
- /* Device 3 function 0 reads */
- pci_read_config_dword(pdev, MC_STATUS, &status);
- pci_read_config_dword(pdev, MC_CONTROL, &control);
-
- for (i = 0; i < NUM_CHANS; i++) {
- u32 dimm_dod[3];
- /* Check if the channel is active */
- if (!(control & (1 << (8 + i))))
- continue;
-
- /* Check if the channel is disabled */
- if (status & (1 << i))
- continue;
-
- pdev = get_pdev_slot_func(socket, i + 4, 1);
- if (!pdev) {
- i7core_printk(KERN_ERR, "Couldn't find socket %d "
- "fn %d.%d!!!\n",
- socket, i + 4, 1);
- return -ENODEV;
- }
- /* Devices 4-6 function 1 */
- pci_read_config_dword(pdev,
- MC_DOD_CH_DIMM0, &dimm_dod[0]);
- pci_read_config_dword(pdev,
- MC_DOD_CH_DIMM1, &dimm_dod[1]);
- pci_read_config_dword(pdev,
- MC_DOD_CH_DIMM2, &dimm_dod[2]);
-
- (*channels)++;
-
- for (j = 0; j < 3; j++) {
- if (!DIMM_PRESENT(dimm_dod[j]))
- continue;
- (*csrows)++;
- }
- }
-
- debugf0("Number of active channels on socket %d: %d\n",
- socket, *channels);
- return 0;
-}
-
-static int get_dimm_config(const struct mem_ctl_info *mci)
+static int get_dimm_config(struct mem_ctl_info *mci)
{
struct i7core_pvt *pvt = mci->pvt_info;
- struct csrow_info *csr;
struct pci_dev *pdev;
int i, j;
- int csrow = 0;
- unsigned long last_page = 0;
enum edac_type mode;
enum mem_type mtype;
+ struct dimm_info *dimm;
/* Get data from the MC register, function 0 */
pdev = pvt->pci_mcr[0];
@@ -657,21 +557,20 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
pci_read_config_dword(pvt->pci_ch[i][0],
MC_CHANNEL_DIMM_INIT_PARAMS, &data);
- pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
- 4 : 2;
+
+ if (data & THREE_DIMMS_PRESENT)
+ pvt->channel[i].is_3dimms_present = true;
+
+ if (data & SINGLE_QUAD_RANK_PRESENT)
+ pvt->channel[i].is_single_4rank = true;
+
+ if (data & QUAD_RANK_PRESENT)
+ pvt->channel[i].has_4rank = true;
if (data & REGISTERED_DIMM)
mtype = MEM_RDDR3;
else
mtype = MEM_DDR3;
-#if 0
- if (data & THREE_DIMMS_PRESENT)
- pvt->channel[i].dimms = 3;
- else if (data & SINGLE_QUAD_RANK_PRESENT)
- pvt->channel[i].dimms = 1;
- else
- pvt->channel[i].dimms = 2;
-#endif
/* Devices 4-6 function 1 */
pci_read_config_dword(pvt->pci_ch[i][1],
@@ -682,11 +581,13 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
MC_DOD_CH_DIMM2, &dimm_dod[2]);
debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
- "%d ranks, %cDIMMs\n",
+ "%s%s%s%cDIMMs\n",
i,
RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
data,
- pvt->channel[i].ranks,
+ pvt->channel[i].is_3dimms_present ? "3DIMMS " : "",
+ pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "",
+ pvt->channel[i].has_4rank ? "HAS_4R " : "",
(data & REGISTERED_DIMM) ? 'R' : 'U');
for (j = 0; j < 3; j++) {
@@ -696,6 +597,8 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
if (!DIMM_PRESENT(dimm_dod[j]))
continue;
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+ i, j, 0);
banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
@@ -704,8 +607,6 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
/* DDR3 has 8 I/O banks */
size = (rows * cols * banks * ranks) >> (20 - 3);
- pvt->channel[i].dimms++;
-
debugf0("\tdimm %d %d Mb offset: %x, "
"bank: %d, rank: %d, row: %#x, col: %#x\n",
j, size,
@@ -714,44 +615,28 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
npages = MiB_TO_PAGES(size);
- csr = &mci->csrows[csrow];
- csr->first_page = last_page + 1;
- last_page += npages;
- csr->last_page = last_page;
- csr->nr_pages = npages;
-
- csr->page_mask = 0;
- csr->grain = 8;
- csr->csrow_idx = csrow;
- csr->nr_channels = 1;
-
- csr->channels[0].chan_idx = i;
- csr->channels[0].ce_count = 0;
-
- pvt->csrow_map[i][j] = csrow;
+ dimm->nr_pages = npages;
switch (banks) {
case 4:
- csr->dtype = DEV_X4;
+ dimm->dtype = DEV_X4;
break;
case 8:
- csr->dtype = DEV_X8;
+ dimm->dtype = DEV_X8;
break;
case 16:
- csr->dtype = DEV_X16;
+ dimm->dtype = DEV_X16;
break;
default:
- csr->dtype = DEV_UNKNOWN;
+ dimm->dtype = DEV_UNKNOWN;
}
- csr->edac_mode = mode;
- csr->mtype = mtype;
- snprintf(csr->channels[0].label,
- sizeof(csr->channels[0].label),
- "CPU#%uChannel#%u_DIMM#%u",
- pvt->i7core_dev->socket, i, j);
-
- csrow++;
+ snprintf(dimm->label, sizeof(dimm->label),
+ "CPU#%uChannel#%u_DIMM#%u",
+ pvt->i7core_dev->socket, i, j);
+ dimm->grain = 8;
+ dimm->edac_mode = mode;
+ dimm->mtype = mtype;
}
pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
@@ -1567,22 +1452,16 @@ error:
/****************************************************************************
Error check routines
****************************************************************************/
-static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
+static void i7core_rdimm_update_errcount(struct mem_ctl_info *mci,
const int chan,
const int dimm,
const int add)
{
- char *msg;
- struct i7core_pvt *pvt = mci->pvt_info;
- int row = pvt->csrow_map[chan][dimm], i;
+ int i;
for (i = 0; i < add; i++) {
- msg = kasprintf(GFP_KERNEL, "Corrected error "
- "(Socket=%d channel=%d dimm=%d)",
- pvt->i7core_dev->socket, chan, dimm);
-
- edac_mc_handle_fbd_ce(mci, row, 0, msg);
- kfree (msg);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
+ chan, dimm, -1, "error", "", NULL);
}
}
@@ -1623,11 +1502,11 @@ static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
/*updated the edac core */
if (add0 != 0)
- i7core_rdimm_update_csrow(mci, chan, 0, add0);
+ i7core_rdimm_update_errcount(mci, chan, 0, add0);
if (add1 != 0)
- i7core_rdimm_update_csrow(mci, chan, 1, add1);
+ i7core_rdimm_update_errcount(mci, chan, 1, add1);
if (add2 != 0)
- i7core_rdimm_update_csrow(mci, chan, 2, add2);
+ i7core_rdimm_update_errcount(mci, chan, 2, add2);
}
@@ -1747,20 +1626,30 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
const struct mce *m)
{
struct i7core_pvt *pvt = mci->pvt_info;
- char *type, *optype, *err, *msg;
+ char *type, *optype, *err, msg[80];
+ enum hw_event_mc_err_type tp_event;
unsigned long error = m->status & 0x1ff0000l;
+ bool uncorrected_error = m->mcgstatus & 1ll << 61;
+ bool ripv = m->mcgstatus & 1;
u32 optypenum = (m->status >> 4) & 0x07;
u32 core_err_cnt = (m->status >> 38) & 0x7fff;
u32 dimm = (m->misc >> 16) & 0x3;
u32 channel = (m->misc >> 18) & 0x3;
u32 syndrome = m->misc >> 32;
u32 errnum = find_first_bit(&error, 32);
- int csrow;
- if (m->mcgstatus & 1)
- type = "FATAL";
- else
- type = "NON_FATAL";
+ if (uncorrected_error) {
+ if (ripv) {
+ type = "FATAL";
+ tp_event = HW_EVENT_ERR_FATAL;
+ } else {
+ type = "NON_FATAL";
+ tp_event = HW_EVENT_ERR_UNCORRECTED;
+ }
+ } else {
+ type = "CORRECTED";
+ tp_event = HW_EVENT_ERR_CORRECTED;
+ }
switch (optypenum) {
case 0:
@@ -1815,27 +1704,20 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
err = "unknown";
}
- /* FIXME: should convert addr into bank and rank information */
- msg = kasprintf(GFP_ATOMIC,
- "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
- "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
- type, (long long) m->addr, m->cpu, dimm, channel,
- syndrome, core_err_cnt, (long long)m->status,
- (long long)m->misc, optype, err);
-
- debugf0("%s", msg);
+ snprintf(msg, sizeof(msg), "count=%d %s", core_err_cnt, optype);
- csrow = pvt->csrow_map[channel][dimm];
-
- /* Call the helper to output message */
- if (m->mcgstatus & 1)
- edac_mc_handle_fbd_ue(mci, csrow, 0,
- 0 /* FIXME: should be channel here */, msg);
- else if (!pvt->is_registered)
- edac_mc_handle_fbd_ce(mci, csrow,
- 0 /* FIXME: should be channel here */, msg);
-
- kfree(msg);
+ /*
+ * Call the helper to output message
+ * FIXME: what to do if core_err_cnt > 1? Currently, it generates
+ * only one event
+ */
+ if (uncorrected_error || !pvt->is_registered)
+ edac_mc_handle_error(tp_event, mci,
+ m->addr >> PAGE_SHIFT,
+ m->addr & ~PAGE_MASK,
+ syndrome,
+ channel, dimm, -1,
+ err, msg, m);
}
/*
@@ -1932,12 +1814,6 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
if (mce->bank != 8)
return NOTIFY_DONE;
-#ifdef CONFIG_SMP
- /* Only handle if it is the right mc controller */
- if (mce->socketid != pvt->i7core_dev->socket)
- return NOTIFY_DONE;
-#endif
-
smp_rmb();
if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
smp_wmb();
@@ -2234,8 +2110,6 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
if (pvt->enable_scrub)
disable_sdram_scrub_setting(mci);
- mce_unregister_decode_chain(&i7_mce_dec);
-
/* Disable EDAC polling */
i7core_pci_ctl_release(pvt);
@@ -2252,15 +2126,19 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
{
struct mem_ctl_info *mci;
struct i7core_pvt *pvt;
- int rc, channels, csrows;
-
- /* Check the number of active and not disabled channels */
- rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
- if (unlikely(rc < 0))
- return rc;
+ int rc;
+ struct edac_mc_layer layers[2];
/* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket);
+
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = NUM_CHANS;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = MAX_DIMMS;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(i7core_dev->socket, ARRAY_SIZE(layers), layers,
+ sizeof(*pvt));
if (unlikely(!mci))
return -ENOMEM;
@@ -2336,8 +2214,6 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
/* DCLK for scrub rate setting */
pvt->dclk_freq = get_dclk_freq();
- mce_register_decode_chain(&i7_mce_dec);
-
return 0;
fail0:
@@ -2481,8 +2357,10 @@ static int __init i7core_init(void)
pci_rc = pci_register_driver(&i7core_driver);
- if (pci_rc >= 0)
+ if (pci_rc >= 0) {
+ mce_register_decode_chain(&i7_mce_dec);
return 0;
+ }
i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
pci_rc);
@@ -2498,6 +2376,7 @@ static void __exit i7core_exit(void)
{
debugf2("MC: " __FILE__ ": %s()\n", __func__);
pci_unregister_driver(&i7core_driver);
+ mce_unregister_decode_chain(&i7_mce_dec);
}
module_init(i7core_init);
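The last i7core hunks move the MCE decode-chain hookup from per-controller registration to module init/exit, so the notifier is installed exactly once for the lifetime of the driver. The resulting shape is approximately the following; example_mce_dec and example_pci_driver are stand-ins for the driver's own objects and would need real contents to be useful:

#include <linux/module.h>
#include <linux/pci.h>
#include <asm/mce.h>

static struct notifier_block example_mce_dec;	/* .notifier_call set elsewhere */
static struct pci_driver example_pci_driver;	/* .name/.id_table set elsewhere */

static int __init example_init(void)
{
	int rc = pci_register_driver(&example_pci_driver);

	if (rc >= 0) {
		/* hook into the MCE decode chain only once the driver is up */
		mce_register_decode_chain(&example_mce_dec);
		return 0;
	}
	return rc;
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_pci_driver);
	mce_unregister_decode_chain(&example_mce_dec);
}

module_init(example_init);
module_exit(example_exit);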
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 3bf2b2f..52072c2 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -12,7 +12,7 @@
* 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>.
*
* Written with reference to 82443BX Host Bridge Datasheet:
- * http://download.intel.com/design/chipsets/datashts/29063301.pdf
+ * http://download.intel.com/design/chipsets/datashts/29063301.pdf
* references to this document given in [].
*
* This module doesn't support the 440LX, but it may be possible to
@@ -156,19 +156,19 @@ static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci,
if (info->eap & I82443BXGX_EAP_OFFSET_SBE) {
error_found = 1;
if (handle_errors)
- edac_mc_handle_ce(mci, page, pageoffset,
- /* 440BX/GX don't make syndrome information
- * available */
- 0, edac_mc_find_csrow_by_page(mci, page), 0,
- mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, pageoffset, 0,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, -1, mci->ctl_name, "", NULL);
}
if (info->eap & I82443BXGX_EAP_OFFSET_MBE) {
error_found = 1;
if (handle_errors)
- edac_mc_handle_ue(mci, page, pageoffset,
- edac_mc_find_csrow_by_page(mci, page),
- mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, pageoffset, 0,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, -1, mci->ctl_name, "", NULL);
}
return error_found;
@@ -189,6 +189,7 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
enum mem_type mtype)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
int index;
u8 drbar, dramc;
u32 row_base, row_high_limit, row_high_limit_last;
@@ -197,6 +198,8 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
row_high_limit_last = 0;
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
+
pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
mci->mc_idx, __FILE__, __func__, index, drbar);
@@ -217,14 +220,14 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
row_base = row_high_limit_last;
csrow->first_page = row_base >> PAGE_SHIFT;
csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
- csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+ dimm->nr_pages = csrow->last_page - csrow->first_page + 1;
/* EAP reports in 4kilobyte granularity [61] */
- csrow->grain = 1 << 12;
- csrow->mtype = mtype;
+ dimm->grain = 1 << 12;
+ dimm->mtype = mtype;
/* I don't think 440BX can tell you device type? FIXME? */
- csrow->dtype = DEV_UNKNOWN;
+ dimm->dtype = DEV_UNKNOWN;
/* Mode is global to all rows on 440BX */
- csrow->edac_mode = edac_mode;
+ dimm->edac_mode = edac_mode;
row_high_limit_last = row_high_limit;
}
}
@@ -232,6 +235,7 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
u8 dramc;
u32 nbxcfg, ecc_mode;
enum mem_type mtype;
@@ -245,8 +249,13 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg))
return -EIO;
- mci = edac_mc_alloc(0, I82443BXGX_NR_CSROWS, I82443BXGX_NR_CHANS, 0);
-
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = I82443BXGX_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = I82443BXGX_NR_CHANS;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (mci == NULL)
return -ENOMEM;
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index c779092..0804505 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -99,6 +99,7 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
struct i82860_error_info *info,
int handle_errors)
{
+ struct dimm_info *dimm;
int row;
if (!(info->errsts2 & 0x0003))
@@ -108,18 +109,25 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
return 1;
if ((info->errsts ^ info->errsts2) & 0x0003) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1, "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
info->eap >>= PAGE_SHIFT;
row = edac_mc_find_csrow_by_page(mci, info->eap);
+ dimm = mci->csrows[row].channels[0].dimm;
if (info->errsts & 0x0002)
- edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ info->eap, 0, 0,
+ dimm->location[0], dimm->location[1], -1,
+ "i82860 UE", "", NULL);
else
- edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0,
- "i82860 UE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ info->eap, 0, info->derrsyn,
+ dimm->location[0], dimm->location[1], -1,
+ "i82860 CE", "", NULL);
return 1;
}
@@ -140,6 +148,7 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
u16 value;
u32 cumul_size;
struct csrow_info *csrow;
+ struct dimm_info *dimm;
int index;
pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim);
@@ -153,6 +162,8 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
*/
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
+
pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
cumul_size = (value & I82860_GBA_MASK) <<
(I82860_GBA_SHIFT - PAGE_SHIFT);
@@ -164,30 +175,38 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
+ dimm->nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */
- csrow->mtype = MEM_RMBS;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
+ dimm->grain = 1 << 12; /* I82860_EAP has 4KiB resolution */
+ dimm->mtype = MEM_RMBS;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
}
}
static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct i82860_error_info discard;
- /* RDRAM has channels but these don't map onto the abstractions that
- edac uses.
- The device groups from the GRA registers seem to map reasonably
- well onto the notion of a chip select row.
- There are 16 GRA registers and since the name is associated with
- the channel and the GRA registers map to physical devices so we are
- going to make 1 channel for group.
+ /*
+ * RDRAM has channels but these don't map onto the csrow abstraction.
+ * According to the datasheet, there are 2 Rambus channels, supporting
+ * up to 16 direct RDRAM devices.
+ * The device groups from the GRA registers seem to map reasonably
+ * well onto the notion of a chip select row.
+ * There are 16 GRA registers and, since the name is associated with
+ * the channel and the GRA registers map to physical devices, we are
+ * going to make one channel per group.
*/
- mci = edac_mc_alloc(0, 16, 1, 0);
-
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = 2;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = 8;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (!mci)
return -ENOMEM;
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 10f15d8..b613e31 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -38,7 +38,8 @@
#endif /* PCI_DEVICE_ID_INTEL_82875_6 */
/* four csrows in dual channel, eight in single channel */
-#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans))
+#define I82875P_NR_DIMMS 8
+#define I82875P_NR_CSROWS(nr_chans) (I82875P_NR_DIMMS / (nr_chans))
/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
#define I82875P_EAP 0x58 /* Error Address Pointer (32b)
@@ -235,7 +236,9 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
return 1;
if ((info->errsts ^ info->errsts2) & 0x0081) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1,
+ "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
@@ -243,11 +246,15 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
row = edac_mc_find_csrow_by_page(mci, info->eap);
if (info->errsts & 0x0080)
- edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ info->eap, 0, 0,
+ row, -1, -1,
+ "i82875p UE", "", NULL);
else
- edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row,
- multi_chan ? (info->des & 0x1) : 0,
- "i82875p CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ info->eap, 0, info->derrsyn,
+ row, multi_chan ? (info->des & 0x1) : 0,
+ -1, "i82875p CE", "", NULL);
return 1;
}
@@ -342,11 +349,13 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
void __iomem * ovrfl_window, u32 drc)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
+ unsigned nr_chans = dual_channel_active(drc) + 1;
unsigned long last_cumul_size;
u8 value;
u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
- u32 cumul_size;
- int index;
+ u32 cumul_size, nr_pages;
+ int index, j;
drc_ddim = (drc >> 18) & 0x1;
last_cumul_size = 0;
@@ -369,12 +378,18 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
+ nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */
- csrow->mtype = MEM_DDR;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
+
+ for (j = 0; j < nr_chans; j++) {
+ dimm = csrow->channels[j].dimm;
+
+ dimm->nr_pages = nr_pages / nr_chans;
+ dimm->grain = 1 << 12; /* I82875P_EAP has 4KiB resolution */
+ dimm->mtype = MEM_DDR;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
+ }
}
}
@@ -382,6 +397,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc = -ENODEV;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct i82875p_pvt *pvt;
struct pci_dev *ovrfl_pdev;
void __iomem *ovrfl_window;
@@ -397,9 +413,14 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
return -ENODEV;
drc = readl(ovrfl_window + I82875P_DRC);
nr_chans = dual_channel_active(drc) + 1;
- mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
- nr_chans, 0);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = I82875P_NR_CSROWS(nr_chans);
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = nr_chans;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (!mci) {
rc = -ENOMEM;
goto fail0;
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 0cd8368..433332c 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -29,7 +29,8 @@
#define PCI_DEVICE_ID_INTEL_82975_0 0x277c
#endif /* PCI_DEVICE_ID_INTEL_82975_0 */
-#define I82975X_NR_CSROWS(nr_chans) (8/(nr_chans))
+#define I82975X_NR_DIMMS 8
+#define I82975X_NR_CSROWS(nr_chans) (I82975X_NR_DIMMS / (nr_chans))
/* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */
#define I82975X_EAP 0x58 /* Dram Error Address Pointer (32b)
@@ -287,7 +288,8 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
return 1;
if ((info->errsts ^ info->errsts2) & 0x0003) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1, "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
@@ -309,13 +311,18 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1;
offst = info->eap
& ((1 << PAGE_SHIFT) -
- (1 << mci->csrows[row].grain));
+ (1 << mci->csrows[row].channels[chan].dimm->grain));
if (info->errsts & 0x0002)
- edac_mc_handle_ue(mci, page, offst , row, "i82975x UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, offst, 0,
+ row, -1, -1,
+ "i82975x UE", "", NULL);
else
- edac_mc_handle_ce(mci, page, offst, info->derrsyn, row,
- chan, "i82975x CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, offst, info->derrsyn,
+ row, chan ? chan : 0, -1,
+ "i82975x CE", "", NULL);
return 1;
}
@@ -370,8 +377,10 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
struct csrow_info *csrow;
unsigned long last_cumul_size;
u8 value;
- u32 cumul_size;
+ u32 cumul_size, nr_pages;
int index, chan;
+ struct dimm_info *dimm;
+ enum dev_type dtype;
last_cumul_size = 0;
@@ -400,28 +409,33 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
cumul_size);
+ nr_pages = cumul_size - last_cumul_size;
+ if (!nr_pages)
+ continue;
+
/*
* Initialise dram labels
* index values:
* [0-7] for single-channel; i.e. csrow->nr_channels = 1
* [0-3] for dual-channel; i.e. csrow->nr_channels = 2
*/
- for (chan = 0; chan < csrow->nr_channels; chan++)
- strncpy(csrow->channels[chan].label,
+ dtype = i82975x_dram_type(mch_window, index);
+ for (chan = 0; chan < csrow->nr_channels; chan++) {
+ dimm = mci->csrows[index].channels[chan].dimm;
+
+ dimm->nr_pages = nr_pages / csrow->nr_channels;
+ strncpy(csrow->channels[chan].dimm->label,
labels[(index >> 1) + (chan * 2)],
EDAC_MC_LABEL_LEN);
-
- if (cumul_size == last_cumul_size)
- continue; /* not populated */
+ dimm->grain = 1 << 7; /* 128Byte cache-line resolution */
+ dimm->dtype = i82975x_dram_type(mch_window, index);
+ dimm->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
+ dimm->edac_mode = EDAC_SECDED; /* only supported */
+ }
csrow->first_page = last_cumul_size;
csrow->last_page = cumul_size - 1;
- csrow->nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 7; /* 128Byte cache-line resolution */
- csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
- csrow->dtype = i82975x_dram_type(mch_window, index);
- csrow->edac_mode = EDAC_SECDED; /* only supported */
}
}
@@ -463,6 +477,7 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc = -ENODEV;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct i82975x_pvt *pvt;
void __iomem *mch_window;
u32 mchbar;
@@ -531,8 +546,13 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
chans = dual_channel_active(mch_window) + 1;
/* assuming only one controller, index thus is 0 */
- mci = edac_mc_alloc(sizeof(*pvt), I82975X_NR_CSROWS(chans),
- chans, 0);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = I82975X_NR_DIMMS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = I82975X_NR_CSROWS(chans);
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
if (!mci) {
rc = -ENOMEM;
goto fail1;
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index c6074c5..8c87a5e 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -5,8 +5,6 @@
#include <asm/mce.h>
-#define BIT_64(n) (U64_C(1) << (n))
-
#define EC(x) ((x) & 0xffff)
#define XEC(x, mask) (((x) >> 16) & mask)
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 73464a6..0e37462 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -854,12 +854,16 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci)
mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
if (err_detect & DDR_EDE_SBE)
- edac_mc_handle_ce(mci, pfn, err_addr & ~PAGE_MASK,
- syndrome, row_index, 0, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ pfn, err_addr & ~PAGE_MASK, syndrome,
+ row_index, 0, -1,
+ mci->ctl_name, "", NULL);
if (err_detect & DDR_EDE_MBE)
- edac_mc_handle_ue(mci, pfn, err_addr & ~PAGE_MASK,
- row_index, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ pfn, err_addr & ~PAGE_MASK, syndrome,
+ row_index, 0, -1,
+ mci->ctl_name, "", NULL);
out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
}
@@ -883,6 +887,7 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
{
struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
struct csrow_info *csrow;
+ struct dimm_info *dimm;
u32 sdram_ctl;
u32 sdtype;
enum mem_type mtype;
@@ -929,6 +934,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
u32 end;
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
+
cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
(index * MPC85XX_MC_CS_BNDS_OFS));
@@ -944,19 +951,21 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
csrow->first_page = start;
csrow->last_page = end;
- csrow->nr_pages = end + 1 - start;
- csrow->grain = 8;
- csrow->mtype = mtype;
- csrow->dtype = DEV_UNKNOWN;
+
+ dimm->nr_pages = end + 1 - start;
+ dimm->grain = 8;
+ dimm->mtype = mtype;
+ dimm->dtype = DEV_UNKNOWN;
if (sdram_ctl & DSC_X32_EN)
- csrow->dtype = DEV_X32;
- csrow->edac_mode = EDAC_SECDED;
+ dimm->dtype = DEV_X32;
+ dimm->edac_mode = EDAC_SECDED;
}
}
static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct mpc85xx_mc_pdata *pdata;
struct resource r;
u32 sdram_ctl;
@@ -965,7 +974,14 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
return -ENOMEM;
- mci = edac_mc_alloc(sizeof(*pdata), 4, 1, edac_mc_idx);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = 4;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+ sizeof(*pdata));
if (!mci) {
devres_release_group(&op->dev, mpc85xx_mc_err_probe);
return -ENOMEM;
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 7e5ff36..b0bb5a3 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -611,12 +611,17 @@ static void mv64x60_mc_check(struct mem_ctl_info *mci)
/* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
if (!(reg & 0x1))
- edac_mc_handle_ce(mci, err_addr >> PAGE_SHIFT,
- err_addr & PAGE_MASK, syndrome, 0, 0,
- mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ err_addr >> PAGE_SHIFT,
+ err_addr & PAGE_MASK, syndrome,
+ 0, 0, -1,
+ mci->ctl_name, "", NULL);
else /* 2 bit error, UE */
- edac_mc_handle_ue(mci, err_addr >> PAGE_SHIFT,
- err_addr & PAGE_MASK, 0, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ err_addr >> PAGE_SHIFT,
+ err_addr & PAGE_MASK, 0,
+ 0, 0, -1,
+ mci->ctl_name, "", NULL);
/* clear the error */
out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
@@ -656,6 +661,8 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
struct mv64x60_mc_pdata *pdata)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
+
u32 devtype;
u32 ctl;
@@ -664,35 +671,36 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
csrow = &mci->csrows[0];
- csrow->first_page = 0;
- csrow->nr_pages = pdata->total_mem >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- csrow->grain = 8;
+ dimm = csrow->channels[0].dimm;
+
+ dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT;
+ dimm->grain = 8;
- csrow->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
+ dimm->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
devtype = (ctl >> 20) & 0x3;
switch (devtype) {
case 0x0:
- csrow->dtype = DEV_X32;
+ dimm->dtype = DEV_X32;
break;
case 0x2: /* could be X8 too, but no way to tell */
- csrow->dtype = DEV_X16;
+ dimm->dtype = DEV_X16;
break;
case 0x3:
- csrow->dtype = DEV_X4;
+ dimm->dtype = DEV_X4;
break;
default:
- csrow->dtype = DEV_UNKNOWN;
+ dimm->dtype = DEV_UNKNOWN;
break;
}
- csrow->edac_mode = EDAC_SECDED;
+ dimm->edac_mode = EDAC_SECDED;
}
static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct mv64x60_mc_pdata *pdata;
struct resource *r;
u32 ctl;
@@ -701,7 +709,14 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL))
return -ENOMEM;
- mci = edac_mc_alloc(sizeof(struct mv64x60_mc_pdata), 1, 1, edac_mc_idx);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = 1;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = 1;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
+ sizeof(struct mv64x60_mc_pdata));
if (!mci) {
printk(KERN_ERR "%s: No memory for CPU err\n", __func__);
devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
index 7f71ee4..b095a90 100644
--- a/drivers/edac/pasemi_edac.c
+++ b/drivers/edac/pasemi_edac.c
@@ -110,15 +110,16 @@ static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
/* uncorrectable/multi-bit errors */
if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS |
MCDEBUG_ERRSTA_RFL_STATUS)) {
- edac_mc_handle_ue(mci, mci->csrows[cs].first_page, 0,
- cs, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ mci->csrows[cs].first_page, 0, 0,
+ cs, 0, -1, mci->ctl_name, "", NULL);
}
/* correctable/single-bit errors */
- if (errsta & MCDEBUG_ERRSTA_SBE_STATUS) {
- edac_mc_handle_ce(mci, mci->csrows[cs].first_page, 0,
- 0, cs, 0, mci->ctl_name);
- }
+ if (errsta & MCDEBUG_ERRSTA_SBE_STATUS)
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ mci->csrows[cs].first_page, 0, 0,
+ cs, 0, -1, mci->ctl_name, "", NULL);
}
static void pasemi_edac_check(struct mem_ctl_info *mci)
@@ -135,11 +136,13 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
enum edac_type edac_mode)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
u32 rankcfg;
int index;
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
pci_read_config_dword(pdev,
MCDRAM_RANKCFG + (index * 12),
@@ -151,20 +154,20 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >>
MCDRAM_RANKCFG_TYPE_SIZE_S) {
case 0:
- csrow->nr_pages = 128 << (20 - PAGE_SHIFT);
+ dimm->nr_pages = 128 << (20 - PAGE_SHIFT);
break;
case 1:
- csrow->nr_pages = 256 << (20 - PAGE_SHIFT);
+ dimm->nr_pages = 256 << (20 - PAGE_SHIFT);
break;
case 2:
case 3:
- csrow->nr_pages = 512 << (20 - PAGE_SHIFT);
+ dimm->nr_pages = 512 << (20 - PAGE_SHIFT);
break;
case 4:
- csrow->nr_pages = 1024 << (20 - PAGE_SHIFT);
+ dimm->nr_pages = 1024 << (20 - PAGE_SHIFT);
break;
case 5:
- csrow->nr_pages = 2048 << (20 - PAGE_SHIFT);
+ dimm->nr_pages = 2048 << (20 - PAGE_SHIFT);
break;
default:
edac_mc_printk(mci, KERN_ERR,
@@ -174,13 +177,13 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
}
csrow->first_page = last_page_in_mmc;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- last_page_in_mmc += csrow->nr_pages;
+ csrow->last_page = csrow->first_page + dimm->nr_pages - 1;
+ last_page_in_mmc += dimm->nr_pages;
csrow->page_mask = 0;
- csrow->grain = PASEMI_EDAC_ERROR_GRAIN;
- csrow->mtype = MEM_DDR;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = edac_mode;
+ dimm->grain = PASEMI_EDAC_ERROR_GRAIN;
+ dimm->mtype = MEM_DDR;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = edac_mode;
}
return 0;
}
@@ -189,6 +192,7 @@ static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct mem_ctl_info *mci = NULL;
+ struct edac_mc_layer layers[2];
u32 errctl1, errcor, scrub, mcen;
pci_read_config_dword(pdev, MCCFG_MCEN, &mcen);
@@ -205,9 +209,14 @@ static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
MCDEBUG_ERRCTL1_RFL_LOG_EN;
pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1);
- mci = edac_mc_alloc(0, PASEMI_EDAC_NR_CSROWS, PASEMI_EDAC_NR_CHANS,
- system_mmc_id++);
-
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = PASEMI_EDAC_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = PASEMI_EDAC_NR_CHANS;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(system_mmc_id++, ARRAY_SIZE(layers), layers,
+ 0);
if (mci == NULL)
return -ENOMEM;
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index d427c69..f3f9fed 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -727,7 +727,10 @@ ppc4xx_edac_handle_ce(struct mem_ctl_info *mci,
for (row = 0; row < mci->nr_csrows; row++)
if (ppc4xx_edac_check_bank_error(status, row))
- edac_mc_handle_ce_no_info(mci, message);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, 0,
+ row, 0, -1,
+ message, "", NULL);
}
/**
@@ -755,7 +758,10 @@ ppc4xx_edac_handle_ue(struct mem_ctl_info *mci,
for (row = 0; row < mci->nr_csrows; row++)
if (ppc4xx_edac_check_bank_error(status, row))
- edac_mc_handle_ue(mci, page, offset, row, message);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, offset, 0,
+ row, 0, -1,
+ message, "", NULL);
}
/**
@@ -895,9 +901,8 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
enum mem_type mtype;
enum dev_type dtype;
enum edac_type edac_mode;
- int row;
- u32 mbxcf, size;
- static u32 ppc4xx_last_page;
+ int row, j;
+ u32 mbxcf, size, nr_pages;
/* Establish the memory type and width */
@@ -948,7 +953,7 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
case SDRAM_MBCF_SZ_2GB:
case SDRAM_MBCF_SZ_4GB:
case SDRAM_MBCF_SZ_8GB:
- csi->nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);
+ nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);
break;
default:
ppc4xx_edac_mc_printk(KERN_ERR, mci,
@@ -959,10 +964,6 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
goto done;
}
- csi->first_page = ppc4xx_last_page;
- csi->last_page = csi->first_page + csi->nr_pages - 1;
- csi->page_mask = 0;
-
/*
* It's unclear exactly what grain should be set to
* here. The SDRAM_ECCES register allows resolution of
@@ -975,15 +976,17 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
* possible values would be the PLB width (16), the
* page size (PAGE_SIZE) or the memory width (2 or 4).
*/
+ for (j = 0; j < csi->nr_channels; j++) {
+ struct dimm_info *dimm = csi->channels[j].dimm;
- csi->grain = 1;
-
- csi->mtype = mtype;
- csi->dtype = dtype;
+ dimm->nr_pages = nr_pages / csi->nr_channels;
+ dimm->grain = 1;
- csi->edac_mode = edac_mode;
+ dimm->mtype = mtype;
+ dimm->dtype = dtype;
- ppc4xx_last_page += csi->nr_pages;
+ dimm->edac_mode = edac_mode;
+ }
}
done:
@@ -1236,6 +1239,7 @@ static int __devinit ppc4xx_edac_probe(struct platform_device *op)
dcr_host_t dcr_host;
const struct device_node *np = op->dev.of_node;
struct mem_ctl_info *mci = NULL;
+ struct edac_mc_layer layers[2];
static int ppc4xx_edac_instance;
/*
@@ -1281,12 +1285,14 @@ static int __devinit ppc4xx_edac_probe(struct platform_device *op)
* controller instance and perform the appropriate
* initialization.
*/
-
- mci = edac_mc_alloc(sizeof(struct ppc4xx_edac_pdata),
- ppc4xx_edac_nr_csrows,
- ppc4xx_edac_nr_chans,
- ppc4xx_edac_instance);
-
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = ppc4xx_edac_nr_csrows;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = ppc4xx_edac_nr_chans;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(ppc4xx_edac_instance, ARRAY_SIZE(layers), layers,
+ sizeof(struct ppc4xx_edac_pdata));
if (mci == NULL) {
ppc4xx_edac_printk(KERN_ERR, "%s: "
"Failed to allocate EDAC MC instance!\n",
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index 6d908ad..e1cacd1 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -179,10 +179,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci,
error_found = 1;
if (handle_errors)
- edac_mc_handle_ce(mci, page, 0, /* not avail */
- syndrome,
- edac_mc_find_csrow_by_page(mci, page),
- 0, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ page, 0, syndrome,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, -1,
+ mci->ctl_name, "", NULL);
}
if (info->eapr & BIT(1)) { /* UE? */
@@ -190,9 +191,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci,
if (handle_errors)
/* 82600 doesn't give enough info */
- edac_mc_handle_ue(mci, page, 0,
- edac_mc_find_csrow_by_page(mci, page),
- mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ page, 0, 0,
+ edac_mc_find_csrow_by_page(mci, page),
+ 0, -1,
+ mci->ctl_name, "", NULL);
}
return error_found;
@@ -216,6 +219,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
u8 dramcr)
{
struct csrow_info *csrow;
+ struct dimm_info *dimm;
int index;
u8 drbar; /* SDRAM Row Boundary Address Register */
u32 row_high_limit, row_high_limit_last;
@@ -227,6 +231,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
for (index = 0; index < mci->nr_csrows; index++) {
csrow = &mci->csrows[index];
+ dimm = csrow->channels[0].dimm;
/* find the DRAM Chip Select Base address and mask */
pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
@@ -247,16 +252,17 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
csrow->first_page = row_base >> PAGE_SHIFT;
csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
- csrow->nr_pages = csrow->last_page - csrow->first_page + 1;
+
+ dimm->nr_pages = csrow->last_page - csrow->first_page + 1;
/* Error address is top 19 bits - so granularity is *
* 14 bits */
- csrow->grain = 1 << 14;
- csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
+ dimm->grain = 1 << 14;
+ dimm->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
/* FIXME - check that this is unknowable with this chipset */
- csrow->dtype = DEV_UNKNOWN;
+ dimm->dtype = DEV_UNKNOWN;
/* Mode is global on 82600 */
- csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
+ dimm->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
row_high_limit_last = row_high_limit;
}
}
@@ -264,6 +270,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
u8 dramcr;
u32 eapr;
u32 scrub_disabled;
@@ -278,8 +285,13 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
sdram_refresh_rate);
debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
- mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS, 0);
-
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = R82600_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = R82600_NR_CHANS;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (mci == NULL)
return -ENOMEM;
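
A second pattern shared by the csrow-oriented drivers converted above (i82443bxgx, i82860, i82875p, i82975x, mpc85xx, mv64x60, pasemi, r82600): size, grain, memory type, device type and ECC mode move from struct csrow_info into the struct dimm_info hanging off each channel. The sketch below shows such an init loop for a single-channel controller; it is illustrative only, and the geometry values are placeholders rather than anything read from real chipset registers.

#include "edac_core.h"	/* struct mem_ctl_info, csrow_info, dimm_info */

static void example_init_csrows(struct mem_ctl_info *mci)
{
	unsigned long nr_pages = 0x10000;	/* placeholder pages per rank */
	int index;

	for (index = 0; index < mci->nr_csrows; index++) {
		struct csrow_info *csrow = &mci->csrows[index];
		struct dimm_info *dimm = csrow->channels[0].dimm;

		/* the address window still belongs to the csrow */
		csrow->first_page = index * nr_pages;
		csrow->last_page = csrow->first_page + nr_pages - 1;

		/* size, grain, type and ECC mode are now per-DIMM properties */
		dimm->nr_pages = nr_pages;
		dimm->grain = 1 << 12;		/* placeholder error granularity */
		dimm->mtype = MEM_DDR;
		dimm->dtype = DEV_UNKNOWN;
		dimm->edac_mode = EDAC_SECDED;
	}
}
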
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 123204f..36ad17e 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -314,8 +314,6 @@ struct sbridge_pvt {
struct sbridge_info info;
struct sbridge_channel channel[NUM_CHANNELS];
- int csrow_map[NUM_CHANNELS][MAX_DIMMS];
-
/* Memory type detection */
bool is_mirrored, is_lockstep, is_close_pg;
@@ -487,29 +485,14 @@ static struct pci_dev *get_pdev_slot_func(u8 bus, unsigned slot,
}
/**
- * sbridge_get_active_channels() - gets the number of channels and csrows
+ * check_if_ecc_is_active() - Checks if ECC is active
* bus: Device bus
- * @channels: Number of channels that will be returned
- * @csrows: Number of csrows found
- *
- * Since EDAC core needs to know in advance the number of available channels
- * and csrows, in order to allocate memory for csrows/channels, it is needed
- * to run two similar steps. At the first step, implemented on this function,
- * it checks the number of csrows/channels present at one socket, identified
- * by the associated PCI bus.
- * this is used in order to properly allocate the size of mci components.
- * Note: one csrow is one dimm.
*/
-static int sbridge_get_active_channels(const u8 bus, unsigned *channels,
- unsigned *csrows)
+static int check_if_ecc_is_active(const u8 bus)
{
struct pci_dev *pdev = NULL;
- int i, j;
u32 mcmtr;
- *channels = 0;
- *csrows = 0;
-
pdev = get_pdev_slot_func(bus, 15, 0);
if (!pdev) {
sbridge_printk(KERN_ERR, "Couldn't find PCI device "
@@ -523,41 +506,14 @@ static int sbridge_get_active_channels(const u8 bus, unsigned *channels,
sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
return -ENODEV;
}
-
- for (i = 0; i < NUM_CHANNELS; i++) {
- u32 mtr;
-
- /* Device 15 functions 2 - 5 */
- pdev = get_pdev_slot_func(bus, 15, 2 + i);
- if (!pdev) {
- sbridge_printk(KERN_ERR, "Couldn't find PCI device "
- "%2x.%02d.%d!!!\n",
- bus, 15, 2 + i);
- return -ENODEV;
- }
- (*channels)++;
-
- for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
- pci_read_config_dword(pdev, mtr_regs[j], &mtr);
- debugf1("Bus#%02x channel #%d MTR%d = %x\n", bus, i, j, mtr);
- if (IS_DIMM_PRESENT(mtr))
- (*csrows)++;
- }
- }
-
- debugf0("Number of active channels: %d, number of active dimms: %d\n",
- *channels, *csrows);
-
return 0;
}
-static int get_dimm_config(const struct mem_ctl_info *mci)
+static int get_dimm_config(struct mem_ctl_info *mci)
{
struct sbridge_pvt *pvt = mci->pvt_info;
- struct csrow_info *csr;
+ struct dimm_info *dimm;
int i, j, banks, ranks, rows, cols, size, npages;
- int csrow = 0;
- unsigned long last_page = 0;
u32 reg;
enum edac_type mode;
enum mem_type mtype;
@@ -599,7 +555,7 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
pvt->is_close_pg = false;
}
- pci_read_config_dword(pvt->pci_ta, RANK_CFG_A, &reg);
+ pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, &reg);
if (IS_RDIMM_ENABLED(reg)) {
/* FIXME: Can also be LRDIMM */
debugf0("Memory is registered\n");
@@ -616,6 +572,8 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
u32 mtr;
for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
+ dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
+ i, j, 0);
pci_read_config_dword(pvt->pci_tad[i],
mtr_regs[j], &mtr);
debugf4("Channel #%d MTR%d = %x\n", i, j, mtr);
@@ -634,29 +592,15 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
pvt->sbridge_dev->mc, i, j,
size, npages,
banks, ranks, rows, cols);
- csr = &mci->csrows[csrow];
-
- csr->first_page = last_page;
- csr->last_page = last_page + npages - 1;
- csr->page_mask = 0UL; /* Unused */
- csr->nr_pages = npages;
- csr->grain = 32;
- csr->csrow_idx = csrow;
- csr->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
- csr->ce_count = 0;
- csr->ue_count = 0;
- csr->mtype = mtype;
- csr->edac_mode = mode;
- csr->nr_channels = 1;
- csr->channels[0].chan_idx = i;
- csr->channels[0].ce_count = 0;
- pvt->csrow_map[i][j] = csrow;
- snprintf(csr->channels[0].label,
- sizeof(csr->channels[0].label),
+
+ dimm->nr_pages = npages;
+ dimm->grain = 32;
+ dimm->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
+ dimm->mtype = mtype;
+ dimm->edac_mode = mode;
+ snprintf(dimm->label, sizeof(dimm->label),
"CPU_SrcID#%u_Channel#%u_DIMM#%u",
pvt->sbridge_dev->source_id, i, j);
- last_page += npages;
- csrow++;
}
}
}
@@ -844,11 +788,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
u8 *socket,
long *channel_mask,
u8 *rank,
- char *area_type)
+ char **area_type, char *msg)
{
struct mem_ctl_info *new_mci;
struct sbridge_pvt *pvt = mci->pvt_info;
- char msg[256];
int n_rir, n_sads, n_tads, sad_way, sck_xch;
int sad_interl, idx, base_ch;
int interleave_mode;
@@ -870,12 +813,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
*/
if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
if (addr >= (u64)pvt->tohm) {
sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
@@ -892,7 +833,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
limit = SAD_LIMIT(reg);
if (limit <= prv) {
sprintf(msg, "Can't discover the memory socket");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
if (addr <= limit)
@@ -901,10 +841,9 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
}
if (n_sads == MAX_SAD) {
sprintf(msg, "Can't discover the memory socket");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
- area_type = get_dram_attr(reg);
+ *area_type = get_dram_attr(reg);
interleave_mode = INTERLEAVE_MODE(reg);
pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
@@ -942,7 +881,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
break;
default:
sprintf(msg, "Can't discover socket interleave");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
*socket = sad_interleave[idx];
@@ -957,7 +895,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
if (!new_mci) {
sprintf(msg, "Struct for socket #%u wasn't initialized",
*socket);
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
mci = new_mci;
@@ -973,7 +910,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
limit = TAD_LIMIT(reg);
if (limit <= prv) {
sprintf(msg, "Can't discover the memory channel");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
if (addr <= limit)
@@ -1013,7 +949,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
break;
default:
sprintf(msg, "Can't discover the TAD target");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
*channel_mask = 1 << base_ch;
@@ -1027,7 +962,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
break;
default:
sprintf(msg, "Invalid mirror set. Can't decode addr");
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
} else
@@ -1055,7 +989,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
if (offset > addr) {
sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
offset, addr);
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
addr -= offset;
@@ -1095,7 +1028,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
if (n_rir == MAX_RIR_RANGES) {
sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
ch_addr);
- edac_mc_handle_ce_no_info(mci, msg);
return -EINVAL;
}
rir_way = RIR_WAY(reg);
@@ -1409,7 +1341,8 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
{
struct mem_ctl_info *new_mci;
struct sbridge_pvt *pvt = mci->pvt_info;
- char *type, *optype, *msg, *recoverable_msg;
+ enum hw_event_mc_err_type tp_event;
+ char *type, *optype, msg[256];
bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
bool overflow = GET_BITFIELD(m->status, 62, 62);
bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
@@ -1421,13 +1354,21 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
u32 optypenum = GET_BITFIELD(m->status, 4, 6);
long channel_mask, first_channel;
u8 rank, socket;
- int csrow, rc, dimm;
- char *area_type = "Unknown";
-
- if (ripv)
- type = "NON_FATAL";
- else
- type = "FATAL";
+ int rc, dimm;
+ char *area_type = NULL;
+
+ if (uncorrected_error) {
+ if (ripv) {
+ type = "FATAL";
+ tp_event = HW_EVENT_ERR_FATAL;
+ } else {
+ type = "NON_FATAL";
+ tp_event = HW_EVENT_ERR_UNCORRECTED;
+ }
+ } else {
+ type = "CORRECTED";
+ tp_event = HW_EVENT_ERR_CORRECTED;
+ }
/*
* According with Table 15-9 of the Intel Architecture spec vol 3A,
@@ -1445,19 +1386,19 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
} else {
switch (optypenum) {
case 0:
- optype = "generic undef request";
+ optype = "generic undef request error";
break;
case 1:
- optype = "memory read";
+ optype = "memory read error";
break;
case 2:
- optype = "memory write";
+ optype = "memory write error";
break;
case 3:
- optype = "addr/cmd";
+ optype = "addr/cmd error";
break;
case 4:
- optype = "memory scrubbing";
+ optype = "memory scrubbing error";
break;
default:
optype = "reserved";
@@ -1466,13 +1407,13 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
}
rc = get_memory_error_data(mci, m->addr, &socket,
- &channel_mask, &rank, area_type);
+ &channel_mask, &rank, &area_type, msg);
if (rc < 0)
- return;
+ goto err_parsing;
new_mci = get_mci_for_node_id(socket);
if (!new_mci) {
- edac_mc_handle_ce_no_info(mci, "Error: socket got corrupted!");
- return;
+ strcpy(msg, "Error: socket got corrupted!");
+ goto err_parsing;
}
mci = new_mci;
pvt = mci->pvt_info;
@@ -1486,45 +1427,39 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
else
dimm = 2;
- csrow = pvt->csrow_map[first_channel][dimm];
-
- if (uncorrected_error && recoverable)
- recoverable_msg = " recoverable";
- else
- recoverable_msg = "";
/*
- * FIXME: What should we do with "channel" information on mcelog?
- * Probably, we can just discard it, as the channel information
- * comes from the get_memory_error_data() address decoding
+ * FIXME: On some memory configurations (mirror, lockstep), the
+ * Memory Controller can't point the error to a single DIMM. The
+ * EDAC core should be handling the channel mask, in order to point
+ * to the group of dimm's where the error may be happening.
*/
- msg = kasprintf(GFP_ATOMIC,
- "%d %s error(s): %s on %s area %s%s: cpu=%d Err=%04x:%04x (ch=%d), "
- "addr = 0x%08llx => socket=%d, Channel=%ld(mask=%ld), rank=%d\n",
- core_err_cnt,
- area_type,
- optype,
- type,
- recoverable_msg,
- overflow ? "OVERFLOW" : "",
- m->cpu,
- mscod, errcode,
- channel, /* 1111b means not specified */
- (long long) m->addr,
- socket,
- first_channel, /* This is the real channel on SB */
- channel_mask,
- rank);
+ snprintf(msg, sizeof(msg),
+ "count:%d%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d",
+ core_err_cnt,
+ overflow ? " OVERFLOW" : "",
+ (uncorrected_error && recoverable) ? " recoverable" : "",
+ area_type,
+ mscod, errcode,
+ socket,
+ channel_mask,
+ rank);
debugf0("%s", msg);
+ /* FIXME: need support for channel mask */
+
/* Call the helper to output message */
- if (uncorrected_error)
- edac_mc_handle_fbd_ue(mci, csrow, 0, 0, msg);
- else
- edac_mc_handle_fbd_ce(mci, csrow, 0, msg);
+ edac_mc_handle_error(tp_event, mci,
+ m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
+ channel, dimm, -1,
+ optype, msg, m);
+ return;
+err_parsing:
+ edac_mc_handle_error(tp_event, mci, 0, 0, 0,
+ -1, -1, -1,
+ msg, "", m);
- kfree(msg);
}
/*
@@ -1669,8 +1604,6 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
__func__, mci, &sbridge_dev->pdev[0]->dev);
- mce_unregister_decode_chain(&sbridge_mce_dec);
-
/* Remove MC sysfs nodes */
edac_mc_del_mc(mci->dev);
@@ -1683,16 +1616,25 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
{
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct sbridge_pvt *pvt;
- int rc, channels, csrows;
+ int rc;
/* Check the number of active and not disabled channels */
- rc = sbridge_get_active_channels(sbridge_dev->bus, &channels, &csrows);
+ rc = check_if_ecc_is_active(sbridge_dev->bus);
if (unlikely(rc < 0))
return rc;
/* allocate a new MC control structure */
- mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, sbridge_dev->mc);
+ layers[0].type = EDAC_MC_LAYER_CHANNEL;
+ layers[0].size = NUM_CHANNELS;
+ layers[0].is_virt_csrow = false;
+ layers[1].type = EDAC_MC_LAYER_SLOT;
+ layers[1].size = MAX_DIMMS;
+ layers[1].is_virt_csrow = true;
+ mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
+ sizeof(*pvt));
+
if (unlikely(!mci))
return -ENOMEM;
@@ -1738,7 +1680,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
goto fail0;
}
- mce_register_decode_chain(&sbridge_mce_dec);
return 0;
fail0:
@@ -1867,8 +1808,10 @@ static int __init sbridge_init(void)
pci_rc = pci_register_driver(&sbridge_driver);
- if (pci_rc >= 0)
+ if (pci_rc >= 0) {
+ mce_register_decode_chain(&sbridge_mce_dec);
return 0;
+ }
sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
pci_rc);
@@ -1884,6 +1827,7 @@ static void __exit sbridge_exit(void)
{
debugf2("MC: " __FILE__ ": %s()\n", __func__);
pci_unregister_driver(&sbridge_driver);
+ mce_unregister_decode_chain(&sbridge_mce_dec);
}
module_init(sbridge_init);
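
Both i7core_edac and sb_edac now attach their MCE decode notifier once per module rather than once per registered memory controller, and only after pci_register_driver() has succeeded; teardown unregisters the notifier after the PCI driver is gone. The sketch below shows that ordering under stated assumptions: my_driver, my_mce_dec, the device ID and the stub callbacks are hypothetical stand-ins, not anything from the patch.

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <asm/mce.h>		/* mce_register_decode_chain() on x86 */

static int my_mce_check(struct notifier_block *nb, unsigned long val, void *data)
{
	return NOTIFY_DONE;	/* stub decoder */
}

static struct notifier_block my_mce_dec = {
	.notifier_call = my_mce_check,
};

static const struct pci_device_id my_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0000) },	/* hypothetical device ID */
	{ }
};

static int __devinit my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return -ENODEV;		/* stub: a real driver would register its MC here */
}

static void __devexit my_remove(struct pci_dev *pdev)
{
}

static struct pci_driver my_driver = {
	.name = "my_edac",
	.probe = my_probe,
	.remove = __devexit_p(my_remove),
	.id_table = my_pci_tbl,
};

static int __init my_edac_init(void)
{
	int pci_rc = pci_register_driver(&my_driver);

	if (pci_rc >= 0) {
		/* hook into the MCE decode chain only once the driver is registered */
		mce_register_decode_chain(&my_mce_dec);
		return 0;
	}
	return pci_rc;
}

static void __exit my_edac_exit(void)
{
	pci_unregister_driver(&my_driver);
	/* unhook only after every controller instance has been torn down */
	mce_unregister_decode_chain(&my_mce_dec);
}

module_init(my_edac_init);
module_exit(my_edac_exit);
MODULE_LICENSE("GPL");
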
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
index e99d009..7bb4614 100644
--- a/drivers/edac/tile_edac.c
+++ b/drivers/edac/tile_edac.c
@@ -71,7 +71,10 @@ static void tile_edac_check(struct mem_ctl_info *mci)
if (mem_error.sbe_count != priv->ce_count) {
dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node);
priv->ce_count = mem_error.sbe_count;
- edac_mc_handle_ce(mci, 0, 0, 0, 0, 0, mci->ctl_name);
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, 0,
+ 0, 0, -1,
+ mci->ctl_name, "", NULL);
}
}
@@ -84,6 +87,7 @@ static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
struct csrow_info *csrow = &mci->csrows[0];
struct tile_edac_priv *priv = mci->pvt_info;
struct mshim_mem_info mem_info;
+ struct dimm_info *dimm = csrow->channels[0].dimm;
if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info,
sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) !=
@@ -93,27 +97,25 @@ static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
}
if (mem_info.mem_ecc)
- csrow->edac_mode = EDAC_SECDED;
+ dimm->edac_mode = EDAC_SECDED;
else
- csrow->edac_mode = EDAC_NONE;
+ dimm->edac_mode = EDAC_NONE;
switch (mem_info.mem_type) {
case DDR2:
- csrow->mtype = MEM_DDR2;
+ dimm->mtype = MEM_DDR2;
break;
case DDR3:
- csrow->mtype = MEM_DDR3;
+ dimm->mtype = MEM_DDR3;
break;
default:
return -1;
}
- csrow->first_page = 0;
- csrow->nr_pages = mem_info.mem_size >> PAGE_SHIFT;
- csrow->last_page = csrow->first_page + csrow->nr_pages - 1;
- csrow->grain = TILE_EDAC_ERROR_GRAIN;
- csrow->dtype = DEV_UNKNOWN;
+ dimm->nr_pages = mem_info.mem_size >> PAGE_SHIFT;
+ dimm->grain = TILE_EDAC_ERROR_GRAIN;
+ dimm->dtype = DEV_UNKNOWN;
return 0;
}
@@ -123,6 +125,7 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
char hv_file[32];
int hv_devhdl;
struct mem_ctl_info *mci;
+ struct edac_mc_layer layers[2];
struct tile_edac_priv *priv;
int rc;
@@ -132,8 +135,14 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
return -EINVAL;
/* A TILE MC has a single channel and one chip-select row. */
- mci = edac_mc_alloc(sizeof(struct tile_edac_priv),
- TILE_EDAC_NR_CSROWS, TILE_EDAC_NR_CHANS, pdev->id);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = TILE_EDAC_NR_CSROWS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = TILE_EDAC_NR_CHANS;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
+ sizeof(struct tile_edac_priv));
if (mci == NULL)
return -ENOMEM;
priv = mci->pvt_info;
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index a438297..1ac7962 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -215,19 +215,26 @@ static void x38_process_error_info(struct mem_ctl_info *mci,
return;
if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
- edac_mc_handle_ce_no_info(mci, "UE overwrote CE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
+ -1, -1, -1,
+ "UE overwrote CE", "", NULL);
info->errsts = info->errsts2;
}
for (channel = 0; channel < x38_channel_num; channel++) {
log = info->eccerrlog[channel];
if (log & X38_ECCERRLOG_UE) {
- edac_mc_handle_ue(mci, 0, 0,
- eccerrlog_row(channel, log), "x38 UE");
+ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
+ 0, 0, 0,
+ eccerrlog_row(channel, log),
+ -1, -1,
+ "x38 UE", "", NULL);
} else if (log & X38_ECCERRLOG_CE) {
- edac_mc_handle_ce(mci, 0, 0,
- eccerrlog_syndrome(log),
- eccerrlog_row(channel, log), 0, "x38 CE");
+ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
+ 0, 0, eccerrlog_syndrome(log),
+ eccerrlog_row(channel, log),
+ -1, -1,
+ "x38 CE", "", NULL);
}
}
}
@@ -317,9 +324,9 @@ static unsigned long drb_to_nr_pages(
static int x38_probe1(struct pci_dev *pdev, int dev_idx)
{
int rc;
- int i;
+ int i, j;
struct mem_ctl_info *mci = NULL;
- unsigned long last_page;
+ struct edac_mc_layer layers[2];
u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
bool stacked;
void __iomem *window;
@@ -335,7 +342,13 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
how_many_channel(pdev);
/* FIXME: unconventional pvt_info usage */
- mci = edac_mc_alloc(0, X38_RANKS, x38_channel_num, 0);
+ layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+ layers[0].size = X38_RANKS;
+ layers[0].is_virt_csrow = true;
+ layers[1].type = EDAC_MC_LAYER_CHANNEL;
+ layers[1].size = x38_channel_num;
+ layers[1].is_virt_csrow = false;
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
if (!mci)
return -ENOMEM;
@@ -363,7 +376,6 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
* cumulative; the last one will contain the total memory
* contained in all ranks.
*/
- last_page = -1UL;
for (i = 0; i < mci->nr_csrows; i++) {
unsigned long nr_pages;
struct csrow_info *csrow = &mci->csrows[i];
@@ -372,20 +384,18 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
i / X38_RANKS_PER_CHANNEL,
i % X38_RANKS_PER_CHANNEL);
- if (nr_pages == 0) {
- csrow->mtype = MEM_EMPTY;
+ if (nr_pages == 0)
continue;
- }
- csrow->first_page = last_page + 1;
- last_page += nr_pages;
- csrow->last_page = last_page;
- csrow->nr_pages = nr_pages;
+ for (j = 0; j < x38_channel_num; j++) {
+ struct dimm_info *dimm = csrow->channels[j].dimm;
- csrow->grain = nr_pages << PAGE_SHIFT;
- csrow->mtype = MEM_DDR2;
- csrow->dtype = DEV_UNKNOWN;
- csrow->edac_mode = EDAC_UNKNOWN;
+ dimm->nr_pages = nr_pages / x38_channel_num;
+ dimm->grain = nr_pages << PAGE_SHIFT;
+ dimm->mtype = MEM_DDR2;
+ dimm->dtype = DEV_UNKNOWN;
+ dimm->edac_mode = EDAC_UNKNOWN;
+ }
}
x38_clear_error_info(mci);
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 23416e4..a4ed30b 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -116,8 +116,8 @@ const char *max8997_extcon_cable[] = {
[5] = "Charge-downstream",
[6] = "MHL",
[7] = "Dock-desk",
- [7] = "Dock-card",
- [8] = "JIG",
+ [8] = "Dock-card",
+ [9] = "JIG",
NULL,
};
@@ -514,6 +514,7 @@ static int __devexit max8997_muic_remove(struct platform_device *pdev)
extcon_dev_unregister(info->edev);
+ kfree(info->edev);
kfree(info);
return 0;
diff --git a/drivers/extcon/extcon_class.c b/drivers/extcon/extcon_class.c
index f598a70..159aeb0 100644
--- a/drivers/extcon/extcon_class.c
+++ b/drivers/extcon/extcon_class.c
@@ -762,7 +762,7 @@ int extcon_dev_register(struct extcon_dev *edev, struct device *dev)
#if defined(CONFIG_ANDROID)
if (switch_class)
ret = class_compat_create_link(switch_class, edev->dev,
- dev);
+ NULL);
#endif /* CONFIG_ANDROID */
spin_lock_init(&edev->lock);
diff --git a/drivers/extcon/extcon_gpio.c b/drivers/extcon/extcon_gpio.c
index fe7a07b..8a0dcc1 100644
--- a/drivers/extcon/extcon_gpio.c
+++ b/drivers/extcon/extcon_gpio.c
@@ -125,6 +125,7 @@ static int __devinit gpio_extcon_probe(struct platform_device *pdev)
if (ret < 0)
goto err_request_irq;
+ platform_set_drvdata(pdev, extcon_data);
/* Perform initial detection */
gpio_extcon_work(&extcon_data->work.work);
@@ -146,6 +147,7 @@ static int __devexit gpio_extcon_remove(struct platform_device *pdev)
struct gpio_extcon_data *extcon_data = platform_get_drvdata(pdev);
cancel_delayed_work_sync(&extcon_data->work);
+ free_irq(extcon_data->irq, extcon_data);
gpio_free(extcon_data->gpio);
extcon_dev_unregister(&extcon_data->edev);
devm_kfree(&pdev->dev, extcon_data);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 0356099..542f0c0 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -114,6 +114,14 @@ config GPIO_EP93XX
depends on ARCH_EP93XX
select GPIO_GENERIC
+config GPIO_MM_LANTIQ
+ bool "Lantiq Memory mapped GPIOs"
+ depends on LANTIQ && SOC_XWAY
+ help
+ This enables support for memory mapped GPIOs on the External Bus Unit
+ (EBU) found on Lantiq SoCs. The GPIOs are output-only, as they are
+ created by attaching a 16-bit latch to the bus.
+
config GPIO_MPC5200
def_bool y
depends on PPC_MPC52xx
@@ -128,7 +136,7 @@ config GPIO_MPC8XXX
config GPIO_MSM_V1
tristate "Qualcomm MSM GPIO v1"
- depends on GPIOLIB && ARCH_MSM
+ depends on GPIOLIB && ARCH_MSM && (ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50)
help
Say yes here to support the GPIO interface on ARM v6 based
Qualcomm MSM chips. Most of the pins on the MSM can be
@@ -358,6 +366,16 @@ config GPIO_STMPE
This enables support for the GPIOs found on the STMPE I/O
Expanders.
+config GPIO_STP_XWAY
+ bool "XWAY STP GPIOs"
+ depends on SOC_XWAY
+ help
+ This enables support for the Serial To Parallel (STP) unit found on
+ XWAY SoCs. The STP allows the SoC to drive a cascade of shift registers
+ that can be up to 24 bits wide. This peripheral is aimed at driving LEDs.
+ Some of the GPIOs/LEDs can be automatically updated by the SoC with DSL
+ and PHY status.
+
config GPIO_TC3589X
bool "TC3589X GPIOs"
depends on MFD_TC3589X
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index fde36e5..0f55662 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
obj-$(CONFIG_GPIO_MC9S08DZ60) += gpio-mc9s08dz60.o
obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o
obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
+obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o
@@ -54,6 +55,7 @@ obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o
obj-$(CONFIG_GPIO_STA2X11) += gpio-sta2x11.o
obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
+obj-$(CONFIG_GPIO_STP_XWAY) += gpio-stp-xway.o
obj-$(CONFIG_GPIO_SX150X) += gpio-sx150x.o
obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index 9e9947c..1077754 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -98,6 +98,7 @@ int devm_gpio_request_one(struct device *dev, unsigned gpio,
return 0;
}
+EXPORT_SYMBOL(devm_gpio_request_one);
/**
* devm_gpio_free - free an interrupt
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
new file mode 100644
index 0000000..2983dfb
--- /dev/null
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -0,0 +1,158 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <lantiq_soc.h>
+
+/*
+ * By attaching hardware latches to the EBU it is possible to create
+ * output-only GPIOs. This driver configures a special memory address which,
+ * when written to, outputs 16 bits to the latches.
+ */
+
+#define LTQ_EBU_BUSCON 0x1e7ff /* 16 bit access, slowest timing */
+#define LTQ_EBU_WP 0x80000000 /* write protect bit */
+
+struct ltq_mm {
+ struct of_mm_gpio_chip mmchip;
+ u16 shadow; /* shadow the latches state */
+};
+
+/**
+ * ltq_mm_apply() - write the shadow value to the ebu address.
+ * @chip: Pointer to our private data structure.
+ *
+ * Write the shadow value to the EBU to set the GPIOs. We need to take the
+ * global EBU lock to make sure that PCI/MTD accesses don't break.
+ */
+static void ltq_mm_apply(struct ltq_mm *chip)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ebu_lock, flags);
+ ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1);
+ __raw_writew(chip->shadow, chip->mmchip.regs);
+ ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
+ spin_unlock_irqrestore(&ebu_lock, flags);
+}
+
+/**
+ * ltq_mm_set() - gpio_chip->set - set gpios.
+ * @gc: Pointer to gpio_chip device structure.
+ * @offset: GPIO signal number.
+ * @value: Value to be written to the specified signal.
+ *
+ * Set the shadow value and call ltq_mm_apply.
+ */
+static void ltq_mm_set(struct gpio_chip *gc, unsigned offset, int value)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct ltq_mm *chip =
+ container_of(mm_gc, struct ltq_mm, mmchip);
+
+ if (value)
+ chip->shadow |= (1 << offset);
+ else
+ chip->shadow &= ~(1 << offset);
+ ltq_mm_apply(chip);
+}
+
+/**
+ * ltq_mm_dir_out() - gpio_chip->dir_out - set gpio direction.
+ * @gc: Pointer to gpio_chip device structure.
+ * @offset: GPIO signal number.
+ * @value: Value to be written to the specified signal.
+ *
+ * Same as ltq_mm_set, always returns 0.
+ */
+static int ltq_mm_dir_out(struct gpio_chip *gc, unsigned offset, int value)
+{
+ ltq_mm_set(gc, offset, value);
+
+ return 0;
+}
+
+/**
+ * ltq_mm_save_regs() - Set initial values of GPIO pins
+ * @mm_gc: pointer to memory mapped GPIO chip structure
+ */
+static void ltq_mm_save_regs(struct of_mm_gpio_chip *mm_gc)
+{
+ struct ltq_mm *chip =
+ container_of(mm_gc, struct ltq_mm, mmchip);
+
+ /* tell the ebu controller which memory address we will be using */
+ ltq_ebu_w32(CPHYSADDR(chip->mmchip.regs) | 0x1, LTQ_EBU_ADDRSEL1);
+
+ ltq_mm_apply(chip);
+}
+
+static int ltq_mm_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct ltq_mm *chip;
+ const __be32 *shadow;
+ int ret = 0;
+
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get memory resource\n");
+ return -ENOENT;
+ }
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->mmchip.gc.ngpio = 16;
+ chip->mmchip.gc.label = "gpio-mm-ltq";
+ chip->mmchip.gc.direction_output = ltq_mm_dir_out;
+ chip->mmchip.gc.set = ltq_mm_set;
+ chip->mmchip.save_regs = ltq_mm_save_regs;
+
+ /* store the shadow value if one was passed by the devicetree */
+ shadow = of_get_property(pdev->dev.of_node, "lantiq,shadow", NULL);
+ if (shadow)
+ chip->shadow = be32_to_cpu(*shadow);
+
+ ret = of_mm_gpiochip_add(pdev->dev.of_node, &chip->mmchip);
+ if (ret)
+ kfree(chip);
+ return ret;
+}
+
+static const struct of_device_id ltq_mm_match[] = {
+ { .compatible = "lantiq,gpio-mm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_mm_match);
+
+static struct platform_driver ltq_mm_driver = {
+ .probe = ltq_mm_probe,
+ .driver = {
+ .name = "gpio-mm-ltq",
+ .owner = THIS_MODULE,
+ .of_match_table = ltq_mm_match,
+ },
+};
+
+static int __init ltq_mm_init(void)
+{
+ return platform_driver_register(&ltq_mm_driver);
+}
+
+subsys_initcall(ltq_mm_init);
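
The new gpio-mm-lantiq.c file above only adds the provider side. For illustration, a minimal consumer sketch of how such a latched, output-only line could be driven from another kernel module; the GPIO number and the label are hypothetical and not part of this patch.

#include <linux/gpio.h>
#include <linux/delay.h>

/* Hypothetical GPIO number where the gpio-mm-ltq lines were mapped. */
#define LATCH_RESET_GPIO	480

static int example_pulse_latch_reset(void)
{
	int ret;

	/* Each gpio_set_value() makes the driver rewrite the full 16-bit
	 * shadow word to the EBU address, updating the external latches. */
	ret = gpio_request_one(LATCH_RESET_GPIO, GPIOF_OUT_INIT_LOW,
			       "periph-reset");
	if (ret)
		return ret;

	msleep(10);
	gpio_set_value(LATCH_RESET_GPIO, 1);
	gpio_free(LATCH_RESET_GPIO);

	return 0;
}
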
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index c337143..04691d3 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -33,8 +34,6 @@
#include <asm-generic/bug.h>
#include <asm/mach/irq.h>
-#define irq_to_gpio(irq) ((irq) - MXC_GPIO_IRQ_START)
-
enum mxc_gpio_hwtype {
IMX1_GPIO, /* runs on i.mx1 */
IMX21_GPIO, /* runs on i.mx21 and i.mx27 */
@@ -61,7 +60,7 @@ struct mxc_gpio_port {
void __iomem *base;
int irq;
int irq_high;
- int virtual_irq_start;
+ struct irq_domain *domain;
struct bgpio_chip bgc;
u32 both_edges;
};
@@ -144,14 +143,15 @@ static LIST_HEAD(mxc_gpio_ports);
static int gpio_set_irq_type(struct irq_data *d, u32 type)
{
- u32 gpio = irq_to_gpio(d->irq);
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mxc_gpio_port *port = gc->private;
u32 bit, val;
+ u32 gpio_idx = d->hwirq;
+ u32 gpio = port->bgc.gc.base + gpio_idx;
int edge;
void __iomem *reg = port->base;
- port->both_edges &= ~(1 << (gpio & 31));
+ port->both_edges &= ~(1 << gpio_idx);
switch (type) {
case IRQ_TYPE_EDGE_RISING:
edge = GPIO_INT_RISE_EDGE;
@@ -168,7 +168,7 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
edge = GPIO_INT_HIGH_LEV;
pr_debug("mxc: set GPIO %d to high trigger\n", gpio);
}
- port->both_edges |= 1 << (gpio & 31);
+ port->both_edges |= 1 << gpio_idx;
break;
case IRQ_TYPE_LEVEL_LOW:
edge = GPIO_INT_LOW_LEV;
@@ -180,11 +180,11 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
return -EINVAL;
}
- reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
- bit = gpio & 0xf;
+ reg += GPIO_ICR1 + ((gpio_idx & 0x10) >> 2); /* ICR1 or ICR2 */
+ bit = gpio_idx & 0xf;
val = readl(reg) & ~(0x3 << (bit << 1));
writel(val | (edge << (bit << 1)), reg);
- writel(1 << (gpio & 0x1f), port->base + GPIO_ISR);
+ writel(1 << gpio_idx, port->base + GPIO_ISR);
return 0;
}
@@ -217,15 +217,13 @@ static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
/* handle 32 interrupts in one status register */
static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat)
{
- u32 gpio_irq_no_base = port->virtual_irq_start;
-
while (irq_stat != 0) {
int irqoffset = fls(irq_stat) - 1;
if (port->both_edges & (1 << irqoffset))
mxc_flip_edge(port, irqoffset);
- generic_handle_irq(gpio_irq_no_base + irqoffset);
+ generic_handle_irq(irq_find_mapping(port->domain, irqoffset));
irq_stat &= ~(1 << irqoffset);
}
@@ -276,10 +274,9 @@ static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc)
*/
static int gpio_set_wake_irq(struct irq_data *d, u32 enable)
{
- u32 gpio = irq_to_gpio(d->irq);
- u32 gpio_idx = gpio & 0x1F;
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mxc_gpio_port *port = gc->private;
+ u32 gpio_idx = d->hwirq;
if (enable) {
if (port->irq_high && (gpio_idx >= 16))
@@ -296,12 +293,12 @@ static int gpio_set_wake_irq(struct irq_data *d, u32 enable)
return 0;
}
-static void __init mxc_gpio_init_gc(struct mxc_gpio_port *port)
+static void __init mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
- gc = irq_alloc_generic_chip("gpio-mxc", 1, port->virtual_irq_start,
+ gc = irq_alloc_generic_chip("gpio-mxc", 1, irq_base,
port->base, handle_level_irq);
gc->private = port;
@@ -352,7 +349,7 @@ static int mxc_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
struct mxc_gpio_port *port =
container_of(bgc, struct mxc_gpio_port, bgc);
- return port->virtual_irq_start + offset;
+ return irq_find_mapping(port->domain, offset);
}
static int __devinit mxc_gpio_probe(struct platform_device *pdev)
@@ -360,6 +357,7 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct mxc_gpio_port *port;
struct resource *iores;
+ int irq_base;
int err;
mxc_gpio_get_hw(pdev);
@@ -398,10 +396,12 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
writel(~0, port->base + GPIO_ISR);
if (mxc_gpio_hwtype == IMX21_GPIO) {
- /* setup one handler for all GPIO interrupts */
- if (pdev->id == 0)
- irq_set_chained_handler(port->irq,
- mx2_gpio_irq_handler);
+ /*
+ * Set up one handler for all GPIO interrupts. Setting the handler
+ * is needed only once, but doing it for every port is more robust
+ * and easier.
+ */
+ irq_set_chained_handler(port->irq, mx2_gpio_irq_handler);
} else {
/* setup one handler for each entry */
irq_set_chained_handler(port->irq, mx3_gpio_irq_handler);
@@ -430,20 +430,30 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
if (err)
goto out_bgpio_remove;
- /*
- * In dt case, we use gpio number range dynamically
- * allocated by gpio core.
- */
- port->virtual_irq_start = MXC_GPIO_IRQ_START + (np ? port->bgc.gc.base :
- pdev->id * 32);
+ irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
+ if (irq_base < 0) {
+ err = irq_base;
+ goto out_gpiochip_remove;
+ }
+
+ port->domain = irq_domain_add_legacy(np, 32, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+ if (!port->domain) {
+ err = -ENODEV;
+ goto out_irqdesc_free;
+ }
/* gpio-mxc can be a generic irq chip */
- mxc_gpio_init_gc(port);
+ mxc_gpio_init_gc(port, irq_base);
list_add_tail(&port->node, &mxc_gpio_ports);
return 0;
+out_irqdesc_free:
+ irq_free_descs(irq_base, 32);
+out_gpiochip_remove:
+ WARN_ON(gpiochip_remove(&port->bgc.gc) < 0);
out_bgpio_remove:
bgpio_remove(&port->bgc);
out_iounmap:
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index c4ed172..4fbc208 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -174,12 +174,22 @@ static inline void _gpio_dbck_enable(struct gpio_bank *bank)
if (bank->dbck_enable_mask && !bank->dbck_enabled) {
clk_enable(bank->dbck);
bank->dbck_enabled = true;
+
+ __raw_writel(bank->dbck_enable_mask,
+ bank->base + bank->regs->debounce_en);
}
}
static inline void _gpio_dbck_disable(struct gpio_bank *bank)
{
if (bank->dbck_enable_mask && bank->dbck_enabled) {
+ /*
+ * Disable debounce before cutting its clock. If debounce is
+ * enabled but the clock is not, the GPIO module seems unable to
+ * detect events and generate interrupts, at least on OMAP3.
+ */
+ __raw_writel(0, bank->base + bank->regs->debounce_en);
+
clk_disable(bank->dbck);
bank->dbck_enabled = false;
}
@@ -1081,7 +1091,6 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
bank->is_mpuio = pdata->is_mpuio;
bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
bank->loses_context = pdata->loses_context;
- bank->get_context_loss_count = pdata->get_context_loss_count;
bank->regs = pdata->regs;
#ifdef CONFIG_OF_GPIO
bank->chip.of_node = of_node_get(node);
@@ -1135,6 +1144,9 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
omap_gpio_chip_init(bank);
omap_gpio_show_rev(bank);
+ if (bank->loses_context)
+ bank->get_context_loss_count = pdata->get_context_loss_count;
+
pm_runtime_put(bank->dev);
list_add_tail(&bank->node, &omap_gpio_list);
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 7bb0044..b6453d0 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -2833,7 +2833,7 @@ static __init void exynos5_gpiolib_init(void)
}
/* need to set base address for gpc4 */
- exonys5_gpios_1[11].base = gpio_base1 + 0x2E0;
+ exynos5_gpios_1[11].base = gpio_base1 + 0x2E0;
/* need to set base address for gpx */
chip = &exynos5_gpios_1[21];
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 38416be..6064fb3 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -383,8 +383,9 @@ static int __devinit gsta_probe(struct platform_device *dev)
}
spin_lock_init(&chip->lock);
gsta_gpio_setup(chip);
- for (i = 0; i < GSTA_NR_GPIO; i++)
- gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
+ if (gpio_pdata)
+ for (i = 0; i < GSTA_NR_GPIO; i++)
+ gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
 /* 384 was used in previous code: keep it for compatibility with other drivers */
err = irq_alloc_descs(-1, 384, GSTA_NR_GPIO, NUMA_NO_NODE);
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
new file mode 100644
index 0000000..e35096b
--- /dev/null
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -0,0 +1,301 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/of_platform.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <lantiq_soc.h>
+
+/*
+ * The Serial To Parallel (STP) block is found on MIPS-based Lantiq SoCs. It is
+ * a peripheral controller used to drive external shift-register cascades. At
+ * most 3 groups of 8 bits can be driven. The hardware can also let the DSL
+ * modem drive the 2 LSBs of the cascade automatically.
+ */
+
+/* control register 0 */
+#define XWAY_STP_CON0 0x00
+/* control register 1 */
+#define XWAY_STP_CON1 0x04
+/* data register 0 */
+#define XWAY_STP_CPU0 0x08
+/* data register 1 */
+#define XWAY_STP_CPU1 0x0C
+/* access register */
+#define XWAY_STP_AR 0x10
+
+/* software or hardware update select bit */
+#define XWAY_STP_CON_SWU BIT(31)
+
+/* automatic update rates */
+#define XWAY_STP_2HZ 0
+#define XWAY_STP_4HZ BIT(23)
+#define XWAY_STP_8HZ BIT(24)
+#define XWAY_STP_10HZ (BIT(24) | BIT(23))
+#define XWAY_STP_SPEED_MASK (0xf << 23)
+
+/* clock source for automatic update */
+#define XWAY_STP_UPD_FPI BIT(31)
+#define XWAY_STP_UPD_MASK (BIT(31) | BIT(30))
+
+/* let the adsl core drive the 2 LSBs */
+#define XWAY_STP_ADSL_SHIFT 24
+#define XWAY_STP_ADSL_MASK 0x3
+
+/* 2 groups of 3 bits can be driven by the phys */
+#define XWAY_STP_PHY_MASK 0x3
+#define XWAY_STP_PHY1_SHIFT 27
+#define XWAY_STP_PHY2_SHIFT 15
+
+/* STP has 3 groups of 8 bits */
+#define XWAY_STP_GROUP0 BIT(0)
+#define XWAY_STP_GROUP1 BIT(1)
+#define XWAY_STP_GROUP2 BIT(2)
+#define XWAY_STP_GROUP_MASK (0x7)
+
+/* Edge configuration bits */
+#define XWAY_STP_FALLING BIT(26)
+#define XWAY_STP_EDGE_MASK BIT(26)
+
+#define xway_stp_r32(m, reg) __raw_readl(m + reg)
+#define xway_stp_w32(m, val, reg) __raw_writel(val, m + reg)
+#define xway_stp_w32_mask(m, clear, set, reg) \
+ ltq_w32((ltq_r32(m + reg) & ~(clear)) | (set), \
+ m + reg)
+
+struct xway_stp {
+ struct gpio_chip gc;
+ void __iomem *virt;
+ u32 edge; /* rising or falling edge triggered shift register */
+ u16 shadow; /* shadow the shift registers state */
+ u8 groups; /* we can drive 1-3 groups of 8 bits each */
+ u8 dsl; /* the 2 LSBs can be driven by the dsl core */
+ u8 phy1; /* 3 bits can be driven by phy1 */
+ u8 phy2; /* 3 bits can be driven by phy2 */
+ u8 reserved; /* mask out the hw driven bits in gpio_request */
+};
+
+/**
+ * xway_stp_set() - gpio_chip->set - set gpios.
+ * @gc: Pointer to gpio_chip device structure.
+ * @gpio: GPIO signal number.
+ * @val: Value to be written to specified signal.
+ *
+ * Set the shadow value and trigger a software update of the shift register.
+ */
+static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val)
+{
+ struct xway_stp *chip =
+ container_of(gc, struct xway_stp, gc);
+
+ if (val)
+ chip->shadow |= BIT(gpio);
+ else
+ chip->shadow &= ~BIT(gpio);
+ xway_stp_w32(chip->virt, chip->shadow, XWAY_STP_CPU0);
+ xway_stp_w32_mask(chip->virt, 0, XWAY_STP_CON_SWU, XWAY_STP_CON0);
+}
+
+/**
+ * xway_stp_dir_out() - gpio_chip->dir_out - set gpio direction.
+ * @gc: Pointer to gpio_chip device structure.
+ * @gpio: GPIO signal number.
+ * @val: Value to be written to specified signal.
+ *
+ * Same as xway_stp_set, always returns 0.
+ */
+static int xway_stp_dir_out(struct gpio_chip *gc, unsigned gpio, int val)
+{
+ xway_stp_set(gc, gpio, val);
+
+ return 0;
+}
+
+/**
+ * xway_stp_request() - gpio_chip->request
+ * @gc: Pointer to gpio_chip device structure.
+ * @gpio: GPIO signal number.
+ *
+ * Requests for pins that are driven by the hardware are rejected.
+ */
+static int xway_stp_request(struct gpio_chip *gc, unsigned gpio)
+{
+ struct xway_stp *chip =
+ container_of(gc, struct xway_stp, gc);
+
+ if ((gpio < 8) && (chip->reserved & BIT(gpio))) {
+ dev_err(gc->dev, "GPIO %d is driven by hardware\n", gpio);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * xway_stp_hw_init() - Configure the STP unit
+ * @chip: Pointer to our private data structure.
+ */
+static int xway_stp_hw_init(struct xway_stp *chip)
+{
+ /* sane defaults */
+ xway_stp_w32(chip->virt, 0, XWAY_STP_AR);
+ xway_stp_w32(chip->virt, 0, XWAY_STP_CPU0);
+ xway_stp_w32(chip->virt, 0, XWAY_STP_CPU1);
+ xway_stp_w32(chip->virt, XWAY_STP_CON_SWU, XWAY_STP_CON0);
+ xway_stp_w32(chip->virt, 0, XWAY_STP_CON1);
+
+ /* apply edge trigger settings for the shift register */
+ xway_stp_w32_mask(chip->virt, XWAY_STP_EDGE_MASK,
+ chip->edge, XWAY_STP_CON0);
+
+ /* apply led group settings */
+ xway_stp_w32_mask(chip->virt, XWAY_STP_GROUP_MASK,
+ chip->groups, XWAY_STP_CON1);
+
+ /* tell the hardware which pins are controlled by the dsl modem */
+ xway_stp_w32_mask(chip->virt,
+ XWAY_STP_ADSL_MASK << XWAY_STP_ADSL_SHIFT,
+ chip->dsl << XWAY_STP_ADSL_SHIFT,
+ XWAY_STP_CON0);
+
+ /* tell the hardware which pins are controlled by the phys */
+ xway_stp_w32_mask(chip->virt,
+ XWAY_STP_PHY_MASK << XWAY_STP_PHY1_SHIFT,
+ chip->phy1 << XWAY_STP_PHY1_SHIFT,
+ XWAY_STP_CON0);
+ xway_stp_w32_mask(chip->virt,
+ XWAY_STP_PHY_MASK << XWAY_STP_PHY2_SHIFT,
+ chip->phy2 << XWAY_STP_PHY2_SHIFT,
+ XWAY_STP_CON1);
+
+ /* mask out the hw driven bits in gpio_request */
+ chip->reserved = (chip->phy2 << 5) | (chip->phy1 << 2) | chip->dsl;
+
+ /*
+ * if we have pins that are driven by hw, we need to tell the stp what
+ * clock to use as a timer.
+ */
+ if (chip->reserved)
+ xway_stp_w32_mask(chip->virt, XWAY_STP_UPD_MASK,
+ XWAY_STP_UPD_FPI, XWAY_STP_CON1);
+
+ return 0;
+}
+
+static int __devinit xway_stp_probe(struct platform_device *pdev)
+{
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ const __be32 *shadow, *groups, *dsl, *phy;
+ struct xway_stp *chip;
+ struct clk *clk;
+ int ret = 0;
+
+ if (!res) {
+ dev_err(&pdev->dev, "failed to request STP resource\n");
+ return -ENOENT;
+ }
+
+ chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->virt = devm_request_and_ioremap(&pdev->dev, res);
+ if (!chip->virt) {
+ dev_err(&pdev->dev, "failed to remap STP memory\n");
+ return -ENOMEM;
+ }
+ chip->gc.dev = &pdev->dev;
+ chip->gc.label = "stp-xway";
+ chip->gc.direction_output = xway_stp_dir_out;
+ chip->gc.set = xway_stp_set;
+ chip->gc.request = xway_stp_request;
+ chip->gc.base = -1;
+ chip->gc.owner = THIS_MODULE;
+
+ /* store the shadow value if one was passed by the devicetree */
+ shadow = of_get_property(pdev->dev.of_node, "lantiq,shadow", NULL);
+ if (shadow)
+ chip->shadow = be32_to_cpu(*shadow);
+
+ /* find out which gpio groups should be enabled */
+ groups = of_get_property(pdev->dev.of_node, "lantiq,groups", NULL);
+ if (groups)
+ chip->groups = be32_to_cpu(*groups) & XWAY_STP_GROUP_MASK;
+ else
+ chip->groups = XWAY_STP_GROUP0;
+ chip->gc.ngpio = fls(chip->groups) * 8;
+
+ /* find out which gpios are controlled by the dsl core */
+ dsl = of_get_property(pdev->dev.of_node, "lantiq,dsl", NULL);
+ if (dsl)
+ chip->dsl = be32_to_cpu(*dsl) & XWAY_STP_ADSL_MASK;
+
+ /* find out which gpios are controlled by the phys */
+ if (of_machine_is_compatible("lantiq,ar9") ||
+ of_machine_is_compatible("lantiq,gr9") ||
+ of_machine_is_compatible("lantiq,vr9")) {
+ phy = of_get_property(pdev->dev.of_node, "lantiq,phy1", NULL);
+ if (phy)
+ chip->phy1 = be32_to_cpu(*phy) & XWAY_STP_PHY_MASK;
+ phy = of_get_property(pdev->dev.of_node, "lantiq,phy2", NULL);
+ if (phy)
+ chip->phy2 = be32_to_cpu(*phy) & XWAY_STP_PHY_MASK;
+ }
+
+ /* check which edge trigger we should use, default to a falling edge */
+ if (!of_find_property(pdev->dev.of_node, "lantiq,rising", NULL))
+ chip->edge = XWAY_STP_FALLING;
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Failed to get clock\n");
+ return PTR_ERR(clk);
+ }
+ clk_enable(clk);
+
+ ret = xway_stp_hw_init(chip);
+ if (!ret)
+ ret = gpiochip_add(&chip->gc);
+
+ if (!ret)
+ dev_info(&pdev->dev, "Init done\n");
+
+ return ret;
+}
+
+static const struct of_device_id xway_stp_match[] = {
+ { .compatible = "lantiq,gpio-stp-xway" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xway_stp_match);
+
+static struct platform_driver xway_stp_driver = {
+ .probe = xway_stp_probe,
+ .driver = {
+ .name = "gpio-stp-xway",
+ .owner = THIS_MODULE,
+ .of_match_table = xway_stp_match,
+ },
+};
+
+int __init xway_stp_init(void)
+{
+ return platform_driver_register(&xway_stp_driver);
+}
+
+subsys_initcall(xway_stp_init);
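
To make the reserved-pin handling above concrete, a minimal consumer sketch; the offsets, the label strings, and the assumption that the chip's dynamically assigned base is passed in are hypothetical, chosen only to mirror how xway_stp_request() rejects pins covered by chip->reserved ((phy2 << 5) | (phy1 << 2) | dsl).

#include <linux/gpio.h>
#include <linux/printk.h>

/* Hypothetical offsets on one board: bit 0 is DSL-driven, bit 4 is free. */
#define STP_DSL_LED	0
#define STP_USER_LED	4

static void example_claim_stp_lines(int base)
{
	/* Hardware-driven pins are refused with -ENODEV by the request hook. */
	if (gpio_request_one(base + STP_DSL_LED, GPIOF_OUT_INIT_LOW, "dsl-led"))
		pr_info("offset %d is driven by the DSL core\n", STP_DSL_LED);

	/* Free pins are driven through the shift-register shadow word. */
	if (!gpio_request_one(base + STP_USER_LED, GPIOF_OUT_INIT_HIGH,
			      "user-led"))
		gpio_set_value(base + STP_USER_LED, 0);
}
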
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index c1ad288..11f29c8 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -149,6 +149,9 @@ static int __devinit tps65910_gpio_probe(struct platform_device *pdev)
tps65910_gpio->gpio_chip.set = tps65910_gpio_set;
tps65910_gpio->gpio_chip.get = tps65910_gpio_get;
tps65910_gpio->gpio_chip.dev = &pdev->dev;
+#ifdef CONFIG_OF_GPIO
+ tps65910_gpio->gpio_chip.of_node = tps65910->dev->of_node;
+#endif
if (pdata && pdata->gpio_base)
tps65910_gpio->gpio_chip.base = pdata->gpio_base;
else
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index 92ea535..aa61ad2 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -89,8 +89,11 @@ static int wm8994_gpio_direction_out(struct gpio_chip *chip,
struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
struct wm8994 *wm8994 = wm8994_gpio->wm8994;
+ if (value)
+ value = WM8994_GPN_LVL;
+
return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
- WM8994_GPN_DIR, 0);
+ WM8994_GPN_DIR | WM8994_GPN_LVL, value);
}
static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index d703823..7053140 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -35,9 +35,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
{0,}
};
+
+static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+ struct apertures_struct *ap;
+ bool primary = false;
+
+ ap = alloc_apertures(1);
+ ap->ranges[0].base = pci_resource_start(pdev, 0);
+ ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+ primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+ remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
+ kfree(ap);
+}
+
static int __devinit
cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ cirrus_kick_out_firmware_fb(pdev);
+
return drm_get_pci_dev(pdev, ent, &driver);
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 21bdfa8..64ea597 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -145,7 +145,7 @@ struct cirrus_device {
struct ttm_bo_device bdev;
atomic_t validate_sequence;
} ttm;
-
+ bool mm_inited;
};
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 2ebcd11..50e170f 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -275,12 +275,17 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
pci_resource_len(dev->pdev, 0),
DRM_MTRR_WC);
+ cirrus->mm_inited = true;
return 0;
}
void cirrus_mm_fini(struct cirrus_device *cirrus)
{
struct drm_device *dev = cirrus->dev;
+
+ if (!cirrus->mm_inited)
+ return;
+
ttm_bo_device_release(&cirrus->ttm.bdev);
cirrus_ttm_global_release(cirrus);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 92cea9d..08a7aa7 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2116,7 +2116,7 @@ out:
return ret;
}
-static int format_check(struct drm_mode_fb_cmd2 *r)
+static int format_check(const struct drm_mode_fb_cmd2 *r)
{
uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
@@ -2185,7 +2185,7 @@ static int format_check(struct drm_mode_fb_cmd2 *r)
}
}
-static int framebuffer_check(struct drm_mode_fb_cmd2 *r)
+static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
{
int ret, hsub, vsub, num_planes, i;
@@ -3126,7 +3126,7 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
static bool drm_property_change_is_valid(struct drm_property *property,
- __u64 value)
+ uint64_t value)
{
if (property->flags & DRM_MODE_PROP_IMMUTABLE)
return false;
@@ -3136,7 +3136,7 @@ static bool drm_property_change_is_valid(struct drm_property *property,
return true;
} else if (property->flags & DRM_MODE_PROP_BITMASK) {
int i;
- __u64 valid_mask = 0;
+ uint64_t valid_mask = 0;
for (i = 0; i < property->num_values; i++)
valid_mask |= (1ULL << property->values[i]);
return !(value & ~valid_mask);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 608bddf..a8743c3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -30,7 +30,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
-#include <linux/export.h>
+#include <linux/module.h>
#include "drmP.h"
#include "drm_edid.h"
#include "drm_edid_modes.h"
@@ -66,6 +66,8 @@
#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
+/* Force reduced-blanking timings for detailed modes */
+#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -120,6 +122,9 @@ static struct edid_quirk {
/* Samsung SyncMaster 22[5-6]BW */
{ "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
{ "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+
+ /* ViewSonic VA2026w */
+ { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
};
/*** DDC fetch and block validation ***/
@@ -144,6 +149,10 @@ int drm_edid_header_is_valid(const u8 *raw_edid)
}
EXPORT_SYMBOL(drm_edid_header_is_valid);
+static int edid_fixup __read_mostly = 6;
+module_param_named(edid_fixup, edid_fixup, int, 0400);
+MODULE_PARM_DESC(edid_fixup,
+ "Minimum number of valid EDID header bytes (0-8, default 6)");
/*
* Sanity check the EDID block (base or extension). Return 0 if the block
@@ -155,10 +164,13 @@ bool drm_edid_block_valid(u8 *raw_edid, int block)
u8 csum = 0;
struct edid *edid = (struct edid *)raw_edid;
+ if (edid_fixup > 8 || edid_fixup < 0)
+ edid_fixup = 6;
+
if (block == 0) {
int score = drm_edid_header_is_valid(raw_edid);
if (score == 8) ;
- else if (score >= 6) {
+ else if (score >= edid_fixup) {
DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
memcpy(raw_edid, edid_header, sizeof(edid_header));
} else {
@@ -598,7 +610,7 @@ static bool
drm_monitor_supports_rb(struct edid *edid)
{
if (edid->revision >= 4) {
- bool ret;
+ bool ret = false;
drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
return ret;
}
@@ -885,12 +897,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
"Wrong Hsync/Vsync pulse width\n");
return NULL;
}
+
+ if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+ mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
+ if (!mode)
+ return NULL;
+
+ goto set_size;
+ }
+
mode = drm_mode_create(dev);
if (!mode)
return NULL;
- mode->type = DRM_MODE_TYPE_DRIVER;
-
if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
timing->pixel_clock = cpu_to_le16(1088);
@@ -914,8 +933,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
drm_mode_do_interlace_quirk(mode, pt);
- drm_mode_set_name(mode);
-
if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
pt->misc |= DRM_EDID_PT_HSYNC_POSITIVE | DRM_EDID_PT_VSYNC_POSITIVE;
}
@@ -925,6 +942,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+set_size:
mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
@@ -938,6 +956,9 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
mode->height_mm = edid->height_cm * 10;
}
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ drm_mode_set_name(mode);
+
return mode;
}
@@ -1018,6 +1039,24 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
return true;
}
+static bool valid_inferred_mode(const struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ struct drm_display_mode *m;
+ bool ok = false;
+
+ list_for_each_entry(m, &connector->probed_modes, head) {
+ if (mode->hdisplay == m->hdisplay &&
+ mode->vdisplay == m->vdisplay &&
+ drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
+ return false; /* duplicated */
+ if (mode->hdisplay <= m->hdisplay &&
+ mode->vdisplay <= m->vdisplay)
+ ok = true;
+ }
+ return ok;
+}
+
static int
drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct detailed_timing *timing)
@@ -1027,7 +1066,8 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct drm_device *dev = connector->dev;
for (i = 0; i < drm_num_dmt_modes; i++) {
- if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
+ if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+ valid_inferred_mode(connector, drm_dmt_modes + i)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
@@ -1067,7 +1107,8 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
return modes;
fixup_mode_1366x768(newmode);
- if (!mode_in_range(newmode, edid, timing)) {
+ if (!mode_in_range(newmode, edid, timing) ||
+ !valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
continue;
}
@@ -1095,7 +1136,8 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
return modes;
fixup_mode_1366x768(newmode);
- if (!mode_in_range(newmode, edid, timing)) {
+ if (!mode_in_range(newmode, edid, timing) ||
+ !valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
continue;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 4209531..d6de2e07f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -244,8 +244,8 @@ static const struct file_operations exynos_drm_driver_fops = {
};
static struct drm_driver exynos_drm_driver = {
- .driver_features = DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM |
- DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
+ DRIVER_GEM | DRIVER_PRIME,
.load = exynos_drm_load,
.unload = exynos_drm_unload,
.open = exynos_drm_open,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 6e9ac7b..23d5ad3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -172,19 +172,12 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
manager_ops->commit(manager->dev);
}
-static struct drm_crtc *
-exynos_drm_encoder_get_crtc(struct drm_encoder *encoder)
-{
- return encoder->crtc;
-}
-
static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
.dpms = exynos_drm_encoder_dpms,
.mode_fixup = exynos_drm_encoder_mode_fixup,
.mode_set = exynos_drm_encoder_mode_set,
.prepare = exynos_drm_encoder_prepare,
.commit = exynos_drm_encoder_commit,
- .get_crtc = exynos_drm_encoder_get_crtc,
};
static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index f82a299..4ccfe43 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -51,11 +51,22 @@ struct exynos_drm_fb {
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+ unsigned int i;
DRM_DEBUG_KMS("%s\n", __FILE__);
drm_framebuffer_cleanup(fb);
+ for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
+ struct drm_gem_object *obj;
+
+ if (exynos_fb->exynos_gem_obj[i] == NULL)
+ continue;
+
+ obj = &exynos_fb->exynos_gem_obj[i]->base;
+ drm_gem_object_unreference_unlocked(obj);
+ }
+
kfree(exynos_fb);
exynos_fb = NULL;
}
@@ -134,11 +145,11 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-ENOENT);
}
- drm_gem_object_unreference_unlocked(obj);
-
fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
- if (IS_ERR(fb))
+ if (IS_ERR(fb)) {
+ drm_gem_object_unreference_unlocked(obj);
return fb;
+ }
exynos_fb = to_exynos_fb(fb);
nr = exynos_drm_format_num_buffers(fb->pixel_format);
@@ -152,8 +163,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-ENOENT);
}
- drm_gem_object_unreference_unlocked(obj);
-
exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 3ecb30d..5082375 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -31,10 +31,10 @@
static inline int exynos_drm_format_num_buffers(uint32_t format)
{
switch (format) {
- case DRM_FORMAT_NV12M:
+ case DRM_FORMAT_NV12:
case DRM_FORMAT_NV12MT:
return 2;
- case DRM_FORMAT_YUV420M:
+ case DRM_FORMAT_YUV420:
return 3;
default:
return 1;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index fc91293..5c8b683 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -689,7 +689,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset)
{
- struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
int ret = 0;
@@ -710,15 +709,13 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
goto unlock;
}
- exynos_gem_obj = to_exynos_gem_obj(obj);
-
- if (!exynos_gem_obj->base.map_list.map) {
- ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
+ if (!obj->map_list.map) {
+ ret = drm_gem_create_mmap_offset(obj);
if (ret)
goto out;
}
- *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
+ *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
out:
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 68ef010..e2147a2 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -365,7 +365,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
switch (win_data->pixel_format) {
case DRM_FORMAT_NV12MT:
tiled_mode = true;
- case DRM_FORMAT_NV12M:
+ case DRM_FORMAT_NV12:
crcb_mode = false;
buf_num = 2;
break;
@@ -601,18 +601,20 @@ static void mixer_win_reset(struct mixer_context *ctx)
mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
/* setting graphical layers */
-
val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
val |= MXR_GRP_CFG_WIN_BLEND_EN;
+ val |= MXR_GRP_CFG_BLEND_PRE_MUL;
+ val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
/* the same configuration for both layers */
mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
-
- val |= MXR_GRP_CFG_BLEND_PRE_MUL;
- val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
+ /* setting video layers */
+ val = MXR_GRP_CFG_ALPHA_VAL(0);
+ mixer_reg_write(res, MXR_VIDEO_CFG, val);
+
/* configuration of Video Processor Registers */
vp_win_reset(ctx);
vp_default_filter(res);
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 9764045..b7e7b49 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -78,21 +78,6 @@ static int cdv_backlight_combination_mode(struct drm_device *dev)
return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE;
}
-static int cdv_get_brightness(struct backlight_device *bd)
-{
- struct drm_device *dev = bl_get_data(bd);
- u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-
- if (cdv_backlight_combination_mode(dev)) {
- u8 lbpc;
-
- val &= ~1;
- pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
- val *= lbpc;
- }
- return val;
-}
-
static u32 cdv_get_max_backlight(struct drm_device *dev)
{
u32 max = REG_READ(BLC_PWM_CTL);
@@ -110,6 +95,22 @@ static u32 cdv_get_max_backlight(struct drm_device *dev)
return max;
}
+static int cdv_get_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+
+ if (cdv_backlight_combination_mode(dev)) {
+ u8 lbpc;
+
+ val &= ~1;
+ pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
+ val *= lbpc;
+ }
+ return (val * 100) / cdv_get_max_backlight(dev);
+
+}
+
static int cdv_set_brightness(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
@@ -120,6 +121,9 @@ static int cdv_set_brightness(struct backlight_device *bd)
if (level < 1)
level = 1;
+ level *= cdv_get_max_backlight(dev);
+ level /= 100;
+
if (cdv_backlight_combination_mode(dev)) {
u32 max = cdv_get_max_backlight(dev);
u8 lbpc;
@@ -157,7 +161,6 @@ static int cdv_backlight_init(struct drm_device *dev)
cdv_backlight_device->props.brightness =
cdv_get_brightness(cdv_backlight_device);
- cdv_backlight_device->props.max_brightness = cdv_get_max_backlight(dev);
backlight_update_status(cdv_backlight_device);
dev_priv->backlight_device = cdv_backlight_device;
return 0;
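
The Cedarview backlight hunks above switch the reported brightness to a 0-100 scale instead of a raw PWM duty cycle. A tiny worked sketch of the two conversions; the maximum and duty values are hypothetical stand-ins for what cdv_get_max_backlight() and BLC_PWM_CTL would actually return.

static void example_cdv_brightness_scaling(void)
{
	unsigned int max = 250;		/* hypothetical cdv_get_max_backlight() */
	unsigned int duty = 125;	/* raw duty cycle read from hardware */

	/* cdv_get_brightness() now reports a percentage... */
	unsigned int percent = (duty * 100) / max;	/* 50 */

	/* ...and cdv_set_brightness() scales it back to a duty cycle. */
	unsigned int duty_out = (percent * max) / 100;	/* 125 */

	(void)duty_out;
}
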
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index 4f186ec..c430bd4 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -144,6 +144,8 @@ struct opregion_asle {
#define ASLE_CBLV_VALID (1<<31)
+static struct psb_intel_opregion *system_opregion;
+
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -205,7 +207,7 @@ void psb_intel_opregion_enable_asle(struct drm_device *dev)
struct drm_psb_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
- if (asle) {
+ if (asle && system_opregion) {
 /* Don't do this on Medfield or other non-PC-like devices, they
 use the bit for something different altogether */
psb_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
@@ -221,7 +223,6 @@ void psb_intel_opregion_enable_asle(struct drm_device *dev)
#define ACPI_EV_LID (1<<1)
#define ACPI_EV_DOCK (1<<2)
-static struct psb_intel_opregion *system_opregion;
static int psb_intel_opregion_video_event(struct notifier_block *nb,
unsigned long val, void *data)
@@ -266,9 +267,6 @@ void psb_intel_opregion_init(struct drm_device *dev)
system_opregion = opregion;
register_acpi_notifier(&psb_intel_opregion_notifier);
}
-
- if (opregion->asle)
- psb_intel_opregion_enable_asle(dev);
}
void psb_intel_opregion_fini(struct drm_device *dev)
diff --git a/drivers/gpu/drm/gma500/opregion.h b/drivers/gpu/drm/gma500/opregion.h
index 72dc6b9..4a90f8b 100644
--- a/drivers/gpu/drm/gma500/opregion.h
+++ b/drivers/gpu/drm/gma500/opregion.h
@@ -27,6 +27,7 @@ extern void psb_intel_opregion_asle_intr(struct drm_device *dev);
extern void psb_intel_opregion_init(struct drm_device *dev);
extern void psb_intel_opregion_fini(struct drm_device *dev);
extern int psb_intel_opregion_setup(struct drm_device *dev);
+extern void psb_intel_opregion_enable_asle(struct drm_device *dev);
#else
@@ -46,4 +47,8 @@ extern inline int psb_intel_opregion_setup(struct drm_device *dev)
{
return 0;
}
+
+extern inline void psb_intel_opregion_enable_asle(struct drm_device *dev)
+{
+}
#endif
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index eff039b..5971bc8 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -144,6 +144,10 @@ static int psb_backlight_init(struct drm_device *dev)
psb_backlight_device->props.max_brightness = 100;
backlight_update_status(psb_backlight_device);
dev_priv->backlight_device = psb_backlight_device;
+
+ /* This must occur after the backlight is properly initialised */
+ psb_lid_timer_init(dev_priv);
+
return 0;
}
@@ -354,13 +358,6 @@ static int psb_chip_setup(struct drm_device *dev)
return 0;
}
-/* Not exactly an erratum more an irritation */
-static void psb_chip_errata(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- psb_lid_timer_init(dev_priv);
-}
-
static void psb_chip_teardown(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -379,7 +376,6 @@ const struct psb_ops psb_chip_ops = {
.sgx_offset = PSB_SGX_OFFSET,
.chip_setup = psb_chip_setup,
.chip_teardown = psb_chip_teardown,
- .errata = psb_chip_errata,
.crtc_helper = &psb_intel_helper_funcs,
.crtc_funcs = &psb_intel_crtc_funcs,
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index caba6e0..a8858a9 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -374,6 +374,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
if (ret)
return ret;
+ psb_intel_opregion_enable_asle(dev);
#if 0
/*enable runtime pm at last*/
pm_runtime_enable(&dev->pdev->dev);
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index f920fb5..fa94391 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -130,11 +130,10 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
return -EINVAL;
/* This is all entirely broken */
- down_write(&current->mm->mmap_sem);
old_fops = file_priv->filp->f_op;
file_priv->filp->f_op = &i810_buffer_fops;
dev_priv->mmap_buffer = buf;
- buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total,
+ buf_priv->virtual = (void *)vm_mmap(file_priv->filp, 0, buf->total,
PROT_READ | PROT_WRITE,
MAP_SHARED, buf->bus_address);
dev_priv->mmap_buffer = NULL;
@@ -145,7 +144,6 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
retcode = PTR_ERR(buf_priv->virtual);
buf_priv->virtual = NULL;
}
- up_write(&current->mm->mmap_sem);
return retcode;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index eb2b3c2..5363e9c 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2032,6 +2032,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
1, minor);
+ drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
+ 1, minor);
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index f947926..36822b9 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1401,6 +1401,27 @@ i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
}
}
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+ struct apertures_struct *ap;
+ struct pci_dev *pdev = dev_priv->dev->pdev;
+ bool primary;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return;
+
+ ap->ranges[0].base = dev_priv->dev->agp->base;
+ ap->ranges[0].size =
+ dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+ primary =
+ pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+ remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+ kfree(ap);
+}
+
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
@@ -1446,6 +1467,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
+ dev_priv->mm.gtt = intel_gtt_get();
+ if (!dev_priv->mm.gtt) {
+ DRM_ERROR("Failed to initialize GTT\n");
+ ret = -ENODEV;
+ goto put_bridge;
+ }
+
+ i915_kick_out_firmware_fb(dev_priv);
+
pci_set_master(dev->pdev);
/* overlay on gen2 is broken and can't address above 1G */
@@ -1471,13 +1501,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto put_bridge;
}
- dev_priv->mm.gtt = intel_gtt_get();
- if (!dev_priv->mm.gtt) {
- DRM_ERROR("Failed to initialize GTT\n");
- ret = -ENODEV;
- goto out_rmmap;
- }
-
aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
dev_priv->mm.gtt_mapping =
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 238a521..9fe9ebe 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -233,6 +233,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
.has_blt_ring = 1,
.has_llc = 1,
.has_pch_split = 1,
+ .has_force_wake = 1,
};
static const struct intel_device_info intel_sandybridge_m_info = {
@@ -243,6 +244,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_blt_ring = 1,
.has_llc = 1,
.has_pch_split = 1,
+ .has_force_wake = 1,
};
static const struct intel_device_info intel_ivybridge_d_info = {
@@ -252,6 +254,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
.has_blt_ring = 1,
.has_llc = 1,
.has_pch_split = 1,
+ .has_force_wake = 1,
};
static const struct intel_device_info intel_ivybridge_m_info = {
@@ -262,6 +265,7 @@ static const struct intel_device_info intel_ivybridge_m_info = {
.has_blt_ring = 1,
.has_llc = 1,
.has_pch_split = 1,
+ .has_force_wake = 1,
};
static const struct intel_device_info intel_valleyview_m_info = {
@@ -289,6 +293,7 @@ static const struct intel_device_info intel_haswell_d_info = {
.has_blt_ring = 1,
.has_llc = 1,
.has_pch_split = 1,
+ .has_force_wake = 1,
};
static const struct intel_device_info intel_haswell_m_info = {
@@ -298,6 +303,7 @@ static const struct intel_device_info intel_haswell_m_info = {
.has_blt_ring = 1,
.has_llc = 1,
.has_pch_split = 1,
+ .has_force_wake = 1,
};
static const struct pci_device_id pciidlist[] = { /* aka */
@@ -1139,10 +1145,9 @@ MODULE_LICENSE("GPL and additional rights");
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
- (((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE)) && \
- (!IS_VALLEYVIEW((dev_priv)->dev))
+ ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE))
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 377c21f..b0b676a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -285,6 +285,7 @@ struct intel_device_info {
u8 is_ivybridge:1;
u8 is_valleyview:1;
u8 has_pch_split:1;
+ u8 has_force_wake:1;
u8 is_haswell:1;
u8 has_fbc:1;
u8 has_pipe_cxsr:1;
@@ -942,6 +943,9 @@ struct drm_i915_gem_object {
/* prime dma-buf support */
struct sg_table *sg_table;
+ void *dma_buf_vmapping;
+ int vmapping_count;
+
/**
* Used for performing relocations during execbuffer insertion.
*/
@@ -1098,6 +1102,8 @@ struct drm_i915_file_private {
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
+
#include "i915_trace.h"
/**
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c1e5c66..288d7b8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2063,10 +2063,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (obj->gtt_space == NULL)
return 0;
- if (obj->pin_count != 0) {
- DRM_ERROR("Attempting to unbind pinned buffer\n");
- return -EINVAL;
- }
+ if (obj->pin_count)
+ return -EBUSY;
ret = i915_gem_object_finish_gpu(obj);
if (ret)
@@ -3293,6 +3291,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct address_space *mapping;
+ u32 mask;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL)
@@ -3303,8 +3302,15 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
return NULL;
}
+ mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
+ if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
+ /* 965gm cannot relocate objects above 4GiB. */
+ mask &= ~__GFP_HIGHMEM;
+ mask |= __GFP_DMA32;
+ }
+
mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
- mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ mapping_set_gfp_mask(mapping, mask);
i915_gem_info_add_obj(dev_priv, size);
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 8e26917..aa308e1 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -74,6 +74,59 @@ static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
}
}
+static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+ struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->base.dev;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (obj->dma_buf_vmapping) {
+ obj->vmapping_count++;
+ goto out_unlock;
+ }
+
+ if (!obj->pages) {
+ ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ERR_PTR(ret);
+ }
+ }
+
+ obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
+ if (!obj->dma_buf_vmapping) {
+ DRM_ERROR("failed to vmap object\n");
+ goto out_unlock;
+ }
+
+ obj->vmapping_count = 1;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return obj->dma_buf_vmapping;
+}
+
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_device *dev = obj->base.dev;
+ int ret;
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return;
+
+ --obj->vmapping_count;
+ if (obj->vmapping_count == 0) {
+ vunmap(obj->dma_buf_vmapping);
+ obj->dma_buf_vmapping = NULL;
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
return NULL;
@@ -93,6 +146,11 @@ static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_n
}
+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
static const struct dma_buf_ops i915_dmabuf_ops = {
.map_dma_buf = i915_gem_map_dma_buf,
.unmap_dma_buf = i915_gem_unmap_dma_buf,
@@ -101,6 +159,9 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
.kunmap = i915_gem_dmabuf_kunmap,
.kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+ .mmap = i915_gem_dmabuf_mmap,
+ .vmap = i915_gem_dmabuf_vmap,
+ .vunmap = i915_gem_dmabuf_vunmap,
};
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index cc4a633..ed3224c 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -350,8 +350,8 @@ static void gen6_pm_rps_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
rps_work);
- u8 new_delay = dev_priv->cur_delay;
u32 pm_iir, pm_imr;
+ u8 new_delay;
spin_lock_irq(&dev_priv->rps_lock);
pm_iir = dev_priv->pm_iir;
@@ -360,41 +360,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
I915_WRITE(GEN6_PMIMR, 0);
spin_unlock_irq(&dev_priv->rps_lock);
- if (!pm_iir)
+ if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
mutex_lock(&dev_priv->dev->struct_mutex);
- if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
- if (dev_priv->cur_delay != dev_priv->max_delay)
- new_delay = dev_priv->cur_delay + 1;
- if (new_delay > dev_priv->max_delay)
- new_delay = dev_priv->max_delay;
- } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
- gen6_gt_force_wake_get(dev_priv);
- if (dev_priv->cur_delay != dev_priv->min_delay)
- new_delay = dev_priv->cur_delay - 1;
- if (new_delay < dev_priv->min_delay) {
- new_delay = dev_priv->min_delay;
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
- ((new_delay << 16) & 0x3f0000));
- } else {
- /* Make sure we continue to get down interrupts
- * until we hit the minimum frequency */
- I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
- }
- gen6_gt_force_wake_put(dev_priv);
- }
+
+ if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
+ new_delay = dev_priv->cur_delay + 1;
+ else
+ new_delay = dev_priv->cur_delay - 1;
gen6_set_rps(dev_priv->dev, new_delay);
- dev_priv->cur_delay = new_delay;
- /*
- * rps_lock not held here because clearing is non-destructive. There is
- * an *extremely* unlikely race with gen6_rps_enable() that is prevented
- * by holding struct_mutex for the duration of the write.
- */
mutex_unlock(&dev_priv->dev->struct_mutex);
}
@@ -435,7 +412,6 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
*/
spin_lock_irqsave(&dev_priv->rps_lock, flags);
- WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
dev_priv->pm_iir |= pm_iir;
I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
POSTING_READ(GEN6_PMIMR);
@@ -533,7 +509,7 @@ out:
return ret;
}
-static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
+static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
@@ -573,6 +549,35 @@ static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}
+static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ int pipe;
+
+ if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
+ DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+ (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
+ SDE_AUDIO_POWER_SHIFT_CPT);
+
+ if (pch_iir & SDE_AUX_MASK_CPT)
+ DRM_DEBUG_DRIVER("AUX channel interrupt\n");
+
+ if (pch_iir & SDE_GMBUS_CPT)
+ DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+
+ if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
+ DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
+
+ if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
+ DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
+
+ if (pch_iir & SDE_FDI_MASK_CPT)
+ for_each_pipe(pipe)
+ DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
+ pipe_name(pipe),
+ I915_READ(FDI_RX_IIR(pipe)));
+}
+
static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -614,7 +619,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
if (pch_iir & SDE_HOTPLUG_MASK_CPT)
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
- pch_irq_handler(dev, pch_iir);
+ cpt_irq_handler(dev, pch_iir);
/* clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
@@ -707,7 +712,10 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
if (de_iir & DE_PCH_EVENT) {
if (pch_iir & hotplug_mask)
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
- pch_irq_handler(dev, pch_iir);
+ if (HAS_PCH_CPT(dev))
+ cpt_irq_handler(dev, pch_iir);
+ else
+ ibx_irq_handler(dev, pch_iir);
}
if (de_iir & DE_PCU_EVENT) {
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2d49b95..48d5e8e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -210,6 +210,14 @@
#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
+/* IVB has funny definitions for which plane to flip. */
+#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19)
+#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
+#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
+#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
+
#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
#define MI_MM_SPACE_GTT (1<<8)
#define MI_MM_SPACE_PHYSICAL (0<<8)
@@ -3313,7 +3321,7 @@
/* PCH */
-/* south display engine interrupt */
+/* south display engine interrupt: IBX */
#define SDE_AUDIO_POWER_D (1 << 27)
#define SDE_AUDIO_POWER_C (1 << 26)
#define SDE_AUDIO_POWER_B (1 << 25)
@@ -3349,15 +3357,44 @@
#define SDE_TRANSA_CRC_ERR (1 << 1)
#define SDE_TRANSA_FIFO_UNDER (1 << 0)
#define SDE_TRANS_MASK (0x3f)
-/* CPT */
-#define SDE_CRT_HOTPLUG_CPT (1 << 19)
+
+/* south display engine interrupt: CPT/PPT */
+#define SDE_AUDIO_POWER_D_CPT (1 << 31)
+#define SDE_AUDIO_POWER_C_CPT (1 << 30)
+#define SDE_AUDIO_POWER_B_CPT (1 << 29)
+#define SDE_AUDIO_POWER_SHIFT_CPT 29
+#define SDE_AUDIO_POWER_MASK_CPT (7 << 29)
+#define SDE_AUXD_CPT (1 << 27)
+#define SDE_AUXC_CPT (1 << 26)
+#define SDE_AUXB_CPT (1 << 25)
+#define SDE_AUX_MASK_CPT (7 << 25)
#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
+#define SDE_CRT_HOTPLUG_CPT (1 << 19)
#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
SDE_PORTD_HOTPLUG_CPT | \
SDE_PORTC_HOTPLUG_CPT | \
SDE_PORTB_HOTPLUG_CPT)
+#define SDE_GMBUS_CPT (1 << 17)
+#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
+#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
+#define SDE_FDI_RXC_CPT (1 << 8)
+#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6)
+#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5)
+#define SDE_FDI_RXB_CPT (1 << 4)
+#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2)
+#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1)
+#define SDE_FDI_RXA_CPT (1 << 0)
+#define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \
+ SDE_AUDIO_CP_REQ_B_CPT | \
+ SDE_AUDIO_CP_REQ_A_CPT)
+#define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \
+ SDE_AUDIO_CP_CHG_B_CPT | \
+ SDE_AUDIO_CP_CHG_A_CPT)
+#define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \
+ SDE_FDI_RXB_CPT | \
+ SDE_FDI_RXA_CPT)
#define SDEISR 0xc4000
#define SDEIMR 0xc4004
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 0ede02a..a748e5c 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -740,8 +740,11 @@ static void i915_restore_display(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
+ /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
+ * otherwise we get a blank eDP screen after S3 on some machines
+ */
I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2);
+ I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ee61ad1..a8538ac 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -910,9 +910,10 @@ static void assert_pll(struct drm_i915_private *dev_priv,
/* For ILK+ */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
- struct intel_crtc *intel_crtc, bool state)
+ struct intel_pch_pll *pll,
+ struct intel_crtc *crtc,
+ bool state)
{
- int reg;
u32 val;
bool cur_state;
@@ -921,30 +922,37 @@ static void assert_pch_pll(struct drm_i915_private *dev_priv,
return;
}
- if (!intel_crtc->pch_pll) {
- WARN(1, "asserting PCH PLL enabled with no PLL\n");
+ if (WARN (!pll,
+ "asserting PCH PLL %s with no PLL\n", state_string(state)))
return;
- }
- if (HAS_PCH_CPT(dev_priv->dev)) {
+ val = I915_READ(pll->pll_reg);
+ cur_state = !!(val & DPLL_VCO_ENABLE);
+ WARN(cur_state != state,
+ "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
+ pll->pll_reg, state_string(state), state_string(cur_state), val);
+
+ /* Make sure the selected PLL is correctly attached to the transcoder */
+ if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
u32 pch_dpll;
pch_dpll = I915_READ(PCH_DPLL_SEL);
-
- /* Make sure the selected PLL is enabled to the transcoder */
- WARN(!((pch_dpll >> (4 * intel_crtc->pipe)) & 8),
- "transcoder %d PLL not enabled\n", intel_crtc->pipe);
+ cur_state = pll->pll_reg == _PCH_DPLL_B;
+ if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
+ "PLL[%d] not attached to this transcoder %d: %08x\n",
+ cur_state, crtc->pipe, pch_dpll)) {
+ cur_state = !!(val >> (4*crtc->pipe + 3));
+ WARN(cur_state != state,
+ "PLL[%d] not %s on this transcoder %d: %08x\n",
+ pll->pll_reg == _PCH_DPLL_B,
+ state_string(state),
+ crtc->pipe,
+ val);
+ }
}
-
- reg = intel_crtc->pch_pll->pll_reg;
- val = I915_READ(reg);
- cur_state = !!(val & DPLL_VCO_ENABLE);
- WARN(cur_state != state,
- "PCH PLL state assertion failure (expected %s, current %s)\n",
- state_string(state), state_string(cur_state));
}
-#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
-#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)
+#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
+#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
@@ -1424,7 +1432,7 @@ static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
assert_pch_refclk_enabled(dev_priv);
if (pll->active++ && pll->on) {
- assert_pch_pll_enabled(dev_priv, intel_crtc);
+ assert_pch_pll_enabled(dev_priv, pll, NULL);
return;
}
@@ -1460,12 +1468,12 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
intel_crtc->base.base.id);
if (WARN_ON(pll->active == 0)) {
- assert_pch_pll_disabled(dev_priv, intel_crtc);
+ assert_pch_pll_disabled(dev_priv, pll, NULL);
return;
}
if (--pll->active) {
- assert_pch_pll_enabled(dev_priv, intel_crtc);
+ assert_pch_pll_enabled(dev_priv, pll, NULL);
return;
}
@@ -1495,7 +1503,9 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
BUG_ON(dev_priv->info->gen < 5);
/* Make sure PCH DPLL is enabled */
- assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc));
+ assert_pch_pll_enabled(dev_priv,
+ to_intel_crtc(crtc)->pch_pll,
+ to_intel_crtc(crtc));
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, pipe);
@@ -6148,17 +6158,34 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+ uint32_t plane_bit = 0;
int ret;
ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
if (ret)
goto err;
+ switch(intel_crtc->plane) {
+ case PLANE_A:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
+ break;
+ case PLANE_B:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
+ break;
+ case PLANE_C:
+ plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
+ break;
+ default:
+ WARN_ONCE(1, "unknown plane in flip command\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
ret = intel_ring_begin(ring, 4);
if (ret)
goto err_unpin;
- intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, (obj->gtt_offset));
intel_ring_emit(ring, (MI_NOOP));
@@ -6531,7 +6558,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(HDMIC) & PORT_DETECTED)
intel_hdmi_init(dev, HDMIC);
- if (I915_READ(HDMID) & PORT_DETECTED)
+ if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
intel_hdmi_init(dev, HDMID);
if (I915_READ(PCH_DP_C) & DP_DETECTED)
@@ -6894,19 +6921,6 @@ static void i915_disable_vga(struct drm_device *dev)
POSTING_READ(vga_reg);
}
-static void ivb_pch_pwm_override(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /*
- * IVB has CPU eDP backlight regs too, set things up to let the
- * PCH regs control the backlight
- */
- I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE);
- I915_WRITE(BLC_PWM_CPU_CTL, 0);
- I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE | (1<<30));
-}
-
void intel_modeset_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6923,9 +6937,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
gen6_enable_rps(dev_priv);
gen6_update_ring_freq(dev_priv);
}
-
- if (IS_IVYBRIDGE(dev))
- ivb_pch_pwm_override(dev);
}
void intel_modeset_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 71c7096..c044932 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -32,6 +32,7 @@
#include "drm.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
+#include "drm_edid.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
@@ -67,6 +68,8 @@ struct intel_dp {
struct drm_display_mode *panel_fixed_mode; /* for eDP */
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
+ struct edid *edid; /* cached EDID for eDP */
+ int edid_mode_count;
};
/**
@@ -266,6 +269,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return MODE_H_ILLEGAL;
+
return MODE_OK;
}
@@ -368,7 +374,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
int recv_bytes;
uint32_t status;
uint32_t aux_clock_divider;
- int try, precharge = 5;
+ int try, precharge;
intel_dp_check_edp(intel_dp);
/* The clock divider is based off the hrawclk,
@@ -388,6 +394,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
else
aux_clock_divider = intel_hrawclk(dev) / 2;
+ if (IS_GEN6(dev))
+ precharge = 3;
+ else
+ precharge = 5;
+
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
status = I915_READ(ch_ctl);
@@ -702,6 +713,9 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
mode->clock = intel_dp->panel_fixed_mode->clock;
}
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ return false;
+
DRM_DEBUG_KMS("DP link computation with max lane count %i "
"max bw %02x pixel clock %iKHz\n",
max_lane_count, bws[max_clock], mode->clock);
@@ -1154,11 +1168,10 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Turn eDP power off\n");
- WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
- ironlake_panel_vdd_off_sync(intel_dp); /* finish any pending work */
+ WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
pp = ironlake_get_pp_control(dev_priv);
- pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
@@ -1266,18 +1279,16 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ /* Make sure the panel is off before trying to change the mode. But also
+ * ensure that we have vdd while we switch off the panel. */
+ ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
ironlake_edp_panel_off(intel_dp);
- /* Wake up the sink first */
- ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_link_down(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
-
- /* Make sure the panel is off before trying to
- * change the mode
- */
}
static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1309,10 +1320,11 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
if (mode != DRM_MODE_DPMS_ON) {
+ /* Switching the panel off requires vdd. */
+ ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
ironlake_edp_panel_off(intel_dp);
- ironlake_edp_panel_vdd_on(intel_dp);
intel_dp_sink_dpms(intel_dp, mode);
intel_dp_link_down(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, false);
@@ -1969,6 +1981,8 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
+ ironlake_edp_panel_vdd_on(intel_dp);
+
if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
@@ -1976,6 +1990,8 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
+
+ ironlake_edp_panel_vdd_off(intel_dp, false);
}
static bool
@@ -2112,10 +2128,22 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct edid *edid;
+ int size;
+
+ if (is_edp(intel_dp)) {
+ if (!intel_dp->edid)
+ return NULL;
+
+ size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
+ edid = kmalloc(size, GFP_KERNEL);
+ if (!edid)
+ return NULL;
+
+ memcpy(edid, intel_dp->edid, size);
+ return edid;
+ }
- ironlake_edp_panel_vdd_on(intel_dp);
edid = drm_get_edid(connector, adapter);
- ironlake_edp_panel_vdd_off(intel_dp, false);
return edid;
}
@@ -2125,9 +2153,17 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
struct intel_dp *intel_dp = intel_attached_dp(connector);
int ret;
- ironlake_edp_panel_vdd_on(intel_dp);
+ if (is_edp(intel_dp)) {
+ drm_mode_connector_update_edid_property(connector,
+ intel_dp->edid);
+ ret = drm_add_edid_modes(connector, intel_dp->edid);
+ drm_edid_to_eld(connector,
+ intel_dp->edid);
+ connector->display_info.raw_edid = NULL;
+ return intel_dp->edid_mode_count;
+ }
+
ret = intel_ddc_get_modes(connector, adapter);
- ironlake_edp_panel_vdd_off(intel_dp, false);
return ret;
}
@@ -2317,6 +2353,7 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
i2c_del_adapter(&intel_dp->adapter);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
+ kfree(intel_dp->edid);
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
ironlake_panel_vdd_off_sync(intel_dp);
}
@@ -2500,11 +2537,14 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
}
+ intel_dp_i2c_init(intel_dp, intel_connector, name);
+
/* Cache some DPCD data in the eDP case */
if (is_edp(intel_dp)) {
bool ret;
struct edp_power_seq cur, vbt;
u32 pp_on, pp_off, pp_div;
+ struct edid *edid;
pp_on = I915_READ(PCH_PP_ON_DELAYS);
pp_off = I915_READ(PCH_PP_OFF_DELAYS);
@@ -2572,9 +2612,19 @@ intel_dp_init(struct drm_device *dev, int output_reg)
intel_dp_destroy(&intel_connector->base);
return;
}
- }
- intel_dp_i2c_init(intel_dp, intel_connector, name);
+ ironlake_edp_panel_vdd_on(intel_dp);
+ edid = drm_get_edid(connector, &intel_dp->adapter);
+ if (edid) {
+ drm_mode_connector_update_edid_property(connector,
+ edid);
+ intel_dp->edid_mode_count =
+ drm_add_edid_modes(connector, edid);
+ drm_edid_to_eld(connector, edid);
+ intel_dp->edid = edid;
+ }
+ ironlake_edp_panel_vdd_off(intel_dp, false);
+ }
intel_encoder->hot_plug = intel_dp_hot_plug;
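Note: the intel_dp.c hunks above read the panel EDID once at init time (with VDD forced on) and afterwards hand out copies of that cache, so later detect/get_modes calls never have to power up VDD or touch the AUX channel again. A minimal, stand-alone sketch of the copy-out step, assuming a raw EDID byte buffer whose byte 126 holds the extension count; the names here (copy_cached_edid, EDID_EXT_COUNT) are illustrative, not the driver's actual helpers, which operate on struct edid with kmalloc/memcpy:

#include <stdlib.h>
#include <string.h>

#define EDID_LENGTH	128	/* size of one EDID block */
#define EDID_EXT_COUNT	126	/* offset of the extension-count byte */

/* Return a private copy of a cached raw EDID so the caller can free it
 * independently of the cache; mirrors the kmalloc+memcpy done in
 * intel_dp_get_edid() for the eDP case. Returns NULL if nothing is cached. */
static unsigned char *copy_cached_edid(const unsigned char *cached)
{
	size_t size;
	unsigned char *copy;

	if (!cached)
		return NULL;

	/* total size = base block + extension blocks */
	size = (cached[EDID_EXT_COUNT] + 1) * EDID_LENGTH;
	copy = malloc(size);
	if (!copy)
		return NULL;

	return memcpy(copy, cached, size);
}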
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 4a9707d..1991a44 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -396,11 +396,22 @@ clear_err:
* Wait for bus to IDLE before clearing NAK.
* If we clear the NAK while bus is still active, then it will stay
* active and the next transaction may fail.
+ *
+ * If no ACK is received during the address phase of a transaction, the
+ * adapter must report -ENXIO. It is not clear what to return if no ACK
+ * is received at other times. But we have to be careful to not return
+ * spurious -ENXIO because that will prevent i2c and drm edid functions
+ * from retrying. So return -ENXIO only when gmbus properly quiesces -
+ * timing out seems to happen when there _is_ a ddc chip present, but
+ * it's slow to respond and only answers on the 2nd retry.
*/
+ ret = -ENXIO;
if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
- 10))
+ 10)) {
DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
adapter->name);
+ ret = -ETIMEDOUT;
+ }
/* Toggle the Software Clear Interrupt bit. This has the effect
* of resetting the GMBUS controller and so clearing the
@@ -414,14 +425,6 @@ clear_err:
adapter->name, msgs[i].addr,
(msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
- /*
- * If no ACK is received during the address phase of a transaction,
- * the adapter must report -ENXIO.
- * It is not clear what to return if no ACK is received at other times.
- * So, we always return -ENXIO in all NAK cases, to ensure we send
- * it at least during the one case that is specified.
- */
- ret = -ENXIO;
goto out;
timeout:
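Note: the rewritten clear_err path above picks between -ENXIO and -ETIMEDOUT depending on whether the GMBUS controller actually went idle after the NAK, so i2c/EDID retry loops are not cut short by a spurious -ENXIO. The decision in isolation, as a sketch with a hypothetical bus_went_idle() poll standing in for the wait_for() on GMBUS2:

#include <errno.h>
#include <stdbool.h>

/* Hypothetical poll: returns true if the controller reports idle within
 * timeout_ms; stands in for wait_for((GMBUS2 & GMBUS_ACTIVE) == 0, 10). */
bool bus_went_idle(unsigned int timeout_ms);

/* After a NAK: report -ENXIO (no device acked) only when the bus quiesced
 * cleanly; if it never went idle, report -ETIMEDOUT so callers keep
 * retrying a slow-to-answer DDC chip. */
static int gmbus_nak_errno(void)
{
	if (!bus_went_idle(10))
		return -ETIMEDOUT;
	return -ENXIO;
}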
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 9dee823..08eb04c 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -747,6 +747,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
+ .ident = "Hewlett-Packard HP t5740e Thin Client",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
.ident = "Hewlett-Packard t5745",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 8e79ff6..d0ce2a5 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2270,10 +2270,33 @@ void ironlake_disable_drps(struct drm_device *dev)
void gen6_set_rps(struct drm_device *dev, u8 val)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 swreq;
+ u32 limits;
- swreq = (val & 0x3ff) << 25;
- I915_WRITE(GEN6_RPNSWREQ, swreq);
+ limits = 0;
+ if (val >= dev_priv->max_delay)
+ val = dev_priv->max_delay;
+ else
+ limits |= dev_priv->max_delay << 24;
+
+ if (val <= dev_priv->min_delay)
+ val = dev_priv->min_delay;
+ else
+ limits |= dev_priv->min_delay << 16;
+
+ if (val == dev_priv->cur_delay)
+ return;
+
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(val) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+
+ /* Make sure we continue to get interrupts
+ * until we hit the minimum or maximum frequencies.
+ */
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
+
+ dev_priv->cur_delay = val;
}
void gen6_disable_rps(struct drm_device *dev)
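Note: gen6_set_rps() above now clamps the requested frequency to [min_delay, max_delay] and only arms an interrupt limit for a bound the request has not already hit, so up/down interrupts keep firing until the hardware reaches that bound. The clamp-and-limits computation on its own, as a sketch with plain integers rather than the driver's register helpers:

#include <stdint.h>

/* Compute the clamped frequency request and the matching value for
 * GEN6_RP_INTERRUPT_LIMITS: bits 31:24 hold the max limit, bits 23:16 the
 * min limit, and a bound is dropped once the request already sits on it. */
static uint8_t clamp_rps(uint8_t val, uint8_t min_delay, uint8_t max_delay,
			 uint32_t *limits)
{
	*limits = 0;

	if (val >= max_delay)
		val = max_delay;		/* already at the ceiling */
	else
		*limits |= (uint32_t)max_delay << 24;

	if (val <= min_delay)
		val = min_delay;		/* already at the floor */
	else
		*limits |= (uint32_t)min_delay << 16;

	return val;
}

For example, with min_delay = 6 and max_delay = 18, a request of 20 is clamped to 18 and only the floor limit (6 << 16) remains armed.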
@@ -2327,11 +2350,10 @@ int intel_enable_rc6(const struct drm_device *dev)
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_ring_buffer *ring;
- u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
- u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ u32 rp_state_cap;
+ u32 gt_perf_status;
u32 pcu_mbox, rc6_mask = 0;
u32 gtfifodbg;
- int cur_freq, min_freq, max_freq;
int rc6_mode;
int i;
@@ -2352,6 +2374,14 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
gen6_gt_force_wake_get(dev_priv);
+ rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+
+ /* In units of 100MHz */
+ dev_priv->max_delay = rp_state_cap & 0xff;
+ dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
+ dev_priv->cur_delay = 0;
+
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -2399,8 +2429,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
- 18 << 24 |
- 6 << 16);
+ dev_priv->max_delay << 24 |
+ dev_priv->min_delay << 16);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
I915_WRITE(GEN6_RP_UP_EI, 100000);
@@ -2408,7 +2438,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
- GEN6_RP_MEDIA_HW_MODE |
+ GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
@@ -2426,10 +2456,6 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
- min_freq = (rp_state_cap & 0xff0000) >> 16;
- max_freq = rp_state_cap & 0xff;
- cur_freq = (gt_perf_status & 0xff00) >> 8;
-
/* Check for overclock support */
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
@@ -2440,14 +2466,11 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
if (pcu_mbox & (1<<31)) { /* OC supported */
- max_freq = pcu_mbox & 0xff;
+ dev_priv->max_delay = pcu_mbox & 0xff;
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
}
- /* In units of 100MHz */
- dev_priv->max_delay = max_freq;
- dev_priv->min_delay = min_freq;
- dev_priv->cur_delay = cur_freq;
+ gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
/* requires MSI enabled */
I915_WRITE(GEN6_PMIER,
@@ -3580,8 +3603,9 @@ static void gen6_sanitize_pm(struct drm_device *dev)
limits |= (dev_priv->min_delay & 0x3f) << 16;
if (old != limits) {
- DRM_ERROR("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS expected %08x, was %08x\n",
- limits, old);
+ /* Note that the known failure case is to read back 0. */
+ DRM_DEBUG_DRIVER("Power management discrepancy: GEN6_RP_INTERRUPT_LIMITS "
+ "expected %08x, was %08x\n", limits, old);
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b59b6d5..e5b84ff 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -266,10 +266,15 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
static int init_ring_common(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_device *dev = ring->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = ring->obj;
+ int ret = 0;
u32 head;
+ if (HAS_FORCE_WAKE(dev))
+ gen6_gt_force_wake_get(dev_priv);
+
/* Stop the ring if it's running. */
I915_WRITE_CTL(ring, 0);
I915_WRITE_HEAD(ring, 0);
@@ -317,7 +322,8 @@ static int init_ring_common(struct intel_ring_buffer *ring)
I915_READ_HEAD(ring),
I915_READ_TAIL(ring),
I915_READ_START(ring));
- return -EIO;
+ ret = -EIO;
+ goto out;
}
if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
@@ -326,9 +332,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
ring->head = I915_READ_HEAD(ring);
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring_space(ring);
+ ring->last_retired_head = -1;
}
- return 0;
+out:
+ if (HAS_FORCE_WAKE(dev))
+ gen6_gt_force_wake_put(dev_priv);
+
+ return ret;
}
static int
@@ -987,6 +998,10 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (ret)
goto err_unref;
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto err_unpin;
+
ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
ring->size);
if (ring->virtual_start == NULL) {
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index a949b73..b6a9d45 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -783,10 +783,12 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
((v_sync_len & 0x30) >> 4);
dtd->part2.dtd_flags = 0x18;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- dtd->part2.dtd_flags |= 0x2;
+ dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- dtd->part2.dtd_flags |= 0x4;
+ dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
dtd->part2.sdvo_flags = 0;
dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
@@ -820,9 +822,11 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
mode->clock = dtd->part1.clock * 10;
mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
- if (dtd->part2.dtd_flags & 0x2)
+ if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+ if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
mode->flags |= DRM_MODE_FLAG_PHSYNC;
- if (dtd->part2.dtd_flags & 0x4)
+ if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
mode->flags |= DRM_MODE_FLAG_PVSYNC;
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 6b7b22f..9d03014 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -61,6 +61,11 @@ struct intel_sdvo_caps {
u16 output_flags;
} __attribute__((packed));
+/* Note: SDVO detailed timing flags match EDID misc flags. */
+#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
+#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
+#define DTD_FLAG_INTERLACE (1 << 7)
+
/** This matches the EDID DTD structure, more or less */
struct intel_sdvo_dtd {
struct {
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 3346612..a233a51 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -674,6 +674,54 @@ static const struct tv_mode tv_modes[] = {
.filter_table = filter_table,
},
{
+ .name = "480p",
+ .clock = 107520,
+ .refresh = 59940,
+ .oversample = TV_OVERSAMPLE_4X,
+ .component_only = 1,
+
+ .hsync_end = 64, .hblank_end = 122,
+ .hblank_start = 842, .htotal = 857,
+
+ .progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 12, .vsync_start_f2 = 12,
+ .vsync_len = 12,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 44, .vi_end_f2 = 44,
+ .nbr_end = 479,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
+ .name = "576p",
+ .clock = 107520,
+ .refresh = 50000,
+ .oversample = TV_OVERSAMPLE_4X,
+ .component_only = 1,
+
+ .hsync_end = 64, .hblank_end = 139,
+ .hblank_start = 859, .htotal = 863,
+
+ .progressive = true, .trilevel_sync = false,
+
+ .vsync_start_f1 = 10, .vsync_start_f2 = 10,
+ .vsync_len = 10,
+
+ .veq_ena = false,
+
+ .vi_end_f1 = 48, .vi_end_f2 = 48,
+ .nbr_end = 575,
+
+ .burst_ena = false,
+
+ .filter_table = filter_table,
+ },
+ {
.name = "720p@60Hz",
.clock = 148800,
.refresh = 60000,
@@ -1194,6 +1242,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
I915_WRITE(TV_CTL, save_tv_ctl);
+ POSTING_READ(TV_CTL);
+
+ /* For unknown reasons the hw barfs if we don't do this vblank wait. */
+ intel_wait_for_vblank(intel_tv->base.base.dev,
+ to_intel_crtc(intel_tv->base.base.crtc)->pipe);
/* Restore interrupt config */
if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 3c8e04f..93e832d 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -41,9 +41,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
MODULE_DEVICE_TABLE(pci, pciidlist);
+static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+ struct apertures_struct *ap;
+ bool primary = false;
+
+ ap = alloc_apertures(1);
+ ap->ranges[0].base = pci_resource_start(pdev, 0);
+ ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+ primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+ remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
+ kfree(ap);
+}
+
+
static int __devinit
mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ mgag200_kick_out_firmware_fb(pdev);
+
return drm_get_pci_dev(pdev, ent, &driver);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 634d222..8613cb2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -123,6 +123,9 @@ struct nouveau_bo {
struct drm_gem_object *gem;
int pin_refcnt;
+
+ struct ttm_bo_kmap_obj dma_buf_vmap;
+ int vmapping_count;
};
#define nouveau_bo_tile_layout(nvbo) \
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 153b9a1..1074bc5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -467,7 +467,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;
ret = drm_fb_helper_init(dev, &nfbdev->helper,
- nv_two_heads(dev) ? 2 : 1, 4);
+ dev->mode_config.num_crtc, 4);
if (ret) {
kfree(nfbdev);
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index c58aab7..a25cf2c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -1,3 +1,26 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ */
#include "drmP.h"
#include "drm.h"
@@ -61,6 +84,48 @@ static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
}
+static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+ struct nouveau_bo *nvbo = dma_buf->priv;
+ struct drm_device *dev = nvbo->gem->dev;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+ if (nvbo->vmapping_count) {
+ nvbo->vmapping_count++;
+ goto out_unlock;
+ }
+
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
+ &nvbo->dma_buf_vmap);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ERR_PTR(ret);
+ }
+ nvbo->vmapping_count = 1;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return nvbo->dma_buf_vmap.virtual;
+}
+
+static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct nouveau_bo *nvbo = dma_buf->priv;
+ struct drm_device *dev = nvbo->gem->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ nvbo->vmapping_count--;
+ if (nvbo->vmapping_count == 0) {
+ ttm_bo_kunmap(&nvbo->dma_buf_vmap);
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
+
static const struct dma_buf_ops nouveau_dmabuf_ops = {
.map_dma_buf = nouveau_gem_map_dma_buf,
.unmap_dma_buf = nouveau_gem_unmap_dma_buf,
@@ -69,6 +134,9 @@ static const struct dma_buf_ops nouveau_dmabuf_ops = {
.kmap_atomic = nouveau_gem_kmap_atomic,
.kunmap = nouveau_gem_kunmap,
.kunmap_atomic = nouveau_gem_kunmap_atomic,
+ .mmap = nouveau_gem_prime_mmap,
+ .vmap = nouveau_gem_prime_vmap,
+ .vunmap = nouveau_gem_prime_vunmap,
};
static int
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 01d77d1..3904d79 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1149,7 +1149,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
}
if (tiling_flags & RADEON_TILING_MACRO) {
- if (rdev->family >= CHIP_CAYMAN)
+ if (rdev->family >= CHIP_TAHITI)
+ tmp = rdev->config.si.tile_config;
+ else if (rdev->family >= CHIP_CAYMAN)
tmp = rdev->config.cayman.tile_config;
else
tmp = rdev->config.evergreen.tile_config;
@@ -1177,6 +1179,12 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
} else if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+ if ((rdev->family == CHIP_TAHITI) ||
+ (rdev->family == CHIP_PITCAIRN))
+ fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
+ else if (rdev->family == CHIP_VERDE)
+ fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
+
switch (radeon_crtc->crtc_id) {
case 0:
WREG32(AVIVO_D1VGA_CONTROL, 0);
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index e7b1ec5..486ccdf 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1926,7 +1926,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
r600_hdmi_enable(encoder);
- if (ASIC_IS_DCE4(rdev))
+ if (ASIC_IS_DCE6(rdev))
+ ; /* TODO (use pointers instead of if-s?) */
+ else if (ASIC_IS_DCE4(rdev))
evergreen_hdmi_setmode(encoder, adjusted_mode);
else
r600_hdmi_setmode(encoder, adjusted_mode);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 58991af..7fb3d2e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1029,6 +1029,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ if ((rdev->family == CHIP_JUNIPER) ||
+ (rdev->family == CHIP_CYPRESS) ||
+ (rdev->family == CHIP_HEMLOCK) ||
+ (rdev->family == CHIP_BARTS))
+ WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
}
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
@@ -1553,163 +1558,10 @@ int evergreen_cp_resume(struct radeon_device *rdev)
/*
* Core functions
*/
-static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
- u32 num_tile_pipes,
- u32 num_backends,
- u32 backend_disable_mask)
-{
- u32 backend_map = 0;
- u32 enabled_backends_mask = 0;
- u32 enabled_backends_count = 0;
- u32 cur_pipe;
- u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
- u32 cur_backend = 0;
- u32 i;
- bool force_no_swizzle;
-
- if (num_tile_pipes > EVERGREEN_MAX_PIPES)
- num_tile_pipes = EVERGREEN_MAX_PIPES;
- if (num_tile_pipes < 1)
- num_tile_pipes = 1;
- if (num_backends > EVERGREEN_MAX_BACKENDS)
- num_backends = EVERGREEN_MAX_BACKENDS;
- if (num_backends < 1)
- num_backends = 1;
-
- for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
- if (((backend_disable_mask >> i) & 1) == 0) {
- enabled_backends_mask |= (1 << i);
- ++enabled_backends_count;
- }
- if (enabled_backends_count == num_backends)
- break;
- }
-
- if (enabled_backends_count == 0) {
- enabled_backends_mask = 1;
- enabled_backends_count = 1;
- }
-
- if (enabled_backends_count != num_backends)
- num_backends = enabled_backends_count;
-
- memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
- switch (rdev->family) {
- case CHIP_CEDAR:
- case CHIP_REDWOOD:
- case CHIP_PALM:
- case CHIP_SUMO:
- case CHIP_SUMO2:
- case CHIP_TURKS:
- case CHIP_CAICOS:
- force_no_swizzle = false;
- break;
- case CHIP_CYPRESS:
- case CHIP_HEMLOCK:
- case CHIP_JUNIPER:
- case CHIP_BARTS:
- default:
- force_no_swizzle = true;
- break;
- }
- if (force_no_swizzle) {
- bool last_backend_enabled = false;
-
- force_no_swizzle = false;
- for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
- if (((enabled_backends_mask >> i) & 1) == 1) {
- if (last_backend_enabled)
- force_no_swizzle = true;
- last_backend_enabled = true;
- } else
- last_backend_enabled = false;
- }
- }
-
- switch (num_tile_pipes) {
- case 1:
- case 3:
- case 5:
- case 7:
- DRM_ERROR("odd number of pipes!\n");
- break;
- case 2:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- break;
- case 4:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 1;
- swizzle_pipe[3] = 3;
- }
- break;
- case 6:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 1;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 5;
- }
- break;
- case 8:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- swizzle_pipe[6] = 6;
- swizzle_pipe[7] = 7;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 1;
- swizzle_pipe[5] = 3;
- swizzle_pipe[6] = 5;
- swizzle_pipe[7] = 7;
- }
- break;
- }
-
- for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
- while (((1 << cur_backend) & enabled_backends_mask) == 0)
- cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
-
- backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
- cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
- }
-
- return backend_map;
-}
-
static void evergreen_gpu_init(struct radeon_device *rdev)
{
- u32 cc_rb_backend_disable = 0;
- u32 cc_gc_shader_pipe_config;
- u32 gb_addr_config = 0;
+ u32 gb_addr_config;
u32 mc_shared_chmap, mc_arb_ramcfg;
- u32 gb_backend_map;
- u32 grbm_gfx_index;
u32 sx_debug_1;
u32 smx_dc_ctl0;
u32 sq_config;
@@ -1724,6 +1576,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
u32 sq_stack_resource_mgmt_3;
u32 vgt_cache_invalidation;
u32 hdp_host_path_cntl, tmp;
+ u32 disabled_rb_mask;
int i, j, num_shader_engines, ps_thread_count;
switch (rdev->family) {
@@ -1748,6 +1601,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_JUNIPER:
rdev->config.evergreen.num_ses = 1;
@@ -1769,6 +1623,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_REDWOOD:
rdev->config.evergreen.num_ses = 1;
@@ -1790,6 +1645,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_CEDAR:
default:
@@ -1812,6 +1668,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_PALM:
rdev->config.evergreen.num_ses = 1;
@@ -1833,6 +1690,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_SUMO:
rdev->config.evergreen.num_ses = 1;
@@ -1860,6 +1718,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_SUMO2:
rdev->config.evergreen.num_ses = 1;
@@ -1881,6 +1740,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_BARTS:
rdev->config.evergreen.num_ses = 2;
@@ -1902,6 +1762,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_TURKS:
rdev->config.evergreen.num_ses = 1;
@@ -1923,6 +1784,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_CAICOS:
rdev->config.evergreen.num_ses = 1;
@@ -1944,6 +1806,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
break;
}
@@ -1960,20 +1823,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
evergreen_fix_pci_max_read_req_size(rdev);
- cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
-
- cc_gc_shader_pipe_config |=
- INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
- & EVERGREEN_MAX_PIPES_MASK);
- cc_gc_shader_pipe_config |=
- INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
- & EVERGREEN_MAX_SIMDS_MASK);
-
- cc_rb_backend_disable =
- BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
- & EVERGREEN_MAX_BACKENDS_MASK);
-
-
mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
if ((rdev->family == CHIP_PALM) ||
(rdev->family == CHIP_SUMO) ||
@@ -1982,134 +1831,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
else
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
- switch (rdev->config.evergreen.max_tile_pipes) {
- case 1:
- default:
- gb_addr_config |= NUM_PIPES(0);
- break;
- case 2:
- gb_addr_config |= NUM_PIPES(1);
- break;
- case 4:
- gb_addr_config |= NUM_PIPES(2);
- break;
- case 8:
- gb_addr_config |= NUM_PIPES(3);
- break;
- }
-
- gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
- gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
- gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
- gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
- gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
- gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
-
- if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
- gb_addr_config |= ROW_SIZE(2);
- else
- gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
-
- if (rdev->ddev->pdev->device == 0x689e) {
- u32 efuse_straps_4;
- u32 efuse_straps_3;
- u8 efuse_box_bit_131_124;
-
- WREG32(RCU_IND_INDEX, 0x204);
- efuse_straps_4 = RREG32(RCU_IND_DATA);
- WREG32(RCU_IND_INDEX, 0x203);
- efuse_straps_3 = RREG32(RCU_IND_DATA);
- efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
-
- switch(efuse_box_bit_131_124) {
- case 0x00:
- gb_backend_map = 0x76543210;
- break;
- case 0x55:
- gb_backend_map = 0x77553311;
- break;
- case 0x56:
- gb_backend_map = 0x77553300;
- break;
- case 0x59:
- gb_backend_map = 0x77552211;
- break;
- case 0x66:
- gb_backend_map = 0x77443300;
- break;
- case 0x99:
- gb_backend_map = 0x66552211;
- break;
- case 0x5a:
- gb_backend_map = 0x77552200;
- break;
- case 0xaa:
- gb_backend_map = 0x66442200;
- break;
- case 0x95:
- gb_backend_map = 0x66553311;
- break;
- default:
- DRM_ERROR("bad backend map, using default\n");
- gb_backend_map =
- evergreen_get_tile_pipe_to_backend_map(rdev,
- rdev->config.evergreen.max_tile_pipes,
- rdev->config.evergreen.max_backends,
- ((EVERGREEN_MAX_BACKENDS_MASK <<
- rdev->config.evergreen.max_backends) &
- EVERGREEN_MAX_BACKENDS_MASK));
- break;
- }
- } else if (rdev->ddev->pdev->device == 0x68b9) {
- u32 efuse_straps_3;
- u8 efuse_box_bit_127_124;
-
- WREG32(RCU_IND_INDEX, 0x203);
- efuse_straps_3 = RREG32(RCU_IND_DATA);
- efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
-
- switch(efuse_box_bit_127_124) {
- case 0x0:
- gb_backend_map = 0x00003210;
- break;
- case 0x5:
- case 0x6:
- case 0x9:
- case 0xa:
- gb_backend_map = 0x00003311;
- break;
- default:
- DRM_ERROR("bad backend map, using default\n");
- gb_backend_map =
- evergreen_get_tile_pipe_to_backend_map(rdev,
- rdev->config.evergreen.max_tile_pipes,
- rdev->config.evergreen.max_backends,
- ((EVERGREEN_MAX_BACKENDS_MASK <<
- rdev->config.evergreen.max_backends) &
- EVERGREEN_MAX_BACKENDS_MASK));
- break;
- }
- } else {
- switch (rdev->family) {
- case CHIP_CYPRESS:
- case CHIP_HEMLOCK:
- case CHIP_BARTS:
- gb_backend_map = 0x66442200;
- break;
- case CHIP_JUNIPER:
- gb_backend_map = 0x00002200;
- break;
- default:
- gb_backend_map =
- evergreen_get_tile_pipe_to_backend_map(rdev,
- rdev->config.evergreen.max_tile_pipes,
- rdev->config.evergreen.max_backends,
- ((EVERGREEN_MAX_BACKENDS_MASK <<
- rdev->config.evergreen.max_backends) &
- EVERGREEN_MAX_BACKENDS_MASK));
- }
- }
-
/* setup tiling info dword. gb_addr_config is not adequate since it does
* not have bank info, so create a custom tiling dword.
* bits 3:0 num_pipes
@@ -2136,45 +1857,54 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
if (rdev->flags & RADEON_IS_IGP)
rdev->config.evergreen.tile_config |= 1 << 4;
- else
- rdev->config.evergreen.tile_config |=
- ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
- rdev->config.evergreen.tile_config |=
- ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
+ else {
+ if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+ rdev->config.evergreen.tile_config |= 1 << 4;
+ else
+ rdev->config.evergreen.tile_config |= 0 << 4;
+ }
+ rdev->config.evergreen.tile_config |= 0 << 8;
rdev->config.evergreen.tile_config |=
((gb_addr_config & 0x30000000) >> 28) << 12;
- rdev->config.evergreen.backend_map = gb_backend_map;
- WREG32(GB_BACKEND_MAP, gb_backend_map);
- WREG32(GB_ADDR_CONFIG, gb_addr_config);
- WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
- WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+ num_shader_engines = ((gb_addr_config & NUM_SHADER_ENGINES(3)) >> 12) + 1;
- num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
- grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
-
- for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
- u32 rb = cc_rb_backend_disable | (0xf0 << 16);
- u32 sp = cc_gc_shader_pipe_config;
- u32 gfx = grbm_gfx_index | SE_INDEX(i);
+ if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
+ u32 efuse_straps_4;
+ u32 efuse_straps_3;
- if (i == num_shader_engines) {
- rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
- sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
+ WREG32(RCU_IND_INDEX, 0x204);
+ efuse_straps_4 = RREG32(RCU_IND_DATA);
+ WREG32(RCU_IND_INDEX, 0x203);
+ efuse_straps_3 = RREG32(RCU_IND_DATA);
+ tmp = (((efuse_straps_4 & 0xf) << 4) |
+ ((efuse_straps_3 & 0xf0000000) >> 28));
+ } else {
+ tmp = 0;
+ for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
+ u32 rb_disable_bitmap;
+
+ WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+ WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+ rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+ tmp <<= 4;
+ tmp |= rb_disable_bitmap;
}
+ }
+ /* enabled rb are just the ones not disabled :) */
+ disabled_rb_mask = tmp;
- WREG32(GRBM_GFX_INDEX, gfx);
- WREG32(RLC_GFX_INDEX, gfx);
+ WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+ WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
- WREG32(CC_RB_BACKEND_DISABLE, rb);
- WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
- WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
- WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
- }
+ WREG32(GB_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(HDP_ADDR_CONFIG, gb_addr_config);
- grbm_gfx_index |= SE_BROADCAST_WRITES;
- WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
- WREG32(RLC_GFX_INDEX, grbm_gfx_index);
+ tmp = gb_addr_config & NUM_PIPES_MASK;
+ tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+ EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+ WREG32(GB_BACKEND_MAP, tmp);
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
@@ -2202,6 +1932,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
WREG32(SMX_DC_CTL0, smx_dc_ctl0);
+ if (rdev->family <= CHIP_SUMO2)
+ WREG32(SMX_SAR_CTL0, 0x00010000);
+
WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 4e7dd2b..c1655412 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -52,6 +52,7 @@ struct evergreen_cs_track {
u32 cb_color_view[12];
u32 cb_color_pitch[12];
u32 cb_color_slice[12];
+ u32 cb_color_slice_idx[12];
u32 cb_color_attrib[12];
u32 cb_color_cmask_slice[8];/* unused */
u32 cb_color_fmask_slice[8];/* unused */
@@ -127,12 +128,14 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track)
track->cb_color_info[i] = 0;
track->cb_color_view[i] = 0xFFFFFFFF;
track->cb_color_pitch[i] = 0;
- track->cb_color_slice[i] = 0;
+ track->cb_color_slice[i] = 0xfffffff;
+ track->cb_color_slice_idx[i] = 0;
}
track->cb_target_mask = 0xFFFFFFFF;
track->cb_shader_mask = 0xFFFFFFFF;
track->cb_dirty = true;
+ track->db_depth_slice = 0xffffffff;
track->db_depth_view = 0xFFFFC000;
track->db_depth_size = 0xFFFFFFFF;
track->db_depth_control = 0xFFFFFFFF;
@@ -250,10 +253,9 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
{
struct evergreen_cs_track *track = p->track;
unsigned palign, halign, tileb, slice_pt;
+ unsigned mtile_pr, mtile_ps, mtileb;
tileb = 64 * surf->bpe * surf->nsamples;
- palign = track->group_size / (8 * surf->bpe * surf->nsamples);
- palign = MAX(8, palign);
slice_pt = 1;
if (tileb > surf->tsplit) {
slice_pt = tileb / surf->tsplit;
@@ -262,7 +264,10 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
/* macro tile width & height */
palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
- surf->layer_size = surf->nbx * surf->nby * surf->bpe * slice_pt;
+ mtileb = (palign / 8) * (halign / 8) * tileb;
+ mtile_pr = surf->nbx / palign;
+ mtile_ps = (mtile_pr * surf->nby) / halign;
+ surf->layer_size = mtile_ps * mtileb * slice_pt;
surf->base_align = (palign / 8) * (halign / 8) * tileb;
surf->palign = palign;
surf->halign = halign;
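Note: with the change above, the 2D-tiled layer size is derived from whole macrotiles instead of nbx * nby * bpe: a macrotile covers (palign/8) x (halign/8) tiles, the surface spans mtile_pr macrotiles per row and mtile_ps per slice, and the split factor multiplies back in. The same arithmetic as a small stand-alone sketch (parameter meanings follow the surrounding code; the helper name is hypothetical):

#include <stdint.h>

/* Layer (slice) size in bytes for a 2D-tiled surface, following the
 * macrotile math in evergreen_surface_check_2d(): nbx/nby are the surface
 * width/height in blocks, palign/halign the macrotile pitch/height
 * alignment, tileb the per-tile byte size computed earlier, slice_pt the
 * tile-split factor. */
static uint64_t layer_size_2d(uint32_t nbx, uint32_t nby,
			      uint32_t palign, uint32_t halign,
			      uint32_t tileb, uint32_t slice_pt)
{
	uint64_t mtileb   = (uint64_t)(palign / 8) * (halign / 8) * tileb;
	uint64_t mtile_pr = nbx / palign;		/* macrotiles per row   */
	uint64_t mtile_ps = (mtile_pr * nby) / halign;	/* macrotiles per slice */

	return mtile_ps * mtileb * slice_pt;
}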
@@ -434,6 +439,39 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
offset += surf.layer_size * mslice;
if (offset > radeon_bo_size(track->cb_color_bo[id])) {
+ /* old ddx are broken: they allocate the bo with w*h*bpp but
+ * program the slice with ALIGN(h, 8); catch this and patch
+ * the command stream.
+ */
+ if (!surf.mode) {
+ volatile u32 *ib = p->ib.ptr;
+ unsigned long tmp, nby, bsize, size, min = 0;
+
+ /* find the height the ddx wants */
+ if (surf.nby > 8) {
+ min = surf.nby - 8;
+ }
+ bsize = radeon_bo_size(track->cb_color_bo[id]);
+ tmp = track->cb_color_bo_offset[id] << 8;
+ for (nby = surf.nby; nby > min; nby--) {
+ size = nby * surf.nbx * surf.bpe * surf.nsamples;
+ if ((tmp + size * mslice) <= bsize) {
+ break;
+ }
+ }
+ if (nby > min) {
+ surf.nby = nby;
+ slice = ((nby * surf.nbx) / 64) - 1;
+ if (!evergreen_surface_check(p, &surf, "cb")) {
+ /* check if this one works */
+ tmp += surf.layer_size * mslice;
+ if (tmp <= bsize) {
+ ib[track->cb_color_slice_idx[id]] = slice;
+ goto old_ddx_ok;
+ }
+ }
+ }
+ }
dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
"offset %d, max layer %d, bo size %ld, slice %d)\n",
__func__, __LINE__, id, surf.layer_size,
@@ -446,6 +484,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
surf.tsplit, surf.mtilea);
return -EINVAL;
}
+old_ddx_ok:
return 0;
}
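Note: the workaround above recovers from userspace that programmed CB_COLOR*_SLICE for ALIGN(height, 8) while only allocating width*height*bpp: it walks the height back by at most 8 rows until the resulting layer size fits the BO, then patches the slice value directly in the indirect buffer. The search itself reduced to a sketch, with hypothetical parameter names in place of the tracker fields:

#include <stdbool.h>
#include <stdint.h>

/* Find the largest height (searched down to nby - 8 rows) whose layer size
 * still fits in the buffer, mirroring the fallback loop in
 * evergreen_cs_track_validate_cb(). Returns false if nothing fits. */
static bool shrink_height_to_fit(uint32_t nbx, uint32_t nby,
				 uint32_t bpe, uint32_t nsamples,
				 uint32_t mslice, uint64_t offset,
				 uint64_t bo_size, uint32_t *fixed_nby)
{
	uint32_t min = nby > 8 ? nby - 8 : 0;
	uint32_t h;

	for (h = nby; h > min; h--) {
		uint64_t size = (uint64_t)h * nbx * bpe * nsamples;

		if (offset + size * mslice <= bo_size) {
			*fixed_nby = h;	/* this height fits; patch the slice */
			return true;
		}
	}
	return false;
}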
@@ -1532,6 +1571,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_SLICE:
tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_slice_idx[tmp] = idx;
track->cb_dirty = true;
break;
case CB_COLOR8_SLICE:
@@ -1540,6 +1580,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR11_SLICE:
tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_slice_idx[tmp] = idx;
track->cb_dirty = true;
break;
case CB_COLOR0_ATTRIB:
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index a51f880..65c5416 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -156,9 +156,6 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
- if (ASIC_IS_DCE5(rdev))
- return;
-
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
return;
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 79130bf..b50b15c 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -37,6 +37,15 @@
#define EVERGREEN_MAX_PIPES_MASK 0xFF
#define EVERGREEN_MAX_LDS_NUM 0xFFFF
+#define CYPRESS_GB_ADDR_CONFIG_GOLDEN 0x02011003
+#define BARTS_GB_ADDR_CONFIG_GOLDEN 0x02011003
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
+#define JUNIPER_GB_ADDR_CONFIG_GOLDEN 0x02010002
+#define REDWOOD_GB_ADDR_CONFIG_GOLDEN 0x02010002
+#define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002
+#define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001
+#define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001
+
/* Registers */
#define RCU_IND_INDEX 0x100
@@ -54,6 +63,7 @@
#define BACKEND_DISABLE(x) ((x) << 16)
#define GB_ADDR_CONFIG 0x98F8
#define NUM_PIPES(x) ((x) << 0)
+#define NUM_PIPES_MASK 0x0000000f
#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
#define NUM_SHADER_ENGINES(x) ((x) << 12)
@@ -452,6 +462,7 @@
#define MC_VM_MD_L1_TLB0_CNTL 0x2654
#define MC_VM_MD_L1_TLB1_CNTL 0x2658
#define MC_VM_MD_L1_TLB2_CNTL 0x265C
+#define MC_VM_MD_L1_TLB3_CNTL 0x2698
#define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C
#define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660
@@ -492,6 +503,7 @@
#define SCRATCH_UMSK 0x8540
#define SCRATCH_ADDR 0x8544
+#define SMX_SAR_CTL0 0xA008
#define SMX_DC_CTL0 0xA020
#define USE_HASH_FUNCTION (1 << 0)
#define NUMBER_OF_SETS(x) ((x) << 1)
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index b01c2dd..b7bf18e 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -417,215 +417,17 @@ out:
/*
* Core functions
*/
-static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
- u32 num_tile_pipes,
- u32 num_backends_per_asic,
- u32 *backend_disable_mask_per_asic,
- u32 num_shader_engines)
-{
- u32 backend_map = 0;
- u32 enabled_backends_mask = 0;
- u32 enabled_backends_count = 0;
- u32 num_backends_per_se;
- u32 cur_pipe;
- u32 swizzle_pipe[CAYMAN_MAX_PIPES];
- u32 cur_backend = 0;
- u32 i;
- bool force_no_swizzle;
-
- /* force legal values */
- if (num_tile_pipes < 1)
- num_tile_pipes = 1;
- if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
- num_tile_pipes = rdev->config.cayman.max_tile_pipes;
- if (num_shader_engines < 1)
- num_shader_engines = 1;
- if (num_shader_engines > rdev->config.cayman.max_shader_engines)
- num_shader_engines = rdev->config.cayman.max_shader_engines;
- if (num_backends_per_asic < num_shader_engines)
- num_backends_per_asic = num_shader_engines;
- if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
- num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;
-
- /* make sure we have the same number of backends per se */
- num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
- /* set up the number of backends per se */
- num_backends_per_se = num_backends_per_asic / num_shader_engines;
- if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
- num_backends_per_se = rdev->config.cayman.max_backends_per_se;
- num_backends_per_asic = num_backends_per_se * num_shader_engines;
- }
-
- /* create enable mask and count for enabled backends */
- for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
- if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
- enabled_backends_mask |= (1 << i);
- ++enabled_backends_count;
- }
- if (enabled_backends_count == num_backends_per_asic)
- break;
- }
-
- /* force the backends mask to match the current number of backends */
- if (enabled_backends_count != num_backends_per_asic) {
- u32 this_backend_enabled;
- u32 shader_engine;
- u32 backend_per_se;
-
- enabled_backends_mask = 0;
- enabled_backends_count = 0;
- *backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
- for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
- /* calc the current se */
- shader_engine = i / rdev->config.cayman.max_backends_per_se;
- /* calc the backend per se */
- backend_per_se = i % rdev->config.cayman.max_backends_per_se;
- /* default to not enabled */
- this_backend_enabled = 0;
- if ((shader_engine < num_shader_engines) &&
- (backend_per_se < num_backends_per_se))
- this_backend_enabled = 1;
- if (this_backend_enabled) {
- enabled_backends_mask |= (1 << i);
- *backend_disable_mask_per_asic &= ~(1 << i);
- ++enabled_backends_count;
- }
- }
- }
-
-
- memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
- switch (rdev->family) {
- case CHIP_CAYMAN:
- case CHIP_ARUBA:
- force_no_swizzle = true;
- break;
- default:
- force_no_swizzle = false;
- break;
- }
- if (force_no_swizzle) {
- bool last_backend_enabled = false;
-
- force_no_swizzle = false;
- for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
- if (((enabled_backends_mask >> i) & 1) == 1) {
- if (last_backend_enabled)
- force_no_swizzle = true;
- last_backend_enabled = true;
- } else
- last_backend_enabled = false;
- }
- }
-
- switch (num_tile_pipes) {
- case 1:
- case 3:
- case 5:
- case 7:
- DRM_ERROR("odd number of pipes!\n");
- break;
- case 2:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- break;
- case 4:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 1;
- swizzle_pipe[3] = 3;
- }
- break;
- case 6:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 1;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 5;
- }
- break;
- case 8:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- swizzle_pipe[6] = 6;
- swizzle_pipe[7] = 7;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 1;
- swizzle_pipe[5] = 3;
- swizzle_pipe[6] = 5;
- swizzle_pipe[7] = 7;
- }
- break;
- }
-
- for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
- while (((1 << cur_backend) & enabled_backends_mask) == 0)
- cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
-
- backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
- cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
- }
-
- return backend_map;
-}
-
-static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
- u32 disable_mask_per_se,
- u32 max_disable_mask_per_se,
- u32 num_shader_engines)
-{
- u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
- u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
-
- if (num_shader_engines == 1)
- return disable_mask_per_asic;
- else if (num_shader_engines == 2)
- return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
- else
- return 0xffffffff;
-}
-
static void cayman_gpu_init(struct radeon_device *rdev)
{
- u32 cc_rb_backend_disable = 0;
- u32 cc_gc_shader_pipe_config;
u32 gb_addr_config = 0;
u32 mc_shared_chmap, mc_arb_ramcfg;
- u32 gb_backend_map;
u32 cgts_tcc_disable;
u32 sx_debug_1;
u32 smx_dc_ctl0;
- u32 gc_user_shader_pipe_config;
- u32 gc_user_rb_backend_disable;
- u32 cgts_user_tcc_disable;
u32 cgts_sm_ctrl_reg;
u32 hdp_host_path_cntl;
u32 tmp;
+ u32 disabled_rb_mask;
int i, j;
switch (rdev->family) {
@@ -650,6 +452,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.sc_prim_fifo_size = 0x100;
rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_ARUBA:
default:
@@ -657,15 +460,28 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.max_pipes_per_simd = 4;
rdev->config.cayman.max_tile_pipes = 2;
if ((rdev->pdev->device == 0x9900) ||
- (rdev->pdev->device == 0x9901)) {
+ (rdev->pdev->device == 0x9901) ||
+ (rdev->pdev->device == 0x9905) ||
+ (rdev->pdev->device == 0x9906) ||
+ (rdev->pdev->device == 0x9907) ||
+ (rdev->pdev->device == 0x9908) ||
+ (rdev->pdev->device == 0x9909) ||
+ (rdev->pdev->device == 0x9910) ||
+ (rdev->pdev->device == 0x9917)) {
rdev->config.cayman.max_simds_per_se = 6;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x9903) ||
- (rdev->pdev->device == 0x9904)) {
+ (rdev->pdev->device == 0x9904) ||
+ (rdev->pdev->device == 0x990A) ||
+ (rdev->pdev->device == 0x9913) ||
+ (rdev->pdev->device == 0x9918)) {
rdev->config.cayman.max_simds_per_se = 4;
rdev->config.cayman.max_backends_per_se = 2;
- } else if ((rdev->pdev->device == 0x9990) ||
- (rdev->pdev->device == 0x9991)) {
+ } else if ((rdev->pdev->device == 0x9919) ||
+ (rdev->pdev->device == 0x9990) ||
+ (rdev->pdev->device == 0x9991) ||
+ (rdev->pdev->device == 0x9994) ||
+ (rdev->pdev->device == 0x99A0)) {
rdev->config.cayman.max_simds_per_se = 3;
rdev->config.cayman.max_backends_per_se = 1;
} else {
@@ -687,6 +503,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.sc_prim_fifo_size = 0x40;
rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
break;
}
@@ -706,39 +523,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
- cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
- cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
- cgts_tcc_disable = 0xffff0000;
- for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
- cgts_tcc_disable &= ~(1 << (16 + i));
- gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
- gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
- cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
-
- rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
- tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
- rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
- rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
- tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
- rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
- tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
- rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
- tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
- rdev->config.cayman.backend_disable_mask_per_asic =
- cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
- rdev->config.cayman.num_shader_engines);
- rdev->config.cayman.backend_map =
- cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
- rdev->config.cayman.num_backends_per_se *
- rdev->config.cayman.num_shader_engines,
- &rdev->config.cayman.backend_disable_mask_per_asic,
- rdev->config.cayman.num_shader_engines);
- tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
- rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
- tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
- rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
- if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
- rdev->config.cayman.mem_max_burst_length_bytes = 512;
tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
if (rdev->config.cayman.mem_row_size_in_kb > 4)
@@ -748,73 +532,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.num_gpus = 1;
rdev->config.cayman.multi_gpu_tile_size = 64;
- //gb_addr_config = 0x02011003
-#if 0
- gb_addr_config = RREG32(GB_ADDR_CONFIG);
-#else
- gb_addr_config = 0;
- switch (rdev->config.cayman.num_tile_pipes) {
- case 1:
- default:
- gb_addr_config |= NUM_PIPES(0);
- break;
- case 2:
- gb_addr_config |= NUM_PIPES(1);
- break;
- case 4:
- gb_addr_config |= NUM_PIPES(2);
- break;
- case 8:
- gb_addr_config |= NUM_PIPES(3);
- break;
- }
-
- tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
- gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
- gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
- tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
- gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
- switch (rdev->config.cayman.num_gpus) {
- case 1:
- default:
- gb_addr_config |= NUM_GPUS(0);
- break;
- case 2:
- gb_addr_config |= NUM_GPUS(1);
- break;
- case 4:
- gb_addr_config |= NUM_GPUS(2);
- break;
- }
- switch (rdev->config.cayman.multi_gpu_tile_size) {
- case 16:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
- break;
- case 32:
- default:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
- break;
- case 64:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
- break;
- case 128:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
- break;
- }
- switch (rdev->config.cayman.mem_row_size_in_kb) {
- case 1:
- default:
- gb_addr_config |= ROW_SIZE(0);
- break;
- case 2:
- gb_addr_config |= ROW_SIZE(1);
- break;
- case 4:
- gb_addr_config |= ROW_SIZE(2);
- break;
- }
-#endif
-
tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
rdev->config.cayman.num_tile_pipes = (1 << tmp);
tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
@@ -828,17 +545,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
- //gb_backend_map = 0x76541032;
-#if 0
- gb_backend_map = RREG32(GB_BACKEND_MAP);
-#else
- gb_backend_map =
- cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
- rdev->config.cayman.num_backends_per_se *
- rdev->config.cayman.num_shader_engines,
- &rdev->config.cayman.backend_disable_mask_per_asic,
- rdev->config.cayman.num_shader_engines);
-#endif
+
/* setup tiling info dword. gb_addr_config is not adequate since it does
* not have bank info, so create a custom tiling dword.
* bits 3:0 num_pipes
@@ -865,34 +572,50 @@ static void cayman_gpu_init(struct radeon_device *rdev)
/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
if (rdev->flags & RADEON_IS_IGP)
- rdev->config.evergreen.tile_config |= 1 << 4;
- else
- rdev->config.cayman.tile_config |=
- ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+ rdev->config.cayman.tile_config |= 1 << 4;
+ else {
+ if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+ rdev->config.cayman.tile_config |= 1 << 4;
+ else
+ rdev->config.cayman.tile_config |= 0 << 4;
+ }
rdev->config.cayman.tile_config |=
((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
rdev->config.cayman.tile_config |=
((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
- rdev->config.cayman.backend_map = gb_backend_map;
- WREG32(GB_BACKEND_MAP, gb_backend_map);
+ tmp = 0;
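+	/* select each shader engine in turn via GRBM_GFX_INDEX/RLC_GFX_INDEX
+	 * and pack its RB disable bits into tmp, 4 bits per SE
+	 */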
+ for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
+ u32 rb_disable_bitmap;
+
+ WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+ WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
+ rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
+ tmp <<= 4;
+ tmp |= rb_disable_bitmap;
+ }
+	/* the enabled RBs are just the ones not disabled :) */
+ disabled_rb_mask = tmp;
+
+ WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+ WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
+
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
- /* primary versions */
- WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
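+	/* remap the render backends around the disabled ones and program the map */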
+ tmp = gb_addr_config & NUM_PIPES_MASK;
+ tmp = r6xx_remap_render_backend(rdev, tmp,
+ rdev->config.cayman.max_backends_per_se *
+ rdev->config.cayman.max_shader_engines,
+ CAYMAN_MAX_BACKENDS, disabled_rb_mask);
+ WREG32(GB_BACKEND_MAP, tmp);
+ cgts_tcc_disable = 0xffff0000;
+ for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
+ cgts_tcc_disable &= ~(1 << (16 + i));
WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
-
- /* user versions */
- WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
-
WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
@@ -1580,6 +1303,10 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = r600_audio_init(rdev);
+ if (r)
+ return r;
+
return 0;
}
@@ -1606,6 +1333,7 @@ int cayman_resume(struct radeon_device *rdev)
int cayman_suspend(struct radeon_device *rdev)
{
+ r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
radeon_ib_pool_suspend(rdev);
radeon_vm_manager_suspend(rdev);
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 2aa7046..a0b9806 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -41,6 +41,9 @@
#define CAYMAN_MAX_TCC 16
#define CAYMAN_MAX_TCC_MASK 0xFF
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
+#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001
+
#define DMIF_ADDR_CONFIG 0xBD4
#define SRBM_GFX_CNTL 0x0E44
#define RINGID(x) (((x) & 0x3) << 0)
@@ -148,6 +151,8 @@
#define CGTS_SYS_TCC_DISABLE 0x3F90
#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
+#define RLC_GFX_INDEX 0x3FC4
+
#define CONFIG_MEMSIZE 0x5428
#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
@@ -212,6 +217,12 @@
#define SOFT_RESET_VGT (1 << 14)
#define SOFT_RESET_IA (1 << 15)
+#define GRBM_GFX_INDEX 0x802C
+#define INSTANCE_INDEX(x) ((x) << 0)
+#define SE_INDEX(x) ((x) << 16)
+#define INSTANCE_BROADCAST_WRITES (1 << 30)
+#define SE_BROADCAST_WRITES (1 << 31)
+
#define SCRATCH_REG0 0x8500
#define SCRATCH_REG1 0x8504
#define SCRATCH_REG2 0x8508
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f388a1d..bff6272 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1376,113 +1376,51 @@ int r600_asic_reset(struct radeon_device *rdev)
return r600_gpu_soft_reset(rdev);
}
-static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
- u32 num_backends,
- u32 backend_disable_mask)
-{
- u32 backend_map = 0;
- u32 enabled_backends_mask;
- u32 enabled_backends_count;
- u32 cur_pipe;
- u32 swizzle_pipe[R6XX_MAX_PIPES];
- u32 cur_backend;
- u32 i;
-
- if (num_tile_pipes > R6XX_MAX_PIPES)
- num_tile_pipes = R6XX_MAX_PIPES;
- if (num_tile_pipes < 1)
- num_tile_pipes = 1;
- if (num_backends > R6XX_MAX_BACKENDS)
- num_backends = R6XX_MAX_BACKENDS;
- if (num_backends < 1)
- num_backends = 1;
-
- enabled_backends_mask = 0;
- enabled_backends_count = 0;
- for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
- if (((backend_disable_mask >> i) & 1) == 0) {
- enabled_backends_mask |= (1 << i);
- ++enabled_backends_count;
- }
- if (enabled_backends_count == num_backends)
- break;
- }
-
- if (enabled_backends_count == 0) {
- enabled_backends_mask = 1;
- enabled_backends_count = 1;
- }
-
- if (enabled_backends_count != num_backends)
- num_backends = enabled_backends_count;
-
- memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
- switch (num_tile_pipes) {
- case 1:
- swizzle_pipe[0] = 0;
- break;
- case 2:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- break;
- case 3:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- break;
- case 4:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- break;
- case 5:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- break;
- case 6:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 5;
- swizzle_pipe[4] = 1;
- swizzle_pipe[5] = 3;
- break;
- case 7:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 1;
- swizzle_pipe[5] = 3;
- swizzle_pipe[6] = 5;
- break;
- case 8:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 1;
- swizzle_pipe[5] = 3;
- swizzle_pipe[6] = 5;
- swizzle_pipe[7] = 7;
- break;
+u32 r6xx_remap_render_backend(struct radeon_device *rdev,
+ u32 tiling_pipe_num,
+ u32 max_rb_num,
+ u32 total_max_rb_num,
+ u32 disabled_rb_mask)
+{
+ u32 rendering_pipe_num, rb_num_width, req_rb_num;
+ u32 pipe_rb_ratio, pipe_rb_remain;
+ u32 data = 0, mask = 1 << (max_rb_num - 1);
+ unsigned i, j;
+
+ /* mask out the RBs that don't exist on that asic */
+ disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
+
+ rendering_pipe_num = 1 << tiling_pipe_num;
+ req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
+ BUG_ON(rendering_pipe_num < req_rb_num);
+
+ pipe_rb_ratio = rendering_pipe_num / req_rb_num;
+ pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
+
+ if (rdev->family <= CHIP_RV740) {
+ /* r6xx/r7xx */
+ rb_num_width = 2;
+ } else {
+ /* eg+ */
+ rb_num_width = 4;
}
- cur_backend = 0;
- for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
- while (((1 << cur_backend) & enabled_backends_mask) == 0)
- cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
-
- backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
-
- cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
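+	/* assign map entries from the highest-numbered RB down: every enabled
+	 * RB gets pipe_rb_ratio entries, plus one extra while pipe_rb_remain
+	 * lasts, each entry rb_num_width bits wide
+	 */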
+ for (i = 0; i < max_rb_num; i++) {
+ if (!(mask & disabled_rb_mask)) {
+ for (j = 0; j < pipe_rb_ratio; j++) {
+ data <<= rb_num_width;
+ data |= max_rb_num - i - 1;
+ }
+ if (pipe_rb_remain) {
+ data <<= rb_num_width;
+ data |= max_rb_num - i - 1;
+ pipe_rb_remain--;
+ }
+ }
+ mask >>= 1;
}
- return backend_map;
+ return data;
}
int r600_count_pipe_bits(uint32_t val)
@@ -1500,7 +1438,6 @@ void r600_gpu_init(struct radeon_device *rdev)
{
u32 tiling_config;
u32 ramcfg;
- u32 backend_map;
u32 cc_rb_backend_disable;
u32 cc_gc_shader_pipe_config;
u32 tmp;
@@ -1511,8 +1448,9 @@ void r600_gpu_init(struct radeon_device *rdev)
u32 sq_thread_resource_mgmt = 0;
u32 sq_stack_resource_mgmt_1 = 0;
u32 sq_stack_resource_mgmt_2 = 0;
+ u32 disabled_rb_mask;
- /* FIXME: implement */
+ rdev->config.r600.tiling_group_size = 256;
switch (rdev->family) {
case CHIP_R600:
rdev->config.r600.max_pipes = 4;
@@ -1616,10 +1554,7 @@ void r600_gpu_init(struct radeon_device *rdev)
rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
- if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
- rdev->config.r600.tiling_group_size = 512;
- else
- rdev->config.r600.tiling_group_size = 256;
+
tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
if (tmp > 3) {
tiling_config |= ROW_TILING(3);
@@ -1631,32 +1566,36 @@ void r600_gpu_init(struct radeon_device *rdev)
tiling_config |= BANK_SWAPS(1);
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
- cc_rb_backend_disable |=
- BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
-
- cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
- cc_gc_shader_pipe_config |=
- INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
- cc_gc_shader_pipe_config |=
- INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
-
- backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
- (R6XX_MAX_BACKENDS -
- r600_count_pipe_bits((cc_rb_backend_disable &
- R6XX_MAX_BACKENDS_MASK) >> 16)),
- (cc_rb_backend_disable >> 16));
+ tmp = R6XX_MAX_BACKENDS -
+ r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
+ if (tmp < rdev->config.r600.max_backends) {
+ rdev->config.r600.max_backends = tmp;
+ }
+
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
+ tmp = R6XX_MAX_PIPES -
+ r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
+ if (tmp < rdev->config.r600.max_pipes) {
+ rdev->config.r600.max_pipes = tmp;
+ }
+ tmp = R6XX_MAX_SIMDS -
+ r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
+ if (tmp < rdev->config.r600.max_simds) {
+ rdev->config.r600.max_simds = tmp;
+ }
+
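+	/* compute the backend map from the disabled RB mask and fold it into
+	 * the tiling config
+	 */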
+ disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
+ tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
+ tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
+ R6XX_MAX_BACKENDS, disabled_rb_mask);
+ tiling_config |= tmp << 16;
+ rdev->config.r600.backend_map = tmp;
+
rdev->config.r600.tile_config = tiling_config;
- rdev->config.r600.backend_map = backend_map;
- tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
- /* Setup pipes */
- WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
- WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
-
tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
@@ -1900,6 +1839,7 @@ void r600_gpu_init(struct radeon_device *rdev)
WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
NUM_CLIP_SEQ(3)));
WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
+ WREG32(VC_ENHANCE, 0);
}
@@ -2487,6 +2427,12 @@ int r600_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = r600_audio_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: audio init failed\n");
+ return r;
+ }
+
return 0;
}
@@ -2523,12 +2469,6 @@ int r600_resume(struct radeon_device *rdev)
return r;
}
- r = r600_audio_init(rdev);
- if (r) {
- DRM_ERROR("radeon: audio resume failed\n");
- return r;
- }
-
return r;
}
@@ -2638,9 +2578,6 @@ int r600_init(struct radeon_device *rdev)
rdev->accel_working = false;
}
- r = r600_audio_init(rdev);
- if (r)
- return r; /* TODO error handling */
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 7c4fa77..79b5591 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -57,7 +57,7 @@ static bool radeon_dig_encoder(struct drm_encoder *encoder)
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
- return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE5(rdev))
+ return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE6(rdev))
|| rdev->family == CHIP_RS600
|| rdev->family == CHIP_RS690
|| rdev->family == CHIP_RS740;
@@ -192,6 +192,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
int base_rate = 48000;
switch (radeon_encoder->encoder_id) {
@@ -217,8 +218,8 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
- /* Some magic trigger or src sel? */
- WREG32_P(0x5ac, 0x01, ~0x77);
+ /* Select DTO source */
+ WREG32(0x5ac, radeon_crtc->crtc_id);
} else {
switch (dig->dig_encoder) {
case 0:
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0133f5f..ca87f7a 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -2079,6 +2079,48 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
break;
+ case PACKET3_STRMOUT_BASE_UPDATE:
+ if (p->family < CHIP_RV770) {
+ DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
+ return -EINVAL;
+ }
+ if (pkt->count != 1) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
+ return -EINVAL;
+ }
+ if (idx_value > 3) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
+ return -EINVAL;
+ }
+ {
+ u64 offset;
+
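+			/* the reloc BO and offset must match what the CS
+			 * tracker recorded for this streamout buffer
+			 */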
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
+ return -EINVAL;
+ }
+
+ if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
+ return -EINVAL;
+ }
+
+ offset = radeon_get_ib_value(p, idx+1) << 8;
+ if (offset != track->vgt_strmout_bo_offset[idx_value]) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
+ offset, track->vgt_strmout_bo_offset[idx_value]);
+ return -EINVAL;
+ }
+
+ if ((offset + 4) > radeon_bo_size(reloc->robj)) {
+ DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
+ offset + 4, radeon_bo_size(reloc->robj));
+ return -EINVAL;
+ }
+ ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ }
+ break;
case PACKET3_SURFACE_BASE_UPDATE:
if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 226379e..82a0a4c 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -322,9 +322,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
- if (ASIC_IS_DCE5(rdev))
- return;
-
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
return;
@@ -348,7 +345,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
- HDMI0_AUDIO_SEND_MAX_PACKETS | /* send NULL packets if no audio is available */
	       HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
}
@@ -484,7 +480,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
uint32_t offset;
u32 hdmi;
- if (ASIC_IS_DCE5(rdev))
+ if (ASIC_IS_DCE6(rdev))
return;
/* Silent, r600_hdmi_enable will raise WARN for us */
@@ -544,7 +540,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
- if (ASIC_IS_DCE5(rdev))
+ if (ASIC_IS_DCE6(rdev))
return;
/* Called for ATOM_ENCODER_MODE_HDMI only */
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 15bd3b2..025fd5b 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -219,6 +219,8 @@
#define BACKEND_MAP(x) ((x) << 16)
#define GB_TILING_CONFIG 0x98F0
+#define PIPE_TILING__SHIFT 1
+#define PIPE_TILING__MASK 0x0000000e
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
@@ -483,6 +485,7 @@
#define TC_L2_SIZE(x) ((x)<<5)
#define L2_DISABLE_LATE_HIT (1<<9)
+#define VC_ENHANCE 0x9714
#define VGT_CACHE_INVALIDATION 0x88C4
#define CACHE_INVALIDATION(x) ((x)<<0)
@@ -1161,6 +1164,7 @@
#define PACKET3_SET_CTL_CONST 0x6F
#define PACKET3_SET_CTL_CONST_OFFSET 0x0003cff0
#define PACKET3_SET_CTL_CONST_END 0x0003e200
+#define PACKET3_STRMOUT_BASE_UPDATE 0x72 /* r7xx */
#define PACKET3_SURFACE_BASE_UPDATE 0x73
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1dc3a4a..fefcca5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -346,6 +346,9 @@ struct radeon_bo {
/* Constant after initialization */
struct radeon_device *rdev;
struct drm_gem_object gem_base;
+
+ struct ttm_bo_kmap_obj dma_buf_vmap;
+ int vmapping_count;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
@@ -848,7 +851,6 @@ struct radeon_cs_parser {
s32 priority;
};
-extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
@@ -1372,9 +1374,9 @@ struct cayman_asic {
struct si_asic {
unsigned max_shader_engines;
- unsigned max_pipes_per_simd;
unsigned max_tile_pipes;
- unsigned max_simds_per_se;
+ unsigned max_cu_per_sh;
+ unsigned max_sh_per_se;
unsigned max_backends_per_se;
unsigned max_texture_channel_caches;
unsigned max_gprs;
@@ -1385,7 +1387,6 @@ struct si_asic {
unsigned sc_hiz_tile_fifo_size;
unsigned sc_earlyz_tile_fifo_size;
- unsigned num_shader_engines;
unsigned num_tile_pipes;
unsigned num_backends_per_se;
unsigned backend_disable_mask_per_asic;
@@ -1846,6 +1847,11 @@ extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
+extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
+ u32 tiling_pipe_num,
+ u32 max_rb_num,
+ u32 total_max_rb_num,
+				     u32 disabled_rb_mask);
/*
* evergreen functions used by radeon_encoder.c
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f6e69b8..b1e3820 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -444,7 +444,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
*/
if ((dev->pdev->device == 0x9498) &&
(dev->pdev->subsystem_vendor == 0x1682) &&
- (dev->pdev->subsystem_device == 0x2452)) {
+ (dev->pdev->subsystem_device == 0x2452) &&
+ (i2c_bus->valid == false) &&
+ !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
struct radeon_device *rdev = dev->dev_private;
*i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index c7d64a7..142f894 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -147,6 +147,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
sync_to_ring, p->ring);
}
+/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
struct drm_radeon_cs *cs = data;
@@ -245,22 +246,24 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
}
}
- if ((p->cs_flags & RADEON_CS_USE_VM) &&
- !p->rdev->vm_manager.enabled) {
- DRM_ERROR("VM not active on asic!\n");
- return -EINVAL;
- }
-
- /* we only support VM on SI+ */
- if ((p->rdev->family >= CHIP_TAHITI) &&
- ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
- DRM_ERROR("VM required on SI+!\n");
- return -EINVAL;
- }
+ /* these are KMS only */
+ if (p->rdev) {
+ if ((p->cs_flags & RADEON_CS_USE_VM) &&
+ !p->rdev->vm_manager.enabled) {
+ DRM_ERROR("VM not active on asic!\n");
+ return -EINVAL;
+ }
- if (radeon_cs_get_ring(p, ring, priority))
- return -EINVAL;
+ /* we only support VM on SI+ */
+ if ((p->rdev->family >= CHIP_TAHITI) &&
+ ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
+ DRM_ERROR("VM required on SI+!\n");
+ return -EINVAL;
+ }
+ if (radeon_cs_get_ring(p, ring, priority))
+ return -EINVAL;
+ }
/* deal with non-vm */
if ((p->chunk_ib_idx != -1) &&
@@ -580,7 +583,7 @@ int radeon_cs_finish_pages(struct radeon_cs_parser *p)
return 0;
}
-int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
+static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
{
int new_page;
struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
@@ -623,3 +626,28 @@ int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
return new_page;
}
+
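+/* fetch a dword from the IB at index idx, serving it from one of the two
+ * cached kmapped pages when possible and mapping the page in otherwise
+ */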
+u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
+{
+ struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+ u32 pg_idx, pg_offset;
+ u32 idx_value = 0;
+ int new_page;
+
+ pg_idx = (idx * 4) / PAGE_SIZE;
+ pg_offset = (idx * 4) % PAGE_SIZE;
+
+ if (ibc->kpage_idx[0] == pg_idx)
+ return ibc->kpage[0][pg_offset/4];
+ if (ibc->kpage_idx[1] == pg_idx)
+ return ibc->kpage[1][pg_offset/4];
+
+ new_page = radeon_cs_update_pages(p, pg_idx);
+ if (new_page < 0) {
+ p->parser_error = new_page;
+ return 0;
+ }
+
+ idx_value = ibc->kpage[new_page][pg_offset/4];
+ return idx_value;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index f0bb2b5..2c4d53f 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -57,9 +57,11 @@
* 2.13.0 - virtual memory support, streamout
* 2.14.0 - add evergreen tiling informations
* 2.15.0 - add max_pipes query
+ * 2.16.0 - fix evergreen 2D tiled surface calculation
+ * 2.17.0 - add STRMOUT_BASE_UPDATE for r7xx
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 15
+#define KMS_DRIVER_MINOR 17
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 79db56e..84b648a 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -289,8 +289,9 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
rdev->vm_manager.enabled = false;
/* mark first vm as always in use, it's the system one */
+ /* allocate enough for 2 full VM pts */
r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
- rdev->vm_manager.max_pfn * 8,
+ rdev->vm_manager.max_pfn * 8 * 2,
RADEON_GEM_DOMAIN_VRAM);
if (r) {
dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -476,12 +477,18 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
mutex_lock(&vm->mutex);
if (last_pfn > vm->last_pfn) {
- /* grow va space 32M by 32M */
- unsigned align = ((32 << 20) >> 12) - 1;
+		/* release the mutex and re-take the locks in the right order */
+ mutex_unlock(&vm->mutex);
radeon_mutex_lock(&rdev->cs_mutex);
- radeon_vm_unbind_locked(rdev, vm);
+ mutex_lock(&vm->mutex);
+ /* and check again */
+ if (last_pfn > vm->last_pfn) {
+ /* grow va space 32M by 32M */
+ unsigned align = ((32 << 20) >> 12) - 1;
+ radeon_vm_unbind_locked(rdev, vm);
+ vm->last_pfn = (last_pfn + align) & ~align;
+ }
radeon_mutex_unlock(&rdev->cs_mutex);
- vm->last_pfn = (last_pfn + align) & ~align;
}
head = &vm->va;
last_offset = 0;
@@ -595,8 +602,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
if (bo_va == NULL)
return 0;
- mutex_lock(&vm->mutex);
radeon_mutex_lock(&rdev->cs_mutex);
+ mutex_lock(&vm->mutex);
radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
radeon_mutex_unlock(&rdev->cs_mutex);
list_del(&bo_va->vm_list);
@@ -627,7 +634,15 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->list);
INIT_LIST_HEAD(&vm->va);
- vm->last_pfn = 0;
+ /* SI requires equal sized PTs for all VMs, so always set
+ * last_pfn to max_pfn. cayman allows variable sized
+	 * pts so we can grow them as needed.  Once we switch
+ * to two level pts we can unify this again.
+ */
+ if (rdev->family >= CHIP_TAHITI)
+ vm->last_pfn = rdev->vm_manager.max_pfn;
+ else
+ vm->last_pfn = 0;
/* map the ib pool buffer at 0 in virtual address space, set
* read only
*/
@@ -641,9 +656,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
struct radeon_bo_va *bo_va, *tmp;
int r;
- mutex_lock(&vm->mutex);
-
radeon_mutex_lock(&rdev->cs_mutex);
+ mutex_lock(&vm->mutex);
radeon_vm_unbind_locked(rdev, vm);
radeon_mutex_unlock(&rdev->cs_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index f28bd4b..21ec9f5 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -292,6 +292,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
+ struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_busy *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -317,13 +318,14 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
break;
}
drm_gem_object_unreference_unlocked(gobj);
- r = radeon_gem_handle_lockup(robj->rdev, r);
+ r = radeon_gem_handle_lockup(rdev, r);
return r;
}
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
+ struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_wait_idle *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -336,10 +338,10 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, NULL, false);
/* callback hw specific functions if any */
- if (robj->rdev->asic->ioctl_wait_idle)
- robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
+ if (rdev->asic->ioctl_wait_idle)
+ robj->rdev->asic->ioctl_wait_idle(rdev, robj);
drm_gem_object_unreference_unlocked(gobj);
- r = radeon_gem_handle_lockup(robj->rdev, r);
+ r = radeon_gem_handle_lockup(rdev, r);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index f1016a5..5c58d7d 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -273,7 +273,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
break;
case RADEON_INFO_MAX_PIPES:
if (rdev->family >= CHIP_TAHITI)
- value = rdev->config.si.max_pipes_per_simd;
+ value = rdev->config.si.max_cu_per_sh;
else if (rdev->family >= CHIP_CAYMAN)
value = rdev->config.cayman.max_pipes_per_simd;
else if (rdev->family >= CHIP_CEDAR)
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 0882554..5b37e28 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -801,9 +801,13 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
int i;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- not_processed += radeon_fence_count_emitted(rdev, i);
- if (not_processed >= 3)
- break;
+ struct radeon_ring *ring = &rdev->ring[i];
+
+ if (ring->ready) {
+ not_processed += radeon_fence_count_emitted(rdev, i);
+ if (not_processed >= 3)
+ break;
+ }
}
if (not_processed >= 3) { /* should upclock */
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index b8f835d..6bef46a 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -85,6 +85,47 @@ static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, v
}
+static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ return -EINVAL;
+}
+
+static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
+{
+ struct radeon_bo *bo = dma_buf->priv;
+ struct drm_device *dev = bo->rdev->ddev;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
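+	/* reuse an existing kernel mapping if one is active; vmapping_count
+	 * is protected by struct_mutex
+	 */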
+ if (bo->vmapping_count) {
+ bo->vmapping_count++;
+ goto out_unlock;
+ }
+
+ ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
+ &bo->dma_buf_vmap);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ERR_PTR(ret);
+ }
+ bo->vmapping_count = 1;
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return bo->dma_buf_vmap.virtual;
+}
+
+static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct radeon_bo *bo = dma_buf->priv;
+ struct drm_device *dev = bo->rdev->ddev;
+
+ mutex_lock(&dev->struct_mutex);
+ bo->vmapping_count--;
+ if (bo->vmapping_count == 0) {
+ ttm_bo_kunmap(&bo->dma_buf_vmap);
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
const static struct dma_buf_ops radeon_dmabuf_ops = {
.map_dma_buf = radeon_gem_map_dma_buf,
.unmap_dma_buf = radeon_gem_unmap_dma_buf,
@@ -93,6 +134,9 @@ const static struct dma_buf_ops radeon_dmabuf_ops = {
.kmap_atomic = radeon_gem_kmap_atomic,
.kunmap = radeon_gem_kunmap,
.kunmap_atomic = radeon_gem_kunmap_atomic,
+ .mmap = radeon_gem_prime_mmap,
+ .vmap = radeon_gem_prime_vmap,
+ .vunmap = radeon_gem_prime_vunmap,
};
static int radeon_prime_create(struct drm_device *dev,
@@ -125,11 +169,17 @@ struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
struct radeon_bo *bo = gem_to_radeon_bo(obj);
int ret = 0;
+ ret = radeon_bo_reserve(bo, false);
+ if (unlikely(ret != 0))
+ return ERR_PTR(ret);
+
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
- if (ret)
+ if (ret) {
+ radeon_bo_unreserve(bo);
return ERR_PTR(ret);
-
+ }
+ radeon_bo_unreserve(bo);
return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
}
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 493a7be..983658c 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -39,31 +39,6 @@
*/
int radeon_debugfs_sa_init(struct radeon_device *rdev);
-u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
-{
- struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
- u32 pg_idx, pg_offset;
- u32 idx_value = 0;
- int new_page;
-
- pg_idx = (idx * 4) / PAGE_SIZE;
- pg_offset = (idx * 4) % PAGE_SIZE;
-
- if (ibc->kpage_idx[0] == pg_idx)
- return ibc->kpage[0][pg_offset/4];
- if (ibc->kpage_idx[1] == pg_idx)
- return ibc->kpage[1][pg_offset/4];
-
- new_page = radeon_cs_update_pages(p, pg_idx);
- if (new_page < 0) {
- p->parser_error = new_page;
- return 0;
- }
-
- idx_value = ibc->kpage[new_page][pg_offset/4];
- return idx_value;
-}
-
int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, unsigned size)
{
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 25f9eef..e95c5e6 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -908,12 +908,6 @@ static int rs600_startup(struct radeon_device *rdev)
return r;
}
- r = r600_audio_init(rdev);
- if (r) {
- dev_err(rdev->dev, "failed initializing audio\n");
- return r;
- }
-
r = radeon_ib_pool_start(rdev);
if (r)
return r;
@@ -922,6 +916,12 @@ static int rs600_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = r600_audio_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing audio\n");
+ return r;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 3277dde..159b6a4 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -637,12 +637,6 @@ static int rs690_startup(struct radeon_device *rdev)
return r;
}
- r = r600_audio_init(rdev);
- if (r) {
- dev_err(rdev->dev, "failed initializing audio\n");
- return r;
- }
-
r = radeon_ib_pool_start(rdev);
if (r)
return r;
@@ -651,6 +645,12 @@ static int rs690_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = r600_audio_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing audio\n");
+ return r;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index c2f473b..b4f51c5 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -151,6 +151,8 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ if (rdev->family == CHIP_RV740)
+ WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
@@ -363,180 +365,6 @@ void r700_cp_fini(struct radeon_device *rdev)
/*
* Core functions
*/
-static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
- u32 num_tile_pipes,
- u32 num_backends,
- u32 backend_disable_mask)
-{
- u32 backend_map = 0;
- u32 enabled_backends_mask;
- u32 enabled_backends_count;
- u32 cur_pipe;
- u32 swizzle_pipe[R7XX_MAX_PIPES];
- u32 cur_backend;
- u32 i;
- bool force_no_swizzle;
-
- if (num_tile_pipes > R7XX_MAX_PIPES)
- num_tile_pipes = R7XX_MAX_PIPES;
- if (num_tile_pipes < 1)
- num_tile_pipes = 1;
- if (num_backends > R7XX_MAX_BACKENDS)
- num_backends = R7XX_MAX_BACKENDS;
- if (num_backends < 1)
- num_backends = 1;
-
- enabled_backends_mask = 0;
- enabled_backends_count = 0;
- for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
- if (((backend_disable_mask >> i) & 1) == 0) {
- enabled_backends_mask |= (1 << i);
- ++enabled_backends_count;
- }
- if (enabled_backends_count == num_backends)
- break;
- }
-
- if (enabled_backends_count == 0) {
- enabled_backends_mask = 1;
- enabled_backends_count = 1;
- }
-
- if (enabled_backends_count != num_backends)
- num_backends = enabled_backends_count;
-
- switch (rdev->family) {
- case CHIP_RV770:
- case CHIP_RV730:
- force_no_swizzle = false;
- break;
- case CHIP_RV710:
- case CHIP_RV740:
- default:
- force_no_swizzle = true;
- break;
- }
-
- memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
- switch (num_tile_pipes) {
- case 1:
- swizzle_pipe[0] = 0;
- break;
- case 2:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- break;
- case 3:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 1;
- }
- break;
- case 4:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 3;
- swizzle_pipe[3] = 1;
- }
- break;
- case 5:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 1;
- swizzle_pipe[4] = 3;
- }
- break;
- case 6:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 5;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- }
- break;
- case 7:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- swizzle_pipe[6] = 6;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- swizzle_pipe[6] = 5;
- }
- break;
- case 8:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- swizzle_pipe[6] = 6;
- swizzle_pipe[7] = 7;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- swizzle_pipe[6] = 7;
- swizzle_pipe[7] = 5;
- }
- break;
- }
-
- cur_backend = 0;
- for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
- while (((1 << cur_backend) & enabled_backends_mask) == 0)
- cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
-
- backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
-
- cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
- }
-
- return backend_map;
-}
-
static void rv770_gpu_init(struct radeon_device *rdev)
{
int i, j, num_qd_pipes;
@@ -552,14 +380,17 @@ static void rv770_gpu_init(struct radeon_device *rdev)
u32 sq_thread_resource_mgmt;
u32 hdp_host_path_cntl;
u32 sq_dyn_gpr_size_simd_ab_0;
- u32 backend_map;
u32 gb_tiling_config = 0;
u32 cc_rb_backend_disable = 0;
u32 cc_gc_shader_pipe_config = 0;
u32 mc_arb_ramcfg;
- u32 db_debug4;
+ u32 db_debug4, tmp;
+ u32 inactive_pipes, shader_pipe_config;
+ u32 disabled_rb_mask;
+ unsigned active_number;
/* setup chip specs */
+ rdev->config.rv770.tiling_group_size = 256;
switch (rdev->family) {
case CHIP_RV770:
rdev->config.rv770.max_pipes = 4;
@@ -670,33 +501,70 @@ static void rv770_gpu_init(struct radeon_device *rdev)
/* setup tiling, simd, pipe config */
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+ shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
+ inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
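+	/* count the active QD pipes so SPI_CONFIG_CNTL can be set up for the
+	 * single-pipe case
+	 */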
+ for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) {
+ if (!(inactive_pipes & tmp)) {
+ active_number++;
+ }
+ tmp <<= 1;
+ }
+ if (active_number == 1) {
+ WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1);
+ } else {
+ WREG32(SPI_CONFIG_CNTL, 0);
+ }
+
+ cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
+ if (tmp < rdev->config.rv770.max_backends) {
+ rdev->config.rv770.max_backends = tmp;
+ }
+
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+ tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
+ if (tmp < rdev->config.rv770.max_pipes) {
+ rdev->config.rv770.max_pipes = tmp;
+ }
+ tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
+ if (tmp < rdev->config.rv770.max_simds) {
+ rdev->config.rv770.max_simds = tmp;
+ }
+
switch (rdev->config.rv770.max_tile_pipes) {
case 1:
default:
- gb_tiling_config |= PIPE_TILING(0);
+ gb_tiling_config = PIPE_TILING(0);
break;
case 2:
- gb_tiling_config |= PIPE_TILING(1);
+ gb_tiling_config = PIPE_TILING(1);
break;
case 4:
- gb_tiling_config |= PIPE_TILING(2);
+ gb_tiling_config = PIPE_TILING(2);
break;
case 8:
- gb_tiling_config |= PIPE_TILING(3);
+ gb_tiling_config = PIPE_TILING(3);
break;
}
rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
+ disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
+ tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
+ tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
+ R7XX_MAX_BACKENDS, disabled_rb_mask);
+ gb_tiling_config |= tmp << 16;
+ rdev->config.rv770.backend_map = tmp;
+
if (rdev->family == CHIP_RV770)
gb_tiling_config |= BANK_TILING(1);
- else
- gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
+ else {
+ if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+ gb_tiling_config |= BANK_TILING(1);
+ else
+ gb_tiling_config |= BANK_TILING(0);
+ }
rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
- if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
- rdev->config.rv770.tiling_group_size = 512;
- else
- rdev->config.rv770.tiling_group_size = 256;
if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
gb_tiling_config |= ROW_TILING(3);
gb_tiling_config |= SAMPLE_SPLIT(3);
@@ -708,47 +576,19 @@ static void rv770_gpu_init(struct radeon_device *rdev)
}
gb_tiling_config |= BANK_SWAPS(1);
-
- cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
- cc_rb_backend_disable |=
- BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
-
- cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
- cc_gc_shader_pipe_config |=
- INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
- cc_gc_shader_pipe_config |=
- INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
-
- if (rdev->family == CHIP_RV740)
- backend_map = 0x28;
- else
- backend_map = r700_get_tile_pipe_to_backend_map(rdev,
- rdev->config.rv770.max_tile_pipes,
- (R7XX_MAX_BACKENDS -
- r600_count_pipe_bits((cc_rb_backend_disable &
- R7XX_MAX_BACKENDS_MASK) >> 16)),
- (cc_rb_backend_disable >> 16));
-
rdev->config.rv770.tile_config = gb_tiling_config;
- rdev->config.rv770.backend_map = backend_map;
- gb_tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, gb_tiling_config);
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
- WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
- WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
- WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
-
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
WREG32(CGTS_USER_TCC_DISABLE, 0);
- num_qd_pipes =
- R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
+
+ num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
@@ -776,6 +616,9 @@ static void rv770_gpu_init(struct radeon_device *rdev)
ACK_FLUSH_CTL(3) |
SYNC_FLUSH_CTL));
+ if (rdev->family != CHIP_RV770)
+ WREG32(SMX_SAR_CTL0, 0x00003f3f);
+
db_debug3 = RREG32(DB_DEBUG3);
db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
switch (rdev->family) {
@@ -809,8 +652,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(VGT_NUM_INSTANCES, 1);
- WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
-
WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
WREG32(CP_PERFMON_CNTL, 0);
@@ -954,7 +795,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
NUM_CLIP_SEQ(3)));
-
+ WREG32(VC_ENHANCE, 0);
}
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
@@ -1118,6 +959,12 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = r600_audio_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: audio init failed\n");
+ return r;
+ }
+
return 0;
}
@@ -1140,12 +987,6 @@ int rv770_resume(struct radeon_device *rdev)
return r;
}
- r = r600_audio_init(rdev);
- if (r) {
- dev_err(rdev->dev, "radeon: audio init failed\n");
- return r;
- }
-
return r;
}
@@ -1254,12 +1095,6 @@ int rv770_init(struct radeon_device *rdev)
rdev->accel_working = false;
}
- r = r600_audio_init(rdev);
- if (r) {
- dev_err(rdev->dev, "radeon: audio init failed\n");
- return r;
- }
-
return 0;
}
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 9c549f7..b0adfc5 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -106,10 +106,13 @@
#define BACKEND_MAP(x) ((x) << 16)
#define GB_TILING_CONFIG 0x98F0
+#define PIPE_TILING__SHIFT 1
+#define PIPE_TILING__MASK 0x0000000e
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
+#define INACTIVE_QD_PIPES_SHIFT 8
#define INACTIVE_SIMDS(x) ((x) << 16)
#define INACTIVE_SIMDS_MASK 0x00FF0000
@@ -174,6 +177,7 @@
#define MC_VM_MD_L1_TLB0_CNTL 0x2654
#define MC_VM_MD_L1_TLB1_CNTL 0x2658
#define MC_VM_MD_L1_TLB2_CNTL 0x265C
+#define MC_VM_MD_L1_TLB3_CNTL 0x2698
#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
@@ -207,6 +211,7 @@
#define SCRATCH_UMSK 0x8540
#define SCRATCH_ADDR 0x8544
+#define SMX_SAR_CTL0 0xA008
#define SMX_DC_CTL0 0xA020
#define USE_HASH_FUNCTION (1 << 0)
#define CACHE_DEPTH(x) ((x) << 1)
@@ -306,6 +311,8 @@
#define TCP_CNTL 0x9610
#define TCP_CHAN_STEER 0x9614
+#define VC_ENHANCE 0x9714
+
#define VGT_CACHE_INVALIDATION 0x88C4
#define CACHE_INVALIDATION(x) ((x)<<0)
#define VC_ONLY 0
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 549732e..0b02792 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -867,200 +867,6 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
/*
* Core functions
*/
-static u32 si_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
- u32 num_tile_pipes,
- u32 num_backends_per_asic,
- u32 *backend_disable_mask_per_asic,
- u32 num_shader_engines)
-{
- u32 backend_map = 0;
- u32 enabled_backends_mask = 0;
- u32 enabled_backends_count = 0;
- u32 num_backends_per_se;
- u32 cur_pipe;
- u32 swizzle_pipe[SI_MAX_PIPES];
- u32 cur_backend = 0;
- u32 i;
- bool force_no_swizzle;
-
- /* force legal values */
- if (num_tile_pipes < 1)
- num_tile_pipes = 1;
- if (num_tile_pipes > rdev->config.si.max_tile_pipes)
- num_tile_pipes = rdev->config.si.max_tile_pipes;
- if (num_shader_engines < 1)
- num_shader_engines = 1;
- if (num_shader_engines > rdev->config.si.max_shader_engines)
- num_shader_engines = rdev->config.si.max_shader_engines;
- if (num_backends_per_asic < num_shader_engines)
- num_backends_per_asic = num_shader_engines;
- if (num_backends_per_asic > (rdev->config.si.max_backends_per_se * num_shader_engines))
- num_backends_per_asic = rdev->config.si.max_backends_per_se * num_shader_engines;
-
- /* make sure we have the same number of backends per se */
- num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
- /* set up the number of backends per se */
- num_backends_per_se = num_backends_per_asic / num_shader_engines;
- if (num_backends_per_se > rdev->config.si.max_backends_per_se) {
- num_backends_per_se = rdev->config.si.max_backends_per_se;
- num_backends_per_asic = num_backends_per_se * num_shader_engines;
- }
-
- /* create enable mask and count for enabled backends */
- for (i = 0; i < SI_MAX_BACKENDS; ++i) {
- if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
- enabled_backends_mask |= (1 << i);
- ++enabled_backends_count;
- }
- if (enabled_backends_count == num_backends_per_asic)
- break;
- }
-
- /* force the backends mask to match the current number of backends */
- if (enabled_backends_count != num_backends_per_asic) {
- u32 this_backend_enabled;
- u32 shader_engine;
- u32 backend_per_se;
-
- enabled_backends_mask = 0;
- enabled_backends_count = 0;
- *backend_disable_mask_per_asic = SI_MAX_BACKENDS_MASK;
- for (i = 0; i < SI_MAX_BACKENDS; ++i) {
- /* calc the current se */
- shader_engine = i / rdev->config.si.max_backends_per_se;
- /* calc the backend per se */
- backend_per_se = i % rdev->config.si.max_backends_per_se;
- /* default to not enabled */
- this_backend_enabled = 0;
- if ((shader_engine < num_shader_engines) &&
- (backend_per_se < num_backends_per_se))
- this_backend_enabled = 1;
- if (this_backend_enabled) {
- enabled_backends_mask |= (1 << i);
- *backend_disable_mask_per_asic &= ~(1 << i);
- ++enabled_backends_count;
- }
- }
- }
-
-
- memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * SI_MAX_PIPES);
- switch (rdev->family) {
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- force_no_swizzle = true;
- break;
- default:
- force_no_swizzle = false;
- break;
- }
- if (force_no_swizzle) {
- bool last_backend_enabled = false;
-
- force_no_swizzle = false;
- for (i = 0; i < SI_MAX_BACKENDS; ++i) {
- if (((enabled_backends_mask >> i) & 1) == 1) {
- if (last_backend_enabled)
- force_no_swizzle = true;
- last_backend_enabled = true;
- } else
- last_backend_enabled = false;
- }
- }
-
- switch (num_tile_pipes) {
- case 1:
- case 3:
- case 5:
- case 7:
- DRM_ERROR("odd number of pipes!\n");
- break;
- case 2:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- break;
- case 4:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 1;
- swizzle_pipe[3] = 3;
- }
- break;
- case 6:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 1;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 5;
- }
- break;
- case 8:
- if (force_no_swizzle) {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 1;
- swizzle_pipe[2] = 2;
- swizzle_pipe[3] = 3;
- swizzle_pipe[4] = 4;
- swizzle_pipe[5] = 5;
- swizzle_pipe[6] = 6;
- swizzle_pipe[7] = 7;
- } else {
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 1;
- swizzle_pipe[5] = 3;
- swizzle_pipe[6] = 5;
- swizzle_pipe[7] = 7;
- }
- break;
- }
-
- for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
- while (((1 << cur_backend) & enabled_backends_mask) == 0)
- cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
-
- backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
-
- cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
- }
-
- return backend_map;
-}
-
-static u32 si_get_disable_mask_per_asic(struct radeon_device *rdev,
- u32 disable_mask_per_se,
- u32 max_disable_mask_per_se,
- u32 num_shader_engines)
-{
- u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
- u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
-
- if (num_shader_engines == 1)
- return disable_mask_per_asic;
- else if (num_shader_engines == 2)
- return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
- else
- return 0xffffffff;
-}
-
static void si_tiling_mode_table_init(struct radeon_device *rdev)
{
const u32 num_tile_mode_states = 32;
@@ -1562,18 +1368,151 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
}
+static void si_select_se_sh(struct radeon_device *rdev,
+ u32 se_num, u32 sh_num)
+{
+ u32 data = INSTANCE_BROADCAST_WRITES;
+
+ if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
+ data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
+ else if (se_num == 0xffffffff)
+ data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
+ else if (sh_num == 0xffffffff)
+ data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
+ else
+ data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
+ WREG32(GRBM_GFX_INDEX, data);
+}
+
+static u32 si_create_bitmask(u32 bit_width)
+{
+ u32 i, mask = 0;
+
+ for (i = 0; i < bit_width; i++) {
+ mask <<= 1;
+ mask |= 1;
+ }
+ return mask;
+}
+
+static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
+{
+ u32 data, mask;
+
+ data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+ if (data & 1)
+ data &= INACTIVE_CUS_MASK;
+ else
+ data = 0;
+ data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+
+ data >>= INACTIVE_CUS_SHIFT;
+
+ mask = si_create_bitmask(cu_per_sh);
+
+ return ~data & mask;
+}
+
+static void si_setup_spi(struct radeon_device *rdev,
+ u32 se_num, u32 sh_per_se,
+ u32 cu_per_sh)
+{
+ int i, j, k;
+ u32 data, mask, active_cu;
+
+ for (i = 0; i < se_num; i++) {
+ for (j = 0; j < sh_per_se; j++) {
+ si_select_se_sh(rdev, i, j);
+ data = RREG32(SPI_STATIC_THREAD_MGMT_3);
+ active_cu = si_get_cu_enabled(rdev, cu_per_sh);
+
+ mask = 1;
+ for (k = 0; k < 16; k++) {
+ mask <<= k;
+ if (active_cu & mask) {
+ data &= ~mask;
+ WREG32(SPI_STATIC_THREAD_MGMT_3, data);
+ break;
+ }
+ }
+ }
+ }
+ si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
+static u32 si_get_rb_disabled(struct radeon_device *rdev,
+ u32 max_rb_num, u32 se_num,
+ u32 sh_per_se)
+{
+ u32 data, mask;
+
+ data = RREG32(CC_RB_BACKEND_DISABLE);
+ if (data & 1)
+ data &= BACKEND_DISABLE_MASK;
+ else
+ data = 0;
+ data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
+
+ data >>= BACKEND_DISABLE_SHIFT;
+
+ mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
+
+ return data & mask;
+}
+
+static void si_setup_rb(struct radeon_device *rdev,
+ u32 se_num, u32 sh_per_se,
+ u32 max_rb_num)
+{
+ int i, j;
+ u32 data, mask;
+ u32 disabled_rbs = 0;
+ u32 enabled_rbs = 0;
+
+ for (i = 0; i < se_num; i++) {
+ for (j = 0; j < sh_per_se; j++) {
+ si_select_se_sh(rdev, i, j);
+ data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+ disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
+ }
+ }
+ si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+
+ mask = 1;
+ for (i = 0; i < max_rb_num; i++) {
+ if (!(disabled_rbs & mask))
+ enabled_rbs |= mask;
+ mask <<= 1;
+ }
+
+ for (i = 0; i < se_num; i++) {
+ si_select_se_sh(rdev, i, 0xffffffff);
+ data = 0;
+ for (j = 0; j < sh_per_se; j++) {
+ switch (enabled_rbs & 3) {
+ case 1:
+ data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
+ break;
+ case 2:
+ data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
+ break;
+ case 3:
+ default:
+ data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
+ break;
+ }
+ enabled_rbs >>= 2;
+ }
+ WREG32(PA_SC_RASTER_CONFIG, data);
+ }
+ si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+}
+
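For readers following the new harvesting logic above, here is a minimal user-space sketch of the same arithmetic. It assumes a hypothetical 2-SE / 2-SH / 2-RB-per-SH layout (the shape implied by TAHITI_RB_BITMAP_WIDTH_PER_SH) and hard-codes per-SH disable fields in place of the CC_RB_BACKEND_DISABLE reads, so it only illustrates si_create_bitmask() and the disabled/enabled mask accumulation done in si_setup_rb(); it is not driver code.

#include <assert.h>
#include <stdio.h>

/* Same construction as si_create_bitmask(): a mask of bit_width ones. */
static unsigned int create_bitmask(unsigned int bit_width)
{
	unsigned int i, mask = 0;

	for (i = 0; i < bit_width; i++)
		mask = (mask << 1) | 1;
	return mask;
}

int main(void)
{
	/* Hypothetical layout: 2 SEs, 2 SHs per SE, 2 RBs per SH (8 RBs). */
	const unsigned int se_num = 2, sh_per_se = 2, max_rb_num = 8;
	const unsigned int rb_per_sh = max_rb_num / se_num / sh_per_se;
	/* Hypothetical per-SH disable fields (what si_get_rb_disabled() would
	 * return per iteration); here SE1/SH1 has its second RB fused off. */
	unsigned int per_sh_disabled[2][2] = { { 0x0, 0x0 }, { 0x0, 0x2 } };
	unsigned int disabled_rbs = 0, enabled_rbs = 0, mask = 1;
	unsigned int i, j;

	for (i = 0; i < se_num; i++)
		for (j = 0; j < sh_per_se; j++)
			disabled_rbs |= per_sh_disabled[i][j] <<
					((i * sh_per_se + j) * rb_per_sh);

	for (i = 0; i < max_rb_num; i++) {
		if (!(disabled_rbs & mask))
			enabled_rbs |= mask;
		mask <<= 1;
	}

	assert(create_bitmask(rb_per_sh) == 0x3);
	printf("disabled_rbs=0x%02x enabled_rbs=0x%02x\n",
	       disabled_rbs, enabled_rbs);
	return 0;
}

With the inputs above this prints disabled_rbs=0x80 enabled_rbs=0x7f, i.e. only the fused-off RB is dropped from the enable mask.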
static void si_gpu_init(struct radeon_device *rdev)
{
- u32 cc_rb_backend_disable = 0;
- u32 cc_gc_shader_array_config;
u32 gb_addr_config = 0;
u32 mc_shared_chmap, mc_arb_ramcfg;
- u32 gb_backend_map;
- u32 cgts_tcc_disable;
u32 sx_debug_1;
- u32 gc_user_shader_array_config;
- u32 gc_user_rb_backend_disable;
- u32 cgts_user_tcc_disable;
u32 hdp_host_path_cntl;
u32 tmp;
int i, j;
@@ -1581,9 +1520,9 @@ static void si_gpu_init(struct radeon_device *rdev)
switch (rdev->family) {
case CHIP_TAHITI:
rdev->config.si.max_shader_engines = 2;
- rdev->config.si.max_pipes_per_simd = 4;
rdev->config.si.max_tile_pipes = 12;
- rdev->config.si.max_simds_per_se = 8;
+ rdev->config.si.max_cu_per_sh = 8;
+ rdev->config.si.max_sh_per_se = 2;
rdev->config.si.max_backends_per_se = 4;
rdev->config.si.max_texture_channel_caches = 12;
rdev->config.si.max_gprs = 256;
@@ -1594,12 +1533,13 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.sc_prim_fifo_size_backend = 0x100;
rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_PITCAIRN:
rdev->config.si.max_shader_engines = 2;
- rdev->config.si.max_pipes_per_simd = 4;
rdev->config.si.max_tile_pipes = 8;
- rdev->config.si.max_simds_per_se = 5;
+ rdev->config.si.max_cu_per_sh = 5;
+ rdev->config.si.max_sh_per_se = 2;
rdev->config.si.max_backends_per_se = 4;
rdev->config.si.max_texture_channel_caches = 8;
rdev->config.si.max_gprs = 256;
@@ -1610,13 +1550,14 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.sc_prim_fifo_size_backend = 0x100;
rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_VERDE:
default:
rdev->config.si.max_shader_engines = 1;
- rdev->config.si.max_pipes_per_simd = 4;
rdev->config.si.max_tile_pipes = 4;
- rdev->config.si.max_simds_per_se = 2;
+ rdev->config.si.max_cu_per_sh = 2;
+ rdev->config.si.max_sh_per_se = 2;
rdev->config.si.max_backends_per_se = 4;
rdev->config.si.max_texture_channel_caches = 4;
rdev->config.si.max_gprs = 256;
@@ -1627,6 +1568,7 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.sc_prim_fifo_size_backend = 0x40;
rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
break;
}
@@ -1648,31 +1590,7 @@ static void si_gpu_init(struct radeon_device *rdev)
mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
- cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
- cc_gc_shader_array_config = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
- cgts_tcc_disable = 0xffff0000;
- for (i = 0; i < rdev->config.si.max_texture_channel_caches; i++)
- cgts_tcc_disable &= ~(1 << (16 + i));
- gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
- gc_user_shader_array_config = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
- cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
-
- rdev->config.si.num_shader_engines = rdev->config.si.max_shader_engines;
rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
- tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
- rdev->config.si.num_backends_per_se = r600_count_pipe_bits(tmp);
- tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
- rdev->config.si.backend_disable_mask_per_asic =
- si_get_disable_mask_per_asic(rdev, tmp, SI_MAX_BACKENDS_PER_SE_MASK,
- rdev->config.si.num_shader_engines);
- rdev->config.si.backend_map =
- si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
- rdev->config.si.num_backends_per_se *
- rdev->config.si.num_shader_engines,
- &rdev->config.si.backend_disable_mask_per_asic,
- rdev->config.si.num_shader_engines);
- tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
- rdev->config.si.num_texture_channel_caches = r600_count_pipe_bits(tmp);
rdev->config.si.mem_max_burst_length_bytes = 256;
tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
@@ -1683,55 +1601,8 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.num_gpus = 1;
rdev->config.si.multi_gpu_tile_size = 64;
- gb_addr_config = 0;
- switch (rdev->config.si.num_tile_pipes) {
- case 1:
- gb_addr_config |= NUM_PIPES(0);
- break;
- case 2:
- gb_addr_config |= NUM_PIPES(1);
- break;
- case 4:
- gb_addr_config |= NUM_PIPES(2);
- break;
- case 8:
- default:
- gb_addr_config |= NUM_PIPES(3);
- break;
- }
-
- tmp = (rdev->config.si.mem_max_burst_length_bytes / 256) - 1;
- gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
- gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.si.num_shader_engines - 1);
- tmp = (rdev->config.si.shader_engine_tile_size / 16) - 1;
- gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
- switch (rdev->config.si.num_gpus) {
- case 1:
- default:
- gb_addr_config |= NUM_GPUS(0);
- break;
- case 2:
- gb_addr_config |= NUM_GPUS(1);
- break;
- case 4:
- gb_addr_config |= NUM_GPUS(2);
- break;
- }
- switch (rdev->config.si.multi_gpu_tile_size) {
- case 16:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
- break;
- case 32:
- default:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
- break;
- case 64:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
- break;
- case 128:
- gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
- break;
- }
+ /* fix up row size */
+ gb_addr_config &= ~ROW_SIZE_MASK;
switch (rdev->config.si.mem_row_size_in_kb) {
case 1:
default:
@@ -1745,26 +1616,6 @@ static void si_gpu_init(struct radeon_device *rdev)
break;
}
- tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
- rdev->config.si.num_tile_pipes = (1 << tmp);
- tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
- rdev->config.si.mem_max_burst_length_bytes = (tmp + 1) * 256;
- tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
- rdev->config.si.num_shader_engines = tmp + 1;
- tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
- rdev->config.si.num_gpus = tmp + 1;
- tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
- rdev->config.si.multi_gpu_tile_size = 1 << tmp;
- tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
- rdev->config.si.mem_row_size_in_kb = 1 << tmp;
-
- gb_backend_map =
- si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
- rdev->config.si.num_backends_per_se *
- rdev->config.si.num_shader_engines,
- &rdev->config.si.backend_disable_mask_per_asic,
- rdev->config.si.num_shader_engines);
-
/* setup tiling info dword. gb_addr_config is not adequate since it does
* not have bank info, so create a custom tiling dword.
* bits 3:0 num_pipes
@@ -1789,33 +1640,29 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.tile_config |= (3 << 0);
break;
}
- rdev->config.si.tile_config |=
- ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
+ if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
+ rdev->config.si.tile_config |= 1 << 4;
+ else
+ rdev->config.si.tile_config |= 0 << 4;
rdev->config.si.tile_config |=
((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
rdev->config.si.tile_config |=
((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
- rdev->config.si.backend_map = gb_backend_map;
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
- /* primary versions */
- WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_GC_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
-
- WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
+ si_tiling_mode_table_init(rdev);
- /* user versions */
- WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(GC_USER_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
+ si_setup_rb(rdev, rdev->config.si.max_shader_engines,
+ rdev->config.si.max_sh_per_se,
+ rdev->config.si.max_backends_per_se);
- WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
+ si_setup_spi(rdev, rdev->config.si.max_shader_engines,
+ rdev->config.si.max_sh_per_se,
+ rdev->config.si.max_cu_per_sh);
- si_tiling_mode_table_init(rdev);
/* set HW defaults for 3D engine */
WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
@@ -2518,12 +2365,12 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
WREG32(0x15DC, 0);
/* empty context1-15 */
- /* FIXME start with 1G, once using 2 level pt switch to full
+ /* FIXME start with 4G, once using 2 level pt switch to full
* vm size space
*/
/* set vm size, must be a multiple of 4 */
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
- WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, (1 << 30) / RADEON_GPU_PAGE_SIZE);
+ WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
for (i = 1; i < 16; i++) {
if (i < 8)
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
diff --git a/drivers/gpu/drm/radeon/si_reg.h b/drivers/gpu/drm/radeon/si_reg.h
index eda938a..501f9d43 100644
--- a/drivers/gpu/drm/radeon/si_reg.h
+++ b/drivers/gpu/drm/radeon/si_reg.h
@@ -30,4 +30,76 @@
#define SI_DC_GPIO_HPD_EN 0x65b8
#define SI_DC_GPIO_HPD_Y 0x65bc
+#define SI_GRPH_CONTROL 0x6804
+# define SI_GRPH_DEPTH(x) (((x) & 0x3) << 0)
+# define SI_GRPH_DEPTH_8BPP 0
+# define SI_GRPH_DEPTH_16BPP 1
+# define SI_GRPH_DEPTH_32BPP 2
+# define SI_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
+# define SI_ADDR_SURF_2_BANK 0
+# define SI_ADDR_SURF_4_BANK 1
+# define SI_ADDR_SURF_8_BANK 2
+# define SI_ADDR_SURF_16_BANK 3
+# define SI_GRPH_Z(x) (((x) & 0x3) << 4)
+# define SI_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
+# define SI_ADDR_SURF_BANK_WIDTH_1 0
+# define SI_ADDR_SURF_BANK_WIDTH_2 1
+# define SI_ADDR_SURF_BANK_WIDTH_4 2
+# define SI_ADDR_SURF_BANK_WIDTH_8 3
+# define SI_GRPH_FORMAT(x) (((x) & 0x7) << 8)
+/* 8 BPP */
+# define SI_GRPH_FORMAT_INDEXED 0
+/* 16 BPP */
+# define SI_GRPH_FORMAT_ARGB1555 0
+# define SI_GRPH_FORMAT_ARGB565 1
+# define SI_GRPH_FORMAT_ARGB4444 2
+# define SI_GRPH_FORMAT_AI88 3
+# define SI_GRPH_FORMAT_MONO16 4
+# define SI_GRPH_FORMAT_BGRA5551 5
+/* 32 BPP */
+# define SI_GRPH_FORMAT_ARGB8888 0
+# define SI_GRPH_FORMAT_ARGB2101010 1
+# define SI_GRPH_FORMAT_32BPP_DIG 2
+# define SI_GRPH_FORMAT_8B_ARGB2101010 3
+# define SI_GRPH_FORMAT_BGRA1010102 4
+# define SI_GRPH_FORMAT_8B_BGRA1010102 5
+# define SI_GRPH_FORMAT_RGB111110 6
+# define SI_GRPH_FORMAT_BGR101111 7
+# define SI_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
+# define SI_ADDR_SURF_BANK_HEIGHT_1 0
+# define SI_ADDR_SURF_BANK_HEIGHT_2 1
+# define SI_ADDR_SURF_BANK_HEIGHT_4 2
+# define SI_ADDR_SURF_BANK_HEIGHT_8 3
+# define SI_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
+# define SI_ADDR_SURF_TILE_SPLIT_64B 0
+# define SI_ADDR_SURF_TILE_SPLIT_128B 1
+# define SI_ADDR_SURF_TILE_SPLIT_256B 2
+# define SI_ADDR_SURF_TILE_SPLIT_512B 3
+# define SI_ADDR_SURF_TILE_SPLIT_1KB 4
+# define SI_ADDR_SURF_TILE_SPLIT_2KB 5
+# define SI_ADDR_SURF_TILE_SPLIT_4KB 6
+# define SI_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_1 0
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_2 1
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_4 2
+# define SI_ADDR_SURF_MACRO_TILE_ASPECT_8 3
+# define SI_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
+# define SI_GRPH_ARRAY_LINEAR_GENERAL 0
+# define SI_GRPH_ARRAY_LINEAR_ALIGNED 1
+# define SI_GRPH_ARRAY_1D_TILED_THIN1 2
+# define SI_GRPH_ARRAY_2D_TILED_THIN1 4
+# define SI_GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
+# define SI_ADDR_SURF_P2 0
+# define SI_ADDR_SURF_P4_8x16 4
+# define SI_ADDR_SURF_P4_16x16 5
+# define SI_ADDR_SURF_P4_16x32 6
+# define SI_ADDR_SURF_P4_32x32 7
+# define SI_ADDR_SURF_P8_16x16_8x16 8
+# define SI_ADDR_SURF_P8_16x32_8x16 9
+# define SI_ADDR_SURF_P8_32x32_8x16 10
+# define SI_ADDR_SURF_P8_16x32_16x16 11
+# define SI_ADDR_SURF_P8_32x32_16x16 12
+# define SI_ADDR_SURF_P8_32x32_16x32 13
+# define SI_ADDR_SURF_P8_32x64_32x32 14
+
#endif
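The SI_GRPH_* additions above are plain shift-and-mask field builders. As a quick illustration of how they compose (a sketch, not code from the driver), the snippet below re-declares a few of the macros verbatim and packs a hypothetical 32bpp ARGB8888, 2D-tiled value the way a modesetting path might:

#include <stdio.h>

/* Copies of a few field macros from si_reg.h above. */
#define SI_GRPH_DEPTH(x)		(((x) & 0x3) << 0)
#define SI_GRPH_DEPTH_32BPP		2
#define SI_GRPH_FORMAT(x)		(((x) & 0x7) << 8)
#define SI_GRPH_FORMAT_ARGB8888		0
#define SI_GRPH_ARRAY_MODE(x)		(((x) & 0x7) << 20)
#define SI_GRPH_ARRAY_2D_TILED_THIN1	4

int main(void)
{
	/* Hypothetical SI_GRPH_CONTROL candidate: 32bpp ARGB8888, 2D tiled. */
	unsigned int ctl = SI_GRPH_DEPTH(SI_GRPH_DEPTH_32BPP) |
			   SI_GRPH_FORMAT(SI_GRPH_FORMAT_ARGB8888) |
			   SI_GRPH_ARRAY_MODE(SI_GRPH_ARRAY_2D_TILED_THIN1);

	printf("SI_GRPH_CONTROL candidate: 0x%08x\n", ctl);	/* 0x00400002 */
	return 0;
}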
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 53ea2c4..db40679 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -24,6 +24,11 @@
#ifndef SI_H
#define SI_H
+#define TAHITI_RB_BITMAP_WIDTH_PER_SH 2
+
+#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
+#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
+
#define CG_MULT_THERMAL_STATUS 0x714
#define ASIC_MAX_TEMP(x) ((x) << 0)
#define ASIC_MAX_TEMP_MASK 0x000001ff
@@ -408,6 +413,12 @@
#define SOFT_RESET_IA (1 << 15)
#define GRBM_GFX_INDEX 0x802C
+#define INSTANCE_INDEX(x) ((x) << 0)
+#define SH_INDEX(x) ((x) << 8)
+#define SE_INDEX(x) ((x) << 16)
+#define SH_BROADCAST_WRITES (1 << 29)
+#define INSTANCE_BROADCAST_WRITES (1 << 30)
+#define SE_BROADCAST_WRITES (1 << 31)
#define GRBM_INT_CNTL 0x8060
# define RDERR_INT_ENABLE (1 << 0)
@@ -480,6 +491,8 @@
#define VGT_TF_MEMORY_BASE 0x89B8
#define CC_GC_SHADER_ARRAY_CONFIG 0x89bc
+#define INACTIVE_CUS_MASK 0xFFFF0000
+#define INACTIVE_CUS_SHIFT 16
#define GC_USER_SHADER_ARRAY_CONFIG 0x89c0
#define PA_CL_ENHANCE 0x8A14
@@ -688,6 +701,12 @@
#define RLC_MC_CNTL 0xC344
#define RLC_UCODE_CNTL 0xC348
+#define PA_SC_RASTER_CONFIG 0x28350
+# define RASTER_CONFIG_RB_MAP_0 0
+# define RASTER_CONFIG_RB_MAP_1 1
+# define RASTER_CONFIG_RB_MAP_2 2
+# define RASTER_CONFIG_RB_MAP_3 3
+
#define VGT_EVENT_INITIATOR 0x28a90
# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
# define SAMPLE_STREAMOUTSTATS2 (2 << 0)
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 30d98d1..dd14cd1 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -47,9 +47,9 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv == NULL)
return -ENOMEM;
+ idr_init(&dev_priv->object_idr);
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = chipset;
- idr_init(&dev->object_name_idr);
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 36792bd..36f4b28 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1204,6 +1204,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
(*destroy)(bo);
else
kfree(bo);
+ ttm_mem_global_free(mem_glob, acc_size);
return -EINVAL;
}
bo->destroy = destroy;
@@ -1307,22 +1308,14 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
struct ttm_buffer_object **p_bo)
{
struct ttm_buffer_object *bo;
- struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
size_t acc_size;
int ret;
- acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
- ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
- if (unlikely(ret != 0))
- return ret;
-
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-
- if (unlikely(bo == NULL)) {
- ttm_mem_global_free(mem_glob, acc_size);
+ if (unlikely(bo == NULL))
return -ENOMEM;
- }
+ acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
buffer_start, interruptible,
persistent_swap_storage, acc_size, NULL, NULL);
@@ -1834,6 +1827,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
spin_unlock(&glob->lru_lock);
(void) ttm_bo_cleanup_refs(bo, false, false, false);
kref_put(&bo->list_kref, ttm_bo_release_list);
+ spin_lock(&glob->lru_lock);
continue;
}
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 4d02c46..6e52069 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -13,8 +13,21 @@
static struct drm_driver driver;
+/*
+ * There are many DisplayLink-based graphics products, all with unique PIDs.
+ * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff).
+ * We also require a match on SubClass (0x00) and Protocol (0x00), which is
+ * compatible with all known USB 2.0 era graphics chips and firmware, but
+ * allows DisplayLink to increment those for any future incompatible chips.
+ */
static struct usb_device_id id_table[] = {
- {.idVendor = 0x17e9, .match_flags = USB_DEVICE_ID_MATCH_VENDOR,},
+ {.idVendor = 0x17e9, .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0x00,
+ .bInterfaceProtocol = 0x00,
+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS |
+ USB_DEVICE_ID_MATCH_INT_PROTOCOL,},
{},
};
MODULE_DEVICE_TABLE(usb, id_table);
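The tighter udl match above simply adds more USB match flags; conceptually, each flagged field must compare equal before the interface is offered to the driver. Below is a rough user-space sketch of that per-field check, using hypothetical, simplified stand-ins for the usbcore structures (not the real matching code), to show why an interface of a different class on the same DisplayLink device would no longer be picked up:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the usbcore matching data. */
struct iface { unsigned short vendor; unsigned char cls, subcls, proto; };
struct match {
	unsigned int flags;	/* which of the fields below must match */
	unsigned short vendor;
	unsigned char cls, subcls, proto;
};

#define MATCH_VENDOR		(1u << 0)
#define MATCH_INT_CLASS		(1u << 1)
#define MATCH_INT_SUBCLASS	(1u << 2)
#define MATCH_INT_PROTOCOL	(1u << 3)

static bool matches(const struct iface *i, const struct match *m)
{
	if ((m->flags & MATCH_VENDOR) && i->vendor != m->vendor)
		return false;
	if ((m->flags & MATCH_INT_CLASS) && i->cls != m->cls)
		return false;
	if ((m->flags & MATCH_INT_SUBCLASS) && i->subcls != m->subcls)
		return false;
	if ((m->flags & MATCH_INT_PROTOCOL) && i->proto != m->proto)
		return false;
	return true;
}

int main(void)
{
	/* DisplayLink VID, vendor-specific class, subclass 0, protocol 0. */
	struct match udl = { MATCH_VENDOR | MATCH_INT_CLASS |
			     MATCH_INT_SUBCLASS | MATCH_INT_PROTOCOL,
			     0x17e9, 0xff, 0x00, 0x00 };
	struct iface graphics = { 0x17e9, 0xff, 0x00, 0x00 };
	struct iface other    = { 0x17e9, 0x01, 0x00, 0x00 };

	printf("vendor-class iface: %s\n", matches(&graphics, &udl) ? "bind" : "skip");
	printf("other-class iface:  %s\n", matches(&other, &udl) ? "bind" : "skip");
	return 0;
}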
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index a029ee3..ce9a611 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -156,8 +156,17 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
if (!fb->active_16)
return 0;
- if (!fb->obj->vmapping)
- udl_gem_vmap(fb->obj);
+ if (!fb->obj->vmapping) {
+ ret = udl_gem_vmap(fb->obj);
+ if (ret == -ENOMEM) {
+ DRM_ERROR("failed to vmap fb\n");
+ return 0;
+ }
+ if (!fb->obj->vmapping) {
+ DRM_ERROR("failed to vmapping\n");
+ return 0;
+ }
+ }
start_cycles = get_cycles();
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 40efd32..7bd65bd 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -180,6 +180,18 @@ int udl_gem_vmap(struct udl_gem_object *obj)
int page_count = obj->base.size / PAGE_SIZE;
int ret;
+ if (obj->base.import_attach) {
+ ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
+ 0, obj->base.size, DMA_BIDIRECTIONAL);
+ if (ret)
+ return -EINVAL;
+
+ obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
+ if (!obj->vmapping)
+ return -ENOMEM;
+ return 0;
+ }
+
ret = udl_gem_get_pages(obj, GFP_KERNEL);
if (ret)
return ret;
@@ -192,6 +204,13 @@ int udl_gem_vmap(struct udl_gem_object *obj)
void udl_gem_vunmap(struct udl_gem_object *obj)
{
+ if (obj->base.import_attach) {
+ dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
+ dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
+ obj->base.size, DMA_BIDIRECTIONAL);
+ return;
+ }
+
if (obj->vmapping)
vunmap(obj->vmapping);
@@ -202,12 +221,12 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
struct udl_gem_object *obj = to_udl_bo(gem_obj);
- if (gem_obj->import_attach)
- drm_prime_gem_destroy(gem_obj, obj->sg);
-
if (obj->vmapping)
udl_gem_vunmap(obj);
+ if (gem_obj->import_attach)
+ drm_prime_gem_destroy(gem_obj, obj->sg);
+
if (obj->pages)
udl_gem_put_pages(obj);
@@ -234,7 +253,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
ret = udl_gem_get_pages(gobj, GFP_KERNEL);
if (ret)
- return ret;
+ goto out;
if (!gobj->base.map_list.map) {
ret = drm_gem_create_mmap_offset(obj);
if (ret)
@@ -257,8 +276,6 @@ static int udl_prime_create(struct drm_device *dev,
{
struct udl_gem_object *obj;
int npages;
- int i;
- struct scatterlist *iter;
npages = size / PAGE_SIZE;
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index a8d5f09..4c2d836 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -61,7 +61,7 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev,
u8 length;
u16 key;
- key = *((u16 *) desc);
+ key = le16_to_cpu(*((u16 *) desc));
desc += sizeof(u16);
length = *desc;
desc++;
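The le16_to_cpu() fix above only changes behaviour on big-endian hosts, where dereferencing the raw little-endian descriptor bytes as a native u16 would byte-swap the key. A tiny endian-neutral sketch of the same parse, over a hypothetical buffer rather than the driver's descriptor:

#include <stdint.h>
#include <stdio.h>

/* Assemble a little-endian u16 from raw bytes, independent of host order. */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* Hypothetical vendor-descriptor fragment: key 0x0200, length 3. */
	const uint8_t desc[] = { 0x00, 0x02, 0x03, 0xaa, 0xbb, 0xcc };
	uint16_t key = get_le16(desc);
	uint8_t length = desc[2];

	printf("key=0x%04x length=%u\n", key, length);
	return 0;
}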
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 1f18225..c126182 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -100,12 +100,11 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv == NULL)
return -ENOMEM;
+ idr_init(&dev_priv->object_idr);
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = chipset;
- idr_init(&dev->object_name_idr);
-
pci_set_master(dev->pdev);
ret = drm_vblank_init(dev, 1);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 51c9ba5..21ee782 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -66,7 +66,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
cmd += sizeof(remap_cmd) / sizeof(uint32);
for (i = 0; i < num_pages; ++i) {
- if (VMW_PPN_SIZE > 4)
+ if (VMW_PPN_SIZE <= 4)
*cmd = page_to_pfn(*pages++);
else
*((uint64_t *)cmd) = page_to_pfn(*pages++);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 38f9534..5b3c7d1 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -190,6 +190,19 @@ find_active_client(struct list_head *head)
return NULL;
}
+int vga_switcheroo_get_client_state(struct pci_dev *pdev)
+{
+ struct vga_switcheroo_client *client;
+
+ client = find_client_from_pci(&vgasr_priv.clients, pdev);
+ if (!client)
+ return VGA_SWITCHEROO_NOT_FOUND;
+ if (!vgasr_priv.active)
+ return VGA_SWITCHEROO_INIT;
+ return client->pwr_state;
+}
+EXPORT_SYMBOL(vga_switcheroo_get_client_state);
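The new export gives GPU drivers a way to ask, early in probe, whether switcheroo has already powered them down. The fragment below is a hedged sketch of such a caller: the helper name is made up, it is not part of this patch, and it is shown as a standalone fragment rather than buildable driver code; only vga_switcheroo_get_client_state() and the VGA_SWITCHEROO_* states used above are taken from the source.

#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

/* Hypothetical helper in a GPU driver's probe path (sketch only). */
static bool my_gpu_starts_powered_off(struct pci_dev *pdev)
{
	switch (vga_switcheroo_get_client_state(pdev)) {
	case VGA_SWITCHEROO_OFF:
		/* Switcheroo already cut power; defer the heavy init. */
		return true;
	case VGA_SWITCHEROO_NOT_FOUND:	/* not registered with switcheroo */
	case VGA_SWITCHEROO_INIT:	/* no active client chosen yet */
	default:
		return false;
	}
}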
+
void vga_switcheroo_unregister_client(struct pci_dev *pdev)
{
struct vga_switcheroo_client *client;
@@ -291,8 +304,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
vga_switchon(new_client);
vga_set_default_device(new_client->pdev);
- set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
-
return 0;
}
@@ -308,6 +319,8 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
active->active = false;
+ set_audio_state(active->id, VGA_SWITCHEROO_OFF);
+
if (new_client->fb_info) {
struct fb_event event;
event.info = new_client->fb_info;
@@ -321,11 +334,11 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
if (new_client->ops->reprobe)
new_client->ops->reprobe(new_client->pdev);
- set_audio_state(active->id, VGA_SWITCHEROO_OFF);
-
if (active->pwr_state == VGA_SWITCHEROO_ON)
vga_switchoff(active);
+ set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
+
new_client->active = true;
return 0;
}
@@ -371,8 +384,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
/* pwr off the device not in use */
if (strncmp(usercmd, "OFF", 3) == 0) {
list_for_each_entry(client, &vgasr_priv.clients, list) {
- if (client->active)
+ if (client->active || client_is_audio(client))
continue;
+ set_audio_state(client->id, VGA_SWITCHEROO_OFF);
if (client->pwr_state == VGA_SWITCHEROO_ON)
vga_switchoff(client);
}
@@ -381,10 +395,11 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
/* pwr on the device not in use */
if (strncmp(usercmd, "ON", 2) == 0) {
list_for_each_entry(client, &vgasr_priv.clients, list) {
- if (client->active)
+ if (client->active || client_is_audio(client))
continue;
if (client->pwr_state == VGA_SWITCHEROO_OFF)
vga_switchon(client);
+ set_audio_state(client->id, VGA_SWITCHEROO_ON);
}
goto out;
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 034c80a..fbf4950 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -1,20 +1,11 @@
#
# HID driver configuration
#
-menuconfig HID_SUPPORT
- bool "HID Devices"
- depends on INPUT
- default y
- ---help---
- Say Y here to get to see options for various computer-human interface
- device drivers. This option alone does not add any kernel code.
-
- If you say N, all options in this submenu will be skipped and disabled.
-
-if HID_SUPPORT
+menu "HID support"
+ depends on INPUT
config HID
- tristate "Generic HID support"
+ tristate "HID bus support"
depends on INPUT
default y
---help---
@@ -23,14 +14,17 @@ config HID
most commonly used to refer to the USB-HID specification, but other
devices (such as, but not strictly limited to, Bluetooth) are
designed using HID specification (this involves certain keyboards,
- mice, tablets, etc). This option compiles into kernel the generic
- HID layer code (parser, usages, etc.), which can then be used by
- transport-specific HID implementation (like USB or Bluetooth).
+ mice, tablets, etc). This option adds the HID bus to the kernel,
+ together with generic HID layer code. The HID devices are added and
+ removed from the HID bus by the transport-layer drivers, such as
+ usbhid (USB_HID) and hidp (BT_HIDP).
For docs and specs, see http://www.usb.org/developers/hidpage/
If unsure, say Y.
+if HID
+
config HID_BATTERY_STRENGTH
bool "Battery level reporting for HID devices"
depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
@@ -59,23 +53,43 @@ config HIDRAW
If unsure, say Y.
-source "drivers/hid/usbhid/Kconfig"
-
-menu "Special HID drivers"
+config UHID
+ tristate "User-space I/O driver support for HID subsystem"
depends on HID
+ default n
+ ---help---
+ Say Y here if you want to provide HID I/O Drivers from user-space.
+ This allows you to write I/O drivers in user-space and feed the data
+ from the device into the kernel. The kernel parses the HID reports and
+ loads the corresponding HID device driver, or provides input devices on
+ top of your user-space device.
+
+ This driver is not meant for parsing HID reports in user-space or for
+ writing special HID drivers; use hidraw for that. Instead, it lets you
+ write the transport-layer driver in user-space, the way USB-HID and
+ Bluetooth-HID do in kernel-space.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called uhid.
config HID_GENERIC
tristate "Generic HID driver"
depends on HID
- default y
+ default HID
---help---
- Support for generic HID devices.
+ Support for generic devices on the HID bus. This includes most
+ keyboards and mice, joysticks, tablets and digitizers.
To compile this driver as a module, choose M here: the module
will be called hid-generic.
If unsure, say Y.
+menu "Special HID drivers"
+ depends on HID
+
config HID_A4TECH
tristate "A4 tech mice" if EXPERT
depends on USB_HID
@@ -200,10 +214,12 @@ config HID_EZKEY
Support for Ezkey BTC 8193 keyboard.
config HID_HOLTEK
- tristate "Holtek On Line Grip based game controller support"
+ tristate "Holtek HID devices"
depends on USB_HID
---help---
- Say Y here if you have a Holtek On Line Grip based game controller.
+ Support for Holtek based devices:
+ - Holtek On Line Grip based game controller
+ - Trust GXT 18 Gaming Keyboard
config HOLTEK_FF
bool "Holtek On Line Grip force feedback support"
@@ -268,6 +284,19 @@ config HID_LCPOWER
---help---
Support for LC-Power RC1000MCE RF remote control.
+config HID_LENOVO_TPKBD
+ tristate "Lenovo ThinkPad USB Keyboard with TrackPoint"
+ depends on USB_HID
+ select NEW_LEDS
+ select LEDS_CLASS
+ ---help---
+ Support for the Lenovo ThinkPad USB Keyboard with TrackPoint.
+
+ Say Y here if you have a Lenovo ThinkPad USB Keyboard with TrackPoint
+ and would like to use device-specific features like changing the
+ sensitivity of the trackpoint, using the microphone mute button or
+ controlling the mute and microphone mute LEDs.
+
config HID_LOGITECH
tristate "Logitech devices" if EXPERT
depends on USB_HID
@@ -393,6 +422,7 @@ config HID_MULTITOUCH
- Unitec Panels
- XAT optical touch panels
- Xiroku optical touch panels
+ - Zytronic touch panels
If unsure, say N.
@@ -662,4 +692,8 @@ config HID_ZYDACRON
endmenu
-endif # HID_SUPPORT
+endif # HID
+
+source "drivers/hid/usbhid/Kconfig"
+
+endmenu
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index ca6cc9f..f975485 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -8,6 +8,7 @@ ifdef CONFIG_DEBUG_FS
endif
obj-$(CONFIG_HID) += hid.o
+obj-$(CONFIG_UHID) += uhid.o
obj-$(CONFIG_HID_GENERIC) += hid-generic.o
@@ -48,12 +49,14 @@ obj-$(CONFIG_HID_EMS_FF) += hid-emsff.o
obj-$(CONFIG_HID_ELECOM) += hid-elecom.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
+obj-$(CONFIG_HID_HOLTEK) += hid-holtek-kbd.o
obj-$(CONFIG_HID_HOLTEK) += hid-holtekff.o
obj-$(CONFIG_HID_HYPERV_MOUSE) += hid-hyperv.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o
+obj-$(CONFIG_HID_LENOVO_TPKBD) += hid-lenovo-tpkbd.o
obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
@@ -69,7 +72,8 @@ obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o hid-roccat-common.o \
hid-roccat-arvo.o hid-roccat-isku.o hid-roccat-kone.o \
- hid-roccat-koneplus.o hid-roccat-kovaplus.o hid-roccat-pyra.o
+ hid-roccat-koneplus.o hid-roccat-kovaplus.o hid-roccat-pyra.o \
+ hid-roccat-savu.o
obj-$(CONFIG_HID_SAITEK) += hid-saitek.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index fa10f84..585344b 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -517,6 +517,12 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
index b99af34..a2abb8e 100644
--- a/drivers/hid/hid-chicony.c
+++ b/drivers/hid/hid-chicony.c
@@ -60,6 +60,7 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
static const struct hid_device_id ch_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
{ }
};
MODULE_DEVICE_TABLE(hid, ch_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 8e3a6b2..500844f 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1194,8 +1194,10 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
goto out;
}
- for (a = 0; a < report->maxfield; a++)
- hid_input_field(hid, report->field[a], cdata, interrupt);
+ if (hid->claimed != HID_CLAIMED_HIDRAW) {
+ for (a = 0; a < report->maxfield; a++)
+ hid_input_field(hid, report->field[a], cdata, interrupt);
+ }
if (hid->claimed & HID_CLAIMED_INPUT)
hidinput_report_event(hid, report);
@@ -1243,6 +1245,10 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
goto unlock;
}
+ /* Avoid unnecessary overhead if debugfs is disabled */
+ if (list_empty(&hid->debug_list))
+ goto nomem;
+
buf = kmalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC);
if (!buf)
@@ -1373,8 +1379,10 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
hdev->claimed |= HID_CLAIMED_HIDRAW;
- if (!hdev->claimed) {
- hid_err(hdev, "claimed by neither input, hiddev nor hidraw\n");
+ /* Drivers with the ->raw_event callback set are not required to connect
+ * to any other listener. */
+ if (!hdev->claimed && !hdev->driver->raw_event) {
+ hid_err(hdev, "device has no listeners, quitting\n");
return -ENODEV;
}
@@ -1503,6 +1511,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -1518,10 +1529,12 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
@@ -1536,6 +1549,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
@@ -1544,6 +1558,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
@@ -1617,6 +1632,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
@@ -1880,6 +1896,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM)},
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_LCM2)},
{ HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
@@ -1994,6 +2011,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
@@ -2088,6 +2106,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ }
diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
index 2f0be4c..9e43aac 100644
--- a/drivers/hid/hid-cypress.c
+++ b/drivers/hid/hid-cypress.c
@@ -129,6 +129,8 @@ static const struct hid_device_id cp_devices[] = {
.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3),
.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4),
+ .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE),
.driver_data = CP_2WHEEL_MOUSE_HACK },
{ }
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
new file mode 100644
index 0000000..e0a5d17
--- /dev/null
+++ b/drivers/hid/hid-holtek-kbd.c
@@ -0,0 +1,183 @@
+/*
+ * HID driver for Holtek keyboard
+ * Copyright (c) 2012 Tom Harwood
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "hid-ids.h"
+#include "usbhid/usbhid.h"
+
+/* Holtek based keyboards (USB ID 04d9:a055) have the following issues:
+ * - The report descriptor specifies an excessively large number of consumer
+ * usages (2^15), which is more than HID_MAX_USAGES. This prevents proper
+ * parsing of the report descriptor.
+ * - The report descriptor reports on caps/scroll/num lock key presses, but
+ * doesn't have an LED output usage block.
+ *
+ * The replacement descriptor below fixes the number of consumer usages,
+ * and provides an LED output usage block. LED output events are redirected
+ * to the boot interface.
+ */
+
+static __u8 holtek_kbd_rdesc_fixed[] = {
+ /* Original report descriptor, with reduced number of consumer usages */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x80, /* Usage (Sys Control), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x01, /* Report ID (1), */
+ 0x19, 0x81, /* Usage Minimum (Sys Power Down), */
+ 0x29, 0x83, /* Usage Maximum (Sys Wake Up), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x05, /* Report Size (5), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0C, /* Usage Page (Consumer), */
+ 0x09, 0x01, /* Usage (Consumer Control), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x02, /* Report ID (2), */
+ 0x19, 0x00, /* Usage Minimum (00h), */
+ 0x2A, 0xFF, 0x2F, /* Usage Maximum (0x2FFF), previously 0x7FFF */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x26, 0xFF, 0x2F, /* Logical Maximum (0x2FFF), previously 0x7FFF */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x81, 0x00, /* Input, */
+ 0xC0, /* End Collection, */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x06, /* Usage (Keyboard), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x03, /* Report ID (3), */
+ 0x95, 0x38, /* Report Count (56), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x05, 0x07, /* Usage Page (Keyboard), */
+ 0x19, 0xE0, /* Usage Minimum (KB Leftcontrol), */
+ 0x29, 0xE7, /* Usage Maximum (KB Right GUI), */
+ 0x19, 0x00, /* Usage Minimum (None), */
+ 0x29, 0x2F, /* Usage Maximum (KB Lboxbracket And Lbrace),*/
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x06, /* Usage (Keyboard), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x04, /* Report ID (4), */
+ 0x95, 0x38, /* Report Count (56), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x05, 0x07, /* Usage Page (Keyboard), */
+ 0x19, 0x30, /* Usage Minimum (KB Rboxbracket And Rbrace),*/
+ 0x29, 0x67, /* Usage Maximum (KP Equals), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection */
+
+ /* LED usage for the boot protocol interface */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x06, /* Usage (Keyboard), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x05, 0x08, /* Usage Page (LED), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x03, /* Usage Maximum (03h), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x91, 0x02, /* Output (Variable), */
+ 0x95, 0x05, /* Report Count (5), */
+ 0x91, 0x01, /* Output (Constant), */
+ 0xC0, /* End Collection */
+};
+
+static __u8 *holtek_kbd_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
+ if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
+ rdesc = holtek_kbd_rdesc_fixed;
+ *rsize = sizeof(holtek_kbd_rdesc_fixed);
+ }
+ return rdesc;
+}
+
+static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
+ unsigned int code,
+ int value)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct usb_device *usb_dev = hid_to_usb_dev(hid);
+
+ /* Locate the boot interface, to receive the LED change events */
+ struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0);
+
+ struct hid_device *boot_hid = usb_get_intfdata(boot_interface);
+ struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs,
+ struct hid_input, list);
+
+ return boot_hid_input->input->event(boot_hid_input->input, type, code,
+ value);
+}
+
+static int holtek_kbd_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ int ret = hid_parse(hdev);
+
+ if (!ret)
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+ if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) {
+ struct hid_input *hidinput;
+ list_for_each_entry(hidinput, &hdev->inputs, list) {
+ hidinput->input->event = holtek_kbd_input_event;
+ }
+ }
+
+ return ret;
+}
+
+static const struct hid_device_id holtek_kbd_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
+ USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, holtek_kbd_devices);
+
+static struct hid_driver holtek_kbd_driver = {
+ .name = "holtek_kbd",
+ .id_table = holtek_kbd_devices,
+ .report_fixup = holtek_kbd_report_fixup,
+ .probe = holtek_kbd_probe
+};
+
+static int __init holtek_kbd_init(void)
+{
+ return hid_register_driver(&holtek_kbd_driver);
+}
+
+static void __exit holtek_kbd_exit(void)
+{
+ hid_unregister_driver(&holtek_kbd_driver);
+}
+
+module_exit(holtek_kbd_exit);
+module_init(holtek_kbd_init);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 9373f53..41c34f2 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -125,6 +125,9 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c
#define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d
#define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
@@ -160,6 +163,9 @@
#define USB_VENDOR_ID_AVERMEDIA 0x07ca
#define USB_DEVICE_ID_AVER_FM_MR800 0xb800
+#define USB_VENDOR_ID_AXENTIA 0x12cf
+#define USB_DEVICE_ID_AXENTIA_FM_RADIO 0x7111
+
#define USB_VENDOR_ID_BAANTO 0x2453
#define USB_DEVICE_ID_BAANTO_MT_190W2 0x0100
@@ -202,6 +208,7 @@
#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
#define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
#define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123
+#define USB_DEVICE_ID_CHICONY_AK1D 0x1125
#define USB_VENDOR_ID_CHUNGHWAT 0x2247
#define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001
@@ -231,6 +238,7 @@
#define USB_DEVICE_ID_CYPRESS_BARCODE_1 0xde61
#define USB_DEVICE_ID_CYPRESS_BARCODE_2 0xde64
#define USB_DEVICE_ID_CYPRESS_BARCODE_3 0xbca1
+#define USB_DEVICE_ID_CYPRESS_BARCODE_4 0xed81
#define USB_DEVICE_ID_CYPRESS_TRUETOUCH 0xc001
#define USB_VENDOR_ID_DEALEXTREAME 0x10c5
@@ -404,6 +412,9 @@
#define USB_VENDOR_ID_HOLTEK 0x1241
#define USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP 0x5015
+#define USB_VENDOR_ID_HOLTEK_ALT 0x04d9
+#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055
+
#define USB_VENDOR_ID_IMATION 0x0718
#define USB_DEVICE_ID_DISC_STAKKA 0xd000
@@ -473,6 +484,9 @@
#define USB_DEVICE_ID_LD_HYBRID 0x2090
#define USB_DEVICE_ID_LD_HEATCONTROL 0x20A0
+#define USB_VENDOR_ID_LENOVO 0x17ef
+#define USB_DEVICE_ID_LENOVO_TPKBD 0x6009
+
#define USB_VENDOR_ID_LG 0x1fd2
#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
@@ -515,6 +529,9 @@
#define USB_DEVICE_ID_CRYSTALTOUCH 0x0006
#define USB_DEVICE_ID_CRYSTALTOUCH_DUAL 0x0007
+#define USB_VENDOR_ID_MADCATZ 0x0738
+#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
+
#define USB_VENDOR_ID_MCC 0x09db
#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
#define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
@@ -564,6 +581,9 @@
#define USB_VENDOR_ID_NINTENDO 0x057e
#define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306
+#define USB_VENDOR_ID_NOVATEK 0x0603
+#define USB_DEVICE_ID_NOVATEK_PCT 0x0600
+
#define USB_VENDOR_ID_NTRIG 0x1b96
#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN 0x0001
#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1 0x0003
@@ -641,6 +661,7 @@
#define USB_DEVICE_ID_ROCCAT_KOVAPLUS 0x2d50
#define USB_DEVICE_ID_ROCCAT_PYRA_WIRED 0x2c24
#define USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS 0x2cf6
+#define USB_DEVICE_ID_ROCCAT_SAVU 0x2d5a
#define USB_VENDOR_ID_SAITEK 0x06a3
#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
@@ -650,6 +671,9 @@
#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
#define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600
+#define USB_VENDOR_ID_SENNHEISER 0x1395
+#define USB_DEVICE_ID_SENNHEISER_BTD500USB 0x002c
+
#define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f
#define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002
@@ -799,6 +823,9 @@
#define USB_VENDOR_ID_ZYDACRON 0x13EC
#define USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL 0x0006
+#define USB_VENDOR_ID_ZYTRONIC 0x14c8
+#define USB_DEVICE_ID_ZYTRONIC_ZXY100 0x0005
+
#define USB_VENDOR_ID_PRIMAX 0x0461
#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 132b001..811bfad 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -301,6 +301,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
+ HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
{}
};
@@ -834,6 +837,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
}
break;
+ case HID_UP_HPVENDOR2:
+ set_bit(EV_REP, input->evbit);
+ switch (usage->hid & HID_USAGE) {
+ case 0x003: map_key_clear(KEY_BRIGHTNESSDOWN); break;
+ case 0x004: map_key_clear(KEY_BRIGHTNESSUP); break;
+ default: goto ignore;
+ }
+ break;
+
case HID_UP_MSVENDOR:
goto ignore;
diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c
new file mode 100644
index 0000000..77d2df0
--- /dev/null
+++ b/drivers/hid/hid-lenovo-tpkbd.c
@@ -0,0 +1,564 @@
+/*
+ * HID driver for Lenovo ThinkPad USB Keyboard with TrackPoint
+ *
+ * Copyright (c) 2012 Bernhard Seibold
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/usb.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/leds.h>
+#include "usbhid/usbhid.h"
+
+#include "hid-ids.h"
+
+/* This is only used for the trackpoint part of the driver, hence _tp */
+struct tpkbd_data_pointer {
+ int led_state;
+ struct led_classdev led_mute;
+ struct led_classdev led_micmute;
+ int press_to_select;
+ int dragging;
+ int release_to_select;
+ int select_right;
+ int sensitivity;
+ int press_speed;
+};
+
+#define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
+
+static int tpkbd_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit, int *max)
+{
+ struct usbhid_device *uhdev;
+
+ uhdev = (struct usbhid_device *) hdev->driver_data;
+ if (uhdev->ifnum == 1 && usage->hid == (HID_UP_BUTTON | 0x0010)) {
+ map_key_clear(KEY_MICMUTE);
+ return 1;
+ }
+ return 0;
+}
+
+#undef map_key_clear
+
+static int tpkbd_features_set(struct hid_device *hdev)
+{
+ struct hid_report *report;
+ struct tpkbd_data_pointer *data_pointer;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+ report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[4];
+
+ report->field[0]->value[0] = data_pointer->press_to_select ? 0x01 : 0x02;
+ report->field[0]->value[0] |= data_pointer->dragging ? 0x04 : 0x08;
+ report->field[0]->value[0] |= data_pointer->release_to_select ? 0x10 : 0x20;
+ report->field[0]->value[0] |= data_pointer->select_right ? 0x80 : 0x40;
+ report->field[1]->value[0] = 0x03; /* unknown setting, imitate the Windows driver */
+ report->field[2]->value[0] = data_pointer->sensitivity;
+ report->field[3]->value[0] = data_pointer->press_speed;
+
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+ return 0;
+}
+
+static ssize_t pointer_press_to_select_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->press_to_select);
+}
+
+static ssize_t pointer_press_to_select_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int value;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (kstrtoint(buf, 10, &value))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ data_pointer->press_to_select = value;
+ tpkbd_features_set(hdev);
+
+ return count;
+}
+
+static ssize_t pointer_dragging_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->dragging);
+}
+
+static ssize_t pointer_dragging_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int value;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (kstrtoint(buf, 10, &value))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ data_pointer->dragging = value;
+ tpkbd_features_set(hdev);
+
+ return count;
+}
+
+static ssize_t pointer_release_to_select_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->release_to_select);
+}
+
+static ssize_t pointer_release_to_select_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int value;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (kstrtoint(buf, 10, &value))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ data_pointer->release_to_select = value;
+ tpkbd_features_set(hdev);
+
+ return count;
+}
+
+static ssize_t pointer_select_right_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->select_right);
+}
+
+static ssize_t pointer_select_right_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int value;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (kstrtoint(buf, 10, &value))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ data_pointer->select_right = value;
+ tpkbd_features_set(hdev);
+
+ return count;
+}
+
+static ssize_t pointer_sensitivity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ data_pointer->sensitivity);
+}
+
+static ssize_t pointer_sensitivity_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int value;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (kstrtoint(buf, 10, &value) || value < 1 || value > 255)
+ return -EINVAL;
+
+ data_pointer->sensitivity = value;
+ tpkbd_features_set(hdev);
+
+ return count;
+}
+
+static ssize_t pointer_press_speed_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ data_pointer->press_speed);
+}
+
+static ssize_t pointer_press_speed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int value;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (kstrtoint(buf, 10, &value) || value < 1 || value > 255)
+ return -EINVAL;
+
+ data_pointer->press_speed = value;
+ tpkbd_features_set(hdev);
+
+ return count;
+}
+
+static struct device_attribute dev_attr_pointer_press_to_select =
+ __ATTR(press_to_select, S_IWUSR | S_IRUGO,
+ pointer_press_to_select_show,
+ pointer_press_to_select_store);
+
+static struct device_attribute dev_attr_pointer_dragging =
+ __ATTR(dragging, S_IWUSR | S_IRUGO,
+ pointer_dragging_show,
+ pointer_dragging_store);
+
+static struct device_attribute dev_attr_pointer_release_to_select =
+ __ATTR(release_to_select, S_IWUSR | S_IRUGO,
+ pointer_release_to_select_show,
+ pointer_release_to_select_store);
+
+static struct device_attribute dev_attr_pointer_select_right =
+ __ATTR(select_right, S_IWUSR | S_IRUGO,
+ pointer_select_right_show,
+ pointer_select_right_store);
+
+static struct device_attribute dev_attr_pointer_sensitivity =
+ __ATTR(sensitivity, S_IWUSR | S_IRUGO,
+ pointer_sensitivity_show,
+ pointer_sensitivity_store);
+
+static struct device_attribute dev_attr_pointer_press_speed =
+ __ATTR(press_speed, S_IWUSR | S_IRUGO,
+ pointer_press_speed_show,
+ pointer_press_speed_store);
+
+static struct attribute *tpkbd_attributes_pointer[] = {
+ &dev_attr_pointer_press_to_select.attr,
+ &dev_attr_pointer_dragging.attr,
+ &dev_attr_pointer_release_to_select.attr,
+ &dev_attr_pointer_select_right.attr,
+ &dev_attr_pointer_sensitivity.attr,
+ &dev_attr_pointer_press_speed.attr,
+ NULL
+};
+
+static const struct attribute_group tpkbd_attr_group_pointer = {
+ .attrs = tpkbd_attributes_pointer,
+};
+
+static enum led_brightness tpkbd_led_brightness_get(
+ struct led_classdev *led_cdev)
+{
+ struct device *dev;
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int led_nr = 0;
+
+ dev = led_cdev->dev->parent;
+ hdev = container_of(dev, struct hid_device, dev);
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (led_cdev == &data_pointer->led_micmute)
+ led_nr = 1;
+
+ return data_pointer->led_state & (1 << led_nr)
+ ? LED_FULL
+ : LED_OFF;
+}
+
+static void tpkbd_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct device *dev;
+ struct hid_device *hdev;
+ struct hid_report *report;
+ struct tpkbd_data_pointer *data_pointer;
+ int led_nr = 0;
+
+ dev = led_cdev->dev->parent;
+ hdev = container_of(dev, struct hid_device, dev);
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (led_cdev == &data_pointer->led_micmute)
+ led_nr = 1;
+
+ if (value == LED_OFF)
+ data_pointer->led_state &= ~(1 << led_nr);
+ else
+ data_pointer->led_state |= 1 << led_nr;
+
+ report = hdev->report_enum[HID_OUTPUT_REPORT].report_id_hash[3];
+ report->field[0]->value[0] = (data_pointer->led_state >> 0) & 1;
+ report->field[0]->value[1] = (data_pointer->led_state >> 1) & 1;
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+}
+
+static int tpkbd_probe_tp(struct hid_device *hdev)
+{
+ struct device *dev = &hdev->dev;
+ struct tpkbd_data_pointer *data_pointer;
+ size_t name_sz = strlen(dev_name(dev)) + 16;
+ char *name_mute, *name_micmute;
+ int ret;
+
+ if (sysfs_create_group(&hdev->dev.kobj,
+ &tpkbd_attr_group_pointer)) {
+ hid_warn(hdev, "Could not create sysfs group\n");
+ }
+
+ data_pointer = kzalloc(sizeof(struct tpkbd_data_pointer), GFP_KERNEL);
+ if (data_pointer == NULL) {
+ hid_err(hdev, "Could not allocate memory for driver data\n");
+ return -ENOMEM;
+ }
+
+ /* set same default values as the Windows driver */
+ data_pointer->sensitivity = 0xa0;
+ data_pointer->press_speed = 0x38;
+
+ name_mute = kzalloc(name_sz, GFP_KERNEL);
+ if (name_mute == NULL) {
+ hid_err(hdev, "Could not allocate memory for led data\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ snprintf(name_mute, name_sz, "%s:amber:mute", dev_name(dev));
+
+ name_micmute = kzalloc(name_sz, GFP_KERNEL);
+ if (name_micmute == NULL) {
+ hid_err(hdev, "Could not allocate memory for led data\n");
+ ret = -ENOMEM;
+ goto err2;
+ }
+ snprintf(name_micmute, name_sz, "%s:amber:micmute", dev_name(dev));
+
+ hid_set_drvdata(hdev, data_pointer);
+
+ data_pointer->led_mute.name = name_mute;
+ data_pointer->led_mute.brightness_get = tpkbd_led_brightness_get;
+ data_pointer->led_mute.brightness_set = tpkbd_led_brightness_set;
+ data_pointer->led_mute.dev = dev;
+ led_classdev_register(dev, &data_pointer->led_mute);
+
+ data_pointer->led_micmute.name = name_micmute;
+ data_pointer->led_micmute.brightness_get = tpkbd_led_brightness_get;
+ data_pointer->led_micmute.brightness_set = tpkbd_led_brightness_set;
+ data_pointer->led_micmute.dev = dev;
+ led_classdev_register(dev, &data_pointer->led_micmute);
+
+ tpkbd_features_set(hdev);
+
+ return 0;
+
+err2:
+ kfree(name_mute);
+err:
+ kfree(data_pointer);
+ return ret;
+}
+
+static int tpkbd_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret;
+ struct usbhid_device *uhdev;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "hid_parse failed\n");
+ goto err_free;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "hid_hw_start failed\n");
+ goto err_free;
+ }
+
+ uhdev = (struct usbhid_device *) hdev->driver_data;
+
+ if (uhdev->ifnum == 1)
+ return tpkbd_probe_tp(hdev);
+
+ return 0;
+err_free:
+ return ret;
+}
+
+static void tpkbd_remove_tp(struct hid_device *hdev)
+{
+ struct tpkbd_data_pointer *data_pointer;
+
+ sysfs_remove_group(&hdev->dev.kobj,
+ &tpkbd_attr_group_pointer);
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ led_classdev_unregister(&data_pointer->led_micmute);
+ led_classdev_unregister(&data_pointer->led_mute);
+
+ hid_set_drvdata(hdev, NULL);
+ kfree(data_pointer);
+}
+
+static void tpkbd_remove(struct hid_device *hdev)
+{
+ struct usbhid_device *uhdev;
+
+ uhdev = (struct usbhid_device *) hdev->driver_data;
+ if (uhdev->ifnum == 1)
+ tpkbd_remove_tp(hdev);
+
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id tpkbd_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, tpkbd_devices);
+
+static struct hid_driver tpkbd_driver = {
+ .name = "lenovo_tpkbd",
+ .id_table = tpkbd_devices,
+ .input_mapping = tpkbd_input_mapping,
+ .probe = tpkbd_probe,
+ .remove = tpkbd_remove,
+};
+
+static int __init tpkbd_init(void)
+{
+ return hid_register_driver(&tpkbd_driver);
+}
+
+static void __exit tpkbd_exit(void)
+{
+ hid_unregister_driver(&tpkbd_driver);
+}
+
+module_init(tpkbd_init);
+module_exit(tpkbd_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 5e8a7ed..0f9c146 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -436,27 +436,37 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
{
- struct dj_report dj_report;
+ struct dj_report *dj_report;
+ int retval;
- memset(&dj_report, 0, sizeof(dj_report));
- dj_report.report_id = REPORT_ID_DJ_SHORT;
- dj_report.device_index = 0xFF;
- dj_report.report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
- return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+ dj_report = kzalloc(sizeof(*dj_report), GFP_KERNEL);
+ if (!dj_report)
+ return -ENOMEM;
+ dj_report->report_id = REPORT_ID_DJ_SHORT;
+ dj_report->device_index = 0xFF;
+ dj_report->report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
+ retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
+ kfree(dj_report);
+ return retval;
}
static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
unsigned timeout)
{
- struct dj_report dj_report;
+ struct dj_report *dj_report;
+ int retval;
- memset(&dj_report, 0, sizeof(dj_report));
- dj_report.report_id = REPORT_ID_DJ_SHORT;
- dj_report.device_index = 0xFF;
- dj_report.report_type = REPORT_TYPE_CMD_SWITCH;
- dj_report.report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
- dj_report.report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
- return logi_dj_recv_send_report(djrcv_dev, &dj_report);
+ dj_report = kzalloc(sizeof(*dj_report), GFP_KERNEL);
+ if (!dj_report)
+ return -ENOMEM;
+ dj_report->report_id = REPORT_ID_DJ_SHORT;
+ dj_report->device_index = 0xFF;
+ dj_report->report_type = REPORT_TYPE_CMD_SWITCH;
+ dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
+ dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
+ retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
+ kfree(dj_report);
+ return retval;
}
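Two aspects of the conversion above are worth spelling out: the dj_report structures move from the stack to the heap because the buffer is eventually handed to the USB/HID transport, which may use it for DMA and therefore must not be given stack memory, and the allocation size is taken from the pointed-to structure rather than from the pointer itself. A condensed sketch of the resulting pattern, assuming the surrounding logitech-dj definitions:

	struct dj_report *dj_report;
	int retval;

	/* sizeof(*dj_report) allocates the whole struct, not pointer size */
	dj_report = kzalloc(sizeof(*dj_report), GFP_KERNEL);
	if (!dj_report)
		return -ENOMEM;

	dj_report->report_id = REPORT_ID_DJ_SHORT;
	dj_report->device_index = 0xFF;
	retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
	kfree(dj_report);
	return retval;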
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 7cf3ffe..7364726 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -16,6 +16,7 @@
#include <linux/device.h>
#include <linux/hid.h>
+#include <linux/input/mt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
@@ -48,10 +49,6 @@ static bool scroll_acceleration = false;
module_param(scroll_acceleration, bool, 0644);
MODULE_PARM_DESC(scroll_acceleration, "Accelerate sequential scroll events");
-static bool report_touches = true;
-module_param(report_touches, bool, 0644);
-MODULE_PARM_DESC(report_touches, "Emit touch records (otherwise, only use them for emulation)");
-
static bool report_undeciphered;
module_param(report_undeciphered, bool, 0644);
MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state field using a MSC_RAW event");
@@ -72,15 +69,6 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
#define SCROLL_ACCEL_DEFAULT 7
-/* Single touch emulation should only begin when no touches are currently down.
- * This is true when single_touch_id is equal to NO_TOUCHES. If multiple touches
- * are down and the touch providing for single touch emulation is lifted,
- * single_touch_id is equal to SINGLE_TOUCH_UP. While single touch emulation is
- * occurring, single_touch_id corresponds with the tracking id of the touch used.
- */
-#define NO_TOUCHES -1
-#define SINGLE_TOUCH_UP -2
-
/* Touch surface information. Dimension is in hundredths of a mm, min and max
* are in units. */
#define MOUSE_DIMENSION_X (float)9056
@@ -129,7 +117,6 @@ struct magicmouse_sc {
u8 size;
} touches[16];
int tracking_ids[16];
- int single_touch_id;
};
static int magicmouse_firm_touch(struct magicmouse_sc *msc)
@@ -268,16 +255,14 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
}
}
- if (down) {
+ if (down)
msc->ntouches++;
- if (msc->single_touch_id == NO_TOUCHES)
- msc->single_touch_id = id;
- } else if (msc->single_touch_id == id)
- msc->single_touch_id = SINGLE_TOUCH_UP;
+
+ input_mt_slot(input, id);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, down);
/* Generate the input events for this touch. */
- if (report_touches && down) {
- input_report_abs(input, ABS_MT_TRACKING_ID, id);
+ if (down) {
input_report_abs(input, ABS_MT_TOUCH_MAJOR, touch_major << 2);
input_report_abs(input, ABS_MT_TOUCH_MINOR, touch_minor << 2);
input_report_abs(input, ABS_MT_ORIENTATION, -orientation);
@@ -290,8 +275,6 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
else /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
input_event(input, EV_MSC, MSC_RAW, tdata[8]);
}
-
- input_mt_sync(input);
}
}
@@ -312,12 +295,6 @@ static int magicmouse_raw_event(struct hid_device *hdev,
for (ii = 0; ii < npoints; ii++)
magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
- /* We don't need an MT sync here because trackpad emits a
- * BTN_TOUCH event in a new frame when all touches are released.
- */
- if (msc->ntouches == 0)
- msc->single_touch_id = NO_TOUCHES;
-
clicks = data[1];
/* The following bits provide a device specific timestamp. They
@@ -335,9 +312,6 @@ static int magicmouse_raw_event(struct hid_device *hdev,
for (ii = 0; ii < npoints; ii++)
magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
- if (report_touches && msc->ntouches == 0)
- input_mt_sync(input);
-
/* When emulating three-button mode, it is important
* to have the current touch information before
* generating a click event.
@@ -370,25 +344,17 @@ static int magicmouse_raw_event(struct hid_device *hdev,
input_report_rel(input, REL_Y, y);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
input_report_key(input, BTN_MOUSE, clicks & 1);
- input_report_key(input, BTN_TOUCH, msc->ntouches > 0);
- input_report_key(input, BTN_TOOL_FINGER, msc->ntouches == 1);
- input_report_key(input, BTN_TOOL_DOUBLETAP, msc->ntouches == 2);
- input_report_key(input, BTN_TOOL_TRIPLETAP, msc->ntouches == 3);
- input_report_key(input, BTN_TOOL_QUADTAP, msc->ntouches == 4);
- if (msc->single_touch_id >= 0) {
- input_report_abs(input, ABS_X,
- msc->touches[msc->single_touch_id].x);
- input_report_abs(input, ABS_Y,
- msc->touches[msc->single_touch_id].y);
- }
+ input_mt_report_pointer_emulation(input, true);
}
input_sync(input);
return 1;
}
-static void magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev)
+static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev)
{
+ int error;
+
__set_bit(EV_KEY, input->evbit);
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
@@ -417,60 +383,66 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
__set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
__set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
__set_bit(BTN_TOOL_QUADTAP, input->keybit);
+ __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
__set_bit(BTN_TOUCH, input->keybit);
__set_bit(INPUT_PROP_POINTER, input->propbit);
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
}
- if (report_touches) {
- __set_bit(EV_ABS, input->evbit);
- input_set_abs_params(input, ABS_MT_TRACKING_ID, 0, 15, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255, 4, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255, 4, 0);
- input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
+ __set_bit(EV_ABS, input->evbit);
- /* Note: Touch Y position from the device is inverted relative
- * to how pointer motion is reported (and relative to how USB
- * HID recommends the coordinates work). This driver keeps
- * the origin at the same position, and just uses the additive
- * inverse of the reported Y.
- */
- if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
- input_set_abs_params(input, ABS_MT_POSITION_X,
- MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y,
- MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0);
-
- input_abs_set_res(input, ABS_MT_POSITION_X,
- MOUSE_RES_X);
- input_abs_set_res(input, ABS_MT_POSITION_Y,
- MOUSE_RES_Y);
- } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
- input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
- TRACKPAD_MAX_X, 4, 0);
- input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
- TRACKPAD_MAX_Y, 4, 0);
- input_set_abs_params(input, ABS_MT_POSITION_X,
- TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y,
- TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0);
-
- input_abs_set_res(input, ABS_X, TRACKPAD_RES_X);
- input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y);
- input_abs_set_res(input, ABS_MT_POSITION_X,
- TRACKPAD_RES_X);
- input_abs_set_res(input, ABS_MT_POSITION_Y,
- TRACKPAD_RES_Y);
- }
+ error = input_mt_init_slots(input, 16);
+ if (error)
+ return error;
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255 << 2,
+ 4, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255 << 2,
+ 4, 0);
+ input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
- input_set_events_per_packet(input, 60);
+ /* Note: Touch Y position from the device is inverted relative
+ * to how pointer motion is reported (and relative to how USB
+ * HID recommends the coordinates work). This driver keeps
+ * the origin at the same position, and just uses the additive
+ * inverse of the reported Y.
+ */
+ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0);
+
+ input_abs_set_res(input, ABS_MT_POSITION_X,
+ MOUSE_RES_X);
+ input_abs_set_res(input, ABS_MT_POSITION_Y,
+ MOUSE_RES_Y);
+ } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
+ TRACKPAD_MAX_X, 4, 0);
+ input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
+ TRACKPAD_MAX_Y, 4, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0);
+
+ input_abs_set_res(input, ABS_X, TRACKPAD_RES_X);
+ input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y);
+ input_abs_set_res(input, ABS_MT_POSITION_X,
+ TRACKPAD_RES_X);
+ input_abs_set_res(input, ABS_MT_POSITION_Y,
+ TRACKPAD_RES_Y);
}
+ input_set_events_per_packet(input, 60);
+
if (report_undeciphered) {
__set_bit(EV_MSC, input->evbit);
__set_bit(MSC_RAW, input->mscbit);
}
+
+ return 0;
}
static int magicmouse_input_mapping(struct hid_device *hdev,
@@ -509,8 +481,6 @@ static int magicmouse_probe(struct hid_device *hdev,
msc->quirks = id->driver_data;
hid_set_drvdata(hdev, msc);
- msc->single_touch_id = NO_TOUCHES;
-
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "magicmouse hid parse failed\n");
@@ -526,8 +496,13 @@ static int magicmouse_probe(struct hid_device *hdev,
/* We do this after hid-input is done parsing reports so that
* hid-input uses the most natural button and axis IDs.
*/
- if (msc->input)
- magicmouse_setup_input(msc->input, hdev);
+ if (msc->input) {
+ ret = magicmouse_setup_input(msc->input, hdev);
+ if (ret) {
+ hid_err(hdev, "magicmouse setup input failed (%d)\n", ret);
+ goto err_stop_hw;
+ }
+ }
if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
report = hid_register_report(hdev, HID_INPUT_REPORT,
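The magicmouse changes above replace the old anonymous multi-touch reporting (input_mt_sync(), manual BTN_TOOL_* bookkeeping and hand-rolled single-touch emulation) with the slotted MT protocol from <linux/input/mt.h>. Stripped of the device-specific decoding, the per-frame sequence the driver now emits reduces to the following sketch; variable names are generic, not taken from the patch:

	/* once at setup time: one slot per possible contact */
	input_mt_init_slots(input, 16);

	/* per decoded contact */
	input_mt_slot(input, slot);
	input_mt_report_slot_state(input, MT_TOOL_FINGER, touch_down);
	if (touch_down) {
		input_report_abs(input, ABS_MT_POSITION_X, x);
		input_report_abs(input, ABS_MT_POSITION_Y, y);
	}

	/* once per frame: synthesize BTN_TOUCH, BTN_TOOL_* and ABS_X/Y
	 * for clients that only understand single-touch events */
	input_mt_report_pointer_emulation(input, true);
	input_sync(input);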
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 6e3332a..59c8b5c 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -83,6 +83,7 @@ struct mt_device {
unsigned last_field_index; /* last field index of the report */
unsigned last_slot_field; /* the last field of a slot */
__s8 inputmode; /* InputMode HID feature, -1 if non-existent */
+ __s8 inputmode_index; /* InputMode HID feature index in the report */
__s8 maxcontact_report_id; /* Maximum Contact Number HID feature,
-1 if non-existent */
__u8 num_received; /* how many contacts we received */
@@ -260,10 +261,20 @@ static void mt_feature_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage)
{
struct mt_device *td = hid_get_drvdata(hdev);
+ int i;
switch (usage->hid) {
case HID_DG_INPUTMODE:
td->inputmode = field->report->id;
+ td->inputmode_index = 0; /* has to be updated below */
+
+ for (i = 0; i < field->maxusage; i++) {
+ if (field->usage[i].hid == usage->hid) {
+ td->inputmode_index = i;
+ break;
+ }
+ }
+
break;
case HID_DG_CONTACTMAX:
td->maxcontact_report_id = field->report->id;
@@ -618,7 +629,7 @@ static void mt_set_input_mode(struct hid_device *hdev)
re = &(hdev->report_enum[HID_FEATURE_REPORT]);
r = re->report_id_hash[td->inputmode];
if (r) {
- r->field[0]->value[0] = 0x02;
+ r->field[0]->value[td->inputmode_index] = 0x02;
usbhid_submit_report(hdev, r, USB_DIR_OUT);
}
}
@@ -951,6 +962,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
USB_DEVICE_ID_PANABOARD_UBT880) },
+ /* Novatek Panel */
+ { .driver_data = MT_CLS_DEFAULT,
+ MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
+ USB_DEVICE_ID_NOVATEK_PCT) },
+
/* PenMount panels */
{ .driver_data = MT_CLS_CONFIDENCE,
MT_USB_DEVICE(USB_VENDOR_ID_PENMOUNT,
@@ -1048,6 +1064,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_CSR2) },
+ /* Zytronic panels */
+ { .driver_data = MT_CLS_SERIAL,
+ MT_USB_DEVICE(USB_VENDOR_ID_ZYTRONIC,
+ USB_DEVICE_ID_ZYTRONIC_ZXY100) },
+
/* Generic MT device */
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
{ }
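The new inputmode_index matters because a HID feature field can carry several usages, so its value array is indexed per usage; always writing value[0] can program the wrong control on such devices. A minimal form of the lookup, written as a hypothetical helper rather than as it appears in the patch:

/* Return the value index of usage_hid within a field, or 0 if not found. */
static int mt_usage_index(struct hid_field *field, unsigned int usage_hid)
{
	int i;

	for (i = 0; i < field->maxusage; i++)
		if (field->usage[i].hid == usage_hid)
			return i;
	return 0;
}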
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index 45c3433..27c8ebd 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -1846,7 +1846,7 @@ static void picolcd_debug_out_report(struct picolcd_data *data,
#define BUFF_SZ 256
/* Avoid unnecessary overhead if debugfs is disabled */
- if (!hdev->debug_events)
+ if (list_empty(&hdev->debug_list))
return;
buff = kmalloc(BUFF_SZ, GFP_ATOMIC);
@@ -2613,11 +2613,7 @@ static int picolcd_probe(struct hid_device *hdev,
goto err_cleanup_data;
}
- /* We don't use hidinput but hid_hw_start() fails if nothing is
- * claimed. So spoof claimed input. */
- hdev->claimed = HID_CLAIMED_INPUT;
error = hid_hw_start(hdev, 0);
- hdev->claimed = 0;
if (error) {
hid_err(hdev, "hardware start failed\n");
goto err_cleanup_data;
diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c
index 093bfad..327f9b8 100644
--- a/drivers/hid/hid-roccat-arvo.c
+++ b/drivers/hid/hid-roccat-arvo.c
@@ -39,7 +39,7 @@ static ssize_t arvo_sysfs_show_mode_key(struct device *dev,
int retval;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_receive(usb_dev, ARVO_COMMAND_MODE_KEY,
+ retval = roccat_common2_receive(usb_dev, ARVO_COMMAND_MODE_KEY,
&temp_buf, sizeof(struct arvo_mode_key));
mutex_unlock(&arvo->arvo_lock);
if (retval)
@@ -67,7 +67,7 @@ static ssize_t arvo_sysfs_set_mode_key(struct device *dev,
temp_buf.state = state;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_send(usb_dev, ARVO_COMMAND_MODE_KEY,
+ retval = roccat_common2_send(usb_dev, ARVO_COMMAND_MODE_KEY,
&temp_buf, sizeof(struct arvo_mode_key));
mutex_unlock(&arvo->arvo_lock);
if (retval)
@@ -87,7 +87,7 @@ static ssize_t arvo_sysfs_show_key_mask(struct device *dev,
int retval;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_receive(usb_dev, ARVO_COMMAND_KEY_MASK,
+ retval = roccat_common2_receive(usb_dev, ARVO_COMMAND_KEY_MASK,
&temp_buf, sizeof(struct arvo_key_mask));
mutex_unlock(&arvo->arvo_lock);
if (retval)
@@ -115,7 +115,7 @@ static ssize_t arvo_sysfs_set_key_mask(struct device *dev,
temp_buf.key_mask = key_mask;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_send(usb_dev, ARVO_COMMAND_KEY_MASK,
+ retval = roccat_common2_send(usb_dev, ARVO_COMMAND_KEY_MASK,
&temp_buf, sizeof(struct arvo_key_mask));
mutex_unlock(&arvo->arvo_lock);
if (retval)
@@ -130,7 +130,7 @@ static int arvo_get_actual_profile(struct usb_device *usb_dev)
struct arvo_actual_profile temp_buf;
int retval;
- retval = roccat_common_receive(usb_dev, ARVO_COMMAND_ACTUAL_PROFILE,
+ retval = roccat_common2_receive(usb_dev, ARVO_COMMAND_ACTUAL_PROFILE,
&temp_buf, sizeof(struct arvo_actual_profile));
if (retval)
@@ -170,7 +170,7 @@ static ssize_t arvo_sysfs_set_actual_profile(struct device *dev,
temp_buf.actual_profile = profile;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_send(usb_dev, ARVO_COMMAND_ACTUAL_PROFILE,
+ retval = roccat_common2_send(usb_dev, ARVO_COMMAND_ACTUAL_PROFILE,
&temp_buf, sizeof(struct arvo_actual_profile));
if (!retval) {
arvo->actual_profile = profile;
@@ -194,7 +194,7 @@ static ssize_t arvo_sysfs_write(struct file *fp,
return -EINVAL;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_send(usb_dev, command, buf, real_size);
+ retval = roccat_common2_send(usb_dev, command, buf, real_size);
mutex_unlock(&arvo->arvo_lock);
return (retval ? retval : real_size);
@@ -217,7 +217,7 @@ static ssize_t arvo_sysfs_read(struct file *fp,
return -EINVAL;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_receive(usb_dev, command, buf, real_size);
+ retval = roccat_common2_receive(usb_dev, command, buf, real_size);
mutex_unlock(&arvo->arvo_lock);
return (retval ? retval : real_size);
diff --git a/drivers/hid/hid-roccat-common.c b/drivers/hid/hid-roccat-common.c
index a6d9399..74f7040 100644
--- a/drivers/hid/hid-roccat-common.c
+++ b/drivers/hid/hid-roccat-common.c
@@ -16,12 +16,12 @@
#include <linux/module.h>
#include "hid-roccat-common.h"
-static inline uint16_t roccat_common_feature_report(uint8_t report_id)
+static inline uint16_t roccat_common2_feature_report(uint8_t report_id)
{
return 0x300 | report_id;
}
-int roccat_common_receive(struct usb_device *usb_dev, uint report_id,
+int roccat_common2_receive(struct usb_device *usb_dev, uint report_id,
void *data, uint size)
{
char *buf;
@@ -34,16 +34,16 @@ int roccat_common_receive(struct usb_device *usb_dev, uint report_id,
len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
HID_REQ_GET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- roccat_common_feature_report(report_id),
+ roccat_common2_feature_report(report_id),
0, buf, size, USB_CTRL_SET_TIMEOUT);
memcpy(data, buf, size);
kfree(buf);
return ((len < 0) ? len : ((len != size) ? -EIO : 0));
}
-EXPORT_SYMBOL_GPL(roccat_common_receive);
+EXPORT_SYMBOL_GPL(roccat_common2_receive);
-int roccat_common_send(struct usb_device *usb_dev, uint report_id,
+int roccat_common2_send(struct usb_device *usb_dev, uint report_id,
void const *data, uint size)
{
char *buf;
@@ -56,13 +56,71 @@ int roccat_common_send(struct usb_device *usb_dev, uint report_id,
len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
HID_REQ_SET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- roccat_common_feature_report(report_id),
+ roccat_common2_feature_report(report_id),
0, buf, size, USB_CTRL_SET_TIMEOUT);
kfree(buf);
return ((len < 0) ? len : ((len != size) ? -EIO : 0));
}
-EXPORT_SYMBOL_GPL(roccat_common_send);
+EXPORT_SYMBOL_GPL(roccat_common2_send);
+
+enum roccat_common2_control_states {
+ ROCCAT_COMMON_CONTROL_STATUS_OVERLOAD = 0,
+ ROCCAT_COMMON_CONTROL_STATUS_OK = 1,
+ ROCCAT_COMMON_CONTROL_STATUS_INVALID = 2,
+ ROCCAT_COMMON_CONTROL_STATUS_WAIT = 3,
+};
+
+static int roccat_common2_receive_control_status(struct usb_device *usb_dev)
+{
+ int retval;
+ struct roccat_common2_control control;
+
+ do {
+ msleep(50);
+ retval = roccat_common2_receive(usb_dev,
+ ROCCAT_COMMON_COMMAND_CONTROL,
+ &control, sizeof(struct roccat_common2_control));
+
+ if (retval)
+ return retval;
+
+ switch (control.value) {
+ case ROCCAT_COMMON_CONTROL_STATUS_OK:
+ return 0;
+ case ROCCAT_COMMON_CONTROL_STATUS_WAIT:
+ msleep(500);
+ continue;
+ case ROCCAT_COMMON_CONTROL_STATUS_INVALID:
+ case ROCCAT_COMMON_CONTROL_STATUS_OVERLOAD:
+ /* seems to be critical - replug necessary */
+ return -EINVAL;
+ default:
+ dev_err(&usb_dev->dev,
+ "roccat_common2_receive_control_status: "
+ "unknown response value 0x%x\n",
+ control.value);
+ return -EINVAL;
+ }
+
+ } while (1);
+}
+
+int roccat_common2_send_with_status(struct usb_device *usb_dev,
+ uint command, void const *buf, uint size)
+{
+ int retval;
+
+ retval = roccat_common2_send(usb_dev, command, buf, size);
+ if (retval)
+ return retval;
+
+ msleep(100);
+
+ return roccat_common2_receive_control_status(usb_dev);
+}
+EXPORT_SYMBOL_GPL(roccat_common2_send_with_status);
MODULE_AUTHOR("Stefan Achatz");
MODULE_DESCRIPTION("USB Roccat common driver");
diff --git a/drivers/hid/hid-roccat-common.h b/drivers/hid/hid-roccat-common.h
index 9a5bc61..a97746a 100644
--- a/drivers/hid/hid-roccat-common.h
+++ b/drivers/hid/hid-roccat-common.h
@@ -15,9 +15,21 @@
#include <linux/usb.h>
#include <linux/types.h>
-int roccat_common_receive(struct usb_device *usb_dev, uint report_id,
+enum roccat_common2_commands {
+ ROCCAT_COMMON_COMMAND_CONTROL = 0x4,
+};
+
+struct roccat_common2_control {
+ uint8_t command;
+ uint8_t value;
+ uint8_t request; /* always 0 on requesting write check */
+} __packed;
+
+int roccat_common2_receive(struct usb_device *usb_dev, uint report_id,
void *data, uint size);
-int roccat_common_send(struct usb_device *usb_dev, uint report_id,
+int roccat_common2_send(struct usb_device *usb_dev, uint report_id,
void const *data, uint size);
+int roccat_common2_send_with_status(struct usb_device *usb_dev,
+ uint command, void const *buf, uint size);
#endif
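The exported roccat_common2_send_with_status() centralizes the write-then-poll handshake that the individual Roccat drivers below used to duplicate: send the command, sleep briefly, then read the shared control report (command 0x4) until the device answers OK, retrying on WAIT and bailing out on INVALID or OVERLOAD. A typical converted call site, with EXAMPLE_COMMAND_PROFILE_SETTINGS standing in for a driver-specific command id, reduces to:

	/* settings layout and the command id are device specific */
	retval = roccat_common2_send_with_status(usb_dev,
			EXAMPLE_COMMAND_PROFILE_SETTINGS,
			&settings, sizeof(settings));
	if (retval)
		return retval;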
diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c
index 0e4a0ab..5669916 100644
--- a/drivers/hid/hid-roccat-isku.c
+++ b/drivers/hid/hid-roccat-isku.c
@@ -36,51 +36,7 @@ static void isku_profile_activated(struct isku_device *isku, uint new_profile)
static int isku_receive(struct usb_device *usb_dev, uint command,
void *buf, uint size)
{
- return roccat_common_receive(usb_dev, command, buf, size);
-}
-
-static int isku_receive_control_status(struct usb_device *usb_dev)
-{
- int retval;
- struct isku_control control;
-
- do {
- msleep(50);
- retval = isku_receive(usb_dev, ISKU_COMMAND_CONTROL,
- &control, sizeof(struct isku_control));
-
- if (retval)
- return retval;
-
- switch (control.value) {
- case ISKU_CONTROL_VALUE_STATUS_OK:
- return 0;
- case ISKU_CONTROL_VALUE_STATUS_WAIT:
- continue;
- case ISKU_CONTROL_VALUE_STATUS_INVALID:
- /* seems to be critical - replug necessary */
- case ISKU_CONTROL_VALUE_STATUS_OVERLOAD:
- return -EINVAL;
- default:
- hid_err(usb_dev, "isku_receive_control_status: "
- "unknown response value 0x%x\n",
- control.value);
- return -EINVAL;
- }
-
- } while (1);
-}
-
-static int isku_send(struct usb_device *usb_dev, uint command,
- void const *buf, uint size)
-{
- int retval;
-
- retval = roccat_common_send(usb_dev, command, buf, size);
- if (retval)
- return retval;
-
- return isku_receive_control_status(usb_dev);
+ return roccat_common2_receive(usb_dev, command, buf, size);
}
static int isku_get_actual_profile(struct usb_device *usb_dev)
@@ -100,7 +56,8 @@ static int isku_set_actual_profile(struct usb_device *usb_dev, int new_profile)
buf.command = ISKU_COMMAND_ACTUAL_PROFILE;
buf.size = sizeof(struct isku_actual_profile);
buf.actual_profile = new_profile;
- return isku_send(usb_dev, ISKU_COMMAND_ACTUAL_PROFILE, &buf,
+ return roccat_common2_send_with_status(usb_dev,
+ ISKU_COMMAND_ACTUAL_PROFILE, &buf,
sizeof(struct isku_actual_profile));
}
@@ -197,7 +154,8 @@ static ssize_t isku_sysfs_write(struct file *fp, struct kobject *kobj,
return -EINVAL;
mutex_lock(&isku->isku_lock);
- retval = isku_send(usb_dev, command, (void *)buf, real_size);
+ retval = roccat_common2_send_with_status(usb_dev, command,
+ (void *)buf, real_size);
mutex_unlock(&isku->isku_lock);
return retval ? retval : real_size;
diff --git a/drivers/hid/hid-roccat-isku.h b/drivers/hid/hid-roccat-isku.h
index 075f6ef..605b3ce 100644
--- a/drivers/hid/hid-roccat-isku.h
+++ b/drivers/hid/hid-roccat-isku.h
@@ -25,13 +25,6 @@ struct isku_control {
uint8_t request;
} __packed;
-enum isku_control_values {
- ISKU_CONTROL_VALUE_STATUS_OVERLOAD = 0,
- ISKU_CONTROL_VALUE_STATUS_OK = 1,
- ISKU_CONTROL_VALUE_STATUS_INVALID = 2,
- ISKU_CONTROL_VALUE_STATUS_WAIT = 3,
-};
-
struct isku_actual_profile {
uint8_t command; /* ISKU_COMMAND_ACTUAL_PROFILE */
uint8_t size; /* always 3 */
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 40090d6..9ce2d0b 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -138,7 +138,7 @@ static int kone_check_write(struct usb_device *usb_dev)
return 0;
/* unknown answer */
- hid_err(usb_dev, "got retval %d when checking write\n", data);
+ dev_err(&usb_dev->dev, "got retval %d when checking write\n", data);
return -EIO;
}
@@ -503,7 +503,7 @@ static ssize_t kone_sysfs_set_tcu(struct device *dev,
retval = kone_set_settings(usb_dev, &kone->settings);
if (retval) {
- hid_err(usb_dev, "couldn't set tcu state\n");
+ dev_err(&usb_dev->dev, "couldn't set tcu state\n");
/*
* try to reread valid settings into buffer overwriting
* first error code
@@ -519,7 +519,7 @@ static ssize_t kone_sysfs_set_tcu(struct device *dev,
retval = size;
exit_no_settings:
- hid_err(usb_dev, "couldn't read settings\n");
+ dev_err(&usb_dev->dev, "couldn't read settings\n");
exit_unlock:
mutex_unlock(&kone->kone_lock);
return retval;
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
index 59e4777..f5602fe 100644
--- a/drivers/hid/hid-roccat-koneplus.c
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -39,88 +39,26 @@ static void koneplus_profile_activated(struct koneplus_device *koneplus,
static int koneplus_send_control(struct usb_device *usb_dev, uint value,
enum koneplus_control_requests request)
{
- struct koneplus_control control;
+ struct roccat_common2_control control;
if ((request == KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS ||
request == KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS) &&
value > 4)
return -EINVAL;
- control.command = KONEPLUS_COMMAND_CONTROL;
+ control.command = ROCCAT_COMMON_COMMAND_CONTROL;
control.value = value;
control.request = request;
- return roccat_common_send(usb_dev, KONEPLUS_COMMAND_CONTROL,
- &control, sizeof(struct koneplus_control));
-}
-
-static int koneplus_receive_control_status(struct usb_device *usb_dev)
-{
- int retval;
- struct koneplus_control control;
-
- do {
- retval = roccat_common_receive(usb_dev, KONEPLUS_COMMAND_CONTROL,
- &control, sizeof(struct koneplus_control));
-
- /* check if we get a completely wrong answer */
- if (retval)
- return retval;
-
- if (control.value == KONEPLUS_CONTROL_REQUEST_STATUS_OK)
- return 0;
-
- /* indicates that hardware needs some more time to complete action */
- if (control.value == KONEPLUS_CONTROL_REQUEST_STATUS_WAIT) {
- msleep(500); /* windows driver uses 1000 */
- continue;
- }
-
- /* seems to be critical - replug necessary */
- if (control.value == KONEPLUS_CONTROL_REQUEST_STATUS_OVERLOAD)
- return -EINVAL;
-
- hid_err(usb_dev, "koneplus_receive_control_status: "
- "unknown response value 0x%x\n", control.value);
- return -EINVAL;
- } while (1);
-}
-
-static int koneplus_send(struct usb_device *usb_dev, uint command,
- void const *buf, uint size)
-{
- int retval;
-
- retval = roccat_common_send(usb_dev, command, buf, size);
- if (retval)
- return retval;
-
- return koneplus_receive_control_status(usb_dev);
-}
-
-static int koneplus_select_profile(struct usb_device *usb_dev, uint number,
- enum koneplus_control_requests request)
-{
- int retval;
-
- retval = koneplus_send_control(usb_dev, number, request);
- if (retval)
- return retval;
-
- /* allow time to settle things - windows driver uses 500 */
- msleep(100);
-
- retval = koneplus_receive_control_status(usb_dev);
- if (retval)
- return retval;
-
- return 0;
+ return roccat_common2_send_with_status(usb_dev,
+ ROCCAT_COMMON_COMMAND_CONTROL,
+ &control, sizeof(struct roccat_common2_control));
}
static int koneplus_get_info(struct usb_device *usb_dev,
struct koneplus_info *buf)
{
- return roccat_common_receive(usb_dev, KONEPLUS_COMMAND_INFO,
+ return roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_INFO,
buf, sizeof(struct koneplus_info));
}
@@ -129,19 +67,20 @@ static int koneplus_get_profile_settings(struct usb_device *usb_dev,
{
int retval;
- retval = koneplus_select_profile(usb_dev, number,
+ retval = koneplus_send_control(usb_dev, number,
KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS);
if (retval)
return retval;
- return roccat_common_receive(usb_dev, KONEPLUS_COMMAND_PROFILE_SETTINGS,
+ return roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_PROFILE_SETTINGS,
buf, sizeof(struct koneplus_profile_settings));
}
static int koneplus_set_profile_settings(struct usb_device *usb_dev,
struct koneplus_profile_settings const *settings)
{
- return koneplus_send(usb_dev, KONEPLUS_COMMAND_PROFILE_SETTINGS,
+ return roccat_common2_send_with_status(usb_dev,
+ KONEPLUS_COMMAND_PROFILE_SETTINGS,
settings, sizeof(struct koneplus_profile_settings));
}
@@ -150,19 +89,20 @@ static int koneplus_get_profile_buttons(struct usb_device *usb_dev,
{
int retval;
- retval = koneplus_select_profile(usb_dev, number,
+ retval = koneplus_send_control(usb_dev, number,
KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS);
if (retval)
return retval;
- return roccat_common_receive(usb_dev, KONEPLUS_COMMAND_PROFILE_BUTTONS,
+ return roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_PROFILE_BUTTONS,
buf, sizeof(struct koneplus_profile_buttons));
}
static int koneplus_set_profile_buttons(struct usb_device *usb_dev,
struct koneplus_profile_buttons const *buttons)
{
- return koneplus_send(usb_dev, KONEPLUS_COMMAND_PROFILE_BUTTONS,
+ return roccat_common2_send_with_status(usb_dev,
+ KONEPLUS_COMMAND_PROFILE_BUTTONS,
buttons, sizeof(struct koneplus_profile_buttons));
}
@@ -172,7 +112,7 @@ static int koneplus_get_actual_profile(struct usb_device *usb_dev)
struct koneplus_actual_profile buf;
int retval;
- retval = roccat_common_receive(usb_dev, KONEPLUS_COMMAND_ACTUAL_PROFILE,
+ retval = roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct koneplus_actual_profile));
return retval ? retval : buf.actual_profile;
@@ -187,7 +127,8 @@ static int koneplus_set_actual_profile(struct usb_device *usb_dev,
buf.size = sizeof(struct koneplus_actual_profile);
buf.actual_profile = new_profile;
- return koneplus_send(usb_dev, KONEPLUS_COMMAND_ACTUAL_PROFILE,
+ return roccat_common2_send_with_status(usb_dev,
+ KONEPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct koneplus_actual_profile));
}
@@ -208,7 +149,7 @@ static ssize_t koneplus_sysfs_read(struct file *fp, struct kobject *kobj,
return -EINVAL;
mutex_lock(&koneplus->koneplus_lock);
- retval = roccat_common_receive(usb_dev, command, buf, real_size);
+ retval = roccat_common2_receive(usb_dev, command, buf, real_size);
mutex_unlock(&koneplus->koneplus_lock);
if (retval)
@@ -231,7 +172,8 @@ static ssize_t koneplus_sysfs_write(struct file *fp, struct kobject *kobj,
return -EINVAL;
mutex_lock(&koneplus->koneplus_lock);
- retval = koneplus_send(usb_dev, command, buf, real_size);
+ retval = roccat_common2_send_with_status(usb_dev, command,
+ buf, real_size);
mutex_unlock(&koneplus->koneplus_lock);
if (retval)
diff --git a/drivers/hid/hid-roccat-koneplus.h b/drivers/hid/hid-roccat-koneplus.h
index c03332a..7074b2a 100644
--- a/drivers/hid/hid-roccat-koneplus.h
+++ b/drivers/hid/hid-roccat-koneplus.h
@@ -20,32 +20,11 @@ struct koneplus_talk {
uint8_t data[14];
} __packed;
-/*
- * case 1: writes request 80 and reads value 1
- *
- */
-struct koneplus_control {
- uint8_t command; /* KONEPLUS_COMMAND_CONTROL */
- /*
- * value is profile number in range 0-4 for requesting settings and buttons
- * 1 if status ok for requesting status
- */
- uint8_t value;
- uint8_t request;
-} __attribute__ ((__packed__));
-
enum koneplus_control_requests {
- KONEPLUS_CONTROL_REQUEST_STATUS = 0x00,
KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS = 0x80,
KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS = 0x90,
};
-enum koneplus_control_values {
- KONEPLUS_CONTROL_REQUEST_STATUS_OVERLOAD = 0,
- KONEPLUS_CONTROL_REQUEST_STATUS_OK = 1,
- KONEPLUS_CONTROL_REQUEST_STATUS_WAIT = 3,
-};
-
struct koneplus_actual_profile {
uint8_t command; /* KONEPLUS_COMMAND_ACTUAL_PROFILE */
uint8_t size; /* always 3 */
@@ -137,7 +116,6 @@ struct koneplus_tcu_image {
} __attribute__ ((__packed__));
enum koneplus_commands {
- KONEPLUS_COMMAND_CONTROL = 0x4,
KONEPLUS_COMMAND_ACTUAL_PROFILE = 0x5,
KONEPLUS_COMMAND_PROFILE_SETTINGS = 0x6,
KONEPLUS_COMMAND_PROFILE_BUTTONS = 0x7,
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 112d934..ca6527a 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -47,69 +47,23 @@ static int kovaplus_send_control(struct usb_device *usb_dev, uint value,
enum kovaplus_control_requests request)
{
int retval;
- struct kovaplus_control control;
+ struct roccat_common2_control control;
if ((request == KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS ||
request == KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS) &&
value > 4)
return -EINVAL;
- control.command = KOVAPLUS_COMMAND_CONTROL;
+ control.command = ROCCAT_COMMON_COMMAND_CONTROL;
control.value = value;
control.request = request;
- retval = roccat_common_send(usb_dev, KOVAPLUS_COMMAND_CONTROL,
- &control, sizeof(struct kovaplus_control));
+ retval = roccat_common2_send(usb_dev, ROCCAT_COMMON_COMMAND_CONTROL,
+ &control, sizeof(struct roccat_common2_control));
return retval;
}
-static int kovaplus_receive_control_status(struct usb_device *usb_dev)
-{
- int retval;
- struct kovaplus_control control;
-
- do {
- retval = roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_CONTROL,
- &control, sizeof(struct kovaplus_control));
-
- /* check if we get a completely wrong answer */
- if (retval)
- return retval;
-
- if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_OK)
- return 0;
-
- /* indicates that hardware needs some more time to complete action */
- if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_WAIT) {
- msleep(500); /* windows driver uses 1000 */
- continue;
- }
-
- /* seems to be critical - replug necessary */
- if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_OVERLOAD)
- return -EINVAL;
-
- hid_err(usb_dev, "roccat_common_receive_control_status: "
- "unknown response value 0x%x\n", control.value);
- return -EINVAL;
- } while (1);
-}
-
-static int kovaplus_send(struct usb_device *usb_dev, uint command,
- void const *buf, uint size)
-{
- int retval;
-
- retval = roccat_common_send(usb_dev, command, buf, size);
- if (retval)
- return retval;
-
- msleep(100);
-
- return kovaplus_receive_control_status(usb_dev);
-}
-
static int kovaplus_select_profile(struct usb_device *usb_dev, uint number,
enum kovaplus_control_requests request)
{
@@ -119,7 +73,7 @@ static int kovaplus_select_profile(struct usb_device *usb_dev, uint number,
static int kovaplus_get_info(struct usb_device *usb_dev,
struct kovaplus_info *buf)
{
- return roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_INFO,
+ return roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_INFO,
buf, sizeof(struct kovaplus_info));
}
@@ -133,14 +87,15 @@ static int kovaplus_get_profile_settings(struct usb_device *usb_dev,
if (retval)
return retval;
- return roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_SETTINGS,
+ return roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_SETTINGS,
buf, sizeof(struct kovaplus_profile_settings));
}
static int kovaplus_set_profile_settings(struct usb_device *usb_dev,
struct kovaplus_profile_settings const *settings)
{
- return kovaplus_send(usb_dev, KOVAPLUS_COMMAND_PROFILE_SETTINGS,
+ return roccat_common2_send_with_status(usb_dev,
+ KOVAPLUS_COMMAND_PROFILE_SETTINGS,
settings, sizeof(struct kovaplus_profile_settings));
}
@@ -154,14 +109,15 @@ static int kovaplus_get_profile_buttons(struct usb_device *usb_dev,
if (retval)
return retval;
- return roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_BUTTONS,
+ return roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_BUTTONS,
buf, sizeof(struct kovaplus_profile_buttons));
}
static int kovaplus_set_profile_buttons(struct usb_device *usb_dev,
struct kovaplus_profile_buttons const *buttons)
{
- return kovaplus_send(usb_dev, KOVAPLUS_COMMAND_PROFILE_BUTTONS,
+ return roccat_common2_send_with_status(usb_dev,
+ KOVAPLUS_COMMAND_PROFILE_BUTTONS,
buttons, sizeof(struct kovaplus_profile_buttons));
}
@@ -171,7 +127,7 @@ static int kovaplus_get_actual_profile(struct usb_device *usb_dev)
struct kovaplus_actual_profile buf;
int retval;
- retval = roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_ACTUAL_PROFILE,
+ retval = roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct kovaplus_actual_profile));
return retval ? retval : buf.actual_profile;
@@ -186,7 +142,8 @@ static int kovaplus_set_actual_profile(struct usb_device *usb_dev,
buf.size = sizeof(struct kovaplus_actual_profile);
buf.actual_profile = new_profile;
- return kovaplus_send(usb_dev, KOVAPLUS_COMMAND_ACTUAL_PROFILE,
+ return roccat_common2_send_with_status(usb_dev,
+ KOVAPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct kovaplus_actual_profile));
}
diff --git a/drivers/hid/hid-roccat-kovaplus.h b/drivers/hid/hid-roccat-kovaplus.h
index fb2aed4..f82daa1 100644
--- a/drivers/hid/hid-roccat-kovaplus.h
+++ b/drivers/hid/hid-roccat-kovaplus.h
@@ -14,27 +14,13 @@
#include <linux/types.h>
-struct kovaplus_control {
- uint8_t command; /* KOVAPLUS_COMMAND_CONTROL */
- uint8_t value;
- uint8_t request;
-} __packed;
-
enum kovaplus_control_requests {
- /* read after write; value = 1 */
- KOVAPLUS_CONTROL_REQUEST_STATUS = 0x0,
/* write; value = profile number range 0-4 */
KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS = 0x10,
/* write; value = profile number range 0-4 */
KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS = 0x20,
};
-enum kovaplus_control_values {
- KOVAPLUS_CONTROL_REQUEST_STATUS_OVERLOAD = 0, /* supposed */
- KOVAPLUS_CONTROL_REQUEST_STATUS_OK = 1,
- KOVAPLUS_CONTROL_REQUEST_STATUS_WAIT = 3, /* supposed */
-};
-
struct kovaplus_actual_profile {
uint8_t command; /* KOVAPLUS_COMMAND_ACTUAL_PROFILE */
uint8_t size; /* always 3 */
@@ -75,7 +61,6 @@ struct kovaplus_a {
} __packed;
enum kovaplus_commands {
- KOVAPLUS_COMMAND_CONTROL = 0x4,
KOVAPLUS_COMMAND_ACTUAL_PROFILE = 0x5,
KOVAPLUS_COMMAND_PROFILE_SETTINGS = 0x6,
KOVAPLUS_COMMAND_PROFILE_BUTTONS = 0x7,
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index df05c1b1..1317c17 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -42,43 +42,19 @@ static void profile_activated(struct pyra_device *pyra,
static int pyra_send_control(struct usb_device *usb_dev, int value,
enum pyra_control_requests request)
{
- struct pyra_control control;
+ struct roccat_common2_control control;
if ((request == PYRA_CONTROL_REQUEST_PROFILE_SETTINGS ||
request == PYRA_CONTROL_REQUEST_PROFILE_BUTTONS) &&
(value < 0 || value > 4))
return -EINVAL;
- control.command = PYRA_COMMAND_CONTROL;
+ control.command = ROCCAT_COMMON_COMMAND_CONTROL;
control.value = value;
control.request = request;
- return roccat_common_send(usb_dev, PYRA_COMMAND_CONTROL,
- &control, sizeof(struct pyra_control));
-}
-
-static int pyra_receive_control_status(struct usb_device *usb_dev)
-{
- int retval;
- struct pyra_control control;
-
- do {
- msleep(10);
- retval = roccat_common_receive(usb_dev, PYRA_COMMAND_CONTROL,
- &control, sizeof(struct pyra_control));
-
- /* requested too early, try again */
- } while (retval == -EPROTO);
-
- if (!retval && control.command == PYRA_COMMAND_CONTROL &&
- control.request == PYRA_CONTROL_REQUEST_STATUS &&
- control.value == 1)
- return 0;
- else {
- hid_err(usb_dev, "receive control status: unknown response 0x%x 0x%x\n",
- control.request, control.value);
- return retval ? retval : -EINVAL;
- }
+ return roccat_common2_send(usb_dev, ROCCAT_COMMON_COMMAND_CONTROL,
+ &control, sizeof(struct roccat_common2_control));
}
static int pyra_get_profile_settings(struct usb_device *usb_dev,
@@ -89,7 +65,7 @@ static int pyra_get_profile_settings(struct usb_device *usb_dev,
PYRA_CONTROL_REQUEST_PROFILE_SETTINGS);
if (retval)
return retval;
- return roccat_common_receive(usb_dev, PYRA_COMMAND_PROFILE_SETTINGS,
+ return roccat_common2_receive(usb_dev, PYRA_COMMAND_PROFILE_SETTINGS,
buf, sizeof(struct pyra_profile_settings));
}
@@ -101,51 +77,44 @@ static int pyra_get_profile_buttons(struct usb_device *usb_dev,
PYRA_CONTROL_REQUEST_PROFILE_BUTTONS);
if (retval)
return retval;
- return roccat_common_receive(usb_dev, PYRA_COMMAND_PROFILE_BUTTONS,
+ return roccat_common2_receive(usb_dev, PYRA_COMMAND_PROFILE_BUTTONS,
buf, sizeof(struct pyra_profile_buttons));
}
static int pyra_get_settings(struct usb_device *usb_dev,
struct pyra_settings *buf)
{
- return roccat_common_receive(usb_dev, PYRA_COMMAND_SETTINGS,
+ return roccat_common2_receive(usb_dev, PYRA_COMMAND_SETTINGS,
buf, sizeof(struct pyra_settings));
}
static int pyra_get_info(struct usb_device *usb_dev, struct pyra_info *buf)
{
- return roccat_common_receive(usb_dev, PYRA_COMMAND_INFO,
+ return roccat_common2_receive(usb_dev, PYRA_COMMAND_INFO,
buf, sizeof(struct pyra_info));
}
-static int pyra_send(struct usb_device *usb_dev, uint command,
- void const *buf, uint size)
-{
- int retval;
- retval = roccat_common_send(usb_dev, command, buf, size);
- if (retval)
- return retval;
- return pyra_receive_control_status(usb_dev);
-}
-
static int pyra_set_profile_settings(struct usb_device *usb_dev,
struct pyra_profile_settings const *settings)
{
- return pyra_send(usb_dev, PYRA_COMMAND_PROFILE_SETTINGS, settings,
+ return roccat_common2_send_with_status(usb_dev,
+ PYRA_COMMAND_PROFILE_SETTINGS, settings,
sizeof(struct pyra_profile_settings));
}
static int pyra_set_profile_buttons(struct usb_device *usb_dev,
struct pyra_profile_buttons const *buttons)
{
- return pyra_send(usb_dev, PYRA_COMMAND_PROFILE_BUTTONS, buttons,
+ return roccat_common2_send_with_status(usb_dev,
+ PYRA_COMMAND_PROFILE_BUTTONS, buttons,
sizeof(struct pyra_profile_buttons));
}
static int pyra_set_settings(struct usb_device *usb_dev,
struct pyra_settings const *settings)
{
- return pyra_send(usb_dev, PYRA_COMMAND_SETTINGS, settings,
+ return roccat_common2_send_with_status(usb_dev,
+ PYRA_COMMAND_SETTINGS, settings,
sizeof(struct pyra_settings));
}
diff --git a/drivers/hid/hid-roccat-pyra.h b/drivers/hid/hid-roccat-pyra.h
index 0442d7f..eada783 100644
--- a/drivers/hid/hid-roccat-pyra.h
+++ b/drivers/hid/hid-roccat-pyra.h
@@ -20,18 +20,7 @@ struct pyra_b {
uint8_t unknown; /* 1 */
} __attribute__ ((__packed__));
-struct pyra_control {
- uint8_t command; /* PYRA_COMMAND_CONTROL */
- /*
- * value is profile number for request_settings and request_buttons
- * 1 if status ok for request_status
- */
- uint8_t value; /* Range 0-4 */
- uint8_t request;
-} __attribute__ ((__packed__));
-
enum pyra_control_requests {
- PYRA_CONTROL_REQUEST_STATUS = 0x00,
PYRA_CONTROL_REQUEST_PROFILE_SETTINGS = 0x10,
PYRA_CONTROL_REQUEST_PROFILE_BUTTONS = 0x20
};
@@ -75,7 +64,6 @@ struct pyra_info {
} __attribute__ ((__packed__));
enum pyra_commands {
- PYRA_COMMAND_CONTROL = 0x4,
PYRA_COMMAND_SETTINGS = 0x5,
PYRA_COMMAND_PROFILE_SETTINGS = 0x6,
PYRA_COMMAND_PROFILE_BUTTONS = 0x7,
diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c
new file mode 100644
index 0000000..014afba
--- /dev/null
+++ b/drivers/hid/hid-roccat-savu.c
@@ -0,0 +1,316 @@
+/*
+ * Roccat Savu driver for Linux
+ *
+ * Copyright (c) 2012 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/* The Roccat Savu is a gaming mouse with macro keys; its settings can be
+ * stored in 5 profiles.
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/hid-roccat.h>
+#include "hid-ids.h"
+#include "hid-roccat-common.h"
+#include "hid-roccat-savu.h"
+
+static struct class *savu_class;
+
+static ssize_t savu_sysfs_read(struct file *fp, struct kobject *kobj,
+ char *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct savu_device *savu = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off >= real_size)
+ return 0;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&savu->savu_lock);
+ retval = roccat_common2_receive(usb_dev, command, buf, real_size);
+ mutex_unlock(&savu->savu_lock);
+
+ return retval ? retval : real_size;
+}
+
+static ssize_t savu_sysfs_write(struct file *fp, struct kobject *kobj,
+ void const *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct savu_device *savu = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&savu->savu_lock);
+ retval = roccat_common2_send_with_status(usb_dev, command,
+ (void *)buf, real_size);
+ mutex_unlock(&savu->savu_lock);
+
+ return retval ? retval : real_size;
+}
+
+#define SAVU_SYSFS_W(thingy, THINGY) \
+static ssize_t savu_sysfs_write_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return savu_sysfs_write(fp, kobj, buf, off, count, \
+ SAVU_SIZE_ ## THINGY, SAVU_COMMAND_ ## THINGY); \
+}
+
+#define SAVU_SYSFS_R(thingy, THINGY) \
+static ssize_t savu_sysfs_read_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return savu_sysfs_read(fp, kobj, buf, off, count, \
+ SAVU_SIZE_ ## THINGY, SAVU_COMMAND_ ## THINGY); \
+}
+
+#define SAVU_SYSFS_RW(thingy, THINGY) \
+SAVU_SYSFS_W(thingy, THINGY) \
+SAVU_SYSFS_R(thingy, THINGY)
+
+#define SAVU_BIN_ATTRIBUTE_RW(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0660 }, \
+ .size = SAVU_SIZE_ ## THINGY, \
+ .read = savu_sysfs_read_ ## thingy, \
+ .write = savu_sysfs_write_ ## thingy \
+}
+
+#define SAVU_BIN_ATTRIBUTE_R(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0440 }, \
+ .size = SAVU_SIZE_ ## THINGY, \
+ .read = savu_sysfs_read_ ## thingy, \
+}
+
+#define SAVU_BIN_ATTRIBUTE_W(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0220 }, \
+ .size = SAVU_SIZE_ ## THINGY, \
+ .write = savu_sysfs_write_ ## thingy \
+}
+
+SAVU_SYSFS_W(control, CONTROL)
+SAVU_SYSFS_RW(profile, PROFILE)
+SAVU_SYSFS_RW(general, GENERAL)
+SAVU_SYSFS_RW(buttons, BUTTONS)
+SAVU_SYSFS_RW(macro, MACRO)
+SAVU_SYSFS_R(info, INFO)
+SAVU_SYSFS_RW(sensor, SENSOR)
+
+static struct bin_attribute savu_bin_attributes[] = {
+ SAVU_BIN_ATTRIBUTE_W(control, CONTROL),
+ SAVU_BIN_ATTRIBUTE_RW(profile, PROFILE),
+ SAVU_BIN_ATTRIBUTE_RW(general, GENERAL),
+ SAVU_BIN_ATTRIBUTE_RW(buttons, BUTTONS),
+ SAVU_BIN_ATTRIBUTE_RW(macro, MACRO),
+ SAVU_BIN_ATTRIBUTE_R(info, INFO),
+ SAVU_BIN_ATTRIBUTE_RW(sensor, SENSOR),
+ __ATTR_NULL
+};
+
+static int savu_init_savu_device_struct(struct usb_device *usb_dev,
+ struct savu_device *savu)
+{
+ mutex_init(&savu->savu_lock);
+
+ return 0;
+}
+
+static int savu_init_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct savu_device *savu;
+ int retval;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != USB_INTERFACE_PROTOCOL_MOUSE) {
+ hid_set_drvdata(hdev, NULL);
+ return 0;
+ }
+
+ savu = kzalloc(sizeof(*savu), GFP_KERNEL);
+ if (!savu) {
+ hid_err(hdev, "can't alloc device descriptor\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, savu);
+
+ retval = savu_init_savu_device_struct(usb_dev, savu);
+ if (retval) {
+ hid_err(hdev, "couldn't init struct savu_device\n");
+ goto exit_free;
+ }
+
+ retval = roccat_connect(savu_class, hdev,
+ sizeof(struct savu_roccat_report));
+ if (retval < 0) {
+ hid_err(hdev, "couldn't init char dev\n");
+ } else {
+ savu->chrdev_minor = retval;
+ savu->roccat_claimed = 1;
+ }
+
+ return 0;
+exit_free:
+ kfree(savu);
+ return retval;
+}
+
+static void savu_remove_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct savu_device *savu;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != USB_INTERFACE_PROTOCOL_MOUSE)
+ return;
+
+ savu = hid_get_drvdata(hdev);
+ if (savu->roccat_claimed)
+ roccat_disconnect(savu->chrdev_minor);
+ kfree(savu);
+}
+
+static int savu_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int retval;
+
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+ goto exit;
+ }
+
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ hid_err(hdev, "hw start failed\n");
+ goto exit;
+ }
+
+ retval = savu_init_specials(hdev);
+ if (retval) {
+ hid_err(hdev, "couldn't install mouse\n");
+ goto exit_stop;
+ }
+
+ return 0;
+
+exit_stop:
+ hid_hw_stop(hdev);
+exit:
+ return retval;
+}
+
+static void savu_remove(struct hid_device *hdev)
+{
+ savu_remove_specials(hdev);
+ hid_hw_stop(hdev);
+}
+
+static void savu_report_to_chrdev(struct savu_device const *savu,
+ u8 const *data)
+{
+ struct savu_roccat_report roccat_report;
+ struct savu_mouse_report_special const *special_report;
+
+ if (data[0] != SAVU_MOUSE_REPORT_NUMBER_SPECIAL)
+ return;
+
+ special_report = (struct savu_mouse_report_special const *)data;
+
+ roccat_report.type = special_report->type;
+ roccat_report.data[0] = special_report->data[0];
+ roccat_report.data[1] = special_report->data[1];
+ roccat_report_event(savu->chrdev_minor,
+ (uint8_t const *)&roccat_report);
+}
+
+static int savu_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct savu_device *savu = hid_get_drvdata(hdev);
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != USB_INTERFACE_PROTOCOL_MOUSE)
+ return 0;
+
+ if (savu == NULL)
+ return 0;
+
+ if (savu->roccat_claimed)
+ savu_report_to_chrdev(savu, data);
+
+ return 0;
+}
+
+static const struct hid_device_id savu_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, savu_devices);
+
+static struct hid_driver savu_driver = {
+ .name = "savu",
+ .id_table = savu_devices,
+ .probe = savu_probe,
+ .remove = savu_remove,
+ .raw_event = savu_raw_event
+};
+
+static int __init savu_init(void)
+{
+ int retval;
+
+ savu_class = class_create(THIS_MODULE, "savu");
+ if (IS_ERR(savu_class))
+ return PTR_ERR(savu_class);
+ savu_class->dev_bin_attrs = savu_bin_attributes;
+
+ retval = hid_register_driver(&savu_driver);
+ if (retval)
+ class_destroy(savu_class);
+ return retval;
+}
+
+static void __exit savu_exit(void)
+{
+ hid_unregister_driver(&savu_driver);
+ class_destroy(savu_class);
+}
+
+module_init(savu_init);
+module_exit(savu_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Savu driver");
+MODULE_LICENSE("GPL v2");
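
The new Savu driver exposes each configuration block (control, profile, general, buttons, macro, info, sensor) as a binary sysfs attribute on its "savu" class device and only accepts reads and writes that cover the whole block at offset 0. A minimal user-space sketch of reading the 8-byte info block follows; the default path is a guess, so pass the real attribute path as the first argument:

	/* Read the SAVU_SIZE_INFO (8 byte) "info" attribute.
	 * Build with: cc -o savu_info savu_info.c */
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		const char *path = argc > 1 ? argv[1]
					    : "/sys/class/savu/savu0/info"; /* assumed path */
		uint8_t buf[8];
		ssize_t n;
		int fd = open(path, O_RDONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* the driver rejects partial accesses: off must be 0, count must be 8 */
		n = read(fd, buf, sizeof(buf));
		close(fd);
		if (n != (ssize_t)sizeof(buf)) {
			perror("read");
			return 1;
		}
		printf("info:");
		for (int i = 0; i < (int)n; i++)
			printf(" %02x", buf[i]);
		printf("\n");
		return 0;
	}
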
diff --git a/drivers/hid/hid-roccat-savu.h b/drivers/hid/hid-roccat-savu.h
new file mode 100644
index 0000000..9120ba7
--- /dev/null
+++ b/drivers/hid/hid-roccat-savu.h
@@ -0,0 +1,87 @@
+#ifndef __HID_ROCCAT_SAVU_H
+#define __HID_ROCCAT_SAVU_H
+
+/*
+ * Copyright (c) 2012 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+
+enum {
+ SAVU_SIZE_CONTROL = 0x03,
+ SAVU_SIZE_PROFILE = 0x03,
+ SAVU_SIZE_GENERAL = 0x10,
+ SAVU_SIZE_BUTTONS = 0x2f,
+ SAVU_SIZE_MACRO = 0x0823,
+ SAVU_SIZE_INFO = 0x08,
+ SAVU_SIZE_SENSOR = 0x04,
+};
+
+enum savu_control_requests {
+ SAVU_CONTROL_REQUEST_GENERAL = 0x80,
+ SAVU_CONTROL_REQUEST_BUTTONS = 0x90,
+};
+
+enum savu_commands {
+ SAVU_COMMAND_CONTROL = 0x4,
+ SAVU_COMMAND_PROFILE = 0x5,
+ SAVU_COMMAND_GENERAL = 0x6,
+ SAVU_COMMAND_BUTTONS = 0x7,
+ SAVU_COMMAND_MACRO = 0x8,
+ SAVU_COMMAND_INFO = 0x9,
+ SAVU_COMMAND_SENSOR = 0xc,
+};
+
+struct savu_mouse_report_special {
+ uint8_t report_number; /* always 3 */
+ uint8_t zero;
+ uint8_t type;
+ uint8_t data[2];
+} __packed;
+
+enum {
+ SAVU_MOUSE_REPORT_NUMBER_SPECIAL = 3,
+};
+
+enum savu_mouse_report_button_types {
+ /* data1 = new profile range 1-5 */
+ SAVU_MOUSE_REPORT_BUTTON_TYPE_PROFILE = 0x20,
+
+ /* data1 = button number range 1-24; data2 = action */
+ SAVU_MOUSE_REPORT_BUTTON_TYPE_QUICKLAUNCH = 0x60,
+
+ /* data1 = button number range 1-24; data2 = action */
+ SAVU_MOUSE_REPORT_BUTTON_TYPE_TIMER = 0x80,
+
+ /* data1 = setting number range 1-5 */
+ SAVU_MOUSE_REPORT_BUTTON_TYPE_CPI = 0xb0,
+
+ /* data1 and data2 = range 0x1-0xb */
+ SAVU_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY = 0xc0,
+
+ /* data1 = 22 = next track...
+ * data2 = action
+ */
+ SAVU_MOUSE_REPORT_BUTTON_TYPE_MULTIMEDIA = 0xf0,
+};
+
+struct savu_roccat_report {
+ uint8_t type;
+ uint8_t data[2];
+} __packed;
+
+struct savu_device {
+ int roccat_claimed;
+ int chrdev_minor;
+
+ struct mutex savu_lock;
+};
+
+#endif
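
Special button events (profile switch, CPI change, multimedia keys and so on) are not consumed by the driver; savu_report_to_chrdev() forwards them as 3-byte savu_roccat_report records through the roccat character device claimed in savu_init_specials(). A hedged reader sketch; the device node location is an assumption and must be supplied by the user (one chardev is created per connected mouse):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	struct savu_roccat_report {	/* mirrors the kernel struct above */
		uint8_t type;
		uint8_t data[2];
	} __attribute__((packed));

	int main(int argc, char **argv)
	{
		struct savu_roccat_report rep;
		int fd;

		if (argc < 2) {
			fprintf(stderr, "usage: %s <roccat chardev>\n", argv[0]);
			return 1;
		}
		fd = open(argv[1], O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		while (read(fd, &rep, sizeof(rep)) == (ssize_t)sizeof(rep)) {
			if (rep.type == 0x20)	/* ..._BUTTON_TYPE_PROFILE */
				printf("profile switched to %u\n", rep.data[0]);
			else
				printf("type 0x%02x data %u %u\n",
				       rep.type, rep.data[0], rep.data[1]);
		}
		close(fd);
		return 0;
	}
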
diff --git a/drivers/hid/hid-wiimote-ext.c b/drivers/hid/hid-wiimote-ext.c
index aa95870..0a1805c 100644
--- a/drivers/hid/hid-wiimote-ext.c
+++ b/drivers/hid/hid-wiimote-ext.c
@@ -77,7 +77,7 @@ static __u16 wiiext_keymap[] = {
BTN_TR, /* WIIEXT_KEY_RT */
};
-/* diable all extensions */
+/* disable all extensions */
static void ext_disable(struct wiimote_ext *ext)
{
unsigned long flags;
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 36fa77b..3b6f7bf 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -96,6 +96,7 @@ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count,
}
kfree(list->buffer[list->tail].value);
+ list->buffer[list->tail].value = NULL;
list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1);
}
out:
@@ -300,6 +301,7 @@ static int hidraw_release(struct inode * inode, struct file * file)
struct hidraw *dev;
struct hidraw_list *list = file->private_data;
int ret;
+ int i;
mutex_lock(&minors_lock);
if (!hidraw_table[minor]) {
@@ -317,6 +319,9 @@ static int hidraw_release(struct inode * inode, struct file * file)
kfree(list->hidraw);
}
}
+
+ for (i = 0; i < HIDRAW_BUFFER_SIZE; ++i)
+ kfree(list->buffer[i].value);
kfree(list);
ret = 0;
unlock:
@@ -446,12 +451,17 @@ int hidraw_report_event(struct hid_device *hid, u8 *data, int len)
int ret = 0;
list_for_each_entry(list, &dev->list, node) {
+ int new_head = (list->head + 1) & (HIDRAW_BUFFER_SIZE - 1);
+
+ if (new_head == list->tail)
+ continue;
+
if (!(list->buffer[list->head].value = kmemdup(data, len, GFP_ATOMIC))) {
ret = -ENOMEM;
break;
}
list->buffer[list->head].len = len;
- list->head = (list->head + 1) & (HIDRAW_BUFFER_SIZE - 1);
+ list->head = new_head;
kill_fasync(&list->fasync, SIGIO, POLL_IN);
}
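
The hidraw changes close two related holes: when the ring is full, hidraw_report_event() now drops the new report instead of advancing head onto tail (which overwrote, and leaked, the oldest buffer), hidraw_read() NULLs the slot it just freed, and release() frees anything still queued. The same "drop when full" discipline in isolation, as a small user-space sketch:

	/* Single-producer/single-consumer ring with the "drop when full" policy
	 * adopted above (illustration, not kernel code). Zero-initialize struct ring. */
	#include <stdlib.h>
	#include <string.h>

	#define RING_SIZE 64	/* power of two, like HIDRAW_BUFFER_SIZE */

	struct entry { void *value; size_t len; };
	struct ring  { struct entry buf[RING_SIZE]; unsigned head, tail; };

	static int ring_push(struct ring *r, const void *data, size_t len)
	{
		unsigned new_head = (r->head + 1) & (RING_SIZE - 1);

		if (new_head == r->tail)	/* full: never overwrite an unread slot */
			return -1;
		r->buf[r->head].value = malloc(len);
		if (!r->buf[r->head].value)
			return -1;
		memcpy(r->buf[r->head].value, data, len);
		r->buf[r->head].len = len;
		r->head = new_head;
		return 0;
	}

	static int ring_pop(struct ring *r, struct entry *out)
	{
		if (r->head == r->tail)		/* empty */
			return -1;
		*out = r->buf[r->tail];
		r->buf[r->tail].value = NULL;	/* mirrors the new NULLing in hidraw_read() */
		r->tail = (r->tail + 1) & (RING_SIZE - 1);
		return 0;			/* caller owns and frees out->value */
	}
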
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
new file mode 100644
index 0000000..714cd8c
--- /dev/null
+++ b/drivers/hid/uhid.c
@@ -0,0 +1,572 @@
+/*
+ * User-space I/O driver support for HID subsystem
+ * Copyright (c) 2012 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/uhid.h>
+#include <linux/wait.h>
+
+#define UHID_NAME "uhid"
+#define UHID_BUFSIZE 32
+
+struct uhid_device {
+ struct mutex devlock;
+ bool running;
+
+ __u8 *rd_data;
+ uint rd_size;
+
+ struct hid_device *hid;
+ struct uhid_event input_buf;
+
+ wait_queue_head_t waitq;
+ spinlock_t qlock;
+ __u8 head;
+ __u8 tail;
+ struct uhid_event *outq[UHID_BUFSIZE];
+
+ struct mutex report_lock;
+ wait_queue_head_t report_wait;
+ atomic_t report_done;
+ atomic_t report_id;
+ struct uhid_event report_buf;
+};
+
+static struct miscdevice uhid_misc;
+
+static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
+{
+ __u8 newhead;
+
+ newhead = (uhid->head + 1) % UHID_BUFSIZE;
+
+ if (newhead != uhid->tail) {
+ uhid->outq[uhid->head] = ev;
+ uhid->head = newhead;
+ wake_up_interruptible(&uhid->waitq);
+ } else {
+ hid_warn(uhid->hid, "Output queue is full\n");
+ kfree(ev);
+ }
+}
+
+static int uhid_queue_event(struct uhid_device *uhid, __u32 event)
+{
+ unsigned long flags;
+ struct uhid_event *ev;
+
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev)
+ return -ENOMEM;
+
+ ev->type = event;
+
+ spin_lock_irqsave(&uhid->qlock, flags);
+ uhid_queue(uhid, ev);
+ spin_unlock_irqrestore(&uhid->qlock, flags);
+
+ return 0;
+}
+
+static int uhid_hid_start(struct hid_device *hid)
+{
+ struct uhid_device *uhid = hid->driver_data;
+
+ return uhid_queue_event(uhid, UHID_START);
+}
+
+static void uhid_hid_stop(struct hid_device *hid)
+{
+ struct uhid_device *uhid = hid->driver_data;
+
+ hid->claimed = 0;
+ uhid_queue_event(uhid, UHID_STOP);
+}
+
+static int uhid_hid_open(struct hid_device *hid)
+{
+ struct uhid_device *uhid = hid->driver_data;
+
+ return uhid_queue_event(uhid, UHID_OPEN);
+}
+
+static void uhid_hid_close(struct hid_device *hid)
+{
+ struct uhid_device *uhid = hid->driver_data;
+
+ uhid_queue_event(uhid, UHID_CLOSE);
+}
+
+static int uhid_hid_input(struct input_dev *input, unsigned int type,
+ unsigned int code, int value)
+{
+ struct hid_device *hid = input_get_drvdata(input);
+ struct uhid_device *uhid = hid->driver_data;
+ unsigned long flags;
+ struct uhid_event *ev;
+
+ ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
+ if (!ev)
+ return -ENOMEM;
+
+ ev->type = UHID_OUTPUT_EV;
+ ev->u.output_ev.type = type;
+ ev->u.output_ev.code = code;
+ ev->u.output_ev.value = value;
+
+ spin_lock_irqsave(&uhid->qlock, flags);
+ uhid_queue(uhid, ev);
+ spin_unlock_irqrestore(&uhid->qlock, flags);
+
+ return 0;
+}
+
+static int uhid_hid_parse(struct hid_device *hid)
+{
+ struct uhid_device *uhid = hid->driver_data;
+
+ return hid_parse_report(hid, uhid->rd_data, uhid->rd_size);
+}
+
+static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
+ __u8 *buf, size_t count, unsigned char rtype)
+{
+ struct uhid_device *uhid = hid->driver_data;
+ __u8 report_type;
+ struct uhid_event *ev;
+ unsigned long flags;
+ int ret;
+ size_t uninitialized_var(len);
+ struct uhid_feature_answer_req *req;
+
+ if (!uhid->running)
+ return -EIO;
+
+ switch (rtype) {
+ case HID_FEATURE_REPORT:
+ report_type = UHID_FEATURE_REPORT;
+ break;
+ case HID_OUTPUT_REPORT:
+ report_type = UHID_OUTPUT_REPORT;
+ break;
+ case HID_INPUT_REPORT:
+ report_type = UHID_INPUT_REPORT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_interruptible(&uhid->report_lock);
+ if (ret)
+ return ret;
+
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ spin_lock_irqsave(&uhid->qlock, flags);
+ ev->type = UHID_FEATURE;
+ ev->u.feature.id = atomic_inc_return(&uhid->report_id);
+ ev->u.feature.rnum = rnum;
+ ev->u.feature.rtype = report_type;
+
+ atomic_set(&uhid->report_done, 0);
+ uhid_queue(uhid, ev);
+ spin_unlock_irqrestore(&uhid->qlock, flags);
+
+ ret = wait_event_interruptible_timeout(uhid->report_wait,
+ atomic_read(&uhid->report_done), 5 * HZ);
+
+ /*
+ * Make sure "uhid->running" is cleared on shutdown before
+ * "uhid->report_done" is set.
+ */
+ smp_rmb();
+ if (!ret || !uhid->running) {
+ ret = -EIO;
+ } else if (ret < 0) {
+ ret = -ERESTARTSYS;
+ } else {
+ spin_lock_irqsave(&uhid->qlock, flags);
+ req = &uhid->report_buf.u.feature_answer;
+
+ if (req->err) {
+ ret = -EIO;
+ } else {
+ ret = 0;
+ len = min(count,
+ min_t(size_t, req->size, UHID_DATA_MAX));
+ memcpy(buf, req->data, len);
+ }
+
+ spin_unlock_irqrestore(&uhid->qlock, flags);
+ }
+
+ atomic_set(&uhid->report_done, 1);
+
+unlock:
+ mutex_unlock(&uhid->report_lock);
+ return ret ? ret : len;
+}
+
+static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count,
+ unsigned char report_type)
+{
+ struct uhid_device *uhid = hid->driver_data;
+ __u8 rtype;
+ unsigned long flags;
+ struct uhid_event *ev;
+
+ switch (report_type) {
+ case HID_FEATURE_REPORT:
+ rtype = UHID_FEATURE_REPORT;
+ break;
+ case HID_OUTPUT_REPORT:
+ rtype = UHID_OUTPUT_REPORT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (count < 1 || count > UHID_DATA_MAX)
+ return -EINVAL;
+
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev)
+ return -ENOMEM;
+
+ ev->type = UHID_OUTPUT;
+ ev->u.output.size = count;
+ ev->u.output.rtype = rtype;
+ memcpy(ev->u.output.data, buf, count);
+
+ spin_lock_irqsave(&uhid->qlock, flags);
+ uhid_queue(uhid, ev);
+ spin_unlock_irqrestore(&uhid->qlock, flags);
+
+ return count;
+}
+
+static struct hid_ll_driver uhid_hid_driver = {
+ .start = uhid_hid_start,
+ .stop = uhid_hid_stop,
+ .open = uhid_hid_open,
+ .close = uhid_hid_close,
+ .hidinput_input_event = uhid_hid_input,
+ .parse = uhid_hid_parse,
+};
+
+static int uhid_dev_create(struct uhid_device *uhid,
+ const struct uhid_event *ev)
+{
+ struct hid_device *hid;
+ int ret;
+
+ if (uhid->running)
+ return -EALREADY;
+
+ uhid->rd_size = ev->u.create.rd_size;
+ if (uhid->rd_size <= 0 || uhid->rd_size > HID_MAX_DESCRIPTOR_SIZE)
+ return -EINVAL;
+
+ uhid->rd_data = kmalloc(uhid->rd_size, GFP_KERNEL);
+ if (!uhid->rd_data)
+ return -ENOMEM;
+
+ if (copy_from_user(uhid->rd_data, ev->u.create.rd_data,
+ uhid->rd_size)) {
+ ret = -EFAULT;
+ goto err_free;
+ }
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid)) {
+ ret = PTR_ERR(hid);
+ goto err_free;
+ }
+
+ strncpy(hid->name, ev->u.create.name, 127);
+ hid->name[127] = 0;
+ strncpy(hid->phys, ev->u.create.phys, 63);
+ hid->phys[63] = 0;
+ strncpy(hid->uniq, ev->u.create.uniq, 63);
+ hid->uniq[63] = 0;
+
+ hid->ll_driver = &uhid_hid_driver;
+ hid->hid_get_raw_report = uhid_hid_get_raw;
+ hid->hid_output_raw_report = uhid_hid_output_raw;
+ hid->bus = ev->u.create.bus;
+ hid->vendor = ev->u.create.vendor;
+ hid->product = ev->u.create.product;
+ hid->version = ev->u.create.version;
+ hid->country = ev->u.create.country;
+ hid->driver_data = uhid;
+ hid->dev.parent = uhid_misc.this_device;
+
+ uhid->hid = hid;
+ uhid->running = true;
+
+ ret = hid_add_device(hid);
+ if (ret) {
+ hid_err(hid, "Cannot register HID device\n");
+ goto err_hid;
+ }
+
+ return 0;
+
+err_hid:
+ hid_destroy_device(hid);
+ uhid->hid = NULL;
+ uhid->running = false;
+err_free:
+ kfree(uhid->rd_data);
+ return ret;
+}
+
+static int uhid_dev_destroy(struct uhid_device *uhid)
+{
+ if (!uhid->running)
+ return -EINVAL;
+
+ /* clear "running" before setting "report_done" */
+ uhid->running = false;
+ smp_wmb();
+ atomic_set(&uhid->report_done, 1);
+ wake_up_interruptible(&uhid->report_wait);
+
+ hid_destroy_device(uhid->hid);
+ kfree(uhid->rd_data);
+
+ return 0;
+}
+
+static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
+{
+ if (!uhid->running)
+ return -EINVAL;
+
+ hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data,
+ min_t(size_t, ev->u.input.size, UHID_DATA_MAX), 0);
+
+ return 0;
+}
+
+static int uhid_dev_feature_answer(struct uhid_device *uhid,
+ struct uhid_event *ev)
+{
+ unsigned long flags;
+
+ if (!uhid->running)
+ return -EINVAL;
+
+ spin_lock_irqsave(&uhid->qlock, flags);
+
+ /* id for old report; drop it silently */
+ if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
+ goto unlock;
+ if (atomic_read(&uhid->report_done))
+ goto unlock;
+
+ memcpy(&uhid->report_buf, ev, sizeof(*ev));
+ atomic_set(&uhid->report_done, 1);
+ wake_up_interruptible(&uhid->report_wait);
+
+unlock:
+ spin_unlock_irqrestore(&uhid->qlock, flags);
+ return 0;
+}
+
+static int uhid_char_open(struct inode *inode, struct file *file)
+{
+ struct uhid_device *uhid;
+
+ uhid = kzalloc(sizeof(*uhid), GFP_KERNEL);
+ if (!uhid)
+ return -ENOMEM;
+
+ mutex_init(&uhid->devlock);
+ mutex_init(&uhid->report_lock);
+ spin_lock_init(&uhid->qlock);
+ init_waitqueue_head(&uhid->waitq);
+ init_waitqueue_head(&uhid->report_wait);
+ uhid->running = false;
+ atomic_set(&uhid->report_done, 1);
+
+ file->private_data = uhid;
+ nonseekable_open(inode, file);
+
+ return 0;
+}
+
+static int uhid_char_release(struct inode *inode, struct file *file)
+{
+ struct uhid_device *uhid = file->private_data;
+ unsigned int i;
+
+ uhid_dev_destroy(uhid);
+
+ for (i = 0; i < UHID_BUFSIZE; ++i)
+ kfree(uhid->outq[i]);
+
+ kfree(uhid);
+
+ return 0;
+}
+
+static ssize_t uhid_char_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct uhid_device *uhid = file->private_data;
+ int ret;
+ unsigned long flags;
+ size_t len;
+
+ /* the reader needs at least the "type" member of uhid_event */
+ if (count < sizeof(__u32))
+ return -EINVAL;
+
+try_again:
+ if (file->f_flags & O_NONBLOCK) {
+ if (uhid->head == uhid->tail)
+ return -EAGAIN;
+ } else {
+ ret = wait_event_interruptible(uhid->waitq,
+ uhid->head != uhid->tail);
+ if (ret)
+ return ret;
+ }
+
+ ret = mutex_lock_interruptible(&uhid->devlock);
+ if (ret)
+ return ret;
+
+ if (uhid->head == uhid->tail) {
+ mutex_unlock(&uhid->devlock);
+ goto try_again;
+ } else {
+ len = min(count, sizeof(**uhid->outq));
+ if (copy_to_user(buffer, uhid->outq[uhid->tail], len)) {
+ ret = -EFAULT;
+ } else {
+ kfree(uhid->outq[uhid->tail]);
+ uhid->outq[uhid->tail] = NULL;
+
+ spin_lock_irqsave(&uhid->qlock, flags);
+ uhid->tail = (uhid->tail + 1) % UHID_BUFSIZE;
+ spin_unlock_irqrestore(&uhid->qlock, flags);
+ }
+ }
+
+ mutex_unlock(&uhid->devlock);
+ return ret ? ret : len;
+}
+
+static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct uhid_device *uhid = file->private_data;
+ int ret;
+ size_t len;
+
+ /* we need at least the "type" member of uhid_event */
+ if (count < sizeof(__u32))
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&uhid->devlock);
+ if (ret)
+ return ret;
+
+ memset(&uhid->input_buf, 0, sizeof(uhid->input_buf));
+ len = min(count, sizeof(uhid->input_buf));
+ if (copy_from_user(&uhid->input_buf, buffer, len)) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+
+ switch (uhid->input_buf.type) {
+ case UHID_CREATE:
+ ret = uhid_dev_create(uhid, &uhid->input_buf);
+ break;
+ case UHID_DESTROY:
+ ret = uhid_dev_destroy(uhid);
+ break;
+ case UHID_INPUT:
+ ret = uhid_dev_input(uhid, &uhid->input_buf);
+ break;
+ case UHID_FEATURE_ANSWER:
+ ret = uhid_dev_feature_answer(uhid, &uhid->input_buf);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+unlock:
+ mutex_unlock(&uhid->devlock);
+
+ /* return "count" not "len" to not confuse the caller */
+ return ret ? ret : count;
+}
+
+static unsigned int uhid_char_poll(struct file *file, poll_table *wait)
+{
+ struct uhid_device *uhid = file->private_data;
+
+ poll_wait(file, &uhid->waitq, wait);
+
+ if (uhid->head != uhid->tail)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static const struct file_operations uhid_fops = {
+ .owner = THIS_MODULE,
+ .open = uhid_char_open,
+ .release = uhid_char_release,
+ .read = uhid_char_read,
+ .write = uhid_char_write,
+ .poll = uhid_char_poll,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice uhid_misc = {
+ .fops = &uhid_fops,
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = UHID_NAME,
+};
+
+static int __init uhid_init(void)
+{
+ return misc_register(&uhid_misc);
+}
+
+static void __exit uhid_exit(void)
+{
+ misc_deregister(&uhid_misc);
+}
+
+module_init(uhid_init);
+module_exit(uhid_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
+MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
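
uhid lets a user-space process back a full HID device: it writes UHID_CREATE/UHID_INPUT/UHID_DESTROY events into /dev/uhid and reads back UHID_START/OPEN/OUTPUT/FEATURE requests from the kernel. A minimal creation sketch; the report descriptor and the vendor/product IDs below are placeholders, not meaningful values:

	/* Create and immediately destroy a virtual HID device via /dev/uhid. */
	#include <fcntl.h>
	#include <linux/input.h>	/* BUS_USB */
	#include <linux/uhid.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		static unsigned char rdesc[] = { 0x06, 0x00, 0xff };	/* placeholder descriptor */
		struct uhid_event ev;
		int fd = open("/dev/uhid", O_RDWR);

		if (fd < 0) {
			perror("open /dev/uhid");
			return 1;
		}

		memset(&ev, 0, sizeof(ev));
		ev.type = UHID_CREATE;
		strcpy((char *)ev.u.create.name, "uhid-example");
		ev.u.create.rd_data = rdesc;		/* copied by the kernel at create time */
		ev.u.create.rd_size = sizeof(rdesc);
		ev.u.create.bus = BUS_USB;
		ev.u.create.vendor = 0x1234;		/* placeholder IDs */
		ev.u.create.product = 0x5678;
		if (write(fd, &ev, sizeof(ev)) != (ssize_t)sizeof(ev)) {
			perror("UHID_CREATE");
			return 1;
		}

		/* read() would now deliver UHID_START/UHID_OPEN; inject reports with
		 * UHID_INPUT, answer feature requests with UHID_FEATURE_ANSWER. */

		memset(&ev, 0, sizeof(ev));
		ev.type = UHID_DESTROY;
		write(fd, &ev, sizeof(ev));
		close(fd);
		return 0;
	}
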
diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig
index 0f20fd1..0108c59 100644
--- a/drivers/hid/usbhid/Kconfig
+++ b/drivers/hid/usbhid/Kconfig
@@ -1,13 +1,13 @@
-comment "USB Input Devices"
+menu "USB HID support"
depends on USB
config USB_HID
- tristate "USB Human Interface Device (full HID) support"
+ tristate "USB HID transport layer"
default y
depends on USB && INPUT
select HID
---help---
- Say Y here if you want full HID support to connect USB keyboards,
+ Say Y here if you want to connect USB keyboards,
mice, joysticks, graphic tablets, or any other HID based devices
to your computer via USB, as well as Uninterruptible Power Supply
(UPS) and monitor control devices.
@@ -81,4 +81,4 @@ config USB_MOUSE
endmenu
-
+endmenu
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 482f936..dedd8e4 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -84,7 +84,7 @@ static int hid_start_in(struct hid_device *hid)
spin_lock_irqsave(&usbhid->lock, flags);
if (hid->open > 0 &&
!test_bit(HID_DISCONNECTED, &usbhid->iofl) &&
- !test_bit(HID_REPORTED_IDLE, &usbhid->iofl) &&
+ !test_bit(HID_SUSPENDED, &usbhid->iofl) &&
!test_and_set_bit(HID_IN_RUNNING, &usbhid->iofl)) {
rc = usb_submit_urb(usbhid->urbin, GFP_ATOMIC);
if (rc != 0) {
@@ -207,15 +207,27 @@ static int usbhid_restart_out_queue(struct usbhid_device *usbhid)
int kicked;
int r;
- if (!hid)
+ if (!hid || test_bit(HID_RESET_PENDING, &usbhid->iofl) ||
+ test_bit(HID_SUSPENDED, &usbhid->iofl))
return 0;
if ((kicked = (usbhid->outhead != usbhid->outtail))) {
hid_dbg(hid, "Kicking head %d tail %d", usbhid->outhead, usbhid->outtail);
+ /* Try to wake up from autosuspend... */
r = usb_autopm_get_interface_async(usbhid->intf);
if (r < 0)
return r;
+
+ /*
+ * If still suspended, don't submit. Submission will
+ * occur if/when resume drains the queue.
+ */
+ if (test_bit(HID_SUSPENDED, &usbhid->iofl)) {
+ usb_autopm_put_interface_no_suspend(usbhid->intf);
+ return r;
+ }
+
/* Asynchronously flush queue. */
set_bit(HID_OUT_RUNNING, &usbhid->iofl);
if (hid_submit_out(hid)) {
@@ -234,15 +246,27 @@ static int usbhid_restart_ctrl_queue(struct usbhid_device *usbhid)
int r;
WARN_ON(hid == NULL);
- if (!hid)
+ if (!hid || test_bit(HID_RESET_PENDING, &usbhid->iofl) ||
+ test_bit(HID_SUSPENDED, &usbhid->iofl))
return 0;
if ((kicked = (usbhid->ctrlhead != usbhid->ctrltail))) {
hid_dbg(hid, "Kicking head %d tail %d", usbhid->ctrlhead, usbhid->ctrltail);
+ /* Try to wake up from autosuspend... */
r = usb_autopm_get_interface_async(usbhid->intf);
if (r < 0)
return r;
+
+ /*
+ * If still suspended, don't submit. Submission will
+ * occur if/when resume drains the queue.
+ */
+ if (test_bit(HID_SUSPENDED, &usbhid->iofl)) {
+ usb_autopm_put_interface_no_suspend(usbhid->intf);
+ return r;
+ }
+
/* Asynchronously flush queue. */
set_bit(HID_CTRL_RUNNING, &usbhid->iofl);
if (hid_submit_ctrl(hid)) {
@@ -331,9 +355,12 @@ static int hid_submit_out(struct hid_device *hid)
usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) +
1 + (report->id > 0);
usbhid->urbout->dev = hid_to_usb_dev(hid);
- memcpy(usbhid->outbuf, raw_report,
- usbhid->urbout->transfer_buffer_length);
- kfree(raw_report);
+ if (raw_report) {
+ memcpy(usbhid->outbuf, raw_report,
+ usbhid->urbout->transfer_buffer_length);
+ kfree(raw_report);
+ usbhid->out[usbhid->outtail].raw_report = NULL;
+ }
dbg_hid("submitting out urb\n");
@@ -362,8 +389,11 @@ static int hid_submit_ctrl(struct hid_device *hid)
if (dir == USB_DIR_OUT) {
usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
usbhid->urbctrl->transfer_buffer_length = len;
- memcpy(usbhid->ctrlbuf, raw_report, len);
- kfree(raw_report);
+ if (raw_report) {
+ memcpy(usbhid->ctrlbuf, raw_report, len);
+ kfree(raw_report);
+ usbhid->ctrl[usbhid->ctrltail].raw_report = NULL;
+ }
} else {
int maxpacket, padlen;
@@ -407,16 +437,6 @@ static int hid_submit_ctrl(struct hid_device *hid)
* Output interrupt completion handler.
*/
-static int irq_out_pump_restart(struct hid_device *hid)
-{
- struct usbhid_device *usbhid = hid->driver_data;
-
- if (usbhid->outhead != usbhid->outtail)
- return hid_submit_out(hid);
- else
- return -1;
-}
-
static void hid_irq_out(struct urb *urb)
{
struct hid_device *hid = urb->context;
@@ -441,15 +461,17 @@ static void hid_irq_out(struct urb *urb)
spin_lock_irqsave(&usbhid->lock, flags);
- if (unplug)
+ if (unplug) {
usbhid->outtail = usbhid->outhead;
- else
+ } else {
usbhid->outtail = (usbhid->outtail + 1) & (HID_OUTPUT_FIFO_SIZE - 1);
- if (!irq_out_pump_restart(hid)) {
- /* Successfully submitted next urb in queue */
- spin_unlock_irqrestore(&usbhid->lock, flags);
- return;
+ if (usbhid->outhead != usbhid->outtail &&
+ hid_submit_out(hid) == 0) {
+ /* Successfully submitted next urb in queue */
+ spin_unlock_irqrestore(&usbhid->lock, flags);
+ return;
+ }
}
clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
@@ -461,15 +483,6 @@ static void hid_irq_out(struct urb *urb)
/*
* Control pipe completion handler.
*/
-static int ctrl_pump_restart(struct hid_device *hid)
-{
- struct usbhid_device *usbhid = hid->driver_data;
-
- if (usbhid->ctrlhead != usbhid->ctrltail)
- return hid_submit_ctrl(hid);
- else
- return -1;
-}
static void hid_ctrl(struct urb *urb)
{
@@ -498,15 +511,17 @@ static void hid_ctrl(struct urb *urb)
hid_warn(urb->dev, "ctrl urb status %d received\n", status);
}
- if (unplug)
+ if (unplug) {
usbhid->ctrltail = usbhid->ctrlhead;
- else
+ } else {
usbhid->ctrltail = (usbhid->ctrltail + 1) & (HID_CONTROL_FIFO_SIZE - 1);
- if (!ctrl_pump_restart(hid)) {
- /* Successfully submitted next urb in queue */
- spin_unlock(&usbhid->lock);
- return;
+ if (usbhid->ctrlhead != usbhid->ctrltail &&
+ hid_submit_ctrl(hid) == 0) {
+ /* Successfully submitted next urb in queue */
+ spin_unlock(&usbhid->lock);
+ return;
+ }
}
clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
@@ -540,49 +555,36 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
usbhid->out[usbhid->outhead].report = report;
usbhid->outhead = head;
- /* Try to awake from autosuspend... */
- if (usb_autopm_get_interface_async(usbhid->intf) < 0)
- return;
+ /* If the queue isn't running, restart it */
+ if (!test_bit(HID_OUT_RUNNING, &usbhid->iofl)) {
+ usbhid_restart_out_queue(usbhid);
- /*
- * But if still suspended, leave urb enqueued, don't submit.
- * Submission will occur if/when resume() drains the queue.
- */
- if (test_bit(HID_REPORTED_IDLE, &usbhid->iofl))
- return;
+ /* Otherwise see if an earlier request has timed out */
+ } else if (time_after(jiffies, usbhid->last_out + HZ * 5)) {
+
+ /* Prevent autosuspend following the unlink */
+ usb_autopm_get_interface_no_resume(usbhid->intf);
- if (!test_and_set_bit(HID_OUT_RUNNING, &usbhid->iofl)) {
- if (hid_submit_out(hid)) {
- clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
- usb_autopm_put_interface_async(usbhid->intf);
- }
- wake_up(&usbhid->wait);
- } else {
/*
- * the queue is known to run
- * but an earlier request may be stuck
- * we may need to time out
- * no race because the URB is blocked under
- * spinlock
+ * Prevent resubmission in case the URB completes
+ * before we can unlink it. We don't want to cancel
+ * the wrong transfer!
*/
- if (time_after(jiffies, usbhid->last_out + HZ * 5)) {
- usb_block_urb(usbhid->urbout);
- /* drop lock to not deadlock if the callback is called */
- spin_unlock(&usbhid->lock);
- usb_unlink_urb(usbhid->urbout);
- spin_lock(&usbhid->lock);
- usb_unblock_urb(usbhid->urbout);
- /*
- * if the unlinking has already completed
- * the pump will have been stopped
- * it must be restarted now
- */
- if (!test_bit(HID_OUT_RUNNING, &usbhid->iofl))
- if (!irq_out_pump_restart(hid))
- set_bit(HID_OUT_RUNNING, &usbhid->iofl);
+ usb_block_urb(usbhid->urbout);
+ /* Drop lock to avoid deadlock if the callback runs */
+ spin_unlock(&usbhid->lock);
- }
+ usb_unlink_urb(usbhid->urbout);
+ spin_lock(&usbhid->lock);
+ usb_unblock_urb(usbhid->urbout);
+
+ /* Unlink might have stopped the queue */
+ if (!test_bit(HID_OUT_RUNNING, &usbhid->iofl))
+ usbhid_restart_out_queue(usbhid);
+
+ /* Now we can allow autosuspend again */
+ usb_autopm_put_interface_async(usbhid->intf);
}
return;
}
@@ -604,47 +606,36 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
usbhid->ctrl[usbhid->ctrlhead].dir = dir;
usbhid->ctrlhead = head;
- /* Try to awake from autosuspend... */
- if (usb_autopm_get_interface_async(usbhid->intf) < 0)
- return;
+ /* If the queue isn't running, restart it */
+ if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl)) {
+ usbhid_restart_ctrl_queue(usbhid);
- /*
- * If already suspended, leave urb enqueued, but don't submit.
- * Submission will occur if/when resume() drains the queue.
- */
- if (test_bit(HID_REPORTED_IDLE, &usbhid->iofl))
- return;
+ /* Otherwise see if an earlier request has timed out */
+ } else if (time_after(jiffies, usbhid->last_ctrl + HZ * 5)) {
+
+ /* Prevent autosuspend following the unlink */
+ usb_autopm_get_interface_no_resume(usbhid->intf);
- if (!test_and_set_bit(HID_CTRL_RUNNING, &usbhid->iofl)) {
- if (hid_submit_ctrl(hid)) {
- clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
- usb_autopm_put_interface_async(usbhid->intf);
- }
- wake_up(&usbhid->wait);
- } else {
/*
- * the queue is known to run
- * but an earlier request may be stuck
- * we may need to time out
- * no race because the URB is blocked under
- * spinlock
+ * Prevent resubmission in case the URB completes
+ * before we can unlink it. We don't want to cancel
+ * the wrong transfer!
*/
- if (time_after(jiffies, usbhid->last_ctrl + HZ * 5)) {
- usb_block_urb(usbhid->urbctrl);
- /* drop lock to not deadlock if the callback is called */
- spin_unlock(&usbhid->lock);
- usb_unlink_urb(usbhid->urbctrl);
- spin_lock(&usbhid->lock);
- usb_unblock_urb(usbhid->urbctrl);
- /*
- * if the unlinking has already completed
- * the pump will have been stopped
- * it must be restarted now
- */
- if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
- if (!ctrl_pump_restart(hid))
- set_bit(HID_CTRL_RUNNING, &usbhid->iofl);
- }
+ usb_block_urb(usbhid->urbctrl);
+
+ /* Drop lock to avoid deadlock if the callback runs */
+ spin_unlock(&usbhid->lock);
+
+ usb_unlink_urb(usbhid->urbctrl);
+ spin_lock(&usbhid->lock);
+ usb_unblock_urb(usbhid->urbctrl);
+
+ /* Unlink might have stopped the queue */
+ if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+ usbhid_restart_ctrl_queue(usbhid);
+
+ /* Now we can allow autosuspend again */
+ usb_autopm_put_interface_async(usbhid->intf);
}
}
@@ -1002,9 +993,10 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
static void usbhid_restart_queues(struct usbhid_device *usbhid)
{
- if (usbhid->urbout)
+ if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
usbhid_restart_out_queue(usbhid);
- usbhid_restart_ctrl_queue(usbhid);
+ if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+ usbhid_restart_ctrl_queue(usbhid);
}
static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
@@ -1471,11 +1463,38 @@ void usbhid_put_power(struct hid_device *hid)
#ifdef CONFIG_PM
+static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
+{
+ struct usbhid_device *usbhid = hid->driver_data;
+ int status;
+
+ spin_lock_irq(&usbhid->lock);
+ clear_bit(HID_SUSPENDED, &usbhid->iofl);
+ usbhid_mark_busy(usbhid);
+
+ if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
+ test_bit(HID_RESET_PENDING, &usbhid->iofl))
+ schedule_work(&usbhid->reset_work);
+ usbhid->retry_delay = 0;
+
+ usbhid_restart_queues(usbhid);
+ spin_unlock_irq(&usbhid->lock);
+
+ status = hid_start_in(hid);
+ if (status < 0)
+ hid_io_error(hid);
+
+ if (driver_suspended && hid->driver && hid->driver->resume)
+ status = hid->driver->resume(hid);
+ return status;
+}
+
static int hid_suspend(struct usb_interface *intf, pm_message_t message)
{
struct hid_device *hid = usb_get_intfdata(intf);
struct usbhid_device *usbhid = hid->driver_data;
int status;
+ bool driver_suspended = false;
if (PMSG_IS_AUTO(message)) {
spin_lock_irq(&usbhid->lock); /* Sync with error handler */
@@ -1486,13 +1505,14 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
&& !test_bit(HID_KEYS_PRESSED, &usbhid->iofl)
&& (!usbhid->ledcount || ignoreled))
{
- set_bit(HID_REPORTED_IDLE, &usbhid->iofl);
+ set_bit(HID_SUSPENDED, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
if (hid->driver && hid->driver->suspend) {
status = hid->driver->suspend(hid, message);
if (status < 0)
- return status;
+ goto failed;
}
+ driver_suspended = true;
} else {
usbhid_mark_busy(usbhid);
spin_unlock_irq(&usbhid->lock);
@@ -1505,11 +1525,14 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
if (status < 0)
return status;
}
+ driver_suspended = true;
spin_lock_irq(&usbhid->lock);
- set_bit(HID_REPORTED_IDLE, &usbhid->iofl);
+ set_bit(HID_SUSPENDED, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
- if (usbhid_wait_io(hid) < 0)
- return -EIO;
+ if (usbhid_wait_io(hid) < 0) {
+ status = -EIO;
+ goto failed;
+ }
}
hid_cancel_delayed_stuff(usbhid);
@@ -1517,14 +1540,15 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
if (PMSG_IS_AUTO(message) && test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) {
/* lost race against keypresses */
- status = hid_start_in(hid);
- if (status < 0)
- hid_io_error(hid);
- usbhid_mark_busy(usbhid);
- return -EBUSY;
+ status = -EBUSY;
+ goto failed;
}
dev_dbg(&intf->dev, "suspend\n");
return 0;
+
+ failed:
+ hid_resume_common(hid, driver_suspended);
+ return status;
}
static int hid_resume(struct usb_interface *intf)
@@ -1536,23 +1560,7 @@ static int hid_resume(struct usb_interface *intf)
if (!test_bit(HID_STARTED, &usbhid->iofl))
return 0;
- clear_bit(HID_REPORTED_IDLE, &usbhid->iofl);
- usbhid_mark_busy(usbhid);
-
- if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
- test_bit(HID_RESET_PENDING, &usbhid->iofl))
- schedule_work(&usbhid->reset_work);
- usbhid->retry_delay = 0;
- status = hid_start_in(hid);
- if (status < 0)
- hid_io_error(hid);
- usbhid_restart_queues(usbhid);
-
- if (status >= 0 && hid->driver && hid->driver->resume) {
- int ret = hid->driver->resume(hid);
- if (ret < 0)
- status = ret;
- }
+ status = hid_resume_common(hid, true);
dev_dbg(&intf->dev, "resume status %d\n", status);
return 0;
}
@@ -1563,7 +1571,7 @@ static int hid_reset_resume(struct usb_interface *intf)
struct usbhid_device *usbhid = hid->driver_data;
int status;
- clear_bit(HID_REPORTED_IDLE, &usbhid->iofl);
+ clear_bit(HID_SUSPENDED, &usbhid->iofl);
status = hid_post_reset(intf);
if (status >= 0 && hid->driver && hid->driver->reset_resume) {
int ret = hid->driver->reset_resume(hid);
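
The thrust of the hid-core rework: __usbhid_submit_report() stops open-coding autosuspend wake-up and submission and simply restarts the queue, or unlinks a stuck URB after a 5 second timeout; usbhid_restart_out_queue()/usbhid_restart_ctrl_queue() refuse to submit while HID_RESET_PENDING or HID_SUSPENDED (the renamed HID_REPORTED_IDLE) is set; and hid_resume_common() carries the shared work for resume, reset_resume and the new suspend-failure unwind. Condensed control flow of the out-queue side, paraphrasing the hunks above (not a drop-in function; usbhid->lock is assumed held by the caller):

	static void example_submit_path(struct usbhid_device *usbhid)
	{
		/* the report has already been slotted into the software FIFO */

		if (!test_bit(HID_OUT_RUNNING, &usbhid->iofl)) {
			/* Idle queue: the restart helper bails out early when a
			 * reset is pending or the device is suspended, and only
			 * submits after usb_autopm_get_interface_async() succeeds. */
			usbhid_restart_out_queue(usbhid);
		} else if (time_after(jiffies, usbhid->last_out + 5 * HZ)) {
			/* Queue claims to run but the oldest URB looks stuck. */
			usb_autopm_get_interface_no_resume(usbhid->intf);
			usb_block_urb(usbhid->urbout);	/* no resubmission behind our back */
			spin_unlock(&usbhid->lock);	/* completion handler takes the lock */
			usb_unlink_urb(usbhid->urbout);
			spin_lock(&usbhid->lock);
			usb_unblock_urb(usbhid->urbout);
			if (!test_bit(HID_OUT_RUNNING, &usbhid->iofl))
				usbhid_restart_out_queue(usbhid);
			usb_autopm_put_interface_async(usbhid->intf);
		}
	}
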
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 0597ee6..903eef3 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -76,6 +76,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index 1883d7b..bd87a61 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -53,7 +53,6 @@ struct usb_interface *usbhid_find_interface(int minor);
#define HID_CLEAR_HALT 6
#define HID_DISCONNECTED 7
#define HID_STARTED 8
-#define HID_REPORTED_IDLE 9
#define HID_KEYS_PRESSED 10
#define HID_NO_BANDWIDTH 11
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 7cd9bf4..b0a2e4c 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -314,6 +314,16 @@ config SENSORS_DS1621
This driver can also be built as a module. If so, the module
will be called ds1621.
+config SENSORS_DA9052_ADC
+ tristate "Dialog DA9052/DA9053 ADC"
+ depends on PMIC_DA9052
+ help
+ Say y here to support the ADC found on Dialog Semiconductor
+ DA9052-BC and DA9053-AA/Bx PMICs.
+
+ This driver can also be built as a module. If so, the module
+ will be called da9052-hwmon.
+
config SENSORS_EXYNOS4_TMU
tristate "Temperature sensor on Samsung EXYNOS4"
depends on ARCH_EXYNOS4
@@ -433,6 +443,16 @@ config SENSORS_GPIO_FAN
This driver can also be built as a module. If so, the module
will be called gpio-fan.
+config SENSORS_HIH6130
+ tristate "Honeywell Humidicon HIH-6130 humidity/temperature sensor"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for the Honeywell
+ HIH-6130 and HIH-6131 Humidicon humidity sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called hih6130.
+
config SENSORS_CORETEMP
tristate "Intel Core/Core2/Atom temperature sensor"
depends on X86 && PCI && EXPERIMENTAL
@@ -1036,8 +1056,9 @@ config SENSORS_SCH56XX_COMMON
config SENSORS_SCH5627
tristate "SMSC SCH5627"
- depends on !PPC
+ depends on !PPC && WATCHDOG
select SENSORS_SCH56XX_COMMON
+ select WATCHDOG_CORE
help
If you say yes here you get support for the hardware monitoring
features of the SMSC SCH5627 Super-I/O chip including support for
@@ -1048,8 +1069,9 @@ config SENSORS_SCH5627
config SENSORS_SCH5636
tristate "SMSC SCH5636"
- depends on !PPC
+ depends on !PPC && WATCHDOG
select SENSORS_SCH56XX_COMMON
+ select WATCHDOG_CORE
help
SMSC SCH5636 Super I/O chips include an embedded microcontroller for
hardware monitoring solutions, allowing motherboard manufacturers to
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index e1eeac1..7aa9811 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o
obj-$(CONFIG_SENSORS_ASC7621) += asc7621.o
obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
+obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o
obj-$(CONFIG_SENSORS_DME1737) += dme1737.o
obj-$(CONFIG_SENSORS_DS620) += ds620.o
obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
@@ -58,6 +59,7 @@ obj-$(CONFIG_SENSORS_G760A) += g760a.o
obj-$(CONFIG_SENSORS_GL518SM) += gl518sm.o
obj-$(CONFIG_SENSORS_GL520SM) += gl520sm.o
obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o
+obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o
obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o
obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index a72bf25..d4419b4 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -1513,10 +1513,10 @@ LEAVE_UPDATE:
return NULL;
}
-#ifdef CONFIG_PM
-static int abituguru_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int abituguru_suspend(struct device *dev)
{
- struct abituguru_data *data = platform_get_drvdata(pdev);
+ struct abituguru_data *data = dev_get_drvdata(dev);
/*
* make sure all communications with the uguru are done and no new
* ones are started
@@ -1525,29 +1525,30 @@ static int abituguru_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int abituguru_resume(struct platform_device *pdev)
+static int abituguru_resume(struct device *dev)
{
- struct abituguru_data *data = platform_get_drvdata(pdev);
+ struct abituguru_data *data = dev_get_drvdata(dev);
/* See if the uGuru is still ready */
if (inb_p(data->addr + ABIT_UGURU_DATA) != ABIT_UGURU_STATUS_INPUT)
data->uguru_ready = 0;
mutex_unlock(&data->update_lock);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(abituguru_pm, abituguru_suspend, abituguru_resume);
+#define ABIT_UGURU_PM &abituguru_pm
#else
-#define abituguru_suspend NULL
-#define abituguru_resume NULL
+#define ABIT_UGURU_PM NULL
#endif /* CONFIG_PM */
static struct platform_driver abituguru_driver = {
.driver = {
.owner = THIS_MODULE,
.name = ABIT_UGURU_NAME,
+ .pm = ABIT_UGURU_PM,
},
.probe = abituguru_probe,
.remove = __devexit_p(abituguru_remove),
- .suspend = abituguru_suspend,
- .resume = abituguru_resume,
};
static int __init abituguru_detect(void)
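
abituguru (and abituguru3 right after it) drop the legacy platform_driver .suspend/.resume callbacks in favour of a dev_pm_ops built with SIMPLE_DEV_PM_OPS() and hung off the driver's .pm field, with the callbacks now guarded by CONFIG_PM_SLEEP and taking a struct device. The conversion recipe in skeleton form (the foo_* names are placeholders):

	#ifdef CONFIG_PM_SLEEP
	static int foo_suspend(struct device *dev)
	{
		struct foo_data *data = dev_get_drvdata(dev);	/* was platform_get_drvdata(pdev) */

		/* quiesce the hardware using "data" */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		struct foo_data *data = dev_get_drvdata(dev);

		/* re-check or re-initialize the hardware using "data" */
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(foo_pm, foo_suspend, foo_resume);
	#define FOO_PM	(&foo_pm)
	#else
	#define FOO_PM	NULL
	#endif

	static struct platform_driver foo_driver = {
		.driver = {
			.owner	= THIS_MODULE,
			.name	= "foo",
			.pm	= FOO_PM,	/* replaces .suspend/.resume on the platform_driver */
		},
		/* .probe / .remove unchanged */
	};
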
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index a5bc428..5d582ae 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -1141,10 +1141,10 @@ LEAVE_UPDATE:
return NULL;
}
-#ifdef CONFIG_PM
-static int abituguru3_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int abituguru3_suspend(struct device *dev)
{
- struct abituguru3_data *data = platform_get_drvdata(pdev);
+ struct abituguru3_data *data = dev_get_drvdata(dev);
/*
* make sure all communications with the uguru3 are done and no new
* ones are started
@@ -1153,26 +1153,27 @@ static int abituguru3_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int abituguru3_resume(struct platform_device *pdev)
+static int abituguru3_resume(struct device *dev)
{
- struct abituguru3_data *data = platform_get_drvdata(pdev);
+ struct abituguru3_data *data = dev_get_drvdata(dev);
mutex_unlock(&data->update_lock);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(abituguru3_pm, abituguru3_suspend, abituguru3_resume);
+#define ABIT_UGURU3_PM &abituguru3_pm
#else
-#define abituguru3_suspend NULL
-#define abituguru3_resume NULL
+#define ABIT_UGURU3_PM NULL
#endif /* CONFIG_PM */
static struct platform_driver abituguru3_driver = {
.driver = {
.owner = THIS_MODULE,
.name = ABIT_UGURU3_NAME,
+ .pm = ABIT_UGURU3_PM
},
.probe = abituguru3_probe,
.remove = __devexit_p(abituguru3_remove),
- .suspend = abituguru3_suspend,
- .resume = abituguru3_resume
};
static int __init abituguru3_dmi_detect(void)
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 34ad5a2..563c029 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -101,7 +101,7 @@ struct acpi_power_meter_resource {
unsigned long sensors_last_updated;
struct sensor_device_attribute sensors[NUM_SENSORS];
int num_sensors;
- int trip[2];
+ s64 trip[2];
int num_domain_devices;
struct acpi_device **domain_devices;
struct kobject *holders_dir;
@@ -237,7 +237,7 @@ static ssize_t set_cap(struct device *dev, struct device_attribute *devattr,
if (res)
return res;
- temp /= 1000;
+ temp = DIV_ROUND_CLOSEST(temp, 1000);
if (temp > resource->caps.max_cap || temp < resource->caps.min_cap)
return -EINVAL;
arg0.integer.value = temp;
@@ -307,9 +307,7 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
if (res)
return res;
- temp /= 1000;
- if (temp < 0)
- return -EINVAL;
+ temp = DIV_ROUND_CLOSEST(temp, 1000);
mutex_lock(&resource->lock);
resource->trip[attr->index - 7] = temp;
@@ -929,20 +927,25 @@ static int acpi_power_meter_remove(struct acpi_device *device, int type)
return 0;
}
-static int acpi_power_meter_resume(struct acpi_device *device)
+static int acpi_power_meter_resume(struct device *dev)
{
struct acpi_power_meter_resource *resource;
- if (!device || !acpi_driver_data(device))
+ if (!dev)
+ return -EINVAL;
+
+ resource = acpi_driver_data(to_acpi_device(dev));
+ if (!resource)
return -EINVAL;
- resource = acpi_driver_data(device);
free_capabilities(resource);
read_capabilities(resource);
return 0;
}
+static SIMPLE_DEV_PM_OPS(acpi_power_meter_pm, NULL, acpi_power_meter_resume);
+
static struct acpi_driver acpi_power_meter_driver = {
.name = "power_meter",
.class = ACPI_POWER_METER_CLASS,
@@ -950,9 +953,9 @@ static struct acpi_driver acpi_power_meter_driver = {
.ops = {
.add = acpi_power_meter_add,
.remove = acpi_power_meter_remove,
- .resume = acpi_power_meter_resume,
.notify = acpi_power_meter_notify,
},
+ .drv.pm = &acpi_power_meter_pm,
};
/* Module init/exit routines */
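
Besides moving resume into dev_pm_ops, the power-meter changes widen the trip points to s64 and make set_cap()/set_trip() divide the user-supplied value by 1000 with DIV_ROUND_CLOSEST() instead of truncating. For non-negative inputs the difference is simply round-to-nearest versus round-down, as this small illustration shows (a user-space re-implementation of the macro, valid for positive operands only):

	#include <stdio.h>

	/* positive-operand re-implementation of the kernel macro, for illustration */
	#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

	int main(void)
	{
		long v[] = { 1499, 1500, 1999 };

		for (unsigned i = 0; i < sizeof(v) / sizeof(v[0]); i++)
			printf("%ld / 1000: truncated %ld, closest %ld\n",
			       v[i], v[i] / 1000, DIV_ROUND_CLOSEST(v[i], 1000));
		return 0;	/* prints 1/1, 1/2 and 1/2 respectively */
	}
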
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 4394e7e..fd1d1b1 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -366,11 +366,11 @@ static int adm1021_probe(struct i2c_client *client,
struct adm1021_data *data;
int err;
- data = kzalloc(sizeof(struct adm1021_data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(struct adm1021_data),
+ GFP_KERNEL);
if (!data) {
- pr_debug("adm1021: detect failed, kzalloc failed!\n");
- err = -ENOMEM;
- goto error0;
+ pr_debug("adm1021: detect failed, devm_kzalloc failed!\n");
+ return -ENOMEM;
}
i2c_set_clientdata(client, data);
@@ -384,21 +384,18 @@ static int adm1021_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &adm1021_group);
if (err)
- goto error1;
+ return err;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
- goto error3;
+ goto error;
}
return 0;
-error3:
+error:
sysfs_remove_group(&client->dev.kobj, &adm1021_group);
-error1:
- kfree(data);
-error0:
return err;
}
@@ -418,7 +415,6 @@ static int adm1021_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &adm1021_group);
- kfree(data);
return 0;
}
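
adm1021 is the first of a series of hwmon probes (adm1025, adm1026, adm1031, adm9240 and adt7475 follow) converted to devm_kzalloc(): the allocation is bound to the client's struct device, so the error paths lose their kfree() labels and remove() no longer frees the data by hand. The resulting probe shape, reduced to a skeleton with placeholder foo_* names:

	static int foo_probe(struct i2c_client *client, const struct i2c_device_id *id)
	{
		struct foo_data *data;
		int err;

		data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;			/* nothing to unwind */

		i2c_set_clientdata(client, data);
		mutex_init(&data->update_lock);

		err = sysfs_create_group(&client->dev.kobj, &foo_group);
		if (err)
			return err;			/* devm memory is released automatically */

		data->hwmon_dev = hwmon_device_register(&client->dev);
		if (IS_ERR(data->hwmon_dev)) {
			err = PTR_ERR(data->hwmon_dev);
			goto exit_remove;		/* sysfs group still needs manual cleanup */
		}
		return 0;

	exit_remove:
		sysfs_remove_group(&client->dev.kobj, &foo_group);
		return err;
	}
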
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index b8557f9..7e16e5d 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -477,11 +477,10 @@ static int adm1025_probe(struct i2c_client *client,
int err;
u8 config;
- data = kzalloc(sizeof(struct adm1025_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct adm1025_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -492,7 +491,7 @@ static int adm1025_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &adm1025_group);
if (err)
- goto exit_free;
+ return err;
/* Pin 11 is either in4 (+12V) or VID4 */
config = i2c_smbus_read_byte_data(client, ADM1025_REG_CONFIG);
@@ -513,9 +512,6 @@ static int adm1025_probe(struct i2c_client *client,
exit_remove:
sysfs_remove_group(&client->dev.kobj, &adm1025_group);
sysfs_remove_group(&client->dev.kobj, &adm1025_group_in4);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -569,7 +565,6 @@ static int adm1025_remove(struct i2c_client *client)
sysfs_remove_group(&client->dev.kobj, &adm1025_group);
sysfs_remove_group(&client->dev.kobj, &adm1025_group_in4);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index 1003219..0f068e7 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -1834,11 +1834,10 @@ static int adm1026_probe(struct i2c_client *client,
struct adm1026_data *data;
int err;
- data = kzalloc(sizeof(struct adm1026_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct adm1026_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -1852,7 +1851,7 @@ static int adm1026_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &adm1026_group);
if (err)
- goto exitfree;
+ return err;
if (data->config1 & CFG1_AIN8_9)
err = sysfs_create_group(&client->dev.kobj,
&adm1026_group_in8_9);
@@ -1877,9 +1876,6 @@ exitremove:
sysfs_remove_group(&client->dev.kobj, &adm1026_group_in8_9);
else
sysfs_remove_group(&client->dev.kobj, &adm1026_group_temp3);
-exitfree:
- kfree(data);
-exit:
return err;
}
@@ -1892,7 +1888,6 @@ static int adm1026_remove(struct i2c_client *client)
sysfs_remove_group(&client->dev.kobj, &adm1026_group_in8_9);
else
sysfs_remove_group(&client->dev.kobj, &adm1026_group_temp3);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 44e1fd7..c6a4631 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -954,11 +954,10 @@ static int adm1031_probe(struct i2c_client *client,
struct adm1031_data *data;
int err;
- data = kzalloc(sizeof(struct adm1031_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct adm1031_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
data->chip_type = id->driver_data;
@@ -975,7 +974,7 @@ static int adm1031_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &adm1031_group);
if (err)
- goto exit_free;
+ return err;
if (data->chip_type == adm1031) {
err = sysfs_create_group(&client->dev.kobj, &adm1031_group_opt);
@@ -994,9 +993,6 @@ static int adm1031_probe(struct i2c_client *client,
exit_remove:
sysfs_remove_group(&client->dev.kobj, &adm1031_group);
sysfs_remove_group(&client->dev.kobj, &adm1031_group_opt);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -1007,7 +1003,6 @@ static int adm1031_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &adm1031_group);
sysfs_remove_group(&client->dev.kobj, &adm1031_group_opt);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index c3c2865..5a78d10 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -650,11 +650,9 @@ static int adm9240_probe(struct i2c_client *new_client,
struct adm9240_data *data;
int err;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&new_client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(new_client, data);
mutex_init(&data->update_lock);
@@ -664,7 +662,7 @@ static int adm9240_probe(struct i2c_client *new_client,
/* populate sysfs filesystem */
err = sysfs_create_group(&new_client->dev.kobj, &adm9240_group);
if (err)
- goto exit_free;
+ return err;
data->hwmon_dev = hwmon_device_register(&new_client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -676,9 +674,6 @@ static int adm9240_probe(struct i2c_client *new_client,
exit_remove:
sysfs_remove_group(&new_client->dev.kobj, &adm9240_group);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -689,7 +684,6 @@ static int adm9240_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &adm9240_group);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index df29d13..861c756 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -1260,7 +1260,7 @@ static int adt7475_probe(struct i2c_client *client,
int i, ret = 0, revision;
u8 config2, config3;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
@@ -1344,7 +1344,7 @@ static int adt7475_probe(struct i2c_client *client,
ret = sysfs_create_group(&client->dev.kobj, &adt7475_attr_group);
if (ret)
- goto efree;
+ return ret;
/* Features that can be disabled individually */
if (data->has_fan4) {
@@ -1410,8 +1410,6 @@ static int adt7475_probe(struct i2c_client *client,
eremove:
adt7475_remove_files(client, data);
-efree:
- kfree(data);
return ret;
}
@@ -1421,7 +1419,6 @@ static int adt7475_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
adt7475_remove_files(client, data);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index f082e48..4d937a1 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -8,7 +8,7 @@
*
* Based on hdaps.c driver:
* Copyright (C) 2005 Robert Love <rml@novell.com>
- * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
+ * Copyright (C) 2005 Jesper Juhl <jj@chaosbits.net>
*
* Fan control based on smcFanControl:
* Copyright (C) 2006 Hendrik Holtmann <holtmann@mac.com>
@@ -54,7 +54,7 @@
#define APPLESMC_MAX_DATA_LENGTH 32
/* wait up to 32 ms for a status change. */
-#define APPLESMC_MIN_WAIT 0x0040
+#define APPLESMC_MIN_WAIT 0x0010
#define APPLESMC_MAX_WAIT 0x8000
#define APPLESMC_STATUS_MASK 0x0f
@@ -80,6 +80,8 @@
#define FANS_MANUAL "FS! " /* r-w ui16 */
#define FAN_ID_FMT "F%dID" /* r-o char[16] */
+#define TEMP_SENSOR_TYPE "sp78"
+
/* List of keys used to read/write fan speeds */
static const char *const fan_speed_fmt[] = {
"F%dAc", /* actual speed */
@@ -96,10 +98,6 @@ static const char *const fan_speed_fmt[] = {
#define APPLESMC_INPUT_FUZZ 4 /* input event threshold */
#define APPLESMC_INPUT_FLAT 4
-#define SENSOR_X 0
-#define SENSOR_Y 1
-#define SENSOR_Z 2
-
#define to_index(attr) (to_sensor_dev_attr(attr)->index & 0xffff)
#define to_option(attr) (to_sensor_dev_attr(attr)->index >> 16)
@@ -135,11 +133,13 @@ static struct applesmc_registers {
unsigned int temp_count; /* number of temperature registers */
unsigned int temp_begin; /* temperature lower index bound */
unsigned int temp_end; /* temperature upper index bound */
+ unsigned int index_count; /* size of temperature index array */
int num_light_sensors; /* number of light sensors */
bool has_accelerometer; /* has motion sensor */
bool has_key_backlight; /* has keyboard backlight */
bool init_complete; /* true when fully initialized */
struct applesmc_entry *cache; /* cached key entries */
+ const char **index; /* temperature key index */
} smcreg = {
.mutex = __MUTEX_INITIALIZER(smcreg.mutex),
};
@@ -215,7 +215,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
int i;
if (send_command(cmd) || send_argument(key)) {
- pr_warn("%s: read arg fail\n", key);
+ pr_warn("%.4s: read arg fail\n", key);
return -EIO;
}
@@ -223,7 +223,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
for (i = 0; i < len; i++) {
if (__wait_status(0x05)) {
- pr_warn("%s: read data fail\n", key);
+ pr_warn("%.4s: read data fail\n", key);
return -EIO;
}
buffer[i] = inb(APPLESMC_DATA_PORT);
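
The "%s" to "%.4s" change caps the printed key at four characters, which matters when the key buffer behind the pointer is exactly four bytes and not NUL-terminated: the precision specifier stops printf from reading past the key. A tiny user-space illustration:

	#include <stdio.h>

	int main(void)
	{
		const char key[4] = { 'T', 'C', '0', 'P' };	/* no terminating NUL */

		printf("%.4s: read data fail\n", key);	/* prints exactly "TC0P: ..." */
		/* printf("%s", key) would keep reading past the array until it
		 * happened to hit a zero byte */
		return 0;
	}
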
@@ -432,30 +432,19 @@ static int applesmc_has_key(const char *key, bool *value)
}
/*
- * applesmc_read_motion_sensor - Read motion sensor (X, Y or Z).
+ * applesmc_read_s16 - Read 16-bit signed big endian register
*/
-static int applesmc_read_motion_sensor(int index, s16 *value)
+static int applesmc_read_s16(const char *key, s16 *value)
{
u8 buffer[2];
int ret;
- switch (index) {
- case SENSOR_X:
- ret = applesmc_read_key(MOTION_SENSOR_X_KEY, buffer, 2);
- break;
- case SENSOR_Y:
- ret = applesmc_read_key(MOTION_SENSOR_Y_KEY, buffer, 2);
- break;
- case SENSOR_Z:
- ret = applesmc_read_key(MOTION_SENSOR_Z_KEY, buffer, 2);
- break;
- default:
- ret = -EINVAL;
- }
+ ret = applesmc_read_key(key, buffer, 2);
+ if (ret)
+ return ret;
*value = ((s16)buffer[0] << 8) | buffer[1];
-
- return ret;
+ return 0;
}
/*
@@ -482,6 +471,30 @@ static void applesmc_device_init(void)
pr_warn("failed to init the device\n");
}
+static int applesmc_init_index(struct applesmc_registers *s)
+{
+ const struct applesmc_entry *entry;
+ unsigned int i;
+
+ if (s->index)
+ return 0;
+
+ s->index = kcalloc(s->temp_count, sizeof(s->index[0]), GFP_KERNEL);
+ if (!s->index)
+ return -ENOMEM;
+
+ for (i = s->temp_begin; i < s->temp_end; i++) {
+ entry = applesmc_get_entry_by_index(i);
+ if (IS_ERR(entry))
+ continue;
+ if (strcmp(entry->type, TEMP_SENSOR_TYPE))
+ continue;
+ s->index[s->index_count++] = entry->key;
+ }
+
+ return 0;
+}
+
/*
* applesmc_init_smcreg_try - Try to initialize register cache. Idempotent.
*/
@@ -517,6 +530,10 @@ static int applesmc_init_smcreg_try(void)
return ret;
s->temp_count = s->temp_end - s->temp_begin;
+ ret = applesmc_init_index(s);
+ if (ret)
+ return ret;
+
ret = applesmc_has_key(LIGHT_SENSOR_LEFT_KEY, &left_light_sensor);
if (ret)
return ret;
@@ -533,8 +550,8 @@ static int applesmc_init_smcreg_try(void)
s->num_light_sensors = left_light_sensor + right_light_sensor;
s->init_complete = true;
- pr_info("key=%d fan=%d temp=%d acc=%d lux=%d kbd=%d\n",
- s->key_count, s->fan_count, s->temp_count,
+ pr_info("key=%d fan=%d temp=%d index=%d acc=%d lux=%d kbd=%d\n",
+ s->key_count, s->fan_count, s->temp_count, s->index_count,
s->has_accelerometer,
s->num_light_sensors,
s->has_key_backlight);
@@ -542,6 +559,15 @@ static int applesmc_init_smcreg_try(void)
return 0;
}
+static void applesmc_destroy_smcreg(void)
+{
+ kfree(smcreg.index);
+ smcreg.index = NULL;
+ kfree(smcreg.cache);
+ smcreg.cache = NULL;
+ smcreg.init_complete = false;
+}
+
/*
* applesmc_init_smcreg - Initialize register cache.
*
@@ -562,19 +588,11 @@ static int applesmc_init_smcreg(void)
msleep(INIT_WAIT_MSECS);
}
- kfree(smcreg.cache);
- smcreg.cache = NULL;
+ applesmc_destroy_smcreg();
return ret;
}
-static void applesmc_destroy_smcreg(void)
-{
- kfree(smcreg.cache);
- smcreg.cache = NULL;
- smcreg.init_complete = false;
-}
-
/* Device model stuff */
static int applesmc_probe(struct platform_device *dev)
{
@@ -624,8 +642,8 @@ static struct platform_driver applesmc_driver = {
*/
static void applesmc_calibrate(void)
{
- applesmc_read_motion_sensor(SENSOR_X, &rest_x);
- applesmc_read_motion_sensor(SENSOR_Y, &rest_y);
+ applesmc_read_s16(MOTION_SENSOR_X_KEY, &rest_x);
+ applesmc_read_s16(MOTION_SENSOR_Y_KEY, &rest_y);
rest_x = -rest_x;
}
@@ -634,9 +652,9 @@ static void applesmc_idev_poll(struct input_polled_dev *dev)
struct input_dev *idev = dev->input;
s16 x, y;
- if (applesmc_read_motion_sensor(SENSOR_X, &x))
+ if (applesmc_read_s16(MOTION_SENSOR_X_KEY, &x))
return;
- if (applesmc_read_motion_sensor(SENSOR_Y, &y))
+ if (applesmc_read_s16(MOTION_SENSOR_Y_KEY, &y))
return;
x = -x;
@@ -659,13 +677,13 @@ static ssize_t applesmc_position_show(struct device *dev,
int ret;
s16 x, y, z;
- ret = applesmc_read_motion_sensor(SENSOR_X, &x);
+ ret = applesmc_read_s16(MOTION_SENSOR_X_KEY, &x);
if (ret)
goto out;
- ret = applesmc_read_motion_sensor(SENSOR_Y, &y);
+ ret = applesmc_read_s16(MOTION_SENSOR_Y_KEY, &y);
if (ret)
goto out;
- ret = applesmc_read_motion_sensor(SENSOR_Z, &z);
+ ret = applesmc_read_s16(MOTION_SENSOR_Z_KEY, &z);
if (ret)
goto out;
@@ -718,44 +736,27 @@ out:
static ssize_t applesmc_show_sensor_label(struct device *dev,
struct device_attribute *devattr, char *sysfsbuf)
{
- int index = smcreg.temp_begin + to_index(devattr);
- const struct applesmc_entry *entry;
+ const char *key = smcreg.index[to_index(devattr)];
- entry = applesmc_get_entry_by_index(index);
- if (IS_ERR(entry))
- return PTR_ERR(entry);
-
- return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", entry->key);
+ return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", key);
}
/* Displays degree Celsius * 1000 */
static ssize_t applesmc_show_temperature(struct device *dev,
struct device_attribute *devattr, char *sysfsbuf)
{
- int index = smcreg.temp_begin + to_index(devattr);
- const struct applesmc_entry *entry;
+ const char *key = smcreg.index[to_index(devattr)];
int ret;
- u8 buffer[2];
- unsigned int temp;
-
- entry = applesmc_get_entry_by_index(index);
- if (IS_ERR(entry))
- return PTR_ERR(entry);
- if (entry->len > 2)
- return -EINVAL;
+ s16 value;
+ int temp;
- ret = applesmc_read_entry(entry, buffer, entry->len);
+ ret = applesmc_read_s16(key, &value);
if (ret)
return ret;
- if (entry->len == 2) {
- temp = buffer[0] * 1000;
- temp += (buffer[1] >> 6) * 250;
- } else {
- temp = buffer[0] * 4000;
- }
+ temp = 250 * (value >> 6);
- return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", temp);
+ return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t applesmc_show_fan_speed(struct device *dev,
@@ -1265,7 +1266,7 @@ static int __init applesmc_init(void)
if (ret)
goto out_info;
- ret = applesmc_create_nodes(temp_group, smcreg.temp_count);
+ ret = applesmc_create_nodes(temp_group, smcreg.index_count);
if (ret)
goto out_fans;
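
The applesmc hunks above restrict temperature reporting to keys of type "sp78", a signed fixed-point value with 8 fractional bits, and keep only the top two fractional bits so results come out in 250-millidegree steps (250 * (value >> 6), exactly as in the new applesmc_show_temperature()). A minimal user-space sketch of that conversion; the helper name and sample bytes are illustrative, not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Convert a big-endian sp78 sample (sign + 7 integer bits + 8 fraction
 * bits) to millidegrees Celsius with 1/4-degree resolution, mirroring
 * the patched show path: 250 * (value >> 6). */
static int sp78_to_millicelsius(const uint8_t raw[2])
{
	int16_t value = (int16_t)((raw[0] << 8) | raw[1]);

	return 250 * (value >> 6);
}

int main(void)
{
	const uint8_t sample[2] = { 0x2a, 0x40 };	/* 42.25 degrees C */

	printf("%d\n", sp78_to_millicelsius(sample));	/* prints 42250 */
	return 0;
}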
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 7caa242..b867aab7 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -1109,7 +1109,8 @@ asc7621_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
- data = kzalloc(sizeof(struct asc7621_data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(struct asc7621_data),
+ GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
@@ -1143,7 +1144,6 @@ exit_remove:
&(asc7621_params[i].sda.dev_attr));
}
- kfree(data);
return err;
}
@@ -1192,7 +1192,6 @@ static int asc7621_remove(struct i2c_client *client)
&(asc7621_params[i].sda.dev_attr));
}
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index 58af6aa..aecb9ea 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -345,11 +345,10 @@ static int atxp1_probe(struct i2c_client *new_client,
struct atxp1_data *data;
int err;
- data = kzalloc(sizeof(struct atxp1_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&new_client->dev, sizeof(struct atxp1_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
/* Get VRM */
data->vrm = vid_which_vrm();
@@ -362,7 +361,7 @@ static int atxp1_probe(struct i2c_client *new_client,
/* Register sysfs hooks */
err = sysfs_create_group(&new_client->dev.kobj, &atxp1_group);
if (err)
- goto exit_free;
+ return err;
data->hwmon_dev = hwmon_device_register(&new_client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -377,9 +376,6 @@ static int atxp1_probe(struct i2c_client *new_client,
exit_remove_files:
sysfs_remove_group(&new_client->dev.kobj, &atxp1_group);
-exit_free:
- kfree(data);
-exit:
return err;
};
@@ -390,8 +386,6 @@ static int atxp1_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &atxp1_group);
- kfree(data);
-
return 0;
};
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index b9d5123..637c51c 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -191,6 +191,24 @@ static ssize_t show_temp(struct device *dev,
return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
}
+struct tjmax {
+ char const *id;
+ int tjmax;
+};
+
+static struct tjmax __cpuinitconst tjmax_table[] = {
+ { "CPU D410", 100000 },
+ { "CPU D425", 100000 },
+ { "CPU D510", 100000 },
+ { "CPU D525", 100000 },
+ { "CPU N450", 100000 },
+ { "CPU N455", 100000 },
+ { "CPU N470", 100000 },
+ { "CPU N475", 100000 },
+ { "CPU 230", 100000 },
+ { "CPU 330", 125000 },
+};
+
static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
struct device *dev)
{
@@ -202,6 +220,13 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
int err;
u32 eax, edx;
struct pci_dev *host_bridge;
+ int i;
+
+ /* explicit tjmax table entries override heuristics */
+ for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
+ if (strstr(c->x86_model_id, tjmax_table[i].id))
+ return tjmax_table[i].tjmax;
+ }
/* Early chips have no MSR for TjMax */
@@ -210,7 +235,8 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
/* Atom CPUs */
- if (c->x86_model == 0x1c) {
+ if (c->x86_model == 0x1c || c->x86_model == 0x26
+ || c->x86_model == 0x27) {
usemsr_ee = 0;
host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
@@ -223,6 +249,9 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
tjmax = 90000;
pci_dev_put(host_bridge);
+ } else if (c->x86_model == 0x36) {
+ usemsr_ee = 0;
+ tjmax = 100000;
}
if (c->x86_model > 0xe && usemsr_ee) {
@@ -664,7 +693,7 @@ static void __cpuinit get_core_online(unsigned int cpu)
* sensors. We check this bit only, all the early CPUs
* without thermal sensors will be filtered out.
*/
- if (!cpu_has(c, X86_FEATURE_DTS))
+ if (!cpu_has(c, X86_FEATURE_DTHERM))
return;
if (!pdev) {
@@ -765,14 +794,14 @@ static struct notifier_block coretemp_cpu_notifier __refdata = {
};
static const struct x86_cpu_id coretemp_ids[] = {
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTS },
+ { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM },
{}
};
MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);
static int __init coretemp_init(void)
{
- int i, err = -ENODEV;
+ int i, err;
/*
* CPUID.06H.EAX[0] indicates whether the CPU has thermal
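
The coretemp change above adds an explicit TjMax table for Atom parts whose model string is known, matched with strstr() against x86_model_id before any MSR heuristics run. A self-contained sketch of the same lookup; the model string below is only an example and the fallback return value is an assumption of this sketch, not the driver's behaviour:

#include <stdio.h>
#include <string.h>

struct tjmax {
	const char *id;
	int tjmax;	/* millidegrees Celsius */
};

static const struct tjmax tjmax_table[] = {
	{ "CPU D510", 100000 },
	{ "CPU N450", 100000 },
	{ "CPU 330",  125000 },
};

/* Return the table value for a matching model string, or -1 to tell
 * the caller to fall back to the MSR/heuristic path. */
static int lookup_tjmax(const char *model_id)
{
	size_t i;

	for (i = 0; i < sizeof(tjmax_table) / sizeof(tjmax_table[0]); i++)
		if (strstr(model_id, tjmax_table[i].id))
			return tjmax_table[i].tjmax;
	return -1;
}

int main(void)
{
	printf("%d\n", lookup_tjmax("Intel(R) Atom(TM) CPU D510 @ 1.66GHz"));
	return 0;	/* prints 100000 */
}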
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
new file mode 100644
index 0000000..fc65f2d
--- /dev/null
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -0,0 +1,344 @@
+/*
+ * HWMON Driver for Dialog DA9052
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/reg.h>
+
+struct da9052_hwmon {
+ struct da9052 *da9052;
+ struct device *class_device;
+ struct mutex hwmon_lock;
+};
+
+static const char * const input_names[] = {
+ [DA9052_ADC_VDDOUT] = "VDDOUT",
+ [DA9052_ADC_ICH] = "CHARGING CURRENT",
+ [DA9052_ADC_TBAT] = "BATTERY TEMP",
+ [DA9052_ADC_VBAT] = "BATTERY VOLTAGE",
+ [DA9052_ADC_IN4] = "ADC IN4",
+ [DA9052_ADC_IN5] = "ADC IN5",
+ [DA9052_ADC_IN6] = "ADC IN6",
+ [DA9052_ADC_TJUNC] = "BATTERY JUNCTION TEMP",
+ [DA9052_ADC_VBBAT] = "BACK-UP BATTERY VOLTAGE",
+};
+
+/* Conversion function for VDDOUT and VBAT */
+static inline int volt_reg_to_mV(int value)
+{
+ return DIV_ROUND_CLOSEST(value * 1000, 512) + 2500;
+}
+
+/* Conversion function for ADC channels 4, 5 and 6 */
+static inline int input_reg_to_mV(int value)
+{
+ return DIV_ROUND_CLOSEST(value * 2500, 1023);
+}
+
+/* Conversion function for VBBAT */
+static inline int vbbat_reg_to_mV(int value)
+{
+ return DIV_ROUND_CLOSEST(value * 2500, 512);
+}
+
+static int da9052_enable_vddout_channel(struct da9052 *da9052)
+{
+ int ret;
+
+ ret = da9052_reg_read(da9052, DA9052_ADC_CONT_REG);
+ if (ret < 0)
+ return ret;
+
+ ret |= DA9052_ADCCONT_AUTOVDDEN;
+
+ return da9052_reg_write(da9052, DA9052_ADC_CONT_REG, ret);
+}
+
+static int da9052_disable_vddout_channel(struct da9052 *da9052)
+{
+ int ret;
+
+ ret = da9052_reg_read(da9052, DA9052_ADC_CONT_REG);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~DA9052_ADCCONT_AUTOVDDEN;
+
+ return da9052_reg_write(da9052, DA9052_ADC_CONT_REG, ret);
+}
+
+static ssize_t da9052_read_vddout(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct da9052_hwmon *hwmon = dev_get_drvdata(dev);
+ int ret, vdd;
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ ret = da9052_enable_vddout_channel(hwmon->da9052);
+ if (ret < 0)
+ goto hwmon_err;
+
+ vdd = da9052_reg_read(hwmon->da9052, DA9052_VDD_RES_REG);
+ if (vdd < 0) {
+ ret = vdd;
+ goto hwmon_err_release;
+ }
+
+ ret = da9052_disable_vddout_channel(hwmon->da9052);
+ if (ret < 0)
+ goto hwmon_err;
+
+ mutex_unlock(&hwmon->hwmon_lock);
+ return sprintf(buf, "%d\n", volt_reg_to_mV(vdd));
+
+hwmon_err_release:
+ da9052_disable_vddout_channel(hwmon->da9052);
+hwmon_err:
+ mutex_unlock(&hwmon->hwmon_lock);
+ return ret;
+}
+
+static ssize_t da9052_read_ich(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct da9052_hwmon *hwmon = dev_get_drvdata(dev);
+ int ret;
+
+ ret = da9052_reg_read(hwmon->da9052, DA9052_ICHG_AV_REG);
+ if (ret < 0)
+ return ret;
+
+ /* Equivalent to 3.9mA/bit in register ICHG_AV */
+ return sprintf(buf, "%d\n", DIV_ROUND_CLOSEST(ret * 39, 10));
+}
+
+static ssize_t da9052_read_tbat(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct da9052_hwmon *hwmon = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", da9052_adc_read_temp(hwmon->da9052));
+}
+
+static ssize_t da9052_read_vbat(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct da9052_hwmon *hwmon = dev_get_drvdata(dev);
+ int ret;
+
+ ret = da9052_adc_manual_read(hwmon->da9052, DA9052_ADC_VBAT);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", volt_reg_to_mV(ret));
+}
+
+static ssize_t da9052_read_misc_channel(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct da9052_hwmon *hwmon = dev_get_drvdata(dev);
+ int channel = to_sensor_dev_attr(devattr)->index;
+ int ret;
+
+ ret = da9052_adc_manual_read(hwmon->da9052, channel);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", input_reg_to_mV(ret));
+}
+
+static ssize_t da9052_read_tjunc(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct da9052_hwmon *hwmon = dev_get_drvdata(dev);
+ int tjunc;
+ int toffset;
+
+ tjunc = da9052_reg_read(hwmon->da9052, DA9052_TJUNC_RES_REG);
+ if (tjunc < 0)
+ return tjunc;
+
+ toffset = da9052_reg_read(hwmon->da9052, DA9052_T_OFFSET_REG);
+ if (toffset < 0)
+ return toffset;
+
+ /*
+ * Degrees Celsius = 1.708 * (TJUNC_RES - T_OFFSET) - 108.8

+ * T_OFFSET is a trim value used to improve accuracy of the result
+ */
+ return sprintf(buf, "%d\n", 1708 * (tjunc - toffset) - 108800);
+}
+
+static ssize_t da9052_read_vbbat(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct da9052_hwmon *hwmon = dev_get_drvdata(dev);
+ int ret;
+
+ ret = da9052_adc_manual_read(hwmon->da9052, DA9052_ADC_VBBAT);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", vbbat_reg_to_mV(ret));
+}
+
+static ssize_t da9052_hwmon_show_name(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ return sprintf(buf, "da9052-hwmon\n");
+}
+
+static ssize_t show_label(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ return sprintf(buf, "%s\n",
+ input_names[to_sensor_dev_attr(devattr)->index]);
+}
+
+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, da9052_read_vddout, NULL,
+ DA9052_ADC_VDDOUT);
+static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, show_label, NULL,
+ DA9052_ADC_VDDOUT);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, da9052_read_vbat, NULL,
+ DA9052_ADC_VBAT);
+static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL,
+ DA9052_ADC_VBAT);
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, da9052_read_misc_channel, NULL,
+ DA9052_ADC_IN4);
+static SENSOR_DEVICE_ATTR(in4_label, S_IRUGO, show_label, NULL,
+ DA9052_ADC_IN4);
+static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, da9052_read_misc_channel, NULL,
+ DA9052_ADC_IN5);
+static SENSOR_DEVICE_ATTR(in5_label, S_IRUGO, show_label, NULL,
+ DA9052_ADC_IN5);
+static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, da9052_read_misc_channel, NULL,
+ DA9052_ADC_IN6);
+static SENSOR_DEVICE_ATTR(in6_label, S_IRUGO, show_label, NULL,
+ DA9052_ADC_IN6);
+static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, da9052_read_vbbat, NULL,
+ DA9052_ADC_VBBAT);
+static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_label, NULL,
+ DA9052_ADC_VBBAT);
+
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, da9052_read_ich, NULL,
+ DA9052_ADC_ICH);
+static SENSOR_DEVICE_ATTR(curr1_label, S_IRUGO, show_label, NULL,
+ DA9052_ADC_ICH);
+
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, da9052_read_tbat, NULL,
+ DA9052_ADC_TBAT);
+static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, show_label, NULL,
+ DA9052_ADC_TBAT);
+static SENSOR_DEVICE_ATTR(temp8_input, S_IRUGO, da9052_read_tjunc, NULL,
+ DA9052_ADC_TJUNC);
+static SENSOR_DEVICE_ATTR(temp8_label, S_IRUGO, show_label, NULL,
+ DA9052_ADC_TJUNC);
+
+static DEVICE_ATTR(name, S_IRUGO, da9052_hwmon_show_name, NULL);
+
+static struct attribute *da9052_attr[] = {
+ &dev_attr_name.attr,
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in0_label.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in3_label.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in4_label.dev_attr.attr,
+ &sensor_dev_attr_in5_input.dev_attr.attr,
+ &sensor_dev_attr_in5_label.dev_attr.attr,
+ &sensor_dev_attr_in6_input.dev_attr.attr,
+ &sensor_dev_attr_in6_label.dev_attr.attr,
+ &sensor_dev_attr_in9_input.dev_attr.attr,
+ &sensor_dev_attr_in9_label.dev_attr.attr,
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ &sensor_dev_attr_curr1_label.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_label.dev_attr.attr,
+ &sensor_dev_attr_temp8_input.dev_attr.attr,
+ &sensor_dev_attr_temp8_label.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group da9052_attr_group = {.attrs = da9052_attr};
+
+static int __devinit da9052_hwmon_probe(struct platform_device *pdev)
+{
+ struct da9052_hwmon *hwmon;
+ int ret;
+
+ hwmon = devm_kzalloc(&pdev->dev, sizeof(struct da9052_hwmon),
+ GFP_KERNEL);
+ if (!hwmon)
+ return -ENOMEM;
+
+ mutex_init(&hwmon->hwmon_lock);
+ hwmon->da9052 = dev_get_drvdata(pdev->dev.parent);
+
+ platform_set_drvdata(pdev, hwmon);
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &da9052_attr_group);
+ if (ret)
+ goto err_mem;
+
+ hwmon->class_device = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(hwmon->class_device)) {
+ ret = PTR_ERR(hwmon->class_device);
+ goto err_sysfs;
+ }
+
+ return 0;
+
+err_sysfs:
+ sysfs_remove_group(&pdev->dev.kobj, &da9052_attr_group);
+err_mem:
+ return ret;
+}
+
+static int __devexit da9052_hwmon_remove(struct platform_device *pdev)
+{
+ struct da9052_hwmon *hwmon = platform_get_drvdata(pdev);
+
+ hwmon_device_unregister(hwmon->class_device);
+ sysfs_remove_group(&pdev->dev.kobj, &da9052_attr_group);
+
+ return 0;
+}
+
+static struct platform_driver da9052_hwmon_driver = {
+ .probe = da9052_hwmon_probe,
+ .remove = __devexit_p(da9052_hwmon_remove),
+ .driver = {
+ .name = "da9052-hwmon",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(da9052_hwmon_driver);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("DA9052 HWMON driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9052-hwmon");
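
The new da9052-hwmon driver scales raw 10-bit ADC codes into millivolts with the three inline helpers near the top of the file. A user-space sketch of the same arithmetic with sample readings; the readings are chosen only for illustration and div_round_closest() stands in for the kernel's DIV_ROUND_CLOSEST() for non-negative operands:

#include <stdio.h>

static int div_round_closest(int n, int d)
{
	return (n + d / 2) / d;
}

/* VDDOUT/VBAT channel: 2500 mV offset, 1000/512 mV per code */
static int volt_reg_to_mV(int value)
{
	return div_round_closest(value * 1000, 512) + 2500;
}

/* General-purpose inputs 4..6: 2500 mV full scale over 10 bits */
static int input_reg_to_mV(int value)
{
	return div_round_closest(value * 2500, 1023);
}

int main(void)
{
	printf("%d %d\n", volt_reg_to_mV(512), input_reg_to_mV(1023));
	return 0;	/* prints "3500 2500" */
}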
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index f647a33..1c56873 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -249,11 +249,10 @@ static int ds1621_probe(struct i2c_client *client,
struct ds1621_data *data;
int err;
- data = kzalloc(sizeof(struct ds1621_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct ds1621_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -264,7 +263,7 @@ static int ds1621_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &ds1621_group);
if (err)
- goto exit_free;
+ return err;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -276,9 +275,6 @@ static int ds1621_probe(struct i2c_client *client,
exit_remove_files:
sysfs_remove_group(&client->dev.kobj, &ds1621_group);
- exit_free:
- kfree(data);
- exit:
return err;
}
@@ -289,8 +285,6 @@ static int ds1621_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &ds1621_group);
- kfree(data);
-
return 0;
}
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index a73e685..7bb8e88 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -451,11 +451,15 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
data->fan_rpm_control = true;
break;
default:
- mutex_unlock(&data->update_lock);
- return -EINVAL;
+ count = -EINVAL;
+ goto err;
}
- read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
+ result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
+ if (result) {
+ count = result;
+ goto err;
+ }
if (data->fan_rpm_control)
conf_reg |= 0x80;
@@ -463,7 +467,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
conf_reg &= ~0x80;
i2c_smbus_write_byte_data(client, REG_FAN_CONF1, conf_reg);
-
+err:
mutex_unlock(&data->update_lock);
return count;
}
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index 840f511..ada12a9 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -492,11 +492,10 @@ static int emc6w201_probe(struct i2c_client *client,
struct emc6w201_data *data;
int err;
- data = kzalloc(sizeof(struct emc6w201_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct emc6w201_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -504,7 +503,7 @@ static int emc6w201_probe(struct i2c_client *client,
/* Create sysfs attribute */
err = sysfs_create_group(&client->dev.kobj, &emc6w201_group);
if (err)
- goto exit_free;
+ return err;
/* Expose as a hwmon device */
data->hwmon_dev = hwmon_device_register(&client->dev);
@@ -517,9 +516,6 @@ static int emc6w201_probe(struct i2c_client *client,
exit_remove:
sysfs_remove_group(&client->dev.kobj, &emc6w201_group);
- exit_free:
- kfree(data);
- exit:
return err;
}
@@ -529,7 +525,6 @@ static int emc6w201_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &emc6w201_group);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/exynos4_tmu.c b/drivers/hwmon/exynos4_tmu.c
index f2359a0..e912059 100644
--- a/drivers/hwmon/exynos4_tmu.c
+++ b/drivers/hwmon/exynos4_tmu.c
@@ -475,35 +475,39 @@ static int __devexit exynos4_tmu_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int exynos4_tmu_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int exynos4_tmu_suspend(struct device *dev)
{
- exynos4_tmu_control(pdev, false);
+ exynos4_tmu_control(to_platform_device(dev), false);
return 0;
}
-static int exynos4_tmu_resume(struct platform_device *pdev)
+static int exynos4_tmu_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
+
exynos4_tmu_initialize(pdev);
exynos4_tmu_control(pdev, true);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(exynos4_tmu_pm,
+ exynos4_tmu_suspend, exynos4_tmu_resume);
+#define EXYNOS4_TMU_PM &exynos4_tmu_pm
#else
-#define exynos4_tmu_suspend NULL
-#define exynos4_tmu_resume NULL
+#define EXYNOS4_TMU_PM NULL
#endif
static struct platform_driver exynos4_tmu_driver = {
.driver = {
.name = "exynos4-tmu",
.owner = THIS_MODULE,
+ .pm = EXYNOS4_TMU_PM,
},
.probe = exynos4_tmu_probe,
.remove = __devexit_p(exynos4_tmu_remove),
- .suspend = exynos4_tmu_suspend,
- .resume = exynos4_tmu_resume,
};
module_platform_driver(exynos4_tmu_driver);
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index 3e4da62..4dd7723 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -1386,20 +1386,20 @@ static int __devinit f71805f_probe(struct platform_device *pdev)
"f71872f",
};
- data = kzalloc(sizeof(struct f71805f_data), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, sizeof(struct f71805f_data),
+ GFP_KERNEL);
if (!data) {
- err = -ENOMEM;
pr_err("Out of memory\n");
- goto exit;
+ return -ENOMEM;
}
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start + ADDR_REG_OFFSET, 2, DRVNAME)) {
- err = -EBUSY;
+ if (!devm_request_region(&pdev->dev, res->start + ADDR_REG_OFFSET, 2,
+ DRVNAME)) {
dev_err(&pdev->dev, "Failed to request region 0x%lx-0x%lx\n",
(unsigned long)(res->start + ADDR_REG_OFFSET),
(unsigned long)(res->start + ADDR_REG_OFFSET + 1));
- goto exit_free;
+ return -EBUSY;
}
data->addr = res->start;
data->name = names[sio_data->kind];
@@ -1427,7 +1427,7 @@ static int __devinit f71805f_probe(struct platform_device *pdev)
/* Register sysfs interface files */
err = sysfs_create_group(&pdev->dev.kobj, &f71805f_group);
if (err)
- goto exit_release_region;
+ return err;
if (data->has_in & (1 << 4)) { /* in4 */
err = sysfs_create_group(&pdev->dev.kobj,
&f71805f_group_optin[0]);
@@ -1487,19 +1487,12 @@ exit_remove_files:
for (i = 0; i < 4; i++)
sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_optin[i]);
sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_pwm_freq);
-exit_release_region:
- release_region(res->start + ADDR_REG_OFFSET, 2);
-exit_free:
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-exit:
return err;
}
static int __devexit f71805f_remove(struct platform_device *pdev)
{
struct f71805f_data *data = platform_get_drvdata(pdev);
- struct resource *res;
int i;
hwmon_device_unregister(data->hwmon_dev);
@@ -1507,11 +1500,6 @@ static int __devexit f71805f_remove(struct platform_device *pdev)
for (i = 0; i < 4; i++)
sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_optin[i]);
sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_pwm_freq);
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- release_region(res->start + ADDR_REG_OFFSET, 2);
return 0;
}
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index 6b13f1a..2764b78 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -67,7 +67,8 @@ static ssize_t show_power(struct device *dev,
REG_TDP_LIMIT3, &val);
tdp_limit = val >> 16;
- curr_pwr_watts = (tdp_limit + data->base_tdp) << running_avg_range;
+ curr_pwr_watts = ((u64)(tdp_limit +
+ data->base_tdp)) << running_avg_range;
curr_pwr_watts -= running_avg_capture;
curr_pwr_watts *= data->tdp_to_watts;
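
The fam15h_power fix above widens the TDP sum to u64 before the shift: evaluated in 32 bits, the shifted value can wrap before the later 64-bit arithmetic ever sees it. A small sketch of the failure mode and the fix; the register values and shift count are illustrative only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tdp_limit = 0x1f000;	/* illustrative values */
	uint32_t base_tdp = 0x1000;
	unsigned int running_avg_range = 16;

	/* Old code: the shift happens in 32-bit arithmetic and wraps. */
	uint64_t bad = (tdp_limit + base_tdp) << running_avg_range;

	/* Fixed code: widen to 64 bits first, as the patch does. */
	uint64_t good = ((uint64_t)(tdp_limit + base_tdp)) << running_avg_range;

	printf("%llu %llu\n", (unsigned long long)bad,
	       (unsigned long long)good);	/* "0 8589934592" */
	return 0;
}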
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 764a083..2c74673 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -544,11 +544,10 @@ static int gl518_probe(struct i2c_client *client,
struct gl518_data *data;
int err, revision;
- data = kzalloc(sizeof(struct gl518_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct gl518_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
revision = gl518_read_value(client, GL518_REG_REVISION);
@@ -562,7 +561,7 @@ static int gl518_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &gl518_group);
if (err)
- goto exit_free;
+ return err;
if (data->type == gl518sm_r80) {
err = sysfs_create_group(&client->dev.kobj, &gl518_group_r80);
if (err)
@@ -581,9 +580,6 @@ exit_remove_files:
sysfs_remove_group(&client->dev.kobj, &gl518_group);
if (data->type == gl518sm_r80)
sysfs_remove_group(&client->dev.kobj, &gl518_group_r80);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -617,7 +613,6 @@ static int gl518_remove(struct i2c_client *client)
if (data->type == gl518sm_r80)
sysfs_remove_group(&client->dev.kobj, &gl518_group_r80);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index 5ff452b..a21ff25 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -779,11 +779,10 @@ static int gl520_probe(struct i2c_client *client,
struct gl520_data *data;
int err;
- data = kzalloc(sizeof(struct gl520_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct gl520_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -794,7 +793,7 @@ static int gl520_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &gl520_group);
if (err)
- goto exit_free;
+ return err;
if (data->two_temps)
err = sysfs_create_group(&client->dev.kobj, &gl520_group_temp2);
@@ -816,9 +815,6 @@ exit_remove_files:
sysfs_remove_group(&client->dev.kobj, &gl520_group);
sysfs_remove_group(&client->dev.kobj, &gl520_group_in4);
sysfs_remove_group(&client->dev.kobj, &gl520_group_temp2);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -870,7 +866,6 @@ static int gl520_remove(struct i2c_client *client)
sysfs_remove_group(&client->dev.kobj, &gl520_group_in4);
sysfs_remove_group(&client->dev.kobj, &gl520_group_temp2);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 2ce8c44..2f4b01b 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -41,7 +41,7 @@ struct gpio_fan_data {
int num_speed;
struct gpio_fan_speed *speed;
int speed_index;
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
int resume_speed;
#endif
bool pwm_enable;
@@ -95,17 +95,17 @@ static int fan_alarm_init(struct gpio_fan_data *fan_data,
fan_data->alarm = alarm;
- err = gpio_request(alarm->gpio, "GPIO fan alarm");
+ err = devm_gpio_request(&pdev->dev, alarm->gpio, "GPIO fan alarm");
if (err)
return err;
err = gpio_direction_input(alarm->gpio);
if (err)
- goto err_free_gpio;
+ return err;
err = device_create_file(&pdev->dev, &dev_attr_fan1_alarm);
if (err)
- goto err_free_gpio;
+ return err;
/*
* If the alarm GPIO don't support interrupts, just leave
@@ -117,8 +117,8 @@ static int fan_alarm_init(struct gpio_fan_data *fan_data,
INIT_WORK(&fan_data->alarm_work, fan_alarm_notify);
irq_set_irq_type(alarm_irq, IRQ_TYPE_EDGE_BOTH);
- err = request_irq(alarm_irq, fan_alarm_irq_handler, IRQF_SHARED,
- "GPIO fan alarm", fan_data);
+ err = devm_request_irq(&pdev->dev, alarm_irq, fan_alarm_irq_handler,
+ IRQF_SHARED, "GPIO fan alarm", fan_data);
if (err)
goto err_free_sysfs;
@@ -126,21 +126,14 @@ static int fan_alarm_init(struct gpio_fan_data *fan_data,
err_free_sysfs:
device_remove_file(&pdev->dev, &dev_attr_fan1_alarm);
-err_free_gpio:
- gpio_free(alarm->gpio);
-
return err;
}
static void fan_alarm_free(struct gpio_fan_data *fan_data)
{
struct platform_device *pdev = fan_data->pdev;
- int alarm_irq = gpio_to_irq(fan_data->alarm->gpio);
- if (alarm_irq >= 0)
- free_irq(alarm_irq, fan_data);
device_remove_file(&pdev->dev, &dev_attr_fan1_alarm);
- gpio_free(fan_data->alarm->gpio);
}
/*
@@ -365,15 +358,14 @@ static int fan_ctrl_init(struct gpio_fan_data *fan_data,
int i, err;
for (i = 0; i < num_ctrl; i++) {
- err = gpio_request(ctrl[i], "GPIO fan control");
+ err = devm_gpio_request(&pdev->dev, ctrl[i],
+ "GPIO fan control");
if (err)
- goto err_free_gpio;
+ return err;
err = gpio_direction_output(ctrl[i], gpio_get_value(ctrl[i]));
- if (err) {
- gpio_free(ctrl[i]);
- goto err_free_gpio;
- }
+ if (err)
+ return err;
}
fan_data->num_ctrl = num_ctrl;
@@ -382,32 +374,18 @@ static int fan_ctrl_init(struct gpio_fan_data *fan_data,
fan_data->speed = pdata->speed;
fan_data->pwm_enable = true; /* Enable manual fan speed control. */
fan_data->speed_index = get_fan_speed_index(fan_data);
- if (fan_data->speed_index < 0) {
- err = -ENODEV;
- goto err_free_gpio;
- }
+ if (fan_data->speed_index < 0)
+ return -ENODEV;
err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_ctrl_group);
- if (err)
- goto err_free_gpio;
-
- return 0;
-
-err_free_gpio:
- for (i = i - 1; i >= 0; i--)
- gpio_free(ctrl[i]);
-
return err;
}
static void fan_ctrl_free(struct gpio_fan_data *fan_data)
{
struct platform_device *pdev = fan_data->pdev;
- int i;
sysfs_remove_group(&pdev->dev.kobj, &gpio_fan_ctrl_group);
- for (i = 0; i < fan_data->num_ctrl; i++)
- gpio_free(fan_data->ctrl[i]);
}
/*
@@ -431,7 +409,8 @@ static int __devinit gpio_fan_probe(struct platform_device *pdev)
if (!pdata)
return -EINVAL;
- fan_data = kzalloc(sizeof(struct gpio_fan_data), GFP_KERNEL);
+ fan_data = devm_kzalloc(&pdev->dev, sizeof(struct gpio_fan_data),
+ GFP_KERNEL);
if (!fan_data)
return -ENOMEM;
@@ -443,7 +422,7 @@ static int __devinit gpio_fan_probe(struct platform_device *pdev)
if (pdata->alarm) {
err = fan_alarm_init(fan_data, pdata->alarm);
if (err)
- goto err_free_data;
+ return err;
}
/* Configure control GPIOs if available. */
@@ -480,10 +459,6 @@ err_free_ctrl:
err_free_alarm:
if (fan_data->alarm)
fan_alarm_free(fan_data);
-err_free_data:
- platform_set_drvdata(pdev, NULL);
- kfree(fan_data);
-
return err;
}
@@ -497,15 +472,14 @@ static int __devexit gpio_fan_remove(struct platform_device *pdev)
fan_alarm_free(fan_data);
if (fan_data->ctrl)
fan_ctrl_free(fan_data);
- kfree(fan_data);
return 0;
}
-#ifdef CONFIG_PM
-static int gpio_fan_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int gpio_fan_suspend(struct device *dev)
{
- struct gpio_fan_data *fan_data = platform_get_drvdata(pdev);
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
if (fan_data->ctrl) {
fan_data->resume_speed = fan_data->speed_index;
@@ -515,27 +489,28 @@ static int gpio_fan_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int gpio_fan_resume(struct platform_device *pdev)
+static int gpio_fan_resume(struct device *dev)
{
- struct gpio_fan_data *fan_data = platform_get_drvdata(pdev);
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
if (fan_data->ctrl)
set_fan_speed(fan_data, fan_data->resume_speed);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(gpio_fan_pm, gpio_fan_suspend, gpio_fan_resume);
+#define GPIO_FAN_PM &gpio_fan_pm
#else
-#define gpio_fan_suspend NULL
-#define gpio_fan_resume NULL
+#define GPIO_FAN_PM NULL
#endif
static struct platform_driver gpio_fan_driver = {
.probe = gpio_fan_probe,
.remove = __devexit_p(gpio_fan_remove),
- .suspend = gpio_fan_suspend,
- .resume = gpio_fan_resume,
.driver = {
.name = "gpio-fan",
+ .pm = GPIO_FAN_PM,
},
};
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
new file mode 100644
index 0000000..e8ee75f
--- /dev/null
+++ b/drivers/hwmon/hih6130.c
@@ -0,0 +1,293 @@
+/* Honeywell HIH-6130/HIH-6131 humidity and temperature sensor driver
+ *
+ * Copyright (C) 2012 Iain Paton <ipaton0@gmail.com>
+ *
+ * heavily based on the sht21 driver
+ * Copyright (C) 2010 Urs Fleisch <urs.fleisch@sensirion.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Data sheets available (2012-06-22) at
+ * http://sensing.honeywell.com/index.php?ci_id=3106&la_id=1&defId=44872
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+
+/**
+ * struct hih6130 - HIH-6130 device specific data
+ * @hwmon_dev: device registered with hwmon
+ * @lock: mutex to protect measurement values
+ * @valid: only false before first measurement is taken
+ * @last_update: time of last update (jiffies)
+ * @temperature: cached temperature measurement value
+ * @humidity: cached humidity measurement value
+ */
+struct hih6130 {
+ struct device *hwmon_dev;
+ struct mutex lock;
+ bool valid;
+ unsigned long last_update;
+ int temperature;
+ int humidity;
+};
+
+/**
+ * hih6130_temp_ticks_to_millicelsius() - convert raw temperature ticks to
+ * millicelsius
+ * @ticks: temperature ticks value received from sensor
+ */
+static inline int hih6130_temp_ticks_to_millicelsius(int ticks)
+{
+
+ ticks = ticks >> 2;
+ /*
+ * from data sheet section 5.0
+ * Formula T = ( ticks / ( 2^14 - 2 ) ) * 165 - 40
+ */
+ return (DIV_ROUND_CLOSEST(ticks * 1650, 16382) - 400) * 100;
+}
+
+/**
+ * hih6130_rh_ticks_to_per_cent_mille() - convert raw humidity ticks to
+ * one-thousandths of a percent relative humidity
+ * @ticks: humidity ticks value received from sensor
+ */
+static inline int hih6130_rh_ticks_to_per_cent_mille(int ticks)
+{
+
+ ticks &= ~0xC000; /* clear status bits */
+ /*
+ * from data sheet section 4.0
+ * Formula RH = ( ticks / ( 2^14 -2 ) ) * 100
+ */
+ return DIV_ROUND_CLOSEST(ticks * 1000, 16382) * 100;
+}
+
+/**
+ * hih6130_update_measurements() - get updated measurements from device
+ * @client: I2C client device
+ *
+ * Returns 0 on success, else negative errno.
+ */
+static int hih6130_update_measurements(struct i2c_client *client)
+{
+ int ret = 0;
+ int t;
+ struct hih6130 *hih6130 = i2c_get_clientdata(client);
+ unsigned char tmp[4];
+ struct i2c_msg msgs[1] = {
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = 4,
+ .buf = tmp,
+ }
+ };
+
+ mutex_lock(&hih6130->lock);
+
+ /*
+ * While the measurement can be completed in ~40ms the sensor takes
+ * much longer to react to a change in external conditions. How quickly
+ * it reacts depends on airflow and other factors outwith our control.
+ * The datasheet specifies maximum 'Response time' for humidity at 8s
+ * and temperature at 30s under specified conditions.
+ * We therefore choose to only read the sensor at most once per second.
+ * This trades off pointless activity polling the sensor much faster
+ * than it can react against better response times in conditions more
+ * favourable than specified in the datasheet.
+ */
+ if (time_after(jiffies, hih6130->last_update + HZ) || !hih6130->valid) {
+
+ /* write to slave address, no data, to request a measurement */
+ ret = i2c_master_send(client, tmp, 0);
+ if (ret < 0)
+ goto out;
+
+ /* measurement cycle time is ~36.65msec */
+ msleep(40);
+
+ ret = i2c_transfer(client->adapter, msgs, 1);
+ if (ret < 0)
+ goto out;
+
+ if ((tmp[0] & 0xC0) != 0) {
+ dev_err(&client->dev, "Error while reading measurement result\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ t = (tmp[0] << 8) + tmp[1];
+ hih6130->humidity = hih6130_rh_ticks_to_per_cent_mille(t);
+
+ t = (tmp[2] << 8) + tmp[3];
+ hih6130->temperature = hih6130_temp_ticks_to_millicelsius(t);
+
+ hih6130->last_update = jiffies;
+ hih6130->valid = true;
+ }
+out:
+ mutex_unlock(&hih6130->lock);
+
+ return ret >= 0 ? 0 : ret;
+}
+
+/**
+ * hih6130_show_temperature() - show temperature measurement value in sysfs
+ * @dev: device
+ * @attr: device attribute
+ * @buf: sysfs buffer (PAGE_SIZE) where measurement values are written to
+ *
+ * Will be called on read access to temp1_input sysfs attribute.
+ * Returns number of bytes written into buffer, negative errno on error.
+ */
+static ssize_t hih6130_show_temperature(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hih6130 *hih6130 = i2c_get_clientdata(client);
+ int ret = hih6130_update_measurements(client);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", hih6130->temperature);
+}
+
+/**
+ * hih6130_show_humidity() - show humidity measurement value in sysfs
+ * @dev: device
+ * @attr: device attribute
+ * @buf: sysfs buffer (PAGE_SIZE) where measurement values are written to
+ *
+ * Will be called on read access to humidity1_input sysfs attribute.
+ * Returns number of bytes written into buffer, negative errno on error.
+ */
+static ssize_t hih6130_show_humidity(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hih6130 *hih6130 = i2c_get_clientdata(client);
+ int ret = hih6130_update_measurements(client);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", hih6130->humidity);
+}
+
+/* sysfs attributes */
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, hih6130_show_temperature,
+ NULL, 0);
+static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO, hih6130_show_humidity,
+ NULL, 0);
+
+static struct attribute *hih6130_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_humidity1_input.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group hih6130_attr_group = {
+ .attrs = hih6130_attributes,
+};
+
+/**
+ * hih6130_probe() - probe device
+ * @client: I2C client device
+ * @id: device ID
+ *
+ * Called by the I2C core when an entry in the ID table matches a
+ * device's name.
+ * Returns 0 on success.
+ */
+static int __devinit hih6130_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct hih6130 *hih6130;
+ int err;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "adapter does not support true I2C\n");
+ return -ENODEV;
+ }
+
+ hih6130 = devm_kzalloc(&client->dev, sizeof(*hih6130), GFP_KERNEL);
+ if (!hih6130)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, hih6130);
+
+ mutex_init(&hih6130->lock);
+
+ err = sysfs_create_group(&client->dev.kobj, &hih6130_attr_group);
+ if (err) {
+ dev_dbg(&client->dev, "could not create sysfs files\n");
+ return err;
+ }
+
+ hih6130->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(hih6130->hwmon_dev)) {
+ dev_dbg(&client->dev, "unable to register hwmon device\n");
+ err = PTR_ERR(hih6130->hwmon_dev);
+ goto fail_remove_sysfs;
+ }
+
+ return 0;
+
+fail_remove_sysfs:
+ sysfs_remove_group(&client->dev.kobj, &hih6130_attr_group);
+ return err;
+}
+
+/**
+ * hih6130_remove() - remove device
+ * @client: I2C client device
+ */
+static int __devexit hih6130_remove(struct i2c_client *client)
+{
+ struct hih6130 *hih6130 = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(hih6130->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &hih6130_attr_group);
+
+ return 0;
+}
+
+/* Device ID table */
+static const struct i2c_device_id hih6130_id[] = {
+ { "hih6130", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, hih6130_id);
+
+static struct i2c_driver hih6130_driver = {
+ .driver.name = "hih6130",
+ .probe = hih6130_probe,
+ .remove = __devexit_p(hih6130_remove),
+ .id_table = hih6130_id,
+};
+
+module_i2c_driver(hih6130_driver);
+
+MODULE_AUTHOR("Iain Paton <ipaton0@gmail.com>");
+MODULE_DESCRIPTION("Honeywell HIH-6130 humidity and temperature sensor driver");
+MODULE_LICENSE("GPL");
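
The new hih6130 driver unpacks 14-bit humidity and temperature counts from a four-byte reply and applies the datasheet formulas scaled to hwmon units (thousandths of a percent and millidegrees). A user-space sketch of the same unpacking and scaling; the reply bytes are made up for illustration and div_round_closest() again stands in for DIV_ROUND_CLOSEST():

#include <stdint.h>
#include <stdio.h>

static int div_round_closest(int n, int d)
{
	return (n + d / 2) / d;
}

/* Datasheet: T(degC) = (ticks / (2^14 - 2)) * 165 - 40, scaled to
 * millidegrees with 0.1 degC resolution, as in the driver. */
static int temp_ticks_to_millicelsius(int ticks)
{
	ticks >>= 2;	/* drop the two don't-care bits */
	return (div_round_closest(ticks * 1650, 16382) - 400) * 100;
}

/* Datasheet: RH(%) = (ticks / (2^14 - 2)) * 100, scaled to
 * thousandths of a percent. */
static int rh_ticks_to_per_cent_mille(int ticks)
{
	ticks &= ~0xC000;	/* clear the status bits */
	return div_round_closest(ticks * 1000, 16382) * 100;
}

int main(void)
{
	/* Hypothetical 4-byte reply: 60.0 %RH, 26.0 degC */
	const uint8_t buf[4] = { 0x26, 0x66, 0x66, 0x64 };
	int rh = (buf[0] << 8) | buf[1];
	int t = (buf[2] << 8) | buf[3];

	printf("%d %d\n", rh_ticks_to_per_cent_mille(rh),
	       temp_ticks_to_millicelsius(t));	/* "60000 26000" */
	return 0;
}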
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index e7701d9..f1de397 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -2341,7 +2341,7 @@ static void __devinit it87_init_device(struct platform_device *pdev)
/* Start monitoring */
it87_write_value(data, IT87_REG_CONFIG,
- (it87_read_value(data, IT87_REG_CONFIG) & 0x36)
+ (it87_read_value(data, IT87_REG_CONFIG) & 0x3e)
| (update_vbat ? 0x41 : 0x01));
}
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index a9bfd67..e72ba5d 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -590,6 +590,6 @@ abort:
module_i2c_driver(jc42_driver);
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION("JC42 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 7356b5e..f2fe807 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -33,9 +33,6 @@ static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
-/* PCI-IDs for Northbridge devices not used anywhere else */
-#define PCI_DEVICE_ID_AMD_15H_M10H_NB_F3 0x1403
-
/* CPUID function 0x80000001, ebx */
#define CPUID_PKGTYPE_MASK 0xf0000000
#define CPUID_PKGTYPE_F 0x00000000
@@ -213,7 +210,7 @@ static DEFINE_PCI_DEVICE_TABLE(k10temp_id_table) = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_NB_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index 35aac82..49a69c5 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -183,21 +183,17 @@ static int __devinit k8temp_probe(struct pci_dev *pdev,
u8 model, stepping;
struct k8temp_data *data;
- data = kzalloc(sizeof(struct k8temp_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&pdev->dev, sizeof(struct k8temp_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
model = boot_cpu_data.x86_model;
stepping = boot_cpu_data.x86_mask;
/* feature available since SH-C0, exclude older revisions */
- if (((model == 4) && (stepping == 0)) ||
- ((model == 5) && (stepping <= 1))) {
- err = -ENODEV;
- goto exit_free;
- }
+ if ((model == 4 && stepping == 0) ||
+ (model == 5 && stepping <= 1))
+ return -ENODEV;
/*
* AMD NPT family 0fh, i.e. RevF and RevG:
@@ -224,8 +220,7 @@ static int __devinit k8temp_probe(struct pci_dev *pdev,
if (scfg & (SEL_PLACE | SEL_CORE)) {
dev_err(&pdev->dev, "Configuration bit(s) stuck at 1!\n");
- err = -ENODEV;
- goto exit_free;
+ return -ENODEV;
}
scfg |= (SEL_PLACE | SEL_CORE);
@@ -307,10 +302,6 @@ exit_remove:
device_remove_file(&pdev->dev,
&sensor_dev_attr_temp4_input.dev_attr);
device_remove_file(&pdev->dev, &dev_attr_name);
-exit_free:
- pci_set_drvdata(pdev, NULL);
- kfree(data);
-exit:
return err;
}
@@ -328,8 +319,6 @@ static void __devexit k8temp_remove(struct pci_dev *pdev)
device_remove_file(&pdev->dev,
&sensor_dev_attr_temp4_input.dev_attr);
device_remove_file(&pdev->dev, &dev_attr_name);
- pci_set_drvdata(pdev, NULL);
- kfree(data);
}
static struct pci_driver k8temp_driver = {
diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c
index d264937..bd75d24 100644
--- a/drivers/hwmon/lineage-pem.c
+++ b/drivers/hwmon/lineage-pem.c
@@ -567,6 +567,6 @@ static struct i2c_driver pem_driver = {
module_i2c_driver(pem_driver);
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION("Lineage CPL PEM hardware monitoring driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 602a0f0..eed4d94 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -1108,11 +1108,9 @@ static int lm63_probe(struct i2c_client *client,
struct lm63_data *data;
int err;
- data = kzalloc(sizeof(struct lm63_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct lm63_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
data->valid = 0;
@@ -1129,7 +1127,7 @@ static int lm63_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &lm63_group);
if (err)
- goto exit_free;
+ return err;
if (data->config & 0x04) { /* tachometer enabled */
err = sysfs_create_group(&client->dev.kobj, &lm63_group_fan1);
if (err)
@@ -1161,9 +1159,6 @@ exit_remove_files:
device_remove_file(&client->dev, &dev_attr_temp2_type);
sysfs_remove_group(&client->dev.kobj, &lm63_group_extra_lut);
}
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -1179,7 +1174,6 @@ static int lm63_remove(struct i2c_client *client)
sysfs_remove_group(&client->dev.kobj, &lm63_group_extra_lut);
}
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index a83f206..291edff 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -156,7 +156,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
return -EIO;
- data = kzalloc(sizeof(struct lm75_data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(struct lm75_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -174,7 +174,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
status = lm75_read_value(client, LM75_REG_CONF);
if (status < 0) {
dev_dbg(&client->dev, "Can't read config? %d\n", status);
- goto exit_free;
+ return status;
}
data->orig_conf = status;
new = status & ~clr_mask;
@@ -186,7 +186,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Register sysfs hooks */
status = sysfs_create_group(&client->dev.kobj, &lm75_group);
if (status)
- goto exit_free;
+ return status;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -201,8 +201,6 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
exit_remove:
sysfs_remove_group(&client->dev.kobj, &lm75_group);
-exit_free:
- kfree(data);
return status;
}
@@ -213,7 +211,6 @@ static int lm75_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm75_group);
lm75_write_value(client, LM75_REG_CONF, data->orig_conf);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index 0fca861..f82acf6 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -267,10 +267,9 @@ static const struct attribute_group lm77_group = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm77_detect(struct i2c_client *new_client,
- struct i2c_board_info *info)
+static int lm77_detect(struct i2c_client *client, struct i2c_board_info *info)
{
- struct i2c_adapter *adapter = new_client->adapter;
+ struct i2c_adapter *adapter = client->adapter;
int i, cur, conf, hyst, crit, min, max;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
@@ -292,18 +291,18 @@ static int lm77_detect(struct i2c_client *new_client,
*/
/* addresses cycling */
- cur = i2c_smbus_read_word_data(new_client, 0);
- conf = i2c_smbus_read_byte_data(new_client, 1);
- hyst = i2c_smbus_read_word_data(new_client, 2);
- crit = i2c_smbus_read_word_data(new_client, 3);
- min = i2c_smbus_read_word_data(new_client, 4);
- max = i2c_smbus_read_word_data(new_client, 5);
+ cur = i2c_smbus_read_word_data(client, 0);
+ conf = i2c_smbus_read_byte_data(client, 1);
+ hyst = i2c_smbus_read_word_data(client, 2);
+ crit = i2c_smbus_read_word_data(client, 3);
+ min = i2c_smbus_read_word_data(client, 4);
+ max = i2c_smbus_read_word_data(client, 5);
for (i = 8; i <= 0xff; i += 8) {
- if (i2c_smbus_read_byte_data(new_client, i + 1) != conf
- || i2c_smbus_read_word_data(new_client, i + 2) != hyst
- || i2c_smbus_read_word_data(new_client, i + 3) != crit
- || i2c_smbus_read_word_data(new_client, i + 4) != min
- || i2c_smbus_read_word_data(new_client, i + 5) != max)
+ if (i2c_smbus_read_byte_data(client, i + 1) != conf
+ || i2c_smbus_read_word_data(client, i + 2) != hyst
+ || i2c_smbus_read_word_data(client, i + 3) != crit
+ || i2c_smbus_read_word_data(client, i + 4) != min
+ || i2c_smbus_read_word_data(client, i + 5) != max)
return -ENODEV;
}
@@ -320,17 +319,17 @@ static int lm77_detect(struct i2c_client *new_client,
return -ENODEV;
/* 0x06 and 0x07 return the last read value */
- cur = i2c_smbus_read_word_data(new_client, 0);
- if (i2c_smbus_read_word_data(new_client, 6) != cur
- || i2c_smbus_read_word_data(new_client, 7) != cur)
+ cur = i2c_smbus_read_word_data(client, 0);
+ if (i2c_smbus_read_word_data(client, 6) != cur
+ || i2c_smbus_read_word_data(client, 7) != cur)
return -ENODEV;
- hyst = i2c_smbus_read_word_data(new_client, 2);
- if (i2c_smbus_read_word_data(new_client, 6) != hyst
- || i2c_smbus_read_word_data(new_client, 7) != hyst)
+ hyst = i2c_smbus_read_word_data(client, 2);
+ if (i2c_smbus_read_word_data(client, 6) != hyst
+ || i2c_smbus_read_word_data(client, 7) != hyst)
return -ENODEV;
- min = i2c_smbus_read_word_data(new_client, 4);
- if (i2c_smbus_read_word_data(new_client, 6) != min
- || i2c_smbus_read_word_data(new_client, 7) != min)
+ min = i2c_smbus_read_word_data(client, 4);
+ if (i2c_smbus_read_word_data(client, 6) != min
+ || i2c_smbus_read_word_data(client, 7) != min)
return -ENODEV;
strlcpy(info->type, "lm77", I2C_NAME_SIZE);
@@ -338,31 +337,29 @@ static int lm77_detect(struct i2c_client *new_client,
return 0;
}
-static int lm77_probe(struct i2c_client *new_client,
- const struct i2c_device_id *id)
+static int lm77_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
struct lm77_data *data;
int err;
- data = kzalloc(sizeof(struct lm77_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(dev, sizeof(struct lm77_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- i2c_set_clientdata(new_client, data);
+ i2c_set_clientdata(client, data);
data->valid = 0;
mutex_init(&data->update_lock);
/* Initialize the LM77 chip */
- lm77_init_client(new_client);
+ lm77_init_client(client);
/* Register sysfs hooks */
- err = sysfs_create_group(&new_client->dev.kobj, &lm77_group);
+ err = sysfs_create_group(&dev->kobj, &lm77_group);
if (err)
- goto exit_free;
+ return err;
- data->hwmon_dev = hwmon_device_register(&new_client->dev);
+ data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove;
@@ -371,10 +368,7 @@ static int lm77_probe(struct i2c_client *new_client,
return 0;
exit_remove:
- sysfs_remove_group(&new_client->dev.kobj, &lm77_group);
-exit_free:
- kfree(data);
-exit:
+ sysfs_remove_group(&dev->kobj, &lm77_group);
return err;
}
@@ -383,7 +377,6 @@ static int lm77_remove(struct i2c_client *client)
struct lm77_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm77_group);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index f6bc414..c6ffafe 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -660,7 +660,7 @@ static int lm78_i2c_probe(struct i2c_client *client,
struct lm78_data *data;
int err;
- data = kzalloc(sizeof(struct lm78_data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(struct lm78_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -674,20 +674,18 @@ static int lm78_i2c_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &lm78_group);
if (err)
- goto ERROR3;
+ return err;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
- goto ERROR4;
+ goto error;
}
return 0;
-ERROR4:
+error:
sysfs_remove_group(&client->dev.kobj, &lm78_group);
-ERROR3:
- kfree(data);
return err;
}
@@ -697,7 +695,6 @@ static int lm78_i2c_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm78_group);
- kfree(data);
return 0;
}
@@ -844,16 +841,14 @@ static int __devinit lm78_isa_probe(struct platform_device *pdev)
/* Reserve the ISA region */
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start + LM78_ADDR_REG_OFFSET, 2, "lm78")) {
- err = -EBUSY;
- goto exit;
- }
+ if (!devm_request_region(&pdev->dev, res->start + LM78_ADDR_REG_OFFSET,
+ 2, "lm78"))
+ return -EBUSY;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(struct lm78_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- data = kzalloc(sizeof(struct lm78_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit_release_region;
- }
mutex_init(&data->lock);
data->isa_addr = res->start;
platform_set_drvdata(pdev, data);
@@ -888,25 +883,16 @@ static int __devinit lm78_isa_probe(struct platform_device *pdev)
exit_remove_files:
sysfs_remove_group(&pdev->dev.kobj, &lm78_group);
device_remove_file(&pdev->dev, &dev_attr_name);
- kfree(data);
- exit_release_region:
- release_region(res->start + LM78_ADDR_REG_OFFSET, 2);
- exit:
return err;
}
static int __devexit lm78_isa_remove(struct platform_device *pdev)
{
struct lm78_data *data = platform_get_drvdata(pdev);
- struct resource *res;
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&pdev->dev.kobj, &lm78_group);
device_remove_file(&pdev->dev, &dev_attr_name);
- kfree(data);
-
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- release_region(res->start + LM78_ADDR_REG_OFFSET, 2);
return 0;
}
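
The ISA variants get the same treatment for I/O ports: devm_request_region() ties the region to the device, so both the error-path release_region() calls and the ones in remove() can go. A rough sketch under the same assumptions, with hypothetical bar_* names and a made-up BAR_EXTENT:

#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define BAR_EXTENT	8	/* illustrative I/O range length */

struct bar_data {
	unsigned short isa_addr;
};

static int bar_isa_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct bar_data *data;

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	if (!res)
		return -ENOENT;

	/* Released automatically on probe failure or driver unbind */
	if (!devm_request_region(&pdev->dev, res->start, BAR_EXTENT, "bar"))
		return -EBUSY;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->isa_addr = res->start;
	platform_set_drvdata(pdev, data);

	return 0;
}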
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index e2c43e1..28a8b71 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -543,11 +543,9 @@ static int lm80_probe(struct i2c_client *client,
struct lm80_data *data;
int err;
- data = kzalloc(sizeof(struct lm80_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct lm80_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -562,7 +560,7 @@ static int lm80_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &lm80_group);
if (err)
- goto error_free;
+ return err;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -574,9 +572,6 @@ static int lm80_probe(struct i2c_client *client,
error_remove:
sysfs_remove_group(&client->dev.kobj, &lm80_group);
-error_free:
- kfree(data);
-exit:
return err;
}
@@ -587,7 +582,6 @@ static int lm80_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm80_group);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index cd45b9d..e998034 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -343,11 +343,10 @@ static int lm83_probe(struct i2c_client *new_client,
struct lm83_data *data;
int err;
- data = kzalloc(sizeof(struct lm83_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&new_client->dev, sizeof(struct lm83_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(new_client, data);
data->valid = 0;
@@ -362,7 +361,7 @@ static int lm83_probe(struct i2c_client *new_client,
err = sysfs_create_group(&new_client->dev.kobj, &lm83_group);
if (err)
- goto exit_free;
+ return err;
if (id->driver_data == lm83) {
err = sysfs_create_group(&new_client->dev.kobj,
@@ -382,9 +381,6 @@ static int lm83_probe(struct i2c_client *new_client,
exit_remove_files:
sysfs_remove_group(&new_client->dev.kobj, &lm83_group);
sysfs_remove_group(&new_client->dev.kobj, &lm83_group_opt);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -396,7 +392,6 @@ static int lm83_remove(struct i2c_client *client)
sysfs_remove_group(&client->dev.kobj, &lm83_group);
sysfs_remove_group(&client->dev.kobj, &lm83_group_opt);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 864c7d9..9f2dd77 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -1387,7 +1387,7 @@ static int lm85_probe(struct i2c_client *client,
struct lm85_data *data;
int err;
- data = kzalloc(sizeof(struct lm85_data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(struct lm85_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -1419,7 +1419,7 @@ static int lm85_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &lm85_group);
if (err)
- goto err_kfree;
+ return err;
/* minctl and temp_off exist on all chips except emc6d103s */
if (data->type != emc6d103s) {
@@ -1466,8 +1466,6 @@ static int lm85_probe(struct i2c_client *client,
/* Error out and cleanup code */
err_remove_files:
lm85_remove_files(client, data);
- err_kfree:
- kfree(data);
return err;
}
@@ -1476,7 +1474,6 @@ static int lm85_remove(struct i2c_client *client)
struct lm85_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
lm85_remove_files(client, data);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index 314d147..16e45d7 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -898,11 +898,9 @@ static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct lm87_data *data;
int err;
- data = kzalloc(sizeof(struct lm87_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct lm87_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
data->valid = 0;
@@ -923,7 +921,7 @@ static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &lm87_group);
if (err)
- goto exit_free;
+ goto exit_stop;
if (data->channel & CHAN_NO_FAN(0)) {
err = sysfs_create_group(&client->dev.kobj, &lm87_group_in6);
@@ -972,10 +970,8 @@ static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id)
exit_remove:
lm87_remove_files(client);
-exit_free:
+exit_stop:
lm87_write_value(client, LM87_REG_CONFIG, data->config);
- kfree(data);
-exit:
return err;
}
@@ -987,7 +983,6 @@ static int lm87_remove(struct i2c_client *client)
lm87_remove_files(client);
lm87_write_value(client, LM87_REG_CONFIG, data->config);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 22b14a68..863412a 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1399,11 +1399,10 @@ static int lm90_probe(struct i2c_client *client,
struct lm90_data *data;
int err;
- data = kzalloc(sizeof(struct lm90_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct lm90_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -1474,8 +1473,6 @@ exit_remove_files:
lm90_remove_files(client, data);
exit_restore:
lm90_restore_conf(client, data);
- kfree(data);
-exit:
return err;
}
@@ -1487,7 +1484,6 @@ static int lm90_remove(struct i2c_client *client)
lm90_remove_files(client, data);
lm90_restore_conf(client, data);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index fdc691a..2282d77 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -373,11 +373,10 @@ static int lm92_probe(struct i2c_client *new_client,
struct lm92_data *data;
int err;
- data = kzalloc(sizeof(struct lm92_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&new_client->dev, sizeof(struct lm92_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(new_client, data);
data->valid = 0;
@@ -389,7 +388,7 @@ static int lm92_probe(struct i2c_client *new_client,
/* Register sysfs hooks */
err = sysfs_create_group(&new_client->dev.kobj, &lm92_group);
if (err)
- goto exit_free;
+ return err;
data->hwmon_dev = hwmon_device_register(&new_client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -401,9 +400,6 @@ static int lm92_probe(struct i2c_client *new_client,
exit_remove:
sysfs_remove_group(&new_client->dev.kobj, &lm92_group);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -414,7 +410,6 @@ static int lm92_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm92_group);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index 67e8fe2..bf94618 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -2738,15 +2738,13 @@ static int lm93_probe(struct i2c_client *client,
} else {
dev_dbg(&client->dev, "detect failed, "
"smbus byte and/or word data not supported!\n");
- err = -ENODEV;
- goto err_out;
+ return -ENODEV;
}
- data = kzalloc(sizeof(struct lm93_data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(struct lm93_data), GFP_KERNEL);
if (!data) {
dev_dbg(&client->dev, "out of memory!\n");
- err = -ENOMEM;
- goto err_out;
+ return -ENOMEM;
}
i2c_set_clientdata(client, data);
@@ -2760,7 +2758,7 @@ static int lm93_probe(struct i2c_client *client,
err = sysfs_create_group(&client->dev.kobj, &lm93_attr_grp);
if (err)
- goto err_free;
+ return err;
/* Register hwmon driver class */
data->hwmon_dev = hwmon_device_register(&client->dev);
@@ -2770,9 +2768,6 @@ static int lm93_probe(struct i2c_client *client,
err = PTR_ERR(data->hwmon_dev);
dev_err(&client->dev, "error registering hwmon device.\n");
sysfs_remove_group(&client->dev.kobj, &lm93_attr_grp);
-err_free:
- kfree(data);
-err_out:
return err;
}
@@ -2783,7 +2778,6 @@ static int lm93_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm93_attr_grp);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index 069b7d3..77476a5 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -292,6 +292,6 @@ static struct i2c_driver ltc4261_driver = {
module_i2c_driver(ltc4261_driver);
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION("LTC4261 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index 362a40e..f3978a4 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -168,7 +168,7 @@ static int __devinit max1111_probe(struct spi_device *spi)
if (err < 0)
return err;
- data = kzalloc(sizeof(struct max1111_data), GFP_KERNEL);
+ data = devm_kzalloc(&spi->dev, sizeof(struct max1111_data), GFP_KERNEL);
if (data == NULL) {
dev_err(&spi->dev, "failed to allocate memory\n");
return -ENOMEM;
@@ -176,7 +176,7 @@ static int __devinit max1111_probe(struct spi_device *spi)
err = setup_transfer(data);
if (err)
- goto err_free_data;
+ return err;
mutex_init(&data->drvdata_lock);
@@ -186,7 +186,7 @@ static int __devinit max1111_probe(struct spi_device *spi)
err = sysfs_create_group(&spi->dev.kobj, &max1111_attr_group);
if (err) {
dev_err(&spi->dev, "failed to create attribute group\n");
- goto err_free_data;
+ return err;
}
data->hwmon_dev = hwmon_device_register(&spi->dev);
@@ -203,8 +203,6 @@ static int __devinit max1111_probe(struct spi_device *spi)
err_remove:
sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
-err_free_data:
- kfree(data);
return err;
}
@@ -215,7 +213,6 @@ static int __devexit max1111_remove(struct spi_device *spi)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
mutex_destroy(&data->drvdata_lock);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index 822261b..019427d 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -692,6 +692,6 @@ static struct i2c_driver max16065_driver = {
module_i2c_driver(max16065_driver);
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION("MAX16065 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index ecac04a7..6c11ec2 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -267,11 +267,10 @@ static int max1619_probe(struct i2c_client *new_client,
struct max1619_data *data;
int err;
- data = kzalloc(sizeof(struct max1619_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&new_client->dev, sizeof(struct max1619_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(new_client, data);
data->valid = 0;
@@ -283,7 +282,7 @@ static int max1619_probe(struct i2c_client *new_client,
/* Register sysfs hooks */
err = sysfs_create_group(&new_client->dev.kobj, &max1619_group);
if (err)
- goto exit_free;
+ return err;
data->hwmon_dev = hwmon_device_register(&new_client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -295,9 +294,6 @@ static int max1619_probe(struct i2c_client *new_client,
exit_remove_files:
sysfs_remove_group(&new_client->dev.kobj, &max1619_group);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -323,7 +319,6 @@ static int max1619_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &max1619_group);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index de8f7ad..6e60036 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -548,11 +548,10 @@ static int max6639_probe(struct i2c_client *client,
struct max6639_data *data;
int err;
- data = kzalloc(sizeof(struct max6639_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct max6639_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -560,12 +559,12 @@ static int max6639_probe(struct i2c_client *client,
/* Initialize the max6639 chip */
err = max6639_init_client(client);
if (err < 0)
- goto error_free;
+ return err;
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &max6639_group);
if (err)
- goto error_free;
+ return err;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -579,9 +578,6 @@ static int max6639_probe(struct i2c_client *client,
error_remove:
sysfs_remove_group(&client->dev.kobj, &max6639_group);
-error_free:
- kfree(data);
-exit:
return err;
}
@@ -592,7 +588,6 @@ static int max6639_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &max6639_group);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 4298909..bf236c0 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -286,11 +286,10 @@ static int max6642_probe(struct i2c_client *new_client,
struct max6642_data *data;
int err;
- data = kzalloc(sizeof(struct max6642_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&new_client->dev, sizeof(struct max6642_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(new_client, data);
mutex_init(&data->update_lock);
@@ -301,7 +300,7 @@ static int max6642_probe(struct i2c_client *new_client,
/* Register sysfs hooks */
err = sysfs_create_group(&new_client->dev.kobj, &max6642_group);
if (err)
- goto exit_free;
+ return err;
data->hwmon_dev = hwmon_device_register(&new_client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -313,9 +312,6 @@ static int max6642_probe(struct i2c_client *new_client,
exit_remove_files:
sysfs_remove_group(&new_client->dev.kobj, &max6642_group);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -326,7 +322,6 @@ static int max6642_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &max6642_group);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 33a8a7f..f739f83 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -545,7 +545,8 @@ static int max6650_probe(struct i2c_client *client,
struct max6650_data *data;
int err;
- data = kzalloc(sizeof(struct max6650_data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(struct max6650_data),
+ GFP_KERNEL);
if (!data) {
dev_err(&client->dev, "out of memory.\n");
return -ENOMEM;
@@ -560,11 +561,11 @@ static int max6650_probe(struct i2c_client *client,
*/
err = max6650_init_client(client);
if (err)
- goto err_free;
+ return err;
err = sysfs_create_group(&client->dev.kobj, &max6650_attr_grp);
if (err)
- goto err_free;
+ return err;
/* 3 additional fan inputs for the MAX6651 */
if (data->nr_fans == 4) {
err = sysfs_create_group(&client->dev.kobj, &max6651_attr_grp);
@@ -582,8 +583,6 @@ static int max6650_probe(struct i2c_client *client,
sysfs_remove_group(&client->dev.kobj, &max6651_attr_grp);
err_remove:
sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
-err_free:
- kfree(data);
return err;
}
@@ -595,7 +594,6 @@ static int max6650_remove(struct i2c_client *client)
if (data->nr_fans == 4)
sysfs_remove_group(&client->dev.kobj, &max6651_attr_grp);
sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/mc13783-adc.c b/drivers/hwmon/mc13783-adc.c
index ce86c5e..cf47a59 100644
--- a/drivers/hwmon/mc13783-adc.c
+++ b/drivers/hwmon/mc13783-adc.c
@@ -179,7 +179,7 @@ static int __init mc13783_adc_probe(struct platform_device *pdev)
const struct platform_device_id *id = platform_get_device_id(pdev);
char *dash;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -194,7 +194,7 @@ static int __init mc13783_adc_probe(struct platform_device *pdev)
/* Register sysfs hooks */
ret = sysfs_create_group(&pdev->dev.kobj, &mc13783_group_base);
if (ret)
- goto out_err_create_base;
+ return ret;
if (id->driver_data & MC13783_ADC_16CHANS) {
ret = sysfs_create_group(&pdev->dev.kobj,
@@ -230,11 +230,6 @@ out_err_create_ts:
out_err_create_16chans:
sysfs_remove_group(&pdev->dev.kobj, &mc13783_group_base);
-out_err_create_base:
-
- platform_set_drvdata(pdev, NULL);
- kfree(priv);
-
return ret;
}
@@ -253,9 +248,6 @@ static int __devexit mc13783_adc_remove(struct platform_device *pdev)
sysfs_remove_group(&pdev->dev.kobj, &mc13783_group_base);
- platform_set_drvdata(pdev, NULL);
- kfree(priv);
-
return 0;
}
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 6da9696..74a6c58 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -351,7 +351,7 @@ static int __devinit ntc_thermistor_probe(struct platform_device *pdev)
data->dev = &pdev->dev;
data->pdata = pdata;
- strncpy(data->name, pdev->id_entry->name, PLATFORM_NAME_SIZE);
+ strlcpy(data->name, pdev->id_entry->name, sizeof(data->name));
switch (pdev->id_entry->driver_data) {
case TYPE_NCPXXWB473:
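
The single-line ntc_thermistor change swaps strncpy() for strlcpy() and sizes the copy by the destination buffer: strncpy() can leave the buffer without a terminating NUL when the source is at least as long as the limit, while strlcpy() always terminates and truncates instead. In fragment form (the local name buffer is hypothetical):

	char name[PLATFORM_NAME_SIZE];

	/*
	 * strncpy(name, src, PLATFORM_NAME_SIZE) may leave name unterminated;
	 * strlcpy() always NUL-terminates and truncates to sizeof(name) - 1.
	 */
	strlcpy(name, pdev->id_entry->name, sizeof(name));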
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 79ba48c..91d5b2a 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -1230,7 +1230,7 @@ static int __devinit pc87360_probe(struct platform_device *pdev)
int use_thermistors = 0;
struct device *dev = &pdev->dev;
- data = kzalloc(sizeof(struct pc87360_data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct pc87360_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -1269,15 +1269,12 @@ static int __devinit pc87360_probe(struct platform_device *pdev)
for (i = 0; i < LDNI_MAX; i++) {
data->address[i] = extra_isa[i];
if (data->address[i]
- && !request_region(extra_isa[i], PC87360_EXTENT,
- pc87360_driver.driver.name)) {
+ && !devm_request_region(dev, extra_isa[i], PC87360_EXTENT,
+ pc87360_driver.driver.name)) {
dev_err(dev, "Region 0x%x-0x%x already "
"in use!\n", extra_isa[i],
extra_isa[i]+PC87360_EXTENT-1);
- for (i--; i >= 0; i--)
- release_region(extra_isa[i], PC87360_EXTENT);
- err = -EBUSY;
- goto ERROR1;
+ return -EBUSY;
}
}
@@ -1325,13 +1322,13 @@ static int __devinit pc87360_probe(struct platform_device *pdev)
if (data->innr) {
err = sysfs_create_group(&dev->kobj, &pc8736x_vin_group);
if (err)
- goto ERROR3;
+ goto error;
}
if (data->innr == 14) {
err = sysfs_create_group(&dev->kobj, &pc8736x_therm_group);
if (err)
- goto ERROR3;
+ goto error;
}
/* create device attr-files for varying sysfs groups */
@@ -1341,11 +1338,11 @@ static int __devinit pc87360_probe(struct platform_device *pdev)
err = sysfs_create_group(&dev->kobj,
&pc8736x_temp_attr_group[i]);
if (err)
- goto ERROR3;
+ goto error;
}
err = device_create_file(dev, &dev_attr_alarms_temp);
if (err)
- goto ERROR3;
+ goto error;
}
for (i = 0; i < data->fannr; i++) {
@@ -1353,49 +1350,37 @@ static int __devinit pc87360_probe(struct platform_device *pdev)
err = sysfs_create_group(&dev->kobj,
&pc8736x_fan_attr_group[i]);
if (err)
- goto ERROR3;
+ goto error;
}
if (FAN_CONFIG_CONTROL(data->fan_conf, i)) {
err = device_create_file(dev, &pwm[i].dev_attr);
if (err)
- goto ERROR3;
+ goto error;
}
}
err = device_create_file(dev, &dev_attr_name);
if (err)
- goto ERROR3;
+ goto error;
data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
- goto ERROR3;
+ goto error;
}
return 0;
-ERROR3:
+error:
pc87360_remove_files(dev);
- for (i = 0; i < 3; i++) {
- if (data->address[i])
- release_region(data->address[i], PC87360_EXTENT);
- }
-ERROR1:
- kfree(data);
return err;
}
static int __devexit pc87360_remove(struct platform_device *pdev)
{
struct pc87360_data *data = platform_get_drvdata(pdev);
- int i;
hwmon_device_unregister(data->hwmon_dev);
pc87360_remove_files(&pdev->dev);
- for (i = 0; i < 3; i++) {
- if (data->address[i])
- release_region(data->address[i], PC87360_EXTENT);
- }
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index 37059a3..f185b1f 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -956,44 +956,28 @@ static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
* Device detection, attach and detach
*/
-static void pc87427_release_regions(struct platform_device *pdev, int count)
-{
- struct resource *res;
- int i;
-
- for (i = 0; i < count; i++) {
- res = platform_get_resource(pdev, IORESOURCE_IO, i);
- release_region(res->start, resource_size(res));
- }
-}
-
static int __devinit pc87427_request_regions(struct platform_device *pdev,
int count)
{
struct resource *res;
- int i, err = 0;
+ int i;
for (i = 0; i < count; i++) {
res = platform_get_resource(pdev, IORESOURCE_IO, i);
if (!res) {
- err = -ENOENT;
dev_err(&pdev->dev, "Missing resource #%d\n", i);
- break;
+ return -ENOENT;
}
- if (!request_region(res->start, resource_size(res), DRVNAME)) {
- err = -EBUSY;
+ if (!devm_request_region(&pdev->dev, res->start,
+ resource_size(res), DRVNAME)) {
dev_err(&pdev->dev,
"Failed to request region 0x%lx-0x%lx\n",
(unsigned long)res->start,
(unsigned long)res->end);
- break;
+ return -EBUSY;
}
}
-
- if (err && i)
- pc87427_release_regions(pdev, i);
-
- return err;
+ return 0;
}
static void __devinit pc87427_init_device(struct device *dev)
@@ -1094,11 +1078,11 @@ static int __devinit pc87427_probe(struct platform_device *pdev)
struct pc87427_data *data;
int i, err, res_count;
- data = kzalloc(sizeof(struct pc87427_data), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, sizeof(struct pc87427_data),
+ GFP_KERNEL);
if (!data) {
- err = -ENOMEM;
pr_err("Out of memory\n");
- goto exit;
+ return -ENOMEM;
}
data->address[0] = sio_data->address[0];
@@ -1107,7 +1091,7 @@ static int __devinit pc87427_probe(struct platform_device *pdev)
err = pc87427_request_regions(pdev, res_count);
if (err)
- goto exit_kfree;
+ return err;
mutex_init(&data->lock);
data->name = "pc87427";
@@ -1117,7 +1101,7 @@ static int __devinit pc87427_probe(struct platform_device *pdev)
/* Register sysfs hooks */
err = device_create_file(&pdev->dev, &dev_attr_name);
if (err)
- goto exit_release_region;
+ return err;
for (i = 0; i < 8; i++) {
if (!(data->fan_enabled & (1 << i)))
continue;
@@ -1154,28 +1138,15 @@ static int __devinit pc87427_probe(struct platform_device *pdev)
exit_remove_files:
pc87427_remove_files(&pdev->dev);
-exit_release_region:
- pc87427_release_regions(pdev, res_count);
-exit_kfree:
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-exit:
return err;
}
static int __devexit pc87427_remove(struct platform_device *pdev)
{
struct pc87427_data *data = platform_get_drvdata(pdev);
- int res_count;
-
- res_count = (data->address[0] != 0) + (data->address[1] != 0);
hwmon_device_unregister(data->hwmon_dev);
pc87427_remove_files(&pdev->dev);
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-
- pc87427_release_regions(pdev, res_count);
return 0;
}
diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c
index 4174c74..825883d 100644
--- a/drivers/hwmon/pcf8591.c
+++ b/drivers/hwmon/pcf8591.c
@@ -200,11 +200,10 @@ static int pcf8591_probe(struct i2c_client *client,
struct pcf8591_data *data;
int err;
- data = kzalloc(sizeof(struct pcf8591_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct pcf8591_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -215,7 +214,7 @@ static int pcf8591_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &pcf8591_attr_group);
if (err)
- goto exit_kfree;
+ return err;
/* Register input2 if not in "two differential inputs" mode */
if (input_mode != 3) {
@@ -242,9 +241,6 @@ static int pcf8591_probe(struct i2c_client *client,
exit_sysfs_remove:
sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group_opt);
sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group);
-exit_kfree:
- kfree(data);
-exit:
return err;
}
@@ -255,7 +251,6 @@ static int pcf8591_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group_opt);
sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group);
- kfree(i2c_get_clientdata(client));
return 0;
}
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index f6c26d1..b7975f8 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -288,7 +288,7 @@ static int __devinit s3c_hwmon_probe(struct platform_device *dev)
return -EINVAL;
}
- hwmon = kzalloc(sizeof(struct s3c_hwmon), GFP_KERNEL);
+ hwmon = devm_kzalloc(&dev->dev, sizeof(struct s3c_hwmon), GFP_KERNEL);
if (hwmon == NULL) {
dev_err(&dev->dev, "no memory\n");
return -ENOMEM;
@@ -303,8 +303,7 @@ static int __devinit s3c_hwmon_probe(struct platform_device *dev)
hwmon->client = s3c_adc_register(dev, NULL, NULL, 0);
if (IS_ERR(hwmon->client)) {
dev_err(&dev->dev, "cannot register adc\n");
- ret = PTR_ERR(hwmon->client);
- goto err_mem;
+ return PTR_ERR(hwmon->client);
}
/* add attributes for our adc devices. */
@@ -363,8 +362,6 @@ static int __devinit s3c_hwmon_probe(struct platform_device *dev)
err_registered:
s3c_adc_release(hwmon->client);
- err_mem:
- kfree(hwmon);
return ret;
}
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index 8ec6dfb..8342275 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -579,7 +579,7 @@ static int __devinit sch5627_probe(struct platform_device *pdev)
}
/* Note failing to register the watchdog is not a fatal error */
- data->watchdog = sch56xx_watchdog_register(data->addr,
+ data->watchdog = sch56xx_watchdog_register(&pdev->dev, data->addr,
(build_code << 24) | (build_id << 8) | hwmon_rev,
&data->update_lock, 1);
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index 906d4ed..96a7e68 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -510,7 +510,7 @@ static int __devinit sch5636_probe(struct platform_device *pdev)
}
/* Note failing to register the watchdog is not a fatal error */
- data->watchdog = sch56xx_watchdog_register(data->addr,
+ data->watchdog = sch56xx_watchdog_register(&pdev->dev, data->addr,
(revision[0] << 8) | revision[1],
&data->update_lock, 0);
diff --git a/drivers/hwmon/sch56xx-common.c b/drivers/hwmon/sch56xx-common.c
index ce52fc5..4380f5d 100644
--- a/drivers/hwmon/sch56xx-common.c
+++ b/drivers/hwmon/sch56xx-common.c
@@ -66,15 +66,10 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
struct sch56xx_watchdog_data {
u16 addr;
- u32 revision;
struct mutex *io_lock;
- struct mutex watchdog_lock;
- struct list_head list; /* member of the watchdog_data_list */
struct kref kref;
- struct miscdevice watchdog_miscdev;
- unsigned long watchdog_is_open;
- char watchdog_name[10]; /* must be unique to avoid sysfs conflict */
- char watchdog_expect_close;
+ struct watchdog_info wdinfo;
+ struct watchdog_device wddev;
u8 watchdog_preset;
u8 watchdog_control;
u8 watchdog_output_enable;
@@ -82,15 +77,6 @@ struct sch56xx_watchdog_data {
static struct platform_device *sch56xx_pdev;
-/*
- * Somewhat ugly :( global data pointer list with all sch56xx devices, so that
- * we can find our device data as when using misc_register there is no other
- * method to get to ones device data from the open fop.
- */
-static LIST_HEAD(watchdog_data_list);
-/* Note this lock not only protect list access, but also data.kref access */
-static DEFINE_MUTEX(watchdog_data_mutex);
-
/* Super I/O functions */
static inline int superio_inb(int base, int reg)
{
@@ -272,22 +258,22 @@ EXPORT_SYMBOL(sch56xx_read_virtual_reg12);
* Watchdog routines
*/
-/*
- * Release our data struct when the platform device has been released *and*
- * all references to our watchdog device are released.
- */
-static void sch56xx_watchdog_release_resources(struct kref *r)
+/* Release our data struct when we're unregistered *and*
+ all references to our watchdog device are released */
+static void watchdog_release_resources(struct kref *r)
{
struct sch56xx_watchdog_data *data =
container_of(r, struct sch56xx_watchdog_data, kref);
kfree(data);
}
-static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
- int timeout)
+static int watchdog_set_timeout(struct watchdog_device *wddev,
+ unsigned int timeout)
{
- int ret, resolution;
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
+ unsigned int resolution;
u8 control;
+ int ret;
/* 1 second or 60 second resolution? */
if (timeout <= 255)
@@ -298,12 +284,6 @@ static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
if (timeout < resolution || timeout > (resolution * 255))
return -EINVAL;
- mutex_lock(&data->watchdog_lock);
- if (!data->addr) {
- ret = -ENODEV;
- goto leave;
- }
-
if (resolution == 1)
control = data->watchdog_control | SCH56XX_WDOG_TIME_BASE_SEC;
else
@@ -316,7 +296,7 @@ static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
control);
mutex_unlock(data->io_lock);
if (ret)
- goto leave;
+ return ret;
data->watchdog_control = control;
}
@@ -326,38 +306,17 @@ static int watchdog_set_timeout(struct sch56xx_watchdog_data *data,
* the watchdog countdown.
*/
data->watchdog_preset = DIV_ROUND_UP(timeout, resolution);
+ wddev->timeout = data->watchdog_preset * resolution;
- ret = data->watchdog_preset * resolution;
-leave:
- mutex_unlock(&data->watchdog_lock);
- return ret;
-}
-
-static int watchdog_get_timeout(struct sch56xx_watchdog_data *data)
-{
- int timeout;
-
- mutex_lock(&data->watchdog_lock);
- if (data->watchdog_control & SCH56XX_WDOG_TIME_BASE_SEC)
- timeout = data->watchdog_preset;
- else
- timeout = data->watchdog_preset * 60;
- mutex_unlock(&data->watchdog_lock);
-
- return timeout;
+ return 0;
}
-static int watchdog_start(struct sch56xx_watchdog_data *data)
+static int watchdog_start(struct watchdog_device *wddev)
{
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
int ret;
u8 val;
- mutex_lock(&data->watchdog_lock);
- if (!data->addr) {
- ret = -ENODEV;
- goto leave_unlock_watchdog;
- }
-
/*
* The sch56xx's watchdog cannot really be started / stopped
* it is always running, but we can avoid the timer expiring
@@ -385,18 +344,14 @@ static int watchdog_start(struct sch56xx_watchdog_data *data)
if (ret)
goto leave;
- /* 2. Enable output (if not already enabled) */
- if (!(data->watchdog_output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)) {
- val = data->watchdog_output_enable |
- SCH56XX_WDOG_OUTPUT_ENABLE;
- ret = sch56xx_write_virtual_reg(data->addr,
- SCH56XX_REG_WDOG_OUTPUT_ENABLE,
- val);
- if (ret)
- goto leave;
+ /* 2. Enable output */
+ val = data->watchdog_output_enable | SCH56XX_WDOG_OUTPUT_ENABLE;
+ ret = sch56xx_write_virtual_reg(data->addr,
+ SCH56XX_REG_WDOG_OUTPUT_ENABLE, val);
+ if (ret)
+ goto leave;
- data->watchdog_output_enable = val;
- }
+ data->watchdog_output_enable = val;
/* 3. Clear the watchdog event bit if set */
val = inb(data->addr + 9);
@@ -405,234 +360,70 @@ static int watchdog_start(struct sch56xx_watchdog_data *data)
leave:
mutex_unlock(data->io_lock);
-leave_unlock_watchdog:
- mutex_unlock(&data->watchdog_lock);
return ret;
}
-static int watchdog_trigger(struct sch56xx_watchdog_data *data)
+static int watchdog_trigger(struct watchdog_device *wddev)
{
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
int ret;
- mutex_lock(&data->watchdog_lock);
- if (!data->addr) {
- ret = -ENODEV;
- goto leave;
- }
-
/* Reset the watchdog countdown counter */
mutex_lock(data->io_lock);
ret = sch56xx_write_virtual_reg(data->addr, SCH56XX_REG_WDOG_PRESET,
data->watchdog_preset);
mutex_unlock(data->io_lock);
-leave:
- mutex_unlock(&data->watchdog_lock);
+
return ret;
}
-static int watchdog_stop_unlocked(struct sch56xx_watchdog_data *data)
+static int watchdog_stop(struct watchdog_device *wddev)
{
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
int ret = 0;
u8 val;
- if (!data->addr)
- return -ENODEV;
-
- if (data->watchdog_output_enable & SCH56XX_WDOG_OUTPUT_ENABLE) {
- val = data->watchdog_output_enable &
- ~SCH56XX_WDOG_OUTPUT_ENABLE;
- mutex_lock(data->io_lock);
- ret = sch56xx_write_virtual_reg(data->addr,
- SCH56XX_REG_WDOG_OUTPUT_ENABLE,
- val);
- mutex_unlock(data->io_lock);
- if (ret)
- return ret;
-
- data->watchdog_output_enable = val;
- }
-
- return ret;
-}
-
-static int watchdog_stop(struct sch56xx_watchdog_data *data)
-{
- int ret;
-
- mutex_lock(&data->watchdog_lock);
- ret = watchdog_stop_unlocked(data);
- mutex_unlock(&data->watchdog_lock);
-
- return ret;
-}
-
-static int watchdog_release(struct inode *inode, struct file *filp)
-{
- struct sch56xx_watchdog_data *data = filp->private_data;
-
- if (data->watchdog_expect_close) {
- watchdog_stop(data);
- data->watchdog_expect_close = 0;
- } else {
- watchdog_trigger(data);
- pr_crit("unexpected close, not stopping watchdog!\n");
- }
-
- clear_bit(0, &data->watchdog_is_open);
-
- mutex_lock(&watchdog_data_mutex);
- kref_put(&data->kref, sch56xx_watchdog_release_resources);
- mutex_unlock(&watchdog_data_mutex);
+ val = data->watchdog_output_enable & ~SCH56XX_WDOG_OUTPUT_ENABLE;
+ mutex_lock(data->io_lock);
+ ret = sch56xx_write_virtual_reg(data->addr,
+ SCH56XX_REG_WDOG_OUTPUT_ENABLE, val);
+ mutex_unlock(data->io_lock);
+ if (ret)
+ return ret;
+ data->watchdog_output_enable = val;
return 0;
}
-static int watchdog_open(struct inode *inode, struct file *filp)
+static void watchdog_ref(struct watchdog_device *wddev)
{
- struct sch56xx_watchdog_data *pos, *data = NULL;
- int ret, watchdog_is_open;
-
- /*
- * We get called from drivers/char/misc.c with misc_mtx hold, and we
- * call misc_register() from sch56xx_watchdog_probe() with
- * watchdog_data_mutex hold, as misc_register() takes the misc_mtx
- * lock, this is a possible deadlock, so we use mutex_trylock here.
- */
- if (!mutex_trylock(&watchdog_data_mutex))
- return -ERESTARTSYS;
- list_for_each_entry(pos, &watchdog_data_list, list) {
- if (pos->watchdog_miscdev.minor == iminor(inode)) {
- data = pos;
- break;
- }
- }
- /* Note we can never not have found data, so we don't check for this */
- watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open);
- if (!watchdog_is_open)
- kref_get(&data->kref);
- mutex_unlock(&watchdog_data_mutex);
-
- if (watchdog_is_open)
- return -EBUSY;
-
- filp->private_data = data;
-
- /* Start the watchdog */
- ret = watchdog_start(data);
- if (ret) {
- watchdog_release(inode, filp);
- return ret;
- }
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
- return nonseekable_open(inode, filp);
+ kref_get(&data->kref);
}
-static ssize_t watchdog_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *offset)
+static void watchdog_unref(struct watchdog_device *wddev)
{
- int ret;
- struct sch56xx_watchdog_data *data = filp->private_data;
-
- if (count) {
- if (!nowayout) {
- size_t i;
-
- /* Clear it in case it was set with a previous write */
- data->watchdog_expect_close = 0;
-
- for (i = 0; i != count; i++) {
- char c;
- if (get_user(c, buf + i))
- return -EFAULT;
- if (c == 'V')
- data->watchdog_expect_close = 1;
- }
- }
- ret = watchdog_trigger(data);
- if (ret)
- return ret;
- }
- return count;
-}
-
-static long watchdog_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- struct watchdog_info ident = {
- .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
- .identity = "sch56xx watchdog"
- };
- int i, ret = 0;
- struct sch56xx_watchdog_data *data = filp->private_data;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- ident.firmware_version = data->revision;
- if (!nowayout)
- ident.options |= WDIOF_MAGICCLOSE;
- if (copy_to_user((void __user *)arg, &ident, sizeof(ident)))
- ret = -EFAULT;
- break;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- ret = put_user(0, (int __user *)arg);
- break;
-
- case WDIOC_KEEPALIVE:
- ret = watchdog_trigger(data);
- break;
+ struct sch56xx_watchdog_data *data = watchdog_get_drvdata(wddev);
- case WDIOC_GETTIMEOUT:
- i = watchdog_get_timeout(data);
- ret = put_user(i, (int __user *)arg);
- break;
-
- case WDIOC_SETTIMEOUT:
- if (get_user(i, (int __user *)arg)) {
- ret = -EFAULT;
- break;
- }
- ret = watchdog_set_timeout(data, i);
- if (ret >= 0)
- ret = put_user(ret, (int __user *)arg);
- break;
-
- case WDIOC_SETOPTIONS:
- if (get_user(i, (int __user *)arg)) {
- ret = -EFAULT;
- break;
- }
-
- if (i & WDIOS_DISABLECARD)
- ret = watchdog_stop(data);
- else if (i & WDIOS_ENABLECARD)
- ret = watchdog_trigger(data);
- else
- ret = -EINVAL;
- break;
-
- default:
- ret = -ENOTTY;
- }
- return ret;
+ kref_put(&data->kref, watchdog_release_resources);
}
-static const struct file_operations watchdog_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .open = watchdog_open,
- .release = watchdog_release,
- .write = watchdog_write,
- .unlocked_ioctl = watchdog_ioctl,
+static const struct watchdog_ops watchdog_ops = {
+ .owner = THIS_MODULE,
+ .start = watchdog_start,
+ .stop = watchdog_stop,
+ .ping = watchdog_trigger,
+ .set_timeout = watchdog_set_timeout,
+ .ref = watchdog_ref,
+ .unref = watchdog_unref,
};
-struct sch56xx_watchdog_data *sch56xx_watchdog_register(
+struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
u16 addr, u32 revision, struct mutex *io_lock, int check_enabled)
{
struct sch56xx_watchdog_data *data;
- int i, err, control, output_enable;
- const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 };
+ int err, control, output_enable;
/* Cache the watchdog registers */
mutex_lock(io_lock);
@@ -656,82 +447,55 @@ struct sch56xx_watchdog_data *sch56xx_watchdog_register(
return NULL;
data->addr = addr;
- data->revision = revision;
data->io_lock = io_lock;
- data->watchdog_control = control;
- data->watchdog_output_enable = output_enable;
- mutex_init(&data->watchdog_lock);
- INIT_LIST_HEAD(&data->list);
kref_init(&data->kref);
- err = watchdog_set_timeout(data, 60);
- if (err < 0)
- goto error;
-
- /*
- * We take the data_mutex lock early so that watchdog_open() cannot
- * run when misc_register() has completed, but we've not yet added
- * our data to the watchdog_data_list.
- */
- mutex_lock(&watchdog_data_mutex);
- for (i = 0; i < ARRAY_SIZE(watchdog_minors); i++) {
- /* Register our watchdog part */
- snprintf(data->watchdog_name, sizeof(data->watchdog_name),
- "watchdog%c", (i == 0) ? '\0' : ('0' + i));
- data->watchdog_miscdev.name = data->watchdog_name;
- data->watchdog_miscdev.fops = &watchdog_fops;
- data->watchdog_miscdev.minor = watchdog_minors[i];
- err = misc_register(&data->watchdog_miscdev);
- if (err == -EBUSY)
- continue;
- if (err)
- break;
+ strlcpy(data->wdinfo.identity, "sch56xx watchdog",
+ sizeof(data->wdinfo.identity));
+ data->wdinfo.firmware_version = revision;
+ data->wdinfo.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT;
+ if (!nowayout)
+ data->wdinfo.options |= WDIOF_MAGICCLOSE;
+
+ data->wddev.info = &data->wdinfo;
+ data->wddev.ops = &watchdog_ops;
+ data->wddev.parent = parent;
+ data->wddev.timeout = 60;
+ data->wddev.min_timeout = 1;
+ data->wddev.max_timeout = 255 * 60;
+ if (nowayout)
+ set_bit(WDOG_NO_WAY_OUT, &data->wddev.status);
+ if (output_enable & SCH56XX_WDOG_OUTPUT_ENABLE)
+ set_bit(WDOG_ACTIVE, &data->wddev.status);
+
+ /* Since the watchdog uses a downcounter there is no register to read
+ the BIOS set timeout from (if any was set at all) ->
+ Choose a preset which will give us a 1 minute timeout */
+ if (control & SCH56XX_WDOG_TIME_BASE_SEC)
+ data->watchdog_preset = 60; /* seconds */
+ else
+ data->watchdog_preset = 1; /* minute */
- list_add(&data->list, &watchdog_data_list);
- pr_info("Registered /dev/%s chardev major 10, minor: %d\n",
- data->watchdog_name, watchdog_minors[i]);
- break;
- }
- mutex_unlock(&watchdog_data_mutex);
+ data->watchdog_control = control;
+ data->watchdog_output_enable = output_enable;
+ watchdog_set_drvdata(&data->wddev, data);
+ err = watchdog_register_device(&data->wddev);
if (err) {
pr_err("Registering watchdog chardev: %d\n", err);
- goto error;
- }
- if (i == ARRAY_SIZE(watchdog_minors)) {
- pr_warn("Couldn't register watchdog (no free minor)\n");
- goto error;
+ kfree(data);
+ return NULL;
}
return data;
-
-error:
- kfree(data);
- return NULL;
}
EXPORT_SYMBOL(sch56xx_watchdog_register);
void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data)
{
- mutex_lock(&watchdog_data_mutex);
- misc_deregister(&data->watchdog_miscdev);
- list_del(&data->list);
- mutex_unlock(&watchdog_data_mutex);
-
- mutex_lock(&data->watchdog_lock);
- if (data->watchdog_is_open) {
- pr_warn("platform device unregistered with watchdog "
- "open! Stopping watchdog.\n");
- watchdog_stop_unlocked(data);
- }
- /* Tell the wdog start/stop/trigger functions our dev is gone */
- data->addr = 0;
- data->io_lock = NULL;
- mutex_unlock(&data->watchdog_lock);
-
- mutex_lock(&watchdog_data_mutex);
- kref_put(&data->kref, sch56xx_watchdog_release_resources);
- mutex_unlock(&watchdog_data_mutex);
+ watchdog_unregister_device(&data->wddev);
+ kref_put(&data->kref, watchdog_release_resources);
+ /* Don't touch data after this; it may have been freed! */
}
EXPORT_SYMBOL(sch56xx_watchdog_unregister);
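
The sch56xx rework drops the hand-rolled misc-device/ioctl watchdog in favour of the generic watchdog framework: the driver fills a watchdog_info and a watchdog_device, supplies start/stop/ping/set_timeout callbacks through watchdog_ops, and lets the core provide /dev/watchdogN, magic close and the WDIOC ioctls. A stripped-down sketch of that registration — the foo_ names are placeholders, and the callback bodies are the ones shown in the diff above, not repeated here:

#include <linux/module.h>
#include <linux/watchdog.h>

struct foo_wd_data {
	struct watchdog_device wddev;
	/* hardware state, locks, ... */
};

/* Implemented as in the sch56xx conversion above */
static int foo_wd_start(struct watchdog_device *wddev);
static int foo_wd_stop(struct watchdog_device *wddev);
static int foo_wd_ping(struct watchdog_device *wddev);
static int foo_wd_set_timeout(struct watchdog_device *wddev, unsigned int timeout);

static const struct watchdog_info foo_wdinfo = {
	.options  = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
	.identity = "foo watchdog",
};

static const struct watchdog_ops foo_wdops = {
	.owner       = THIS_MODULE,
	.start       = foo_wd_start,
	.stop        = foo_wd_stop,
	.ping        = foo_wd_ping,
	.set_timeout = foo_wd_set_timeout,
};

static int foo_register_watchdog(struct device *parent, struct foo_wd_data *data)
{
	data->wddev.info        = &foo_wdinfo;
	data->wddev.ops         = &foo_wdops;
	data->wddev.parent      = parent;
	data->wddev.timeout     = 60;
	data->wddev.min_timeout = 1;
	data->wddev.max_timeout = 255 * 60;

	watchdog_set_drvdata(&data->wddev, data);

	/* The watchdog core now owns the chardev, magic close and ioctls */
	return watchdog_register_device(&data->wddev);
}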
diff --git a/drivers/hwmon/sch56xx-common.h b/drivers/hwmon/sch56xx-common.h
index 7475086..704ea2c 100644
--- a/drivers/hwmon/sch56xx-common.h
+++ b/drivers/hwmon/sch56xx-common.h
@@ -27,6 +27,6 @@ int sch56xx_read_virtual_reg16(u16 addr, u16 reg);
int sch56xx_read_virtual_reg12(u16 addr, u16 msb_reg, u16 lsn_reg,
int high_nibble);
-struct sch56xx_watchdog_data *sch56xx_watchdog_register(
+struct sch56xx_watchdog_data *sch56xx_watchdog_register(struct device *parent,
u16 addr, u32 revision, struct mutex *io_lock, int check_enabled);
void sch56xx_watchdog_unregister(struct sch56xx_watchdog_data *data);
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 6c4d8eb..8275f0e1 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -593,17 +593,14 @@ static int __devinit sis5595_probe(struct platform_device *pdev)
/* Reserve the ISA region */
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start, SIS5595_EXTENT,
- sis5595_driver.driver.name)) {
- err = -EBUSY;
- goto exit;
- }
+ if (!devm_request_region(&pdev->dev, res->start, SIS5595_EXTENT,
+ sis5595_driver.driver.name))
+ return -EBUSY;
- data = kzalloc(sizeof(struct sis5595_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit_release;
- }
+ data = devm_kzalloc(&pdev->dev, sizeof(struct sis5595_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
mutex_init(&data->lock);
mutex_init(&data->update_lock);
@@ -636,7 +633,7 @@ static int __devinit sis5595_probe(struct platform_device *pdev)
/* Register sysfs hooks */
err = sysfs_create_group(&pdev->dev.kobj, &sis5595_group);
if (err)
- goto exit_free;
+ return err;
if (data->maxins == 4) {
err = sysfs_create_group(&pdev->dev.kobj, &sis5595_group_in4);
if (err)
@@ -659,11 +656,6 @@ exit_remove_files:
sysfs_remove_group(&pdev->dev.kobj, &sis5595_group);
sysfs_remove_group(&pdev->dev.kobj, &sis5595_group_in4);
sysfs_remove_group(&pdev->dev.kobj, &sis5595_group_temp1);
-exit_free:
- kfree(data);
-exit_release:
- release_region(res->start, SIS5595_EXTENT);
-exit:
return err;
}
@@ -676,10 +668,6 @@ static int __devexit sis5595_remove(struct platform_device *pdev)
sysfs_remove_group(&pdev->dev.kobj, &sis5595_group_in4);
sysfs_remove_group(&pdev->dev.kobj, &sis5595_group_temp1);
- release_region(data->addr, SIS5595_EXTENT);
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-
return 0;
}
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index c5f6be4..65b07de 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -231,13 +231,9 @@ static const struct attribute_group smsc47b397_group = {
static int __devexit smsc47b397_remove(struct platform_device *pdev)
{
struct smsc47b397_data *data = platform_get_drvdata(pdev);
- struct resource *res;
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&pdev->dev.kobj, &smsc47b397_group);
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- release_region(res->start, SMSC_EXTENT);
- kfree(data);
return 0;
}
@@ -261,19 +257,17 @@ static int __devinit smsc47b397_probe(struct platform_device *pdev)
int err = 0;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start, SMSC_EXTENT,
- smsc47b397_driver.driver.name)) {
+ if (!devm_request_region(dev, res->start, SMSC_EXTENT,
+ smsc47b397_driver.driver.name)) {
dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
(unsigned long)res->start,
(unsigned long)res->start + SMSC_EXTENT - 1);
return -EBUSY;
}
- data = kzalloc(sizeof(struct smsc47b397_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto error_release;
- }
+ data = devm_kzalloc(dev, sizeof(struct smsc47b397_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
data->addr = res->start;
data->name = "smsc47b397";
@@ -283,7 +277,7 @@ static int __devinit smsc47b397_probe(struct platform_device *pdev)
err = sysfs_create_group(&dev->kobj, &smsc47b397_group);
if (err)
- goto error_free;
+ return err;
data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -295,10 +289,6 @@ static int __devinit smsc47b397_probe(struct platform_device *pdev)
error_remove:
sysfs_remove_group(&dev->kobj, &smsc47b397_group);
-error_free:
- kfree(data);
-error_release:
- release_region(res->start, SMSC_EXTENT);
return err;
}
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index b5aa38d..dba0c56 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -584,18 +584,17 @@ static void smsc47m1_restore(const struct smsc47m1_sio_data *sio_data)
#define CHECK 1
#define REQUEST 2
-#define RELEASE 3
/*
* This function can be used to:
* - test for resource conflicts with ACPI
* - request the resources
- * - release the resources
* We only allocate the I/O ports we really need, to minimize the risk of
* conflicts with ACPI or with other drivers.
*/
-static int smsc47m1_handle_resources(unsigned short address, enum chips type,
- int action, struct device *dev)
+static int __init smsc47m1_handle_resources(unsigned short address,
+ enum chips type, int action,
+ struct device *dev)
{
static const u8 ports_m1[] = {
/* register, region length */
@@ -642,21 +641,13 @@ static int smsc47m1_handle_resources(unsigned short address, enum chips type,
break;
case REQUEST:
/* Request the resources */
- if (!request_region(start, len, DRVNAME)) {
- dev_err(dev, "Region 0x%hx-0x%hx already in "
- "use!\n", start, start + len);
-
- /* Undo all requests */
- for (i -= 2; i >= 0; i -= 2)
- release_region(address + ports[i],
- ports[i + 1]);
+ if (!devm_request_region(dev, start, len, DRVNAME)) {
+ dev_err(dev,
+ "Region 0x%hx-0x%hx already in use!\n",
+ start, start + len);
return -EBUSY;
}
break;
- case RELEASE:
- /* Release the resources */
- release_region(start, len);
- break;
}
}
@@ -694,11 +685,9 @@ static int __init smsc47m1_probe(struct platform_device *pdev)
if (err < 0)
return err;
- data = kzalloc(sizeof(struct smsc47m1_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto error_release;
- }
+ data = devm_kzalloc(dev, sizeof(struct smsc47m1_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
data->addr = res->start;
data->type = sio_data->type;
@@ -733,8 +722,7 @@ static int __init smsc47m1_probe(struct platform_device *pdev)
}
if (!(fan1 || fan2 || fan3 || pwm1 || pwm2 || pwm3)) {
dev_warn(dev, "Device not configured, will not use\n");
- err = -ENODEV;
- goto error_free;
+ return -ENODEV;
}
/*
@@ -810,27 +798,16 @@ static int __init smsc47m1_probe(struct platform_device *pdev)
error_remove_files:
smsc47m1_remove_files(dev);
-error_free:
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-error_release:
- smsc47m1_handle_resources(res->start, sio_data->type, RELEASE, dev);
return err;
}
static int __exit smsc47m1_remove(struct platform_device *pdev)
{
struct smsc47m1_data *data = platform_get_drvdata(pdev);
- struct resource *res;
hwmon_device_unregister(data->hwmon_dev);
smsc47m1_remove_files(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- smsc47m1_handle_resources(res->start, data->type, RELEASE, &pdev->dev);
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-
return 0;
}
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 4705a8b..36a3478 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -554,11 +554,10 @@ static int smsc47m192_probe(struct i2c_client *client,
int config;
int err;
- data = kzalloc(sizeof(struct smsc47m192_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct smsc47m192_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
data->vrm = vid_which_vrm();
@@ -570,7 +569,7 @@ static int smsc47m192_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &smsc47m192_group);
if (err)
- goto exit_free;
+ return err;
/* Pin 110 is either in4 (+12V) or VID4 */
config = i2c_smbus_read_byte_data(client, SMSC47M192_REG_CONFIG);
@@ -592,9 +591,6 @@ static int smsc47m192_probe(struct i2c_client *client,
exit_remove_files:
sysfs_remove_group(&client->dev.kobj, &smsc47m192_group);
sysfs_remove_group(&client->dev.kobj, &smsc47m192_group_in4);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -606,8 +602,6 @@ static int smsc47m192_remove(struct i2c_client *client)
sysfs_remove_group(&client->dev.kobj, &smsc47m192_group);
sysfs_remove_group(&client->dev.kobj, &smsc47m192_group_in4);
- kfree(data);
-
return 0;
}
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index add9f01..080c263 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -361,12 +361,10 @@ static int thmc50_probe(struct i2c_client *client,
struct thmc50_data *data;
int err;
- data = kzalloc(sizeof(struct thmc50_data), GFP_KERNEL);
- if (!data) {
- pr_debug("thmc50: detect failed, kzalloc failed!\n");
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct thmc50_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
data->type = id->driver_data;
@@ -377,7 +375,7 @@ static int thmc50_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &thmc50_group);
if (err)
- goto exit_free;
+ return err;
/* Register ADM1022 sysfs hooks */
if (data->has_temp3) {
@@ -400,9 +398,6 @@ exit_remove_sysfs:
sysfs_remove_group(&client->dev.kobj, &temp3_group);
exit_remove_sysfs_thmc50:
sysfs_remove_group(&client->dev.kobj, &thmc50_group);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -415,8 +410,6 @@ static int thmc50_remove(struct i2c_client *client)
if (data->has_temp3)
sysfs_remove_group(&client->dev.kobj, &temp3_group);
- kfree(data);
-
return 0;
}
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 0d466b9d..4e1ff82 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -159,17 +159,16 @@ static int __devinit tmp102_probe(struct i2c_client *client,
return -ENODEV;
}
- tmp102 = kzalloc(sizeof(*tmp102), GFP_KERNEL);
- if (!tmp102) {
- dev_dbg(&client->dev, "kzalloc failed\n");
+ tmp102 = devm_kzalloc(&client->dev, sizeof(*tmp102), GFP_KERNEL);
+ if (!tmp102)
return -ENOMEM;
- }
+
i2c_set_clientdata(client, tmp102);
status = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
if (status < 0) {
dev_err(&client->dev, "error reading config register\n");
- goto fail_free;
+ return status;
}
tmp102->config_orig = status;
status = i2c_smbus_write_word_swapped(client, TMP102_CONF_REG,
@@ -213,9 +212,6 @@ fail_remove_sysfs:
fail_restore_config:
i2c_smbus_write_word_swapped(client, TMP102_CONF_REG,
tmp102->config_orig);
-fail_free:
- kfree(tmp102);
-
return status;
}
@@ -236,8 +232,6 @@ static int __devexit tmp102_remove(struct i2c_client *client)
config | TMP102_CONF_SD);
}
- kfree(tmp102);
-
return 0;
}
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index ea54c33..e620548 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -594,7 +594,6 @@ static int tmp401_remove(struct i2c_client *client)
&tmp411_attr[i].dev_attr);
}
- kfree(data);
return 0;
}
@@ -605,7 +604,8 @@ static int tmp401_probe(struct i2c_client *client,
struct tmp401_data *data;
const char *names[] = { "TMP401", "TMP411" };
- data = kzalloc(sizeof(struct tmp401_data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(struct tmp401_data),
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -646,7 +646,7 @@ static int tmp401_probe(struct i2c_client *client,
return 0;
exit_remove:
- tmp401_remove(client); /* will also free data for us */
+ tmp401_remove(client);
return err;
}
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 8fac87a..6a8ded2 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -267,7 +267,8 @@ static int tmp421_probe(struct i2c_client *client,
struct tmp421_data *data;
int err;
- data = kzalloc(sizeof(struct tmp421_data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(struct tmp421_data),
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -277,11 +278,11 @@ static int tmp421_probe(struct i2c_client *client,
err = tmp421_init_client(client);
if (err)
- goto exit_free;
+ return err;
err = sysfs_create_group(&client->dev.kobj, &tmp421_group);
if (err)
- goto exit_free;
+ return err;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -293,10 +294,6 @@ static int tmp421_probe(struct i2c_client *client,
exit_remove:
sysfs_remove_group(&client->dev.kobj, &tmp421_group);
-
-exit_free:
- kfree(data);
-
return err;
}
@@ -307,8 +304,6 @@ static int tmp421_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &tmp421_group);
- kfree(data);
-
return 0;
}
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 288135d..299399a 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -690,18 +690,17 @@ static int __devinit via686a_probe(struct platform_device *pdev)
/* Reserve the ISA region */
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start, VIA686A_EXTENT,
- via686a_driver.driver.name)) {
+ if (!devm_request_region(&pdev->dev, res->start, VIA686A_EXTENT,
+ via686a_driver.driver.name)) {
dev_err(&pdev->dev, "Region 0x%lx-0x%lx already in use!\n",
(unsigned long)res->start, (unsigned long)res->end);
return -ENODEV;
}
- data = kzalloc(sizeof(struct via686a_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit_release;
- }
+ data = devm_kzalloc(&pdev->dev, sizeof(struct via686a_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
platform_set_drvdata(pdev, data);
data->addr = res->start;
@@ -714,7 +713,7 @@ static int __devinit via686a_probe(struct platform_device *pdev)
/* Register sysfs hooks */
err = sysfs_create_group(&pdev->dev.kobj, &via686a_group);
if (err)
- goto exit_free;
+ return err;
data->hwmon_dev = hwmon_device_register(&pdev->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -726,10 +725,6 @@ static int __devinit via686a_probe(struct platform_device *pdev)
exit_remove_files:
sysfs_remove_group(&pdev->dev.kobj, &via686a_group);
-exit_free:
- kfree(data);
-exit_release:
- release_region(res->start, VIA686A_EXTENT);
return err;
}
@@ -740,10 +735,6 @@ static int __devexit via686a_remove(struct platform_device *pdev)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&pdev->dev.kobj, &via686a_group);
- release_region(data->addr, VIA686A_EXTENT);
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-
return 0;
}
diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
index c2c5c72..f2c6115 100644
--- a/drivers/hwmon/vt1211.c
+++ b/drivers/hwmon/vt1211.c
@@ -1148,19 +1148,18 @@ static int __devinit vt1211_probe(struct platform_device *pdev)
struct resource *res;
int i, err;
- data = kzalloc(sizeof(struct vt1211_data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct vt1211_data), GFP_KERNEL);
if (!data) {
- err = -ENOMEM;
dev_err(dev, "Out of memory\n");
- goto EXIT;
+ return -ENOMEM;
}
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start, resource_size(res), DRVNAME)) {
- err = -EBUSY;
+ if (!devm_request_region(dev, res->start, resource_size(res),
+ DRVNAME)) {
dev_err(dev, "Failed to request region 0x%lx-0x%lx\n",
(unsigned long)res->start, (unsigned long)res->end);
- goto EXIT_KFREE;
+ return -EBUSY;
}
data->addr = res->start;
data->name = DRVNAME;
@@ -1215,26 +1214,15 @@ EXIT_DEV_REMOVE:
dev_err(dev, "Sysfs interface creation failed (%d)\n", err);
EXIT_DEV_REMOVE_SILENT:
vt1211_remove_sysfs(pdev);
- release_region(res->start, resource_size(res));
-EXIT_KFREE:
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-EXIT:
return err;
}
static int __devexit vt1211_remove(struct platform_device *pdev)
{
struct vt1211_data *data = platform_get_drvdata(pdev);
- struct resource *res;
hwmon_device_unregister(data->hwmon_dev);
vt1211_remove_sysfs(pdev);
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- release_region(res->start, resource_size(res));
return 0;
}
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 54922ed..1821b74 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -599,6 +599,7 @@ static void nct6775_write_fan_div(struct w83627ehf_data *data, int nr)
reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x7)
| ((data->fan_div[1] << 4) & 0x70);
w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg);
+ break;
case 2:
reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x70)
| (data->fan_div[2] & 0x7);
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 5ce54a2..ab48252 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -1359,19 +1359,17 @@ static int __devinit w83627hf_probe(struct platform_device *pdev)
};
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start, WINB_REGION_SIZE, DRVNAME)) {
+ if (!devm_request_region(dev, res->start, WINB_REGION_SIZE, DRVNAME)) {
dev_err(dev, "Failed to request region 0x%lx-0x%lx\n",
(unsigned long)res->start,
(unsigned long)(res->start + WINB_REGION_SIZE - 1));
- err = -EBUSY;
- goto ERROR0;
+ return -EBUSY;
}
- data = kzalloc(sizeof(struct w83627hf_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto ERROR1;
- }
+ data = devm_kzalloc(dev, sizeof(struct w83627hf_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
data->addr = res->start;
data->type = sio_data->type;
data->name = names[sio_data->type];
@@ -1391,7 +1389,7 @@ static int __devinit w83627hf_probe(struct platform_device *pdev)
/* Register common device attributes */
err = sysfs_create_group(&dev->kobj, &w83627hf_group);
if (err)
- goto ERROR3;
+ return err;
/* Register chip-specific device attributes */
if (data->type == w83627hf || data->type == w83697hf)
@@ -1419,7 +1417,7 @@ static int __devinit w83627hf_probe(struct platform_device *pdev)
&sensor_dev_attr_pwm1_freq.dev_attr))
|| (err = device_create_file(dev,
&sensor_dev_attr_pwm2_freq.dev_attr)))
- goto ERROR4;
+ goto error;
if (data->type != w83697hf)
if ((err = device_create_file(dev,
@@ -1454,7 +1452,7 @@ static int __devinit w83627hf_probe(struct platform_device *pdev)
&sensor_dev_attr_temp3_beep.dev_attr))
|| (err = device_create_file(dev,
&sensor_dev_attr_temp3_type.dev_attr)))
- goto ERROR4;
+ goto error;
if (data->type != w83697hf && data->vid != 0xff) {
/* Convert VID to voltage based on VRM */
@@ -1462,14 +1460,14 @@ static int __devinit w83627hf_probe(struct platform_device *pdev)
if ((err = device_create_file(dev, &dev_attr_cpu0_vid))
|| (err = device_create_file(dev, &dev_attr_vrm)))
- goto ERROR4;
+ goto error;
}
if (data->type == w83627thf || data->type == w83637hf
|| data->type == w83687thf) {
err = device_create_file(dev, &sensor_dev_attr_pwm3.dev_attr);
if (err)
- goto ERROR4;
+ goto error;
}
if (data->type == w83637hf || data->type == w83687thf)
@@ -1479,57 +1477,45 @@ static int __devinit w83627hf_probe(struct platform_device *pdev)
&sensor_dev_attr_pwm2_freq.dev_attr))
|| (err = device_create_file(dev,
&sensor_dev_attr_pwm3_freq.dev_attr)))
- goto ERROR4;
+ goto error;
if (data->type != w83627hf)
if ((err = device_create_file(dev,
&sensor_dev_attr_pwm1_enable.dev_attr))
|| (err = device_create_file(dev,
&sensor_dev_attr_pwm2_enable.dev_attr)))
- goto ERROR4;
+ goto error;
if (data->type == w83627thf || data->type == w83637hf
|| data->type == w83687thf) {
err = device_create_file(dev,
&sensor_dev_attr_pwm3_enable.dev_attr);
if (err)
- goto ERROR4;
+ goto error;
}
data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
- goto ERROR4;
+ goto error;
}
return 0;
- ERROR4:
+ error:
sysfs_remove_group(&dev->kobj, &w83627hf_group);
sysfs_remove_group(&dev->kobj, &w83627hf_group_opt);
- ERROR3:
- platform_set_drvdata(pdev, NULL);
- kfree(data);
- ERROR1:
- release_region(res->start, WINB_REGION_SIZE);
- ERROR0:
return err;
}
static int __devexit w83627hf_remove(struct platform_device *pdev)
{
struct w83627hf_data *data = platform_get_drvdata(pdev);
- struct resource *res;
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&pdev->dev.kobj, &w83627hf_group);
sysfs_remove_group(&pdev->dev.kobj, &w83627hf_group_opt);
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- release_region(res->start, WINB_REGION_SIZE);
return 0;
}
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index b03d54a..5a5046d 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -867,6 +867,7 @@ w83781d_detect_subclients(struct i2c_client *new_client)
struct i2c_adapter *adapter = new_client->adapter;
struct w83781d_data *data = i2c_get_clientdata(new_client);
enum chips kind = data->type;
+ int num_sc = 1;
id = i2c_adapter_id(adapter);
@@ -891,6 +892,7 @@ w83781d_detect_subclients(struct i2c_client *new_client)
}
if (kind != w83783s) {
+ num_sc = 2;
if (force_subclients[0] == id &&
force_subclients[1] == address) {
sc_addr[1] = force_subclients[3];
@@ -906,7 +908,7 @@ w83781d_detect_subclients(struct i2c_client *new_client)
}
}
- for (i = 0; i <= 1; i++) {
+ for (i = 0; i < num_sc; i++) {
data->lm75[i] = i2c_new_dummy(adapter, sc_addr[i]);
if (!data->lm75[i]) {
dev_err(&new_client->dev, "Subclient %d "
@@ -917,8 +919,6 @@ w83781d_detect_subclients(struct i2c_client *new_client)
goto ERROR_SC_3;
goto ERROR_SC_2;
}
- if (kind == w83783s)
- break;
}
return 0;
@@ -1213,11 +1213,9 @@ w83781d_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct w83781d_data *data;
int err;
- data = kzalloc(sizeof(struct w83781d_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto ERROR1;
- }
+ data = devm_kzalloc(dev, sizeof(struct w83781d_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->lock);
@@ -1229,7 +1227,7 @@ w83781d_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* attach secondary i2c lm75-like clients */
err = w83781d_detect_subclients(client);
if (err)
- goto ERROR3;
+ return err;
/* Initialize the chip */
w83781d_init_device(dev);
@@ -1237,25 +1235,22 @@ w83781d_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Register sysfs hooks */
err = w83781d_create_files(dev, data->type, 0);
if (err)
- goto ERROR4;
+ goto exit_remove_files;
data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
- goto ERROR4;
+ goto exit_remove_files;
}
return 0;
-ERROR4:
+ exit_remove_files:
w83781d_remove_files(dev);
if (data->lm75[0])
i2c_unregister_device(data->lm75[0]);
if (data->lm75[1])
i2c_unregister_device(data->lm75[1]);
-ERROR3:
- kfree(data);
-ERROR1:
return err;
}
@@ -1273,8 +1268,6 @@ w83781d_remove(struct i2c_client *client)
if (data->lm75[1])
i2c_unregister_device(data->lm75[1]);
- kfree(data);
-
return 0;
}
@@ -1780,17 +1773,16 @@ w83781d_isa_probe(struct platform_device *pdev)
/* Reserve the ISA region */
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start + W83781D_ADDR_REG_OFFSET, 2,
- "w83781d")) {
- err = -EBUSY;
- goto exit;
- }
+ if (!devm_request_region(&pdev->dev,
+ res->start + W83781D_ADDR_REG_OFFSET, 2,
+ "w83781d"))
+ return -EBUSY;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(struct w83781d_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- data = kzalloc(sizeof(struct w83781d_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit_release_region;
- }
mutex_init(&data->lock);
data->isa_addr = res->start;
platform_set_drvdata(pdev, data);
@@ -1829,10 +1821,6 @@ w83781d_isa_probe(struct platform_device *pdev)
exit_remove_files:
w83781d_remove_files(&pdev->dev);
device_remove_file(&pdev->dev, &dev_attr_name);
- kfree(data);
- exit_release_region:
- release_region(res->start + W83781D_ADDR_REG_OFFSET, 2);
- exit:
return err;
}
@@ -1844,8 +1832,6 @@ w83781d_isa_remove(struct platform_device *pdev)
hwmon_device_unregister(data->hwmon_dev);
w83781d_remove_files(&pdev->dev);
device_remove_file(&pdev->dev, &dev_attr_name);
- release_region(data->isa_addr + W83781D_ADDR_REG_OFFSET, 2);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 2f446f9..9ade4d4 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -1384,18 +1384,17 @@ static int w83791d_probe(struct i2c_client *client,
(val1 >> 5) & 0x07, (val1 >> 1) & 0x0f, val1);
#endif
- data = kzalloc(sizeof(struct w83791d_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto error0;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct w83791d_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
err = w83791d_detect_subclients(client);
if (err)
- goto error1;
+ return err;
/* Initialize the chip */
w83791d_init_client(client);
@@ -1440,9 +1439,6 @@ error3:
i2c_unregister_device(data->lm75[0]);
if (data->lm75[1] != NULL)
i2c_unregister_device(data->lm75[1]);
-error1:
- kfree(data);
-error0:
return err;
}
@@ -1458,7 +1454,6 @@ static int w83791d_remove(struct i2c_client *client)
if (data->lm75[1] != NULL)
i2c_unregister_device(data->lm75[1]);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index ffb5fdf..0ba5a2b 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -1422,11 +1422,9 @@ w83792d_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct device *dev = &client->dev;
int i, val1, err;
- data = kzalloc(sizeof(struct w83792d_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto ERROR0;
- }
+ data = devm_kzalloc(dev, sizeof(struct w83792d_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
data->valid = 0;
@@ -1434,7 +1432,7 @@ w83792d_probe(struct i2c_client *client, const struct i2c_device_id *id)
err = w83792d_detect_subclients(client);
if (err)
- goto ERROR1;
+ return err;
/* Initialize the chip */
w83792d_init_client(client);
@@ -1448,7 +1446,7 @@ w83792d_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Register sysfs hooks */
err = sysfs_create_group(&dev->kobj, &w83792d_group);
if (err)
- goto ERROR3;
+ goto exit_i2c_unregister;
/*
* Read GPIO enable register to check if pins for fan 4,5 are used as
@@ -1493,14 +1491,11 @@ exit_remove_files:
sysfs_remove_group(&dev->kobj, &w83792d_group);
for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++)
sysfs_remove_group(&dev->kobj, &w83792d_group_fan[i]);
-ERROR3:
+exit_i2c_unregister:
if (data->lm75[0] != NULL)
i2c_unregister_device(data->lm75[0]);
if (data->lm75[1] != NULL)
i2c_unregister_device(data->lm75[1]);
-ERROR1:
- kfree(data);
-ERROR0:
return err;
}
@@ -1521,7 +1516,6 @@ w83792d_remove(struct i2c_client *client)
if (data->lm75[1] != NULL)
i2c_unregister_device(data->lm75[1]);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index d887cb3..b813c64 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -2157,11 +2157,9 @@ static int w83795_probe(struct i2c_client *client,
struct w83795_data *data;
int err;
- data = kzalloc(sizeof(struct w83795_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(dev, sizeof(struct w83795_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
data->chip_type = id->driver_data;
@@ -2247,8 +2245,6 @@ static int w83795_probe(struct i2c_client *client,
exit_remove:
w83795_handle_files(dev, device_remove_file_wrapper);
- kfree(data);
-exit:
return err;
}
@@ -2258,7 +2254,6 @@ static int w83795_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
w83795_handle_files(&client->dev, device_remove_file_wrapper);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index 5f14e38..39dbe99 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -176,19 +176,18 @@ static int w83l785ts_detect(struct i2c_client *client,
return 0;
}
-static int w83l785ts_probe(struct i2c_client *new_client,
+static int w83l785ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct w83l785ts_data *data;
- int err = 0;
+ struct device *dev = &client->dev;
+ int err;
- data = kzalloc(sizeof(struct w83l785ts_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(dev, sizeof(struct w83l785ts_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- i2c_set_clientdata(new_client, data);
+ i2c_set_clientdata(client, data);
data->valid = 0;
mutex_init(&data->update_lock);
@@ -200,18 +199,16 @@ static int w83l785ts_probe(struct i2c_client *new_client,
* Nothing yet, assume it is already started.
*/
- err = device_create_file(&new_client->dev,
- &sensor_dev_attr_temp1_input.dev_attr);
+ err = device_create_file(dev, &sensor_dev_attr_temp1_input.dev_attr);
if (err)
- goto exit_remove;
+ return err;
- err = device_create_file(&new_client->dev,
- &sensor_dev_attr_temp1_max.dev_attr);
+ err = device_create_file(dev, &sensor_dev_attr_temp1_max.dev_attr);
if (err)
goto exit_remove;
/* Register sysfs hooks */
- data->hwmon_dev = hwmon_device_register(&new_client->dev);
+ data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove;
@@ -220,12 +217,8 @@ static int w83l785ts_probe(struct i2c_client *new_client,
return 0;
exit_remove:
- device_remove_file(&new_client->dev,
- &sensor_dev_attr_temp1_input.dev_attr);
- device_remove_file(&new_client->dev,
- &sensor_dev_attr_temp1_max.dev_attr);
- kfree(data);
-exit:
+ device_remove_file(dev, &sensor_dev_attr_temp1_input.dev_attr);
+ device_remove_file(dev, &sensor_dev_attr_temp1_max.dev_attr);
return err;
}
@@ -239,7 +232,6 @@ static int w83l785ts_remove(struct i2c_client *client)
device_remove_file(&client->dev,
&sensor_dev_attr_temp1_max.dev_attr);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/wm831x-hwmon.c b/drivers/hwmon/wm831x-hwmon.c
index 07cb25a..d0db1f2 100644
--- a/drivers/hwmon/wm831x-hwmon.c
+++ b/drivers/hwmon/wm831x-hwmon.c
@@ -163,7 +163,8 @@ static int __devinit wm831x_hwmon_probe(struct platform_device *pdev)
struct wm831x_hwmon *hwmon;
int ret;
- hwmon = kzalloc(sizeof(struct wm831x_hwmon), GFP_KERNEL);
+ hwmon = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_hwmon),
+ GFP_KERNEL);
if (!hwmon)
return -ENOMEM;
@@ -171,7 +172,7 @@ static int __devinit wm831x_hwmon_probe(struct platform_device *pdev)
ret = sysfs_create_group(&pdev->dev.kobj, &wm831x_attr_group);
if (ret)
- goto err;
+ return ret;
hwmon->classdev = hwmon_device_register(&pdev->dev);
if (IS_ERR(hwmon->classdev)) {
@@ -185,8 +186,6 @@ static int __devinit wm831x_hwmon_probe(struct platform_device *pdev)
err_sysfs:
sysfs_remove_group(&pdev->dev.kobj, &wm831x_attr_group);
-err:
- kfree(hwmon);
return ret;
}
@@ -196,8 +195,6 @@ static int __devexit wm831x_hwmon_remove(struct platform_device *pdev)
hwmon_device_unregister(hwmon->classdev);
sysfs_remove_group(&pdev->dev.kobj, &wm831x_attr_group);
- platform_set_drvdata(pdev, NULL);
- kfree(hwmon);
return 0;
}
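All of the hwmon conversions above follow the same managed-resource pattern: kzalloc() and request_region() with hand-rolled error unwinding become devm_kzalloc() and devm_request_region(), whose allocations the driver core releases automatically when the device is unbound. A minimal sketch of the resulting probe/remove shape (hypothetical "foo" platform driver, for illustration only, not part of this series):

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_data {
	unsigned long addr;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_data *data;
	struct resource *res;

	/* Freed automatically when the device is unbound */
	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
	/* The I/O region is released automatically as well */
	if (!devm_request_region(&pdev->dev, res->start,
				 resource_size(res), "foo"))
		return -EBUSY;

	data->addr = res->start;
	platform_set_drvdata(pdev, data);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	/* No kfree()/release_region(): devm handles both */
	return 0;
}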
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index 61c9cf1..1201a15 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -345,7 +345,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
spin_lock_init(&hwlock->lock);
hwlock->bank = bank;
- ret = hwspin_lock_register_single(hwlock, i);
+ ret = hwspin_lock_register_single(hwlock, base_id + i);
if (ret)
goto reg_failed;
}
@@ -354,7 +354,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
reg_failed:
while (--i >= 0)
- hwspin_lock_unregister_single(i);
+ hwspin_lock_unregister_single(base_id + i);
return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);
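The two hunks above matter as soon as more than one bank is registered: each lock must be published under its global id (base_id + i), because that global id is what consumers hand back to the framework. A hedged sketch of addressing a lock in a second bank (lock counts and ids here are made up):

#include <linux/hwspinlock.h>

/*
 * With two 8-lock banks registered at base_id 0 and base_id 8, lock 3
 * of the second bank has global id 8 + 3 = 11.  Before this fix it was
 * registered under id 3 and collided with the first bank.
 */
static struct hwspinlock *foo_request_bank1_lock3(void)
{
	return hwspin_lock_request_specific(11);
}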
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 7f0b832..fad22b0 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -608,7 +608,7 @@ bailout:
static u32 bit_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ return I2C_FUNC_I2C | I2C_FUNC_NOSTART | I2C_FUNC_SMBUS_EMUL |
I2C_FUNC_SMBUS_READ_BLOCK_DATA |
I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
I2C_FUNC_10BIT_ADDR | I2C_FUNC_PROTOCOL_MANGLING;
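I2C_FUNC_NOSTART advertises that the adapter can send messages without a (repeated) START condition, which some clients (e.g. SCCB-style devices) require; they can probe for it at run time. A short client-side sketch (the foo_* name is illustrative):

#include <linux/i2c.h>

static int foo_check_adapter(struct i2c_client *client)
{
	/* Refuse to bind if the adapter cannot suppress the START */
	if (!i2c_check_functionality(client->adapter, I2C_FUNC_NOSTART))
		return -EOPNOTSUPP;

	return 0;
}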
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 8d6b504..370031a 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -53,7 +53,6 @@
#include <linux/of_i2c.h>
#include <linux/pinctrl/consumer.h>
-#include <mach/irqs.h>
#include <mach/hardware.h>
#include <mach/i2c.h>
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 5267ab9..a92440d 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -965,8 +965,7 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev)
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
adap->algo = &nmk_i2c_algo;
- adap->timeout = pdata->timeout ? msecs_to_jiffies(pdata->timeout) :
- msecs_to_jiffies(20000);
+ adap->timeout = msecs_to_jiffies(pdata->timeout);
snprintf(adap->name, sizeof(adap->name),
"Nomadik I2C%d at %lx", pdev->id, (unsigned long)res->start);
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
index 03b6157..a26dfb8 100644
--- a/drivers/i2c/busses/i2c-nuc900.c
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -502,7 +502,8 @@ static int nuc900_i2c_xfer(struct i2c_adapter *adap,
/* declare our i2c functionality */
static u32 nuc900_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART |
+ I2C_FUNC_PROTOCOL_MANGLING;
}
/* i2c bus registration info */
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 31c47e1..5285f85 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -227,28 +227,138 @@ static int __devexit i2c_powermac_remove(struct platform_device *dev)
return 0;
}
+static u32 __devinit i2c_powermac_get_addr(struct i2c_adapter *adap,
+ struct pmac_i2c_bus *bus,
+ struct device_node *node)
+{
+ const __be32 *prop;
+ int len;
+
+ /* First check for valid "reg" */
+ prop = of_get_property(node, "reg", &len);
+ if (prop && (len >= sizeof(int)))
+ return (be32_to_cpup(prop) & 0xff) >> 1;
+
+ /* Then check old-style "i2c-address" */
+ prop = of_get_property(node, "i2c-address", &len);
+ if (prop && (len >= sizeof(int)))
+ return (be32_to_cpup(prop) & 0xff) >> 1;
+
+ /* Now handle some devices with missing "reg" properties */
+ if (!strcmp(node->name, "cereal"))
+ return 0x60;
+ else if (!strcmp(node->name, "deq"))
+ return 0x34;
+
+ dev_warn(&adap->dev, "No i2c address for %s\n", node->full_name);
+
+ return 0xffffffff;
+}
+
+static void __devinit i2c_powermac_create_one(struct i2c_adapter *adap,
+ const char *type,
+ u32 addr)
+{
+ struct i2c_board_info info = {};
+ struct i2c_client *newdev;
+
+ strncpy(info.type, type, sizeof(info.type));
+ info.addr = addr;
+ newdev = i2c_new_device(adap, &info);
+ if (!newdev)
+ dev_err(&adap->dev,
+ "i2c-powermac: Failure to register missing %s\n",
+ type);
+}
+
+static void __devinit i2c_powermac_add_missing(struct i2c_adapter *adap,
+ struct pmac_i2c_bus *bus,
+ bool found_onyx)
+{
+ struct device_node *busnode = pmac_i2c_get_bus_node(bus);
+ int rc;
+
+ /* Check for the onyx audio codec */
+#define ONYX_REG_CONTROL 67
+ if (of_device_is_compatible(busnode, "k2-i2c") && !found_onyx) {
+ union i2c_smbus_data data;
+
+ rc = i2c_smbus_xfer(adap, 0x46, 0, I2C_SMBUS_READ,
+ ONYX_REG_CONTROL, I2C_SMBUS_BYTE_DATA,
+ &data);
+ if (rc >= 0)
+ i2c_powermac_create_one(adap, "MAC,pcm3052", 0x46);
+
+ rc = i2c_smbus_xfer(adap, 0x47, 0, I2C_SMBUS_READ,
+ ONYX_REG_CONTROL, I2C_SMBUS_BYTE_DATA,
+ &data);
+ if (rc >= 0)
+ i2c_powermac_create_one(adap, "MAC,pcm3052", 0x47);
+ }
+}
+
+static bool __devinit i2c_powermac_get_type(struct i2c_adapter *adap,
+ struct device_node *node,
+ u32 addr, char *type, int type_size)
+{
+ char tmp[16];
+
+ /* Note: we do _NOT_ want the standard
+ * i2c drivers to match with any of our powermac stuff
+ * unless they have been specifically modified to handle
+ * it on a case by case basis. For example, for thermal
+ * control, things like lm75 etc... shall match with their
+ * corresponding windfarm drivers, _NOT_ the generic ones,
+ * so we force a prefix of AAPL, onto the modalias to
+ * make that happen
+ */
+
+ /* First try proper modalias */
+ if (of_modalias_node(node, tmp, sizeof(tmp)) >= 0) {
+ snprintf(type, type_size, "MAC,%s", tmp);
+ return true;
+ }
+
+ /* Now look for known workarounds */
+ if (!strcmp(node->name, "deq")) {
+ /* Apple uses address 0x34 for TAS3001 and 0x35 for TAS3004 */
+ if (addr == 0x34) {
+ snprintf(type, type_size, "MAC,tas3001");
+ return true;
+ } else if (addr == 0x35) {
+ snprintf(type, type_size, "MAC,tas3004");
+ return true;
+ }
+ }
+
+ dev_err(&adap->dev, "i2c-powermac: modalias failure"
+ " on %s\n", node->full_name);
+ return false;
+}
+
static void __devinit i2c_powermac_register_devices(struct i2c_adapter *adap,
struct pmac_i2c_bus *bus)
{
struct i2c_client *newdev;
struct device_node *node;
+ bool found_onyx = false;
+
+ /*
+ * In some cases we end up with the via-pmu node itself; in that
+ * case we skip this function completely, as the device-tree will
+ * not contain anything useful.
+ */
+ if (!strcmp(adap->dev.of_node->name, "via-pmu"))
+ return;
for_each_child_of_node(adap->dev.of_node, node) {
struct i2c_board_info info = {};
- struct dev_archdata dev_ad = {};
- const __be32 *reg;
- char tmp[16];
u32 addr;
- int len;
/* Get address & channel */
- reg = of_get_property(node, "reg", &len);
- if (!reg || (len < sizeof(int))) {
- dev_err(&adap->dev, "i2c-powermac: invalid reg on %s\n",
- node->full_name);
+ addr = i2c_powermac_get_addr(adap, bus, node);
+ if (addr == 0xffffffff)
continue;
- }
- addr = be32_to_cpup(reg);
/* Multibus setup, check channel */
if (!pmac_i2c_match_adapter(node, adap))
@@ -257,27 +367,23 @@ static void __devinit i2c_powermac_register_devices(struct i2c_adapter *adap,
dev_dbg(&adap->dev, "i2c-powermac: register %s\n",
node->full_name);
- /* Make up a modalias. Note: we to _NOT_ want the standard
- * i2c drivers to match with any of our powermac stuff
- * unless they have been specifically modified to handle
- * it on a case by case basis. For example, for thermal
- * control, things like lm75 etc... shall match with their
- * corresponding windfarm drivers, _NOT_ the generic ones,
- * so we force a prefix of AAPL, onto the modalias to
- * make that happen
+ /*
+ * Keep track of some device existence to handle
+ * workarounds later.
*/
- if (of_modalias_node(node, tmp, sizeof(tmp)) < 0) {
- dev_err(&adap->dev, "i2c-powermac: modalias failure"
- " on %s\n", node->full_name);
+ if (of_device_is_compatible(node, "pcm3052"))
+ found_onyx = true;
+
+ /* Make up a modalias */
+ if (!i2c_powermac_get_type(adap, node, addr,
+ info.type, sizeof(info.type))) {
continue;
}
- snprintf(info.type, sizeof(info.type), "MAC,%s", tmp);
/* Fill out the rest of the info structure */
- info.addr = (addr & 0xff) >> 1;
+ info.addr = addr;
info.irq = irq_of_parse_and_map(node, 0);
info.of_node = of_node_get(node);
- info.archdata = &dev_ad;
newdev = i2c_new_device(adap, &info);
if (!newdev) {
@@ -292,6 +398,9 @@ static void __devinit i2c_powermac_register_devices(struct i2c_adapter *adap,
continue;
}
}
+
+ /* Additional workarounds */
+ i2c_powermac_add_missing(adap, bus, found_onyx);
}
static int __devinit i2c_powermac_probe(struct platform_device *dev)
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index fa0b134..0195915 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -626,7 +626,8 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
/* declare our i2c functionality */
static u32 s3c24xx_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_NOSTART |
+ I2C_FUNC_PROTOCOL_MANGLING;
}
/* i2c bus registration info */
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 8b2e555..3da7ee3 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -341,7 +341,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
u32 val;
int err = 0;
- clk_enable(i2c_dev->clk);
+ clk_prepare_enable(i2c_dev->clk);
tegra_periph_reset_assert(i2c_dev->clk);
udelay(2);
@@ -372,7 +372,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
if (tegra_i2c_flush_fifos(i2c_dev))
err = -ETIMEDOUT;
- clk_disable(i2c_dev->clk);
+ clk_disable_unprepare(i2c_dev->clk);
if (i2c_dev->irq_disabled) {
i2c_dev->irq_disabled = 0;
@@ -546,14 +546,14 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
if (i2c_dev->is_suspended)
return -EBUSY;
- clk_enable(i2c_dev->clk);
+ clk_prepare_enable(i2c_dev->clk);
for (i = 0; i < num; i++) {
int stop = (i == (num - 1)) ? 1 : 0;
ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], stop);
if (ret)
break;
}
- clk_disable(i2c_dev->clk);
+ clk_disable_unprepare(i2c_dev->clk);
return ret ?: i;
}
@@ -666,7 +666,7 @@ static int __devinit tegra_i2c_probe(struct platform_device *pdev)
goto err_free;
}
- clk_enable(i2c_dev->i2c_clk);
+ clk_prepare_enable(i2c_dev->i2c_clk);
i2c_set_adapdata(&i2c_dev->adapter, i2c_dev);
i2c_dev->adapter.owner = THIS_MODULE;
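The Tegra changes follow the common clock framework rule that a clock must be prepared (which may sleep) before it can be enabled; clk_prepare_enable() and clk_disable_unprepare() bundle the two steps. A minimal sketch of the pairing (illustrative only):

#include <linux/clk.h>

static int foo_do_transfer(struct clk *clk)
{
	int ret;

	ret = clk_prepare_enable(clk);	/* prepare + enable in one call */
	if (ret)
		return ret;

	/* ... touch the hardware while the clock is running ... */

	clk_disable_unprepare(clk);	/* disable + unprepare */
	return 0;
}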
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 4504832..5ec2261 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -265,19 +265,41 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
res = 0;
for (i = 0; i < rdwr_arg.nmsgs; i++) {
- /* Limit the size of the message to a sane amount;
- * and don't let length change either. */
- if ((rdwr_pa[i].len > 8192) ||
- (rdwr_pa[i].flags & I2C_M_RECV_LEN)) {
+ /* Limit the size of the message to a sane amount */
+ if (rdwr_pa[i].len > 8192) {
res = -EINVAL;
break;
}
+
data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
if (IS_ERR(rdwr_pa[i].buf)) {
res = PTR_ERR(rdwr_pa[i].buf);
break;
}
+
+ /*
+ * If the message length is received from the slave (similar
+ * to SMBus block read), we must ensure that the buffer will
+ * be large enough to cope with a message length of
+ * I2C_SMBUS_BLOCK_MAX as this is the maximum underlying bus
+ * drivers allow. The first byte in the buffer must be
+ * pre-filled with the number of extra bytes, which must be
+ * at least one to hold the message length, but can be
+ * greater (for example to account for a checksum byte at
+ * the end of the message.)
+ */
+ if (rdwr_pa[i].flags & I2C_M_RECV_LEN) {
+ if (!(rdwr_pa[i].flags & I2C_M_RD) ||
+ rdwr_pa[i].buf[0] < 1 ||
+ rdwr_pa[i].len < rdwr_pa[i].buf[0] +
+ I2C_SMBUS_BLOCK_MAX) {
+ res = -EINVAL;
+ break;
+ }
+
+ rdwr_pa[i].len = rdwr_pa[i].buf[0];
+ }
}
if (res < 0) {
int j;
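With the new check in place, a user-space I2C_RDWR transaction can perform an SMBus-style block read: the receive buffer must reserve I2C_SMBUS_BLOCK_MAX bytes on top of buf[0], which is pre-filled with the number of extra bytes (at least 1, for the length byte itself). A hedged user-space sketch (slave address 0x50 and register 0x10 are made up; header layout follows the kernel uapi of this era):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/i2c.h>
#include <linux/i2c-dev.h>

static int foo_block_read(int fd, __u8 reg, __u8 *out)
{
	__u8 buf[1 + I2C_SMBUS_BLOCK_MAX];
	struct i2c_msg msgs[2] = {
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &reg },
		{ .addr = 0x50, .flags = I2C_M_RD | I2C_M_RECV_LEN,
		  .len = sizeof(buf), .buf = buf },
	};
	struct i2c_rdwr_ioctl_data xfer = { .msgs = msgs, .nmsgs = 2 };

	buf[0] = 1;	/* one extra byte: the length byte itself */
	if (ioctl(fd, I2C_RDWR, &xfer) < 0)
		return -1;

	/* buf[0] now holds the byte count reported by the slave */
	memcpy(out, buf + 1, buf[0]);
	return buf[0];
}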
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index beb2491..a0edd98 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -37,4 +37,16 @@ config I2C_MUX_PCA954x
This driver can also be built as a module. If so, the module
will be called i2c-mux-pca954x.
+config I2C_MUX_PINCTRL
+ tristate "pinctrl-based I2C multiplexer"
+ depends on PINCTRL
+ help
+ If you say yes to this option, support will be included for an I2C
+ multiplexer that uses the pinctrl subsystem, i.e. pin multiplexing.
+ This is useful for SoCs whose I2C module's signals can be routed to
+ different sets of pins at run-time.
+
+ This driver can also be built as a module. If so, the module will be
+ called i2c-mux-pinctrl.
+
endmenu
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index 5826249..76da869 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -4,5 +4,6 @@
obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o
obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o
obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o
+obj-$(CONFIG_I2C_MUX_PINCTRL) += i2c-mux-pinctrl.o
ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
new file mode 100644
index 0000000..46a6697
--- /dev/null
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -0,0 +1,279 @@
+/*
+ * I2C multiplexer using pinctrl API
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of_i2c.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/i2c-mux-pinctrl.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct i2c_mux_pinctrl {
+ struct device *dev;
+ struct i2c_mux_pinctrl_platform_data *pdata;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state **states;
+ struct pinctrl_state *state_idle;
+ struct i2c_adapter *parent;
+ struct i2c_adapter **busses;
+};
+
+static int i2c_mux_pinctrl_select(struct i2c_adapter *adap, void *data,
+ u32 chan)
+{
+ struct i2c_mux_pinctrl *mux = data;
+
+ return pinctrl_select_state(mux->pinctrl, mux->states[chan]);
+}
+
+static int i2c_mux_pinctrl_deselect(struct i2c_adapter *adap, void *data,
+ u32 chan)
+{
+ struct i2c_mux_pinctrl *mux = data;
+
+ return pinctrl_select_state(mux->pinctrl, mux->state_idle);
+}
+
+#ifdef CONFIG_OF
+static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
+ struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int num_names, i, ret;
+ struct device_node *adapter_np;
+ struct i2c_adapter *adapter;
+
+ if (!np)
+ return 0;
+
+ mux->pdata = devm_kzalloc(&pdev->dev, sizeof(*mux->pdata), GFP_KERNEL);
+ if (!mux->pdata) {
+ dev_err(mux->dev,
+ "Cannot allocate i2c_mux_pinctrl_platform_data\n");
+ return -ENOMEM;
+ }
+
+ num_names = of_property_count_strings(np, "pinctrl-names");
+ if (num_names < 0) {
+ dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
+ num_names);
+ return num_names;
+ }
+
+ mux->pdata->pinctrl_states = devm_kzalloc(&pdev->dev,
+ sizeof(*mux->pdata->pinctrl_states) * num_names,
+ GFP_KERNEL);
+ if (!mux->pdata->pinctrl_states) {
+ dev_err(mux->dev, "Cannot allocate pinctrl_states\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_names; i++) {
+ ret = of_property_read_string_index(np, "pinctrl-names", i,
+ &mux->pdata->pinctrl_states[mux->pdata->bus_count]);
+ if (ret < 0) {
+ dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
+ ret);
+ return ret;
+ }
+ if (!strcmp(mux->pdata->pinctrl_states[mux->pdata->bus_count],
+ "idle")) {
+ if (i != num_names - 1) {
+ dev_err(mux->dev, "idle state must be last\n");
+ return -EINVAL;
+ }
+ mux->pdata->pinctrl_state_idle = "idle";
+ } else {
+ mux->pdata->bus_count++;
+ }
+ }
+
+ adapter_np = of_parse_phandle(np, "i2c-parent", 0);
+ if (!adapter_np) {
+ dev_err(mux->dev, "Cannot parse i2c-parent\n");
+ return -ENODEV;
+ }
+ adapter = of_find_i2c_adapter_by_node(adapter_np);
+ if (!adapter) {
+ dev_err(mux->dev, "Cannot find parent bus\n");
+ return -ENODEV;
+ }
+ mux->pdata->parent_bus_num = i2c_adapter_id(adapter);
+ put_device(&adapter->dev);
+
+ return 0;
+}
+#else
+static inline int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
+ struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
+static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev)
+{
+ struct i2c_mux_pinctrl *mux;
+ int (*deselect)(struct i2c_adapter *, void *, u32);
+ int i, ret;
+
+ mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux) {
+ dev_err(&pdev->dev, "Cannot allocate i2c_mux_pinctrl\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ platform_set_drvdata(pdev, mux);
+
+ mux->dev = &pdev->dev;
+
+ mux->pdata = pdev->dev.platform_data;
+ if (!mux->pdata) {
+ ret = i2c_mux_pinctrl_parse_dt(mux, pdev);
+ if (ret < 0)
+ goto err;
+ }
+ if (!mux->pdata) {
+ dev_err(&pdev->dev, "Missing platform data\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ mux->states = devm_kzalloc(&pdev->dev,
+ sizeof(*mux->states) * mux->pdata->bus_count,
+ GFP_KERNEL);
+ if (!mux->states) {
+ dev_err(&pdev->dev, "Cannot allocate states\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ mux->busses = devm_kzalloc(&pdev->dev,
+ sizeof(mux->busses) * mux->pdata->bus_count,
+ GFP_KERNEL);
+ if (!mux->busses) {
+ dev_err(&pdev->dev, "Cannot allocate busses\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ mux->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(mux->pinctrl)) {
+ ret = PTR_ERR(mux->pinctrl);
+ dev_err(&pdev->dev, "Cannot get pinctrl: %d\n", ret);
+ goto err;
+ }
+ for (i = 0; i < mux->pdata->bus_count; i++) {
+ mux->states[i] = pinctrl_lookup_state(mux->pinctrl,
+ mux->pdata->pinctrl_states[i]);
+ if (IS_ERR(mux->states[i])) {
+ ret = PTR_ERR(mux->states[i]);
+ dev_err(&pdev->dev,
+ "Cannot look up pinctrl state %s: %d\n",
+ mux->pdata->pinctrl_states[i], ret);
+ goto err;
+ }
+ }
+ if (mux->pdata->pinctrl_state_idle) {
+ mux->state_idle = pinctrl_lookup_state(mux->pinctrl,
+ mux->pdata->pinctrl_state_idle);
+ if (IS_ERR(mux->state_idle)) {
+ ret = PTR_ERR(mux->state_idle);
+ dev_err(&pdev->dev,
+ "Cannot look up pinctrl state %s: %d\n",
+ mux->pdata->pinctrl_state_idle, ret);
+ goto err;
+ }
+
+ deselect = i2c_mux_pinctrl_deselect;
+ } else {
+ deselect = NULL;
+ }
+
+ mux->parent = i2c_get_adapter(mux->pdata->parent_bus_num);
+ if (!mux->parent) {
+ dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
+ mux->pdata->parent_bus_num);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ for (i = 0; i < mux->pdata->bus_count; i++) {
+ u32 bus = mux->pdata->base_bus_num ?
+ (mux->pdata->base_bus_num + i) : 0;
+
+ mux->busses[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev,
+ mux, bus, i,
+ i2c_mux_pinctrl_select,
+ deselect);
+ if (!mux->busses[i]) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
+ goto err_del_adapter;
+ }
+ }
+
+ return 0;
+
+err_del_adapter:
+ for (; i > 0; i--)
+ i2c_del_mux_adapter(mux->busses[i - 1]);
+ i2c_put_adapter(mux->parent);
+err:
+ return ret;
+}
+
+static int __devexit i2c_mux_pinctrl_remove(struct platform_device *pdev)
+{
+ struct i2c_mux_pinctrl *mux = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < mux->pdata->bus_count; i++)
+ i2c_del_mux_adapter(mux->busses[i]);
+
+ i2c_put_adapter(mux->parent);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id i2c_mux_pinctrl_of_match[] __devinitconst = {
+ { .compatible = "i2c-mux-pinctrl", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_mux_pinctrl_of_match);
+#endif
+
+static struct platform_driver i2c_mux_pinctrl_driver = {
+ .driver = {
+ .name = "i2c-mux-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(i2c_mux_pinctrl_of_match),
+ },
+ .probe = i2c_mux_pinctrl_probe,
+ .remove = __devexit_p(i2c_mux_pinctrl_remove),
+};
+module_platform_driver(i2c_mux_pinctrl_driver);
+
+MODULE_DESCRIPTION("pinctrl-based I2C multiplexer driver");
+MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:i2c-mux-pinctrl");
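When the driver is not configured through the device tree, board code supplies a struct i2c_mux_pinctrl_platform_data directly; the probe path above only consumes the fields shown here. A sketch of a static two-bus configuration (all values and pinctrl state names are illustrative):

#include <linux/i2c-mux-pinctrl.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static const char *foo_i2c_mux_states[] = { "i2c-a", "i2c-b" };

static struct i2c_mux_pinctrl_platform_data foo_i2c_mux_pdata = {
	.parent_bus_num		= 0,	/* adapter the mux hangs off */
	.base_bus_num		= 10,	/* children become i2c-10, i2c-11 */
	.bus_count		= ARRAY_SIZE(foo_i2c_mux_states),
	.pinctrl_states		= foo_i2c_mux_states,
	.pinctrl_state_idle	= "idle",	/* optional */
};

static struct platform_device foo_i2c_mux_device = {
	.name	= "i2c-mux-pinctrl",
	.id	= -1,
	.dev	= {
		.platform_data = &foo_i2c_mux_pdata,
	},
};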
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 8716066..bcb507b 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -236,7 +236,7 @@ static const struct ide_port_ops icside_v6_no_dma_port_ops = {
*/
static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- unsigned long cycle_time;
+ unsigned long cycle_time = 0;
int use_dma_info = 0;
const u8 xfer_mode = drive->dma_mode;
@@ -271,9 +271,9 @@ static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
ide_set_drivedata(drive, (void *)cycle_time);
- printk("%s: %s selected (peak %dMB/s)\n", drive->name,
- ide_xfer_verbose(xfer_mode),
- 2000 / (unsigned long)ide_get_drivedata(drive));
+ printk(KERN_INFO "%s: %s selected (peak %luMB/s)\n",
+ drive->name, ide_xfer_verbose(xfer_mode),
+ 2000 / (cycle_time ? cycle_time : (unsigned long) -1));
}
static const struct ide_port_ops icside_v6_port_ops = {
@@ -375,8 +375,6 @@ static const struct ide_dma_ops icside_v6_dma_ops = {
.dma_test_irq = icside_dma_test_irq,
.dma_lost_irq = ide_dma_lost_irq,
};
-#else
-#define icside_v6_dma_ops NULL
#endif
static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
@@ -456,7 +454,6 @@ err_free:
static const struct ide_port_info icside_v6_port_info __initdata = {
.init_dma = icside_dma_off_init,
.port_ops = &icside_v6_no_dma_port_ops,
- .dma_ops = &icside_v6_dma_ops,
.host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
.mwdma_mask = ATA_MWDMA2,
.swdma_mask = ATA_SWDMA2,
@@ -518,11 +515,13 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
ecard_set_drvdata(ec, state);
+#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
d.init_dma = icside_dma_init;
d.port_ops = &icside_v6_port_ops;
- } else
- d.dma_ops = NULL;
+ d.dma_ops = &icside_v6_dma_ops;
+ }
+#endif
ret = ide_host_register(host, &d, hws);
if (ret)
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index 28e344e..f1e922e 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -167,7 +167,8 @@ static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data)
{
int *is_kme = priv_data;
- if (!(pdev->resource[0]->flags & IO_DATA_PATH_WIDTH_8)) {
+ if ((pdev->resource[0]->flags & IO_DATA_PATH_WIDTH)
+ != IO_DATA_PATH_WIDTH_8) {
pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
}
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index d0f59c3..fe95d54 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -96,6 +96,7 @@ static const struct idle_cpu *icpu;
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
+static int intel_idle_cpu_init(int cpu);
static struct cpuidle_state *cpuidle_state_table;
@@ -302,22 +303,35 @@ static void __setup_broadcast_timer(void *arg)
clockevents_notify(reason, &cpu);
}
-static int setup_broadcast_cpuhp_notify(struct notifier_block *n,
- unsigned long action, void *hcpu)
+static int cpu_hotplug_notify(struct notifier_block *n,
+ unsigned long action, void *hcpu)
{
int hotcpu = (unsigned long)hcpu;
+ struct cpuidle_device *dev;
switch (action & 0xf) {
case CPU_ONLINE:
- smp_call_function_single(hotcpu, __setup_broadcast_timer,
- (void *)true, 1);
+
+ if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
+ smp_call_function_single(hotcpu, __setup_broadcast_timer,
+ (void *)true, 1);
+
+ /*
+ * Some systems can hotplug a cpu at runtime after
+ * the kernel has booted, so we have to initialize the
+ * driver for it in this case.
+ */
+ dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu);
+ if (!dev->registered)
+ intel_idle_cpu_init(hotcpu);
+
break;
}
return NOTIFY_OK;
}
-static struct notifier_block setup_broadcast_notifier = {
- .notifier_call = setup_broadcast_cpuhp_notify,
+static struct notifier_block cpu_hotplug_notifier = {
+ .notifier_call = cpu_hotplug_notify,
};
static void auto_demotion_disable(void *dummy)
@@ -405,10 +419,10 @@ static int intel_idle_probe(void)
if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
- else {
+ else
on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
- register_cpu_notifier(&setup_broadcast_notifier);
- }
+
+ register_cpu_notifier(&cpu_hotplug_notifier);
pr_debug(PREFIX "v" INTEL_IDLE_VERSION
" model 0x%X\n", boot_cpu_data.x86_model);
@@ -494,7 +508,7 @@ static int intel_idle_cpuidle_driver_init(void)
* allocate, initialize, register cpuidle_devices
* @cpu: cpu/core to initialize
*/
-int intel_idle_cpu_init(int cpu)
+static int intel_idle_cpu_init(int cpu)
{
int cstate;
struct cpuidle_device *dev;
@@ -539,7 +553,6 @@ int intel_idle_cpu_init(int cpu)
return 0;
}
-EXPORT_SYMBOL_GPL(intel_idle_cpu_init);
static int __init intel_idle_init(void)
{
@@ -581,10 +594,10 @@ static void __exit intel_idle_exit(void)
intel_idle_cpuidle_devices_uninit();
cpuidle_unregister_driver(&intel_idle_driver);
- if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
+
+ if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
- unregister_cpu_notifier(&setup_broadcast_notifier);
- }
+ unregister_cpu_notifier(&cpu_hotplug_notifier);
return;
}
diff --git a/drivers/ieee802154/Kconfig b/drivers/ieee802154/Kconfig
index 15c0640..1fc4eef 100644
--- a/drivers/ieee802154/Kconfig
+++ b/drivers/ieee802154/Kconfig
@@ -19,6 +19,7 @@ config IEEE802154_FAKEHARD
This driver can also be built as a module. To do so say M here.
The module will be called 'fakehard'.
+
config IEEE802154_FAKELB
depends on IEEE802154_DRIVERS && MAC802154
tristate "IEEE 802.15.4 loopback driver"
@@ -28,3 +29,8 @@ config IEEE802154_FAKELB
This driver can also be built as a module. To do so say M here.
The module will be called 'fakelb'.
+
+config IEEE802154_AT86RF230
+ tristate "AT86RF230/231 transceiver driver"
+ depends on IEEE802154_DRIVERS && MAC802154
+ depends on SPI
diff --git a/drivers/ieee802154/Makefile b/drivers/ieee802154/Makefile
index ea784ea..4f4371d 100644
--- a/drivers/ieee802154/Makefile
+++ b/drivers/ieee802154/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o
obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
+obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
diff --git a/drivers/ieee802154/at86rf230.c b/drivers/ieee802154/at86rf230.c
new file mode 100644
index 0000000..5d30940
--- /dev/null
+++ b/drivers/ieee802154/at86rf230.c
@@ -0,0 +1,968 @@
+/*
+ * AT86RF230/RF231 driver
+ *
+ * Copyright (C) 2009-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/at86rf230.h>
+#include <linux/skbuff.h>
+
+#include <net/mac802154.h>
+#include <net/wpan-phy.h>
+
+struct at86rf230_local {
+ struct spi_device *spi;
+ int rstn, slp_tr, dig2;
+
+ u8 part;
+ u8 vers;
+
+ u8 buf[2];
+ struct mutex bmux;
+
+ struct work_struct irqwork;
+ struct completion tx_complete;
+
+ struct ieee802154_dev *dev;
+
+ spinlock_t lock;
+ bool irq_disabled;
+ bool is_tx;
+};
+
+#define RG_TRX_STATUS (0x01)
+#define SR_TRX_STATUS 0x01, 0x1f, 0
+#define SR_RESERVED_01_3 0x01, 0x20, 5
+#define SR_CCA_STATUS 0x01, 0x40, 6
+#define SR_CCA_DONE 0x01, 0x80, 7
+#define RG_TRX_STATE (0x02)
+#define SR_TRX_CMD 0x02, 0x1f, 0
+#define SR_TRAC_STATUS 0x02, 0xe0, 5
+#define RG_TRX_CTRL_0 (0x03)
+#define SR_CLKM_CTRL 0x03, 0x07, 0
+#define SR_CLKM_SHA_SEL 0x03, 0x08, 3
+#define SR_PAD_IO_CLKM 0x03, 0x30, 4
+#define SR_PAD_IO 0x03, 0xc0, 6
+#define RG_TRX_CTRL_1 (0x04)
+#define SR_IRQ_POLARITY 0x04, 0x01, 0
+#define SR_IRQ_MASK_MODE 0x04, 0x02, 1
+#define SR_SPI_CMD_MODE 0x04, 0x0c, 2
+#define SR_RX_BL_CTRL 0x04, 0x10, 4
+#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5
+#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6
+#define SR_PA_EXT_EN 0x04, 0x80, 7
+#define RG_PHY_TX_PWR (0x05)
+#define SR_TX_PWR 0x05, 0x0f, 0
+#define SR_PA_LT 0x05, 0x30, 4
+#define SR_PA_BUF_LT 0x05, 0xc0, 6
+#define RG_PHY_RSSI (0x06)
+#define SR_RSSI 0x06, 0x1f, 0
+#define SR_RND_VALUE 0x06, 0x60, 5
+#define SR_RX_CRC_VALID 0x06, 0x80, 7
+#define RG_PHY_ED_LEVEL (0x07)
+#define SR_ED_LEVEL 0x07, 0xff, 0
+#define RG_PHY_CC_CCA (0x08)
+#define SR_CHANNEL 0x08, 0x1f, 0
+#define SR_CCA_MODE 0x08, 0x60, 5
+#define SR_CCA_REQUEST 0x08, 0x80, 7
+#define RG_CCA_THRES (0x09)
+#define SR_CCA_ED_THRES 0x09, 0x0f, 0
+#define SR_RESERVED_09_1 0x09, 0xf0, 4
+#define RG_RX_CTRL (0x0a)
+#define SR_PDT_THRES 0x0a, 0x0f, 0
+#define SR_RESERVED_0a_1 0x0a, 0xf0, 4
+#define RG_SFD_VALUE (0x0b)
+#define SR_SFD_VALUE 0x0b, 0xff, 0
+#define RG_TRX_CTRL_2 (0x0c)
+#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0
+#define SR_RESERVED_0c_2 0x0c, 0x7c, 2
+#define SR_RX_SAFE_MODE 0x0c, 0x80, 7
+#define RG_ANT_DIV (0x0d)
+#define SR_ANT_CTRL 0x0d, 0x03, 0
+#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2
+#define SR_ANT_DIV_EN 0x0d, 0x08, 3
+#define SR_RESERVED_0d_2 0x0d, 0x70, 4
+#define SR_ANT_SEL 0x0d, 0x80, 7
+#define RG_IRQ_MASK (0x0e)
+#define SR_IRQ_MASK 0x0e, 0xff, 0
+#define RG_IRQ_STATUS (0x0f)
+#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0
+#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1
+#define SR_IRQ_2_RX_START 0x0f, 0x04, 2
+#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3
+#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4
+#define SR_IRQ_5_AMI 0x0f, 0x20, 5
+#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6
+#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7
+#define RG_VREG_CTRL (0x10)
+#define SR_RESERVED_10_6 0x10, 0x03, 0
+#define SR_DVDD_OK 0x10, 0x04, 2
+#define SR_DVREG_EXT 0x10, 0x08, 3
+#define SR_RESERVED_10_3 0x10, 0x30, 4
+#define SR_AVDD_OK 0x10, 0x40, 6
+#define SR_AVREG_EXT 0x10, 0x80, 7
+#define RG_BATMON (0x11)
+#define SR_BATMON_VTH 0x11, 0x0f, 0
+#define SR_BATMON_HR 0x11, 0x10, 4
+#define SR_BATMON_OK 0x11, 0x20, 5
+#define SR_RESERVED_11_1 0x11, 0xc0, 6
+#define RG_XOSC_CTRL (0x12)
+#define SR_XTAL_TRIM 0x12, 0x0f, 0
+#define SR_XTAL_MODE 0x12, 0xf0, 4
+#define RG_RX_SYN (0x15)
+#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0
+#define SR_RESERVED_15_2 0x15, 0x70, 4
+#define SR_RX_PDT_DIS 0x15, 0x80, 7
+#define RG_XAH_CTRL_1 (0x17)
+#define SR_RESERVED_17_8 0x17, 0x01, 0
+#define SR_AACK_PROM_MODE 0x17, 0x02, 1
+#define SR_AACK_ACK_TIME 0x17, 0x04, 2
+#define SR_RESERVED_17_5 0x17, 0x08, 3
+#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4
+#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5
+#define SR_RESERVED_17_2 0x17, 0x40, 6
+#define SR_RESERVED_17_1 0x17, 0x80, 7
+#define RG_FTN_CTRL (0x18)
+#define SR_RESERVED_18_2 0x18, 0x7f, 0
+#define SR_FTN_START 0x18, 0x80, 7
+#define RG_PLL_CF (0x1a)
+#define SR_RESERVED_1a_2 0x1a, 0x7f, 0
+#define SR_PLL_CF_START 0x1a, 0x80, 7
+#define RG_PLL_DCU (0x1b)
+#define SR_RESERVED_1b_3 0x1b, 0x3f, 0
+#define SR_RESERVED_1b_2 0x1b, 0x40, 6
+#define SR_PLL_DCU_START 0x1b, 0x80, 7
+#define RG_PART_NUM (0x1c)
+#define SR_PART_NUM 0x1c, 0xff, 0
+#define RG_VERSION_NUM (0x1d)
+#define SR_VERSION_NUM 0x1d, 0xff, 0
+#define RG_MAN_ID_0 (0x1e)
+#define SR_MAN_ID_0 0x1e, 0xff, 0
+#define RG_MAN_ID_1 (0x1f)
+#define SR_MAN_ID_1 0x1f, 0xff, 0
+#define RG_SHORT_ADDR_0 (0x20)
+#define SR_SHORT_ADDR_0 0x20, 0xff, 0
+#define RG_SHORT_ADDR_1 (0x21)
+#define SR_SHORT_ADDR_1 0x21, 0xff, 0
+#define RG_PAN_ID_0 (0x22)
+#define SR_PAN_ID_0 0x22, 0xff, 0
+#define RG_PAN_ID_1 (0x23)
+#define SR_PAN_ID_1 0x23, 0xff, 0
+#define RG_IEEE_ADDR_0 (0x24)
+#define SR_IEEE_ADDR_0 0x24, 0xff, 0
+#define RG_IEEE_ADDR_1 (0x25)
+#define SR_IEEE_ADDR_1 0x25, 0xff, 0
+#define RG_IEEE_ADDR_2 (0x26)
+#define SR_IEEE_ADDR_2 0x26, 0xff, 0
+#define RG_IEEE_ADDR_3 (0x27)
+#define SR_IEEE_ADDR_3 0x27, 0xff, 0
+#define RG_IEEE_ADDR_4 (0x28)
+#define SR_IEEE_ADDR_4 0x28, 0xff, 0
+#define RG_IEEE_ADDR_5 (0x29)
+#define SR_IEEE_ADDR_5 0x29, 0xff, 0
+#define RG_IEEE_ADDR_6 (0x2a)
+#define SR_IEEE_ADDR_6 0x2a, 0xff, 0
+#define RG_IEEE_ADDR_7 (0x2b)
+#define SR_IEEE_ADDR_7 0x2b, 0xff, 0
+#define RG_XAH_CTRL_0 (0x2c)
+#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0
+#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1
+#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4
+#define RG_CSMA_SEED_0 (0x2d)
+#define SR_CSMA_SEED_0 0x2d, 0xff, 0
+#define RG_CSMA_SEED_1 (0x2e)
+#define SR_CSMA_SEED_1 0x2e, 0x07, 0
+#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3
+#define SR_AACK_DIS_ACK 0x2e, 0x10, 4
+#define SR_AACK_SET_PD 0x2e, 0x20, 5
+#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6
+#define RG_CSMA_BE (0x2f)
+#define SR_MIN_BE 0x2f, 0x0f, 0
+#define SR_MAX_BE 0x2f, 0xf0, 4
+
+#define CMD_REG 0x80
+#define CMD_REG_MASK 0x3f
+#define CMD_WRITE 0x40
+#define CMD_FB 0x20
+
+#define IRQ_BAT_LOW (1 << 7)
+#define IRQ_TRX_UR (1 << 6)
+#define IRQ_AMI (1 << 5)
+#define IRQ_CCA_ED (1 << 4)
+#define IRQ_TRX_END (1 << 3)
+#define IRQ_RX_START (1 << 2)
+#define IRQ_PLL_UNL (1 << 1)
+#define IRQ_PLL_LOCK (1 << 0)
+
+#define STATE_P_ON 0x00 /* BUSY */
+#define STATE_BUSY_RX 0x01
+#define STATE_BUSY_TX 0x02
+#define STATE_FORCE_TRX_OFF 0x03
+#define STATE_FORCE_TX_ON 0x04 /* IDLE */
+/* 0x05 */ /* INVALID_PARAMETER */
+#define STATE_RX_ON 0x06
+/* 0x07 */ /* SUCCESS */
+#define STATE_TRX_OFF 0x08
+#define STATE_TX_ON 0x09
+/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */
+#define STATE_SLEEP 0x0F
+#define STATE_BUSY_RX_AACK 0x11
+#define STATE_BUSY_TX_ARET 0x12
+#define STATE_BUSY_RX_AACK_ON 0x16
+#define STATE_BUSY_TX_ARET_ON 0x19
+#define STATE_RX_ON_NOCLK 0x1C
+#define STATE_RX_AACK_ON_NOCLK 0x1D
+#define STATE_BUSY_RX_AACK_NOCLK 0x1E
+#define STATE_TRANSITION_IN_PROGRESS 0x1F
+
+static int
+__at86rf230_write(struct at86rf230_local *lp, u8 addr, u8 data)
+{
+ u8 *buf = lp->buf;
+ int status;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = buf,
+ };
+
+ buf[0] = (addr & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
+ buf[1] = data;
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ status = spi_sync(lp->spi, &msg);
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ if (msg.status)
+ status = msg.status;
+
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ return status;
+}
+
+static int
+__at86rf230_read_subreg(struct at86rf230_local *lp,
+ u8 addr, u8 mask, int shift, u8 *data)
+{
+ u8 *buf = lp->buf;
+ int status;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = buf,
+ .rx_buf = buf,
+ };
+
+ buf[0] = (addr & CMD_REG_MASK) | CMD_REG;
+ buf[1] = 0xff;
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ status = spi_sync(lp->spi, &msg);
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ if (msg.status)
+ status = msg.status;
+
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ if (status == 0)
+ *data = buf[1];
+
+ return status;
+}
+
+static int
+at86rf230_read_subreg(struct at86rf230_local *lp,
+ u8 addr, u8 mask, int shift, u8 *data)
+{
+ int status;
+
+ mutex_lock(&lp->bmux);
+ status = __at86rf230_read_subreg(lp, addr, mask, shift, data);
+ mutex_unlock(&lp->bmux);
+
+ return status;
+}
+
+static int
+at86rf230_write_subreg(struct at86rf230_local *lp,
+ u8 addr, u8 mask, int shift, u8 data)
+{
+ int status;
+ u8 val;
+
+ mutex_lock(&lp->bmux);
+ status = __at86rf230_read_subreg(lp, addr, 0xff, 0, &val);
+ if (status)
+ goto out;
+
+ val &= ~mask;
+ val |= (data << shift) & mask;
+
+ status = __at86rf230_write(lp, addr, val);
+out:
+ mutex_unlock(&lp->bmux);
+
+ return status;
+}
+
+static int
+at86rf230_write_fbuf(struct at86rf230_local *lp, u8 *data, u8 len)
+{
+ u8 *buf = lp->buf;
+ int status;
+ struct spi_message msg;
+ struct spi_transfer xfer_head = {
+ .len = 2,
+ .tx_buf = buf,
+
+ };
+ struct spi_transfer xfer_buf = {
+ .len = len,
+ .tx_buf = data,
+ };
+
+ mutex_lock(&lp->bmux);
+ buf[0] = CMD_WRITE | CMD_FB;
+ buf[1] = len + 2; /* 2 bytes for CRC that isn't written */
+
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer_head, &msg);
+ spi_message_add_tail(&xfer_buf, &msg);
+
+ status = spi_sync(lp->spi, &msg);
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ if (msg.status)
+ status = msg.status;
+
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ mutex_unlock(&lp->bmux);
+ return status;
+}
+
+static int
+at86rf230_read_fbuf(struct at86rf230_local *lp, u8 *data, u8 *len, u8 *lqi)
+{
+ u8 *buf = lp->buf;
+ int status;
+ struct spi_message msg;
+ struct spi_transfer xfer_head = {
+ .len = 2,
+ .tx_buf = buf,
+ .rx_buf = buf,
+ };
+ struct spi_transfer xfer_head1 = {
+ .len = 2,
+ .tx_buf = buf,
+ .rx_buf = buf,
+ };
+ struct spi_transfer xfer_buf = {
+ .len = 0,
+ .rx_buf = data,
+ };
+
+ mutex_lock(&lp->bmux);
+
+ buf[0] = CMD_FB;
+ buf[1] = 0x00;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer_head, &msg);
+
+ status = spi_sync(lp->spi, &msg);
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+
+ xfer_buf.len = *(buf + 1) + 1;
+ *len = buf[1];
+
+ buf[0] = CMD_FB;
+ buf[1] = 0x00;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer_head1, &msg);
+ spi_message_add_tail(&xfer_buf, &msg);
+
+ status = spi_sync(lp->spi, &msg);
+
+ if (msg.status)
+ status = msg.status;
+
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ if (status) {
+ if (lqi && (*len > lp->buf[1]))
+ *lqi = data[lp->buf[1]];
+ }
+ mutex_unlock(&lp->bmux);
+
+ return status;
+}
+
+static int
+at86rf230_ed(struct ieee802154_dev *dev, u8 *level)
+{
+ might_sleep();
+ BUG_ON(!level);
+ *level = 0xbe;
+ return 0;
+}
+
+static int
+at86rf230_state(struct ieee802154_dev *dev, int state)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc;
+ u8 val;
+ u8 desired_status;
+
+ might_sleep();
+
+ if (state == STATE_FORCE_TX_ON)
+ desired_status = STATE_TX_ON;
+ else if (state == STATE_FORCE_TRX_OFF)
+ desired_status = STATE_TRX_OFF;
+ else
+ desired_status = state;
+
+ do {
+ rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val);
+ if (rc)
+ goto err;
+ } while (val == STATE_TRANSITION_IN_PROGRESS);
+
+ if (val == desired_status)
+ return 0;
+
+ /* state is equal to phy states */
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, state);
+ if (rc)
+ goto err;
+
+ do {
+ rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val);
+ if (rc)
+ goto err;
+ } while (val == STATE_TRANSITION_IN_PROGRESS);
+
+
+ if (val == desired_status)
+ return 0;
+
+ pr_err("unexpected state change: %d, asked for %d\n", val, state);
+ return -EBUSY;
+
+err:
+ pr_err("error: %d\n", rc);
+ return rc;
+}
+
+static int
+at86rf230_start(struct ieee802154_dev *dev)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc;
+
+ rc = at86rf230_write_subreg(lp, SR_RX_SAFE_MODE, 1);
+ if (rc)
+ return rc;
+
+ return at86rf230_state(dev, STATE_RX_ON);
+}
+
+static void
+at86rf230_stop(struct ieee802154_dev *dev)
+{
+ at86rf230_state(dev, STATE_FORCE_TRX_OFF);
+}
+
+static int
+at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc;
+
+ might_sleep();
+
+ if (page != 0 || channel < 11 || channel > 26) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ rc = at86rf230_write_subreg(lp, SR_CHANNEL, channel);
+ if (rc)
+ return rc;
+
+ msleep(1); /* Wait for PLL */
+ dev->phy->current_channel = channel;
+
+ return 0;
+}
+
+static int
+at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ if (lp->irq_disabled) {
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ might_sleep();
+
+ rc = at86rf230_state(dev, STATE_FORCE_TX_ON);
+ if (rc)
+ goto err;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->is_tx = 1;
+ INIT_COMPLETION(lp->tx_complete);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ rc = at86rf230_write_fbuf(lp, skb->data, skb->len);
+ if (rc)
+ goto err_rx;
+
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_BUSY_TX);
+ if (rc)
+ goto err_rx;
+
+ rc = wait_for_completion_interruptible(&lp->tx_complete);
+ if (rc < 0)
+ goto err_rx;
+
+ rc = at86rf230_start(dev);
+
+ return rc;
+
+err_rx:
+ at86rf230_start(dev);
+err:
+ pr_err("error: %d\n", rc);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->is_tx = 0;
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return rc;
+}
+
+static int at86rf230_rx(struct at86rf230_local *lp)
+{
+ u8 len = 128, lqi = 0;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+
+ if (!skb)
+ return -ENOMEM;
+
+ if (at86rf230_read_fbuf(lp, skb_put(skb, len), &len, &lqi))
+ goto err;
+
+ if (len < 2)
+ goto err;
+
+ skb_trim(skb, len - 2); /* We do not put CRC into the frame */
+
+ ieee802154_rx_irqsafe(lp->dev, skb, lqi);
+
+ dev_dbg(&lp->spi->dev, "READ_FBUF: %d %x\n", len, lqi);
+
+ return 0;
+err:
+ pr_debug("frame read failed or frame too small\n");
+
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+static struct ieee802154_ops at86rf230_ops = {
+ .owner = THIS_MODULE,
+ .xmit = at86rf230_xmit,
+ .ed = at86rf230_ed,
+ .set_channel = at86rf230_channel,
+ .start = at86rf230_start,
+ .stop = at86rf230_stop,
+};
+
+static void at86rf230_irqwork(struct work_struct *work)
+{
+ struct at86rf230_local *lp =
+ container_of(work, struct at86rf230_local, irqwork);
+ u8 status = 0, val;
+ int rc;
+ unsigned long flags;
+
+ rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &val);
+ status |= val;
+
+ status &= ~IRQ_PLL_LOCK; /* ignore */
+ status &= ~IRQ_RX_START; /* ignore */
+ status &= ~IRQ_AMI; /* ignore */
+ status &= ~IRQ_TRX_UR; /* FIXME: possibly handle ??? */
+
+ if (status & IRQ_TRX_END) {
+ spin_lock_irqsave(&lp->lock, flags);
+ status &= ~IRQ_TRX_END;
+ if (lp->is_tx) {
+ lp->is_tx = 0;
+ spin_unlock_irqrestore(&lp->lock, flags);
+ complete(&lp->tx_complete);
+ } else {
+ spin_unlock_irqrestore(&lp->lock, flags);
+ at86rf230_rx(lp);
+ }
+ }
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->irq_disabled = 0;
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ enable_irq(lp->spi->irq);
+}
+
+static irqreturn_t at86rf230_isr(int irq, void *data)
+{
+ struct at86rf230_local *lp = data;
+
+ disable_irq_nosync(irq);
+
+ spin_lock(&lp->lock);
+ lp->irq_disabled = 1;
+ spin_unlock(&lp->lock);
+
+ schedule_work(&lp->irqwork);
+
+ return IRQ_HANDLED;
+}
+
+
+static int at86rf230_hw_init(struct at86rf230_local *lp)
+{
+ u8 status;
+ int rc;
+
+ rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
+ if (rc)
+ return rc;
+
+ dev_info(&lp->spi->dev, "Status: %02x\n", status);
+ if (status == STATE_P_ON) {
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TRX_OFF);
+ if (rc)
+ return rc;
+ msleep(1);
+ rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
+ if (rc)
+ return rc;
+ dev_info(&lp->spi->dev, "Status: %02x\n", status);
+ }
+
+ rc = at86rf230_write_subreg(lp, SR_IRQ_MASK, 0xff); /* IRQ_TRX_UR |
+ * IRQ_CCA_ED |
+ * IRQ_TRX_END |
+ * IRQ_PLL_UNL |
+ * IRQ_PLL_LOCK
+ */
+ if (rc)
+ return rc;
+
+ /* CLKM changes are applied immediately */
+ rc = at86rf230_write_subreg(lp, SR_CLKM_SHA_SEL, 0x00);
+ if (rc)
+ return rc;
+
+ /* Turn CLKM Off */
+ rc = at86rf230_write_subreg(lp, SR_CLKM_CTRL, 0x00);
+ if (rc)
+ return rc;
+ /* Wait the next SLEEP cycle */
+ msleep(100);
+
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ON);
+ if (rc)
+ return rc;
+ msleep(1);
+
+ rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
+ if (rc)
+ return rc;
+ dev_info(&lp->spi->dev, "Status: %02x\n", status);
+
+ rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status);
+ if (rc)
+ return rc;
+ if (!status) {
+ dev_err(&lp->spi->dev, "DVDD error\n");
+ return -EINVAL;
+ }
+
+ rc = at86rf230_read_subreg(lp, SR_AVDD_OK, &status);
+ if (rc)
+ return rc;
+ if (!status) {
+ dev_err(&lp->spi->dev, "AVDD error\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int at86rf230_suspend(struct spi_device *spi, pm_message_t message)
+{
+ return 0;
+}
+
+static int at86rf230_resume(struct spi_device *spi)
+{
+ return 0;
+}
+
+static int at86rf230_fill_data(struct spi_device *spi)
+{
+ struct at86rf230_local *lp = spi_get_drvdata(spi);
+ struct at86rf230_platform_data *pdata = spi->dev.platform_data;
+
+ if (!pdata) {
+ dev_err(&spi->dev, "no platform_data\n");
+ return -EINVAL;
+ }
+
+ lp->rstn = pdata->rstn;
+ lp->slp_tr = pdata->slp_tr;
+ lp->dig2 = pdata->dig2;
+
+ return 0;
+}
+
+static int __devinit at86rf230_probe(struct spi_device *spi)
+{
+ struct ieee802154_dev *dev;
+ struct at86rf230_local *lp;
+ u8 man_id_0, man_id_1;
+ int rc;
+ const char *chip;
+ int supported = 0;
+
+ if (!spi->irq) {
+ dev_err(&spi->dev, "no IRQ specified\n");
+ return -EINVAL;
+ }
+
+ dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops);
+ if (!dev)
+ return -ENOMEM;
+
+ lp = dev->priv;
+ lp->dev = dev;
+
+ lp->spi = spi;
+
+ dev->parent = &spi->dev;
+ dev->extra_tx_headroom = 0;
+ /* We support only the 2.4 GHz band */
+ dev->phy->channels_supported[0] = 0x7FFF800;
+ dev->flags = IEEE802154_HW_OMIT_CKSUM;
+
+ mutex_init(&lp->bmux);
+ INIT_WORK(&lp->irqwork, at86rf230_irqwork);
+ spin_lock_init(&lp->lock);
+ init_completion(&lp->tx_complete);
+
+ spi_set_drvdata(spi, lp);
+
+ rc = at86rf230_fill_data(spi);
+ if (rc)
+ goto err_fill;
+
+ rc = gpio_request(lp->rstn, "rstn");
+ if (rc)
+ goto err_rstn;
+
+ if (gpio_is_valid(lp->slp_tr)) {
+ rc = gpio_request(lp->slp_tr, "slp_tr");
+ if (rc)
+ goto err_slp_tr;
+ }
+
+ rc = gpio_direction_output(lp->rstn, 1);
+ if (rc)
+ goto err_gpio_dir;
+
+ if (gpio_is_valid(lp->slp_tr)) {
+ rc = gpio_direction_output(lp->slp_tr, 0);
+ if (rc)
+ goto err_gpio_dir;
+ }
+
+ /* Reset */
+ msleep(1);
+ gpio_set_value(lp->rstn, 0);
+ msleep(1);
+ gpio_set_value(lp->rstn, 1);
+ msleep(1);
+
+ rc = at86rf230_read_subreg(lp, SR_MAN_ID_0, &man_id_0);
+ if (rc)
+ goto err_gpio_dir;
+ rc = at86rf230_read_subreg(lp, SR_MAN_ID_1, &man_id_1);
+ if (rc)
+ goto err_gpio_dir;
+
+ if (man_id_1 != 0x00 || man_id_0 != 0x1f) {
+ dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
+ man_id_1, man_id_0);
+ rc = -EINVAL;
+ goto err_gpio_dir;
+ }
+
+ rc = at86rf230_read_subreg(lp, SR_PART_NUM, &lp->part);
+ if (rc)
+ goto err_gpio_dir;
+
+ rc = at86rf230_read_subreg(lp, SR_VERSION_NUM, &lp->vers);
+ if (rc)
+ goto err_gpio_dir;
+
+ switch (lp->part) {
+ case 2:
+ chip = "at86rf230";
+ /* supported = 1; FIXME: should be easy to support; */
+ break;
+ case 3:
+ chip = "at86rf231";
+ supported = 1;
+ break;
+ default:
+ chip = "UNKNOWN";
+ break;
+ }
+
+ dev_info(&spi->dev, "Detected %s chip version %d\n", chip, lp->vers);
+ if (!supported) {
+ rc = -ENOTSUPP;
+ goto err_gpio_dir;
+ }
+
+ rc = at86rf230_hw_init(lp);
+ if (rc)
+ goto err_gpio_dir;
+
+ rc = request_irq(spi->irq, at86rf230_isr, IRQF_SHARED,
+ dev_name(&spi->dev), lp);
+ if (rc)
+ goto err_gpio_dir;
+
+ rc = ieee802154_register_device(lp->dev);
+ if (rc)
+ goto err_irq;
+
+ return rc;
+
+err_irq:
+ free_irq(spi->irq, lp);
+ flush_work(&lp->irqwork);
+err_gpio_dir:
+ if (gpio_is_valid(lp->slp_tr))
+ gpio_free(lp->slp_tr);
+err_slp_tr:
+ gpio_free(lp->rstn);
+err_rstn:
+err_fill:
+ spi_set_drvdata(spi, NULL);
+ mutex_destroy(&lp->bmux);
+ ieee802154_free_device(lp->dev);
+ return rc;
+}
+
+static int __devexit at86rf230_remove(struct spi_device *spi)
+{
+ struct at86rf230_local *lp = spi_get_drvdata(spi);
+
+ ieee802154_unregister_device(lp->dev);
+
+ free_irq(spi->irq, lp);
+ flush_work(&lp->irqwork);
+
+ if (gpio_is_valid(lp->slp_tr))
+ gpio_free(lp->slp_tr);
+ gpio_free(lp->rstn);
+
+ spi_set_drvdata(spi, NULL);
+ mutex_destroy(&lp->bmux);
+ ieee802154_free_device(lp->dev);
+
+ dev_dbg(&spi->dev, "unregistered at86rf230\n");
+ return 0;
+}
+
+static struct spi_driver at86rf230_driver = {
+ .driver = {
+ .name = "at86rf230",
+ .owner = THIS_MODULE,
+ },
+ .probe = at86rf230_probe,
+ .remove = __devexit_p(at86rf230_remove),
+ .suspend = at86rf230_suspend,
+ .resume = at86rf230_resume,
+};
+
+static int __init at86rf230_init(void)
+{
+ return spi_register_driver(&at86rf230_driver);
+}
+module_init(at86rf230_init);
+
+static void __exit at86rf230_exit(void)
+{
+ spi_unregister_driver(&at86rf230_driver);
+}
+module_exit(at86rf230_exit);
+
+MODULE_DESCRIPTION("AT86RF230 Transceiver Driver");
+MODULE_LICENSE("GPL v2");
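
The subregister helpers in this file follow an addr/mask/shift convention: each SR_* macro expands to a register address, a field mask, and a shift; reads return (reg & mask) >> shift, and writes do the read-modify-write shown at the top of the file. A minimal standalone sketch of that convention, with a plain array standing in for the SPI register file and made-up field parameters (not the real chip map):

	/* Sketch of the addr/mask/shift subregister convention; the real
	 * driver goes through __at86rf230_read()/__at86rf230_write() over SPI.
	 */
	#include <stdint.h>
	#include <stdio.h>

	static uint8_t regs[0x40];		/* fake register file */

	static uint8_t read_subreg(uint8_t addr, uint8_t mask, int shift)
	{
		return (regs[addr] & mask) >> shift;
	}

	static void write_subreg(uint8_t addr, uint8_t mask, int shift, uint8_t data)
	{
		uint8_t val = regs[addr];

		val &= ~mask;			/* clear the field */
		val |= (data << shift) & mask;	/* merge the new value */
		regs[addr] = val;
	}

	int main(void)
	{
		/* hypothetical 5-bit channel field at address 0x08 */
		write_subreg(0x08, 0x1f, 0, 15);
		printf("channel = %u\n", read_subreg(0x08, 0x1f, 0));
		return 0;
	}
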
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 56eecef..2ec93da 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -8,8 +8,7 @@ menuconfig IIO
help
The industrial I/O subsystem provides a unified framework for
drivers for many different types of embedded sensors using a
- number of different physical interfaces (i2c, spi, etc). See
- Documentation/iio for more information.
+ number of different physical interfaces (i2c, spi, etc).
if IIO
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 1ddd886..4f947e4 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -661,7 +661,6 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
* New channel registration method - relies on the fact a group does
* not need to be initialized if it is name is NULL.
*/
- INIT_LIST_HEAD(&indio_dev->channel_attr_list);
if (indio_dev->channels)
for (i = 0; i < indio_dev->num_channels; i++) {
ret = iio_device_add_channel_sysfs(indio_dev,
@@ -725,12 +724,16 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
static void iio_dev_release(struct device *device)
{
struct iio_dev *indio_dev = dev_to_iio_dev(device);
- cdev_del(&indio_dev->chrdev);
+ if (indio_dev->chrdev.dev)
+ cdev_del(&indio_dev->chrdev);
if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
iio_device_unregister_trigger_consumer(indio_dev);
iio_device_unregister_eventset(indio_dev);
iio_device_unregister_sysfs(indio_dev);
iio_device_unregister_debugfs(indio_dev);
+
+ ida_simple_remove(&iio_ida, indio_dev->id);
+ kfree(indio_dev);
}
static struct device_type iio_dev_type = {
@@ -761,6 +764,7 @@ struct iio_dev *iio_device_alloc(int sizeof_priv)
dev_set_drvdata(&dev->dev, (void *)dev);
mutex_init(&dev->mlock);
mutex_init(&dev->info_exist_lock);
+ INIT_LIST_HEAD(&dev->channel_attr_list);
dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
if (dev->id < 0) {
@@ -778,10 +782,8 @@ EXPORT_SYMBOL(iio_device_alloc);
void iio_device_free(struct iio_dev *dev)
{
- if (dev) {
- ida_simple_remove(&iio_ida, dev->id);
- kfree(dev);
- }
+ if (dev)
+ put_device(&dev->dev);
}
EXPORT_SYMBOL(iio_device_free);
@@ -902,7 +904,7 @@ void iio_device_unregister(struct iio_dev *indio_dev)
mutex_lock(&indio_dev->info_exist_lock);
indio_dev->info = NULL;
mutex_unlock(&indio_dev->info_exist_lock);
- device_unregister(&indio_dev->dev);
+ device_del(&indio_dev->dev);
}
EXPORT_SYMBOL(iio_device_unregister);
subsys_initcall(iio_init);
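
The industrialio-core changes above move the final kfree() and ida_simple_remove() into the struct device release callback and split teardown into device_del() plus put_device(), so the object is freed only when the last reference drops rather than at unregister time. A hedged sketch of that release-callback pattern with made-up foo_* names (not the IIO API itself):

	#include <linux/device.h>
	#include <linux/slab.h>

	struct foo_dev {
		struct device dev;
		/* ... driver state ... */
	};

	static void foo_release(struct device *dev)
	{
		struct foo_dev *foo = container_of(dev, struct foo_dev, dev);

		kfree(foo);		/* runs only when the last reference drops */
	}

	static struct foo_dev *foo_alloc(void)
	{
		struct foo_dev *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

		if (!foo)
			return NULL;
		device_initialize(&foo->dev);
		foo->dev.release = foo_release;
		return foo;
	}

	static void foo_unregister(struct foo_dev *foo)
	{
		/* assumes device_add() succeeded earlier */
		device_del(&foo->dev);	/* remove sysfs/chardev visibility */
		put_device(&foo->dev);	/* free happens in foo_release() */
	}
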
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 55d5642..2e826f9 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1184,7 +1184,7 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
{
- return (((ib_event->event == IB_CM_REQ_RECEIVED) ||
+ return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
(ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
(id->qp_type == IB_QPT_UD)) ||
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index e497dfbe..3ae2bfd 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -108,12 +108,14 @@ void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
unsigned char *prev_tail;
prev_tail = skb_tail_pointer(skb);
- *nlh = NLMSG_NEW(skb, 0, seq, RDMA_NL_GET_TYPE(client, op),
- len, NLM_F_MULTI);
+ *nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op),
+ len, NLM_F_MULTI);
+ if (!*nlh)
+ goto out_nlmsg_trim;
(*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail;
- return NLMSG_DATA(*nlh);
+ return nlmsg_data(*nlh);
-nlmsg_failure:
+out_nlmsg_trim:
nlmsg_trim(skb, prev_tail);
return NULL;
}
@@ -171,8 +173,11 @@ static void ibnl_rcv(struct sk_buff *skb)
int __init ibnl_init(void)
{
- nls = netlink_kernel_create(&init_net, NETLINK_RDMA, 0, ibnl_rcv,
- NULL, THIS_MODULE);
+ struct netlink_kernel_cfg cfg = {
+ .input = ibnl_rcv,
+ };
+
+ nls = netlink_kernel_create(&init_net, NETLINK_RDMA, THIS_MODULE, &cfg);
if (!nls) {
pr_warn("Failed to create netlink socket\n");
return -ENOMEM;
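
The ibnl change above adopts the struct netlink_kernel_cfg form of netlink_kernel_create(), where the input callback travels inside the cfg structure; the four-argument signature below simply mirrors the hunk above (it was reshuffled again in later kernels). A minimal sketch using NETLINK_USERSOCK as a placeholder protocol:

	#include <linux/module.h>
	#include <linux/netlink.h>
	#include <net/net_namespace.h>
	#include <net/sock.h>

	static struct sock *example_nls;

	static void example_nl_rcv(struct sk_buff *skb)
	{
		/* parse nlmsghdr(s) from skb here */
	}

	static int __init example_nl_init(void)
	{
		/* callbacks now travel in netlink_kernel_cfg, as in the hunk above */
		struct netlink_kernel_cfg cfg = {
			.input = example_nl_rcv,
		};

		example_nls = netlink_kernel_create(&init_net, NETLINK_USERSOCK,
						    THIS_MODULE, &cfg);
		return example_nls ? 0 : -ENOMEM;
	}
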
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 740dcc0..77b6b18 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1374,7 +1374,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
goto reject;
}
dst = &rt->dst;
- l2t = t3_l2t_get(tdev, dst, NULL);
+ l2t = t3_l2t_get(tdev, dst, NULL, &req->peer_ip);
if (!l2t) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
@@ -1942,7 +1942,8 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto fail3;
}
ep->dst = &rt->dst;
- ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL);
+ ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL,
+ &cm_id->remote_addr.sin_addr.s_addr);
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 55ab284e..b18870c 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1593,6 +1593,10 @@ static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
struct net_device *pdev;
pdev = ip_dev_find(&init_net, peer_ip);
+ if (!pdev) {
+ err = -ENODEV;
+ goto out;
+ }
ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
n, pdev, 0);
if (!ep->l2t)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ee1c577..a07b774 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -140,7 +140,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->max_mr_size = ~0ull;
props->page_size_cap = dev->dev->caps.page_size_cap;
props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
- props->max_qp_wr = dev->dev->caps.max_wqes;
+ props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
props->max_sge = min(dev->dev->caps.max_sq_sg,
dev->dev->caps.max_rq_sg);
props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
@@ -718,26 +718,53 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
return ret;
}
+struct mlx4_ib_steering {
+ struct list_head list;
+ u64 reg_id;
+ union ib_gid gid;
+};
+
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
int err;
struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+ u64 reg_id;
+ struct mlx4_ib_steering *ib_steering = NULL;
+
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
+ if (!ib_steering)
+ return -ENOMEM;
+ }
- err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
- !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
- MLX4_PROT_IB_IPV6);
+ err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
+ !!(mqp->flags &
+ MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+ MLX4_PROT_IB_IPV6, &reg_id);
if (err)
- return err;
+ goto err_malloc;
err = add_gid_entry(ibqp, gid);
if (err)
goto err_add;
+ if (ib_steering) {
+ memcpy(ib_steering->gid.raw, gid->raw, 16);
+ ib_steering->reg_id = reg_id;
+ mutex_lock(&mqp->mutex);
+ list_add(&ib_steering->list, &mqp->steering_rules);
+ mutex_unlock(&mqp->mutex);
+ }
return 0;
err_add:
- mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6);
+ mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+ MLX4_PROT_IB_IPV6, reg_id);
+err_malloc:
+ kfree(ib_steering);
+
return err;
}
@@ -765,9 +792,30 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
u8 mac[6];
struct net_device *ndev;
struct mlx4_ib_gid_entry *ge;
+ u64 reg_id = 0;
+
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ struct mlx4_ib_steering *ib_steering;
- err = mlx4_multicast_detach(mdev->dev,
- &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6);
+ mutex_lock(&mqp->mutex);
+ list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
+ if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
+ list_del(&ib_steering->list);
+ break;
+ }
+ }
+ mutex_unlock(&mqp->mutex);
+ if (&ib_steering->list == &mqp->steering_rules) {
+ pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
+ return -EINVAL;
+ }
+ reg_id = ib_steering->reg_id;
+ kfree(ib_steering);
+ }
+
+ err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+ MLX4_PROT_IB_IPV6, reg_id);
if (err)
return err;
@@ -1084,12 +1132,9 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
int total_eqs = 0;
int i, j, eq;
- /* Init eq table */
- ibdev->eq_table = NULL;
- ibdev->eq_added = 0;
-
- /* Legacy mode? */
- if (dev->caps.comp_pool == 0)
+ /* Legacy mode or comp_pool is not large enough */
+ if (dev->caps.comp_pool == 0 ||
+ dev->caps.num_ports > dev->caps.comp_pool)
return;
eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
@@ -1114,7 +1159,8 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
sprintf(name, "mlx4-ib-%d-%d@%s",
i, j, dev->pdev->bus->name);
/* Set IRQ for specific name (per ring) */
- if (mlx4_assign_eq(dev, name, &ibdev->eq_table[eq])) {
+ if (mlx4_assign_eq(dev, name, NULL,
+ &ibdev->eq_table[eq])) {
/* Use legacy (same as mlx4_en driver) */
pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
ibdev->eq_table[eq] =
@@ -1135,7 +1181,10 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
{
int i;
- int total_eqs;
+
+ /* no additional eqs were added */
+ if (!ibdev->eq_table)
+ return;
/* Reset the advertised EQ number */
ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
@@ -1148,12 +1197,7 @@ static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
mlx4_release_eq(dev, ibdev->eq_table[i]);
}
- total_eqs = dev->caps.num_comp_vectors + ibdev->eq_added;
- memset(ibdev->eq_table, 0, total_eqs * sizeof(int));
kfree(ibdev->eq_table);
-
- ibdev->eq_table = NULL;
- ibdev->eq_added = 0;
}
static void *mlx4_ib_add(struct mlx4_dev *dev)
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e62297c..42df4f7 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -44,6 +44,14 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
+enum {
+ MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
+ MLX4_IB_MAX_HEADROOM = 2048
+};
+
+#define MLX4_IB_SQ_HEADROOM(shift) ((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)
+#define MLX4_IB_SQ_MAX_SPARE (MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT))
+
struct mlx4_ib_ucontext {
struct ib_ucontext ibucontext;
struct mlx4_uar uar;
@@ -155,6 +163,7 @@ struct mlx4_ib_qp {
u8 state;
int mlx_type;
struct list_head gid_list;
+ struct list_head steering_rules;
};
struct mlx4_ib_srq {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index ceb3332..6af19f6 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -310,8 +310,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
int is_user, int has_rq, struct mlx4_ib_qp *qp)
{
/* Sanity check RQ size before proceeding */
- if (cap->max_recv_wr > dev->dev->caps.max_wqes ||
- cap->max_recv_sge > dev->dev->caps.max_rq_sg)
+ if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
+ cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
return -EINVAL;
if (!has_rq) {
@@ -329,8 +329,17 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
}
- cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt;
- cap->max_recv_sge = qp->rq.max_gs;
+ /* leave userspace return values as they were, so as not to break ABI */
+ if (is_user) {
+ cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt;
+ cap->max_recv_sge = qp->rq.max_gs;
+ } else {
+ cap->max_recv_wr = qp->rq.max_post =
+ min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
+ cap->max_recv_sge = min(qp->rq.max_gs,
+ min(dev->dev->caps.max_sq_sg,
+ dev->dev->caps.max_rq_sg));
+ }
return 0;
}
@@ -341,8 +350,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
int s;
/* Sanity check SQ size before proceeding */
- if (cap->max_send_wr > dev->dev->caps.max_wqes ||
- cap->max_send_sge > dev->dev->caps.max_sq_sg ||
+ if (cap->max_send_wr > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
+ cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
return -EINVAL;
@@ -486,6 +495,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
INIT_LIST_HEAD(&qp->gid_list);
+ INIT_LIST_HEAD(&qp->steering_rules);
qp->state = IB_QPS_RESET;
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
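
The mlx4_ib change above records one mlx4_ib_steering entry per attached multicast GID so the firmware reg_id returned by mlx4_multicast_attach() can be recovered at detach time. A hedged, self-contained sketch of that bookkeeping, with example_* names standing in for the driver's structures:

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct example_rule {
		struct list_head list;
		u64 reg_id;
		u8 gid[16];
	};

	static int example_attach(struct list_head *rules, const u8 *gid, u64 reg_id)
	{
		struct example_rule *r = kmalloc(sizeof(*r), GFP_KERNEL);

		if (!r)
			return -ENOMEM;
		memcpy(r->gid, gid, 16);
		r->reg_id = reg_id;		/* remember the firmware handle */
		list_add(&r->list, rules);
		return 0;
	}

	static int example_detach(struct list_head *rules, const u8 *gid, u64 *reg_id)
	{
		struct example_rule *r;

		list_for_each_entry(r, rules, list) {
			if (!memcmp(r->gid, gid, 16)) {
				*reg_id = r->reg_id;
				list_del(&r->list);
				kfree(r);
				return 0;
			}
		}
		return -EINVAL;			/* no rule recorded for this GID */
	}
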
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 85a69c9..48970af 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -61,6 +61,7 @@ struct ocrdma_dev_attr {
u32 max_inline_data;
int max_send_sge;
int max_recv_sge;
+ int max_srq_sge;
int max_mr;
u64 max_mr_size;
u32 max_num_mr_pbl;
@@ -231,7 +232,6 @@ struct ocrdma_qp_hwq_info {
u32 entry_size;
u32 max_cnt;
u32 max_wqe_idx;
- u32 free_delta;
u16 dbid; /* qid, where to ring the doorbell. */
u32 len;
dma_addr_t pa;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
index a411a4e..517ab20 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
@@ -101,8 +101,6 @@ struct ocrdma_create_qp_uresp {
u32 rsvd1;
u32 num_wqe_allocated;
u32 num_rqe_allocated;
- u32 free_wqe_delta;
- u32 free_rqe_delta;
u32 db_sq_offset;
u32 db_rq_offset;
u32 db_shift;
@@ -126,8 +124,7 @@ struct ocrdma_create_srq_uresp {
u32 db_rq_offset;
u32 db_shift;
- u32 free_rqe_delta;
- u32 rsvd2;
+ u64 rsvd2;
u64 rsvd3;
} __packed;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 9b204b1..71942af 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -732,7 +732,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
break;
case OCRDMA_SRQ_LIMIT_EVENT:
ib_evt.element.srq = &qp->srq->ibsrq;
- ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;
srq_event = 1;
qp_event = 0;
break;
@@ -990,8 +990,6 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
struct ocrdma_dev_attr *attr,
struct ocrdma_mbx_query_config *rsp)
{
- int max_q_mem;
-
attr->max_pd =
(rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
@@ -1004,6 +1002,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_recv_sge = (rsp->max_write_send_sge &
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
+ attr->max_srq_sge = (rsp->max_srq_rqe_sge &
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
@@ -1037,18 +1038,15 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_inline_data =
attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
sizeof(struct ocrdma_sge));
- max_q_mem = OCRDMA_Q_PAGE_BASE_SIZE << (OCRDMA_MAX_Q_PAGE_SIZE_CNT - 1);
- /* hw can queue one less then the configured size,
- * so publish less by one to stack.
- */
if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
- dev->attr.max_wqe = max_q_mem / dev->attr.wqe_size;
attr->ird = 1;
attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
- } else
- dev->attr.max_wqe = (max_q_mem / dev->attr.wqe_size) - 1;
- dev->attr.max_rqe = (max_q_mem / dev->attr.rqe_size) - 1;
+ }
+ dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
+ OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
+ dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
+ OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
}
static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
@@ -1990,19 +1988,12 @@ static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
max_wqe_allocated = 1 << max_wqe_allocated;
max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);
- if (qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
- qp->sq.free_delta = 0;
- qp->rq.free_delta = 1;
- } else
- qp->sq.free_delta = 1;
-
qp->sq.max_cnt = max_wqe_allocated;
qp->sq.max_wqe_idx = max_wqe_allocated - 1;
if (!attrs->srq) {
qp->rq.max_cnt = max_rqe_allocated;
qp->rq.max_wqe_idx = max_rqe_allocated - 1;
- qp->rq.free_delta = 1;
}
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index a20d16e..b050e62 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -26,7 +26,6 @@
*******************************************************************/
#include <linux/module.h>
-#include <linux/version.h>
#include <linux/idr.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
@@ -98,13 +97,11 @@ static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr,
sgid->raw[15] = mac_addr[5];
}
-static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
+static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
bool is_vlan, u16 vlan_id)
{
int i;
- bool found = false;
union ib_gid new_sgid;
- int free_idx = OCRDMA_MAX_SGID;
unsigned long flags;
memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));
@@ -116,23 +113,19 @@ static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
sizeof(union ib_gid))) {
/* found free entry */
- if (!found) {
- free_idx = i;
- found = true;
- break;
- }
+ memcpy(&dev->sgid_tbl[i], &new_sgid,
+ sizeof(union ib_gid));
+ spin_unlock_irqrestore(&dev->sgid_lock, flags);
+ return true;
} else if (!memcmp(&dev->sgid_tbl[i], &new_sgid,
sizeof(union ib_gid))) {
/* entry already present, no addition is required. */
spin_unlock_irqrestore(&dev->sgid_lock, flags);
- return;
+ return false;
}
}
- /* if entry doesn't exist and if table has some space, add entry */
- if (found)
- memcpy(&dev->sgid_tbl[free_idx], &new_sgid,
- sizeof(union ib_gid));
spin_unlock_irqrestore(&dev->sgid_lock, flags);
+ return false;
}
static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
@@ -168,7 +161,8 @@ static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
ocrdma_get_guid(dev, &sgid->raw[8]);
}
-static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
{
struct net_device *netdev, *tmp;
u16 vlan_id;
@@ -176,8 +170,6 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
netdev = dev->nic_info.netdev;
- ocrdma_add_default_sgid(dev);
-
rcu_read_lock();
for_each_netdev_rcu(&init_net, tmp) {
if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) {
@@ -195,10 +187,23 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
}
}
rcu_read_unlock();
+}
+#else
+static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
+{
+
+}
+#endif /* VLAN */
+
+static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
+{
+ ocrdma_add_default_sgid(dev);
+ ocrdma_add_vlan_sgids(dev);
return 0;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) || \
+defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
static int ocrdma_inet6addr_event(struct notifier_block *notifier,
unsigned long event, void *ptr)
@@ -209,6 +214,7 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
struct ib_event gid_event;
struct ocrdma_dev *dev;
bool found = false;
+ bool updated = false;
bool is_vlan = false;
u16 vid = 0;
@@ -234,23 +240,21 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
mutex_lock(&dev->dev_lock);
switch (event) {
case NETDEV_UP:
- ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
+ updated = ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
break;
case NETDEV_DOWN:
- found = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
- if (found) {
- /* found the matching entry, notify
- * the consumers about it
- */
- gid_event.device = &dev->ibdev;
- gid_event.element.port_num = 1;
- gid_event.event = IB_EVENT_GID_CHANGE;
- ib_dispatch_event(&gid_event);
- }
+ updated = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
break;
default:
break;
}
+ if (updated) {
+ /* GID table updated, notify the consumers about it */
+ gid_event.device = &dev->ibdev;
+ gid_event.element.port_num = 1;
+ gid_event.event = IB_EVENT_GID_CHANGE;
+ ib_dispatch_event(&gid_event);
+ }
mutex_unlock(&dev->dev_lock);
return NOTIFY_OK;
}
@@ -259,7 +263,7 @@ static struct notifier_block ocrdma_inet6addr_notifier = {
.notifier_call = ocrdma_inet6addr_event
};
-#endif /* IPV6 */
+#endif /* IPV6 and VLAN */
static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
u8 port_num)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 7fd80cc..c75cbdf 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -418,6 +418,9 @@ enum {
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0,
OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF,
+ OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT = 16,
+ OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK = 0xFFFF <<
+ OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT,
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0,
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF,
@@ -458,7 +461,7 @@ enum {
OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET,
OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET = 0,
OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK = 0xFFFF <<
- OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET,
+ OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET,
OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET = 16,
OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK = 0xFFFF <<
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index e9f74d1..2e2e7ae 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
dev = get_ocrdma_dev(ibdev);
memset(sgid, 0, sizeof(*sgid));
- if (index > OCRDMA_MAX_SGID)
+ if (index >= OCRDMA_MAX_SGID)
return -EINVAL;
memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
@@ -83,8 +83,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
IB_DEVICE_SHUTDOWN_PORT |
IB_DEVICE_SYS_IMAGE_GUID |
IB_DEVICE_LOCAL_DMA_LKEY;
- attr->max_sge = dev->attr.max_send_sge;
- attr->max_sge_rd = dev->attr.max_send_sge;
+ attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
+ attr->max_sge_rd = 0;
attr->max_cq = dev->attr.max_cq;
attr->max_cqe = dev->attr.max_cqe;
attr->max_mr = dev->attr.max_mr;
@@ -97,7 +97,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
attr->max_srq = (dev->attr.max_qp - 1);
- attr->max_srq_sge = attr->max_sge;
+ attr->max_srq_sge = dev->attr.max_srq_sge;
attr->max_srq_wr = dev->attr.max_rqe;
attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
attr->max_fast_reg_page_list_len = 0;
@@ -940,8 +940,6 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
uresp.db_shift = 16;
}
- uresp.free_wqe_delta = qp->sq.free_delta;
- uresp.free_rqe_delta = qp->rq.free_delta;
if (qp->dpp_enabled) {
uresp.dpp_credit = dpp_credit_lmt;
@@ -1307,8 +1305,6 @@ static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
free_cnt = (q->max_cnt - q->head) + q->tail;
else
free_cnt = q->tail - q->head;
- if (q->free_delta)
- free_cnt -= q->free_delta;
return free_cnt;
}
@@ -1501,7 +1497,6 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
(srq->pd->id * srq->dev->nic_info.db_page_size);
uresp.db_page_size = srq->dev->nic_info.db_page_size;
uresp.num_rqe_allocated = srq->rq.max_cnt;
- uresp.free_rqe_delta = 1;
if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET;
uresp.db_shift = 24;
@@ -2306,8 +2301,10 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
*stop = true;
expand = false;
}
- } else
+ } else {
+ *polled = true;
expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
+ }
return expand;
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index e648343..633f03d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -28,7 +28,6 @@
#ifndef __OCRDMA_VERBS_H__
#define __OCRDMA_VERBS_H__
-#include <linux/version.h>
int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *,
struct ib_send_wr **bad_wr);
int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *,
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 014504d..1ca7322 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1397,7 +1397,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
int e = skb_queue_empty(&priv->cm.skb_queue);
if (skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
skb_queue_tail(&priv->cm.skb_queue, skb);
if (e)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5c1bc99..f10221f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -123,7 +123,7 @@ static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
skb_frag_size_set(frag, size);
skb->data_len += size;
- skb->truesize += size;
+ skb->truesize += PAGE_SIZE;
} else
skb_put(skb, length);
@@ -156,14 +156,18 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
int buf_size;
+ int tailroom;
u64 *mapping;
- if (ipoib_ud_need_sg(priv->max_ib_mtu))
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
buf_size = IPOIB_UD_HEAD_SIZE;
- else
+ tailroom = 128; /* reserve some tailroom for IP/TCP headers */
+ } else {
buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+ tailroom = 0;
+ }
- skb = dev_alloc_skb(buf_size + 4);
+ skb = dev_alloc_skb(buf_size + tailroom + 4);
if (unlikely(!skb))
return NULL;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 3974c29..bbee4b2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -715,7 +715,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
rcu_read_lock();
if (likely(skb_dst(skb))) {
- n = dst_get_neighbour_noref(skb_dst(skb));
+ n = dst_neigh_lookup_skb(skb_dst(skb), skb);
if (!n) {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
@@ -797,6 +797,8 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
unlock:
+ if (n)
+ neigh_release(n);
rcu_read_unlock();
return NETDEV_TX_OK;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 20ebc6f..7cecb16 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -658,9 +658,15 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct dst_entry *dst = skb_dst(skb);
struct ipoib_mcast *mcast;
+ struct neighbour *n;
unsigned long flags;
+ n = NULL;
+ if (dst)
+ n = dst_neigh_lookup_skb(dst, skb);
+
spin_lock_irqsave(&priv->lock, flags);
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
@@ -715,29 +721,28 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
out:
if (mcast && mcast->ah) {
- struct dst_entry *dst = skb_dst(skb);
- struct neighbour *n = NULL;
-
- rcu_read_lock();
- if (dst)
- n = dst_get_neighbour_noref(dst);
- if (n && !*to_ipoib_neigh(n)) {
- struct ipoib_neigh *neigh = ipoib_neigh_alloc(n,
- skb->dev);
-
- if (neigh) {
- kref_get(&mcast->ah->ref);
- neigh->ah = mcast->ah;
- list_add_tail(&neigh->list, &mcast->neigh_list);
+ if (n) {
+ if (!*to_ipoib_neigh(n)) {
+ struct ipoib_neigh *neigh;
+
+ neigh = ipoib_neigh_alloc(n, skb->dev);
+ if (neigh) {
+ kref_get(&mcast->ah->ref);
+ neigh->ah = mcast->ah;
+ list_add_tail(&neigh->list,
+ &mcast->neigh_list);
+ }
}
+ neigh_release(n);
}
- rcu_read_unlock();
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
return;
}
unlock:
+ if (n)
+ neigh_release(n);
spin_unlock_irqrestore(&priv->lock, flags);
}
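
The ipoib hunks above replace dst_get_neighbour_noref(), which returned an unreferenced neighbour under RCU, with dst_neigh_lookup_skb(), which takes a reference that every exit path must drop with neigh_release(). A small sketch of that lookup/release discipline; handle_neigh() is a stand-in for whatever the real transmit path does with the neighbour:

	#include <linux/skbuff.h>
	#include <net/dst.h>
	#include <net/neighbour.h>

	/* Placeholder for the driver-specific use of the neighbour. */
	static void handle_neigh(struct neighbour *n, struct sk_buff *skb)
	{
	}

	static void example_tx_path(struct sk_buff *skb)
	{
		struct dst_entry *dst = skb_dst(skb);
		struct neighbour *n = NULL;

		if (dst)
			n = dst_neigh_lookup_skb(dst, skb);	/* takes a reference */
		if (!n)
			return;

		handle_neigh(n, skb);
		neigh_release(n);				/* pairs with the lookup */
	}
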
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 5f6b7f6..7a0ce8d 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1377,10 +1377,14 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
break;
case SRPT_STATE_NEED_DATA:
/* DMA_TO_DEVICE (write) - RDMA read error. */
+
+ /* XXX(hch): this is a horrible layering violation.. */
spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
+ ioctx->cmd.transport_state &= ~CMD_T_ACTIVE;
spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
- transport_generic_handle_data(&ioctx->cmd);
+
+ complete(&ioctx->cmd.transport_lun_stop_comp);
break;
case SRPT_STATE_CMD_RSP_SENT:
/*
@@ -1463,9 +1467,10 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
/**
* srpt_handle_rdma_comp() - Process an IB RDMA completion notification.
*
- * Note: transport_generic_handle_data() is asynchronous so unmapping the
- * data that has been transferred via IB RDMA must be postponed until the
- * check_stop_free() callback.
+ * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
+ * the data that has been transferred via IB RDMA had to be postponed until the
+ * check_stop_free() callback. None of this is necessary anymore and needs to
+ * be cleaned up.
*/
static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
struct srpt_send_ioctx *ioctx,
@@ -1477,7 +1482,7 @@ static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
if (opcode == SRPT_RDMA_READ_LAST) {
if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
SRPT_STATE_DATA_IN))
- transport_generic_handle_data(&ioctx->cmd);
+ target_execute_cmd(&ioctx->cmd);
else
printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__,
__LINE__, srpt_get_cmd_state(ioctx));
diff --git a/drivers/input/joystick/as5011.c b/drivers/input/joystick/as5011.c
index 3063464..c96653b 100644
--- a/drivers/input/joystick/as5011.c
+++ b/drivers/input/joystick/as5011.c
@@ -231,6 +231,7 @@ static int __devinit as5011_probe(struct i2c_client *client,
}
if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_NOSTART |
I2C_FUNC_PROTOCOL_MANGLING)) {
dev_err(&client->dev,
"need i2c bus that supports protocol mangling\n");
@@ -281,7 +282,8 @@ static int __devinit as5011_probe(struct i2c_client *client,
error = request_threaded_irq(as5011->button_irq,
NULL, as5011_button_interrupt,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"as5011_button", as5011);
if (error < 0) {
dev_err(&client->dev,
@@ -295,7 +297,7 @@ static int __devinit as5011_probe(struct i2c_client *client,
error = request_threaded_irq(as5011->axis_irq, NULL,
as5011_axis_interrupt,
- plat_data->axis_irqflags,
+ plat_data->axis_irqflags | IRQF_ONESHOT,
"as5011_joystick", as5011);
if (error) {
dev_err(&client->dev,
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index ee16fb6..83811e4 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -142,6 +142,7 @@ static const struct xpad_device {
{ 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
{ 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
{ 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
+ { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
{ 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
@@ -164,6 +165,7 @@ static const struct xpad_device {
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
};
@@ -238,12 +240,14 @@ static struct usb_device_id xpad_table [] = {
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
+ { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
- XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
+ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
+ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
{ }
};
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index 64a0ca4..0d77f6c 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -178,7 +178,8 @@ static int __devinit mcs_touchkey_probe(struct i2c_client *client,
}
error = request_threaded_irq(client->irq, NULL, mcs_touchkey_interrupt,
- IRQF_TRIGGER_FALLING, client->dev.driver->name, data);
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ client->dev.driver->name, data);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
goto err_free_mem;
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index caa218a..7613f1c 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -248,7 +248,7 @@ static int __devinit mpr_touchkey_probe(struct i2c_client *client,
error = request_threaded_irq(client->irq, NULL,
mpr_touchkey_interrupt,
- IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
client->dev.driver->name, mpr121);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index 0b7b2f8..ca68f29 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -201,7 +201,8 @@ static int __devinit qt1070_probe(struct i2c_client *client,
msleep(QT1070_RESET_TIME);
err = request_threaded_irq(client->irq, NULL, qt1070_interrupt,
- IRQF_TRIGGER_NONE, client->dev.driver->name, data);
+ IRQF_TRIGGER_NONE | IRQF_ONESHOT,
+ client->dev.driver->name, data);
if (err) {
dev_err(&client->dev, "fail to request irq\n");
goto err_free_mem;
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index 3afea3f..c355cdd 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -278,7 +278,8 @@ static int __devinit tca6416_keypad_probe(struct i2c_client *client,
error = request_threaded_irq(chip->irqnum, NULL,
tca6416_keys_isr,
- IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
"tca6416-keypad", chip);
if (error) {
dev_dbg(&client->dev,
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 5f87b28..893869b 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -360,7 +360,7 @@ static int __devinit tca8418_keypad_probe(struct i2c_client *client,
client->irq = gpio_to_irq(client->irq);
error = request_threaded_irq(client->irq, NULL, tca8418_irq_handler,
- IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
client->name, keypad_data);
if (error) {
dev_dbg(&client->dev,
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 4ffe64d..2c1c9ed 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -492,7 +492,7 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
unsigned int debounce_cnt;
u32 val = 0;
- clk_enable(kbc->clk);
+ clk_prepare_enable(kbc->clk);
/* Reset the KBC controller to clear all previous status.*/
tegra_periph_reset_assert(kbc->clk);
@@ -556,7 +556,7 @@ static void tegra_kbc_stop(struct tegra_kbc *kbc)
disable_irq(kbc->irq);
del_timer_sync(&kbc->timer);
- clk_disable(kbc->clk);
+ clk_disable_unprepare(kbc->clk);
}
static int tegra_kbc_open(struct input_dev *dev)
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index a4a445f..4c34f21 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -227,15 +227,15 @@ static int __devinit keypad_probe(struct platform_device *pdev)
goto error_clk;
}
- error = request_threaded_irq(kp->irq_press, NULL, keypad_irq, 0,
- dev_name(dev), kp);
+ error = request_threaded_irq(kp->irq_press, NULL, keypad_irq,
+ IRQF_ONESHOT, dev_name(dev), kp);
if (error < 0) {
dev_err(kp->dev, "Could not allocate keypad press key irq\n");
goto error_irq_press;
}
- error = request_threaded_irq(kp->irq_release, NULL, keypad_irq, 0,
- dev_name(dev), kp);
+ error = request_threaded_irq(kp->irq_release, NULL, keypad_irq,
+ IRQF_ONESHOT, dev_name(dev), kp);
if (error < 0) {
dev_err(kp->dev, "Could not allocate keypad release key irq\n");
goto error_irq_release;
diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c
index 0ac75bb..2e5d5e1 100644
--- a/drivers/input/misc/ad714x.c
+++ b/drivers/input/misc/ad714x.c
@@ -972,6 +972,7 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
struct ad714x_platform_data *plat_data = dev->platform_data;
struct ad714x_chip *ad714x;
void *drv_mem;
+ unsigned long irqflags;
struct ad714x_button_drv *bt_drv;
struct ad714x_slider_drv *sd_drv;
@@ -1162,10 +1163,11 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
alloc_idx++;
}
+ irqflags = plat_data->irqflags ?: IRQF_TRIGGER_FALLING;
+ irqflags |= IRQF_ONESHOT;
+
error = request_threaded_irq(ad714x->irq, NULL, ad714x_interrupt_thread,
- plat_data->irqflags ?
- plat_data->irqflags : IRQF_TRIGGER_FALLING,
- "ad714x_captouch", ad714x);
+ irqflags, "ad714x_captouch", ad714x);
if (error) {
dev_err(dev, "can't allocate irq %d\n", ad714x->irq);
goto err_unreg_dev;
diff --git a/drivers/input/misc/dm355evm_keys.c b/drivers/input/misc/dm355evm_keys.c
index 35083c6..c1313d8 100644
--- a/drivers/input/misc/dm355evm_keys.c
+++ b/drivers/input/misc/dm355evm_keys.c
@@ -213,7 +213,8 @@ static int __devinit dm355evm_keys_probe(struct platform_device *pdev)
/* REVISIT: flush the event queue? */
status = request_threaded_irq(keys->irq, NULL, dm355evm_keys_irq,
- IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), keys);
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(&pdev->dev), keys);
if (status < 0)
goto fail2;
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 2cf681d..d528c23 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -79,6 +79,10 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252
#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253
#define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254
+/* MacbookPro10,1 (unibody, June 2012) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264
#define BCM5974_DEVICE(prod) { \
.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
@@ -128,6 +132,10 @@ static const struct usb_device_id bcm5974_table[] = {
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
+ /* MacbookPro10,1 */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
/* Terminating entry */
{}
};
@@ -354,6 +362,18 @@ static const struct bcm5974_config bcm5974_config_table[] = {
{ DIM_X, DIM_X / SN_COORD, -4620, 5140 },
{ DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
},
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING7_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
+ HAS_INTEGRATED_BUTTON,
+ 0x84, sizeof(struct bt_data),
+ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+ { DIM_X, DIM_X / SN_COORD, -4750, 5280 },
+ { DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
+ },
{}
};
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index cad5602..8b31473 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -216,7 +216,7 @@ static void wacom_retrieve_report_data(struct usb_interface *intf,
rep_data[0] = 12;
result = wacom_get_report(intf, WAC_HID_FEATURE_REPORT,
- rep_data[0], &rep_data, 2,
+ rep_data[0], rep_data, 2,
WAC_MSG_RETRIES);
if (result >= 0 && rep_data[1] > 2)
@@ -401,7 +401,9 @@ static int wacom_parse_hid(struct usb_interface *intf,
break;
case HID_USAGE_CONTACTMAX:
- wacom_retrieve_report_data(intf, features);
+ /* leave touch_max as is if predefined */
+ if (!features->touch_max)
+ wacom_retrieve_report_data(intf, features);
i++;
break;
}
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index e2482b4..bd4eb42 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -597,7 +597,7 @@ struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
AD7879_TMR(ts->pen_down_acc_interval);
err = request_threaded_irq(ts->irq, NULL, ad7879_irq,
- IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
dev_name(dev), ts);
if (err) {
dev_err(dev, "irq %d busy?\n", ts->irq);
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 42e6450..25fd056 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1149,7 +1149,8 @@ static int __devinit mxt_probe(struct i2c_client *client,
goto err_free_object;
error = request_threaded_irq(client->irq, NULL, mxt_interrupt,
- pdata->irqflags, client->dev.driver->name, data);
+ pdata->irqflags | IRQF_ONESHOT,
+ client->dev.driver->name, data);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
goto err_free_object;
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index f2d03c0..5c487d2 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -509,7 +509,8 @@ static int __devinit bu21013_probe(struct i2c_client *client,
input_set_drvdata(in_dev, bu21013_data);
error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
- IRQF_TRIGGER_FALLING | IRQF_SHARED,
+ IRQF_TRIGGER_FALLING | IRQF_SHARED |
+ IRQF_ONESHOT,
DRIVER_TP, bu21013_data);
if (error) {
dev_err(&client->dev, "request irq %d failed\n", pdata->irq);
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index 237753a..464f1bf 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -251,7 +251,8 @@ static int __devinit cy8ctmg110_probe(struct i2c_client *client,
}
err = request_threaded_irq(client->irq, NULL, cy8ctmg110_irq_thread,
- IRQF_TRIGGER_RISING, "touch_reset_key", ts);
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "touch_reset_key", ts);
if (err < 0) {
dev_err(&client->dev,
"irq %d busy? error %d\n", client->irq, err);
diff --git a/drivers/input/touchscreen/intel-mid-touch.c b/drivers/input/touchscreen/intel-mid-touch.c
index 3cd7a83..cf29937 100644
--- a/drivers/input/touchscreen/intel-mid-touch.c
+++ b/drivers/input/touchscreen/intel-mid-touch.c
@@ -620,7 +620,7 @@ static int __devinit mrstouch_probe(struct platform_device *pdev)
MRST_PRESSURE_MIN, MRST_PRESSURE_MAX, 0, 0);
err = request_threaded_irq(tsdev->irq, NULL, mrstouch_pendet_irq,
- 0, "mrstouch", tsdev);
+ IRQF_ONESHOT, "mrstouch", tsdev);
if (err) {
dev_err(tsdev->dev, "unable to allocate irq\n");
goto err_free_mem;
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index 72f6ba3..953b4c1 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -165,7 +165,7 @@ static int __devinit pixcir_i2c_ts_probe(struct i2c_client *client,
input_set_drvdata(input, tsdata);
error = request_threaded_irq(client->irq, NULL, pixcir_ts_isr,
- IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
client->name, tsdata);
if (error) {
dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
index 7e74880..368d2c6c 100644
--- a/drivers/input/touchscreen/tnetv107x-ts.c
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -297,7 +297,7 @@ static int __devinit tsc_probe(struct platform_device *pdev)
goto error_clk;
}
- error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, 0,
+ error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, IRQF_ONESHOT,
dev_name(dev), ts);
if (error < 0) {
dev_err(ts->dev, "Could not allocate ts irq\n");
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index b6adeae..5ce3fa8 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -650,7 +650,8 @@ static int __devinit tsc2005_probe(struct spi_device *spi)
tsc2005_stop_scan(ts);
error = request_threaded_irq(spi->irq, NULL, tsc2005_irq_thread,
- IRQF_TRIGGER_RISING, "tsc2005", ts);
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "tsc2005", ts);
if (error) {
dev_err(&spi->dev, "Failed to request irq, err: %d\n", error);
goto err_free_mem;
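
All of the input-driver hunks above add IRQF_ONESHOT to request_threaded_irq() calls that pass a NULL primary handler: the IRQ core keeps the line masked until the threaded handler finishes and, as of this series, rejects such requests without the flag. A minimal illustration of the pattern (device name and handler body are placeholders):

	#include <linux/interrupt.h>

	static irqreturn_t example_irq_thread(int irq, void *dev_id)
	{
		/* slow work (I2C/SPI transfers etc.) runs here, in process context */
		return IRQ_HANDLED;
	}

	static int example_request(int irq, void *priv)
	{
		/*
		 * No primary handler: the core keeps the line masked until the
		 * thread completes, which is only safe with IRQF_ONESHOT set.
		 */
		return request_threaded_irq(irq, NULL, example_irq_thread,
					    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					    "example-dev", priv);
	}
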
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a5bee8e..6256263 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -83,6 +83,8 @@ static struct iommu_ops amd_iommu_ops;
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;
+static struct dma_map_ops amd_iommu_dma_ops;
+
/*
* general struct to manage commands send to an IOMMU
*/
@@ -402,7 +404,7 @@ static void amd_iommu_stats_init(void)
return;
de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
- (u32 *)&amd_iommu_unmap_flush);
+ &amd_iommu_unmap_flush);
amd_iommu_stats_add(&compl_wait);
amd_iommu_stats_add(&cnt_map_single);
@@ -450,12 +452,27 @@ static void dump_command(unsigned long phys_addr)
static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
- u32 *event = __evt;
- int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
- int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
- int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
- int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
- u64 address = (u64)(((u64)event[3]) << 32) | event[2];
+ int type, devid, domid, flags;
+ volatile u32 *event = __evt;
+ int count = 0;
+ u64 address;
+
+retry:
+ type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
+ devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+ domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
+ flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+ address = (u64)(((u64)event[3]) << 32) | event[2];
+
+ if (type == 0) {
+ /* Did we hit the erratum? */
+ if (++count == LOOP_TIMEOUT) {
+ pr_err("AMD-Vi: No event written to event log\n");
+ return;
+ }
+ udelay(1);
+ goto retry;
+ }
printk(KERN_ERR "AMD-Vi: Event logged [");
@@ -508,6 +525,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
default:
printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
}
+
+ memset(__evt, 0, 4 * sizeof(u32));
}
static void iommu_poll_events(struct amd_iommu *iommu)
@@ -530,26 +549,12 @@ static void iommu_poll_events(struct amd_iommu *iommu)
spin_unlock_irqrestore(&iommu->lock, flags);
}
-static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
+static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
{
struct amd_iommu_fault fault;
- volatile u64 *raw;
- int i;
INC_STATS_COUNTER(pri_requests);
- raw = (u64 *)(iommu->ppr_log + head);
-
- /*
- * Hardware bug: Interrupt may arrive before the entry is written to
- * memory. If this happens we need to wait for the entry to arrive.
- */
- for (i = 0; i < LOOP_TIMEOUT; ++i) {
- if (PPR_REQ_TYPE(raw[0]) != 0)
- break;
- udelay(1);
- }
-
if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
return;
@@ -561,12 +566,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
fault.tag = PPR_TAG(raw[0]);
fault.flags = PPR_FLAGS(raw[0]);
- /*
- * To detect the hardware bug we need to clear the entry
- * to back to zero.
- */
- raw[0] = raw[1] = 0;
-
atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
}
@@ -578,25 +577,62 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
if (iommu->ppr_log == NULL)
return;
+ /* enable ppr interrupts again */
+ writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
spin_lock_irqsave(&iommu->lock, flags);
head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
while (head != tail) {
+ volatile u64 *raw;
+ u64 entry[2];
+ int i;
- /* Handle PPR entry */
- iommu_handle_ppr_entry(iommu, head);
+ raw = (u64 *)(iommu->ppr_log + head);
+
+ /*
+ * Hardware bug: Interrupt may arrive before the entry is
+ * written to memory. If this happens we need to wait for the
+ * entry to arrive.
+ */
+ for (i = 0; i < LOOP_TIMEOUT; ++i) {
+ if (PPR_REQ_TYPE(raw[0]) != 0)
+ break;
+ udelay(1);
+ }
+
+ /* Avoid memcpy function-call overhead */
+ entry[0] = raw[0];
+ entry[1] = raw[1];
+
+ /*
+ * To detect the hardware bug we need to clear the entry
+ * back to zero.
+ */
+ raw[0] = raw[1] = 0UL;
- /* Update and refresh ring-buffer state*/
+ /* Update head pointer of hardware ring-buffer */
head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+
+ /*
+ * Release iommu->lock because ppr-handling might need to
+	 * re-acquire it
+ */
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ /* Handle PPR entry */
+ iommu_handle_ppr_entry(iommu, entry);
+
+ spin_lock_irqsave(&iommu->lock, flags);
+
+ /* Refresh ring-buffer information */
+ head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
}
- /* enable ppr interrupts again */
- writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
spin_unlock_irqrestore(&iommu->lock, flags);
}
@@ -2035,20 +2071,20 @@ out_err:
}
/* FIXME: Move this to PCI code */
-#define PCI_PRI_TLP_OFF (1 << 2)
+#define PCI_PRI_TLP_OFF (1 << 15)
bool pci_pri_tlp_required(struct pci_dev *pdev)
{
- u16 control;
+ u16 status;
int pos;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return false;
- pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
- return (control & PCI_PRI_TLP_OFF) ? true : false;
+ return (status & PCI_PRI_TLP_OFF) ? true : false;
}
/*
@@ -2233,6 +2269,13 @@ static int device_change_notifier(struct notifier_block *nb,
list_add_tail(&dma_domain->list, &iommu_pd_list);
spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+ dev_data = get_dev_data(dev);
+
+ if (!dev_data->passthrough)
+ dev->archdata.dma_ops = &amd_iommu_dma_ops;
+ else
+ dev->archdata.dma_ops = &nommu_dma_ops;
+
break;
case BUS_NOTIFY_DEL_DEVICE:
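
The amd_iommu.c hunks above all deal with the same erratum: the event/PPR interrupt can fire before the log entry has actually reached memory, so the driver now polls the entry for a bounded number of iterations, copies it out, clears it back to zero so the next occurrence stays detectable, and drops iommu->lock around the handler because the notifier chain may need to re-acquire it. Below is a minimal user-space sketch of that bounded poll-then-consume pattern; the entry layout, the LOOP_TIMEOUT value and the helper names are invented for illustration and are not the driver's code.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define LOOP_TIMEOUT	100000	/* assumed bound, mirrors the driver's idea */

/* Shared with a (simulated) producer; volatile mirrors the driver's access. */
static volatile uint64_t log_entry[2];

static int poll_and_consume(void)
{
	uint64_t copy[2];
	int i;

	/* Erratum: the interrupt may arrive before the entry hits memory. */
	for (i = 0; i < LOOP_TIMEOUT; ++i) {
		if (log_entry[0] != 0)
			break;
		usleep(1);
	}
	if (i == LOOP_TIMEOUT)
		return -1;	/* nothing was ever written */

	/* Snapshot first, then clear so the next entry is detectable. */
	copy[0] = log_entry[0];
	copy[1] = log_entry[1];
	log_entry[0] = log_entry[1] = 0;

	printf("consumed entry %016llx %016llx\n",
	       (unsigned long long)copy[0], (unsigned long long)copy[1]);
	return 0;
}

int main(void)
{
	log_entry[0] = 0x1234;	/* pretend the hardware wrote an entry */
	return poll_and_consume() ? 1 : 0;
}
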
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index c567903..a33612f 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -129,7 +129,7 @@ u16 amd_iommu_last_bdf; /* largest PCI device id we have
to handle */
LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
we find in ACPI */
-bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
+u32 amd_iommu_unmap_flush; /* if true, flush on every unmap */
LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
system */
@@ -1029,6 +1029,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
if (!iommu->dev)
return 1;
+ iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
+ PCI_DEVFN(0, 0));
+
iommu->cap_ptr = h->cap_ptr;
iommu->pci_seg = h->pci_seg;
iommu->mmio_phys = h->mmio_phys;
@@ -1323,20 +1326,16 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
int i, j;
u32 ioc_feature_control;
- struct pci_dev *pdev = NULL;
+ struct pci_dev *pdev = iommu->root_pdev;
/* RD890 BIOSes may not have completely reconfigured the iommu */
- if (!is_rd890_iommu(iommu->dev))
+ if (!is_rd890_iommu(iommu->dev) || !pdev)
return;
/*
* First, we need to ensure that the iommu is enabled. This is
* controlled by a register in the northbridge
*/
- pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));
-
- if (!pdev)
- return;
/* Select Northbridge indirect register 0x75 and enable writing */
pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
@@ -1346,8 +1345,6 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
if (!(ioc_feature_control & 0x1))
pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
- pci_dev_put(pdev);
-
/* Restore the iommu BAR */
pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
iommu->stored_addr_lo);
@@ -1644,6 +1641,8 @@ static int __init amd_iommu_init(void)
amd_iommu_init_api();
+ x86_platform.iommu_shutdown = disable_iommus;
+
if (iommu_pass_through)
goto out;
@@ -1652,8 +1651,6 @@ static int __init amd_iommu_init(void)
else
printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
- x86_platform.iommu_shutdown = disable_iommus;
-
out:
return ret;
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 2452f3b..c1b1d48 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -481,6 +481,9 @@ struct amd_iommu {
/* Pointer to PCI device of this IOMMU */
struct pci_dev *dev;
+ /* Cache pdev to root device for resume quirks */
+ struct pci_dev *root_pdev;
+
/* physical address of MMIO space */
u64 mmio_phys;
/* virtual address of MMIO space */
@@ -649,7 +652,7 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
* If true, the addresses will be flushed on unmap time, not when
* they are reused
*/
-extern bool amd_iommu_unmap_flush;
+extern u32 amd_iommu_unmap_flush;
/* Smallest number of PASIDs supported by any IOMMU in the system */
extern u32 amd_iommu_max_pasids;
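
The bool-to-u32 widening of amd_iommu_unmap_flush exists because debugfs_create_bool() in this kernel generation stores through a u32 pointer, so the old (u32 *) cast of a bool's address asked for a 4-byte store into what is usually a 1-byte object. A small stand-alone sketch of the size mismatch and of the fixed arrangement; the variable names are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A bool is normally 1 byte while a u32 store is 4 bytes wide, so
	 * casting &bool_flag to u32 * hands debugfs a store that is larger
	 * than the object it points at. */
	printf("sizeof(bool) = %zu, sizeof(uint32_t) = %zu\n",
	       sizeof(bool), sizeof(uint32_t));

	/* The patch widens the flag itself, so no cast is needed. */
	uint32_t unmap_flush = 0;		/* illustrative name */
	uint32_t *debugfs_view = &unmap_flush;	/* what debugfs would keep */

	*debugfs_view = 1;
	printf("unmap_flush = %u\n", unmap_flush);
	return 0;
}
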
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 3a74e44..86e2f4a 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -26,6 +26,8 @@
* These routines are used by both DMA-remapping and Interrupt-remapping
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */
+
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
@@ -39,8 +41,6 @@
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>
-#define PREFIX "DMAR: "
-
/* No locks are needed as DMA remapping hardware unit
* list is constructed at boot time and hotplug of
* these units are not supported by the architecture.
@@ -83,16 +83,12 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
* ignore it
*/
if (!bus) {
- printk(KERN_WARNING
- PREFIX "Device scope bus [%d] not found\n",
- scope->bus);
+ pr_warn("Device scope bus [%d] not found\n", scope->bus);
break;
}
pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
if (!pdev) {
- printk(KERN_WARNING PREFIX
- "Device scope device [%04x:%02x:%02x.%02x] not found\n",
- segment, bus->number, path->dev, path->fn);
+ /* warning will be printed below */
break;
}
path ++;
@@ -100,9 +96,8 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
bus = pdev->subordinate;
}
if (!pdev) {
- printk(KERN_WARNING PREFIX
- "Device scope device [%04x:%02x:%02x.%02x] not found\n",
- segment, scope->bus, path->dev, path->fn);
+ pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
+ segment, scope->bus, path->dev, path->fn);
*dev = NULL;
return 0;
}
@@ -110,9 +105,8 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
pdev->subordinate) || (scope->entry_type == \
ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
pci_dev_put(pdev);
- printk(KERN_WARNING PREFIX
- "Device scope type does not match for %s\n",
- pci_name(pdev));
+ pr_warn("Device scope type does not match for %s\n",
+ pci_name(pdev));
return -EINVAL;
}
*dev = pdev;
@@ -134,8 +128,7 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
(*cnt)++;
else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
- printk(KERN_WARNING PREFIX
- "Unsupported device scope\n");
+ pr_warn("Unsupported device scope\n");
}
start += scope->length;
}
@@ -261,25 +254,23 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
case ACPI_DMAR_TYPE_HARDWARE_UNIT:
drhd = container_of(header, struct acpi_dmar_hardware_unit,
header);
- printk (KERN_INFO PREFIX
- "DRHD base: %#016Lx flags: %#x\n",
+ pr_info("DRHD base: %#016Lx flags: %#x\n",
(unsigned long long)drhd->address, drhd->flags);
break;
case ACPI_DMAR_TYPE_RESERVED_MEMORY:
rmrr = container_of(header, struct acpi_dmar_reserved_memory,
header);
- printk (KERN_INFO PREFIX
- "RMRR base: %#016Lx end: %#016Lx\n",
+ pr_info("RMRR base: %#016Lx end: %#016Lx\n",
(unsigned long long)rmrr->base_address,
(unsigned long long)rmrr->end_address);
break;
case ACPI_DMAR_TYPE_ATSR:
atsr = container_of(header, struct acpi_dmar_atsr, header);
- printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
+ pr_info("ATSR flags: %#x\n", atsr->flags);
break;
case ACPI_DMAR_HARDWARE_AFFINITY:
rhsa = container_of(header, struct acpi_dmar_rhsa, header);
- printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
+ pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
(unsigned long long)rhsa->base_address,
rhsa->proximity_domain);
break;
@@ -299,7 +290,7 @@ static int __init dmar_table_detect(void)
&dmar_tbl_size);
if (ACPI_SUCCESS(status) && !dmar_tbl) {
- printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
+ pr_warn("Unable to map DMAR\n");
status = AE_NOT_FOUND;
}
@@ -333,20 +324,18 @@ parse_dmar_table(void)
return -ENODEV;
if (dmar->width < PAGE_SHIFT - 1) {
- printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
+ pr_warn("Invalid DMAR haw\n");
return -EINVAL;
}
- printk (KERN_INFO PREFIX "Host address width %d\n",
- dmar->width + 1);
+ pr_info("Host address width %d\n", dmar->width + 1);
entry_header = (struct acpi_dmar_header *)(dmar + 1);
while (((unsigned long)entry_header) <
(((unsigned long)dmar) + dmar_tbl->length)) {
/* Avoid looping forever on bad ACPI tables */
if (entry_header->length == 0) {
- printk(KERN_WARNING PREFIX
- "Invalid 0-length structure\n");
+ pr_warn("Invalid 0-length structure\n");
ret = -EINVAL;
break;
}
@@ -369,8 +358,7 @@ parse_dmar_table(void)
#endif
break;
default:
- printk(KERN_WARNING PREFIX
- "Unknown DMAR structure type %d\n",
+ pr_warn("Unknown DMAR structure type %d\n",
entry_header->type);
ret = 0; /* for forward compatibility */
break;
@@ -469,12 +457,12 @@ int __init dmar_table_init(void)
ret = parse_dmar_table();
if (ret) {
if (ret != -ENODEV)
- printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
+ pr_info("parse DMAR table failure.\n");
return ret;
}
if (list_empty(&dmar_drhd_units)) {
- printk(KERN_INFO PREFIX "No DMAR devices found\n");
+ pr_info("No DMAR devices found\n");
return -ENODEV;
}
@@ -506,8 +494,7 @@ int __init check_zero_address(void)
(((unsigned long)dmar) + dmar_tbl->length)) {
/* Avoid looping forever on bad ACPI tables */
if (entry_header->length == 0) {
- printk(KERN_WARNING PREFIX
- "Invalid 0-length structure\n");
+ pr_warn("Invalid 0-length structure\n");
return 0;
}
@@ -558,8 +545,7 @@ int __init detect_intel_iommu(void)
if (ret && irq_remapping_enabled && cpu_has_x2apic &&
dmar->flags & 0x1)
- printk(KERN_INFO
- "Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
+ pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
iommu_detected = 1;
@@ -579,14 +565,89 @@ int __init detect_intel_iommu(void)
}
+static void unmap_iommu(struct intel_iommu *iommu)
+{
+ iounmap(iommu->reg);
+ release_mem_region(iommu->reg_phys, iommu->reg_size);
+}
+
+/**
+ * map_iommu: map the iommu's registers
+ * @iommu: the iommu to map
+ * @phys_addr: the physical address of the base resgister
+ * @phys_addr: the physical address of the base register
+ * Memory map the iommu's registers. Start w/ a single page, and
+ * Memory map the iommu's registers. Start with a single page, and
+ * possibly expand if that turns out to be insufficient.
+static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
+{
+ int map_size, err=0;
+
+ iommu->reg_phys = phys_addr;
+ iommu->reg_size = VTD_PAGE_SIZE;
+
+ if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
+ pr_err("IOMMU: can't reserve memory\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
+ if (!iommu->reg) {
+ pr_err("IOMMU: can't map the region\n");
+ err = -ENOMEM;
+ goto release;
+ }
+
+ iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
+ iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
+
+ if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
+ err = -EINVAL;
+ warn_invalid_dmar(phys_addr, " returns all ones");
+ goto unmap;
+ }
+
+ /* the registers might be more than one page */
+ map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
+ cap_max_fault_reg_offset(iommu->cap));
+ map_size = VTD_PAGE_ALIGN(map_size);
+ if (map_size > iommu->reg_size) {
+ iounmap(iommu->reg);
+ release_mem_region(iommu->reg_phys, iommu->reg_size);
+ iommu->reg_size = map_size;
+ if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
+ iommu->name)) {
+ pr_err("IOMMU: can't reserve memory\n");
+ err = -EBUSY;
+ goto out;
+ }
+ iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
+ if (!iommu->reg) {
+ pr_err("IOMMU: can't map the region\n");
+ err = -ENOMEM;
+ goto release;
+ }
+ }
+ err = 0;
+ goto out;
+
+unmap:
+ iounmap(iommu->reg);
+release:
+ release_mem_region(iommu->reg_phys, iommu->reg_size);
+out:
+ return err;
+}
+
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
- int map_size;
u32 ver;
static int iommu_allocated = 0;
int agaw = 0;
int msagaw = 0;
+ int err;
if (!drhd->reg_base_addr) {
warn_invalid_dmar(0, "");
@@ -600,30 +661,22 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
iommu->seq_id = iommu_allocated++;
sprintf (iommu->name, "dmar%d", iommu->seq_id);
- iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
- if (!iommu->reg) {
- printk(KERN_ERR "IOMMU: can't map the region\n");
+ err = map_iommu(iommu, drhd->reg_base_addr);
+ if (err) {
+ pr_err("IOMMU: failed to map %s\n", iommu->name);
goto error;
}
- iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
- iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
-
- if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
- warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
- goto err_unmap;
- }
+ err = -EINVAL;
agaw = iommu_calculate_agaw(iommu);
if (agaw < 0) {
- printk(KERN_ERR
- "Cannot get a valid agaw for iommu (seq_id = %d)\n",
- iommu->seq_id);
+ pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
+ iommu->seq_id);
goto err_unmap;
}
msagaw = iommu_calculate_max_sagaw(iommu);
if (msagaw < 0) {
- printk(KERN_ERR
- "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
+ pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
iommu->seq_id);
goto err_unmap;
}
@@ -632,19 +685,6 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
iommu->node = -1;
- /* the registers might be more than one page */
- map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
- cap_max_fault_reg_offset(iommu->cap));
- map_size = VTD_PAGE_ALIGN(map_size);
- if (map_size > VTD_PAGE_SIZE) {
- iounmap(iommu->reg);
- iommu->reg = ioremap(drhd->reg_base_addr, map_size);
- if (!iommu->reg) {
- printk(KERN_ERR "IOMMU: can't map the region\n");
- goto error;
- }
- }
-
ver = readl(iommu->reg + DMAR_VER_REG);
pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
iommu->seq_id,
@@ -659,10 +699,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
return 0;
err_unmap:
- iounmap(iommu->reg);
+ unmap_iommu(iommu);
error:
kfree(iommu);
- return -1;
+ return err;
}
void free_iommu(struct intel_iommu *iommu)
@@ -673,7 +713,8 @@ void free_iommu(struct intel_iommu *iommu)
free_dmar_iommu(iommu);
if (iommu->reg)
- iounmap(iommu->reg);
+ unmap_iommu(iommu);
+
kfree(iommu);
}
@@ -710,7 +751,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
if (fault & DMA_FSTS_IQE) {
head = readl(iommu->reg + DMAR_IQH_REG);
if ((head >> DMAR_IQ_SHIFT) == index) {
- printk(KERN_ERR "VT-d detected invalid descriptor: "
+ pr_err("VT-d detected invalid descriptor: "
"low=%llx, high=%llx\n",
(unsigned long long)qi->desc[index].low,
(unsigned long long)qi->desc[index].high);
@@ -1129,15 +1170,14 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
reason = dmar_get_fault_reason(fault_reason, &fault_type);
if (fault_type == INTR_REMAP)
- printk(KERN_ERR "INTR-REMAP: Request device [[%02x:%02x.%d] "
+ pr_err("INTR-REMAP: Request device [[%02x:%02x.%d] "
"fault index %llx\n"
"INTR-REMAP:[fault reason %02d] %s\n",
(source_id >> 8), PCI_SLOT(source_id & 0xFF),
PCI_FUNC(source_id & 0xFF), addr >> 48,
fault_reason, reason);
else
- printk(KERN_ERR
- "DMAR:[%s] Request device [%02x:%02x.%d] "
+ pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
"fault addr %llx \n"
"DMAR:[fault reason %02d] %s\n",
(type ? "DMA Read" : "DMA Write"),
@@ -1157,8 +1197,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
raw_spin_lock_irqsave(&iommu->register_lock, flag);
fault_status = readl(iommu->reg + DMAR_FSTS_REG);
if (fault_status)
- printk(KERN_ERR "DRHD: handling fault status reg %x\n",
- fault_status);
+ pr_err("DRHD: handling fault status reg %x\n", fault_status);
/* TBD: ignore advanced fault log currently */
if (!(fault_status & DMA_FSTS_PPF))
@@ -1224,7 +1263,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
irq = create_irq();
if (!irq) {
- printk(KERN_ERR "IOMMU: no free vectors\n");
+ pr_err("IOMMU: no free vectors\n");
return -EINVAL;
}
@@ -1241,7 +1280,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
if (ret)
- printk(KERN_ERR "IOMMU: can't request irq\n");
+ pr_err("IOMMU: can't request irq\n");
return ret;
}
@@ -1258,8 +1297,7 @@ int __init enable_drhd_fault_handling(void)
ret = dmar_set_interrupt(iommu);
if (ret) {
- printk(KERN_ERR "DRHD %Lx: failed to enable fault, "
- " interrupt, ret %d\n",
+ pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
(unsigned long long)drhd->reg_base_addr, ret);
return -1;
}
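
The new map_iommu()/unmap_iommu() pair above implements a two-phase mapping: map a single page first, read the capability registers that live in that page to learn the real size of the register window, then release and remap at the full size. A rough user-space analogue of the same idea using a scratch file and mmap(); the file path, the fake "capability" word and the sizes are made up for the sketch.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

#define PAGE_SZ 4096UL

int main(void)
{
	char path[] = "/tmp/regwin-XXXXXX";	/* scratch file standing in for MMIO */
	uint64_t needed = 3 * PAGE_SZ;		/* pretend the device needs 3 pages */
	int fd = mkstemp(path);
	void *win;

	if (fd < 0)
		return 1;
	if (ftruncate(fd, (off_t)needed) < 0 ||
	    pwrite(fd, &needed, sizeof(needed), 0) < 0)
		return 1;

	/* Phase 1: map a single page and read the "capability" word. */
	win = mmap(NULL, PAGE_SZ, PROT_READ, MAP_SHARED, fd, 0);
	if (win == MAP_FAILED)
		return 1;
	uint64_t size = *(uint64_t *)win;

	/* Phase 2: the window is larger than one page, so remap it fully. */
	if (size > PAGE_SZ) {
		munmap(win, PAGE_SZ);
		win = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
		if (win == MAP_FAILED)
			return 1;
	}
	printf("mapped %llu bytes of %s\n", (unsigned long long)size, path);

	munmap(win, size);
	close(fd);
	unlink(path);
	return 0;
}
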
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 6d34706..e0b18f3 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -902,7 +902,6 @@ static int intel_setup_ioapic_entry(int irq,
return 0;
}
-#ifdef CONFIG_SMP
/*
* Migrate the IO-APIC irq in the presence of intr-remapping.
*
@@ -924,6 +923,10 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
struct irq_cfg *cfg = data->chip_data;
unsigned int dest, irq = data->irq;
struct irte irte;
+ int err;
+
+ if (!config_enabled(CONFIG_SMP))
+ return -EINVAL;
if (!cpumask_intersects(mask, cpu_online_mask))
return -EINVAL;
@@ -931,10 +934,16 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
if (get_irte(irq, &irte))
return -EBUSY;
- if (assign_irq_vector(irq, cfg, mask))
- return -EBUSY;
+ err = assign_irq_vector(irq, cfg, mask);
+ if (err)
+ return err;
- dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
+ err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
+ if (err) {
+ if (assign_irq_vector(irq, cfg, data->affinity))
+ pr_err("Failed to recover vector for irq %d\n", irq);
+ return err;
+ }
irte.vector = cfg->vector;
irte.dest_id = IRTE_DEST(dest);
@@ -956,7 +965,6 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
cpumask_copy(data->affinity, mask);
return 0;
}
-#endif
static void intel_compose_msi_msg(struct pci_dev *pdev,
unsigned int irq, unsigned int dest,
@@ -1058,9 +1066,7 @@ struct irq_remap_ops intel_irq_remap_ops = {
.reenable = reenable_irq_remapping,
.enable_faulting = enable_drhd_fault_handling,
.setup_ioapic_entry = intel_setup_ioapic_entry,
-#ifdef CONFIG_SMP
.set_affinity = intel_ioapic_set_affinity,
-#endif
.free_irq = free_irte,
.compose_msi_msg = intel_compose_msi_msg,
.msi_alloc_irq = intel_msi_alloc_irq,
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 2198b2d..8b9ded8 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -119,6 +119,7 @@ EXPORT_SYMBOL_GPL(iommu_present);
* iommu_set_fault_handler() - set a fault handler for an iommu domain
* @domain: iommu domain
* @handler: fault handler
+ * @token: user data, will be passed back to the fault handler
*
* This function should be used by IOMMU users which want to be notified
* whenever an IOMMU fault happens.
@@ -127,11 +128,13 @@ EXPORT_SYMBOL_GPL(iommu_present);
* error code otherwise.
*/
void iommu_set_fault_handler(struct iommu_domain *domain,
- iommu_fault_handler_t handler)
+ iommu_fault_handler_t handler,
+ void *token)
{
BUG_ON(!domain);
domain->handler = handler;
+ domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
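
The added @token argument turns iommu_set_fault_handler() into the usual callback-plus-opaque-context registration: the token is stored next to the handler and handed back verbatim whenever a fault is reported. A self-contained sketch of that pattern; the struct and function names here are invented and are not the IOMMU API.

#include <stdio.h>

struct domain {
	int (*handler)(struct domain *d, unsigned long addr, void *token);
	void *handler_token;
};

static void set_fault_handler(struct domain *d,
			      int (*handler)(struct domain *, unsigned long, void *),
			      void *token)
{
	d->handler = handler;
	d->handler_token = token;	/* kept alongside the handler */
}

struct my_driver { const char *name; };

static int my_fault(struct domain *d, unsigned long addr, void *token)
{
	struct my_driver *drv = token;	/* recover the registrant's context */

	(void)d;
	printf("%s: fault at 0x%lx\n", drv->name, addr);
	return 0;
}

int main(void)
{
	struct domain dom = { 0 };
	struct my_driver drv = { .name = "demo" };

	set_fault_handler(&dom, my_fault, &drv);
	return dom.handler(&dom, 0xdead000, dom.handler_token);
}
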
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 40cda8e..1d29b1c 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -111,16 +111,15 @@ int setup_ioapic_remapped_entry(int irq,
vector, attr);
}
-#ifdef CONFIG_SMP
int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
- if (!remap_ops || !remap_ops->set_affinity)
+ if (!config_enabled(CONFIG_SMP) || !remap_ops ||
+ !remap_ops->set_affinity)
return 0;
return remap_ops->set_affinity(data, mask, force);
}
-#endif
void free_remapped_irq(int irq)
{
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index be9d729..b12974c 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -59,11 +59,9 @@ struct irq_remap_ops {
unsigned int, int,
struct io_apic_irq_attr *);
-#ifdef CONFIG_SMP
/* Set the CPU affinity of a remapped interrupt */
int (*set_affinity)(struct irq_data *data, const struct cpumask *mask,
bool force);
-#endif
/* Free an IRQ */
int (*free_irq)(int);
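
Dropping the #ifdef CONFIG_SMP blocks in favour of a config_enabled() check trades conditional compilation for an always-compiled branch that the optimizer removes when the option is off, so both configurations stay type-checked. A tiny stand-alone illustration of the idea using a stand-in constant, not the kernel macro.

#include <stdio.h>

/* Stand-in for the kernel's config_enabled(): a compile-time constant used
 * in ordinary C, so the disabled branch is still parsed and type-checked
 * but gets discarded by the optimizer instead of hiding behind #ifdef. */
#define SMP_ENABLED 0	/* flip to 1 to take the "SMP" path */

static int set_affinity(int irq, unsigned int mask)
{
	if (!SMP_ENABLED)
		return 0;	/* nothing to do on a UP build */

	/* This path must keep compiling even when the option is off. */
	printf("irq %d -> cpu mask 0x%x\n", irq, mask);
	return 1;
}

int main(void)
{
	set_affinity(10, 0x3);
	return 0;
}
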
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 6899dcd..e70ee2b 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -41,11 +41,13 @@
* @pgtable: the page table
* @iommu_dev: an omap iommu device attached to this domain. only a single
* iommu device can be attached for now.
+ * @dev: Device using this domain.
* @lock: domain lock, should be taken when attaching/detaching
*/
struct omap_iommu_domain {
u32 *pgtable;
struct omap_iommu *iommu_dev;
+ struct device *dev;
spinlock_t lock;
};
@@ -1081,6 +1083,7 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
+ omap_domain->dev = dev;
oiommu->domain = domain;
out:
@@ -1088,19 +1091,16 @@ out:
return ret;
}
-static void omap_iommu_detach_dev(struct iommu_domain *domain,
- struct device *dev)
+static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
+ struct device *dev)
{
- struct omap_iommu_domain *omap_domain = domain->priv;
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
-
- spin_lock(&omap_domain->lock);
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
/* only a single device is supported per domain for now */
if (omap_domain->iommu_dev != oiommu) {
dev_err(dev, "invalid iommu device\n");
- goto out;
+ return;
}
iopgtable_clear_entry_all(oiommu);
@@ -1108,8 +1108,16 @@ static void omap_iommu_detach_dev(struct iommu_domain *domain,
omap_iommu_detach(oiommu);
omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
+ omap_domain->dev = NULL;
+}
-out:
+static void omap_iommu_detach_dev(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct omap_iommu_domain *omap_domain = domain->priv;
+
+ spin_lock(&omap_domain->lock);
+ _omap_iommu_detach_dev(omap_domain, dev);
spin_unlock(&omap_domain->lock);
}
@@ -1148,13 +1156,19 @@ out:
return -ENOMEM;
}
-/* assume device was already detached */
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
struct omap_iommu_domain *omap_domain = domain->priv;
domain->priv = NULL;
+ /*
+ * An iommu device is still attached
+ * (currently, only one device can be attached) ?
+ */
+ if (omap_domain->iommu_dev)
+ _omap_iommu_detach_dev(omap_domain, omap_domain->dev);
+
kfree(omap_domain->pgtable);
kfree(omap_domain);
}
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 779306e..0c0a377 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -29,15 +29,17 @@
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iommu.h>
+#include <linux/of.h>
#include <asm/cacheflush.h>
/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES (SZ_4K)
-#define GART_CONFIG 0x24
-#define GART_ENTRY_ADDR 0x28
-#define GART_ENTRY_DATA 0x2c
+#define GART_REG_BASE 0x24
+#define GART_CONFIG (0x24 - GART_REG_BASE)
+#define GART_ENTRY_ADDR (0x28 - GART_REG_BASE)
+#define GART_ENTRY_DATA (0x2c - GART_REG_BASE)
#define GART_ENTRY_PHYS_ADDR_VALID (1 << 31)
#define GART_PAGE_SHIFT 12
@@ -158,7 +160,7 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
struct gart_client *client, *c;
int err = 0;
- gart = dev_get_drvdata(dev->parent);
+ gart = gart_handle;
if (!gart)
return -EINVAL;
domain->priv = gart;
@@ -422,6 +424,14 @@ const struct dev_pm_ops tegra_gart_pm_ops = {
.resume = tegra_gart_resume,
};
+#ifdef CONFIG_OF
+static struct of_device_id tegra_gart_of_match[] __devinitdata = {
+ { .compatible = "nvidia,tegra20-gart", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_gart_of_match);
+#endif
+
static struct platform_driver tegra_gart_driver = {
.probe = tegra_gart_probe,
.remove = tegra_gart_remove,
@@ -429,6 +439,7 @@ static struct platform_driver tegra_gart_driver = {
.owner = THIS_MODULE,
.name = "tegra-gart",
.pm = &tegra_gart_pm_ops,
+ .of_match_table = of_match_ptr(tegra_gart_of_match),
},
};
@@ -448,4 +459,5 @@ module_exit(tegra_gart_exit);
MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
+MODULE_ALIAS("platform:tegra-gart");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index eb93c82..3f3d09d5 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -550,13 +550,13 @@ static int alloc_pdir(struct smmu_as *as)
return 0;
as->pte_count = devm_kzalloc(smmu->dev,
- sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_KERNEL);
+ sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_ATOMIC);
if (!as->pte_count) {
dev_err(smmu->dev,
"failed to allocate smmu_device PTE cunters\n");
return -ENOMEM;
}
- as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA);
+ as->pdir_page = alloc_page(GFP_ATOMIC | __GFP_DMA);
if (!as->pdir_page) {
dev_err(smmu->dev,
"failed to allocate smmu_device page directory\n");
@@ -733,7 +733,7 @@ static int smmu_iommu_attach_dev(struct iommu_domain *domain,
pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n");
}
- dev_dbg(smmu->dev, "%s is attached\n", dev_name(c->dev));
+ dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev));
return 0;
err_client:
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 27e4a3e..68452b7 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -288,6 +288,7 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag,
* format CAPI IE as string
*/
+#ifdef CONFIG_GIGASET_DEBUG
static const char *format_ie(const char *ie)
{
static char result[3 * MAX_FMT_IE_LEN];
@@ -313,6 +314,7 @@ static const char *format_ie(const char *ie)
*--pout = 0;
return result;
}
+#endif
/*
* emit DATA_B3_CONF message
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index c65c344..114f3bc 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -2084,13 +2084,21 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
/* create the control pipes needed for register access */
hw->ctrl_in_pipe = usb_rcvctrlpipe(hw->dev, 0);
hw->ctrl_out_pipe = usb_sndctrlpipe(hw->dev, 0);
+
+ driver_info = (struct hfcsusb_vdata *)
+ hfcsusb_idtab[vend_idx].driver_info;
+
hw->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!hw->ctrl_urb) {
+ pr_warn("%s: No memory for control urb\n",
+ driver_info->vend_name);
+ kfree(hw);
+ return -ENOMEM;
+ }
- driver_info =
- (struct hfcsusb_vdata *)hfcsusb_idtab[vend_idx].driver_info;
- printk(KERN_DEBUG "%s: %s: detected \"%s\" (%s, if=%d alt=%d)\n",
- hw->name, __func__, driver_info->vend_name,
- conf_str[small_match], ifnum, alt_used);
+ pr_info("%s: %s: detected \"%s\" (%s, if=%d alt=%d)\n",
+ hw->name, __func__, driver_info->vend_name,
+ conf_str[small_match], ifnum, alt_used);
if (setup_instance(hw, dev->dev.parent))
return -EIO;
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
index 84f9c810..849a807 100644
--- a/drivers/isdn/hisax/hfc_usb.c
+++ b/drivers/isdn/hisax/hfc_usb.c
@@ -1483,13 +1483,21 @@ hfc_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
usb_rcvctrlpipe(context->dev, 0);
context->ctrl_out_pipe =
usb_sndctrlpipe(context->dev, 0);
+
+ driver_info = (hfcsusb_vdata *)
+ hfcusb_idtab[vend_idx].driver_info;
+
context->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
- driver_info =
- (hfcsusb_vdata *) hfcusb_idtab[vend_idx].
- driver_info;
- printk(KERN_INFO "HFC-S USB: detected \"%s\"\n",
- driver_info->vend_name);
+ if (!context->ctrl_urb) {
+ pr_warn("%s: No memory for control urb\n",
+ driver_info->vend_name);
+ kfree(context);
+ return -ENOMEM;
+ }
+
+ pr_info("HFC-S USB: detected \"%s\"\n",
+ driver_info->vend_name);
DBG(HFCUSB_DBG_INIT,
"HFC-S USB: Endpoint-Config: %s (if=%d alt=%d), E-Channel(%d)",
diff --git a/drivers/isdn/hisax/isurf.c b/drivers/isdn/hisax/isurf.c
index ea27172..c1530fe 100644
--- a/drivers/isdn/hisax/isurf.c
+++ b/drivers/isdn/hisax/isurf.c
@@ -231,6 +231,11 @@ setup_isurf(struct IsdnCard *card)
}
pnp_disable_dev(pnp_d);
err = pnp_activate_dev(pnp_d);
+ if (err < 0) {
+ pr_warn("%s: pnp_activate_dev ret=%d\n",
+ __func__, err);
+ return 0;
+ }
cs->hw.isurf.reset = pnp_port_start(pnp_d, 0);
cs->hw.isurf.phymem = pnp_mem_start(pnp_d, 1);
cs->irq = pnp_irq(pnp_d, 0);
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 1a0ae44..5f21f629 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -135,8 +135,8 @@ send_layer2(struct mISDNstack *st, struct sk_buff *skb)
skb = NULL;
else if (*debug & DEBUG_SEND_ERR)
printk(KERN_DEBUG
- "%s ch%d mgr prim(%x) addr(%x) err %d\n",
- __func__, ch->nr, hh->prim, ch->addr, ret);
+ "%s mgr prim(%x) err %d\n",
+ __func__, hh->prim, ret);
}
out:
mutex_unlock(&st->lmutex);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index ff4b8cf..12b2b55 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -50,6 +50,19 @@ config LEDS_LM3530
controlled manually or using PWM input or using ambient
light automatically.
+config LEDS_LM3533
+ tristate "LED support for LM3533"
+ depends on LEDS_CLASS
+ depends on MFD_LM3533
+ help
+ This option enables support for the LEDs on National Semiconductor /
+ TI LM3533 Lighting Power chips.
+
+ The LEDs can be controlled directly, through PWM input, or by the
+ ambient-light-sensor interface. The chip supports
+ hardware-accelerated blinking with maximum on and off periods of 9.8
+ and 77 seconds respectively.
+
config LEDS_LOCOMO
tristate "LED Support for Locomo device"
depends on LEDS_CLASS
@@ -259,6 +272,14 @@ config LEDS_DA903X
This option enables support for on-chip LED drivers found
on Dialog Semiconductor DA9030/DA9034 PMICs.
+config LEDS_DA9052
+ tristate "Dialog DA9052/DA9053 LEDS"
+ depends on LEDS_CLASS
+ depends on PMIC_DA9052
+ help
+ This option enables support for on-chip LED drivers found
+ on Dialog Semiconductor DA9052-BC and DA9053-AA/Bx PMICs.
+
config LEDS_DAC124S085
tristate "LED Support for DAC124S085 SPI DAC"
depends on LEDS_CLASS
@@ -358,7 +379,7 @@ config LEDS_NETXBIG
config LEDS_ASIC3
bool "LED support for the HTC ASIC3"
- depends on LEDS_CLASS
+ depends on LEDS_CLASS=y
depends on MFD_ASIC3
default y
help
@@ -369,7 +390,7 @@ config LEDS_ASIC3
config LEDS_RENESAS_TPU
bool "LED support for Renesas TPU"
- depends on LEDS_CLASS && HAVE_CLK && GENERIC_GPIO
+ depends on LEDS_CLASS=y && HAVE_CLK && GENERIC_GPIO
help
This option enables build of the LED TPU platform driver,
suitable to drive any TPU channel on newer Renesas SoCs.
@@ -471,4 +492,12 @@ config LEDS_TRIGGER_DEFAULT_ON
comment "iptables trigger is under Netfilter config (LED target)"
depends on LEDS_TRIGGERS
+config LEDS_TRIGGER_TRANSIENT
+ tristate "LED Transient Trigger"
+ depends on LEDS_TRIGGERS
+ help
+	  This allows one-time activation of a transient state on
+	  GPIO/PWM-based hardware.
+ If unsure, say Y.
+
endif # NEW_LEDS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 890481c..f8958cd 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_LEDS_ATMEL_PWM) += leds-atmel-pwm.o
obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o
+obj-$(CONFIG_LEDS_LM3533) += leds-lm3533.o
obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
obj-$(CONFIG_LEDS_PCA9633) += leds-pca9633.o
obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
+obj-$(CONFIG_LEDS_DA9052) += leds-da9052.o
obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
@@ -56,3 +58,4 @@ obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o
obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o
obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
+obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 5bff843..e663e6f 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -29,7 +29,7 @@ static void led_update_brightness(struct led_classdev *led_cdev)
led_cdev->brightness = led_cdev->brightness_get(led_cdev);
}
-static ssize_t led_brightness_show(struct device *dev,
+static ssize_t led_brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
@@ -44,23 +44,18 @@ static ssize_t led_brightness_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ unsigned long state;
ssize_t ret = -EINVAL;
- char *after;
- unsigned long state = simple_strtoul(buf, &after, 10);
- size_t count = after - buf;
- if (isspace(*after))
- count++;
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
- if (count == size) {
- ret = count;
+ if (state == LED_OFF)
+ led_trigger_remove(led_cdev);
+ led_set_brightness(led_cdev, state);
- if (state == LED_OFF)
- led_trigger_remove(led_cdev);
- led_set_brightness(led_cdev, state);
- }
-
- return ret;
+ return size;
}
static ssize_t led_max_brightness_show(struct device *dev,
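
The led_brightness_store() rewrite above replaces the simple_strtoul()-and-count-characters dance with kstrtoul(), which rejects malformed input outright and lets the function simply return the full buffer size on success. Below is a rough user-space approximation of that stricter parsing; parse_ulong() is an invented helper, not the kernel function.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Rough analogue of kstrtoul(): reject empty input and trailing junk, but
 * tolerate a single trailing newline since sysfs writes usually carry one. */
static int parse_ulong(const char *buf, unsigned int base, unsigned long *res)
{
	char *end;

	errno = 0;
	*res = strtoul(buf, &end, base);
	if (errno || end == buf)
		return -EINVAL;
	if (*end == '\n')
		end++;
	if (*end != '\0')
		return -EINVAL;
	return 0;
}

int main(void)
{
	const char *inputs[] = { "128\n", "0", "12garbage", "" };
	unsigned long state;

	for (unsigned int i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++) {
		int ret = parse_ulong(inputs[i], 10, &state);

		printf("input %u: %s\n", i, ret ? "rejected" : "accepted");
	}
	return 0;
}
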
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index d686004..d65353d 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -44,13 +44,6 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
if (!led_cdev->blink_brightness)
led_cdev->blink_brightness = led_cdev->max_brightness;
- if (led_get_trigger_data(led_cdev) &&
- delay_on == led_cdev->blink_delay_on &&
- delay_off == led_cdev->blink_delay_off)
- return;
-
- led_stop_software_blink(led_cdev);
-
led_cdev->blink_delay_on = delay_on;
led_cdev->blink_delay_off = delay_off;
diff --git a/drivers/leds/leds-da9052.c b/drivers/leds/leds-da9052.c
new file mode 100644
index 0000000..58a5244
--- /dev/null
+++ b/drivers/leds/leds-da9052.c
@@ -0,0 +1,214 @@
+/*
+ * LED Driver for Dialog DA9052 PMICs.
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/pdata.h>
+
+#define DA9052_OPENDRAIN_OUTPUT 2
+#define DA9052_SET_HIGH_LVL_OUTPUT (1 << 3)
+#define DA9052_MASK_UPPER_NIBBLE 0xF0
+#define DA9052_MASK_LOWER_NIBBLE 0x0F
+#define DA9052_NIBBLE_SHIFT 4
+#define DA9052_MAX_BRIGHTNESS 0x5f
+
+struct da9052_led {
+ struct led_classdev cdev;
+ struct work_struct work;
+ struct da9052 *da9052;
+ unsigned char led_index;
+ unsigned char id;
+ int brightness;
+};
+
+static unsigned char led_reg[] = {
+ DA9052_LED_CONT_4_REG,
+ DA9052_LED_CONT_5_REG,
+};
+
+static int da9052_set_led_brightness(struct da9052_led *led)
+{
+ u8 val;
+ int error;
+
+ val = (led->brightness & 0x7f) | DA9052_LED_CONT_DIM;
+
+ error = da9052_reg_write(led->da9052, led_reg[led->led_index], val);
+ if (error < 0)
+ dev_err(led->da9052->dev, "Failed to set led brightness, %d\n",
+ error);
+ return error;
+}
+
+static void da9052_led_work(struct work_struct *work)
+{
+ struct da9052_led *led = container_of(work, struct da9052_led, work);
+
+ da9052_set_led_brightness(led);
+}
+
+static void da9052_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct da9052_led *led;
+
+ led = container_of(led_cdev, struct da9052_led, cdev);
+ led->brightness = value;
+ schedule_work(&led->work);
+}
+
+static int da9052_configure_leds(struct da9052 *da9052)
+{
+ int error;
+ unsigned char register_value = DA9052_OPENDRAIN_OUTPUT
+ | DA9052_SET_HIGH_LVL_OUTPUT;
+
+ error = da9052_reg_update(da9052, DA9052_GPIO_14_15_REG,
+ DA9052_MASK_LOWER_NIBBLE,
+ register_value);
+
+ if (error < 0) {
+ dev_err(da9052->dev, "Failed to write GPIO 14-15 reg, %d\n",
+ error);
+ return error;
+ }
+
+ error = da9052_reg_update(da9052, DA9052_GPIO_14_15_REG,
+ DA9052_MASK_UPPER_NIBBLE,
+ register_value << DA9052_NIBBLE_SHIFT);
+ if (error < 0)
+ dev_err(da9052->dev, "Failed to write GPIO 14-15 reg, %d\n",
+ error);
+
+ return error;
+}
+
+static int __devinit da9052_led_probe(struct platform_device *pdev)
+{
+ struct da9052_pdata *pdata;
+ struct da9052 *da9052;
+ struct led_platform_data *pled;
+ struct da9052_led *led = NULL;
+ int error = -ENODEV;
+ int i;
+
+ da9052 = dev_get_drvdata(pdev->dev.parent);
+ pdata = da9052->dev->platform_data;
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "No platform data\n");
+ goto err;
+ }
+
+ pled = pdata->pled;
+ if (pled == NULL) {
+ dev_err(&pdev->dev, "No platform data for LED\n");
+ goto err;
+ }
+
+ led = devm_kzalloc(&pdev->dev,
+ sizeof(struct da9052_led) * pled->num_leds,
+ GFP_KERNEL);
+ if (led == NULL) {
+ dev_err(&pdev->dev, "Failed to alloc memory\n");
+ error = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < pled->num_leds; i++) {
+ led[i].cdev.name = pled->leds[i].name;
+ led[i].cdev.brightness_set = da9052_led_set;
+ led[i].cdev.brightness = LED_OFF;
+ led[i].cdev.max_brightness = DA9052_MAX_BRIGHTNESS;
+ led[i].brightness = LED_OFF;
+ led[i].led_index = pled->leds[i].flags;
+ led[i].da9052 = dev_get_drvdata(pdev->dev.parent);
+ INIT_WORK(&led[i].work, da9052_led_work);
+
+ error = led_classdev_register(pdev->dev.parent, &led[i].cdev);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to register led %d\n",
+ led[i].led_index);
+ goto err_register;
+ }
+
+ error = da9052_set_led_brightness(&led[i]);
+ if (error) {
+ dev_err(&pdev->dev, "Unable to init led %d\n",
+ led[i].led_index);
+ continue;
+ }
+ }
+ error = da9052_configure_leds(led->da9052);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to configure GPIO LED%d\n", error);
+ goto err_register;
+ }
+
+ platform_set_drvdata(pdev, led);
+
+ return 0;
+
+err_register:
+ for (i = i - 1; i >= 0; i--) {
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+err:
+ return error;
+}
+
+static int __devexit da9052_led_remove(struct platform_device *pdev)
+{
+ struct da9052_led *led = platform_get_drvdata(pdev);
+ struct da9052_pdata *pdata;
+ struct da9052 *da9052;
+ struct led_platform_data *pled;
+ int i;
+
+ da9052 = dev_get_drvdata(pdev->dev.parent);
+ pdata = da9052->dev->platform_data;
+ pled = pdata->pled;
+
+ for (i = 0; i < pled->num_leds; i++) {
+ led[i].brightness = 0;
+ da9052_set_led_brightness(&led[i]);
+ led_classdev_unregister(&led[i].cdev);
+ cancel_work_sync(&led[i].work);
+ }
+
+ return 0;
+}
+
+static struct platform_driver da9052_led_driver = {
+ .driver = {
+ .name = "da9052-leds",
+ .owner = THIS_MODULE,
+ },
+ .probe = da9052_led_probe,
+ .remove = __devexit_p(da9052_led_remove),
+};
+
+module_platform_driver(da9052_led_driver);
+
+MODULE_AUTHOR("Dialog Semiconductor Ltd <dchen@diasemi.com>");
+MODULE_DESCRIPTION("LED driver for Dialog DA9052 PMIC");
+MODULE_LICENSE("GPL");
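
The new driver follows the common LED-driver split: brightness_set() may be called from contexts that cannot sleep, while the actual PMIC register write can, so the callback only records the requested value and schedules a work item that performs the write later. A structural sketch of that record-and-defer pattern with invented names; it models the control flow, not the DA9052 register interface.

#include <stdio.h>

struct led {
	int brightness;		/* last value requested by the LED core */
	int work_pending;	/* stands in for schedule_work() */
};

/* Fast path: may not sleep, so only record the value and mark work pending. */
static void led_set(struct led *led, int value)
{
	led->brightness = value;
	led->work_pending = 1;
}

/* Slow path: runs later in process context and may sleep / touch the bus. */
static void led_work(struct led *led)
{
	if (!led->work_pending)
		return;
	led->work_pending = 0;
	printf("writing brightness %d to the PMIC register\n", led->brightness);
}

int main(void)
{
	struct led led = { 0 };

	led_set(&led, 0x40);	/* called from "atomic" context */
	led_work(&led);		/* later, from the workqueue */
	return 0;
}
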
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 968fd5f..84ba6de 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -113,6 +113,18 @@ struct lm3530_data {
bool enable;
};
+/*
+ * struct lm3530_als_data
+ * @config : value of ALS configuration register
+ * @imp_sel : value of ALS resistor select register
+ * @zone : values of ALS ZB(Zone Boundary) registers
+ */
+struct lm3530_als_data {
+ u8 config;
+ u8 imp_sel;
+ u8 zones[LM3530_ALS_ZB_MAX];
+};
+
static const u8 lm3530_reg[LM3530_REG_MAX] = {
LM3530_GEN_CONFIG,
LM3530_ALS_CONFIG,
@@ -141,29 +153,65 @@ static int lm3530_get_mode_from_str(const char *str)
return -1;
}
+static void lm3530_als_configure(struct lm3530_platform_data *pdata,
+ struct lm3530_als_data *als)
+{
+ int i;
+ u32 als_vmin, als_vmax, als_vstep;
+
+ if (pdata->als_vmax == 0) {
+ pdata->als_vmin = 0;
+ pdata->als_vmax = LM3530_ALS_WINDOW_mV;
+ }
+
+ als_vmin = pdata->als_vmin;
+ als_vmax = pdata->als_vmax;
+
+ if ((als_vmax - als_vmin) > LM3530_ALS_WINDOW_mV)
+ pdata->als_vmax = als_vmax = als_vmin + LM3530_ALS_WINDOW_mV;
+
+ /* n zone boundary makes n+1 zones */
+ als_vstep = (als_vmax - als_vmin) / (LM3530_ALS_ZB_MAX + 1);
+
+ for (i = 0; i < LM3530_ALS_ZB_MAX; i++)
+ als->zones[i] = (((als_vmin + LM3530_ALS_OFFSET_mV) +
+ als_vstep + (i * als_vstep)) * LED_FULL) / 1000;
+
+ als->config =
+ (pdata->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) |
+ (LM3530_ENABLE_ALS) |
+ (pdata->als_input_mode << LM3530_ALS_SEL_SHIFT);
+
+ als->imp_sel =
+ (pdata->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) |
+ (pdata->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
+}
+
static int lm3530_init_registers(struct lm3530_data *drvdata)
{
int ret = 0;
int i;
u8 gen_config;
- u8 als_config = 0;
u8 brt_ramp;
- u8 als_imp_sel = 0;
u8 brightness;
u8 reg_val[LM3530_REG_MAX];
- u8 zones[LM3530_ALS_ZB_MAX];
- u32 als_vmin, als_vmax, als_vstep;
struct lm3530_platform_data *pdata = drvdata->pdata;
struct i2c_client *client = drvdata->client;
struct lm3530_pwm_data *pwm = &pdata->pwm_data;
+ struct lm3530_als_data als;
+
+ memset(&als, 0, sizeof(struct lm3530_als_data));
gen_config = (pdata->brt_ramp_law << LM3530_RAMP_LAW_SHIFT) |
((pdata->max_current & 7) << LM3530_MAX_CURR_SHIFT);
switch (drvdata->mode) {
case LM3530_BL_MODE_MANUAL:
+ gen_config |= LM3530_ENABLE_I2C;
+ break;
case LM3530_BL_MODE_ALS:
gen_config |= LM3530_ENABLE_I2C;
+ lm3530_als_configure(pdata, &als);
break;
case LM3530_BL_MODE_PWM:
gen_config |= LM3530_ENABLE_PWM | LM3530_ENABLE_PWM_SIMPLE |
@@ -171,38 +219,6 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
break;
}
- if (drvdata->mode == LM3530_BL_MODE_ALS) {
- if (pdata->als_vmax == 0) {
- pdata->als_vmin = 0;
- pdata->als_vmax = LM3530_ALS_WINDOW_mV;
- }
-
- als_vmin = pdata->als_vmin;
- als_vmax = pdata->als_vmax;
-
- if ((als_vmax - als_vmin) > LM3530_ALS_WINDOW_mV)
- pdata->als_vmax = als_vmax =
- als_vmin + LM3530_ALS_WINDOW_mV;
-
- /* n zone boundary makes n+1 zones */
- als_vstep = (als_vmax - als_vmin) / (LM3530_ALS_ZB_MAX + 1);
-
- for (i = 0; i < LM3530_ALS_ZB_MAX; i++)
- zones[i] = (((als_vmin + LM3530_ALS_OFFSET_mV) +
- als_vstep + (i * als_vstep)) * LED_FULL)
- / 1000;
-
- als_config =
- (pdata->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) |
- (LM3530_ENABLE_ALS) |
- (pdata->als_input_mode << LM3530_ALS_SEL_SHIFT);
-
- als_imp_sel =
- (pdata->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) |
- (pdata->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
-
- }
-
brt_ramp = (pdata->brt_ramp_fall << LM3530_BRT_RAMP_FALL_SHIFT) |
(pdata->brt_ramp_rise << LM3530_BRT_RAMP_RISE_SHIFT);
@@ -215,14 +231,14 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
brightness = drvdata->led_dev.max_brightness;
reg_val[0] = gen_config; /* LM3530_GEN_CONFIG */
- reg_val[1] = als_config; /* LM3530_ALS_CONFIG */
+ reg_val[1] = als.config; /* LM3530_ALS_CONFIG */
reg_val[2] = brt_ramp; /* LM3530_BRT_RAMP_RATE */
- reg_val[3] = als_imp_sel; /* LM3530_ALS_IMP_SELECT */
+ reg_val[3] = als.imp_sel; /* LM3530_ALS_IMP_SELECT */
reg_val[4] = brightness; /* LM3530_BRT_CTRL_REG */
- reg_val[5] = zones[0]; /* LM3530_ALS_ZB0_REG */
- reg_val[6] = zones[1]; /* LM3530_ALS_ZB1_REG */
- reg_val[7] = zones[2]; /* LM3530_ALS_ZB2_REG */
- reg_val[8] = zones[3]; /* LM3530_ALS_ZB3_REG */
+ reg_val[5] = als.zones[0]; /* LM3530_ALS_ZB0_REG */
+ reg_val[6] = als.zones[1]; /* LM3530_ALS_ZB1_REG */
+ reg_val[7] = als.zones[2]; /* LM3530_ALS_ZB2_REG */
+ reg_val[8] = als.zones[3]; /* LM3530_ALS_ZB3_REG */
reg_val[9] = LM3530_DEF_ZT_0; /* LM3530_ALS_Z0T_REG */
reg_val[10] = LM3530_DEF_ZT_1; /* LM3530_ALS_Z1T_REG */
reg_val[11] = LM3530_DEF_ZT_2; /* LM3530_ALS_Z2T_REG */
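
The factored-out lm3530_als_configure() computes the four ALS zone-boundary registers by clamping the configured voltage window, splitting it into LM3530_ALS_ZB_MAX + 1 zones and scaling each boundary to an 8-bit value. The arithmetic is easy to check in isolation; in the sketch below the window and offset constants are example values only (LED_FULL is 255 in the LED core, everything else is assumed).

#include <stdio.h>

#define ALS_ZB_MAX	4
#define ALS_WINDOW_mV	1000	/* assumed example window */
#define ALS_OFFSET_mV	0	/* assumed example offset */
#define LED_FULL	255

int main(void)
{
	unsigned int vmin = 0, vmax = 1500;	/* caller-supplied window in mV */
	unsigned int vstep, zones[ALS_ZB_MAX];

	if (vmax - vmin > ALS_WINDOW_mV)
		vmax = vmin + ALS_WINDOW_mV;	/* clamp, as the driver does */

	/* n zone boundaries make n + 1 zones */
	vstep = (vmax - vmin) / (ALS_ZB_MAX + 1);

	for (int i = 0; i < ALS_ZB_MAX; i++) {
		zones[i] = ((vmin + ALS_OFFSET_mV + vstep + i * vstep)
			    * LED_FULL) / 1000;
		printf("ZB%d = %u mV -> reg 0x%02x\n",
		       i, vmin + vstep * (i + 1), zones[i]);
	}
	return 0;
}
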
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
new file mode 100644
index 0000000..f56b6e7
--- /dev/null
+++ b/drivers/leds/leds-lm3533.c
@@ -0,0 +1,785 @@
+/*
+ * leds-lm3533.c -- LM3533 LED driver
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/leds.h>
+#include <linux/mfd/core.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_LVCTRLBANK_MIN 2
+#define LM3533_LVCTRLBANK_MAX 5
+#define LM3533_LVCTRLBANK_COUNT 4
+#define LM3533_RISEFALLTIME_MAX 7
+#define LM3533_ALS_CHANNEL_LV_MIN 1
+#define LM3533_ALS_CHANNEL_LV_MAX 2
+
+#define LM3533_REG_CTRLBANK_BCONF_BASE 0x1b
+#define LM3533_REG_PATTERN_ENABLE 0x28
+#define LM3533_REG_PATTERN_LOW_TIME_BASE 0x71
+#define LM3533_REG_PATTERN_HIGH_TIME_BASE 0x72
+#define LM3533_REG_PATTERN_RISETIME_BASE 0x74
+#define LM3533_REG_PATTERN_FALLTIME_BASE 0x75
+
+#define LM3533_REG_PATTERN_STEP 0x10
+
+#define LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK 0x04
+#define LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK 0x02
+#define LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK 0x01
+
+#define LM3533_LED_FLAG_PATTERN_ENABLE 1
+
+
+struct lm3533_led {
+ struct lm3533 *lm3533;
+ struct lm3533_ctrlbank cb;
+ struct led_classdev cdev;
+ int id;
+
+ struct mutex mutex;
+ unsigned long flags;
+
+ struct work_struct work;
+ u8 new_brightness;
+};
+
+
+static inline struct lm3533_led *to_lm3533_led(struct led_classdev *cdev)
+{
+ return container_of(cdev, struct lm3533_led, cdev);
+}
+
+static inline int lm3533_led_get_ctrlbank_id(struct lm3533_led *led)
+{
+ return led->id + 2;
+}
+
+static inline u8 lm3533_led_get_lv_reg(struct lm3533_led *led, u8 base)
+{
+ return base + led->id;
+}
+
+static inline u8 lm3533_led_get_pattern(struct lm3533_led *led)
+{
+ return led->id;
+}
+
+static inline u8 lm3533_led_get_pattern_reg(struct lm3533_led *led,
+ u8 base)
+{
+ return base + lm3533_led_get_pattern(led) * LM3533_REG_PATTERN_STEP;
+}
+
+static int lm3533_led_pattern_enable(struct lm3533_led *led, int enable)
+{
+ u8 mask;
+ u8 val;
+ int pattern;
+ int state;
+ int ret = 0;
+
+ dev_dbg(led->cdev.dev, "%s - %d\n", __func__, enable);
+
+ mutex_lock(&led->mutex);
+
+ state = test_bit(LM3533_LED_FLAG_PATTERN_ENABLE, &led->flags);
+ if ((enable && state) || (!enable && !state))
+ goto out;
+
+ pattern = lm3533_led_get_pattern(led);
+ mask = 1 << (2 * pattern);
+
+ if (enable)
+ val = mask;
+ else
+ val = 0;
+
+ ret = lm3533_update(led->lm3533, LM3533_REG_PATTERN_ENABLE, val, mask);
+ if (ret) {
+ dev_err(led->cdev.dev, "failed to enable pattern %d (%d)\n",
+ pattern, enable);
+ goto out;
+ }
+
+ __change_bit(LM3533_LED_FLAG_PATTERN_ENABLE, &led->flags);
+out:
+ mutex_unlock(&led->mutex);
+
+ return ret;
+}
+
+static void lm3533_led_work(struct work_struct *work)
+{
+ struct lm3533_led *led = container_of(work, struct lm3533_led, work);
+
+ dev_dbg(led->cdev.dev, "%s - %u\n", __func__, led->new_brightness);
+
+ if (led->new_brightness == 0)
+ lm3533_led_pattern_enable(led, 0); /* disable blink */
+
+ lm3533_ctrlbank_set_brightness(&led->cb, led->new_brightness);
+}
+
+static void lm3533_led_set(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ struct lm3533_led *led = to_lm3533_led(cdev);
+
+ dev_dbg(led->cdev.dev, "%s - %d\n", __func__, value);
+
+ led->new_brightness = value;
+ schedule_work(&led->work);
+}
+
+static enum led_brightness lm3533_led_get(struct led_classdev *cdev)
+{
+ struct lm3533_led *led = to_lm3533_led(cdev);
+ u8 val;
+ int ret;
+
+ ret = lm3533_ctrlbank_get_brightness(&led->cb, &val);
+ if (ret)
+ return ret;
+
+ dev_dbg(led->cdev.dev, "%s - %u\n", __func__, val);
+
+ return val;
+}
+
+/* Pattern generator defines (delays in us). */
+#define LM3533_LED_DELAY1_VMIN 0x00
+#define LM3533_LED_DELAY2_VMIN 0x3d
+#define LM3533_LED_DELAY3_VMIN 0x80
+
+#define LM3533_LED_DELAY1_VMAX (LM3533_LED_DELAY2_VMIN - 1)
+#define LM3533_LED_DELAY2_VMAX (LM3533_LED_DELAY3_VMIN - 1)
+#define LM3533_LED_DELAY3_VMAX 0xff
+
+#define LM3533_LED_DELAY1_TMIN 16384U
+#define LM3533_LED_DELAY2_TMIN 1130496U
+#define LM3533_LED_DELAY3_TMIN 10305536U
+
+#define LM3533_LED_DELAY1_TMAX 999424U
+#define LM3533_LED_DELAY2_TMAX 9781248U
+#define LM3533_LED_DELAY3_TMAX 76890112U
+
+/* t_step = (t_max - t_min) / (v_max - v_min) */
+#define LM3533_LED_DELAY1_TSTEP 16384
+#define LM3533_LED_DELAY2_TSTEP 131072
+#define LM3533_LED_DELAY3_TSTEP 524288
+
+/* Delay limits for hardware accelerated blinking (in ms). */
+#define LM3533_LED_DELAY_ON_MAX \
+ ((LM3533_LED_DELAY2_TMAX + LM3533_LED_DELAY2_TSTEP / 2) / 1000)
+#define LM3533_LED_DELAY_OFF_MAX \
+ ((LM3533_LED_DELAY3_TMAX + LM3533_LED_DELAY3_TSTEP / 2) / 1000)
+
+/*
+ * Returns linear map of *t from [t_min,t_max] to [v_min,v_max] with a step
+ * size of t_step, where
+ *
+ * t_step = (t_max - t_min) / (v_max - v_min)
+ *
+ * and updates *t to reflect the mapped value.
+ */
+static u8 time_to_val(unsigned *t, unsigned t_min, unsigned t_step,
+ u8 v_min, u8 v_max)
+{
+ unsigned val;
+
+ val = (*t + t_step / 2 - t_min) / t_step + v_min;
+
+ *t = t_step * (val - v_min) + t_min;
+
+ return (u8)val;
+}
+
+/*
+ * Returns time code corresponding to *delay (in ms) and updates *delay to
+ * reflect actual hardware delay.
+ *
+ * Hardware supports 256 discrete delay times, divided into three groups with
+ * the following ranges and step-sizes:
+ *
+ * [ 16, 999] [0x00, 0x3e] step 16 ms
+ * [ 1130, 9781] [0x3d, 0x7f] step 131 ms
+ * [10306, 76890] [0x80, 0xff] step 524 ms
+ *
+ * Note that delay group 3 is only available for delay_off.
+ */
+static u8 lm3533_led_get_hw_delay(unsigned *delay)
+{
+ unsigned t;
+ u8 val;
+
+ t = *delay * 1000;
+
+ if (t >= (LM3533_LED_DELAY2_TMAX + LM3533_LED_DELAY3_TMIN) / 2) {
+ t = clamp(t, LM3533_LED_DELAY3_TMIN, LM3533_LED_DELAY3_TMAX);
+ val = time_to_val(&t, LM3533_LED_DELAY3_TMIN,
+ LM3533_LED_DELAY3_TSTEP,
+ LM3533_LED_DELAY3_VMIN,
+ LM3533_LED_DELAY3_VMAX);
+ } else if (t >= (LM3533_LED_DELAY1_TMAX + LM3533_LED_DELAY2_TMIN) / 2) {
+ t = clamp(t, LM3533_LED_DELAY2_TMIN, LM3533_LED_DELAY2_TMAX);
+ val = time_to_val(&t, LM3533_LED_DELAY2_TMIN,
+ LM3533_LED_DELAY2_TSTEP,
+ LM3533_LED_DELAY2_VMIN,
+ LM3533_LED_DELAY2_VMAX);
+ } else {
+ t = clamp(t, LM3533_LED_DELAY1_TMIN, LM3533_LED_DELAY1_TMAX);
+ val = time_to_val(&t, LM3533_LED_DELAY1_TMIN,
+ LM3533_LED_DELAY1_TSTEP,
+ LM3533_LED_DELAY1_VMIN,
+ LM3533_LED_DELAY1_VMAX);
+ }
+
+ *delay = (t + 500) / 1000;
+
+ return val;
+}
+
+/*
+ * Set delay register base to *delay (in ms) and update *delay to reflect
+ * actual hardware delay used.
+ */
+static u8 lm3533_led_delay_set(struct lm3533_led *led, u8 base,
+ unsigned long *delay)
+{
+ unsigned t;
+ u8 val;
+ u8 reg;
+ int ret;
+
+ t = (unsigned)*delay;
+
+ /* Delay group 3 is only available for low time (delay off). */
+ if (base != LM3533_REG_PATTERN_LOW_TIME_BASE)
+ t = min(t, LM3533_LED_DELAY2_TMAX / 1000);
+
+ val = lm3533_led_get_hw_delay(&t);
+
+ dev_dbg(led->cdev.dev, "%s - %lu: %u (0x%02x)\n", __func__,
+ *delay, t, val);
+ reg = lm3533_led_get_pattern_reg(led, base);
+ ret = lm3533_write(led->lm3533, reg, val);
+ if (ret)
+ dev_err(led->cdev.dev, "failed to set delay (%02x)\n", reg);
+
+ *delay = t;
+
+ return ret;
+}
+
+static int lm3533_led_delay_on_set(struct lm3533_led *led, unsigned long *t)
+{
+ return lm3533_led_delay_set(led, LM3533_REG_PATTERN_HIGH_TIME_BASE, t);
+}
+
+static int lm3533_led_delay_off_set(struct lm3533_led *led, unsigned long *t)
+{
+ return lm3533_led_delay_set(led, LM3533_REG_PATTERN_LOW_TIME_BASE, t);
+}
+
+static int lm3533_led_blink_set(struct led_classdev *cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct lm3533_led *led = to_lm3533_led(cdev);
+ int ret;
+
+ dev_dbg(led->cdev.dev, "%s - on = %lu, off = %lu\n", __func__,
+ *delay_on, *delay_off);
+
+ if (*delay_on > LM3533_LED_DELAY_ON_MAX ||
+ *delay_off > LM3533_LED_DELAY_OFF_MAX)
+ return -EINVAL;
+
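+	/* 0/0 means use defaults; pick a 1 Hz, 50% duty-cycle blink. */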
+ if (*delay_on == 0 && *delay_off == 0) {
+ *delay_on = 500;
+ *delay_off = 500;
+ }
+
+ ret = lm3533_led_delay_on_set(led, delay_on);
+ if (ret)
+ return ret;
+
+ ret = lm3533_led_delay_off_set(led, delay_off);
+ if (ret)
+ return ret;
+
+ return lm3533_led_pattern_enable(led, 1);
+}
+
+static ssize_t show_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", led->id);
+}
+
+/*
+ * Pattern generator rise/fall times:
+ *
+ * 0 - 2048 us (default)
+ * 1 - 262 ms
+ * 2 - 524 ms
+ * 3 - 1.049 s
+ * 4 - 2.097 s
+ * 5 - 4.194 s
+ * 6 - 8.389 s
+ * 7 - 16.78 s
+ */
+static ssize_t show_risefalltime(struct device *dev,
+ struct device_attribute *attr,
+ char *buf, u8 base)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ ssize_t ret;
+ u8 reg;
+ u8 val;
+
+ reg = lm3533_led_get_pattern_reg(led, base);
+ ret = lm3533_read(led->lm3533, reg, &val);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%x\n", val);
+}
+
+static ssize_t show_risetime(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return show_risefalltime(dev, attr, buf,
+ LM3533_REG_PATTERN_RISETIME_BASE);
+}
+
+static ssize_t show_falltime(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return show_risefalltime(dev, attr, buf,
+ LM3533_REG_PATTERN_FALLTIME_BASE);
+}
+
+static ssize_t store_risefalltime(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len, u8 base)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ u8 val;
+ u8 reg;
+ int ret;
+
+ if (kstrtou8(buf, 0, &val) || val > LM3533_RISEFALLTIME_MAX)
+ return -EINVAL;
+
+ reg = lm3533_led_get_pattern_reg(led, base);
+ ret = lm3533_write(led->lm3533, reg, val);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t store_risetime(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_risefalltime(dev, attr, buf, len,
+ LM3533_REG_PATTERN_RISETIME_BASE);
+}
+
+static ssize_t store_falltime(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return store_risefalltime(dev, attr, buf, len,
+ LM3533_REG_PATTERN_FALLTIME_BASE);
+}
+
+static ssize_t show_als_channel(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ unsigned channel;
+ u8 reg;
+ u8 val;
+ int ret;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ ret = lm3533_read(led->lm3533, reg, &val);
+ if (ret)
+ return ret;
+
+ channel = (val & LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK) + 1;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", channel);
+}
+
+static ssize_t store_als_channel(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ unsigned channel;
+ u8 reg;
+ u8 val;
+ u8 mask;
+ int ret;
+
+ if (kstrtouint(buf, 0, &channel))
+ return -EINVAL;
+
+ if (channel < LM3533_ALS_CHANNEL_LV_MIN ||
+ channel > LM3533_ALS_CHANNEL_LV_MAX)
+ return -EINVAL;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ mask = LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK;
+ val = channel - 1;
+
+ ret = lm3533_update(led->lm3533, reg, val, mask);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t show_als_en(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ bool enable;
+ u8 reg;
+ u8 val;
+ int ret;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ ret = lm3533_read(led->lm3533, reg, &val);
+ if (ret)
+ return ret;
+
+ enable = val & LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", enable);
+}
+
+static ssize_t store_als_en(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ unsigned enable;
+ u8 reg;
+ u8 mask;
+ u8 val;
+ int ret;
+
+ if (kstrtouint(buf, 0, &enable))
+ return -EINVAL;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ mask = LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK;
+
+ if (enable)
+ val = mask;
+ else
+ val = 0;
+
+ ret = lm3533_update(led->lm3533, reg, val, mask);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t show_linear(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ u8 reg;
+ u8 val;
+ int linear;
+ int ret;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ ret = lm3533_read(led->lm3533, reg, &val);
+ if (ret)
+ return ret;
+
+ if (val & LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK)
+ linear = 1;
+ else
+ linear = 0;
+
+ return scnprintf(buf, PAGE_SIZE, "%x\n", linear);
+}
+
+static ssize_t store_linear(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ unsigned long linear;
+ u8 reg;
+ u8 mask;
+ u8 val;
+ int ret;
+
+ if (kstrtoul(buf, 0, &linear))
+ return -EINVAL;
+
+ reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
+ mask = LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK;
+
+ if (linear)
+ val = mask;
+ else
+ val = 0;
+
+ ret = lm3533_update(led->lm3533, reg, val, mask);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t show_pwm(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ u8 val;
+ int ret;
+
+ ret = lm3533_ctrlbank_get_pwm(&led->cb, &val);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t store_pwm(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ u8 val;
+ int ret;
+
+ if (kstrtou8(buf, 0, &val))
+ return -EINVAL;
+
+ ret = lm3533_ctrlbank_set_pwm(&led->cb, val);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static LM3533_ATTR_RW(als_channel);
+static LM3533_ATTR_RW(als_en);
+static LM3533_ATTR_RW(falltime);
+static LM3533_ATTR_RO(id);
+static LM3533_ATTR_RW(linear);
+static LM3533_ATTR_RW(pwm);
+static LM3533_ATTR_RW(risetime);
+
+static struct attribute *lm3533_led_attributes[] = {
+ &dev_attr_als_channel.attr,
+ &dev_attr_als_en.attr,
+ &dev_attr_falltime.attr,
+ &dev_attr_id.attr,
+ &dev_attr_linear.attr,
+ &dev_attr_pwm.attr,
+ &dev_attr_risetime.attr,
+ NULL,
+};
+
+static umode_t lm3533_led_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct lm3533_led *led = to_lm3533_led(led_cdev);
+ umode_t mode = attr->mode;
+
+ if (attr == &dev_attr_als_channel.attr ||
+ attr == &dev_attr_als_en.attr) {
+ if (!led->lm3533->have_als)
+ mode = 0;
+ }
+
+ return mode;
+}
+
+static struct attribute_group lm3533_led_attribute_group = {
+ .is_visible = lm3533_led_attr_is_visible,
+ .attrs = lm3533_led_attributes
+};
+
+static int __devinit lm3533_led_setup(struct lm3533_led *led,
+ struct lm3533_led_platform_data *pdata)
+{
+ int ret;
+
+ ret = lm3533_ctrlbank_set_max_current(&led->cb, pdata->max_current);
+ if (ret)
+ return ret;
+
+ return lm3533_ctrlbank_set_pwm(&led->cb, pdata->pwm);
+}
+
+static int __devinit lm3533_led_probe(struct platform_device *pdev)
+{
+ struct lm3533 *lm3533;
+ struct lm3533_led_platform_data *pdata;
+ struct lm3533_led *led;
+ int ret;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533 = dev_get_drvdata(pdev->dev.parent);
+ if (!lm3533)
+ return -EINVAL;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ if (pdev->id < 0 || pdev->id >= LM3533_LVCTRLBANK_COUNT) {
+ dev_err(&pdev->dev, "illegal LED id %d\n", pdev->id);
+ return -EINVAL;
+ }
+
+ led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
+ if (!led)
+ return -ENOMEM;
+
+ led->lm3533 = lm3533;
+ led->cdev.name = pdata->name;
+ led->cdev.default_trigger = pdata->default_trigger;
+ led->cdev.brightness_set = lm3533_led_set;
+ led->cdev.brightness_get = lm3533_led_get;
+ led->cdev.blink_set = lm3533_led_blink_set;
+ led->cdev.brightness = LED_OFF;
+ led->id = pdev->id;
+
+ mutex_init(&led->mutex);
+ INIT_WORK(&led->work, lm3533_led_work);
+
+ /* The class framework makes a callback to get brightness during
+ * registration so use parent device (for error reporting) until
+ * registered.
+ */
+ led->cb.lm3533 = lm3533;
+ led->cb.id = lm3533_led_get_ctrlbank_id(led);
+ led->cb.dev = lm3533->dev;
+
+ platform_set_drvdata(pdev, led);
+
+ ret = led_classdev_register(pdev->dev.parent, &led->cdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register LED %d\n", pdev->id);
+ return ret;
+ }
+
+ led->cb.dev = led->cdev.dev;
+
+ ret = sysfs_create_group(&led->cdev.dev->kobj,
+ &lm3533_led_attribute_group);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to create sysfs attributes\n");
+ goto err_unregister;
+ }
+
+ ret = lm3533_led_setup(led, pdata);
+ if (ret)
+ goto err_sysfs_remove;
+
+ ret = lm3533_ctrlbank_enable(&led->cb);
+ if (ret)
+ goto err_sysfs_remove;
+
+ return 0;
+
+err_sysfs_remove:
+ sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
+err_unregister:
+ led_classdev_unregister(&led->cdev);
+ flush_work_sync(&led->work);
+
+ return ret;
+}
+
+static int __devexit lm3533_led_remove(struct platform_device *pdev)
+{
+ struct lm3533_led *led = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533_ctrlbank_disable(&led->cb);
+ sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
+ led_classdev_unregister(&led->cdev);
+ flush_work_sync(&led->work);
+
+ return 0;
+}
+
+static void lm3533_led_shutdown(struct platform_device *pdev)
+{
+ struct lm3533_led *led = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533_ctrlbank_disable(&led->cb);
+ lm3533_led_set(&led->cdev, LED_OFF); /* disable blink */
+ flush_work_sync(&led->work);
+}
+
+static struct platform_driver lm3533_led_driver = {
+ .driver = {
+ .name = "lm3533-leds",
+ .owner = THIS_MODULE,
+ },
+ .probe = lm3533_led_probe,
+ .remove = __devexit_p(lm3533_led_remove),
+ .shutdown = lm3533_led_shutdown,
+};
+module_platform_driver(lm3533_led_driver);
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lm3533-leds");
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 410a723..2381562 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -193,9 +193,14 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
/* move current engine to direct mode and remember the state */
ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT);
+ if (ret)
+ return ret;
+
/* Mode change requires min 500 us delay. 1 - 2 ms with margin */
usleep_range(1000, 2000);
- ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode);
+ ret = lp5521_read(client, LP5521_REG_OP_MODE, &mode);
+ if (ret)
+ return ret;
/* For loading, all the engines to load mode */
lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
@@ -211,8 +216,7 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
LP5521_PROG_MEM_SIZE,
pattern);
- ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode);
- return ret;
+ return lp5521_write(client, LP5521_REG_OP_MODE, mode);
}
static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr)
@@ -785,7 +789,7 @@ static int __devinit lp5521_probe(struct i2c_client *client,
* LP5521_REG_ENABLE register will not have any effect - strange!
*/
ret = lp5521_read(client, LP5521_REG_R_CURRENT, &buf);
- if (buf != LP5521_REG_R_CURR_DEFAULT) {
+ if (ret || buf != LP5521_REG_R_CURR_DEFAULT) {
dev_err(&client->dev, "error in resetting chip\n");
goto fail2;
}
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index 8bc4915..4cc6a2e 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -280,7 +280,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
return -EINVAL;
}
- led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
+ led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
if (led == NULL) {
dev_err(&pdev->dev, "failed to alloc memory\n");
return -ENOMEM;
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index dcc3bc3..5f462db 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -101,11 +101,16 @@ static const struct i2c_device_id pca955x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pca955x_id);
-struct pca955x_led {
+struct pca955x {
+ struct mutex lock;
+ struct pca955x_led *leds;
struct pca955x_chipdef *chipdef;
struct i2c_client *client;
+};
+
+struct pca955x_led {
+ struct pca955x *pca955x;
struct work_struct work;
- spinlock_t lock;
enum led_brightness brightness;
struct led_classdev led_cdev;
int led_num; /* 0 .. 15 potentially */
@@ -140,7 +145,7 @@ static inline u8 pca955x_ledsel(u8 oldval, int led_num, int state)
*/
static void pca955x_write_psc(struct i2c_client *client, int n, u8 val)
{
- struct pca955x_led *pca955x = i2c_get_clientdata(client);
+ struct pca955x *pca955x = i2c_get_clientdata(client);
i2c_smbus_write_byte_data(client,
pca95xx_num_input_regs(pca955x->chipdef->bits) + 2*n,
@@ -156,7 +161,7 @@ static void pca955x_write_psc(struct i2c_client *client, int n, u8 val)
*/
static void pca955x_write_pwm(struct i2c_client *client, int n, u8 val)
{
- struct pca955x_led *pca955x = i2c_get_clientdata(client);
+ struct pca955x *pca955x = i2c_get_clientdata(client);
i2c_smbus_write_byte_data(client,
pca95xx_num_input_regs(pca955x->chipdef->bits) + 1 + 2*n,
@@ -169,7 +174,7 @@ static void pca955x_write_pwm(struct i2c_client *client, int n, u8 val)
*/
static void pca955x_write_ls(struct i2c_client *client, int n, u8 val)
{
- struct pca955x_led *pca955x = i2c_get_clientdata(client);
+ struct pca955x *pca955x = i2c_get_clientdata(client);
i2c_smbus_write_byte_data(client,
pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n,
@@ -182,7 +187,7 @@ static void pca955x_write_ls(struct i2c_client *client, int n, u8 val)
*/
static u8 pca955x_read_ls(struct i2c_client *client, int n)
{
- struct pca955x_led *pca955x = i2c_get_clientdata(client);
+ struct pca955x *pca955x = i2c_get_clientdata(client);
return (u8) i2c_smbus_read_byte_data(client,
pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n);
@@ -190,18 +195,23 @@ static u8 pca955x_read_ls(struct i2c_client *client, int n)
static void pca955x_led_work(struct work_struct *work)
{
- struct pca955x_led *pca955x;
+ struct pca955x_led *pca955x_led;
+ struct pca955x *pca955x;
u8 ls;
int chip_ls; /* which LSx to use (0-3 potentially) */
int ls_led; /* which set of bits within LSx to use (0-3) */
- pca955x = container_of(work, struct pca955x_led, work);
- chip_ls = pca955x->led_num / 4;
- ls_led = pca955x->led_num % 4;
+ pca955x_led = container_of(work, struct pca955x_led, work);
+ pca955x = pca955x_led->pca955x;
+
+ chip_ls = pca955x_led->led_num / 4;
+ ls_led = pca955x_led->led_num % 4;
+
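+	/* Serialise the read-modify-write of the shared LS register. */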
+ mutex_lock(&pca955x->lock);
ls = pca955x_read_ls(pca955x->client, chip_ls);
- switch (pca955x->brightness) {
+ switch (pca955x_led->brightness) {
case LED_FULL:
ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_ON);
break;
@@ -219,12 +229,15 @@ static void pca955x_led_work(struct work_struct *work)
* OFF, HALF, or FULL. But, this is probably better than
* just turning off for all other values.
*/
- pca955x_write_pwm(pca955x->client, 1, 255-pca955x->brightness);
+ pca955x_write_pwm(pca955x->client, 1,
+ 255 - pca955x_led->brightness);
ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK1);
break;
}
pca955x_write_ls(pca955x->client, chip_ls, ls);
+
+ mutex_unlock(&pca955x->lock);
}
static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness value)
@@ -233,7 +246,6 @@ static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness v
pca955x = container_of(led_cdev, struct pca955x_led, led_cdev);
- spin_lock(&pca955x->lock);
pca955x->brightness = value;
/*
@@ -241,14 +253,13 @@ static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness v
* can sleep.
*/
schedule_work(&pca955x->work);
-
- spin_unlock(&pca955x->lock);
}
static int __devinit pca955x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct pca955x_led *pca955x;
+ struct pca955x *pca955x;
+ struct pca955x_led *pca955x_led;
struct pca955x_chipdef *chip;
struct i2c_adapter *adapter;
struct led_platform_data *pdata;
@@ -282,39 +293,48 @@ static int __devinit pca955x_probe(struct i2c_client *client,
}
}
- pca955x = kzalloc(sizeof(*pca955x) * chip->bits, GFP_KERNEL);
+ pca955x = kzalloc(sizeof(*pca955x), GFP_KERNEL);
if (!pca955x)
return -ENOMEM;
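+	/* One shared chip structure plus one pca955x_led per LED output. */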
+ pca955x->leds = kzalloc(sizeof(*pca955x_led) * chip->bits, GFP_KERNEL);
+ if (!pca955x->leds) {
+ err = -ENOMEM;
+ goto exit_nomem;
+ }
+
i2c_set_clientdata(client, pca955x);
+ mutex_init(&pca955x->lock);
+ pca955x->client = client;
+ pca955x->chipdef = chip;
+
for (i = 0; i < chip->bits; i++) {
- pca955x[i].chipdef = chip;
- pca955x[i].client = client;
- pca955x[i].led_num = i;
+ pca955x_led = &pca955x->leds[i];
+ pca955x_led->led_num = i;
+ pca955x_led->pca955x = pca955x;
/* Platform data can specify LED names and default triggers */
if (pdata) {
if (pdata->leds[i].name)
- snprintf(pca955x[i].name,
- sizeof(pca955x[i].name), "pca955x:%s",
- pdata->leds[i].name);
+ snprintf(pca955x_led->name,
+ sizeof(pca955x_led->name), "pca955x:%s",
+ pdata->leds[i].name);
if (pdata->leds[i].default_trigger)
- pca955x[i].led_cdev.default_trigger =
+ pca955x_led->led_cdev.default_trigger =
pdata->leds[i].default_trigger;
} else {
- snprintf(pca955x[i].name, sizeof(pca955x[i].name),
+ snprintf(pca955x_led->name, sizeof(pca955x_led->name),
"pca955x:%d", i);
}
- spin_lock_init(&pca955x[i].lock);
-
- pca955x[i].led_cdev.name = pca955x[i].name;
- pca955x[i].led_cdev.brightness_set = pca955x_led_set;
+ pca955x_led->led_cdev.name = pca955x_led->name;
+ pca955x_led->led_cdev.brightness_set = pca955x_led_set;
- INIT_WORK(&pca955x[i].work, pca955x_led_work);
+ INIT_WORK(&pca955x_led->work, pca955x_led_work);
- err = led_classdev_register(&client->dev, &pca955x[i].led_cdev);
+ err = led_classdev_register(&client->dev,
+ &pca955x_led->led_cdev);
if (err < 0)
goto exit;
}
@@ -337,10 +357,12 @@ static int __devinit pca955x_probe(struct i2c_client *client,
exit:
while (i--) {
- led_classdev_unregister(&pca955x[i].led_cdev);
- cancel_work_sync(&pca955x[i].work);
+ led_classdev_unregister(&pca955x->leds[i].led_cdev);
+ cancel_work_sync(&pca955x->leds[i].work);
}
+ kfree(pca955x->leds);
+exit_nomem:
kfree(pca955x);
return err;
@@ -348,14 +370,15 @@ exit:
static int __devexit pca955x_remove(struct i2c_client *client)
{
- struct pca955x_led *pca955x = i2c_get_clientdata(client);
+ struct pca955x *pca955x = i2c_get_clientdata(client);
int i;
for (i = 0; i < pca955x->chipdef->bits; i++) {
- led_classdev_unregister(&pca955x[i].led_cdev);
- cancel_work_sync(&pca955x[i].work);
+ led_classdev_unregister(&pca955x->leds[i].led_cdev);
+ cancel_work_sync(&pca955x->leds[i].work);
}
+ kfree(pca955x->leds);
kfree(pca955x);
return 0;
diff --git a/drivers/leds/ledtrig-backlight.c b/drivers/leds/ledtrig-backlight.c
index 2b513a2..e272686 100644
--- a/drivers/leds/ledtrig-backlight.c
+++ b/drivers/leds/ledtrig-backlight.c
@@ -120,6 +120,7 @@ static void bl_trig_activate(struct led_classdev *led)
ret = fb_register_client(&n->notifier);
if (ret)
dev_err(led->dev, "unable to register backlight trigger\n");
+ led->activated = true;
return;
@@ -133,10 +134,11 @@ static void bl_trig_deactivate(struct led_classdev *led)
struct bl_trig_notifier *n =
(struct bl_trig_notifier *) led->trigger_data;
- if (n) {
+ if (led->activated) {
device_remove_file(led->dev, &dev_attr_inverted);
fb_unregister_client(&n->notifier);
kfree(n);
+ led->activated = false;
}
}
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index ecc4bf3..f057c10 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -200,6 +200,7 @@ static void gpio_trig_activate(struct led_classdev *led)
gpio_data->led = led;
led->trigger_data = gpio_data;
INIT_WORK(&gpio_data->work, gpio_trig_work);
+ led->activated = true;
return;
@@ -217,7 +218,7 @@ static void gpio_trig_deactivate(struct led_classdev *led)
{
struct gpio_trig_data *gpio_data = led->trigger_data;
- if (gpio_data) {
+ if (led->activated) {
device_remove_file(led->dev, &dev_attr_gpio);
device_remove_file(led->dev, &dev_attr_inverted);
device_remove_file(led->dev, &dev_attr_desired_brightness);
@@ -225,6 +226,7 @@ static void gpio_trig_deactivate(struct led_classdev *led)
if (gpio_data->gpio != 0)
free_irq(gpio_to_irq(gpio_data->gpio), led);
kfree(gpio_data);
+ led->activated = false;
}
}
diff --git a/drivers/leds/ledtrig-heartbeat.c b/drivers/leds/ledtrig-heartbeat.c
index 759c0bba..a019fbb 100644
--- a/drivers/leds/ledtrig-heartbeat.c
+++ b/drivers/leds/ledtrig-heartbeat.c
@@ -18,8 +18,11 @@
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/leds.h>
+#include <linux/reboot.h>
#include "leds.h"
+static int panic_heartbeats;
+
struct heartbeat_trig_data {
unsigned int phase;
unsigned int period;
@@ -33,6 +36,11 @@ static void led_heartbeat_function(unsigned long data)
unsigned long brightness = LED_OFF;
unsigned long delay = 0;
+ if (unlikely(panic_heartbeats)) {
+ led_set_brightness(led_cdev, LED_OFF);
+ return;
+ }
+
/* acts like an actual heart beat -- ie thump-thump-pause... */
switch (heartbeat_data->phase) {
case 0:
@@ -83,15 +91,17 @@ static void heartbeat_trig_activate(struct led_classdev *led_cdev)
led_heartbeat_function, (unsigned long) led_cdev);
heartbeat_data->phase = 0;
led_heartbeat_function(heartbeat_data->timer.data);
+ led_cdev->activated = true;
}
static void heartbeat_trig_deactivate(struct led_classdev *led_cdev)
{
struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
- if (heartbeat_data) {
+ if (led_cdev->activated) {
del_timer_sync(&heartbeat_data->timer);
kfree(heartbeat_data);
+ led_cdev->activated = false;
}
}
@@ -101,13 +111,45 @@ static struct led_trigger heartbeat_led_trigger = {
.deactivate = heartbeat_trig_deactivate,
};
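+/*
+ * Stop blinking when the machine goes down: on reboot the trigger is
+ * unregistered, while on panic the timer callback just forces the LED off
+ * via the panic_heartbeats flag.
+ */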
+static int heartbeat_reboot_notifier(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ led_trigger_unregister(&heartbeat_led_trigger);
+ return NOTIFY_DONE;
+}
+
+static int heartbeat_panic_notifier(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ panic_heartbeats = 1;
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block heartbeat_reboot_nb = {
+ .notifier_call = heartbeat_reboot_notifier,
+};
+
+static struct notifier_block heartbeat_panic_nb = {
+ .notifier_call = heartbeat_panic_notifier,
+};
+
static int __init heartbeat_trig_init(void)
{
- return led_trigger_register(&heartbeat_led_trigger);
+ int rc = led_trigger_register(&heartbeat_led_trigger);
+
+ if (!rc) {
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &heartbeat_panic_nb);
+ register_reboot_notifier(&heartbeat_reboot_nb);
+ }
+ return rc;
}
static void __exit heartbeat_trig_exit(void)
{
+ unregister_reboot_notifier(&heartbeat_reboot_nb);
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &heartbeat_panic_nb);
led_trigger_unregister(&heartbeat_led_trigger);
}
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index 328c64c..9010f7a 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -31,21 +31,17 @@ static ssize_t led_delay_on_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
- int ret = -EINVAL;
- char *after;
- unsigned long state = simple_strtoul(buf, &after, 10);
- size_t count = after - buf;
-
- if (isspace(*after))
- count++;
-
- if (count == size) {
- led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
- led_cdev->blink_delay_on = state;
- ret = count;
- }
+ unsigned long state;
+	ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
- return ret;
+ led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
+ led_cdev->blink_delay_on = state;
+
+ return size;
}
static ssize_t led_delay_off_show(struct device *dev,
@@ -60,21 +56,17 @@ static ssize_t led_delay_off_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
- int ret = -EINVAL;
- char *after;
- unsigned long state = simple_strtoul(buf, &after, 10);
- size_t count = after - buf;
-
- if (isspace(*after))
- count++;
-
- if (count == size) {
- led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
- led_cdev->blink_delay_off = state;
- ret = count;
- }
+ unsigned long state;
+	ssize_t ret;
- return ret;
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
+ led_cdev->blink_delay_off = state;
+
+ return size;
}
static DEVICE_ATTR(delay_on, 0644, led_delay_on_show, led_delay_on_store);
@@ -95,8 +87,7 @@ static void timer_trig_activate(struct led_classdev *led_cdev)
led_blink_set(led_cdev, &led_cdev->blink_delay_on,
&led_cdev->blink_delay_off);
-
- led_cdev->trigger_data = (void *)1;
+ led_cdev->activated = true;
return;
@@ -106,9 +97,10 @@ err_out_delayon:
static void timer_trig_deactivate(struct led_classdev *led_cdev)
{
- if (led_cdev->trigger_data) {
+ if (led_cdev->activated) {
device_remove_file(led_cdev->dev, &dev_attr_delay_on);
device_remove_file(led_cdev->dev, &dev_attr_delay_off);
+ led_cdev->activated = false;
}
/* Stop blinking */
diff --git a/drivers/leds/ledtrig-transient.c b/drivers/leds/ledtrig-transient.c
new file mode 100644
index 0000000..83179f4
--- /dev/null
+++ b/drivers/leds/ledtrig-transient.c
@@ -0,0 +1,237 @@
+/*
+ * LED Kernel Transient Trigger
+ *
+ * Copyright (C) 2012 Shuah Khan <shuahkhan@gmail.com>
+ *
+ * Based on Richard Purdie's ledtrig-timer.c and Atsushi Nemoto's
+ * ledtrig-heartbeat.c
+ * Design and use-case input from Jonas Bonn <jonas@southpole.se> and
+ * Neil Brown <neilb@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+/*
+ * The transient trigger allows one-shot timer activation. Please refer to
+ * Documentation/leds/ledtrig-transient.txt for details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/leds.h>
+#include "leds.h"
+
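+/*
+ * activate:      1 while the one-shot timer is pending, 0 otherwise
+ * state:         brightness applied while the trigger is active
+ * restore_state: brightness restored when the timer expires or is cancelled
+ * duration:      timer duration, added directly to jiffies by mod_timer()
+ */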
+struct transient_trig_data {
+ int activate;
+ int state;
+ int restore_state;
+ unsigned long duration;
+ struct timer_list timer;
+};
+
+static void transient_timer_function(unsigned long data)
+{
+ struct led_classdev *led_cdev = (struct led_classdev *) data;
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+ transient_data->activate = 0;
+ led_set_brightness(led_cdev, transient_data->restore_state);
+}
+
+static ssize_t transient_activate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+ return sprintf(buf, "%d\n", transient_data->activate);
+}
+
+static ssize_t transient_activate_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ unsigned long state;
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ if (state != 1 && state != 0)
+ return -EINVAL;
+
+ /* cancel the running timer */
+ if (state == 0 && transient_data->activate == 1) {
+ del_timer(&transient_data->timer);
+ transient_data->activate = state;
+ led_set_brightness(led_cdev, transient_data->restore_state);
+ return size;
+ }
+
+ /* start timer if there is no active timer */
+ if (state == 1 && transient_data->activate == 0 &&
+ transient_data->duration != 0) {
+ transient_data->activate = state;
+ led_set_brightness(led_cdev, transient_data->state);
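+		/* On expiry, restore the inverse of the transient state. */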
+ transient_data->restore_state =
+ (transient_data->state == LED_FULL) ? LED_OFF : LED_FULL;
+ mod_timer(&transient_data->timer,
+ jiffies + transient_data->duration);
+ }
+
+	/*
+	 * state == 0 && transient_data->activate == 0:
+	 *	timer is not active - just return
+	 * state == 1 && transient_data->activate == 1:
+	 *	timer is already active - just return
+	 */
+
+ return size;
+}
+
+static ssize_t transient_duration_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+ return sprintf(buf, "%lu\n", transient_data->duration);
+}
+
+static ssize_t transient_duration_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ unsigned long state;
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ transient_data->duration = state;
+ return size;
+}
+
+static ssize_t transient_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ int state;
+
+ state = (transient_data->state == LED_FULL) ? 1 : 0;
+ return sprintf(buf, "%d\n", state);
+}
+
+static ssize_t transient_state_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+ unsigned long state;
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 10, &state);
+ if (ret)
+ return ret;
+
+ if (state != 1 && state != 0)
+ return -EINVAL;
+
+ transient_data->state = (state == 1) ? LED_FULL : LED_OFF;
+ return size;
+}
+
+static DEVICE_ATTR(activate, 0644, transient_activate_show,
+ transient_activate_store);
+static DEVICE_ATTR(duration, 0644, transient_duration_show,
+ transient_duration_store);
+static DEVICE_ATTR(state, 0644, transient_state_show, transient_state_store);
+
+static void transient_trig_activate(struct led_classdev *led_cdev)
+{
+ int rc;
+ struct transient_trig_data *tdata;
+
+ tdata = kzalloc(sizeof(struct transient_trig_data), GFP_KERNEL);
+ if (!tdata) {
+ dev_err(led_cdev->dev,
+ "unable to allocate transient trigger\n");
+ return;
+ }
+ led_cdev->trigger_data = tdata;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_activate);
+ if (rc)
+ goto err_out;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_duration);
+ if (rc)
+ goto err_out_duration;
+
+ rc = device_create_file(led_cdev->dev, &dev_attr_state);
+ if (rc)
+ goto err_out_state;
+
+ setup_timer(&tdata->timer, transient_timer_function,
+ (unsigned long) led_cdev);
+ led_cdev->activated = true;
+
+ return;
+
+err_out_state:
+ device_remove_file(led_cdev->dev, &dev_attr_duration);
+err_out_duration:
+ device_remove_file(led_cdev->dev, &dev_attr_activate);
+err_out:
+ dev_err(led_cdev->dev, "unable to register transient trigger\n");
+ led_cdev->trigger_data = NULL;
+ kfree(tdata);
+}
+
+static void transient_trig_deactivate(struct led_classdev *led_cdev)
+{
+ struct transient_trig_data *transient_data = led_cdev->trigger_data;
+
+ if (led_cdev->activated) {
+ del_timer_sync(&transient_data->timer);
+ led_set_brightness(led_cdev, transient_data->restore_state);
+ device_remove_file(led_cdev->dev, &dev_attr_activate);
+ device_remove_file(led_cdev->dev, &dev_attr_duration);
+ device_remove_file(led_cdev->dev, &dev_attr_state);
+ led_cdev->trigger_data = NULL;
+ led_cdev->activated = false;
+ kfree(transient_data);
+ }
+}
+
+static struct led_trigger transient_trigger = {
+ .name = "transient",
+ .activate = transient_trig_activate,
+ .deactivate = transient_trig_deactivate,
+};
+
+static int __init transient_trig_init(void)
+{
+ return led_trigger_register(&transient_trigger);
+}
+
+static void __exit transient_trig_exit(void)
+{
+ led_trigger_unregister(&transient_trigger);
+}
+
+module_init(transient_trig_init);
+module_exit(transient_trig_exit);
+
+MODULE_AUTHOR("Shuah Khan <shuahkhan@gmail.com>");
+MODULE_DESCRIPTION("Transient LED trigger");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 754f38f..638dae0 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
+#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
@@ -61,11 +62,11 @@ struct multipath {
struct list_head list;
struct dm_target *ti;
- spinlock_t lock;
-
const char *hw_handler_name;
char *hw_handler_params;
+ spinlock_t lock;
+
unsigned nr_priority_groups;
struct list_head priority_groups;
@@ -81,16 +82,17 @@ struct multipath {
struct priority_group *next_pg; /* Switch to this PG if set */
unsigned repeat_count; /* I/Os left before calling PS again */
- unsigned queue_io; /* Must we queue all I/O? */
- unsigned queue_if_no_path; /* Queue I/O if last path fails? */
- unsigned saved_queue_if_no_path;/* Saved state during suspension */
+ unsigned queue_io:1; /* Must we queue all I/O? */
+ unsigned queue_if_no_path:1; /* Queue I/O if last path fails? */
+ unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
+
unsigned pg_init_retries; /* Number of times to retry pg_init */
unsigned pg_init_count; /* Number of times pg_init called */
unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
+ unsigned queue_size;
struct work_struct process_queued_ios;
struct list_head queued_ios;
- unsigned queue_size;
struct work_struct trigger_event;
@@ -328,14 +330,18 @@ static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
/*
* Loop through priority groups until we find a valid path.
* First time we skip PGs marked 'bypassed'.
- * Second time we only try the ones we skipped.
+ * Second time we only try the ones we skipped, but set
+ * pg_init_delay_retry so we do not hammer controllers.
*/
do {
list_for_each_entry(pg, &m->priority_groups, list) {
if (pg->bypassed == bypassed)
continue;
- if (!__choose_path_in_pg(m, pg, nr_bytes))
+ if (!__choose_path_in_pg(m, pg, nr_bytes)) {
+ if (!bypassed)
+ m->pg_init_delay_retry = 1;
return;
+ }
}
} while (bypassed--);
@@ -481,9 +487,6 @@ static void process_queued_ios(struct work_struct *work)
spin_lock_irqsave(&m->lock, flags);
- if (!m->queue_size)
- goto out;
-
if (!m->current_pgpath)
__choose_pgpath(m, 0);
@@ -496,7 +499,6 @@ static void process_queued_ios(struct work_struct *work)
if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
__pg_init_all_paths(m);
-out:
spin_unlock_irqrestore(&m->lock, flags);
if (!must_queue)
dispatch_queued_ios(m);
@@ -1517,11 +1519,16 @@ out:
static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
unsigned long arg)
{
- struct multipath *m = (struct multipath *) ti->private;
- struct block_device *bdev = NULL;
- fmode_t mode = 0;
+ struct multipath *m = ti->private;
+ struct block_device *bdev;
+ fmode_t mode;
unsigned long flags;
- int r = 0;
+ int r;
+
+again:
+ bdev = NULL;
+ mode = 0;
+ r = 0;
spin_lock_irqsave(&m->lock, flags);
@@ -1546,6 +1553,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
r = scsi_verify_blk_ioctl(NULL, cmd);
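+	/*
+	 * On -EAGAIN, kick the worker that processes queued I/O and retry
+	 * shortly, unless the caller has a fatal signal pending.
+	 */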
+ if (r == -EAGAIN && !fatal_signal_pending(current)) {
+ queue_work(kmultipathd, &m->process_queued_ios);
+ msleep(10);
+ goto again;
+ }
+
return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}
@@ -1643,7 +1656,7 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 3, 0},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index d039de8..b58b7a3 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1084,6 +1084,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->split_io = dm_rh_get_region_size(ms->rh);
ti->num_flush_requests = 1;
ti->num_discard_requests = 1;
+ ti->discard_zeroes_data_unsupported = 1;
ms->kmirrord_wq = alloc_workqueue("kmirrord",
WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
@@ -1214,7 +1215,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
* We need to dec pending if this was a write.
*/
if (rw == WRITE) {
- if (!(bio->bi_rw & REQ_FLUSH))
+ if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
dm_rh_dec(ms->rh, map_context->ll);
return error;
}
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 7771ed2..69732e0 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -404,6 +404,9 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
return;
}
+ if (bio->bi_rw & REQ_DISCARD)
+ return;
+
/* We must inform the log that the sync count has changed. */
log->type->set_region_sync(log, region, 0);
@@ -524,7 +527,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
struct bio *bio;
for (bio = bios->head; bio; bio = bio->bi_next) {
- if (bio->bi_rw & REQ_FLUSH)
+ if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))
continue;
rh_inc(rh, dm_rh_bio_to_region(rh, bio));
}
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 737d388..3e2907f 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1082,12 +1082,89 @@ int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
return 0;
}
-static int __get_held_metadata_root(struct dm_pool_metadata *pmd,
- dm_block_t *result)
+static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
+{
+ int r, inc;
+ struct thin_disk_superblock *disk_super;
+ struct dm_block *copy, *sblock;
+ dm_block_t held_root;
+
+ /*
+ * Copy the superblock.
+ */
+ dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
+ r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
+ &sb_validator, &copy, &inc);
+ if (r)
+ return r;
+
+ BUG_ON(!inc);
+
+ held_root = dm_block_location(copy);
+ disk_super = dm_block_data(copy);
+
+ if (le64_to_cpu(disk_super->held_root)) {
+ DMWARN("Pool metadata snapshot already exists: release this before taking another.");
+
+ dm_tm_dec(pmd->tm, held_root);
+ dm_tm_unlock(pmd->tm, copy);
+ pmd->need_commit = 1;
+
+ return -EBUSY;
+ }
+
+ /*
+ * Wipe the spacemap since we're not publishing this.
+ */
+ memset(&disk_super->data_space_map_root, 0,
+ sizeof(disk_super->data_space_map_root));
+ memset(&disk_super->metadata_space_map_root, 0,
+ sizeof(disk_super->metadata_space_map_root));
+
+ /*
+ * Increment the data structures that need to be preserved.
+ */
+ dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
+ dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
+ dm_tm_unlock(pmd->tm, copy);
+
+ /*
+ * Write the held root into the superblock.
+ */
+ r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+ &sb_validator, &sblock);
+ if (r) {
+ dm_tm_dec(pmd->tm, held_root);
+ pmd->need_commit = 1;
+ return r;
+ }
+
+ disk_super = dm_block_data(sblock);
+ disk_super->held_root = cpu_to_le64(held_root);
+ dm_bm_unlock(sblock);
+
+ pmd->need_commit = 1;
+
+ return 0;
+}
+
+int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
+{
+ int r;
+
+ down_write(&pmd->root_lock);
+ r = __reserve_metadata_snap(pmd);
+ up_write(&pmd->root_lock);
+
+ return r;
+}
+
+static int __release_metadata_snap(struct dm_pool_metadata *pmd)
{
int r;
struct thin_disk_superblock *disk_super;
- struct dm_block *sblock;
+ struct dm_block *sblock, *copy;
+ dm_block_t held_root;
r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
&sb_validator, &sblock);
@@ -1095,18 +1172,65 @@ static int __get_held_metadata_root(struct dm_pool_metadata *pmd,
return r;
disk_super = dm_block_data(sblock);
+ held_root = le64_to_cpu(disk_super->held_root);
+ disk_super->held_root = cpu_to_le64(0);
+ pmd->need_commit = 1;
+
+ dm_bm_unlock(sblock);
+
+ if (!held_root) {
+ DMWARN("No pool metadata snapshot found: nothing to release.");
+ return -EINVAL;
+ }
+
+ r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy);
+ if (r)
+ return r;
+
+ disk_super = dm_block_data(copy);
+ dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
+ dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
+ dm_sm_dec_block(pmd->metadata_sm, held_root);
+
+ return dm_tm_unlock(pmd->tm, copy);
+}
+
+int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
+{
+ int r;
+
+ down_write(&pmd->root_lock);
+ r = __release_metadata_snap(pmd);
+ up_write(&pmd->root_lock);
+
+ return r;
+}
+
+static int __get_metadata_snap(struct dm_pool_metadata *pmd,
+ dm_block_t *result)
+{
+ int r;
+ struct thin_disk_superblock *disk_super;
+ struct dm_block *sblock;
+
+ r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+ &sb_validator, &sblock);
+ if (r)
+ return r;
+
+ disk_super = dm_block_data(sblock);
*result = le64_to_cpu(disk_super->held_root);
return dm_bm_unlock(sblock);
}
-int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd,
- dm_block_t *result)
+int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
+ dm_block_t *result)
{
int r;
down_read(&pmd->root_lock);
- r = __get_held_metadata_root(pmd, result);
+ r = __get_metadata_snap(pmd, result);
up_read(&pmd->root_lock);
return r;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index ed4725e..b88918c 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -90,11 +90,18 @@ int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
/*
* Hold/get root for userspace transaction.
+ *
+ * The metadata snapshot is a copy of the current superblock (minus the
+ * space maps). Userland can access the data structures for READ
+ * operations only. A small performance hit is incurred by providing this
+ * copy of the metadata to userland due to extra copy-on-write operations
+ * on the metadata nodes. Release this as soon as you finish with it.
*/
-int dm_pool_hold_metadata_root(struct dm_pool_metadata *pmd);
+int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd);
+int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd);
-int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd,
- dm_block_t *result);
+int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
+ dm_block_t *result);
/*
* Actions on a single virtual device.
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index eb3d138..68694da 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -111,7 +111,7 @@ struct cell_key {
dm_block_t block;
};
-struct cell {
+struct dm_bio_prison_cell {
struct hlist_node list;
struct bio_prison *prison;
struct cell_key key;
@@ -141,6 +141,8 @@ static uint32_t calc_nr_buckets(unsigned nr_cells)
return n;
}
+static struct kmem_cache *_cell_cache;
+
/*
* @nr_cells should be the number of cells you want in use _concurrently_.
* Don't confuse it with the number of distinct keys.
@@ -157,8 +159,7 @@ static struct bio_prison *prison_create(unsigned nr_cells)
return NULL;
spin_lock_init(&prison->lock);
- prison->cell_pool = mempool_create_kmalloc_pool(nr_cells,
- sizeof(struct cell));
+ prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
if (!prison->cell_pool) {
kfree(prison);
return NULL;
@@ -194,10 +195,10 @@ static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
(lhs->block == rhs->block);
}
-static struct cell *__search_bucket(struct hlist_head *bucket,
- struct cell_key *key)
+static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
+ struct cell_key *key)
{
- struct cell *cell;
+ struct dm_bio_prison_cell *cell;
struct hlist_node *tmp;
hlist_for_each_entry(cell, tmp, bucket, list)
@@ -214,12 +215,12 @@ static struct cell *__search_bucket(struct hlist_head *bucket,
* Returns 1 if the cell was already held, 0 if @inmate is the new holder.
*/
static int bio_detain(struct bio_prison *prison, struct cell_key *key,
- struct bio *inmate, struct cell **ref)
+ struct bio *inmate, struct dm_bio_prison_cell **ref)
{
int r = 1;
unsigned long flags;
uint32_t hash = hash_key(prison, key);
- struct cell *cell, *cell2;
+ struct dm_bio_prison_cell *cell, *cell2;
BUG_ON(hash > prison->nr_buckets);
@@ -273,7 +274,7 @@ out:
/*
* @inmates must have been initialised prior to this call
*/
-static void __cell_release(struct cell *cell, struct bio_list *inmates)
+static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
{
struct bio_prison *prison = cell->prison;
@@ -287,7 +288,7 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates)
mempool_free(cell, prison->cell_pool);
}
-static void cell_release(struct cell *cell, struct bio_list *bios)
+static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
{
unsigned long flags;
struct bio_prison *prison = cell->prison;
@@ -303,7 +304,7 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
* bio may be in the cell. This function releases the cell, and also does
* a sanity check.
*/
-static void __cell_release_singleton(struct cell *cell, struct bio *bio)
+static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
{
BUG_ON(cell->holder != bio);
BUG_ON(!bio_list_empty(&cell->bios));
@@ -311,7 +312,7 @@ static void __cell_release_singleton(struct cell *cell, struct bio *bio)
__cell_release(cell, NULL);
}
-static void cell_release_singleton(struct cell *cell, struct bio *bio)
+static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
{
unsigned long flags;
struct bio_prison *prison = cell->prison;
@@ -324,7 +325,8 @@ static void cell_release_singleton(struct cell *cell, struct bio *bio)
/*
* Sometimes we don't want the holder, just the additional bios.
*/
-static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
+ struct bio_list *inmates)
{
struct bio_prison *prison = cell->prison;
@@ -334,7 +336,8 @@ static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates
mempool_free(cell, prison->cell_pool);
}
-static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
+ struct bio_list *inmates)
{
unsigned long flags;
struct bio_prison *prison = cell->prison;
@@ -344,7 +347,7 @@ static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
spin_unlock_irqrestore(&prison->lock, flags);
}
-static void cell_error(struct cell *cell)
+static void cell_error(struct dm_bio_prison_cell *cell)
{
struct bio_prison *prison = cell->prison;
struct bio_list bios;
@@ -491,7 +494,7 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
* also provides the interface for creating and destroying internal
* devices.
*/
-struct new_mapping;
+struct dm_thin_new_mapping;
struct pool_features {
unsigned zero_new_blocks:1;
@@ -537,7 +540,7 @@ struct pool {
struct deferred_set shared_read_ds;
struct deferred_set all_io_ds;
- struct new_mapping *next_mapping;
+ struct dm_thin_new_mapping *next_mapping;
mempool_t *mapping_pool;
mempool_t *endio_hook_pool;
};
@@ -630,11 +633,11 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev
/*----------------------------------------------------------------*/
-struct endio_hook {
+struct dm_thin_endio_hook {
struct thin_c *tc;
struct deferred_entry *shared_read_entry;
struct deferred_entry *all_io_entry;
- struct new_mapping *overwrite_mapping;
+ struct dm_thin_new_mapping *overwrite_mapping;
};
static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
@@ -647,7 +650,8 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
bio_list_init(master);
while ((bio = bio_list_pop(&bios))) {
- struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
if (h->tc == tc)
bio_endio(bio, DM_ENDIO_REQUEUE);
else
@@ -736,7 +740,7 @@ static void wake_worker(struct pool *pool)
/*
* Bio endio functions.
*/
-struct new_mapping {
+struct dm_thin_new_mapping {
struct list_head list;
unsigned quiesced:1;
@@ -746,7 +750,7 @@ struct new_mapping {
struct thin_c *tc;
dm_block_t virt_block;
dm_block_t data_block;
- struct cell *cell, *cell2;
+ struct dm_bio_prison_cell *cell, *cell2;
int err;
/*
@@ -759,7 +763,7 @@ struct new_mapping {
bio_end_io_t *saved_bi_end_io;
};
-static void __maybe_add_mapping(struct new_mapping *m)
+static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
{
struct pool *pool = m->tc->pool;
@@ -772,7 +776,7 @@ static void __maybe_add_mapping(struct new_mapping *m)
static void copy_complete(int read_err, unsigned long write_err, void *context)
{
unsigned long flags;
- struct new_mapping *m = context;
+ struct dm_thin_new_mapping *m = context;
struct pool *pool = m->tc->pool;
m->err = read_err || write_err ? -EIO : 0;
@@ -786,8 +790,8 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
static void overwrite_endio(struct bio *bio, int err)
{
unsigned long flags;
- struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
- struct new_mapping *m = h->overwrite_mapping;
+ struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_new_mapping *m = h->overwrite_mapping;
struct pool *pool = m->tc->pool;
m->err = err;
@@ -811,7 +815,7 @@ static void overwrite_endio(struct bio *bio, int err)
/*
* This sends the bios in the cell back to the deferred_bios list.
*/
-static void cell_defer(struct thin_c *tc, struct cell *cell,
+static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
dm_block_t data_block)
{
struct pool *pool = tc->pool;
@@ -828,7 +832,7 @@ static void cell_defer(struct thin_c *tc, struct cell *cell,
* Same as cell_defer above, except it omits one particular detainee,
* a write bio that covers the block and has already been processed.
*/
-static void cell_defer_except(struct thin_c *tc, struct cell *cell)
+static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
struct bio_list bios;
struct pool *pool = tc->pool;
@@ -843,7 +847,7 @@ static void cell_defer_except(struct thin_c *tc, struct cell *cell)
wake_worker(pool);
}
-static void process_prepared_mapping(struct new_mapping *m)
+static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
struct thin_c *tc = m->tc;
struct bio *bio;
@@ -886,7 +890,7 @@ static void process_prepared_mapping(struct new_mapping *m)
mempool_free(m, tc->pool->mapping_pool);
}
-static void process_prepared_discard(struct new_mapping *m)
+static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
int r;
struct thin_c *tc = m->tc;
@@ -909,11 +913,11 @@ static void process_prepared_discard(struct new_mapping *m)
}
static void process_prepared(struct pool *pool, struct list_head *head,
- void (*fn)(struct new_mapping *))
+ void (*fn)(struct dm_thin_new_mapping *))
{
unsigned long flags;
struct list_head maps;
- struct new_mapping *m, *tmp;
+ struct dm_thin_new_mapping *m, *tmp;
INIT_LIST_HEAD(&maps);
spin_lock_irqsave(&pool->lock, flags);
@@ -957,9 +961,9 @@ static int ensure_next_mapping(struct pool *pool)
return pool->next_mapping ? 0 : -ENOMEM;
}
-static struct new_mapping *get_next_mapping(struct pool *pool)
+static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
- struct new_mapping *r = pool->next_mapping;
+ struct dm_thin_new_mapping *r = pool->next_mapping;
BUG_ON(!pool->next_mapping);
@@ -971,11 +975,11 @@ static struct new_mapping *get_next_mapping(struct pool *pool)
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
struct dm_dev *origin, dm_block_t data_origin,
dm_block_t data_dest,
- struct cell *cell, struct bio *bio)
+ struct dm_bio_prison_cell *cell, struct bio *bio)
{
int r;
struct pool *pool = tc->pool;
- struct new_mapping *m = get_next_mapping(pool);
+ struct dm_thin_new_mapping *m = get_next_mapping(pool);
INIT_LIST_HEAD(&m->list);
m->quiesced = 0;
@@ -997,7 +1001,8 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
* bio immediately. Otherwise we use kcopyd to clone the data first.
*/
if (io_overwrites_block(pool, bio)) {
- struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
h->overwrite_mapping = m;
m->bio = bio;
save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
@@ -1025,7 +1030,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
dm_block_t data_origin, dm_block_t data_dest,
- struct cell *cell, struct bio *bio)
+ struct dm_bio_prison_cell *cell, struct bio *bio)
{
schedule_copy(tc, virt_block, tc->pool_dev,
data_origin, data_dest, cell, bio);
@@ -1033,18 +1038,18 @@ static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
dm_block_t data_dest,
- struct cell *cell, struct bio *bio)
+ struct dm_bio_prison_cell *cell, struct bio *bio)
{
schedule_copy(tc, virt_block, tc->origin_dev,
virt_block, data_dest, cell, bio);
}
static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
- dm_block_t data_block, struct cell *cell,
+ dm_block_t data_block, struct dm_bio_prison_cell *cell,
struct bio *bio)
{
struct pool *pool = tc->pool;
- struct new_mapping *m = get_next_mapping(pool);
+ struct dm_thin_new_mapping *m = get_next_mapping(pool);
INIT_LIST_HEAD(&m->list);
m->quiesced = 1;
@@ -1065,12 +1070,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
process_prepared_mapping(m);
else if (io_overwrites_block(pool, bio)) {
- struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
+
h->overwrite_mapping = m;
m->bio = bio;
save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
remap_and_issue(tc, bio, data_block);
-
} else {
int r;
struct dm_io_region to;
@@ -1155,7 +1160,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
*/
static void retry_on_resume(struct bio *bio)
{
- struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
struct thin_c *tc = h->tc;
struct pool *pool = tc->pool;
unsigned long flags;
@@ -1165,7 +1170,7 @@ static void retry_on_resume(struct bio *bio)
spin_unlock_irqrestore(&pool->lock, flags);
}
-static void no_space(struct cell *cell)
+static void no_space(struct dm_bio_prison_cell *cell)
{
struct bio *bio;
struct bio_list bios;
@@ -1182,11 +1187,11 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
int r;
unsigned long flags;
struct pool *pool = tc->pool;
- struct cell *cell, *cell2;
+ struct dm_bio_prison_cell *cell, *cell2;
struct cell_key key, key2;
dm_block_t block = get_bio_block(tc, bio);
struct dm_thin_lookup_result lookup_result;
- struct new_mapping *m;
+ struct dm_thin_new_mapping *m;
build_virtual_key(tc->td, block, &key);
if (bio_detain(tc->pool->prison, &key, bio, &cell))
@@ -1240,7 +1245,10 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
cell_release_singleton(cell, bio);
cell_release_singleton(cell2, bio);
- remap_and_issue(tc, bio, lookup_result.block);
+ if ((!lookup_result.shared) && pool->pf.discard_passdown)
+ remap_and_issue(tc, bio, lookup_result.block);
+ else
+ bio_endio(bio, 0);
}
break;
@@ -1263,7 +1271,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
struct cell_key *key,
struct dm_thin_lookup_result *lookup_result,
- struct cell *cell)
+ struct dm_bio_prison_cell *cell)
{
int r;
dm_block_t data_block;
@@ -1290,7 +1298,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
dm_block_t block,
struct dm_thin_lookup_result *lookup_result)
{
- struct cell *cell;
+ struct dm_bio_prison_cell *cell;
struct pool *pool = tc->pool;
struct cell_key key;
@@ -1305,7 +1313,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
if (bio_data_dir(bio) == WRITE)
break_sharing(tc, bio, block, &key, lookup_result, cell);
else {
- struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
h->shared_read_entry = ds_inc(&pool->shared_read_ds);
@@ -1315,7 +1323,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
}
static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
- struct cell *cell)
+ struct dm_bio_prison_cell *cell)
{
int r;
dm_block_t data_block;
@@ -1363,7 +1371,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
{
int r;
dm_block_t block = get_bio_block(tc, bio);
- struct cell *cell;
+ struct dm_bio_prison_cell *cell;
struct cell_key key;
struct dm_thin_lookup_result lookup_result;
@@ -1432,7 +1440,7 @@ static void process_deferred_bios(struct pool *pool)
spin_unlock_irqrestore(&pool->lock, flags);
while ((bio = bio_list_pop(&bios))) {
- struct endio_hook *h = dm_get_mapinfo(bio)->ptr;
+ struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
struct thin_c *tc = h->tc;
/*
@@ -1522,10 +1530,10 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
wake_worker(pool);
}
-static struct endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
+static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
{
struct pool *pool = tc->pool;
- struct endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
+ struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
h->tc = tc;
h->shared_read_entry = NULL;
@@ -1687,6 +1695,9 @@ static void __pool_destroy(struct pool *pool)
kfree(pool);
}
+static struct kmem_cache *_new_mapping_cache;
+static struct kmem_cache *_endio_hook_cache;
+
static struct pool *pool_create(struct mapped_device *pool_md,
struct block_device *metadata_dev,
unsigned long block_size, char **error)
@@ -1755,16 +1766,16 @@ static struct pool *pool_create(struct mapped_device *pool_md,
ds_init(&pool->all_io_ds);
pool->next_mapping = NULL;
- pool->mapping_pool =
- mempool_create_kmalloc_pool(MAPPING_POOL_SIZE, sizeof(struct new_mapping));
+ pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
+ _new_mapping_cache);
if (!pool->mapping_pool) {
*error = "Error creating pool's mapping mempool";
err_p = ERR_PTR(-ENOMEM);
goto bad_mapping_pool;
}
- pool->endio_hook_pool =
- mempool_create_kmalloc_pool(ENDIO_HOOK_POOL_SIZE, sizeof(struct endio_hook));
+ pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
+ _endio_hook_cache);
if (!pool->endio_hook_pool) {
*error = "Error creating pool's endio_hook mempool";
err_p = ERR_PTR(-ENOMEM);
@@ -2276,6 +2287,43 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po
return 0;
}
+static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+{
+ int r;
+
+ r = check_arg_count(argc, 1);
+ if (r)
+ return r;
+
+ r = dm_pool_commit_metadata(pool->pmd);
+ if (r) {
+ DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
+ __func__, r);
+ return r;
+ }
+
+ r = dm_pool_reserve_metadata_snap(pool->pmd);
+ if (r)
+ DMWARN("reserve_metadata_snap message failed.");
+
+ return r;
+}
+
+static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+{
+ int r;
+
+ r = check_arg_count(argc, 1);
+ if (r)
+ return r;
+
+ r = dm_pool_release_metadata_snap(pool->pmd);
+ if (r)
+ DMWARN("release_metadata_snap message failed.");
+
+ return r;
+}
+
/*
* Messages supported:
* create_thin <dev_id>
@@ -2283,6 +2331,8 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po
* delete <dev_id>
* trim <dev_id> <new_size_in_sectors>
* set_transaction_id <current_trans_id> <new_trans_id>
+ * reserve_metadata_snap
+ * release_metadata_snap
*/
static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
{
@@ -2302,6 +2352,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
else if (!strcasecmp(argv[0], "set_transaction_id"))
r = process_set_transaction_id_mesg(argc, argv, pool);
+ else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
+ r = process_reserve_metadata_snap_mesg(argc, argv, pool);
+
+ else if (!strcasecmp(argv[0], "release_metadata_snap"))
+ r = process_release_metadata_snap_mesg(argc, argv, pool);
+
else
DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
@@ -2361,7 +2417,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
if (r)
return r;
- r = dm_pool_get_held_metadata_root(pool->pmd, &held_root);
+ r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
if (r)
return r;
@@ -2457,7 +2513,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 1, 0},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -2575,6 +2631,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (tc->pool->pf.discard_enabled) {
ti->discards_supported = 1;
ti->num_discard_requests = 1;
+ ti->discard_zeroes_data_unsupported = 1;
}
dm_put(pool_md);
@@ -2613,9 +2670,9 @@ static int thin_endio(struct dm_target *ti,
union map_info *map_context)
{
unsigned long flags;
- struct endio_hook *h = map_context->ptr;
+ struct dm_thin_endio_hook *h = map_context->ptr;
struct list_head work;
- struct new_mapping *m, *tmp;
+ struct dm_thin_new_mapping *m, *tmp;
struct pool *pool = h->tc->pool;
if (h->shared_read_entry) {
@@ -2755,7 +2812,32 @@ static int __init dm_thin_init(void)
r = dm_register_target(&pool_target);
if (r)
- dm_unregister_target(&thin_target);
+ goto bad_pool_target;
+
+ r = -ENOMEM;
+
+ _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
+ if (!_cell_cache)
+ goto bad_cell_cache;
+
+ _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
+ if (!_new_mapping_cache)
+ goto bad_new_mapping_cache;
+
+ _endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
+ if (!_endio_hook_cache)
+ goto bad_endio_hook_cache;
+
+ return 0;
+
+bad_endio_hook_cache:
+ kmem_cache_destroy(_new_mapping_cache);
+bad_new_mapping_cache:
+ kmem_cache_destroy(_cell_cache);
+bad_cell_cache:
+ dm_unregister_target(&pool_target);
+bad_pool_target:
+ dm_unregister_target(&thin_target);
return r;
}
@@ -2764,6 +2846,10 @@ static void dm_thin_exit(void)
{
dm_unregister_target(&thin_target);
dm_unregister_target(&pool_target);
+
+ kmem_cache_destroy(_cell_cache);
+ kmem_cache_destroy(_new_mapping_cache);
+ kmem_cache_destroy(_endio_hook_cache);
}
module_init(dm_thin_init);
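The dm-thin hunks above replace the kmalloc-backed mempools with mempools built on dedicated kmem caches, created in dm_thin_init() and torn down in reverse order in dm_thin_exit(). A minimal sketch of that allocation pattern, using illustrative names (struct example_item, example_cache, example_pool) that are not part of the patch:

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mempool.h>

struct example_item {
	int value;
};

static struct kmem_cache *example_cache;
static mempool_t *example_pool;

static int __init example_pools_init(void)
{
	/* KMEM_CACHE() names and sizes the cache from the struct itself. */
	example_cache = KMEM_CACHE(example_item, 0);
	if (!example_cache)
		return -ENOMEM;

	/* Keep a reserve of 16 objects so mempool_alloc() (e.g. with
	 * GFP_NOIO) can still make progress under memory pressure. */
	example_pool = mempool_create_slab_pool(16, example_cache);
	if (!example_pool) {
		kmem_cache_destroy(example_cache);
		return -ENOMEM;
	}

	return 0;
}

static void __exit example_pools_exit(void)
{
	/* Destroy in reverse order of creation. */
	mempool_destroy(example_pool);
	kmem_cache_destroy(example_cache);
}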
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 1c2f904..d5ab449 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2931,6 +2931,7 @@ offset_store(struct md_rdev *rdev, const char *buf, size_t len)
* can be sane */
return -EBUSY;
rdev->data_offset = offset;
+ rdev->new_data_offset = offset;
return len;
}
@@ -3926,8 +3927,8 @@ array_state_show(struct mddev *mddev, char *page)
return sprintf(page, "%s\n", array_states[st]);
}
-static int do_md_stop(struct mddev * mddev, int ro, int is_open);
-static int md_set_readonly(struct mddev * mddev, int is_open);
+static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
+static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
static int do_md_run(struct mddev * mddev);
static int restart_array(struct mddev *mddev);
@@ -3943,14 +3944,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
/* stopping an active array */
if (atomic_read(&mddev->openers) > 0)
return -EBUSY;
- err = do_md_stop(mddev, 0, 0);
+ err = do_md_stop(mddev, 0, NULL);
break;
case inactive:
/* stopping an active array */
if (mddev->pers) {
if (atomic_read(&mddev->openers) > 0)
return -EBUSY;
- err = do_md_stop(mddev, 2, 0);
+ err = do_md_stop(mddev, 2, NULL);
} else
err = 0; /* already inactive */
break;
@@ -3958,7 +3959,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
break; /* not supported yet */
case readonly:
if (mddev->pers)
- err = md_set_readonly(mddev, 0);
+ err = md_set_readonly(mddev, NULL);
else {
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
@@ -3968,7 +3969,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
case read_auto:
if (mddev->pers) {
if (mddev->ro == 0)
- err = md_set_readonly(mddev, 0);
+ err = md_set_readonly(mddev, NULL);
else if (mddev->ro == 1)
err = restart_array(mddev);
if (err == 0) {
@@ -5351,15 +5352,17 @@ void md_stop(struct mddev *mddev)
}
EXPORT_SYMBOL_GPL(md_stop);
-static int md_set_readonly(struct mddev *mddev, int is_open)
+static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
{
int err = 0;
mutex_lock(&mddev->open_mutex);
- if (atomic_read(&mddev->openers) > is_open) {
+ if (atomic_read(&mddev->openers) > !!bdev) {
printk("md: %s still in use.\n",mdname(mddev));
err = -EBUSY;
goto out;
}
+ if (bdev)
+ sync_blockdev(bdev);
if (mddev->pers) {
__md_stop_writes(mddev);
@@ -5381,18 +5384,26 @@ out:
* 0 - completely stop and dis-assemble array
* 2 - stop but do not disassemble array
*/
-static int do_md_stop(struct mddev * mddev, int mode, int is_open)
+static int do_md_stop(struct mddev * mddev, int mode,
+ struct block_device *bdev)
{
struct gendisk *disk = mddev->gendisk;
struct md_rdev *rdev;
mutex_lock(&mddev->open_mutex);
- if (atomic_read(&mddev->openers) > is_open ||
+ if (atomic_read(&mddev->openers) > !!bdev ||
mddev->sysfs_active) {
printk("md: %s still in use.\n",mdname(mddev));
mutex_unlock(&mddev->open_mutex);
return -EBUSY;
}
+ if (bdev)
+ /* It is possible IO was issued on some other
+ * open file which was closed before we took ->open_mutex.
+ * As that was not the last close, __blkdev_put will not
+ * have called sync_blockdev, so we must.
+ */
+ sync_blockdev(bdev);
if (mddev->pers) {
if (mddev->ro)
@@ -5466,7 +5477,7 @@ static void autorun_array(struct mddev *mddev)
err = do_md_run(mddev);
if (err) {
printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
- do_md_stop(mddev, 0, 0);
+ do_md_stop(mddev, 0, NULL);
}
}
@@ -5784,8 +5795,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
super_types[mddev->major_version].
validate_super(mddev, rdev);
if ((info->state & (1<<MD_DISK_SYNC)) &&
- (!test_bit(In_sync, &rdev->flags) ||
- rdev->raid_disk != info->raid_disk)) {
+ rdev->raid_disk != info->raid_disk) {
/* This was a hot-add request, but events don't
* match, so reject it.
*/
@@ -6482,11 +6492,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
goto done_unlock;
case STOP_ARRAY:
- err = do_md_stop(mddev, 0, 1);
+ err = do_md_stop(mddev, 0, bdev);
goto done_unlock;
case STOP_ARRAY_RO:
- err = md_set_readonly(mddev, 1);
+ err = md_set_readonly(mddev, bdev);
goto done_unlock;
case BLKROSET:
@@ -6751,7 +6761,7 @@ struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev
thread->tsk = kthread_run(md_thread, thread,
"%s_%s",
mdname(thread->mddev),
- name ?: mddev->pers->name);
+ name);
if (IS_ERR(thread->tsk)) {
kfree(thread);
return NULL;
@@ -7298,6 +7308,7 @@ void md_do_sync(struct mddev *mddev)
int skipped = 0;
struct md_rdev *rdev;
char *desc;
+ struct blk_plug plug;
/* just incase thread restarts... */
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
@@ -7447,6 +7458,7 @@ void md_do_sync(struct mddev *mddev)
}
mddev->curr_resync_completed = j;
+ blk_start_plug(&plug);
while (j < max_sectors) {
sector_t sectors;
@@ -7552,6 +7564,7 @@ void md_do_sync(struct mddev *mddev)
* this also signals 'finished resyncing' to md_stop
*/
out:
+ blk_finish_plug(&plug);
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
/* tell personality that we are finished */
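The md_do_sync() hunks above bracket the resync loop with a block plug so the submitted bios are batched before being pushed down to the driver. A minimal sketch of that blk_start_plug()/blk_finish_plug() pairing, with generic_make_request() standing in for the per-iteration submission (the helper name is illustrative):

#include <linux/blkdev.h>
#include <linux/bio.h>

static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* queue requests on this task's plug */
	for (i = 0; i < nr; i++)
		generic_make_request(bios[i]);
	blk_finish_plug(&plug);		/* flush the whole batch downwards */
}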
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 9339e67..61a1833 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -474,7 +474,8 @@ static int multipath_run (struct mddev *mddev)
}
{
- mddev->thread = md_register_thread(multipathd, mddev, NULL);
+ mddev->thread = md_register_thread(multipathd, mddev,
+ "multipath");
if (!mddev->thread) {
printk(KERN_ERR "multipath: couldn't allocate thread"
" for %s\n", mdname(mddev));
diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
index 50ed53b..fc90c11 100644
--- a/drivers/md/persistent-data/dm-space-map-checker.c
+++ b/drivers/md/persistent-data/dm-space-map-checker.c
@@ -8,6 +8,7 @@
#include <linux/device-mapper.h>
#include <linux/export.h>
+#include <linux/vmalloc.h>
#ifdef CONFIG_DM_DEBUG_SPACE_MAPS
@@ -89,13 +90,23 @@ static int ca_create(struct count_array *ca, struct dm_space_map *sm)
ca->nr = nr_blocks;
ca->nr_free = nr_blocks;
- ca->counts = kzalloc(sizeof(*ca->counts) * nr_blocks, GFP_KERNEL);
- if (!ca->counts)
- return -ENOMEM;
+
+ if (!nr_blocks)
+ ca->counts = NULL;
+ else {
+ ca->counts = vzalloc(sizeof(*ca->counts) * nr_blocks);
+ if (!ca->counts)
+ return -ENOMEM;
+ }
return 0;
}
+static void ca_destroy(struct count_array *ca)
+{
+ vfree(ca->counts);
+}
+
static int ca_load(struct count_array *ca, struct dm_space_map *sm)
{
int r;
@@ -126,12 +137,14 @@ static int ca_load(struct count_array *ca, struct dm_space_map *sm)
static int ca_extend(struct count_array *ca, dm_block_t extra_blocks)
{
dm_block_t nr_blocks = ca->nr + extra_blocks;
- uint32_t *counts = kzalloc(sizeof(*counts) * nr_blocks, GFP_KERNEL);
+ uint32_t *counts = vzalloc(sizeof(*counts) * nr_blocks);
if (!counts)
return -ENOMEM;
- memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
- kfree(ca->counts);
+ if (ca->counts) {
+ memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
+ ca_destroy(ca);
+ }
ca->nr = nr_blocks;
ca->nr_free += extra_blocks;
ca->counts = counts;
@@ -151,11 +164,6 @@ static int ca_commit(struct count_array *old, struct count_array *new)
return 0;
}
-static void ca_destroy(struct count_array *ca)
-{
- kfree(ca->counts);
-}
-
/*----------------------------------------------------------------*/
struct sm_checker {
@@ -343,25 +351,25 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
int r;
struct sm_checker *smc;
- if (!sm)
- return NULL;
+ if (IS_ERR_OR_NULL(sm))
+ return ERR_PTR(-EINVAL);
smc = kmalloc(sizeof(*smc), GFP_KERNEL);
if (!smc)
- return NULL;
+ return ERR_PTR(-ENOMEM);
memcpy(&smc->sm, &ops_, sizeof(smc->sm));
r = ca_create(&smc->old_counts, sm);
if (r) {
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
r = ca_create(&smc->counts, sm);
if (r) {
ca_destroy(&smc->old_counts);
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
smc->real_sm = sm;
@@ -371,7 +379,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
ca_destroy(&smc->counts);
ca_destroy(&smc->old_counts);
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
r = ca_commit(&smc->old_counts, &smc->counts);
@@ -379,7 +387,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
ca_destroy(&smc->counts);
ca_destroy(&smc->old_counts);
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
return &smc->sm;
@@ -391,25 +399,25 @@ struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
int r;
struct sm_checker *smc;
- if (!sm)
- return NULL;
+ if (IS_ERR_OR_NULL(sm))
+ return ERR_PTR(-EINVAL);
smc = kmalloc(sizeof(*smc), GFP_KERNEL);
if (!smc)
- return NULL;
+ return ERR_PTR(-ENOMEM);
memcpy(&smc->sm, &ops_, sizeof(smc->sm));
r = ca_create(&smc->old_counts, sm);
if (r) {
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
r = ca_create(&smc->counts, sm);
if (r) {
ca_destroy(&smc->old_counts);
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
smc->real_sm = sm;
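The ca_create()/ca_extend() hunks above switch the per-block count array from kzalloc() to vzalloc(), since the array can grow far beyond what a physically contiguous allocation comfortably allows. A minimal sketch of that allocation choice (helper and parameter names are illustrative):

#include <linux/types.h>
#include <linux/vmalloc.h>

static uint32_t *example_alloc_counts(unsigned long nr_blocks)
{
	uint32_t *counts;

	if (!nr_blocks)
		return NULL;	/* nothing to track yet */

	/* vzalloc() gives zeroed, virtually contiguous memory and avoids
	 * the high-order page allocations a large kzalloc() would need. */
	counts = vzalloc(sizeof(*counts) * nr_blocks);
	return counts;		/* freed with vfree() by the caller */
}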
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index fc469ba..3d0ed53 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -290,7 +290,16 @@ struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
dm_block_t nr_blocks)
{
struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks);
- return dm_sm_checker_create_fresh(sm);
+ struct dm_space_map *smc;
+
+ if (IS_ERR_OR_NULL(sm))
+ return sm;
+
+ smc = dm_sm_checker_create_fresh(sm);
+ if (IS_ERR(smc))
+ dm_sm_destroy(sm);
+
+ return smc;
}
EXPORT_SYMBOL_GPL(dm_sm_disk_create);
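dm_sm_checker_create() and its callers above are converted from returning NULL on failure to returning an encoded errno, which callers unpack with IS_ERR()/PTR_ERR(). A minimal sketch of that error-pointer convention with a hypothetical constructor:

#include <linux/err.h>
#include <linux/slab.h>

struct example_obj {
	int dummy;
};

static struct example_obj *example_create(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return ERR_PTR(-ENOMEM);	/* encode the errno in the pointer */

	return obj;
}

static int example_use(void)
{
	struct example_obj *obj = example_create();

	if (IS_ERR(obj))
		return PTR_ERR(obj);		/* recover the errno */

	/* ... use obj ... */
	kfree(obj);
	return 0;
}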
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index 6f8d387..e5604b3 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -138,6 +138,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
void dm_tm_destroy(struct dm_transaction_manager *tm)
{
+ if (!tm->is_clone)
+ wipe_shadow_table(tm);
+
kfree(tm);
}
EXPORT_SYMBOL_GPL(dm_tm_destroy);
@@ -249,6 +252,7 @@ int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
return r;
}
+EXPORT_SYMBOL_GPL(dm_tm_shadow_block);
int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
struct dm_block_validator *v,
@@ -259,6 +263,7 @@ int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
return dm_bm_read_lock(tm->bm, b, v, blk);
}
+EXPORT_SYMBOL_GPL(dm_tm_read_lock);
int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
{
@@ -342,8 +347,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
}
*sm = dm_sm_checker_create(inner);
- if (!*sm)
+ if (IS_ERR(*sm)) {
+ r = PTR_ERR(*sm);
goto bad2;
+ }
} else {
r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
@@ -362,8 +369,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
}
*sm = dm_sm_checker_create(inner);
- if (!*sm)
+ if (IS_ERR(*sm)) {
+ r = PTR_ERR(*sm);
goto bad2;
+ }
}
return 0;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 835de71..cacd008 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -517,8 +517,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
int bad_sectors;
int disk = start_disk + i;
- if (disk >= conf->raid_disks)
- disk -= conf->raid_disks;
+ if (disk >= conf->raid_disks * 2)
+ disk -= conf->raid_disks * 2;
rdev = rcu_dereference(conf->mirrors[disk].rdev);
if (r1_bio->bios[disk] == IO_BLOCKED
@@ -883,7 +883,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
struct md_rdev *blocked_rdev;
- int plugged;
int first_clone;
int sectors_handled;
int max_sectors;
@@ -1034,7 +1033,6 @@ read_again:
* the bad blocks. Each set of writes gets it's own r1bio
* with a set of bios attached.
*/
- plugged = mddev_check_plugged(mddev);
disks = conf->raid_disks * 2;
retry_write:
@@ -1191,6 +1189,8 @@ read_again:
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
+ if (!mddev_check_plugged(mddev))
+ md_wakeup_thread(mddev->thread);
}
/* Mustn't call r1_bio_write_done before this next test,
* as it could result in the bio being freed.
@@ -1213,9 +1213,6 @@ read_again:
/* In case raid1d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
-
- if (do_sync || !bitmap || !plugged)
- md_wakeup_thread(mddev->thread);
}
static void status(struct seq_file *seq, struct mddev *mddev)
@@ -1821,8 +1818,14 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
if (atomic_dec_and_test(&r1_bio->remaining)) {
/* if we're here, all write(s) have completed, so clean up */
- md_done_sync(mddev, r1_bio->sectors, 1);
- put_buf(r1_bio);
+ int s = r1_bio->sectors;
+ if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
+ test_bit(R1BIO_WriteError, &r1_bio->state))
+ reschedule_retry(r1_bio);
+ else {
+ put_buf(r1_bio);
+ md_done_sync(mddev, s, 1);
+ }
}
}
@@ -2488,9 +2491,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
*/
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
atomic_set(&r1_bio->remaining, read_targets);
- for (i = 0; i < conf->raid_disks * 2; i++) {
+ for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
bio = r1_bio->bios[i];
if (bio->bi_end_io == end_sync_read) {
+ read_targets--;
md_sync_acct(bio->bi_bdev, nr_sectors);
generic_make_request(bio);
}
@@ -2550,6 +2554,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
err = -EINVAL;
spin_lock_init(&conf->device_lock);
rdev_for_each(rdev, mddev) {
+ struct request_queue *q;
int disk_idx = rdev->raid_disk;
if (disk_idx >= mddev->raid_disks
|| disk_idx < 0)
@@ -2562,6 +2567,9 @@ static struct r1conf *setup_conf(struct mddev *mddev)
if (disk->rdev)
goto abort;
disk->rdev = rdev;
+ q = bdev_get_queue(rdev->bdev);
+ if (q->merge_bvec_fn)
+ mddev->merge_check_needed = 1;
disk->head_position = 0;
}
@@ -2617,7 +2625,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
goto abort;
}
err = -ENOMEM;
- conf->thread = md_register_thread(raid1d, mddev, NULL);
+ conf->thread = md_register_thread(raid1d, mddev, "raid1");
if (!conf->thread) {
printk(KERN_ERR
"md/raid1:%s: couldn't allocate thread\n",
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 987db37..8da6282 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1039,7 +1039,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
unsigned long flags;
struct md_rdev *blocked_rdev;
- int plugged;
int sectors_handled;
int max_sectors;
int sectors;
@@ -1239,7 +1238,6 @@ read_again:
* of r10_bios is recored in bio->bi_phys_segments just as with
* the read case.
*/
- plugged = mddev_check_plugged(mddev);
r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
raid10_find_phys(conf, r10_bio);
@@ -1396,6 +1394,8 @@ retry_write:
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
+ if (!mddev_check_plugged(mddev))
+ md_wakeup_thread(mddev->thread);
if (!r10_bio->devs[i].repl_bio)
continue;
@@ -1423,6 +1423,8 @@ retry_write:
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
+ if (!mddev_check_plugged(mddev))
+ md_wakeup_thread(mddev->thread);
}
/* Don't remove the bias on 'remaining' (one_write_done) until
@@ -1448,9 +1450,6 @@ retry_write:
/* In case raid10d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
-
- if (do_sync || !mddev->bitmap || !plugged)
- md_wakeup_thread(mddev->thread);
}
static void status(struct seq_file *seq, struct mddev *mddev)
@@ -2310,7 +2309,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
if (r10_sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
- s<<9, conf->tmppage, WRITE)
+ s, conf->tmppage, WRITE)
== 0) {
/* Well, this device is dead */
printk(KERN_NOTICE
@@ -2349,7 +2348,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
switch (r10_sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
- s<<9, conf->tmppage,
+ s, conf->tmppage,
READ)) {
case 0:
/* Well, this device is dead */
@@ -2512,7 +2511,7 @@ read_more:
slot = r10_bio->read_slot;
printk_ratelimited(
KERN_ERR
- "md/raid10:%s: %s: redirecting"
+ "md/raid10:%s: %s: redirecting "
"sector %llu to another mirror\n",
mdname(mddev),
bdevname(rdev->bdev, b),
@@ -2661,7 +2660,8 @@ static void raid10d(struct mddev *mddev)
blk_start_plug(&plug);
for (;;) {
- flush_pending_writes(conf);
+ if (atomic_read(&mddev->plug_cnt) == 0)
+ flush_pending_writes(conf);
spin_lock_irqsave(&conf->device_lock, flags);
if (list_empty(head)) {
@@ -2890,6 +2890,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
/* want to reconstruct this device */
rb2 = r10_bio;
sect = raid10_find_virt(conf, sector_nr, i);
+ if (sect >= mddev->resync_max_sectors) {
+ /* last stripe is not complete - don't
+ * try to recover this sector.
+ */
+ continue;
+ }
/* Unless we are doing a full sync, or a replacement
* we only need to recover the block if it is set in
* the bitmap
@@ -3421,7 +3427,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
spin_lock_init(&conf->resync_lock);
init_waitqueue_head(&conf->wait_barrier);
- conf->thread = md_register_thread(raid10d, mddev, NULL);
+ conf->thread = md_register_thread(raid10d, mddev, "raid10");
if (!conf->thread)
goto out;
@@ -3475,6 +3481,7 @@ static int run(struct mddev *mddev)
rdev_for_each(rdev, mddev) {
long long diff;
+ struct request_queue *q;
disk_idx = rdev->raid_disk;
if (disk_idx < 0)
@@ -3493,6 +3500,9 @@ static int run(struct mddev *mddev)
goto out_free_conf;
disk->rdev = rdev;
}
+ q = bdev_get_queue(rdev->bdev);
+ if (q->merge_bvec_fn)
+ mddev->merge_check_needed = 1;
diff = (rdev->new_data_offset - rdev->data_offset);
if (!mddev->reshape_backwards)
diff = -diff;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d267672..04348d7 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
BUG_ON(!list_empty(&sh->lru));
BUG_ON(atomic_read(&conf->active_stripes)==0);
if (test_bit(STRIPE_HANDLE, &sh->state)) {
- if (test_bit(STRIPE_DELAYED, &sh->state))
+ if (test_bit(STRIPE_DELAYED, &sh->state) &&
+ !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
list_add_tail(&sh->lru, &conf->delayed_list);
else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
sh->bm_seq - conf->seq_write > 0)
list_add_tail(&sh->lru, &conf->bitmap_list);
else {
+ clear_bit(STRIPE_DELAYED, &sh->state);
clear_bit(STRIPE_BIT_DELAY, &sh->state);
list_add_tail(&sh->lru, &conf->handle_list);
}
@@ -606,6 +608,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
* a chance*/
md_check_recovery(conf->mddev);
}
+ /*
+ * Because md_wait_for_blocked_rdev
+ * will dec nr_pending, we must
+ * increment it first.
+ */
+ atomic_inc(&rdev->nr_pending);
md_wait_for_blocked_rdev(rdev, conf->mddev);
} else {
/* Acknowledged bad block - skip the write */
@@ -1737,6 +1745,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
} else {
const char *bdn = bdevname(rdev->bdev, b);
int retry = 0;
+ int set_bad = 0;
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
atomic_inc(&rdev->read_errors);
@@ -1748,7 +1757,8 @@ static void raid5_end_read_request(struct bio * bi, int error)
mdname(conf->mddev),
(unsigned long long)s,
bdn);
- else if (conf->mddev->degraded >= conf->max_degraded)
+ else if (conf->mddev->degraded >= conf->max_degraded) {
+ set_bad = 1;
printk_ratelimited(
KERN_WARNING
"md/raid:%s: read error not correctable "
@@ -1756,8 +1766,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
mdname(conf->mddev),
(unsigned long long)s,
bdn);
- else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
+ } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
/* Oh, no!!! */
+ set_bad = 1;
printk_ratelimited(
KERN_WARNING
"md/raid:%s: read error NOT corrected!! "
@@ -1765,7 +1776,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
mdname(conf->mddev),
(unsigned long long)s,
bdn);
- else if (atomic_read(&rdev->read_errors)
+ } else if (atomic_read(&rdev->read_errors)
> conf->max_nr_stripes)
printk(KERN_WARNING
"md/raid:%s: Too many read errors, failing device %s.\n",
@@ -1777,7 +1788,11 @@ static void raid5_end_read_request(struct bio * bi, int error)
else {
clear_bit(R5_ReadError, &sh->dev[i].flags);
clear_bit(R5_ReWrite, &sh->dev[i].flags);
- md_error(conf->mddev, rdev);
+ if (!(set_bad
+ && test_bit(In_sync, &rdev->flags)
+ && rdev_set_badblocks(
+ rdev, sh->sector, STRIPE_SECTORS, 0)))
+ md_error(conf->mddev, rdev);
}
}
rdev_dec_pending(rdev, conf->mddev);
@@ -3582,8 +3597,18 @@ static void handle_stripe(struct stripe_head *sh)
finish:
/* wait for this device to become unblocked */
- if (conf->mddev->external && unlikely(s.blocked_rdev))
- md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
+ if (unlikely(s.blocked_rdev)) {
+ if (conf->mddev->external)
+ md_wait_for_blocked_rdev(s.blocked_rdev,
+ conf->mddev);
+ else
+ /* Internal metadata will immediately
+ * be written by raid5d, so we don't
+ * need to wait here.
+ */
+ rdev_dec_pending(s.blocked_rdev,
+ conf->mddev);
+ }
if (s.handle_bad_blocks)
for (i = disks; i--; ) {
@@ -3881,8 +3906,6 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
raid_bio->bi_next = (void*)rdev;
align_bi->bi_bdev = rdev->bdev;
align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
- /* No reshape active, so we can trust rdev->data_offset */
- align_bi->bi_sector += rdev->data_offset;
if (!bio_fits_rdev(align_bi) ||
is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
@@ -3893,6 +3916,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
return 0;
}
+ /* No reshape active, so we can trust rdev->data_offset */
+ align_bi->bi_sector += rdev->data_offset;
+
spin_lock_irq(&conf->device_lock);
wait_event_lock_irq(conf->wait_for_stripe,
conf->quiesce == 0,
@@ -3971,7 +3997,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
struct stripe_head *sh;
const int rw = bio_data_dir(bi);
int remaining;
- int plugged;
if (unlikely(bi->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bi);
@@ -3990,7 +4015,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
- plugged = mddev_check_plugged(mddev);
for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
DEFINE_WAIT(w);
int previous;
@@ -4092,6 +4116,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
if ((bi->bi_rw & REQ_SYNC) &&
!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
atomic_inc(&conf->preread_active_stripes);
+ mddev_check_plugged(mddev);
release_stripe(sh);
} else {
/* cannot get stripe for read-ahead, just give-up */
@@ -4099,10 +4124,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
finish_wait(&conf->wait_for_overlap, &w);
break;
}
-
}
- if (!plugged)
- md_wakeup_thread(mddev->thread);
spin_lock_irq(&conf->device_lock);
remaining = raid5_dec_bi_phys_segments(bi);
@@ -4823,6 +4845,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
int raid_disk, memory, max_disks;
struct md_rdev *rdev;
struct disk_info *disk;
+ char pers_name[6];
if (mddev->new_level != 5
&& mddev->new_level != 4
@@ -4946,7 +4969,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
mdname(mddev), memory);
- conf->thread = md_register_thread(raid5d, mddev, NULL);
+ sprintf(pers_name, "raid%d", mddev->new_level);
+ conf->thread = md_register_thread(raid5d, mddev, pers_name);
if (!conf->thread) {
printk(KERN_ERR
"md/raid:%s: couldn't allocate thread.\n",
@@ -5465,10 +5489,9 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev->saved_raid_disk >= 0 &&
rdev->saved_raid_disk >= first &&
conf->disks[rdev->saved_raid_disk].rdev == NULL)
- disk = rdev->saved_raid_disk;
- else
- disk = first;
- for ( ; disk <= last ; disk++) {
+ first = rdev->saved_raid_disk;
+
+ for (disk = first; disk <= last; disk++) {
p = conf->disks + disk;
if (p->rdev == NULL) {
clear_bit(In_sync, &rdev->flags);
@@ -5477,8 +5500,11 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev->saved_raid_disk != disk)
conf->fullsync = 1;
rcu_assign_pointer(p->rdev, rdev);
- break;
+ goto out;
}
+ }
+ for (disk = first; disk <= last; disk++) {
+ p = conf->disks + disk;
if (test_bit(WantReplacement, &p->rdev->flags) &&
p->replacement == NULL) {
clear_bit(In_sync, &rdev->flags);
@@ -5490,6 +5516,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
break;
}
}
+out:
print_raid5_conf(conf);
return err;
}
diff --git a/drivers/media/common/saa7146_fops.c b/drivers/media/common/saa7146_fops.c
index 7d42c11..0cdbd74 100644
--- a/drivers/media/common/saa7146_fops.c
+++ b/drivers/media/common/saa7146_fops.c
@@ -198,7 +198,6 @@ static int fops_open(struct file *file)
struct saa7146_dev *dev = video_drvdata(file);
struct saa7146_fh *fh = NULL;
int result = 0;
- enum v4l2_buf_type type;
DEB_EE("file:%p, dev:%s\n", file, video_device_node_name(vdev));
@@ -207,10 +206,6 @@ static int fops_open(struct file *file)
DEB_D("using: %p\n", dev);
- type = vdev->vfl_type == VFL_TYPE_GRABBER
- ? V4L2_BUF_TYPE_VIDEO_CAPTURE
- : V4L2_BUF_TYPE_VBI_CAPTURE;
-
/* check if an extension is registered */
if( NULL == dev->ext ) {
DEB_S("no extension registered for this device\n");
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index 00a6732..39eab73 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -243,6 +243,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
if (minor == MAX_DVB_MINORS) {
kfree(dvbdevfops);
kfree(dvbdev);
+ up_write(&minor_rwsem);
mutex_unlock(&dvbdev_register_lock);
return -EINVAL;
}
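The dvbdev hunk above adds the up_write() that was missing on the early-return path, so the rwsem is dropped before the mutex, in the reverse of the acquisition order. A minimal sketch of that error-path discipline with illustrative lock names (not the ones in dvbdev.c):

#include <linux/mutex.h>
#include <linux/rwsem.h>

static DEFINE_MUTEX(example_lock);
static DECLARE_RWSEM(example_rwsem);

static int example_register(int fail)
{
	mutex_lock(&example_lock);
	down_write(&example_rwsem);

	if (fail) {
		/* Drop both locks, in reverse acquisition order. */
		up_write(&example_rwsem);
		mutex_unlock(&example_lock);
		return -EINVAL;
	}

	/* ... registration work ... */

	up_write(&example_rwsem);
	mutex_unlock(&example_lock);
	return 0;
}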
diff --git a/drivers/media/dvb/frontends/cx24110.c b/drivers/media/dvb/frontends/cx24110.c
index 98ecaf0..3180f5b 100644
--- a/drivers/media/dvb/frontends/cx24110.c
+++ b/drivers/media/dvb/frontends/cx24110.c
@@ -516,9 +516,9 @@ static int cx24110_read_ucblocks(struct dvb_frontend* fe, u32* ucblocks)
if(cx24110_readreg(state,0x10)&0x40) {
/* the RS error counter has finished one counting window */
cx24110_writereg(state,0x10,0x60); /* select the byer reg */
- cx24110_readreg(state, 0x12) |
+ (void)(cx24110_readreg(state, 0x12) |
(cx24110_readreg(state, 0x13) << 8) |
- (cx24110_readreg(state, 0x14) << 16);
+ (cx24110_readreg(state, 0x14) << 16));
cx24110_writereg(state,0x10,0x70); /* select the bler reg */
state->lastbler=cx24110_readreg(state,0x12)|
(cx24110_readreg(state,0x13)<<8)|
diff --git a/drivers/media/dvb/frontends/cxd2820r_c.c b/drivers/media/dvb/frontends/cxd2820r_c.c
index 9454049..ed3b0ba6 100644
--- a/drivers/media/dvb/frontends/cxd2820r_c.c
+++ b/drivers/media/dvb/frontends/cxd2820r_c.c
@@ -121,7 +121,7 @@ int cxd2820r_get_frontend_c(struct dvb_frontend *fe)
if (ret)
goto error;
- switch ((buf[0] >> 0) & 0x03) {
+ switch ((buf[0] >> 0) & 0x07) {
case 0:
c->modulation = QAM_16;
break;
diff --git a/drivers/media/dvb/frontends/lg2160.c b/drivers/media/dvb/frontends/lg2160.c
index a3ab1a5..cc11260 100644
--- a/drivers/media/dvb/frontends/lg2160.c
+++ b/drivers/media/dvb/frontends/lg2160.c
@@ -126,7 +126,7 @@ static int lg216x_write_regs(struct lg216x_state *state,
lg_reg("writing %d registers...\n", len);
- for (i = 0; i < len - 1; i++) {
+ for (i = 0; i < len; i++) {
ret = lg216x_write_reg(state, regs[i].reg, regs[i].val);
if (lg_fail(ret))
return ret;
diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
index 63c004a..664e460 100644
--- a/drivers/media/dvb/siano/smsusb.c
+++ b/drivers/media/dvb/siano/smsusb.c
@@ -544,6 +544,8 @@ static const struct usb_device_id smsusb_id_table[] __devinitconst = {
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
{ USB_DEVICE(0x2040, 0xc0a0),
.driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
+ { USB_DEVICE(0x2040, 0xf5a0),
+ .driver_info = SMS1XXX_BOARD_HAUPPAUGE_WINDHAM },
{ } /* Terminating entry */
};
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index 740a3d5..b415211 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -157,7 +157,7 @@ static int __devinit maxiradio_probe(struct pci_dev *pdev, const struct pci_devi
goto err_out_free_region;
dev->io = pci_resource_start(pdev, 0);
- if (snd_tea575x_init(&dev->tea)) {
+ if (snd_tea575x_init(&dev->tea, THIS_MODULE)) {
printk(KERN_ERR "radio-maxiradio: Unable to detect TEA575x tuner\n");
goto err_out_free_region;
}
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index 52b8011..4efcbec 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -238,7 +238,7 @@ static int __devinit fmr2_probe(struct fmr2 *fmr2, struct device *pdev, int io)
snprintf(fmr2->tea.bus_info, sizeof(fmr2->tea.bus_info), "%s:%s",
fmr2->is_fmd2 ? "PnP" : "ISA", dev_name(pdev));
- if (snd_tea575x_init(&fmr2->tea)) {
+ if (snd_tea575x_init(&fmr2->tea, THIS_MODULE)) {
printk(KERN_ERR "radio-sf16fmr2: Unable to detect TEA575x tuner\n");
release_region(fmr2->io, 2);
return -ENODEV;
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index e9f6387..f412f7a 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -51,6 +51,8 @@ static struct usb_device_id si470x_usb_driver_id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x1b80, 0xd700, USB_CLASS_HID, 0, 0) },
/* Sanei Electric, Inc. FM USB Radio (sold as DealExtreme.com PCear) */
{ USB_DEVICE_AND_INTERFACE_INFO(0x10c5, 0x819a, USB_CLASS_HID, 0, 0) },
+ /* Axentia ALERT FM USB Receiver */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x12cf, 0x7111, USB_CLASS_HID, 0, 0) },
/* Terminating entry */
{ }
};
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 342c2c8..54ee348 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -232,7 +232,7 @@ MODULE_PARM_DESC(invert, "Invert the signal from the IR receiver");
static bool txandrx; /* default = 0 */
module_param(txandrx, bool, 0444);
-MODULE_PARM_DESC(invert, "Allow simultaneous TX and RX");
+MODULE_PARM_DESC(txandrx, "Allow simultaneous TX and RX");
static unsigned int wake_sc = 0x800F040C;
module_param(wake_sc, uint, 0644);
@@ -1032,6 +1032,8 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
data->dev->tx_ir = wbcir_tx;
data->dev->priv = data;
data->dev->dev.parent = &device->dev;
+ data->dev->timeout = MS_TO_NS(100);
+ data->dev->allowed_protos = RC_TYPE_ALL;
if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index ff2933a..856ab96 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -371,7 +371,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 2, 0, 0, 0 },
.gpiomute = 10,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -384,7 +383,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 1, 2, 3 },
.gpiomute = 4,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -398,7 +396,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 4, 0, 2, 3 },
.gpiomute = 1,
.no_msp34xx = 1,
- .needs_tvaudio = 1,
.tuner_type = TUNER_PHILIPS_NTSC,
.tuner_addr = ADDR_UNSET,
.pll = PLL_28,
@@ -414,7 +411,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
},
@@ -427,7 +423,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 0, 1, 0, 1 },
.gpiomute = 3,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -440,7 +435,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0x0f,
.gpiomux = { 0x0c, 0x04, 0x08, 0x04 },
/* 0x04 for some cards ?? */
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.audio_mode_gpio= avermedia_tvphone_audio,
@@ -454,7 +448,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 0, 0),
.gpiomux = { 0 },
- .needs_tvaudio = 1,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
},
@@ -469,7 +462,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 0xc00, 0x800, 0x400 },
.gpiomute = 0xc00,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -482,7 +474,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 3,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 1, 1, 2, 3 },
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_TEMIC_PAL,
.tuner_addr = ADDR_UNSET,
@@ -496,7 +487,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 0, 1, 1),
.gpiomux = { 0, 1, 2, 3 },
.gpiomute = 4,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -510,7 +500,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x20001,0x10001, 0, 0 },
.gpiomute = 10,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -524,7 +513,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 15,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 13, 14, 11, 7 },
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -536,7 +524,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 15,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 13, 14, 11, 7 },
- .needs_tvaudio = 1,
.msp34xx_alt = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
@@ -553,7 +540,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 2, 1, 3 }, /* old: {0, 1, 2, 3, 4} */
.gpiomute = 4,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -567,7 +553,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 0, 1, 0 },
.gpiomute = 10,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -583,7 +568,6 @@ struct tvcard bttv_tvcards[] = {
/* 2003-10-20 by "Anton A. Arapov" <arapov@mail.ru> */
.gpiomux = { 0x001e00, 0, 0x018000, 0x014000 },
.gpiomute = 0x002000,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -597,7 +581,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1, 0),
.gpiomux = { 0x4fa007,0xcfa007,0xcfa007,0xcfa007 },
.gpiomute = 0xcfa007,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
.volume_gpio = winview_volume,
@@ -611,7 +594,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 1, 0, 0, 0 },
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -660,7 +642,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 1, 0x800, 0x400 },
.gpiomute = 0xc00,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -691,7 +672,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = {0x400, 0x400, 0x400, 0x400 },
.gpiomute = 0xc00,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -706,7 +686,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x20000, 0x30000, 0x10000, 0 },
.gpiomute = 0x40000,
- .needs_tvaudio = 0,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.audio_mode_gpio= terratv_audio,
@@ -720,7 +699,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 0, 1, 1),
.gpiomux = { 0, 1, 2, 3 },
.gpiomute = 4,
- .needs_tvaudio = 1,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
},
@@ -748,7 +726,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x20000, 0x30000, 0x10000, 0x00000 },
.gpiomute = 0x40000,
- .needs_tvaudio = 0,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.audio_mode_gpio= terratv_audio,
@@ -793,7 +770,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 0, 0),
.gpiomux = { 0 },
- .needs_tvaudio = 1,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.muxsel_hook = PXC200_muxsel,
@@ -834,7 +810,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
},
@@ -847,7 +822,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x500, 0, 0x300, 0x900 },
.gpiomute = 0x900,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -874,7 +848,6 @@ struct tvcard bttv_tvcards[] = {
Note: There exists another variant "Winfast 2000" with tv stereo !?
Note: eeprom only contains FF and pci subsystem id 107d:6606
*/
- .needs_tvaudio = 0,
.pll = PLL_28,
.has_radio = 1,
.tuner_type = TUNER_PHILIPS_PAL, /* default for now, gpio reads BFFF06 for Pal bg+dk */
@@ -934,7 +907,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 0x551400, 0x551200, 0, 0 },
.gpiomute = 0x551c00,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL_I,
.tuner_addr = ADDR_UNSET,
@@ -949,7 +921,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 2, 0xd0001, 0, 0 },
.gpiomute = 1,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -966,7 +937,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomux = { 4, 0, 2, 3 },
.gpiomute = 1,
.no_msp34xx = 1,
- .needs_tvaudio = 1,
.tuner_type = TUNER_PHILIPS_NTSC,
.tuner_addr = ADDR_UNSET,
.pll = PLL_28,
@@ -980,7 +950,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 15,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 13, 4, 11, 7 },
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -995,7 +964,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 0, 0, 0},
- .needs_tvaudio = 1,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL_I,
@@ -1066,7 +1034,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x20000, 0x30000, 0x10000, 0 },
.gpiomute = 0x40000,
- .needs_tvaudio = 1,
.no_msp34xx = 1,
.pll = PLL_35,
.tuner_type = TUNER_PHILIPS_PAL_I,
@@ -1084,7 +1051,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = {2,0,0,0 },
.gpiomute = 1,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -1163,7 +1129,6 @@ struct tvcard bttv_tvcards[] = {
MUX2 (mask 0x30000):
0,2,3= from MSP34xx
1= FM stereo Radio from Tuner */
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -1179,7 +1144,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 0, 0x10, 8 },
.gpiomute = 4,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -1218,7 +1182,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 2, 0, 0, 0 },
.gpiomute = 10,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_TEMIC_PAL,
.tuner_addr = ADDR_UNSET,
@@ -1250,7 +1213,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(3, 1),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_35,
.tuner_type = TUNER_ABSENT,
@@ -1266,7 +1228,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x400, 0x400, 0x400, 0x400 },
.gpiomute = 0x800,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_TEMIC_4036FY5_NTSC,
.tuner_addr = ADDR_UNSET,
@@ -1312,7 +1273,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 2),
.gpiomux = { },
.no_msp34xx = 1,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -1329,7 +1289,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 1, 0, 4, 4 },
.gpiomute = 9,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -1379,7 +1338,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomute = 0x1800,
.audio_mode_gpio= fv2000s_audio,
.no_msp34xx = 1,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -1393,7 +1351,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x500, 0x500, 0x300, 0x900 },
.gpiomute = 0x900,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -1477,7 +1434,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 0, 11, 7 }, /* TV and Radio with same GPIO ! */
.gpiomute = 13,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_LG_PAL_I_FM,
.tuner_addr = ADDR_UNSET,
@@ -1514,7 +1470,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x01, 0x00, 0x03, 0x03 },
.gpiomute = 0x09,
- .needs_tvaudio = 1,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
@@ -1540,7 +1495,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 0, 0),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
},
@@ -1567,7 +1521,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 1, 1),
.gpiomux = { 0, 1, 2, 2 },
.gpiomute = 4,
- .needs_tvaudio = 0,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.pll = PLL_28,
@@ -1597,7 +1550,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
@@ -1619,7 +1571,6 @@ struct tvcard bttv_tvcards[] = {
* btwincap uses 0x80000/0x80003
*/
.gpiomute = 4,
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
@@ -1655,7 +1606,6 @@ struct tvcard bttv_tvcards[] = {
/* .audio_inputs= 1, */
.svhs = 2,
.muxsel = MUXSEL(2, 0, 1, 1),
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = UNSET,
.tuner_addr = ADDR_UNSET,
@@ -1875,7 +1825,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 1, 2, 3},
.gpiomute = 4,
- .needs_tvaudio = 1,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.pll = PLL_28,
@@ -1902,7 +1851,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
@@ -1920,7 +1868,6 @@ struct tvcard bttv_tvcards[] = {
/* Tuner, Radio, external, internal, off, on */
.gpiomux = { 0x08, 0x0f, 0x0a, 0x08 },
.gpiomute = 0x0f,
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_NTSC,
@@ -1936,7 +1883,6 @@ struct tvcard bttv_tvcards[] = {
.svhs = 2,
.gpiomask = 0x00,
.muxsel = MUXSEL(2, 3, 1, 1),
- .needs_tvaudio = 1,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
@@ -2034,7 +1980,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
@@ -2049,7 +1994,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0x00,
.muxsel = MUXSEL(2, 3, 1, 0),
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2062,7 +2006,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0x00,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2079,7 +2022,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 2, 2, 2, 3, 3, 3, 3, 1, 0),
.muxsel_hook = phytec_muxsel,
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2094,7 +2036,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 2, 2, 2, 3, 3, 3, 3, 1, 1),
.muxsel_hook = phytec_muxsel,
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2118,7 +2059,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
.svhs = NO_SVHS, /* card has no svhs */
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.no_tda7432 = 1,
.gpiomask = 0x00,
@@ -2168,7 +2108,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 3,
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 1, 1, 1, 1 },
- .needs_tvaudio = 1,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.pll = PLL_35,
@@ -2210,7 +2149,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 0),
.no_msp34xx = 1,
.no_tda7432 = 1,
- .needs_tvaudio = 0,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
},
@@ -2222,7 +2160,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
.svhs = 2,
- .needs_tvaudio = 0,
.gpiomask = 0x68,
.muxsel = MUXSEL(2, 3, 1),
.gpiomux = { 0x68, 0x68, 0x61, 0x61 },
@@ -2241,7 +2178,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 1, 2, 2 },
.gpiomute = 3,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -2265,7 +2201,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 2, 2, 2),
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
.pll = PLL_28,
- .needs_tvaudio = 0,
.muxsel_hook = picolo_tetra_muxsel,/*Required as it doesn't follow the classic input selection policy*/
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2358,7 +2293,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 2, 0, 0, 0 },
.gpiomute = 10,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL,
.tuner_addr = ADDR_UNSET,
@@ -2405,7 +2339,6 @@ struct tvcard bttv_tvcards[] = {
.tuner_addr = ADDR_UNSET,
.gpiomask = 0x008007,
.gpiomux = { 0, 0x000001,0,0 },
- .needs_tvaudio = 1,
.has_radio = 1,
},
[BTTV_BOARD_TIBET_CS16] = {
@@ -2518,7 +2451,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x001e00, 0, 0x018000, 0x014000 },
.gpiomute = 0x002000,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_YMEC_TVF66T5_B_DFF,
.tuner_addr = 0xc1 >>1,
@@ -2534,7 +2466,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 1, 2, 2 },
.gpiomute = 3,
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_TENA_9533_DI,
.tuner_addr = ADDR_UNSET,
@@ -2615,7 +2546,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 2, 0, 0, 0 },
.gpiomute = 1,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_NTSC,
.tuner_addr = ADDR_UNSET,
@@ -2714,7 +2644,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0x20001,0x10001, 0, 0 },
.gpiomute = 10,
- .needs_tvaudio = 1,
.pll = PLL_28,
.tuner_type = TUNER_PHILIPS_PAL_I,
.tuner_addr = ADDR_UNSET,
@@ -2746,7 +2675,6 @@ struct tvcard bttv_tvcards[] = {
.muxsel = MUXSEL(2, 3, 1, 1),
.gpiomux = { 0, 1, 2, 2 }, /* CONTVFMi */
.gpiomute = 3, /* CONTVFMi */
- .needs_tvaudio = 0,
.tuner_type = TUNER_PHILIPS_FM1216ME_MK3, /* TCL MK3 */
.tuner_addr = ADDR_UNSET,
.pll = PLL_28,
@@ -2785,7 +2713,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0x00,
.muxsel = MUXSEL(0, 2, 3, 1),
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2799,7 +2726,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0x00,
.muxsel = MUXSEL(2, 3, 1),
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2813,7 +2739,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0x00,
.muxsel = MUXSEL(3, 2, 1),
.gpiomux = { 0, 0, 0, 0 }, /* card has no audio */
- .needs_tvaudio = 0,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
.tuner_addr = ADDR_UNSET,
@@ -2877,7 +2802,6 @@ struct tvcard bttv_tvcards[] = {
.gpiomask = 0,
.muxsel = MUXSEL(2, 3),
.gpiomux = { 0 },
- .needs_tvaudio = 0,
.no_msp34xx = 1,
.pll = PLL_28,
.tuner_type = TUNER_ABSENT,
@@ -3649,7 +3573,7 @@ void __devinit bttv_init_tuner(struct bttv *btv)
struct tuner_setup tun_setup;
/* Load tuner module before issuing tuner config call! */
- if (bttv_tvcards[btv->c.type].has_radio)
+ if (btv->has_radio)
v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
&btv->c.i2c_adap, "tuner",
0, v4l2_i2c_tuner_addrs(ADDRS_RADIO));
@@ -3664,7 +3588,7 @@ void __devinit bttv_init_tuner(struct bttv *btv)
tun_setup.type = btv->tuner_type;
tun_setup.addr = addr;
- if (bttv_tvcards[btv->c.type].has_radio)
+ if (btv->has_radio)
tun_setup.mode_mask |= T_RADIO;
bttv_call_all(btv, tuner, s_type_addr, &tun_setup);
@@ -3724,6 +3648,10 @@ static void __devinit hauppauge_eeprom(struct bttv *btv)
bttv_tvcards[BTTV_BOARD_HAUPPAUGE_IMPACTVCB].name);
btv->c.type = BTTV_BOARD_HAUPPAUGE_IMPACTVCB;
}
+
+ /* The 61334 needs the msp3410 to do the radio demod to get sound */
+ if (tv.model == 61334)
+ btv->radio_uses_msp_demodulator = 1;
}
static int terratec_active_radio_upgrade(struct bttv *btv)
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index a9cfb0f..ff7a589 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -1218,6 +1218,11 @@ audio_mux(struct bttv *btv, int input, int mute)
For now this is sufficient. */
switch (input) {
case TVAUDIO_INPUT_RADIO:
+		/* Some boards need the msp to do the radio demod */
+ if (btv->radio_uses_msp_demodulator) {
+ in = MSP_INPUT_DEFAULT;
+ break;
+ }
in = MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1,
MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
break;
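
The two bttv hunks above add a per-device radio_uses_msp_demodulator flag: bttv-cards.c sets it from the Hauppauge EEPROM model, and audio_mux() then leaves the MSP at its default input so the msp3410 handles the FM demodulation. A minimal sketch of how another card-setup hook could opt in the same way; the model number is purely hypothetical:

	/* Illustrative sketch only: opt a board into MSP-based radio
	 * demodulation from its card-setup/EEPROM hook.  The model
	 * number below is hypothetical.
	 */
	static void example_eeprom_hook(struct bttv *btv, unsigned int model)
	{
		if (model == 99999)	/* hypothetical board id */
			btv->radio_uses_msp_demodulator = 1;
	}
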
diff --git a/drivers/media/video/bt8xx/bttv.h b/drivers/media/video/bt8xx/bttv.h
index c517161..acfe2f3 100644
--- a/drivers/media/video/bt8xx/bttv.h
+++ b/drivers/media/video/bt8xx/bttv.h
@@ -236,7 +236,6 @@ struct tvcard {
/* i2c audio flags */
unsigned int no_msp34xx:1;
unsigned int no_tda7432:1;
- unsigned int needs_tvaudio:1;
unsigned int msp34xx_alt:1;
/* Note: currently no card definition needs to mark the presence
of a RDS saa6588 chip. If this is ever needed, then add a new
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h
index db943a8d..70fd4f23 100644
--- a/drivers/media/video/bt8xx/bttvp.h
+++ b/drivers/media/video/bt8xx/bttvp.h
@@ -440,6 +440,7 @@ struct bttv {
/* radio data/state */
int has_radio;
int radio_user;
+ int radio_uses_msp_demodulator;
/* miro/pinnacle + Aimslab VHX
philips matchbox (tea5757 radio tuner) support */
diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c
index 2520219..5b75a64 100644
--- a/drivers/media/video/bw-qcam.c
+++ b/drivers/media/video/bw-qcam.c
@@ -607,8 +607,9 @@ static long qc_capture(struct qcam *q, char __user *buf, unsigned long len)
}
o = i * pixels_per_line + pixels_read + k;
if (o < len) {
+ u8 ch = invert - buffer[k];
got++;
- put_user((invert - buffer[k]) << shift, buf + o);
+ put_user(ch << shift, buf + o);
}
}
pixels_read += bytes;
@@ -648,8 +649,8 @@ static int qcam_querycap(struct file *file, void *priv,
struct qcam *qcam = video_drvdata(file);
strlcpy(vcap->driver, qcam->v4l2_dev.name, sizeof(vcap->driver));
- strlcpy(vcap->card, "B&W Quickcam", sizeof(vcap->card));
- strlcpy(vcap->bus_info, "parport", sizeof(vcap->bus_info));
+ strlcpy(vcap->card, "Connectix B&W Quickcam", sizeof(vcap->card));
+ strlcpy(vcap->bus_info, qcam->pport->name, sizeof(vcap->bus_info));
vcap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE;
vcap->capabilities = vcap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
@@ -688,8 +689,8 @@ static int qcam_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
pix->height = qcam->height / qcam->transfer_scale;
pix->pixelformat = (qcam->bpp == 4) ? V4L2_PIX_FMT_Y4 : V4L2_PIX_FMT_Y6;
pix->field = V4L2_FIELD_NONE;
- pix->bytesperline = qcam->width;
- pix->sizeimage = qcam->width * qcam->height;
+ pix->bytesperline = pix->width;
+ pix->sizeimage = pix->width * pix->height;
/* Just a guess */
pix->colorspace = V4L2_COLORSPACE_SRGB;
return 0;
@@ -757,7 +758,7 @@ static int qcam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdes
"4-Bit Monochrome", V4L2_PIX_FMT_Y4,
{ 0, 0, 0, 0 }
},
- { 0, 0, 0,
+ { 1, 0, 0,
"6-Bit Monochrome", V4L2_PIX_FMT_Y6,
{ 0, 0, 0, 0 }
},
@@ -772,6 +773,25 @@ static int qcam_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdes
return 0;
}
+static int qcam_enum_framesizes(struct file *file, void *fh,
+ struct v4l2_frmsizeenum *fsize)
+{
+ static const struct v4l2_frmsize_discrete sizes[] = {
+ { 80, 60 },
+ { 160, 120 },
+ { 320, 240 },
+ };
+
+ if (fsize->index > 2)
+ return -EINVAL;
+ if (fsize->pixel_format != V4L2_PIX_FMT_Y4 &&
+ fsize->pixel_format != V4L2_PIX_FMT_Y6)
+ return -EINVAL;
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete = sizes[fsize->index];
+ return 0;
+}
+
static ssize_t qcam_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -795,6 +815,11 @@ static ssize_t qcam_read(struct file *file, char __user *buf,
return len;
}
+static unsigned int qcam_poll(struct file *filp, poll_table *wait)
+{
+ return v4l2_ctrl_poll(filp, wait) | POLLIN | POLLRDNORM;
+}
+
static int qcam_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct qcam *qcam =
@@ -828,7 +853,7 @@ static const struct v4l2_file_operations qcam_fops = {
.owner = THIS_MODULE,
.open = v4l2_fh_open,
.release = v4l2_fh_release,
- .poll = v4l2_ctrl_poll,
+ .poll = qcam_poll,
.unlocked_ioctl = video_ioctl2,
.read = qcam_read,
};
@@ -839,6 +864,7 @@ static const struct v4l2_ioctl_ops qcam_ioctl_ops = {
.vidioc_s_input = qcam_s_input,
.vidioc_enum_input = qcam_enum_input,
.vidioc_enum_fmt_vid_cap = qcam_enum_fmt_vid_cap,
+ .vidioc_enum_framesizes = qcam_enum_framesizes,
.vidioc_g_fmt_vid_cap = qcam_g_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = qcam_s_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = qcam_try_fmt_vid_cap,
@@ -864,9 +890,9 @@ static struct qcam *qcam_init(struct parport *port)
return NULL;
v4l2_dev = &qcam->v4l2_dev;
- strlcpy(v4l2_dev->name, "bw-qcam", sizeof(v4l2_dev->name));
+ snprintf(v4l2_dev->name, sizeof(v4l2_dev->name), "bw-qcam%d", num_cams);
- if (v4l2_device_register(NULL, v4l2_dev) < 0) {
+ if (v4l2_device_register(port->dev, v4l2_dev) < 0) {
v4l2_err(v4l2_dev, "Could not register v4l2_device\n");
kfree(qcam);
return NULL;
@@ -886,7 +912,7 @@ static struct qcam *qcam_init(struct parport *port)
return NULL;
}
qcam->pport = port;
- qcam->pdev = parport_register_device(port, "bw-qcam", NULL, NULL,
+ qcam->pdev = parport_register_device(port, v4l2_dev->name, NULL, NULL,
NULL, 0, NULL);
if (qcam->pdev == NULL) {
v4l2_err(v4l2_dev, "couldn't register for %s.\n", port->name);
@@ -975,6 +1001,7 @@ static int init_bwqcam(struct parport *port)
return -ENODEV;
}
qc_calibrate(qcam);
+ v4l2_ctrl_handler_setup(&qcam->hdl);
parport_release(qcam->pdev);
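
Besides the renamed card/bus_info strings, the bw-qcam changes add VIDIOC_ENUM_FRAMESIZES support and make poll() report readability on top of control events. A minimal user-space sketch of how the new ioctl is consumed (device path hypothetical):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int main(void)
	{
		int fd = open("/dev/video0", O_RDWR);	/* hypothetical node */
		struct v4l2_frmsizeenum fs;

		if (fd < 0)
			return 1;
		memset(&fs, 0, sizeof(fs));
		fs.pixel_format = V4L2_PIX_FMT_Y4;
		while (ioctl(fd, VIDIOC_ENUM_FRAMESIZES, &fs) == 0) {
			if (fs.type == V4L2_FRMSIZE_TYPE_DISCRETE)
				printf("%ux%u\n", fs.discrete.width,
				       fs.discrete.height);
			fs.index++;
		}
		return 0;
	}
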
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index b55d57c..7e5ffd6 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -838,10 +838,10 @@ static int cx18_setup_pci(struct cx18 *cx, struct pci_dev *pci_dev,
}
CX18_DEBUG_INFO("cx%d (rev %d) at %02x:%02x.%x, "
- "irq: %d, latency: %d, memory: 0x%lx\n",
+ "irq: %d, latency: %d, memory: 0x%llx\n",
cx->pci_dev->device, cx->card_rev, pci_dev->bus->number,
PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn),
- cx->pci_dev->irq, pci_latency, (unsigned long)cx->base_addr);
+ cx->pci_dev->irq, pci_latency, (u64)cx->base_addr);
return 0;
}
@@ -938,7 +938,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
if (retval)
goto err;
- CX18_DEBUG_INFO("base addr: 0x%08x\n", cx->base_addr);
+ CX18_DEBUG_INFO("base addr: 0x%llx\n", (u64)cx->base_addr);
/* PCI Device Setup */
retval = cx18_setup_pci(cx, pci_dev, pci_id);
@@ -946,8 +946,8 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
goto free_workqueues;
/* map io memory */
- CX18_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
- cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
+ CX18_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+ (u64)cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET,
CX18_MEM_SIZE);
if (!cx->enc_mem) {
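
The cx18 printk changes (and the matching ivtv ones further down) are purely about types: base_addr becomes resource_size_t, which is 32- or 64-bit depending on the architecture, so the value is cast to u64 and printed with %llx to keep a single format string for both cases. The same pattern in isolation:

	resource_size_t base = pci_resource_start(pci_dev, 0);

	/* resource_size_t may be 32 or 64 bits; widen explicitly. */
	pr_info("base addr: 0x%llx\n", (u64)base);
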
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index 7a37e0e..2767c64 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -622,7 +622,7 @@ struct cx18 {
unique ID. Starts at 1, so 0 can be used as
uninitialized value in the stream->id. */
- u32 base_addr;
+ resource_size_t base_addr;
u8 card_rev;
void __iomem *enc_mem, *reg_mem;
diff --git a/drivers/media/video/cx18/cx18-firmware.c b/drivers/media/video/cx18/cx18-firmware.c
index 1b3fb50..b85c292 100644
--- a/drivers/media/video/cx18/cx18-firmware.c
+++ b/drivers/media/video/cx18/cx18-firmware.c
@@ -164,8 +164,13 @@ static int load_apu_fw_direct(const char *fn, u8 __iomem *dst, struct cx18 *cx,
apu_version = (vers[0] << 24) | (vers[4] << 16) | vers[32];
while (offset + sizeof(seghdr) < fw->size) {
- /* TODO: byteswapping */
- memcpy(&seghdr, src + offset / 4, sizeof(seghdr));
+ const u32 *shptr = src + offset / 4;
+
+ seghdr.sync1 = le32_to_cpu(shptr[0]);
+ seghdr.sync2 = le32_to_cpu(shptr[1]);
+ seghdr.addr = le32_to_cpu(shptr[2]);
+ seghdr.size = le32_to_cpu(shptr[3]);
+
offset += sizeof(seghdr);
if (seghdr.sync1 != APU_ROM_SYNC1 ||
seghdr.sync2 != APU_ROM_SYNC2) {
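
The firmware segment headers are stored little-endian, so the raw memcpy() is replaced by explicit le32_to_cpu() conversions, making the loader correct on big-endian hosts too. A generic sketch of the same pattern, with a hypothetical three-word header:

	#include <linux/kernel.h>

	struct fw_hdr {		/* hypothetical on-disk layout, little-endian */
		u32 magic;
		u32 addr;
		u32 size;
	};

	static void parse_fw_hdr(const __le32 *src, struct fw_hdr *hdr)
	{
		hdr->magic = le32_to_cpu(src[0]);
		hdr->addr  = le32_to_cpu(src[1]);
		hdr->size  = le32_to_cpu(src[2]);
	}
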
diff --git a/drivers/media/video/cx18/cx18-mailbox.c b/drivers/media/video/cx18/cx18-mailbox.c
index ed81183..eabf00c 100644
--- a/drivers/media/video/cx18/cx18-mailbox.c
+++ b/drivers/media/video/cx18/cx18-mailbox.c
@@ -434,6 +434,7 @@ static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
{
u32 handle, mdl_ack_offset, mdl_ack_count;
struct cx18_mailbox *mb;
+ int i;
mb = &order->mb;
handle = mb->args[0];
@@ -447,8 +448,9 @@ static int epu_dma_done_irq(struct cx18 *cx, struct cx18_in_work_order *order)
return -1;
}
- cx18_memcpy_fromio(cx, order->mdl_ack, cx->enc_mem + mdl_ack_offset,
- sizeof(struct cx18_mdl_ack) * mdl_ack_count);
+ for (i = 0; i < sizeof(struct cx18_mdl_ack) * mdl_ack_count; i += sizeof(u32))
+ ((u32 *)order->mdl_ack)[i / sizeof(u32)] =
+ cx18_readl(cx, cx->enc_mem + mdl_ack_offset + i);
if ((order->flags & CX18_F_EWO_MB_STALE) == 0)
mb_ack_irq(cx, order);
@@ -538,6 +540,7 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
struct cx18_mailbox *order_mb;
struct cx18_in_work_order *order;
int submit;
+ int i;
switch (rpu) {
case CPU:
@@ -562,10 +565,12 @@ void cx18_api_epu_cmd_irq(struct cx18 *cx, int rpu)
order_mb = &order->mb;
/* mb->cmd and mb->args[0] through mb->args[2] */
- cx18_memcpy_fromio(cx, &order_mb->cmd, &mb->cmd, 4 * sizeof(u32));
+ for (i = 0; i < 4; i++)
+ (&order_mb->cmd)[i] = cx18_readl(cx, &mb->cmd + i);
+
/* mb->request and mb->ack. N.B. we want to read mb->ack last */
- cx18_memcpy_fromio(cx, &order_mb->request, &mb->request,
- 2 * sizeof(u32));
+ for (i = 0; i < 2; i++)
+ (&order_mb->request)[i] = cx18_readl(cx, &mb->request + i);
if (order_mb->request == order_mb->ack) {
CX18_DEBUG_WARN("Possibly falling behind: %s self-ack'ed our "
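
Both mailbox hunks replace cx18_memcpy_fromio() with per-word cx18_readl() loops; memcpy_fromio() gives no access-width guarantee, and this mailbox region evidently needs aligned 32-bit reads. A minimal generic sketch of the idea (helper name hypothetical):

	#include <linux/io.h>

	/* Copy 'words' 32-bit values out of MMIO using only 32-bit reads. */
	static void copy_from_mmio32(u32 *dst, const void __iomem *src, int words)
	{
		int i;

		for (i = 0; i < words; i++)
			dst[i] = readl(src + i * sizeof(u32));
	}
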
diff --git a/drivers/media/video/cx231xx/cx231xx-audio.c b/drivers/media/video/cx231xx/cx231xx-audio.c
index 068f78d..b4c99c7 100644
--- a/drivers/media/video/cx231xx/cx231xx-audio.c
+++ b/drivers/media/video/cx231xx/cx231xx-audio.c
@@ -307,7 +307,7 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
urb->context = dev;
urb->pipe = usb_rcvisocpipe(dev->udev,
dev->adev.end_point_addr);
- urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags = URB_ISO_ASAP;
urb->transfer_buffer = dev->adev.transfer_buffer[i];
urb->interval = 1;
urb->complete = cx231xx_audio_isocirq;
@@ -368,7 +368,7 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
urb->context = dev;
urb->pipe = usb_rcvbulkpipe(dev->udev,
dev->adev.end_point_addr);
- urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags = 0;
urb->transfer_buffer = dev->adev.transfer_buffer[i];
urb->complete = cx231xx_audio_bulkirq;
urb->transfer_buffer_length = sb_size;
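
URB_NO_TRANSFER_DMA_MAP tells usbcore that the driver already supplies a DMA-mapped buffer in urb->transfer_dma; these buffers are plain kmalloc() allocations and transfer_dma is never filled in, so the flag is dropped here (and in the VBI hunk below) and the core maps the buffer itself. A minimal sketch of the corrected setup for a bulk endpoint (names hypothetical):

	#include <linux/slab.h>
	#include <linux/usb.h>

	static int example_submit_bulk(struct usb_device *udev, unsigned int ep,
				       size_t len, usb_complete_t done, void *ctx)
	{
		struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
		void *buf;

		if (!urb)
			return -ENOMEM;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf) {
			usb_free_urb(urb);
			return -ENOMEM;
		}
		/* No URB_NO_TRANSFER_DMA_MAP: usbcore DMA-maps 'buf' for us. */
		usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, ep),
				  buf, len, done, ctx);
		return usb_submit_urb(urb, GFP_KERNEL);
	}
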
diff --git a/drivers/media/video/cx231xx/cx231xx-vbi.c b/drivers/media/video/cx231xx/cx231xx-vbi.c
index 3d15314..ac7db52 100644
--- a/drivers/media/video/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/video/cx231xx/cx231xx-vbi.c
@@ -448,7 +448,7 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
return -ENOMEM;
}
dev->vbi_mode.bulk_ctl.urb[i] = urb;
- urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags = 0;
dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
kzalloc(sb_size, GFP_KERNEL);
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 13739e0..080e111 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -127,22 +127,37 @@ struct cx23885_board cx23885_boards[] = {
},
[CX23885_BOARD_HAUPPAUGE_HVR1250] = {
.name = "Hauppauge WinTV-HVR1250",
+ .porta = CX23885_ANALOG_VIDEO,
.portc = CX23885_MPEG_DVB,
+#ifdef MT2131_NO_ANALOG_SUPPORT_YET
+ .tuner_type = TUNER_PHILIPS_TDA8290,
+ .tuner_addr = 0x42, /* 0x84 >> 1 */
+ .tuner_bus = 1,
+#endif
+ .force_bff = 1,
.input = {{
+#ifdef MT2131_NO_ANALOG_SUPPORT_YET
.type = CX23885_VMUX_TELEVISION,
- .vmux = 0,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN5_CH2 |
+ CX25840_VIN2_CH1,
+ .amux = CX25840_AUDIO8,
.gpio0 = 0xff00,
}, {
- .type = CX23885_VMUX_DEBUG,
- .vmux = 0,
- .gpio0 = 0xff01,
- }, {
+#endif
.type = CX23885_VMUX_COMPOSITE1,
- .vmux = 1,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN4_CH2 |
+ CX25840_VIN6_CH1,
+ .amux = CX25840_AUDIO7,
.gpio0 = 0xff02,
}, {
.type = CX23885_VMUX_SVIDEO,
- .vmux = 2,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN4_CH2 |
+ CX25840_VIN8_CH1 |
+ CX25840_SVIDEO_ON,
+ .amux = CX25840_AUDIO7,
.gpio0 = 0xff02,
} },
},
@@ -267,7 +282,55 @@ struct cx23885_board cx23885_boards[] = {
},
[CX23885_BOARD_HAUPPAUGE_HVR1255] = {
.name = "Hauppauge WinTV-HVR1255",
+ .porta = CX23885_ANALOG_VIDEO,
+ .portc = CX23885_MPEG_DVB,
+ .tuner_type = TUNER_ABSENT,
+ .tuner_addr = 0x42, /* 0x84 >> 1 */
+ .force_bff = 1,
+ .input = {{
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN5_CH2 |
+ CX25840_VIN2_CH1 |
+ CX25840_DIF_ON,
+ .amux = CX25840_AUDIO8,
+ }, {
+ .type = CX23885_VMUX_COMPOSITE1,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN4_CH2 |
+ CX25840_VIN6_CH1,
+ .amux = CX25840_AUDIO7,
+ }, {
+ .type = CX23885_VMUX_SVIDEO,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN4_CH2 |
+ CX25840_VIN8_CH1 |
+ CX25840_SVIDEO_ON,
+ .amux = CX25840_AUDIO7,
+ } },
+ },
+ [CX23885_BOARD_HAUPPAUGE_HVR1255_22111] = {
+ .name = "Hauppauge WinTV-HVR1255",
+ .porta = CX23885_ANALOG_VIDEO,
.portc = CX23885_MPEG_DVB,
+ .tuner_type = TUNER_ABSENT,
+ .tuner_addr = 0x42, /* 0x84 >> 1 */
+ .force_bff = 1,
+ .input = {{
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN5_CH2 |
+ CX25840_VIN2_CH1 |
+ CX25840_DIF_ON,
+ .amux = CX25840_AUDIO8,
+ }, {
+ .type = CX23885_VMUX_SVIDEO,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN4_CH2 |
+ CX25840_VIN8_CH1 |
+ CX25840_SVIDEO_ON,
+ .amux = CX25840_AUDIO7,
+ } },
},
[CX23885_BOARD_HAUPPAUGE_HVR1210] = {
.name = "Hauppauge WinTV-HVR1210",
@@ -624,7 +687,7 @@ struct cx23885_subid cx23885_subids[] = {
}, {
.subvendor = 0x0070,
.subdevice = 0x2259,
- .card = CX23885_BOARD_HAUPPAUGE_HVR1255,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR1255_22111,
}, {
.subvendor = 0x0070,
.subdevice = 0x2291,
@@ -900,7 +963,7 @@ int cx23885_tuner_callback(void *priv, int component, int command, int arg)
struct cx23885_dev *dev = port->dev;
u32 bitmask = 0;
- if (command == XC2028_RESET_CLK)
+ if ((command == XC2028_RESET_CLK) || (command == XC2028_I2C_FLUSH))
return 0;
if (command != 0) {
@@ -1130,6 +1193,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
/* GPIO-5 RF Control: 0 = RF1 Terrestrial, 1 = RF2 Cable */
/* GPIO-6 I2C Gate which can isolate the demod from the bus */
@@ -1267,6 +1331,7 @@ int cx23885_ir_init(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1400:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
/* FIXME: Implement me */
break;
@@ -1424,6 +1489,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
@@ -1511,6 +1577,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
@@ -1526,10 +1593,10 @@ void cx23885_card_setup(struct cx23885_dev *dev)
*/
switch (dev->board) {
case CX23885_BOARD_TEVII_S470:
- case CX23885_BOARD_HAUPPAUGE_HVR1250:
/* Currently only enabled for the integrated IR controller */
if (!enable_885_ir)
break;
+ case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_HAUPPAUGE_HVR1800:
case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
case CX23885_BOARD_HAUPPAUGE_HVR1700:
@@ -1539,6 +1606,8 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_MYGICA_X8506:
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index a80a92c..cd54268 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -712,6 +712,7 @@ static int dvb_register(struct cx23885_tsport *port)
}
break;
case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
i2c_bus = &dev->i2c_bus[0];
fe0->dvb.frontend = dvb_attach(s5h1411_attach,
&hcw_s5h1411_config,
@@ -721,6 +722,11 @@ static int dvb_register(struct cx23885_tsport *port)
0x60, &dev->i2c_bus[1].i2c_adap,
&hauppauge_tda18271_config);
}
+
+ tda18271_attach(&dev->ts1.analog_fe,
+ 0x60, &dev->i2c_bus[1].i2c_adap,
+ &hauppauge_tda18271_config);
+
break;
case CX23885_BOARD_HAUPPAUGE_HVR1800:
i2c_bus = &dev->i2c_bus[0];
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index c654bdc..22f8e7f 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -505,6 +505,9 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
if ((dev->board == CX23885_BOARD_HAUPPAUGE_HVR1800) ||
(dev->board == CX23885_BOARD_MPX885) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1250) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111) ||
(dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850)) {
/* Configure audio routing */
v4l2_subdev_call(dev->sd_cx25840, audio, s_routing,
@@ -1578,7 +1581,9 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
fe = vfe->dvb.frontend;
- if (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850)
+ if ((dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111))
fe = &dev->ts1.analog_fe;
if (fe && fe->ops.tuner_ops.set_analog_params) {
@@ -1608,6 +1613,8 @@ int cx23885_set_frequency(struct file *file, void *priv,
int ret;
switch (dev->board) {
+ case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
ret = cx23885_set_freq_via_ops(dev, f);
break;
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index d884784..13c37ec 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -90,6 +90,7 @@
#define CX23885_BOARD_MYGICA_X8507 33
#define CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL 34
#define CX23885_BOARD_TEVII_S471 35
+#define CX23885_BOARD_HAUPPAUGE_HVR1255_22111 36
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
diff --git a/drivers/media/video/cx25821/cx25821-core.c b/drivers/media/video/cx25821/cx25821-core.c
index 83c1aa6..f11f6f0 100644
--- a/drivers/media/video/cx25821/cx25821-core.c
+++ b/drivers/media/video/cx25821/cx25821-core.c
@@ -904,9 +904,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
list_add_tail(&dev->devlist, &cx25821_devlist);
mutex_unlock(&cx25821_devlist_mutex);
- strcpy(cx25821_boards[UNKNOWN_BOARD].name, "unknown");
- strcpy(cx25821_boards[CX25821_BOARD].name, "cx25821");
-
if (dev->pci->device != 0x8210) {
pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
__func__, dev->pci->device);
diff --git a/drivers/media/video/cx25821/cx25821.h b/drivers/media/video/cx25821/cx25821.h
index b9aa801..029f293 100644
--- a/drivers/media/video/cx25821/cx25821.h
+++ b/drivers/media/video/cx25821/cx25821.h
@@ -187,7 +187,7 @@ enum port {
};
struct cx25821_board {
- char *name;
+ const char *name;
enum port porta;
enum port portb;
enum port portc;
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index fc1ff69..d8eac3e 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -84,7 +84,7 @@ MODULE_PARM_DESC(debug, "Debugging messages [0=Off (default) 1=On]");
/* ----------------------------------------------------------------------- */
-static void cx23885_std_setup(struct i2c_client *client);
+static void cx23888_std_setup(struct i2c_client *client);
int cx25840_write(struct i2c_client *client, u16 addr, u8 value)
{
@@ -638,10 +638,13 @@ static void cx23885_initialize(struct i2c_client *client)
finish_wait(&state->fw_wait, &wait);
destroy_workqueue(q);
- /* Call the cx23885 specific std setup func, we no longer rely on
+ /* Call the cx23888 specific std setup func, we no longer rely on
 * the generic cx25840 func.
*/
- cx23885_std_setup(client);
+ if (is_cx23888(state))
+ cx23888_std_setup(client);
+ else
+ cx25840_std_setup(client);
/* (re)set input */
set_input(client, state->vid_input, state->aud_input);
@@ -1103,9 +1106,23 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
cx25840_write4(client, 0x410, 0xffff0dbf);
cx25840_write4(client, 0x414, 0x00137d03);
- cx25840_write4(client, 0x418, 0x01008080);
+
+ /* on the 887, 0x418 is HSCALE_CTRL, on the 888 it is
+ CHROMA_CTRL */
+ if (is_cx23888(state))
+ cx25840_write4(client, 0x418, 0x01008080);
+ else
+ cx25840_write4(client, 0x418, 0x01000000);
+
cx25840_write4(client, 0x41c, 0x00000000);
- cx25840_write4(client, 0x420, 0x001c3e0f);
+
+ /* on the 887, 0x420 is CHROMA_CTRL, on the 888 it is
+ CRUSH_CTRL */
+ if (is_cx23888(state))
+ cx25840_write4(client, 0x420, 0x001c3e0f);
+ else
+ cx25840_write4(client, 0x420, 0x001c8282);
+
cx25840_write4(client, 0x42c, 0x42600000);
cx25840_write4(client, 0x430, 0x0000039b);
cx25840_write4(client, 0x438, 0x00000000);
@@ -1233,7 +1250,7 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
cx25840_write4(client, 0x8d0, 0x1f063870);
}
- if (is_cx2388x(state)) {
+ if (is_cx23888(state)) {
/* HVR1850 */
/* AUD_IO_CTRL - I2S Input, Parallel1*/
/* - Channel 1 src - Parallel1 (Merlin out) */
@@ -1298,8 +1315,8 @@ static int set_v4lstd(struct i2c_client *client)
}
cx25840_and_or(client, 0x400, ~0xf, fmt);
cx25840_and_or(client, 0x403, ~0x3, pal_m);
- if (is_cx2388x(state))
- cx23885_std_setup(client);
+ if (is_cx23888(state))
+ cx23888_std_setup(client);
else
cx25840_std_setup(client);
if (!is_cx2583x(state))
@@ -1312,6 +1329,7 @@ static int set_v4lstd(struct i2c_client *client)
static int cx25840_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
+ struct cx25840_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
switch (ctrl->id) {
@@ -1324,12 +1342,20 @@ static int cx25840_s_ctrl(struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_SATURATION:
- cx25840_write(client, 0x420, ctrl->val << 1);
- cx25840_write(client, 0x421, ctrl->val << 1);
+ if (is_cx23888(state)) {
+ cx25840_write(client, 0x418, ctrl->val << 1);
+ cx25840_write(client, 0x419, ctrl->val << 1);
+ } else {
+ cx25840_write(client, 0x420, ctrl->val << 1);
+ cx25840_write(client, 0x421, ctrl->val << 1);
+ }
break;
case V4L2_CID_HUE:
- cx25840_write(client, 0x422, ctrl->val);
+ if (is_cx23888(state))
+ cx25840_write(client, 0x41a, ctrl->val);
+ else
+ cx25840_write(client, 0x422, ctrl->val);
break;
default:
@@ -1354,11 +1380,21 @@ static int cx25840_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt
fmt->field = V4L2_FIELD_INTERLACED;
fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
- Vsrc = (cx25840_read(client, 0x476) & 0x3f) << 4;
- Vsrc |= (cx25840_read(client, 0x475) & 0xf0) >> 4;
+ if (is_cx23888(state)) {
+ Vsrc = (cx25840_read(client, 0x42a) & 0x3f) << 4;
+ Vsrc |= (cx25840_read(client, 0x429) & 0xf0) >> 4;
+ } else {
+ Vsrc = (cx25840_read(client, 0x476) & 0x3f) << 4;
+ Vsrc |= (cx25840_read(client, 0x475) & 0xf0) >> 4;
+ }
- Hsrc = (cx25840_read(client, 0x472) & 0x3f) << 4;
- Hsrc |= (cx25840_read(client, 0x471) & 0xf0) >> 4;
+ if (is_cx23888(state)) {
+ Hsrc = (cx25840_read(client, 0x426) & 0x3f) << 4;
+ Hsrc |= (cx25840_read(client, 0x425) & 0xf0) >> 4;
+ } else {
+ Hsrc = (cx25840_read(client, 0x472) & 0x3f) << 4;
+ Hsrc |= (cx25840_read(client, 0x471) & 0xf0) >> 4;
+ }
Vlines = fmt->height + (is_50Hz ? 4 : 7);
@@ -1782,8 +1818,8 @@ static int cx25840_s_video_routing(struct v4l2_subdev *sd,
struct cx25840_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (is_cx2388x(state))
- cx23885_std_setup(client);
+ if (is_cx23888(state))
+ cx23888_std_setup(client);
return set_input(client, input, state->aud_input);
}
@@ -1794,8 +1830,8 @@ static int cx25840_s_audio_routing(struct v4l2_subdev *sd,
struct cx25840_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (is_cx2388x(state))
- cx23885_std_setup(client);
+ if (is_cx23888(state))
+ cx23888_std_setup(client);
return set_input(client, state->vid_input, input);
}
@@ -4939,7 +4975,7 @@ void cx23885_dif_setup(struct i2c_client *client, u32 ifHz)
}
}
-static void cx23885_std_setup(struct i2c_client *client)
+static void cx23888_std_setup(struct i2c_client *client)
{
struct cx25840_state *state = to_state(i2c_get_clientdata(client));
v4l2_std_id std = state->std;
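
Most of the cx25840 changes narrow is_cx2388x() checks to is_cx23888(): on the CX23888 saturation sits at 0x418/0x419 and hue at 0x41a, while the other variants keep 0x420-0x422, and 0x418/0x420 change meaning accordingly. One illustrative alternative, with hypothetical helper names, would be to centralize the per-variant offsets instead of repeating the test at every call site:

	/* Hypothetical helpers: keep variant-dependent register offsets
	 * in one place.
	 */
	static u16 sat_reg(struct cx25840_state *state)
	{
		return is_cx23888(state) ? 0x418 : 0x420;
	}

	static u16 hue_reg(struct cx25840_state *state)
	{
		return is_cx23888(state) ? 0x41a : 0x422;
	}
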
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index e46446a..ed7b2aa 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -471,7 +471,7 @@ static int blackbird_load_firmware(struct cx8802_dev *dev)
dprintk(1,"Loading firmware ...\n");
dataptr = (u32*)firmware->data;
for (i = 0; i < (firmware->size >> 2); i++) {
- value = *dataptr;
+ value = le32_to_cpu(*dataptr);
checksum += ~value;
memory_write(dev->core, i, value);
dataptr++;
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 20a7e24..862c657 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -974,6 +974,7 @@ struct em28xx_board em28xx_boards[] = {
[EM2884_BOARD_CINERGY_HTC_STICK] = {
.name = "Terratec Cinergy HTC Stick",
.has_dvb = 1,
+ .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS,
#if 0
.tuner_type = TUNER_PHILIPS_TDA8290,
.tuner_addr = 0x41,
@@ -2892,7 +2893,7 @@ static void request_module_async(struct work_struct *work)
if (dev->board.has_dvb)
request_module("em28xx-dvb");
- if (dev->board.has_ir_i2c && !disable_ir)
+ if (dev->board.ir_codes && !disable_ir)
request_module("em28xx-rc");
}
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index fce5f76..5e30c4f 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -527,6 +527,8 @@ static int em28xx_ir_init(struct em28xx *dev)
if (dev->board.ir_codes == NULL) {
/* No remote control support */
+ em28xx_warn("Remote control support is not available for "
+ "this card.\n");
return 0;
}
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index 137166d..31721ea 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -1653,7 +1653,7 @@ static int vidioc_streamoff(struct file *file, void *priv,
enum v4l2_buf_type buf_type)
{
struct gspca_dev *gspca_dev = video_drvdata(file);
- int ret;
+ int i, ret;
if (buf_type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -1678,6 +1678,8 @@ static int vidioc_streamoff(struct file *file, void *priv,
wake_up_interruptible(&gspca_dev->wq);
/* empty the transfer queues */
+ for (i = 0; i < gspca_dev->nframes; i++)
+ gspca_dev->frame[i].v4l2_buf.flags &= ~BUF_ALL_FLAGS;
atomic_set(&gspca_dev->fr_q, 0);
atomic_set(&gspca_dev->fr_i, 0);
gspca_dev->fr_o = 0;
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index b5acb1e..80c81dd 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -96,7 +96,7 @@ static void setbrightness(struct gspca_dev *gspca_dev);
static void setcontrast(struct gspca_dev *gspca_dev);
static void setgain(struct gspca_dev *gspca_dev);
static void setexposure(struct gspca_dev *gspca_dev);
-static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val);
+static void setagc(struct gspca_dev *gspca_dev);
static void setawb(struct gspca_dev *gspca_dev);
static void setaec(struct gspca_dev *gspca_dev);
static void setsharpness(struct gspca_dev *gspca_dev);
@@ -189,7 +189,7 @@ static const struct ctrl sd_ctrls[] = {
.step = 1,
.default_value = 1,
},
- .set = sd_setagc
+ .set_control = setagc
},
[AWB] = {
{
@@ -851,6 +851,7 @@ static int sccb_check_status(struct gspca_dev *gspca_dev)
int i;
for (i = 0; i < 5; i++) {
+ msleep(10);
data = ov534_reg_read(gspca_dev, OV534_REG_STATUS);
switch (data) {
@@ -1242,10 +1243,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->ctrls = sd->ctrls;
- /* the auto white balance control works only when auto gain is set */
- if (sd_ctrls[AGC].qctrl.default_value == 0)
- gspca_dev->ctrl_inac |= (1 << AWB);
-
cam->cam_mode = ov772x_mode;
cam->nmodes = ARRAY_SIZE(ov772x_mode);
@@ -1486,29 +1483,6 @@ scan_next:
} while (remaining_len > 0);
}
-static int sd_setagc(struct gspca_dev *gspca_dev, __s32 val)
-{
- struct sd *sd = (struct sd *) gspca_dev;
-
- sd->ctrls[AGC].val = val;
-
- /* the auto white balance control works only
- * when auto gain is set */
- if (val) {
- gspca_dev->ctrl_inac &= ~(1 << AWB);
- } else {
- gspca_dev->ctrl_inac |= (1 << AWB);
- if (sd->ctrls[AWB].val) {
- sd->ctrls[AWB].val = 0;
- if (gspca_dev->streaming)
- setawb(gspca_dev);
- }
- }
- if (gspca_dev->streaming)
- setagc(gspca_dev);
- return gspca_dev->usb_err;
-}
-
static int sd_querymenu(struct gspca_dev *gspca_dev,
struct v4l2_querymenu *menu)
{
diff --git a/drivers/media/video/gspca/ov534_9.c b/drivers/media/video/gspca/ov534_9.c
index b579730..1fd41f0 100644
--- a/drivers/media/video/gspca/ov534_9.c
+++ b/drivers/media/video/gspca/ov534_9.c
@@ -1008,6 +1008,7 @@ static int sccb_check_status(struct gspca_dev *gspca_dev)
int i;
for (i = 0; i < 5; i++) {
+ msleep(10);
data = reg_r(gspca_dev, OV534_REG_STATUS);
switch (data) {
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index 2cb7d95..115da16 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -418,7 +418,7 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
gspca_dev->vdev.ctrl_handler = hdl;
- v4l2_ctrl_handler_init(hdl, 4);
+ v4l2_ctrl_handler_init(hdl, 5);
sd->contrast = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_CONTRAST, 0, 15, 1, 7);
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index ad09820..b9c6f17 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -1761,7 +1761,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
V4L2_CID_SATURATION, 0, 255, 1, 127);
sd->hue = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_HUE, -180, 180, 1, 0);
- v4l2_ctrl_cluster(4, &sd->brightness);
sd->gamma = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_GAMMA, 0, 255, 1, 0x10);
@@ -1770,7 +1769,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
V4L2_CID_BLUE_BALANCE, 0, 127, 1, 0x28);
sd->red = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_RED_BALANCE, 0, 127, 1, 0x28);
- v4l2_ctrl_cluster(2, &sd->blue);
if (sd->sensor != SENSOR_OV9655 && sd->sensor != SENSOR_SOI968 &&
sd->sensor != SENSOR_OV7670 && sd->sensor != SENSOR_MT9M001 &&
@@ -1779,7 +1777,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
V4L2_CID_HFLIP, 0, 1, 1, 0);
sd->vflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_VFLIP, 0, 1, 1, 0);
- v4l2_ctrl_cluster(2, &sd->hflip);
}
if (sd->sensor != SENSOR_SOI968 && sd->sensor != SENSOR_MT9VPRB &&
@@ -1794,6 +1791,20 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
V4L2_CID_GAIN, 0, 28, 1, 0);
sd->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
+ }
+
+ sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_JPEG_COMPRESSION_QUALITY, 50, 90, 1, 80);
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+
+ v4l2_ctrl_cluster(4, &sd->brightness);
+ v4l2_ctrl_cluster(2, &sd->blue);
+ if (sd->hflip)
+ v4l2_ctrl_cluster(2, &sd->hflip);
+ if (sd->autogain) {
if (sd->sensor == SENSOR_SOI968)
/* this sensor doesn't have the exposure control and
autogain is clustered with gain instead. This works
@@ -1803,13 +1814,6 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
/* Otherwise autogain is clustered with exposure. */
v4l2_ctrl_auto_cluster(2, &sd->autogain, 0, false);
}
-
- sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
- V4L2_CID_JPEG_COMPRESSION_QUALITY, 50, 90, 1, 80);
- if (hdl->error) {
- pr_err("Could not initialize controls\n");
- return hdl->error;
- }
return 0;
}
@@ -2066,10 +2070,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
set_gamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma));
set_redblue(gspca_dev, v4l2_ctrl_g_ctrl(sd->blue),
v4l2_ctrl_g_ctrl(sd->red));
- set_gain(gspca_dev, v4l2_ctrl_g_ctrl(sd->gain));
- set_exposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure));
- set_hvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip),
- v4l2_ctrl_g_ctrl(sd->vflip));
+ if (sd->gain)
+ set_gain(gspca_dev, v4l2_ctrl_g_ctrl(sd->gain));
+ if (sd->exposure)
+ set_exposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure));
+ if (sd->hflip)
+ set_hvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip),
+ v4l2_ctrl_g_ctrl(sd->vflip));
reg_w1(gspca_dev, 0x1007, 0x20);
reg_w1(gspca_dev, 0x1061, 0x03);
@@ -2172,7 +2179,7 @@ static void sd_dqcallback(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
int avg_lum;
- if (!v4l2_ctrl_g_ctrl(sd->autogain))
+ if (sd->autogain == NULL || !v4l2_ctrl_g_ctrl(sd->autogain))
return;
avg_lum = atomic_read(&sd->avg_lum);
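
The sn9c20x reordering creates every control first, checks the handler's error state, and only then builds the clusters, guarding the optional hflip/autogain clusters (and their users in sd_start/sd_dqcallback) against controls that were never created. A minimal sketch of that create-check-cluster order, with a hypothetical pair of controls:

	#include <media/v4l2-ctrls.h>

	static int example_init_controls(struct v4l2_ctrl_handler *hdl,
					 const struct v4l2_ctrl_ops *ops,
					 struct v4l2_ctrl **cluster0)
	{
		v4l2_ctrl_handler_init(hdl, 2);
		cluster0[0] = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_BRIGHTNESS,
						0, 255, 1, 128);
		cluster0[1] = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_CONTRAST,
						0, 255, 1, 128);

		/* Check for allocation errors first ... */
		if (hdl->error)
			return hdl->error;
		/* ... then cluster, once every pointer is known valid. */
		v4l2_ctrl_cluster(2, cluster0);
		return 0;
	}
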
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 4d1696d..f38faa9 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -3120,7 +3120,7 @@ static const struct sd_desc sd_desc = {
| (SENSOR_ ## sensor << 8) \
| (flags)
static const struct usb_device_id device_table[] = {
- {USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)},
+ {USB_DEVICE(0x0458, 0x7025), BSF(SN9C120, MI0360B, F_PDN_INV)},
{USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)},
{USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, F_PDN_INV)},
{USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, F_PDN_INV)},
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index 057929e..5462ce2 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -866,10 +866,10 @@ static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *pdev,
pci_write_config_dword(pdev, 0x40, 0xffff);
IVTV_DEBUG_INFO("%d (rev %d) at %02x:%02x.%x, "
- "irq: %d, latency: %d, memory: 0x%lx\n",
+ "irq: %d, latency: %d, memory: 0x%llx\n",
pdev->device, pdev->revision, pdev->bus->number,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
- pdev->irq, pci_latency, (unsigned long)itv->base_addr);
+ pdev->irq, pci_latency, (u64)itv->base_addr);
return 0;
}
@@ -1007,7 +1007,7 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
itv->cxhdl.priv = itv;
itv->cxhdl.func = ivtv_api_func;
- IVTV_DEBUG_INFO("base addr: 0x%08x\n", itv->base_addr);
+ IVTV_DEBUG_INFO("base addr: 0x%llx\n", (u64)itv->base_addr);
/* PCI Device Setup */
retval = ivtv_setup_pci(itv, pdev, pci_id);
@@ -1017,8 +1017,8 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
goto free_mem;
/* map io memory */
- IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
- itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE);
+ IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+ (u64)itv->base_addr + IVTV_ENCODER_OFFSET, IVTV_ENCODER_SIZE);
itv->enc_mem = ioremap_nocache(itv->base_addr + IVTV_ENCODER_OFFSET,
IVTV_ENCODER_SIZE);
if (!itv->enc_mem) {
@@ -1034,8 +1034,8 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
}
if (itv->has_cx23415) {
- IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
- itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
+ IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+ (u64)itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
itv->dec_mem = ioremap_nocache(itv->base_addr + IVTV_DECODER_OFFSET,
IVTV_DECODER_SIZE);
if (!itv->dec_mem) {
@@ -1056,8 +1056,8 @@ static int __devinit ivtv_probe(struct pci_dev *pdev,
}
/* map registers memory */
- IVTV_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
- itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
+ IVTV_DEBUG_INFO("attempting ioremap at 0x%llx len 0x%08x\n",
+ (u64)itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
itv->reg_mem =
ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
if (!itv->reg_mem) {
diff --git a/drivers/media/video/ivtv/ivtv-driver.h b/drivers/media/video/ivtv/ivtv-driver.h
index 2e22002..a7e00f8 100644
--- a/drivers/media/video/ivtv/ivtv-driver.h
+++ b/drivers/media/video/ivtv/ivtv-driver.h
@@ -622,7 +622,7 @@ struct ivtv {
struct v4l2_subdev *sd_video; /* controlling video decoder subdev */
struct v4l2_subdev *sd_audio; /* controlling audio subdev */
struct v4l2_subdev *sd_muxer; /* controlling audio muxer subdev */
- u32 base_addr; /* PCI resource base address */
+ resource_size_t base_addr; /* PCI resource base address */
volatile void __iomem *enc_mem; /* pointer to mapped encoder memory */
volatile void __iomem *dec_mem; /* pointer to mapped decoder memory */
volatile void __iomem *reg_mem; /* pointer to mapped registers */
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index d2dec58..3945556 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -110,22 +110,6 @@ enum {
V4L2_M2M_DST = 1,
};
-/* Source and destination queue data */
-static struct m2mtest_q_data q_data[2];
-
-static struct m2mtest_q_data *get_q_data(enum v4l2_buf_type type)
-{
- switch (type) {
- case V4L2_BUF_TYPE_VIDEO_OUTPUT:
- return &q_data[V4L2_M2M_SRC];
- case V4L2_BUF_TYPE_VIDEO_CAPTURE:
- return &q_data[V4L2_M2M_DST];
- default:
- BUG();
- }
- return NULL;
-}
-
#define V4L2_CID_TRANS_TIME_MSEC V4L2_CID_PRIVATE_BASE
#define V4L2_CID_TRANS_NUM_BUFS (V4L2_CID_PRIVATE_BASE + 1)
@@ -198,8 +182,26 @@ struct m2mtest_ctx {
int aborting;
struct v4l2_m2m_ctx *m2m_ctx;
+
+ /* Source and destination queue data */
+ struct m2mtest_q_data q_data[2];
};
+static struct m2mtest_q_data *get_q_data(struct m2mtest_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ switch (type) {
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ return &ctx->q_data[V4L2_M2M_SRC];
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ return &ctx->q_data[V4L2_M2M_DST];
+ default:
+ BUG();
+ }
+ return NULL;
+}
+
+
static struct v4l2_queryctrl *get_ctrl(int id)
{
int i;
@@ -223,7 +225,7 @@ static int device_process(struct m2mtest_ctx *ctx,
int tile_w, bytes_left;
int width, height, bytesperline;
- q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
width = q_data->width;
height = q_data->height;
@@ -436,7 +438,7 @@ static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
if (!vq)
return -EINVAL;
- q_data = get_q_data(f->type);
+ q_data = get_q_data(ctx, f->type);
f->fmt.pix.width = q_data->width;
f->fmt.pix.height = q_data->height;
@@ -535,7 +537,7 @@ static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
if (!vq)
return -EINVAL;
- q_data = get_q_data(f->type);
+ q_data = get_q_data(ctx, f->type);
if (!q_data)
return -EINVAL;
@@ -747,7 +749,7 @@ static int m2mtest_queue_setup(struct vb2_queue *vq,
struct m2mtest_q_data *q_data;
unsigned int size, count = *nbuffers;
- q_data = get_q_data(vq->type);
+ q_data = get_q_data(ctx, vq->type);
size = q_data->width * q_data->height * q_data->fmt->depth >> 3;
@@ -775,7 +777,7 @@ static int m2mtest_buf_prepare(struct vb2_buffer *vb)
dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
- q_data = get_q_data(vb->vb2_queue->type);
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n",
@@ -860,6 +862,9 @@ static int m2mtest_open(struct file *file)
ctx->transtime = MEM2MEM_DEF_TRANSTIME;
ctx->num_processed = 0;
+ ctx->q_data[V4L2_M2M_SRC].fmt = &formats[0];
+ ctx->q_data[V4L2_M2M_DST].fmt = &formats[0];
+
ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
if (IS_ERR(ctx->m2m_ctx)) {
@@ -986,9 +991,6 @@ static int m2mtest_probe(struct platform_device *pdev)
goto err_m2m;
}
- q_data[V4L2_M2M_SRC].fmt = &formats[0];
- q_data[V4L2_M2M_DST].fmt = &formats[0];
-
return 0;
v4l2_m2m_release(dev->m2m_dev);
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 4296a83..d2e6f82 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -43,6 +43,7 @@
#include <asm/fiq.h>
#include <mach/dma-mx1-mx2.h>
#include <mach/hardware.h>
+#include <mach/irqs.h>
#include <mach/mx1_camera.h>
/*
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index ded26b7..637bde8 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -83,6 +83,7 @@
#define CSICR1_INV_DATA (1 << 3)
#define CSICR1_INV_PCLK (1 << 2)
#define CSICR1_REDGE (1 << 1)
+#define CSICR1_FMT_MASK (CSICR1_PACK_DIR | CSICR1_SWAP16_EN)
#define SHIFT_STATFF_LEVEL 22
#define SHIFT_RXFF_LEVEL 19
@@ -230,6 +231,7 @@ struct mx2_prp_cfg {
u32 src_pixel;
u32 ch1_pixel;
u32 irq_flags;
+ u32 csicr1;
};
/* prp resizing parameters */
@@ -330,6 +332,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
.ch1_pixel = 0x2ca00565, /* RGB565 */
.irq_flags = PRP_INTR_RDERR | PRP_INTR_CH1WERR |
PRP_INTR_CH1FC | PRP_INTR_LBOVF,
+ .csicr1 = 0,
}
},
{
@@ -343,6 +346,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
.irq_flags = PRP_INTR_RDERR | PRP_INTR_CH2WERR |
PRP_INTR_CH2FC | PRP_INTR_LBOVF |
PRP_INTR_CH2OVF,
+ .csicr1 = CSICR1_PACK_DIR,
}
},
{
@@ -356,6 +360,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
.irq_flags = PRP_INTR_RDERR | PRP_INTR_CH2WERR |
PRP_INTR_CH2FC | PRP_INTR_LBOVF |
PRP_INTR_CH2OVF,
+ .csicr1 = CSICR1_SWAP16_EN,
}
},
};
@@ -984,7 +989,6 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct mx2_camera_dev *pcdev = ici->priv;
struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
- const struct soc_camera_format_xlate *xlate;
unsigned long common_flags;
int ret;
int bytesperline;
@@ -1029,24 +1033,7 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
return ret;
}
- xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
- if (!xlate) {
- dev_warn(icd->parent, "Format %x not found\n", pixfmt);
- return -EINVAL;
- }
-
- if (xlate->code == V4L2_MBUS_FMT_YUYV8_2X8) {
- csicr1 |= CSICR1_PACK_DIR;
- csicr1 &= ~CSICR1_SWAP16_EN;
- dev_dbg(icd->parent, "already yuyv format, don't convert\n");
- } else if (xlate->code == V4L2_MBUS_FMT_UYVY8_2X8) {
- csicr1 &= ~CSICR1_PACK_DIR;
- csicr1 |= CSICR1_SWAP16_EN;
- dev_dbg(icd->parent, "convert uyvy mbus format into yuyv\n");
- } else {
- dev_warn(icd->parent, "mbus format not supported\n");
- return -EINVAL;
- }
+ csicr1 = (csicr1 & ~CSICR1_FMT_MASK) | pcdev->emma_prp->cfg.csicr1;
if (common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
csicr1 |= CSICR1_REDGE;
@@ -1155,18 +1142,6 @@ static int mx2_camera_get_formats(struct soc_camera_device *icd,
}
}
- if (code == V4L2_MBUS_FMT_UYVY8_2X8) {
- formats++;
- if (xlate) {
- xlate->host_fmt =
- soc_mbus_get_fmtdesc(V4L2_MBUS_FMT_YUYV8_2X8);
- xlate->code = code;
- dev_dbg(dev, "Providing host format %s for sensor code %d\n",
- xlate->host_fmt->name, code);
- xlate++;
- }
- }
-
/* Generic pass-trough */
formats++;
if (xlate) {
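
In mx2_camera the per-format CSICR1 bits now live in the mx27_emma_prp_table entries and set_bus_param() simply masks them in, replacing the removed if/else chain on the media-bus code. The lookup-and-mask step in isolation (struct abbreviated from the hunks):

	struct fmt_cfg {		/* abbreviated sketch of mx2_fmt_cfg */
		u32 mbus_code;
		u32 csicr1;		/* format-specific CSICR1 bits */
	};

	static u32 apply_fmt_bits(u32 csicr1, const struct fmt_cfg *cfg)
	{
		return (csicr1 & ~CSICR1_FMT_MASK) | cfg->csicr1;
	}
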
diff --git a/drivers/media/video/omap3isp/isppreview.c b/drivers/media/video/omap3isp/isppreview.c
index 8a4935e..dd91da2 100644
--- a/drivers/media/video/omap3isp/isppreview.c
+++ b/drivers/media/video/omap3isp/isppreview.c
@@ -888,12 +888,12 @@ static const struct preview_update update_attrs[] = {
preview_config_contrast,
NULL,
offsetof(struct prev_params, contrast),
- 0, true,
+ 0, 0, true,
}, /* OMAP3ISP_PREV_BRIGHTNESS */ {
preview_config_brightness,
NULL,
offsetof(struct prev_params, brightness),
- 0, true,
+ 0, 0, true,
},
};
@@ -1102,7 +1102,7 @@ static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
unsigned int elv = prev->crop.top + prev->crop.height - 1;
u32 features;
- if (format->code == V4L2_MBUS_FMT_Y10_1X10) {
+ if (format->code != V4L2_MBUS_FMT_Y10_1X10) {
sph -= 2;
eph += 2;
slv -= 2;
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index af2d908..b4c679b 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -26,9 +26,11 @@
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/mutex.h>
+#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/isa.h>
#include <asm/io.h>
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index 3545745..725812a 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -350,7 +350,8 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
if (pixm)
sizes[i] = max(size, pixm->plane_fmt[i].sizeimage);
else
- sizes[i] = size;
+ sizes[i] = max_t(u32, size, frame->payload[i]);
+
allocators[i] = ctx->fimc_dev->alloc_ctx;
}
@@ -479,37 +480,39 @@ static int fimc_capture_set_default_format(struct fimc_dev *fimc);
static int fimc_capture_open(struct file *file)
{
struct fimc_dev *fimc = video_drvdata(file);
- int ret = v4l2_fh_open(file);
-
- if (ret)
- return ret;
+ int ret;
dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
- /* Return if the corresponding video mem2mem node is already opened. */
if (fimc_m2m_active(fimc))
return -EBUSY;
set_bit(ST_CAPT_BUSY, &fimc->state);
- pm_runtime_get_sync(&fimc->pdev->dev);
+ ret = pm_runtime_get_sync(&fimc->pdev->dev);
+ if (ret < 0)
+ return ret;
- if (++fimc->vid_cap.refcnt == 1) {
- ret = fimc_pipeline_initialize(&fimc->pipeline,
- &fimc->vid_cap.vfd->entity, true);
- if (ret < 0) {
- dev_err(&fimc->pdev->dev,
- "Video pipeline initialization failed\n");
- pm_runtime_put_sync(&fimc->pdev->dev);
- fimc->vid_cap.refcnt--;
- v4l2_fh_release(file);
- clear_bit(ST_CAPT_BUSY, &fimc->state);
- return ret;
- }
- ret = fimc_capture_ctrls_create(fimc);
+ ret = v4l2_fh_open(file);
+ if (ret)
+ return ret;
- if (!ret && !fimc->vid_cap.user_subdev_api)
- ret = fimc_capture_set_default_format(fimc);
+ if (++fimc->vid_cap.refcnt != 1)
+ return 0;
+
+ ret = fimc_pipeline_initialize(&fimc->pipeline,
+ &fimc->vid_cap.vfd->entity, true);
+ if (ret < 0) {
+ clear_bit(ST_CAPT_BUSY, &fimc->state);
+ pm_runtime_put_sync(&fimc->pdev->dev);
+ fimc->vid_cap.refcnt--;
+ v4l2_fh_release(file);
+ return ret;
}
+ ret = fimc_capture_ctrls_create(fimc);
+
+ if (!ret && !fimc->vid_cap.user_subdev_api)
+ ret = fimc_capture_set_default_format(fimc);
+
return ret;
}
@@ -818,9 +821,6 @@ static int fimc_cap_g_fmt_mplane(struct file *file, void *fh,
struct fimc_dev *fimc = video_drvdata(file);
struct fimc_ctx *ctx = fimc->vid_cap.ctx;
- if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- return -EINVAL;
-
return fimc_fill_format(&ctx->d_frame, f);
}
@@ -833,9 +833,6 @@ static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
struct v4l2_mbus_framefmt mf;
struct fimc_fmt *ffmt = NULL;
- if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- return -EINVAL;
-
if (pix->pixelformat == V4L2_PIX_FMT_JPEG) {
fimc_capture_try_format(ctx, &pix->width, &pix->height,
NULL, &pix->pixelformat,
@@ -887,8 +884,6 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
struct fimc_fmt *s_fmt = NULL;
int ret, i;
- if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- return -EINVAL;
if (vb2_is_busy(&fimc->vid_cap.vbq))
return -EBUSY;
@@ -924,10 +919,10 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
pix->width = mf->width;
pix->height = mf->height;
}
+
fimc_adjust_mplane_format(ff->fmt, pix->width, pix->height, pix);
for (i = 0; i < ff->fmt->colplanes; i++)
- ff->payload[i] =
- (pix->width * pix->height * ff->fmt->depth[i]) / 8;
+ ff->payload[i] = pix->plane_fmt[i].sizeimage;
set_frame_bounds(ff, pix->width, pix->height);
/* Reset the composition rectangle if not yet configured */
@@ -1045,18 +1040,22 @@ static int fimc_cap_streamon(struct file *file, void *priv,
{
struct fimc_dev *fimc = video_drvdata(file);
struct fimc_pipeline *p = &fimc->pipeline;
+ struct v4l2_subdev *sd = p->subdevs[IDX_SENSOR];
int ret;
if (fimc_capture_active(fimc))
return -EBUSY;
- media_entity_pipeline_start(&p->subdevs[IDX_SENSOR]->entity,
- p->m_pipeline);
+ ret = media_entity_pipeline_start(&sd->entity, p->m_pipeline);
+ if (ret < 0)
+ return ret;
if (fimc->vid_cap.user_subdev_api) {
ret = fimc_pipeline_validate(fimc);
- if (ret)
+ if (ret < 0) {
+ media_entity_pipeline_stop(&sd->entity);
return ret;
+ }
}
return vb2_streamon(&fimc->vid_cap.vbq, type);
}
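
The reworked fimc_capture_open() now takes the runtime-PM reference before v4l2_fh_open(), checks its return value, and unwinds in reverse order when pipeline initialization fails. A stripped-down sketch of that open/unwind shape (device struct and pipeline helper hypothetical):

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>
	#include <media/v4l2-dev.h>
	#include <media/v4l2-fh.h>

	struct example_dev {			/* hypothetical */
		struct platform_device *pdev;
	};

	static int example_pipeline_init(struct example_dev *dev)
	{
		return 0;			/* hypothetical pipeline setup */
	}

	static int example_open(struct file *file)
	{
		struct example_dev *dev = video_drvdata(file);
		int ret;

		ret = pm_runtime_get_sync(&dev->pdev->dev);
		if (ret < 0)
			return ret;

		ret = v4l2_fh_open(file);
		if (ret)
			goto err_pm;

		ret = example_pipeline_init(dev);
		if (ret < 0)
			goto err_fh;
		return 0;

	err_fh:
		v4l2_fh_release(file);
	err_pm:
		pm_runtime_put_sync(&dev->pdev->dev);
		return ret;
	}
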
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index fedcd56..a4646ca 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -153,7 +153,7 @@ static struct fimc_fmt fimc_formats[] = {
.colplanes = 2,
.flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:0 non-contiguous 2-planar, Y/CbCr",
+ .name = "YUV 4:2:0 non-contig. 2p, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12M,
.color = FIMC_FMT_YCBCR420,
.depth = { 8, 4 },
@@ -161,7 +161,7 @@ static struct fimc_fmt fimc_formats[] = {
.colplanes = 2,
.flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:0 non-contiguous 3-planar, Y/Cb/Cr",
+ .name = "YUV 4:2:0 non-contig. 3p, Y/Cb/Cr",
.fourcc = V4L2_PIX_FMT_YUV420M,
.color = FIMC_FMT_YCBCR420,
.depth = { 8, 2, 2 },
@@ -169,7 +169,7 @@ static struct fimc_fmt fimc_formats[] = {
.colplanes = 3,
.flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:0 non-contiguous 2-planar, Y/CbCr, tiled",
+ .name = "YUV 4:2:0 non-contig. 2p, tiled",
.fourcc = V4L2_PIX_FMT_NV12MT,
.color = FIMC_FMT_YCBCR420,
.depth = { 8, 4 },
@@ -615,7 +615,7 @@ int fimc_ctrls_create(struct fimc_ctx *ctx)
ctx->effect.type = FIMC_REG_CIIMGEFF_FIN_BYPASS;
if (!handler->error) {
- v4l2_ctrl_cluster(3, &ctrls->colorfx);
+ v4l2_ctrl_cluster(2, &ctrls->colorfx);
ctrls->ready = true;
}
@@ -641,7 +641,7 @@ void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
if (!ctrls->ready)
return;
- mutex_lock(&ctrls->handler.lock);
+ mutex_lock(ctrls->handler.lock);
v4l2_ctrl_activate(ctrls->rotate, active);
v4l2_ctrl_activate(ctrls->hflip, active);
v4l2_ctrl_activate(ctrls->vflip, active);
@@ -660,7 +660,7 @@ void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
ctx->hflip = 0;
ctx->vflip = 0;
}
- mutex_unlock(&ctrls->handler.lock);
+ mutex_unlock(ctrls->handler.lock);
}
/* Update maximum value of the alpha color control */
@@ -741,8 +741,8 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
pix->width = width;
for (i = 0; i < pix->num_planes; ++i) {
- u32 bpl = pix->plane_fmt[i].bytesperline;
- u32 *sizeimage = &pix->plane_fmt[i].sizeimage;
+ struct v4l2_plane_pix_format *plane_fmt = &pix->plane_fmt[i];
+ u32 bpl = plane_fmt->bytesperline;
if (fmt->colplanes > 1 && (bpl == 0 || bpl < pix->width))
bpl = pix->width; /* Planar */
@@ -754,8 +754,9 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
if (i == 0) /* Same bytesperline for each plane. */
bytesperline = bpl;
- pix->plane_fmt[i].bytesperline = bytesperline;
- *sizeimage = (pix->width * pix->height * fmt->depth[i]) / 8;
+ plane_fmt->bytesperline = bytesperline;
+ plane_fmt->sizeimage = max((pix->width * pix->height *
+ fmt->depth[i]) / 8, plane_fmt->sizeimage);
}
}
diff --git a/drivers/media/video/s5p-fimc/fimc-lite.c b/drivers/media/video/s5p-fimc/fimc-lite.c
index 400d701a..74ff310 100644
--- a/drivers/media/video/s5p-fimc/fimc-lite.c
+++ b/drivers/media/video/s5p-fimc/fimc-lite.c
@@ -451,34 +451,44 @@ static void fimc_lite_clear_event_counters(struct fimc_lite *fimc)
static int fimc_lite_open(struct file *file)
{
struct fimc_lite *fimc = video_drvdata(file);
- int ret = v4l2_fh_open(file);
+ int ret;
- if (ret)
- return ret;
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
set_bit(ST_FLITE_IN_USE, &fimc->state);
- pm_runtime_get_sync(&fimc->pdev->dev);
+ ret = pm_runtime_get_sync(&fimc->pdev->dev);
+ if (ret < 0)
+ goto done;
- if (++fimc->ref_count != 1 || fimc->out_path != FIMC_IO_DMA)
- return ret;
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto done;
- ret = fimc_pipeline_initialize(&fimc->pipeline, &fimc->vfd->entity,
- true);
- if (ret < 0) {
- v4l2_err(fimc->vfd, "Video pipeline initialization failed\n");
- pm_runtime_put_sync(&fimc->pdev->dev);
- fimc->ref_count--;
- v4l2_fh_release(file);
- clear_bit(ST_FLITE_IN_USE, &fimc->state);
- }
+ if (++fimc->ref_count == 1 && fimc->out_path == FIMC_IO_DMA) {
+ ret = fimc_pipeline_initialize(&fimc->pipeline,
+ &fimc->vfd->entity, true);
+ if (ret < 0) {
+ pm_runtime_put_sync(&fimc->pdev->dev);
+ fimc->ref_count--;
+ v4l2_fh_release(file);
+ clear_bit(ST_FLITE_IN_USE, &fimc->state);
+ }
- fimc_lite_clear_event_counters(fimc);
+ fimc_lite_clear_event_counters(fimc);
+ }
+done:
+ mutex_unlock(&fimc->lock);
return ret;
}
static int fimc_lite_close(struct file *file)
{
struct fimc_lite *fimc = video_drvdata(file);
+ int ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
if (--fimc->ref_count == 0 && fimc->out_path == FIMC_IO_DMA) {
clear_bit(ST_FLITE_IN_USE, &fimc->state);
@@ -492,20 +502,39 @@ static int fimc_lite_close(struct file *file)
if (fimc->ref_count == 0)
vb2_queue_release(&fimc->vb_queue);
- return v4l2_fh_release(file);
+ ret = v4l2_fh_release(file);
+
+ mutex_unlock(&fimc->lock);
+ return ret;
}
static unsigned int fimc_lite_poll(struct file *file,
struct poll_table_struct *wait)
{
struct fimc_lite *fimc = video_drvdata(file);
- return vb2_poll(&fimc->vb_queue, file, wait);
+ int ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+		return POLLERR;
+
+ ret = vb2_poll(&fimc->vb_queue, file, wait);
+ mutex_unlock(&fimc->lock);
+
+ return ret;
}
static int fimc_lite_mmap(struct file *file, struct vm_area_struct *vma)
{
struct fimc_lite *fimc = video_drvdata(file);
- return vb2_mmap(&fimc->vb_queue, vma);
+ int ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ ret = vb2_mmap(&fimc->vb_queue, vma);
+ mutex_unlock(&fimc->lock);
+
+ return ret;
}
static const struct v4l2_file_operations fimc_lite_fops = {
@@ -762,7 +791,9 @@ static int fimc_lite_streamon(struct file *file, void *priv,
if (fimc_lite_active(fimc))
return -EBUSY;
- media_entity_pipeline_start(&sensor->entity, p->m_pipeline);
+ ret = media_entity_pipeline_start(&sensor->entity, p->m_pipeline);
+ if (ret < 0)
+ return ret;
ret = fimc_pipeline_validate(fimc);
if (ret) {
@@ -1508,7 +1539,7 @@ static int fimc_lite_suspend(struct device *dev)
return 0;
ret = fimc_lite_stop_capture(fimc, suspend);
- if (ret)
+ if (ret < 0 || !fimc_lite_active(fimc))
return ret;
return fimc_pipeline_shutdown(&fimc->pipeline);
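
The fimc-lite changes above follow one pattern: every file operation serializes on fimc->lock with mutex_lock_interruptible() and checks the results of pm_runtime_get_sync() and media_entity_pipeline_start(), which were previously ignored. A minimal sketch of the locking pattern for a generic V4L2 file op, with struct my_dev as a placeholder for the driver state:

static int my_fop_open(struct file *file)
{
        struct my_dev *dev = video_drvdata(file);   /* placeholder type */
        int ret;

        if (mutex_lock_interruptible(&dev->lock))
                return -ERESTARTSYS;                /* interrupted by a signal */

        ret = v4l2_fh_open(file);                   /* check, don't ignore */

        mutex_unlock(&dev->lock);
        return ret;
}
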
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.c b/drivers/media/video/s5p-fimc/fimc-mdevice.c
index 6753c45..52cef48 100644
--- a/drivers/media/video/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.c
@@ -193,9 +193,13 @@ int __fimc_pipeline_shutdown(struct fimc_pipeline *p)
int fimc_pipeline_shutdown(struct fimc_pipeline *p)
{
- struct media_entity *me = &p->subdevs[IDX_SENSOR]->entity;
+ struct media_entity *me;
int ret;
+ if (!p || !p->subdevs[IDX_SENSOR])
+ return -EINVAL;
+
+ me = &p->subdevs[IDX_SENSOR]->entity;
mutex_lock(&me->parent->graph_mutex);
ret = __fimc_pipeline_shutdown(p);
mutex_unlock(&me->parent->graph_mutex);
@@ -498,12 +502,12 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
* @source: the source entity to create links to all fimc entities from
* @sensor: sensor subdev linked to FIMC[fimc_id] entity, may be null
* @pad: the source entity pad index
- * @fimc_id: index of the fimc device for which link should be enabled
+ * @link_mask: bitmask of the fimc devices for which link should be enabled
*/
static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
struct media_entity *source,
struct v4l2_subdev *sensor,
- int pad, int fimc_id)
+ int pad, int link_mask)
{
struct fimc_sensor_info *s_info;
struct media_entity *sink;
@@ -520,7 +524,7 @@ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
if (!fmd->fimc[i]->variant->has_cam_if)
continue;
- flags = (i == fimc_id) ? MEDIA_LNK_FL_ENABLED : 0;
+ flags = ((1 << i) & link_mask) ? MEDIA_LNK_FL_ENABLED : 0;
sink = &fmd->fimc[i]->vid_cap.subdev.entity;
ret = media_entity_create_link(source, pad, sink,
@@ -552,7 +556,10 @@ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
if (!fmd->fimc_lite[i])
continue;
- flags = (i == fimc_id) ? MEDIA_LNK_FL_ENABLED : 0;
+ if (link_mask & (1 << (i + FIMC_MAX_DEVS)))
+ flags = MEDIA_LNK_FL_ENABLED;
+ else
+ flags = 0;
sink = &fmd->fimc_lite[i]->subdev.entity;
ret = media_entity_create_link(source, pad, sink,
@@ -614,9 +621,8 @@ static int fimc_md_create_links(struct fimc_md *fmd)
struct s5p_fimc_isp_info *pdata;
struct fimc_sensor_info *s_info;
struct media_entity *source, *sink;
- int i, pad, fimc_id = 0;
- int ret = 0;
- u32 flags;
+ int i, pad, fimc_id = 0, ret = 0;
+ u32 flags, link_mask = 0;
for (i = 0; i < fmd->num_sensors; i++) {
if (fmd->sensor[i].subdev == NULL)
@@ -668,19 +674,20 @@ static int fimc_md_create_links(struct fimc_md *fmd)
if (source == NULL)
continue;
+ link_mask = 1 << fimc_id++;
ret = __fimc_md_create_fimc_sink_links(fmd, source, sensor,
- pad, fimc_id++);
+ pad, link_mask);
}
- fimc_id = 0;
for (i = 0; i < ARRAY_SIZE(fmd->csis); i++) {
if (fmd->csis[i].sd == NULL)
continue;
source = &fmd->csis[i].sd->entity;
pad = CSIS_PAD_SOURCE;
+ link_mask = 1 << fimc_id++;
ret = __fimc_md_create_fimc_sink_links(fmd, source, NULL,
- pad, fimc_id++);
+ pad, link_mask);
}
/* Create immutable links between each FIMC's subdev and video node */
@@ -734,8 +741,8 @@ static void fimc_md_put_clocks(struct fimc_md *fmd)
}
static int __fimc_md_set_camclk(struct fimc_md *fmd,
- struct fimc_sensor_info *s_info,
- bool on)
+ struct fimc_sensor_info *s_info,
+ bool on)
{
struct s5p_fimc_isp_info *pdata = s_info->pdata;
struct fimc_camclk_info *camclk;
@@ -744,12 +751,10 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
if (WARN_ON(pdata->clk_id >= FIMC_MAX_CAMCLKS) || fmd == NULL)
return -EINVAL;
- if (s_info->clk_on == on)
- return 0;
camclk = &fmd->camclk[pdata->clk_id];
- dbg("camclk %d, f: %lu, clk: %p, on: %d",
- pdata->clk_id, pdata->clk_frequency, camclk, on);
+ dbg("camclk %d, f: %lu, use_count: %d, on: %d",
+ pdata->clk_id, pdata->clk_frequency, camclk->use_count, on);
if (on) {
if (camclk->use_count > 0 &&
@@ -760,11 +765,9 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
clk_set_rate(camclk->clock, pdata->clk_frequency);
camclk->frequency = pdata->clk_frequency;
ret = clk_enable(camclk->clock);
+ dbg("Enabled camclk %d: f: %lu", pdata->clk_id,
+ clk_get_rate(camclk->clock));
}
- s_info->clk_on = 1;
- dbg("Enabled camclk %d: f: %lu", pdata->clk_id,
- clk_get_rate(camclk->clock));
-
return ret;
}
@@ -773,7 +776,6 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
if (--camclk->use_count == 0) {
clk_disable(camclk->clock);
- s_info->clk_on = 0;
dbg("Disabled camclk %d", pdata->clk_id);
}
return ret;
@@ -789,8 +791,6 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
* devices to which sensors can be attached, either directly or through
* the MIPI CSI receiver. The clock is allowed here to be used by
* multiple sensors concurrently if they use the same frequency.
- * The per sensor subdev clk_on attribute helps to synchronize accesses
- * to the sclk_cam clocks from the video and media device nodes.
* This function should only be called when the graph mutex is held.
*/
int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on)
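
Replacing the single fimc_id argument with link_mask lets one call enable links to several entities at once; FIMC-LITE sinks are addressed by bits above FIMC_MAX_DEVS. A compact restatement of that convention (sketch only, the helper name is invented):

/* Link flags for sink number 'index' given the caller's link_mask. */
static u32 sink_link_flags(unsigned int index, u32 link_mask, bool is_lite)
{
        unsigned int bit = is_lite ? index + FIMC_MAX_DEVS : index;

        return (link_mask & (1 << bit)) ? MEDIA_LNK_FL_ENABLED : 0;
}
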
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.h b/drivers/media/video/s5p-fimc/fimc-mdevice.h
index 3b8a349..1f5dbaf 100644
--- a/drivers/media/video/s5p-fimc/fimc-mdevice.h
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.h
@@ -47,7 +47,6 @@ struct fimc_camclk_info {
* @pdata: sensor's attributes passed as media device's platform data
* @subdev: image sensor v4l2 subdev
* @host: fimc device the sensor is currently linked to
- * @clk_on: sclk_cam clock's state associated with this subdev
*
* This data structure applies to image sensor and the writeback subdevs.
*/
@@ -55,7 +54,6 @@ struct fimc_sensor_info {
struct s5p_fimc_isp_info *pdata;
struct v4l2_subdev *subdev;
struct fimc_dev *host;
- bool clk_on;
};
/**
diff --git a/drivers/media/video/s5p-mfc/regs-mfc.h b/drivers/media/video/s5p-mfc/regs-mfc.h
index 053a8a8..a19bece 100644
--- a/drivers/media/video/s5p-mfc/regs-mfc.h
+++ b/drivers/media/video/s5p-mfc/regs-mfc.h
@@ -164,10 +164,15 @@
decoded pic */
#define S5P_FIMV_SI_DISPLAY_Y_ADR 0x2010 /* luma addr of displayed pic */
#define S5P_FIMV_SI_DISPLAY_C_ADR 0x2014 /* chroma addr of displayed pic */
+
#define S5P_FIMV_SI_CONSUMED_BYTES 0x2018 /* Consumed number of bytes to
decode a frame */
#define S5P_FIMV_SI_DISPLAY_STATUS 0x201c /* status of decoded picture */
+#define S5P_FIMV_SI_DECODE_Y_ADR 0x2024 /* luma addr of decoded pic */
+#define S5P_FIMV_SI_DECODE_C_ADR 0x2028 /* chroma addr of decoded pic */
+#define S5P_FIMV_SI_DECODE_STATUS 0x202c /* status of decoded picture */
+
#define S5P_FIMV_SI_CH0_SB_ST_ADR 0x2044 /* start addr of stream buf */
#define S5P_FIMV_SI_CH0_SB_FRM_SIZE 0x2048 /* size of stream buf */
#define S5P_FIMV_SI_CH0_DESC_ADR 0x204c /* addr of descriptor buf */
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
index c25ec02..feea867 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
@@ -627,13 +627,13 @@ static int s5p_mfc_dec_s_ctrl(struct v4l2_ctrl *ctrl)
switch (ctrl->id) {
case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY:
- ctx->loop_filter_mpeg4 = ctrl->val;
+ ctx->display_delay = ctrl->val;
break;
case V4L2_CID_MPEG_MFC51_VIDEO_DECODER_H264_DISPLAY_DELAY_ENABLE:
ctx->display_delay_enable = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:
- ctx->display_delay = ctrl->val;
+ ctx->loop_filter_mpeg4 = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:
ctx->slice_interface = ctrl->val;
@@ -996,6 +996,7 @@ int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx)
for (i = 0; i < NUM_CTRLS; i++) {
if (IS_MFC51_PRIV(controls[i].id)) {
+ memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
cfg.ops = &s5p_mfc_dec_ctrl_ops;
cfg.id = controls[i].id;
cfg.min = controls[i].minimum;
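
Two fixes in this hunk: the swapped control IDs (H.264 display delay vs. MPEG-4 deblocking filter) now update the intended context fields, and the on-stack v4l2_ctrl_config is zeroed before each private control is registered, so fields set for one control no longer leak into the next. A sketch of the cleared-before-reuse pattern; my_ctrl_ops, my_handler and the controls[] table stand in for the driver's own symbols:

struct v4l2_ctrl_config cfg;
int i;

for (i = 0; i < NUM_CTRLS; i++) {
        /* Clear everything left over from the previous iteration. */
        memset(&cfg, 0, sizeof(cfg));
        cfg.ops = &my_ctrl_ops;
        cfg.id  = controls[i].id;
        cfg.min = controls[i].minimum;
        cfg.max = controls[i].maximum;
        v4l2_ctrl_new_custom(&my_handler, &cfg, NULL);
}
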
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
index acedb20..158b789 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
@@ -243,12 +243,6 @@ static struct mfc_control controls[] = {
.minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
.maximum = V4L2_MPEG_VIDEO_H264_LEVEL_4_0,
.default_value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0,
- .menu_skip_mask = ~(
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_1) |
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_4_2) |
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_0) |
- (1 << V4L2_MPEG_VIDEO_H264_LEVEL_5_1)
- ),
},
{
.id = V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL,
@@ -494,7 +488,7 @@ static struct mfc_control controls[] = {
.type = V4L2_CTRL_TYPE_MENU,
.minimum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
.maximum = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_EXTENDED,
- .default_value = 0,
+ .default_value = V4L2_MPEG_VIDEO_H264_VUI_SAR_IDC_UNSPECIFIED,
.menu_skip_mask = 0,
},
{
@@ -534,7 +528,7 @@ static struct mfc_control controls[] = {
.type = V4L2_CTRL_TYPE_MENU,
.minimum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
.maximum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE,
- .default_value = 0,
+ .default_value = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE,
.menu_skip_mask = 0,
},
{
@@ -907,6 +901,8 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
mfc_err("failed to try output format\n");
return -EINVAL;
}
+ v4l_bound_align_image(&pix_fmt_mp->width, 8, 1920, 1,
+ &pix_fmt_mp->height, 4, 1080, 1, 0);
} else {
mfc_err("invalid buf type\n");
return -EINVAL;
@@ -1777,6 +1773,7 @@ int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx)
}
for (i = 0; i < NUM_CTRLS; i++) {
if (IS_MFC51_PRIV(controls[i].id)) {
+ memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
cfg.ops = &s5p_mfc_enc_ctrl_ops;
cfg.id = controls[i].id;
cfg.min = controls[i].minimum;
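
Besides the same memset-before-reuse fix as on the decoder side, the encoder now clamps the requested raw-frame size. v4l_bound_align_image() treats the alignment arguments as log2 values, so the 1s above align width and height to 2 and the trailing 0 adds no combined-size constraint. A short usage sketch:

u32 width = 1367, height = 768;

/* Clamp to 8..1920 x 4..1080 and round both dimensions to a multiple
 * of 2; width becomes 1368, height stays 768. */
v4l_bound_align_image(&width, 8, 1920, 1, &height, 4, 1080, 1, 0);
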
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_opr.h b/drivers/media/video/s5p-mfc/s5p_mfc_opr.h
index db83836..5932d1c 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_opr.h
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_opr.h
@@ -57,10 +57,12 @@ void s5p_mfc_cleanup_queue(struct list_head *lh, struct vb2_queue *vq);
S5P_FIMV_SI_DISPLAY_Y_ADR) << \
MFC_OFFSET_SHIFT)
#define s5p_mfc_get_dec_y_adr() (readl(dev->regs_base + \
- S5P_FIMV_SI_DISPLAY_Y_ADR) << \
+ S5P_FIMV_SI_DECODE_Y_ADR) << \
MFC_OFFSET_SHIFT)
#define s5p_mfc_get_dspl_status() readl(dev->regs_base + \
S5P_FIMV_SI_DISPLAY_STATUS)
+#define s5p_mfc_get_dec_status() readl(dev->regs_base + \
+ S5P_FIMV_SI_DECODE_STATUS)
#define s5p_mfc_get_frame_type() (readl(dev->regs_base + \
S5P_FIMV_DECODE_FRAME_TYPE) \
& S5P_FIMV_DECODE_FRAME_MASK)
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_shm.h b/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
index 764eac6..cf962a4 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_shm.h
@@ -13,8 +13,7 @@
#ifndef S5P_MFC_SHM_H_
#define S5P_MFC_SHM_H_
-enum MFC_SHM_OFS
-{
+enum MFC_SHM_OFS {
EXTENEDED_DECODE_STATUS = 0x00, /* D */
SET_FRAME_TAG = 0x04, /* D */
GET_FRAME_TAG_TOP = 0x08, /* D */
diff --git a/drivers/media/video/smiapp/Kconfig b/drivers/media/video/smiapp/Kconfig
index f7b35ff..fb99ff1 100644
--- a/drivers/media/video/smiapp/Kconfig
+++ b/drivers/media/video/smiapp/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_SMIAPP
tristate "SMIA++/SMIA sensor support"
- depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAVE_CLK
select VIDEO_SMIAPP_PLL
---help---
This is a generic driver for SMIA++/SMIA camera modules.
diff --git a/drivers/media/video/smiapp/smiapp-core.c b/drivers/media/video/smiapp/smiapp-core.c
index f518026..9cf5bda 100644
--- a/drivers/media/video/smiapp/smiapp-core.c
+++ b/drivers/media/video/smiapp/smiapp-core.c
@@ -31,7 +31,9 @@
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
#include <linux/v4l2-mediabus.h>
#include <media/v4l2-device.h>
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 3e050e1..1ad5ab6 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -1178,7 +1178,7 @@ static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
return 0;
if (vt->type == t->mode && analog_ops->get_afc)
vt->afc = analog_ops->get_afc(&t->fe);
- if (t->mode != V4L2_TUNER_RADIO) {
+ if (vt->type != V4L2_TUNER_RADIO) {
vt->capability |= V4L2_TUNER_CAP_NORM;
vt->rangelow = tv_range[0] * 16;
vt->rangehigh = tv_range[1] * 16;
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 5ccbd46..0cbada1 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -656,7 +656,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_TRY_ENCODER_CMD, vidioc_try_encoder_cmd);
SET_VALID_IOCTL(ops, VIDIOC_DECODER_CMD, vidioc_decoder_cmd);
SET_VALID_IOCTL(ops, VIDIOC_TRY_DECODER_CMD, vidioc_try_decoder_cmd);
- if (ops->vidioc_g_parm || vdev->current_norm)
+ if (ops->vidioc_g_parm || vdev->vfl_type == VFL_TYPE_GRABBER)
set_bit(_IOC_NR(VIDIOC_G_PARM), valid_ioctls);
SET_VALID_IOCTL(ops, VIDIOC_S_PARM, vidioc_s_parm);
SET_VALID_IOCTL(ops, VIDIOC_G_TUNER, vidioc_g_tuner);
@@ -679,6 +679,9 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_PRESET, vidioc_query_dv_preset);
SET_VALID_IOCTL(ops, VIDIOC_S_DV_TIMINGS, vidioc_s_dv_timings);
SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap);
/* yes, really vidioc_subscribe_event */
SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event);
SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event);
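
The three new entries follow the existing convention in determine_valid_ioctls(): an ioctl is only advertised when the driver fills in the matching v4l2_ioctl_ops callback. Assuming SET_VALID_IOCTL keeps its usual shape, each line amounts to a test like this sketch:

if (ops->vidioc_enum_dv_timings)
        set_bit(_IOC_NR(VIDIOC_ENUM_DV_TIMINGS), valid_ioctls);
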
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 91be4e8..d7fa896 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -1680,6 +1680,7 @@ static long __video_do_ioctl(struct file *file,
break;
ret = 0;
+ p->parm.capture.readbuffers = 2;
if (ops->vidioc_g_std)
ret = ops->vidioc_g_std(file, fh, &std);
if (ret == 0)
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index 4d7391e..aae1720 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -2561,7 +2561,7 @@ static int vino_acquire_input(struct vino_channel_settings *vcs)
} else if (vino_drvdata->decoder
&& (vino_drvdata->decoder_owner == VINO_NO_CHANNEL)) {
int input;
- int data_norm;
+ int data_norm = 0;
v4l2_std_id norm;
input = VINO_INPUT_COMPOSITE;
@@ -2651,7 +2651,7 @@ static int vino_set_input(struct vino_channel_settings *vcs, int input)
}
if (vino_drvdata->decoder_owner == vcs->channel) {
- int data_norm;
+ int data_norm = 0;
v4l2_std_id norm;
ret = decoder_call(video, s_routing,
diff --git a/drivers/media/video/vivi.c b/drivers/media/video/vivi.c
index 0960d7f..08c1024 100644
--- a/drivers/media/video/vivi.c
+++ b/drivers/media/video/vivi.c
@@ -1149,10 +1149,14 @@ static ssize_t
vivi_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
struct vivi_dev *dev = video_drvdata(file);
+ int err;
dprintk(dev, 1, "read called\n");
- return vb2_read(&dev->vb_vidq, data, count, ppos,
+ mutex_lock(&dev->mutex);
+ err = vb2_read(&dev->vb_vidq, data, count, ppos,
file->f_flags & O_NONBLOCK);
+ mutex_unlock(&dev->mutex);
+ return err;
}
static unsigned int
diff --git a/drivers/media/video/zoran/zoran.h b/drivers/media/video/zoran/zoran.h
index d7166af..ca2754a 100644
--- a/drivers/media/video/zoran/zoran.h
+++ b/drivers/media/video/zoran/zoran.h
@@ -172,8 +172,10 @@ struct zoran_jpg_settings {
struct v4l2_jpegcompression jpg_comp; /* JPEG-specific capture settings */
};
+struct zoran_fh;
+
struct zoran_mapping {
- struct file *file;
+ struct zoran_fh *fh;
int count;
};
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index c573109..c6ccdeb 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -2811,7 +2811,7 @@ static void
zoran_vm_close (struct vm_area_struct *vma)
{
struct zoran_mapping *map = vma->vm_private_data;
- struct zoran_fh *fh = map->file->private_data;
+ struct zoran_fh *fh = map->fh;
struct zoran *zr = fh->zr;
int i;
@@ -2938,7 +2938,7 @@ zoran_mmap (struct file *file,
res = -ENOMEM;
goto mmap_unlock_and_return;
}
- map->file = file;
+ map->fh = fh;
map->count = 1;
vma->vm_ops = &zoran_vm_ops;
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index a5c591f..d99db56 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1653,7 +1653,6 @@ mpt_mapresources(MPT_ADAPTER *ioc)
unsigned long port;
u32 msize;
u32 psize;
- u8 revision;
int r = -ENODEV;
struct pci_dev *pdev;
@@ -1670,8 +1669,6 @@ mpt_mapresources(MPT_ADAPTER *ioc)
return r;
}
- pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
-
if (sizeof(dma_addr_t) > 4) {
const uint64_t required_mask = dma_get_required_mask
(&pdev->dev);
@@ -1779,7 +1776,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
MPT_ADAPTER *ioc;
u8 cb_idx;
int r = -ENODEV;
- u8 revision;
u8 pcixcmd;
static int mpt_ids = 0;
#ifdef CONFIG_PROC_FS
@@ -1887,8 +1883,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
ioc->name, &ioc->facts, &ioc->pfacts[0]));
- pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
- mpt_get_product_name(pdev->vendor, pdev->device, revision, ioc->prod_name);
+ mpt_get_product_name(pdev->vendor, pdev->device, pdev->revision,
+ ioc->prod_name);
switch (pdev->device)
{
@@ -1903,7 +1899,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
break;
case MPI_MANUFACTPAGE_DEVICEID_FC929X:
- if (revision < XL_929) {
+ if (pdev->revision < XL_929) {
/* 929X Chip Fix. Set Split transactions level
* for PCIX. Set MOST bits to zero.
*/
@@ -1934,7 +1930,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
/* 1030 Chip Fix. Disable Split transactions
* for PCIX. Set MOST bits to zero if Rev < C0( = 8).
*/
- if (revision < C0_1030) {
+ if (pdev->revision < C0_1030) {
pci_read_config_byte(pdev, 0x6a, &pcixcmd);
pcixcmd &= 0x8F;
pci_write_config_byte(pdev, 0x6a, pcixcmd);
@@ -6483,6 +6479,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
printk(MYIOC_s_INFO_FMT "%s: host reset in"
" progress mpt_config timed out.!!\n",
__func__, ioc->name);
+ mutex_unlock(&ioc->mptbase_cmds.mutex);
return -EFAULT;
}
spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
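
Two independent fixes here: mpt_config() now drops mptbase_cmds.mutex on the host-reset timeout path, and the revision checks use the value the PCI core already cached at enumeration time instead of re-reading config space. A minimal sketch of the latter (the helper name is a placeholder):

static void report_revision(struct pci_dev *pdev)
{
        /* pdev->revision is filled in by the PCI core; no
         * pci_read_config_byte(PCI_CLASS_REVISION) needed. */
        dev_info(&pdev->dev, "chip revision 0x%02x\n", pdev->revision);
}
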
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 6e6e16a..b383b69 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1250,7 +1250,6 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
int iocnum;
unsigned int port;
int cim_rev;
- u8 revision;
struct scsi_device *sdev;
VirtDevice *vdevice;
@@ -1324,8 +1323,7 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
pdev = (struct pci_dev *) ioc->pcidev;
karg->pciId = pdev->device;
- pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
- karg->hwRev = revision;
+ karg->hwRev = pdev->revision;
karg->subSystemDevice = pdev->subsystem_device;
karg->subSystemVendor = pdev->subsystem_vendor;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index e129c82..92144ed 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -286,6 +286,7 @@ config TWL6040_CORE
depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
+ select IRQ_DOMAIN
default n
help
Say yes here if you want support for Texas Instruments TWL6040 audio
diff --git a/drivers/mfd/ab5500-core.h b/drivers/mfd/ab5500-core.h
deleted file mode 100644
index 63b30b1..0000000
--- a/drivers/mfd/ab5500-core.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2011 ST-Ericsson
- * License terms: GNU General Public License (GPL) version 2
- * Shared definitions and data structures for the AB5500 MFD driver
- */
-
-/* Read/write operation values. */
-#define AB5500_PERM_RD (0x01)
-#define AB5500_PERM_WR (0x02)
-
-/* Read/write permissions. */
-#define AB5500_PERM_RO (AB5500_PERM_RD)
-#define AB5500_PERM_RW (AB5500_PERM_RD | AB5500_PERM_WR)
-
-#define AB5500_MASK_BASE (0x60)
-#define AB5500_MASK_END (0x79)
-#define AB5500_CHIP_ID (0x20)
-
-/**
- * struct ab5500_reg_range
- * @first: the first address of the range
- * @last: the last address of the range
- * @perm: access permissions for the range
- */
-struct ab5500_reg_range {
- u8 first;
- u8 last;
- u8 perm;
-};
-
-/**
- * struct ab5500_i2c_ranges
- * @count: the number of ranges in the list
- * @range: the list of register ranges
- */
-struct ab5500_i2c_ranges {
- u8 nranges;
- u8 bankid;
- const struct ab5500_reg_range *range;
-};
-
-/**
- * struct ab5500_i2c_banks
- * @count: the number of ranges in the list
- * @range: the list of register ranges
- */
-struct ab5500_i2c_banks {
- u8 nbanks;
- const struct ab5500_i2c_ranges *bank;
-};
-
-/**
- * struct ab5500_bank
- * @slave_addr: I2C slave_addr found in AB5500 specification
- * @name: Documentation name of the bank. For reference
- */
-struct ab5500_bank {
- u8 slave_addr;
- const char *name;
-};
-
-static const struct ab5500_bank bankinfo[AB5500_NUM_BANKS] = {
- [AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP] = {
- AB5500_ADDR_VIT_IO_I2C_CLK_TST_OTP, "VIT_IO_I2C_CLK_TST_OTP"},
- [AB5500_BANK_VDDDIG_IO_I2C_CLK_TST] = {
- AB5500_ADDR_VDDDIG_IO_I2C_CLK_TST, "VDDDIG_IO_I2C_CLK_TST"},
- [AB5500_BANK_VDENC] = {AB5500_ADDR_VDENC, "VDENC"},
- [AB5500_BANK_SIM_USBSIM] = {AB5500_ADDR_SIM_USBSIM, "SIM_USBSIM"},
- [AB5500_BANK_LED] = {AB5500_ADDR_LED, "LED"},
- [AB5500_BANK_ADC] = {AB5500_ADDR_ADC, "ADC"},
- [AB5500_BANK_RTC] = {AB5500_ADDR_RTC, "RTC"},
- [AB5500_BANK_STARTUP] = {AB5500_ADDR_STARTUP, "STARTUP"},
- [AB5500_BANK_DBI_ECI] = {AB5500_ADDR_DBI_ECI, "DBI-ECI"},
- [AB5500_BANK_CHG] = {AB5500_ADDR_CHG, "CHG"},
- [AB5500_BANK_FG_BATTCOM_ACC] = {
- AB5500_ADDR_FG_BATTCOM_ACC, "FG_BATCOM_ACC"},
- [AB5500_BANK_USB] = {AB5500_ADDR_USB, "USB"},
- [AB5500_BANK_IT] = {AB5500_ADDR_IT, "IT"},
- [AB5500_BANK_VIBRA] = {AB5500_ADDR_VIBRA, "VIBRA"},
- [AB5500_BANK_AUDIO_HEADSETUSB] = {
- AB5500_ADDR_AUDIO_HEADSETUSB, "AUDIO_HEADSETUSB"},
-};
-
-int ab5500_get_register_interruptible_raw(struct ab5500 *ab, u8 bank, u8 reg,
- u8 *value);
-int ab5500_mask_and_set_register_interruptible_raw(struct ab5500 *ab, u8 bank,
- u8 reg, u8 bitmask, u8 bitvalues);
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 671c8bc..50e83dc5 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2735,6 +2735,7 @@ static struct regulator_consumer_supply db8500_vape_consumers[] = {
REGULATOR_SUPPLY("vcore", "uart2"),
REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"),
REGULATOR_SUPPLY("v-hsi", "ste_hsi.0"),
+ REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
};
static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
index 3fcdab3..03df422 100644
--- a/drivers/mfd/mc13xxx-spi.c
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -49,10 +49,72 @@ static struct regmap_config mc13xxx_regmap_spi_config = {
.reg_bits = 7,
.pad_bits = 1,
.val_bits = 24,
+ .write_flag_mask = 0x80,
.max_register = MC13XXX_NUMREGS,
.cache_type = REGCACHE_NONE,
+ .use_single_rw = 1,
+};
+
+static int mc13xxx_spi_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ unsigned char w[4] = { *((unsigned char *) reg), 0, 0, 0};
+ unsigned char r[4];
+ unsigned char *p = val;
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_transfer t = {
+ .tx_buf = w,
+ .rx_buf = r,
+ .len = 4,
+ };
+
+ struct spi_message m;
+ int ret;
+
+ if (val_size != 3 || reg_size != 1)
+ return -ENOTSUPP;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+ ret = spi_sync(spi, &m);
+
+ memcpy(p, &r[1], 3);
+
+ return ret;
+}
+
+static int mc13xxx_spi_write(void *context, const void *data, size_t count)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+
+ if (count != 4)
+ return -ENOTSUPP;
+
+ return spi_write(spi, data, count);
+}
+
+/*
+ * We cannot use regmap-spi generic bus implementation here.
+ * The MC13783 chip will get corrupted if CS signal is deasserted
+ * and on i.MX31 SoC (the target SoC for MC13783 PMIC) the SPI controller
+ * has the following errata (DSPhl22960):
+ * "The CSPI negates SS when the FIFO becomes empty with
+ * SSCTL= 0. Software cannot guarantee that the FIFO will not
+ * drain because of higher priority interrupts and the
+ * non-realtime characteristics of the operating system. As a
+ * result, the SS will negate before all of the data has been
+ * transferred to/from the peripheral."
+ * We work around this by accessing the SPI controller with a
+ * single transfer.
+ */
+
+static struct regmap_bus regmap_mc13xxx_bus = {
+ .write = mc13xxx_spi_write,
+ .read = mc13xxx_spi_read,
};
static int mc13xxx_spi_probe(struct spi_device *spi)
@@ -73,12 +135,13 @@ static int mc13xxx_spi_probe(struct spi_device *spi)
dev_set_drvdata(&spi->dev, mc13xxx);
spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
- spi->bits_per_word = 32;
mc13xxx->dev = &spi->dev;
mutex_init(&mc13xxx->lock);
- mc13xxx->regmap = regmap_init_spi(spi, &mc13xxx_regmap_spi_config);
+ mc13xxx->regmap = regmap_init(&spi->dev, &regmap_mc13xxx_bus, &spi->dev,
+ &mc13xxx_regmap_spi_config);
+
if (IS_ERR(mc13xxx->regmap)) {
ret = PTR_ERR(mc13xxx->regmap);
dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
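
Because the chip requires each register access to be one unbroken SPI transfer, the driver stops using the generic regmap-spi bus and supplies its own regmap_bus callbacks, registering the map with regmap_init() instead of regmap_init_spi(). A sketch of that registration; my_bus_read/my_bus_write and my_probe stand in for the driver's own functions, and the config is the one shown at the top of this diff:

static struct regmap_bus my_single_xfer_bus = {
        .write = my_bus_write,  /* (void *context, const void *data, size_t count) */
        .read  = my_bus_read,   /* (void *context, const void *reg, size_t reg_size,
                                 *  void *val, size_t val_size) */
};

static int my_probe(struct spi_device *spi)
{
        struct regmap *map;

        /* The third argument is the opaque context handed back to the
         * bus callbacks; here it is simply the SPI device. */
        map = regmap_init(&spi->dev, &my_single_xfer_bus, &spi->dev,
                          &mc13xxx_regmap_spi_config);
        if (IS_ERR(map))
                return PTR_ERR(map);
        return 0;
}
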
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 7e96bb2..41088ec 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -25,6 +25,7 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
+#include <linux/gpio.h>
#include <plat/cpu.h>
#include <plat/usb.h>
#include <linux/pm_runtime.h>
@@ -500,8 +501,21 @@ static void omap_usbhs_init(struct device *dev)
dev_dbg(dev, "starting TI HSUSB Controller\n");
pm_runtime_get_sync(dev);
- spin_lock_irqsave(&omap->lock, flags);
+ if (pdata->ehci_data->phy_reset) {
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+ gpio_request_one(pdata->ehci_data->reset_gpio_port[0],
+ GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
+
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+ gpio_request_one(pdata->ehci_data->reset_gpio_port[1],
+ GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
+
+ /* Hold the PHY in RESET for enough time till DIR is high */
+ udelay(10);
+ }
+
+ spin_lock_irqsave(&omap->lock, flags);
omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
@@ -581,9 +595,39 @@ static void omap_usbhs_init(struct device *dev)
}
spin_unlock_irqrestore(&omap->lock, flags);
+
+ if (pdata->ehci_data->phy_reset) {
+ /* Hold the PHY in RESET for enough time till
+ * PHY is settled and ready
+ */
+ udelay(10);
+
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+ gpio_set_value_cansleep
+ (pdata->ehci_data->reset_gpio_port[0], 1);
+
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+ gpio_set_value_cansleep
+ (pdata->ehci_data->reset_gpio_port[1], 1);
+ }
+
pm_runtime_put_sync(dev);
}
+static void omap_usbhs_deinit(struct device *dev)
+{
+ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
+ struct usbhs_omap_platform_data *pdata = &omap->platdata;
+
+ if (pdata->ehci_data->phy_reset) {
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+ gpio_free(pdata->ehci_data->reset_gpio_port[0]);
+
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+ gpio_free(pdata->ehci_data->reset_gpio_port[1]);
+ }
+}
+
/**
* usbhs_omap_probe - initialize TI-based HCDs
@@ -767,6 +811,7 @@ static int __devinit usbhs_omap_probe(struct platform_device *pdev)
goto end_probe;
err_alloc:
+ omap_usbhs_deinit(&pdev->dev);
iounmap(omap->tll_base);
err_tll:
@@ -818,6 +863,7 @@ static int __devexit usbhs_omap_remove(struct platform_device *pdev)
{
struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
+ omap_usbhs_deinit(&pdev->dev);
iounmap(omap->tll_base);
iounmap(omap->uhh_base);
clk_put(omap->init_60m_fclk);
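
The PHY reset handling added above requests each valid reset GPIO already driven low, waits for the PHY to settle, releases the reset once the controller is programmed, and frees the lines again in the new omap_usbhs_deinit() teardown path. The lifecycle in isolation (reset_gpio is a placeholder for pdata->ehci_data->reset_gpio_port[n]):

if (gpio_is_valid(reset_gpio)) {
        /* Request the line already driven low: PHY held in reset. */
        gpio_request_one(reset_gpio, GPIOF_OUT_INIT_LOW, "PHY reset");
        udelay(10);                              /* keep reset asserted */
        /* ... program the controller while the PHY is quiescent ... */
        gpio_set_value_cansleep(reset_gpio, 1);  /* release the PHY */
}

/* and on teardown: */
if (gpio_is_valid(reset_gpio))
        gpio_free(reset_gpio);
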
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 00c0aba..c4a69f1 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -356,7 +356,14 @@ static int __devinit palmas_i2c_probe(struct i2c_client *i2c,
}
}
- ret = regmap_add_irq_chip(palmas->regmap[1], palmas->irq,
+ /* Change IRQ into clear on read mode for efficiency */
+ slave = PALMAS_BASE_TO_SLAVE(PALMAS_INTERRUPT_BASE);
+ addr = PALMAS_BASE_TO_REG(PALMAS_INTERRUPT_BASE, PALMAS_INT_CTRL);
+ reg = PALMAS_INT_CTRL_INT_CLEAR;
+
+ regmap_write(palmas->regmap[slave], addr, reg);
+
+ ret = regmap_add_irq_chip(palmas->regmap[slave], palmas->irq,
IRQF_ONESHOT | IRQF_TRIGGER_LOW, -1, &palmas_irq_chip,
&palmas->irq_data);
if (ret < 0)
@@ -441,6 +448,9 @@ static int __devinit palmas_i2c_probe(struct i2c_client *i2c,
goto err;
}
+ children[PALMAS_PMIC_ID].platform_data = pdata->pmic_pdata;
+ children[PALMAS_PMIC_ID].pdata_size = sizeof(*pdata->pmic_pdata);
+
ret = mfd_add_devices(palmas->dev, -1,
children, ARRAY_SIZE(palmas_children),
NULL, regmap_irq_chip_get_base(palmas->irq_data));
@@ -472,6 +482,7 @@ static const struct i2c_device_id palmas_i2c_id[] = {
{ "twl6035", },
{ "twl6037", },
{ "tps65913", },
+ { /* end */ }
};
MODULE_DEVICE_TABLE(i2c, palmas_i2c_id);
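
Besides switching the interrupt block to clear-on-read and wiring the PMIC platform data into the child cell, this hunk adds the terminating entry every i2c_device_id table needs: the I2C core stops at the first zeroed element, so a missing sentinel makes it read past the end of the array. Sketch:

static const struct i2c_device_id my_i2c_id[] = {
        { "my-chip", 0 },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, my_i2c_id);
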
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index 373f423..947a06a 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -6,7 +6,7 @@
*
* License Terms: GNU General Public License, version 2
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
- * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics
+ * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
*/
#include <linux/i2c.h>
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index afd4590..9edfe86 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -4,7 +4,7 @@
* Copyright (C) ST Microelectronics SA 2011
*
* License Terms: GNU General Public License, version 2
- * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics
+ * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
*/
#include <linux/spi/spi.h>
@@ -146,4 +146,4 @@ module_exit(stmpe_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index db194e4..61c097a 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/regmap.h>
#include <linux/err.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps65217.h>
@@ -132,6 +133,61 @@ int tps65217_clear_bits(struct tps65217 *tps, unsigned int reg,
}
EXPORT_SYMBOL_GPL(tps65217_clear_bits);
+#ifdef CONFIG_OF
+static struct of_regulator_match reg_matches[] = {
+ { .name = "dcdc1", .driver_data = (void *)TPS65217_DCDC_1 },
+ { .name = "dcdc2", .driver_data = (void *)TPS65217_DCDC_2 },
+ { .name = "dcdc3", .driver_data = (void *)TPS65217_DCDC_3 },
+ { .name = "ldo1", .driver_data = (void *)TPS65217_LDO_1 },
+ { .name = "ldo2", .driver_data = (void *)TPS65217_LDO_2 },
+ { .name = "ldo3", .driver_data = (void *)TPS65217_LDO_3 },
+ { .name = "ldo4", .driver_data = (void *)TPS65217_LDO_4 },
+};
+
+static struct tps65217_board *tps65217_parse_dt(struct i2c_client *client)
+{
+ struct device_node *node = client->dev.of_node;
+ struct tps65217_board *pdata;
+ struct device_node *regs;
+ int count = ARRAY_SIZE(reg_matches);
+ int ret, i;
+
+ regs = of_find_node_by_name(node, "regulators");
+ if (!regs)
+ return NULL;
+
+ ret = of_regulator_match(&client->dev, regs, reg_matches, count);
+ of_node_put(regs);
+ if ((ret < 0) || (ret > count))
+ return NULL;
+
+ count = ret;
+ pdata = devm_kzalloc(&client->dev, count * sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ for (i = 0; i < count; i++) {
+ if (!reg_matches[i].init_data || !reg_matches[i].of_node)
+ continue;
+
+ pdata->tps65217_init_data[i] = reg_matches[i].init_data;
+ pdata->of_node[i] = reg_matches[i].of_node;
+ }
+
+ return pdata;
+}
+
+static struct of_device_id tps65217_of_match[] = {
+ { .compatible = "ti,tps65217", },
+ { },
+};
+#else
+static struct tps65217_board *tps65217_parse_dt(struct i2c_client *client)
+{
+ return NULL;
+}
+#endif
+
static struct regmap_config tps65217_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -141,10 +197,14 @@ static int __devinit tps65217_probe(struct i2c_client *client,
const struct i2c_device_id *ids)
{
struct tps65217 *tps;
+ struct regulator_init_data *reg_data;
struct tps65217_board *pdata = client->dev.platform_data;
int i, ret;
unsigned int version;
+ if (!pdata && client->dev.of_node)
+ pdata = tps65217_parse_dt(client);
+
tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
if (!tps)
return -ENOMEM;
@@ -182,8 +242,9 @@ static int __devinit tps65217_probe(struct i2c_client *client,
}
pdev->dev.parent = tps->dev;
- platform_device_add_data(pdev, &pdata->tps65217_init_data[i],
- sizeof(pdata->tps65217_init_data[i]));
+ pdev->dev.of_node = pdata->of_node[i];
+ reg_data = pdata->tps65217_init_data[i];
+ platform_device_add_data(pdev, reg_data, sizeof(*reg_data));
tps->regulator_pdev[i] = pdev;
platform_device_add(pdev);
@@ -212,6 +273,8 @@ MODULE_DEVICE_TABLE(i2c, tps65217_id_table);
static struct i2c_driver tps65217_driver = {
.driver = {
.name = "tps65217",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(tps65217_of_match),
},
.id_table = tps65217_id_table,
.probe = tps65217_probe,
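
The new DT path builds the board data from child nodes of a "regulators" node via of_regulator_match(), and hooks up the match table with of_match_ptr() so the reference compiles away when CONFIG_OF is disabled. A stripped-down sketch of the parsing step, with my_matches and the node names purely illustrative:

static struct of_regulator_match my_matches[] = {
        { .name = "dcdc1" },
        { .name = "ldo1"  },
};

static int my_parse_regulators(struct device *dev)
{
        struct device_node *regs;
        int found;

        regs = of_find_node_by_name(dev->of_node, "regulators");
        if (!regs)
                return -ENODEV;

        found = of_regulator_match(dev, regs, my_matches,
                                   ARRAY_SIZE(my_matches));
        of_node_put(regs);
        if (found < 0)
                return found;

        /* Matched entries now carry init_data and of_node for the
         * corresponding child node; unmatched ones stay NULL. */
        return found;
}
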
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 2661f6e..154f3ef 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -511,7 +511,6 @@ config USB_SWITCH_FSA9480
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
-source "drivers/misc/iwmc3200top/Kconfig"
source "drivers/misc/ti-st/Kconfig"
source "drivers/misc/lis3lv02d/Kconfig"
source "drivers/misc/carma/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 456972f..b88df7a 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -36,7 +36,6 @@ obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
obj-$(CONFIG_DS1682) += ds1682.o
obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
obj-$(CONFIG_C2PORT) += c2port/
-obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
obj-y += cb710/
diff --git a/drivers/misc/iwmc3200top/Kconfig b/drivers/misc/iwmc3200top/Kconfig
deleted file mode 100644
index 9e4b88f..0000000
--- a/drivers/misc/iwmc3200top/Kconfig
+++ /dev/null
@@ -1,20 +0,0 @@
-config IWMC3200TOP
- tristate "Intel Wireless MultiCom Top Driver"
- depends on MMC && EXPERIMENTAL
- select FW_LOADER
- ---help---
- Intel Wireless MultiCom 3200 Top driver is responsible for
- for firmware load and enabled coms enumeration
-
-config IWMC3200TOP_DEBUG
- bool "Enable full debug output of iwmc3200top Driver"
- depends on IWMC3200TOP
- ---help---
- Enable full debug output of iwmc3200top Driver
-
-config IWMC3200TOP_DEBUGFS
- bool "Enable Debugfs debugging interface for iwmc3200top"
- depends on IWMC3200TOP
- ---help---
- Enable creation of debugfs files for iwmc3200top
-
diff --git a/drivers/misc/iwmc3200top/Makefile b/drivers/misc/iwmc3200top/Makefile
deleted file mode 100644
index fbf53fb..0000000
--- a/drivers/misc/iwmc3200top/Makefile
+++ /dev/null
@@ -1,29 +0,0 @@
-# iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
-# drivers/misc/iwmc3200top/Makefile
-#
-# Copyright (C) 2009 Intel Corporation. All rights reserved.
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License version
-# 2 as published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
-# 02110-1301, USA.
-#
-#
-# Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
-# -
-#
-#
-
-obj-$(CONFIG_IWMC3200TOP) += iwmc3200top.o
-iwmc3200top-objs := main.o fw-download.o
-iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUG) += log.o
-iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUGFS) += debugfs.o
diff --git a/drivers/misc/iwmc3200top/debugfs.c b/drivers/misc/iwmc3200top/debugfs.c
deleted file mode 100644
index 62fbaec..0000000
--- a/drivers/misc/iwmc3200top/debugfs.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
- * drivers/misc/iwmc3200top/debufs.c
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
- * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
- * -
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/ctype.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio.h>
-#include <linux/debugfs.h>
-
-#include "iwmc3200top.h"
-#include "fw-msg.h"
-#include "log.h"
-#include "debugfs.h"
-
-
-
-/* Constants definition */
-#define HEXADECIMAL_RADIX 16
-
-/* Functions definition */
-
-
-#define DEBUGFS_ADD(name, parent) do { \
- dbgfs->dbgfs_##parent##_files.file_##name = \
- debugfs_create_file(#name, 0644, dbgfs->dir_##parent, priv, \
- &iwmct_dbgfs_##name##_ops); \
-} while (0)
-
-#define DEBUGFS_RM(name) do { \
- debugfs_remove(name); \
- name = NULL; \
-} while (0)
-
-#define DEBUGFS_READ_FUNC(name) \
-ssize_t iwmct_dbgfs_##name##_read(struct file *file, \
- char __user *user_buf, \
- size_t count, loff_t *ppos);
-
-#define DEBUGFS_WRITE_FUNC(name) \
-ssize_t iwmct_dbgfs_##name##_write(struct file *file, \
- const char __user *user_buf, \
- size_t count, loff_t *ppos);
-
-#define DEBUGFS_READ_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name) \
- static const struct file_operations iwmct_dbgfs_##name##_ops = { \
- .read = iwmct_dbgfs_##name##_read, \
- .open = iwmct_dbgfs_open_file_generic, \
- .llseek = generic_file_llseek, \
- };
-
-#define DEBUGFS_WRITE_FILE_OPS(name) \
- DEBUGFS_WRITE_FUNC(name) \
- static const struct file_operations iwmct_dbgfs_##name##_ops = { \
- .write = iwmct_dbgfs_##name##_write, \
- .open = iwmct_dbgfs_open_file_generic, \
- .llseek = generic_file_llseek, \
- };
-
-#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name) \
- DEBUGFS_WRITE_FUNC(name) \
- static const struct file_operations iwmct_dbgfs_##name##_ops = {\
- .write = iwmct_dbgfs_##name##_write, \
- .read = iwmct_dbgfs_##name##_read, \
- .open = iwmct_dbgfs_open_file_generic, \
- .llseek = generic_file_llseek, \
- };
-
-
-/* Debugfs file ops definitions */
-
-/*
- * Create the debugfs files and directories
- *
- */
-void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
-{
- struct iwmct_debugfs *dbgfs;
-
- dbgfs = kzalloc(sizeof(struct iwmct_debugfs), GFP_KERNEL);
- if (!dbgfs) {
- LOG_ERROR(priv, DEBUGFS, "failed to allocate %zd bytes\n",
- sizeof(struct iwmct_debugfs));
- return;
- }
-
- priv->dbgfs = dbgfs;
- dbgfs->name = name;
- dbgfs->dir_drv = debugfs_create_dir(name, NULL);
- if (!dbgfs->dir_drv) {
- LOG_ERROR(priv, DEBUGFS, "failed to create debugfs dir\n");
- return;
- }
-
- return;
-}
-
-/**
- * Remove the debugfs files and directories
- *
- */
-void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
-{
- if (!dbgfs)
- return;
-
- DEBUGFS_RM(dbgfs->dir_drv);
- kfree(dbgfs);
- dbgfs = NULL;
-}
-
diff --git a/drivers/misc/iwmc3200top/debugfs.h b/drivers/misc/iwmc3200top/debugfs.h
deleted file mode 100644
index 71d4575..0000000
--- a/drivers/misc/iwmc3200top/debugfs.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
- * drivers/misc/iwmc3200top/debufs.h
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
- * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
- * -
- *
- */
-
-#ifndef __DEBUGFS_H__
-#define __DEBUGFS_H__
-
-
-#ifdef CONFIG_IWMC3200TOP_DEBUGFS
-
-struct iwmct_debugfs {
- const char *name;
- struct dentry *dir_drv;
- struct dir_drv_files {
- } dbgfs_drv_files;
-};
-
-void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name);
-void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs);
-
-#else /* CONFIG_IWMC3200TOP_DEBUGFS */
-
-struct iwmct_debugfs;
-
-static inline void
-iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
-{}
-
-static inline void
-iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
-{}
-
-#endif /* CONFIG_IWMC3200TOP_DEBUGFS */
-
-#endif /* __DEBUGFS_H__ */
-
diff --git a/drivers/misc/iwmc3200top/fw-download.c b/drivers/misc/iwmc3200top/fw-download.c
deleted file mode 100644
index e27afde..0000000
--- a/drivers/misc/iwmc3200top/fw-download.c
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
- * drivers/misc/iwmc3200top/fw-download.c
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
- * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
- * -
- *
- */
-
-#include <linux/firmware.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/slab.h>
-#include <asm/unaligned.h>
-
-#include "iwmc3200top.h"
-#include "log.h"
-#include "fw-msg.h"
-
-#define CHECKSUM_BYTES_NUM sizeof(u32)
-
-/**
- init parser struct with file
- */
-static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
- size_t file_size, size_t block_size)
-{
- struct iwmct_parser *parser = &priv->parser;
- struct iwmct_fw_hdr *fw_hdr = &parser->versions;
-
- LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
-
- LOG_INFO(priv, FW_DOWNLOAD, "file_size=%zd\n", file_size);
-
- parser->file = file;
- parser->file_size = file_size;
- parser->cur_pos = 0;
- parser->entry_point = 0;
- parser->buf = kzalloc(block_size, GFP_KERNEL);
- if (!parser->buf) {
- LOG_ERROR(priv, FW_DOWNLOAD, "kzalloc error\n");
- return -ENOMEM;
- }
- parser->buf_size = block_size;
-
- /* extract fw versions */
- memcpy(fw_hdr, parser->file, sizeof(struct iwmct_fw_hdr));
- LOG_INFO(priv, FW_DOWNLOAD, "fw versions are:\n"
- "top %u.%u.%u gps %u.%u.%u bt %u.%u.%u tic %s\n",
- fw_hdr->top_major, fw_hdr->top_minor, fw_hdr->top_revision,
- fw_hdr->gps_major, fw_hdr->gps_minor, fw_hdr->gps_revision,
- fw_hdr->bt_major, fw_hdr->bt_minor, fw_hdr->bt_revision,
- fw_hdr->tic_name);
-
- parser->cur_pos += sizeof(struct iwmct_fw_hdr);
-
- LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
- return 0;
-}
-
-static bool iwmct_checksum(struct iwmct_priv *priv)
-{
- struct iwmct_parser *parser = &priv->parser;
- __le32 *file = (__le32 *)parser->file;
- int i, pad, steps;
- u32 accum = 0;
- u32 checksum;
- u32 mask = 0xffffffff;
-
- pad = (parser->file_size - CHECKSUM_BYTES_NUM) % 4;
- steps = (parser->file_size - CHECKSUM_BYTES_NUM) / 4;
-
- LOG_INFO(priv, FW_DOWNLOAD, "pad=%d steps=%d\n", pad, steps);
-
- for (i = 0; i < steps; i++)
- accum += le32_to_cpu(file[i]);
-
- if (pad) {
- mask <<= 8 * (4 - pad);
- accum += le32_to_cpu(file[steps]) & mask;
- }
-
- checksum = get_unaligned_le32((__le32 *)(parser->file +
- parser->file_size - CHECKSUM_BYTES_NUM));
-
- LOG_INFO(priv, FW_DOWNLOAD,
- "compare checksum accum=0x%x to checksum=0x%x\n",
- accum, checksum);
-
- return checksum == accum;
-}
-
-static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
- size_t *sec_size, __le32 *sec_addr)
-{
- struct iwmct_parser *parser = &priv->parser;
- struct iwmct_dbg *dbg = &priv->dbg;
- struct iwmct_fw_sec_hdr *sec_hdr;
-
- LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
-
- while (parser->cur_pos + sizeof(struct iwmct_fw_sec_hdr)
- <= parser->file_size) {
-
- sec_hdr = (struct iwmct_fw_sec_hdr *)
- (parser->file + parser->cur_pos);
- parser->cur_pos += sizeof(struct iwmct_fw_sec_hdr);
-
- LOG_INFO(priv, FW_DOWNLOAD,
- "sec hdr: type=%s addr=0x%x size=%d\n",
- sec_hdr->type, sec_hdr->target_addr,
- sec_hdr->data_size);
-
- if (strcmp(sec_hdr->type, "ENT") == 0)
- parser->entry_point = le32_to_cpu(sec_hdr->target_addr);
- else if (strcmp(sec_hdr->type, "LBL") == 0)
- strcpy(dbg->label_fw, parser->file + parser->cur_pos);
- else if (((strcmp(sec_hdr->type, "TOP") == 0) &&
- (priv->barker & BARKER_DNLOAD_TOP_MSK)) ||
- ((strcmp(sec_hdr->type, "GPS") == 0) &&
- (priv->barker & BARKER_DNLOAD_GPS_MSK)) ||
- ((strcmp(sec_hdr->type, "BTH") == 0) &&
- (priv->barker & BARKER_DNLOAD_BT_MSK))) {
- *sec_addr = sec_hdr->target_addr;
- *sec_size = le32_to_cpu(sec_hdr->data_size);
- *p_sec = parser->file + parser->cur_pos;
- parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
- return 1;
- } else if (strcmp(sec_hdr->type, "LOG") != 0)
- LOG_WARNING(priv, FW_DOWNLOAD,
- "skipping section type %s\n",
- sec_hdr->type);
-
- parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
- LOG_INFO(priv, FW_DOWNLOAD,
- "finished with section cur_pos=%zd\n", parser->cur_pos);
- }
-
- LOG_TRACE(priv, INIT, "<--\n");
- return 0;
-}
-
-static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
- size_t sec_size, __le32 addr)
-{
- struct iwmct_parser *parser = &priv->parser;
- struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
- const u8 *cur_block = p_sec;
- size_t sent = 0;
- int cnt = 0;
- int ret = 0;
- u32 cmd = 0;
-
- LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
- LOG_INFO(priv, FW_DOWNLOAD, "Download address 0x%x size 0x%zx\n",
- addr, sec_size);
-
- while (sent < sec_size) {
- int i;
- u32 chksm = 0;
- u32 reset = atomic_read(&priv->reset);
- /* actual FW data */
- u32 data_size = min(parser->buf_size - sizeof(*hdr),
- sec_size - sent);
- /* Pad to block size */
- u32 trans_size = (data_size + sizeof(*hdr) +
- IWMC_SDIO_BLK_SIZE - 1) &
- ~(IWMC_SDIO_BLK_SIZE - 1);
- ++cnt;
-
- /* in case of reset, interrupt FW DOWNLAOD */
- if (reset) {
- LOG_INFO(priv, FW_DOWNLOAD,
- "Reset detected. Abort FW download!!!");
- ret = -ECANCELED;
- goto exit;
- }
-
- memset(parser->buf, 0, parser->buf_size);
- cmd |= IWMC_OPCODE_WRITE << CMD_HDR_OPCODE_POS;
- cmd |= IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
- cmd |= (priv->dbg.direct ? 1 : 0) << CMD_HDR_DIRECT_ACCESS_POS;
- cmd |= (priv->dbg.checksum ? 1 : 0) << CMD_HDR_USE_CHECKSUM_POS;
- hdr->data_size = cpu_to_le32(data_size);
- hdr->target_addr = addr;
-
- /* checksum is allowed for sizes divisible by 4 */
- if (data_size & 0x3)
- cmd &= ~CMD_HDR_USE_CHECKSUM_MSK;
-
- memcpy(hdr->data, cur_block, data_size);
-
-
- if (cmd & CMD_HDR_USE_CHECKSUM_MSK) {
-
- chksm = data_size + le32_to_cpu(addr) + cmd;
- for (i = 0; i < data_size >> 2; i++)
- chksm += ((u32 *)cur_block)[i];
-
- hdr->block_chksm = cpu_to_le32(chksm);
- LOG_INFO(priv, FW_DOWNLOAD, "Checksum = 0x%X\n",
- hdr->block_chksm);
- }
-
- LOG_INFO(priv, FW_DOWNLOAD, "trans#%d, len=%d, sent=%zd, "
- "sec_size=%zd, startAddress 0x%X\n",
- cnt, trans_size, sent, sec_size, addr);
-
- if (priv->dbg.dump)
- LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, trans_size);
-
-
- hdr->cmd = cpu_to_le32(cmd);
- /* send it down */
- /* TODO: add more proper sending and error checking */
- ret = iwmct_tx(priv, parser->buf, trans_size);
- if (ret != 0) {
- LOG_INFO(priv, FW_DOWNLOAD,
- "iwmct_tx returned %d\n", ret);
- goto exit;
- }
-
- addr = cpu_to_le32(le32_to_cpu(addr) + data_size);
- sent += data_size;
- cur_block = p_sec + sent;
-
- if (priv->dbg.blocks && (cnt + 1) >= priv->dbg.blocks) {
- LOG_INFO(priv, FW_DOWNLOAD,
- "Block number limit is reached [%d]\n",
- priv->dbg.blocks);
- break;
- }
- }
-
- if (sent < sec_size)
- ret = -EINVAL;
-exit:
- LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
- return ret;
-}
-
-static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
-{
- struct iwmct_parser *parser = &priv->parser;
- struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
- int ret;
- u32 cmd;
-
- LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
-
- memset(parser->buf, 0, parser->buf_size);
- cmd = IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
- if (jump) {
- cmd |= IWMC_OPCODE_JUMP << CMD_HDR_OPCODE_POS;
- hdr->target_addr = cpu_to_le32(parser->entry_point);
- LOG_INFO(priv, FW_DOWNLOAD, "jump address 0x%x\n",
- parser->entry_point);
- } else {
- cmd |= IWMC_OPCODE_LAST_COMMAND << CMD_HDR_OPCODE_POS;
- LOG_INFO(priv, FW_DOWNLOAD, "last command\n");
- }
-
- hdr->cmd = cpu_to_le32(cmd);
-
- LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, sizeof(*hdr));
- /* send it down */
- /* TODO: add more proper sending and error checking */
- ret = iwmct_tx(priv, parser->buf, IWMC_SDIO_BLK_SIZE);
- if (ret)
- LOG_INFO(priv, FW_DOWNLOAD, "iwmct_tx returned %d", ret);
-
- LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
- return 0;
-}
-
-int iwmct_fw_load(struct iwmct_priv *priv)
-{
- const u8 *fw_name = FW_NAME(FW_API_VER);
- const struct firmware *raw;
- const u8 *pdata;
- size_t len;
- __le32 addr;
- int ret;
-
-
- LOG_INFO(priv, FW_DOWNLOAD, "barker download request 0x%x is:\n",
- priv->barker);
- LOG_INFO(priv, FW_DOWNLOAD, "******* Top FW %s requested ********\n",
- (priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
- LOG_INFO(priv, FW_DOWNLOAD, "******* GPS FW %s requested ********\n",
- (priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
- LOG_INFO(priv, FW_DOWNLOAD, "******* BT FW %s requested ********\n",
- (priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");
-
-
- /* get the firmware */
- ret = request_firmware(&raw, fw_name, &priv->func->dev);
- if (ret < 0) {
- LOG_ERROR(priv, FW_DOWNLOAD, "%s request_firmware failed %d\n",
- fw_name, ret);
- goto exit;
- }
-
- if (raw->size < sizeof(struct iwmct_fw_sec_hdr)) {
- LOG_ERROR(priv, FW_DOWNLOAD, "%s smaller then (%zd) (%zd)\n",
- fw_name, sizeof(struct iwmct_fw_sec_hdr), raw->size);
- goto exit;
- }
-
- LOG_INFO(priv, FW_DOWNLOAD, "Read firmware '%s'\n", fw_name);
-
- /* clear parser struct */
- ret = iwmct_fw_parser_init(priv, raw->data, raw->size, priv->trans_len);
- if (ret < 0) {
- LOG_ERROR(priv, FW_DOWNLOAD,
- "iwmct_parser_init failed: Reason %d\n", ret);
- goto exit;
- }
-
- if (!iwmct_checksum(priv)) {
- LOG_ERROR(priv, FW_DOWNLOAD, "checksum error\n");
- ret = -EINVAL;
- goto exit;
- }
-
- /* download firmware to device */
- while (iwmct_parse_next_section(priv, &pdata, &len, &addr)) {
- ret = iwmct_download_section(priv, pdata, len, addr);
- if (ret) {
- LOG_ERROR(priv, FW_DOWNLOAD,
- "%s download section failed\n", fw_name);
- goto exit;
- }
- }
-
- ret = iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK));
-
-exit:
- kfree(priv->parser.buf);
- release_firmware(raw);
- return ret;
-}
diff --git a/drivers/misc/iwmc3200top/fw-msg.h b/drivers/misc/iwmc3200top/fw-msg.h
deleted file mode 100644
index 9e26b75..0000000
--- a/drivers/misc/iwmc3200top/fw-msg.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
- * drivers/misc/iwmc3200top/fw-msg.h
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
- * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
- * -
- *
- */
-
-#ifndef __FWMSG_H__
-#define __FWMSG_H__
-
-#define COMM_TYPE_D2H 0xFF
-#define COMM_TYPE_H2D 0xEE
-
-#define COMM_CATEGORY_OPERATIONAL 0x00
-#define COMM_CATEGORY_DEBUG 0x01
-#define COMM_CATEGORY_TESTABILITY 0x02
-#define COMM_CATEGORY_DIAGNOSTICS 0x03
-
-#define OP_DBG_ZSTR_MSG cpu_to_le16(0x1A)
-
-#define FW_LOG_SRC_MAX 32
-#define FW_LOG_SRC_ALL 255
-
-#define FW_STRING_TABLE_ADDR cpu_to_le32(0x0C000000)
-
-#define CMD_DBG_LOG_LEVEL cpu_to_le16(0x0001)
-#define CMD_TST_DEV_RESET cpu_to_le16(0x0060)
-#define CMD_TST_FUNC_RESET cpu_to_le16(0x0062)
-#define CMD_TST_IFACE_RESET cpu_to_le16(0x0064)
-#define CMD_TST_CPU_UTILIZATION cpu_to_le16(0x0065)
-#define CMD_TST_TOP_DEEP_SLEEP cpu_to_le16(0x0080)
-#define CMD_TST_WAKEUP cpu_to_le16(0x0081)
-#define CMD_TST_FUNC_WAKEUP cpu_to_le16(0x0082)
-#define CMD_TST_FUNC_DEEP_SLEEP_REQUEST cpu_to_le16(0x0083)
-#define CMD_TST_GET_MEM_DUMP cpu_to_le16(0x0096)
-
-#define OP_OPR_ALIVE cpu_to_le16(0x0010)
-#define OP_OPR_CMD_ACK cpu_to_le16(0x001F)
-#define OP_OPR_CMD_NACK cpu_to_le16(0x0020)
-#define OP_TST_MEM_DUMP cpu_to_le16(0x0043)
-
-#define CMD_FLAG_PADDING_256 0x80
-
-#define FW_HCMD_BLOCK_SIZE 256
-
-struct msg_hdr {
- u8 type;
- u8 category;
- __le16 opcode;
- u8 seqnum;
- u8 flags;
- __le16 length;
-} __attribute__((__packed__));
-
-struct log_hdr {
- __le32 timestamp;
- u8 severity;
- u8 logsource;
- __le16 reserved;
-} __attribute__((__packed__));
-
-struct mdump_hdr {
- u8 dmpid;
- u8 frag;
- __le16 size;
- __le32 addr;
-} __attribute__((__packed__));
-
-struct top_msg {
- struct msg_hdr hdr;
- union {
- /* D2H messages */
- struct {
- struct log_hdr log_hdr;
- u8 data[1];
- } __attribute__((__packed__)) log;
-
- struct {
- struct log_hdr log_hdr;
- struct mdump_hdr md_hdr;
- u8 data[1];
- } __attribute__((__packed__)) mdump;
-
- /* H2D messages */
- struct {
- u8 logsource;
- u8 sevmask;
- } __attribute__((__packed__)) logdefs[FW_LOG_SRC_MAX];
- struct mdump_hdr mdump_req;
- } u;
-} __attribute__((__packed__));
-
-
-#endif /* __FWMSG_H__ */
diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h
deleted file mode 100644
index 620973e..0000000
--- a/drivers/misc/iwmc3200top/iwmc3200top.h
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
- * drivers/misc/iwmc3200top/iwmc3200top.h
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
- * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
- * -
- *
- */
-
-#ifndef __IWMC3200TOP_H__
-#define __IWMC3200TOP_H__
-
-#include <linux/workqueue.h>
-
-#define DRV_NAME "iwmc3200top"
-#define FW_API_VER 1
-#define _FW_NAME(api) DRV_NAME "." #api ".fw"
-#define FW_NAME(api) _FW_NAME(api)
-
-#define IWMC_SDIO_BLK_SIZE 256
-#define IWMC_DEFAULT_TR_BLK 64
-#define IWMC_SDIO_DATA_ADDR 0x0
-#define IWMC_SDIO_INTR_ENABLE_ADDR 0x14
-#define IWMC_SDIO_INTR_STATUS_ADDR 0x13
-#define IWMC_SDIO_INTR_CLEAR_ADDR 0x13
-#define IWMC_SDIO_INTR_GET_SIZE_ADDR 0x2C
-
-#define COMM_HUB_HEADER_LENGTH 16
-#define LOGGER_HEADER_LENGTH 10
-
-
-#define BARKER_DNLOAD_BT_POS 0
-#define BARKER_DNLOAD_BT_MSK BIT(BARKER_DNLOAD_BT_POS)
-#define BARKER_DNLOAD_GPS_POS 1
-#define BARKER_DNLOAD_GPS_MSK BIT(BARKER_DNLOAD_GPS_POS)
-#define BARKER_DNLOAD_TOP_POS 2
-#define BARKER_DNLOAD_TOP_MSK BIT(BARKER_DNLOAD_TOP_POS)
-#define BARKER_DNLOAD_RESERVED1_POS 3
-#define BARKER_DNLOAD_RESERVED1_MSK BIT(BARKER_DNLOAD_RESERVED1_POS)
-#define BARKER_DNLOAD_JUMP_POS 4
-#define BARKER_DNLOAD_JUMP_MSK BIT(BARKER_DNLOAD_JUMP_POS)
-#define BARKER_DNLOAD_SYNC_POS 5
-#define BARKER_DNLOAD_SYNC_MSK BIT(BARKER_DNLOAD_SYNC_POS)
-#define BARKER_DNLOAD_RESERVED2_POS 6
-#define BARKER_DNLOAD_RESERVED2_MSK (0x3 << BARKER_DNLOAD_RESERVED2_POS)
-#define BARKER_DNLOAD_BARKER_POS 8
-#define BARKER_DNLOAD_BARKER_MSK (0xffffff << BARKER_DNLOAD_BARKER_POS)
-
-#define IWMC_BARKER_REBOOT (0xdeadbe << BARKER_DNLOAD_BARKER_POS)
-/* whole field barker */
-#define IWMC_BARKER_ACK 0xfeedbabe
-
-#define IWMC_CMD_SIGNATURE 0xcbbc
-
-#define CMD_HDR_OPCODE_POS 0
-#define CMD_HDR_OPCODE_MSK (0xf << CMD_HDR_OPCODE_POS)
-#define CMD_HDR_RESPONSE_CODE_POS 4
-#define CMD_HDR_RESPONSE_CODE_MSK (0xf << CMD_HDR_RESPONSE_CODE_POS)
-#define CMD_HDR_USE_CHECKSUM_POS 8
-#define CMD_HDR_USE_CHECKSUM_MSK BIT(CMD_HDR_USE_CHECKSUM_POS)
-#define CMD_HDR_RESPONSE_REQUIRED_POS 9
-#define CMD_HDR_RESPONSE_REQUIRED_MSK BIT(CMD_HDR_RESPONSE_REQUIRED_POS)
-#define CMD_HDR_DIRECT_ACCESS_POS 10
-#define CMD_HDR_DIRECT_ACCESS_MSK BIT(CMD_HDR_DIRECT_ACCESS_POS)
-#define CMD_HDR_RESERVED_POS 11
-#define CMD_HDR_RESERVED_MSK (0x1f << CMD_HDR_RESERVED_POS)
-#define CMD_HDR_SIGNATURE_POS 16
-#define CMD_HDR_SIGNATURE_MSK (0xffff << CMD_HDR_SIGNATURE_POS)
-
-enum {
- IWMC_OPCODE_PING = 0,
- IWMC_OPCODE_READ = 1,
- IWMC_OPCODE_WRITE = 2,
- IWMC_OPCODE_JUMP = 3,
- IWMC_OPCODE_REBOOT = 4,
- IWMC_OPCODE_PERSISTENT_WRITE = 5,
- IWMC_OPCODE_PERSISTENT_READ = 6,
- IWMC_OPCODE_READ_MODIFY_WRITE = 7,
- IWMC_OPCODE_LAST_COMMAND = 15
-};
-
-struct iwmct_fw_load_hdr {
- __le32 cmd;
- __le32 target_addr;
- __le32 data_size;
- __le32 block_chksm;
- u8 data[0];
-};
-
-/**
- * struct iwmct_fw_hdr
- * holds all sw components versions
- */
-struct iwmct_fw_hdr {
- u8 top_major;
- u8 top_minor;
- u8 top_revision;
- u8 gps_major;
- u8 gps_minor;
- u8 gps_revision;
- u8 bt_major;
- u8 bt_minor;
- u8 bt_revision;
- u8 tic_name[31];
-};
-
-/**
- * struct iwmct_fw_sec_hdr
- * @type: function type
- * @data_size: section's data size
- * @target_addr: download address
- */
-struct iwmct_fw_sec_hdr {
- u8 type[4];
- __le32 data_size;
- __le32 target_addr;
-};
-
-/**
- * struct iwmct_parser
- * @file: fw image
- * @file_size: fw size
- * @cur_pos: position in file
- * @buf: temp buf for download
- * @buf_size: size of buf
- * @entry_point: address to jump in fw kick-off
- */
-struct iwmct_parser {
- const u8 *file;
- size_t file_size;
- size_t cur_pos;
- u8 *buf;
- size_t buf_size;
- u32 entry_point;
- struct iwmct_fw_hdr versions;
-};
-
-
-struct iwmct_work_struct {
- struct list_head list;
- ssize_t iosize;
-};
-
-struct iwmct_dbg {
- int blocks;
- bool dump;
- bool jump;
- bool direct;
- bool checksum;
- bool fw_download;
- int block_size;
- int download_trans_blks;
-
- char label_fw[256];
-};
-
-struct iwmct_debugfs;
-
-struct iwmct_priv {
- struct sdio_func *func;
- struct iwmct_debugfs *dbgfs;
- struct iwmct_parser parser;
- atomic_t reset;
- atomic_t dev_sync;
- u32 trans_len;
- u32 barker;
- struct iwmct_dbg dbg;
-
- /* drivers work items */
- struct work_struct bus_rescan_worker;
- struct work_struct isr_worker;
-
- /* drivers wait queue */
- wait_queue_head_t wait_q;
-
- /* rx request list */
- struct list_head read_req_list;
-};
-
-extern int iwmct_tx(struct iwmct_priv *priv, void *src, int count);
-extern int iwmct_fw_load(struct iwmct_priv *priv);
-
-extern void iwmct_dbg_init_params(struct iwmct_priv *drv);
-extern void iwmct_dbg_init_drv_attrs(struct device_driver *drv);
-extern void iwmct_dbg_remove_drv_attrs(struct device_driver *drv);
-extern int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len);
-
-#endif /* __IWMC3200TOP_H__ */
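The removed iwmc3200top.h packs download-request flags into the low bits of a 32-bit word and a 24-bit barker signature into bits 8..31; the IRQ read worker later in this diff keys its reboot/sync handling off exactly that decoding. A minimal userspace sketch of the same bit layout, with an invented example word:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))

/* Same layout as the removed BARKER_DNLOAD_* definitions */
#define DNLOAD_JUMP_MSK		BIT(4)
#define DNLOAD_SYNC_MSK		BIT(5)
#define DNLOAD_BARKER_POS	8
#define DNLOAD_BARKER_MSK	(0xffffffu << DNLOAD_BARKER_POS)
#define BARKER_REBOOT		(0xdeadbeu << DNLOAD_BARKER_POS)

int main(void)
{
	/* example word: reboot barker with JUMP and SYNC requested */
	uint32_t word = BARKER_REBOOT | DNLOAD_JUMP_MSK | DNLOAD_SYNC_MSK;

	if ((word & DNLOAD_BARKER_MSK) == BARKER_REBOOT)
		printf("reboot barker, jump=%d sync=%d\n",
		       !!(word & DNLOAD_JUMP_MSK),
		       !!(word & DNLOAD_SYNC_MSK));
	return 0;
}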
diff --git a/drivers/misc/iwmc3200top/log.c b/drivers/misc/iwmc3200top/log.c
deleted file mode 100644
index a36a55a..0000000
--- a/drivers/misc/iwmc3200top/log.c
+++ /dev/null
@@ -1,348 +0,0 @@
-/*
- * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
- * drivers/misc/iwmc3200top/log.c
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
- * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
- * -
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-#include "fw-msg.h"
-#include "iwmc3200top.h"
-#include "log.h"
-
-/* Maximal hexadecimal string size of the FW memdump message */
-#define LOG_MSG_SIZE_MAX 12400
-
-/* iwmct_logdefs is a global used by log macros */
-u8 iwmct_logdefs[LOG_SRC_MAX];
-static u8 iwmct_fw_logdefs[FW_LOG_SRC_MAX];
-
-
-static int _log_set_log_filter(u8 *logdefs, int size, u8 src, u8 logmask)
-{
- int i;
-
- if (src < size)
- logdefs[src] = logmask;
- else if (src == LOG_SRC_ALL)
- for (i = 0; i < size; i++)
- logdefs[i] = logmask;
- else
- return -1;
-
- return 0;
-}
-
-
-int iwmct_log_set_filter(u8 src, u8 logmask)
-{
- return _log_set_log_filter(iwmct_logdefs, LOG_SRC_MAX, src, logmask);
-}
-
-
-int iwmct_log_set_fw_filter(u8 src, u8 logmask)
-{
- return _log_set_log_filter(iwmct_fw_logdefs,
- FW_LOG_SRC_MAX, src, logmask);
-}
-
-
-static int log_msg_format_hex(char *str, int slen, u8 *ibuf,
- int ilen, char *pref)
-{
- int pos = 0;
- int i;
- int len;
-
- for (pos = 0, i = 0; pos < slen - 2 && pref[i] != '\0'; i++, pos++)
- str[pos] = pref[i];
-
- for (i = 0; pos < slen - 2 && i < ilen; pos += len, i++)
- len = snprintf(&str[pos], slen - pos - 1, " %2.2X", ibuf[i]);
-
- if (i < ilen)
- return -1;
-
- return 0;
-}
-
-/* NOTE: This function is not thread safe.
- Currently it's called only from sdio rx worker - no race there
-*/
-void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len)
-{
- struct top_msg *msg;
- static char logbuf[LOG_MSG_SIZE_MAX];
-
- msg = (struct top_msg *)buf;
-
- if (len < sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr)) {
- LOG_ERROR(priv, FW_MSG, "Log message from TOP "
- "is too short %d (expected %zd)\n",
- len, sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr));
- return;
- }
-
- if (!(iwmct_fw_logdefs[msg->u.log.log_hdr.logsource] &
- BIT(msg->u.log.log_hdr.severity)) ||
- !(iwmct_logdefs[LOG_SRC_FW_MSG] & BIT(msg->u.log.log_hdr.severity)))
- return;
-
- switch (msg->hdr.category) {
- case COMM_CATEGORY_TESTABILITY:
- if (!(iwmct_logdefs[LOG_SRC_TST] &
- BIT(msg->u.log.log_hdr.severity)))
- return;
- if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
- le16_to_cpu(msg->hdr.length) +
- sizeof(msg->hdr), "<TST>"))
- LOG_WARNING(priv, TST,
- "TOP TST message is too long, truncating...");
- LOG_WARNING(priv, TST, "%s\n", logbuf);
- break;
- case COMM_CATEGORY_DEBUG:
- if (msg->hdr.opcode == OP_DBG_ZSTR_MSG)
- LOG_INFO(priv, FW_MSG, "%s %s", "<DBG>",
- ((u8 *)msg) + sizeof(msg->hdr)
- + sizeof(msg->u.log.log_hdr));
- else {
- if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
- le16_to_cpu(msg->hdr.length)
- + sizeof(msg->hdr),
- "<DBG>"))
- LOG_WARNING(priv, FW_MSG,
- "TOP DBG message is too long,"
- "truncating...");
- LOG_WARNING(priv, FW_MSG, "%s\n", logbuf);
- }
- break;
- default:
- break;
- }
-}
-
-static int _log_get_filter_str(u8 *logdefs, int logdefsz, char *buf, int size)
-{
- int i, pos, len;
- for (i = 0, pos = 0; (pos < size-1) && (i < logdefsz); i++) {
- len = snprintf(&buf[pos], size - pos - 1, "0x%02X%02X,",
- i, logdefs[i]);
- pos += len;
- }
- buf[pos-1] = '\n';
- buf[pos] = '\0';
-
- if (i < logdefsz)
- return -1;
- return 0;
-}
-
-int log_get_filter_str(char *buf, int size)
-{
- return _log_get_filter_str(iwmct_logdefs, LOG_SRC_MAX, buf, size);
-}
-
-int log_get_fw_filter_str(char *buf, int size)
-{
- return _log_get_filter_str(iwmct_fw_logdefs, FW_LOG_SRC_MAX, buf, size);
-}
-
-#define HEXADECIMAL_RADIX 16
-#define LOG_SRC_FORMAT 7 /* log level is in format of "0xXXXX," */
-
-ssize_t show_iwmct_log_level(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwmct_priv *priv = dev_get_drvdata(d);
- char *str_buf;
- int buf_size;
- ssize_t ret;
-
- buf_size = (LOG_SRC_FORMAT * LOG_SRC_MAX) + 1;
- str_buf = kzalloc(buf_size, GFP_KERNEL);
- if (!str_buf) {
- LOG_ERROR(priv, DEBUGFS,
- "failed to allocate %d bytes\n", buf_size);
- ret = -ENOMEM;
- goto exit;
- }
-
- if (log_get_filter_str(str_buf, buf_size) < 0) {
- ret = -EINVAL;
- goto exit;
- }
-
- ret = sprintf(buf, "%s", str_buf);
-
-exit:
- kfree(str_buf);
- return ret;
-}
-
-ssize_t store_iwmct_log_level(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwmct_priv *priv = dev_get_drvdata(d);
- char *token, *str_buf = NULL;
- long val;
- ssize_t ret = count;
- u8 src, mask;
-
- if (!count)
- goto exit;
-
- str_buf = kzalloc(count, GFP_KERNEL);
- if (!str_buf) {
- LOG_ERROR(priv, DEBUGFS,
- "failed to allocate %zd bytes\n", count);
- ret = -ENOMEM;
- goto exit;
- }
-
- memcpy(str_buf, buf, count);
-
- while ((token = strsep(&str_buf, ",")) != NULL) {
- while (isspace(*token))
- ++token;
- if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
- LOG_ERROR(priv, DEBUGFS,
- "failed to convert string to long %s\n",
- token);
- ret = -EINVAL;
- goto exit;
- }
-
- mask = val & 0xFF;
- src = (val & 0XFF00) >> 8;
- iwmct_log_set_filter(src, mask);
- }
-
-exit:
- kfree(str_buf);
- return ret;
-}
-
-ssize_t show_iwmct_log_level_fw(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwmct_priv *priv = dev_get_drvdata(d);
- char *str_buf;
- int buf_size;
- ssize_t ret;
-
- buf_size = (LOG_SRC_FORMAT * FW_LOG_SRC_MAX) + 2;
-
- str_buf = kzalloc(buf_size, GFP_KERNEL);
- if (!str_buf) {
- LOG_ERROR(priv, DEBUGFS,
- "failed to allocate %d bytes\n", buf_size);
- ret = -ENOMEM;
- goto exit;
- }
-
- if (log_get_fw_filter_str(str_buf, buf_size) < 0) {
- ret = -EINVAL;
- goto exit;
- }
-
- ret = sprintf(buf, "%s", str_buf);
-
-exit:
- kfree(str_buf);
- return ret;
-}
-
-ssize_t store_iwmct_log_level_fw(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwmct_priv *priv = dev_get_drvdata(d);
- struct top_msg cmd;
- char *token, *str_buf = NULL;
- ssize_t ret = count;
- u16 cmdlen = 0;
- int i;
- long val;
- u8 src, mask;
-
- if (!count)
- goto exit;
-
- str_buf = kzalloc(count, GFP_KERNEL);
- if (!str_buf) {
- LOG_ERROR(priv, DEBUGFS,
- "failed to allocate %zd bytes\n", count);
- ret = -ENOMEM;
- goto exit;
- }
-
- memcpy(str_buf, buf, count);
-
- cmd.hdr.type = COMM_TYPE_H2D;
- cmd.hdr.category = COMM_CATEGORY_DEBUG;
- cmd.hdr.opcode = CMD_DBG_LOG_LEVEL;
-
- for (i = 0; ((token = strsep(&str_buf, ",")) != NULL) &&
- (i < FW_LOG_SRC_MAX); i++) {
-
- while (isspace(*token))
- ++token;
-
- if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
- LOG_ERROR(priv, DEBUGFS,
- "failed to convert string to long %s\n",
- token);
- ret = -EINVAL;
- goto exit;
- }
-
- mask = val & 0xFF; /* LSB */
- src = (val & 0XFF00) >> 8; /* 2nd least significant byte. */
- iwmct_log_set_fw_filter(src, mask);
-
- cmd.u.logdefs[i].logsource = src;
- cmd.u.logdefs[i].sevmask = mask;
- }
-
- cmd.hdr.length = cpu_to_le16(i * sizeof(cmd.u.logdefs[0]));
- cmdlen = (i * sizeof(cmd.u.logdefs[0]) + sizeof(cmd.hdr));
-
- ret = iwmct_send_hcmd(priv, (u8 *)&cmd, cmdlen);
- if (ret) {
- LOG_ERROR(priv, DEBUGFS,
- "Failed to send %d bytes of fwcmd, ret=%zd\n",
- cmdlen, ret);
- goto exit;
- } else
- LOG_INFO(priv, DEBUGFS, "fwcmd sent (%d bytes)\n", cmdlen);
-
- ret = count;
-
-exit:
- kfree(str_buf);
- return ret;
-}
-
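The store handlers removed above accept a comma-separated list of "0xSSMM" tokens, where the high byte selects the log source and the low byte is the severity bitmask. A small userspace sketch of the same token format, using strsep()/strtol() in place of the kernel helpers (the input string is made up):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char input[] = "0x0307, 0x041f";	/* invented filter string */
	char *cursor = input, *token;

	while ((token = strsep(&cursor, ",")) != NULL) {
		while (*token == ' ')
			token++;

		/* high byte = log source, low byte = severity mask */
		long val = strtol(token, NULL, 16);
		unsigned src  = (val & 0xff00) >> 8;
		unsigned mask = val & 0x00ff;

		printf("source %u -> severity mask 0x%02X\n", src, mask);
	}
	return 0;
}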
diff --git a/drivers/misc/iwmc3200top/log.h b/drivers/misc/iwmc3200top/log.h
deleted file mode 100644
index 4434bb1..0000000
--- a/drivers/misc/iwmc3200top/log.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
- * drivers/misc/iwmc3200top/log.h
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
- * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
- * -
- *
- */
-
-#ifndef __LOG_H__
-#define __LOG_H__
-
-
-/* log severity:
- * The log levels here match FW log levels
- * so values need to stay as is */
-#define LOG_SEV_CRITICAL 0
-#define LOG_SEV_ERROR 1
-#define LOG_SEV_WARNING 2
-#define LOG_SEV_INFO 3
-#define LOG_SEV_INFOEX 4
-
-/* Log levels not defined for FW */
-#define LOG_SEV_TRACE 5
-#define LOG_SEV_DUMP 6
-
-#define LOG_SEV_FW_FILTER_ALL \
- (BIT(LOG_SEV_CRITICAL) | \
- BIT(LOG_SEV_ERROR) | \
- BIT(LOG_SEV_WARNING) | \
- BIT(LOG_SEV_INFO) | \
- BIT(LOG_SEV_INFOEX))
-
-#define LOG_SEV_FILTER_ALL \
- (BIT(LOG_SEV_CRITICAL) | \
- BIT(LOG_SEV_ERROR) | \
- BIT(LOG_SEV_WARNING) | \
- BIT(LOG_SEV_INFO) | \
- BIT(LOG_SEV_INFOEX) | \
- BIT(LOG_SEV_TRACE) | \
- BIT(LOG_SEV_DUMP))
-
-/* log source */
-#define LOG_SRC_INIT 0
-#define LOG_SRC_DEBUGFS 1
-#define LOG_SRC_FW_DOWNLOAD 2
-#define LOG_SRC_FW_MSG 3
-#define LOG_SRC_TST 4
-#define LOG_SRC_IRQ 5
-
-#define LOG_SRC_MAX 6
-#define LOG_SRC_ALL 0xFF
-
-/**
- * Default initialization runtime log level
- */
-#ifndef LOG_SEV_FILTER_RUNTIME
-#define LOG_SEV_FILTER_RUNTIME \
- (BIT(LOG_SEV_CRITICAL) | \
- BIT(LOG_SEV_ERROR) | \
- BIT(LOG_SEV_WARNING))
-#endif
-
-#ifndef FW_LOG_SEV_FILTER_RUNTIME
-#define FW_LOG_SEV_FILTER_RUNTIME LOG_SEV_FILTER_ALL
-#endif
-
-#ifdef CONFIG_IWMC3200TOP_DEBUG
-/**
- * Log macros
- */
-
-#define priv2dev(priv) (&(priv->func)->dev)
-
-#define LOG_CRITICAL(priv, src, fmt, args...) \
-do { \
- if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_CRITICAL)) \
- dev_crit(priv2dev(priv), "%s %d: " fmt, \
- __func__, __LINE__, ##args); \
-} while (0)
-
-#define LOG_ERROR(priv, src, fmt, args...) \
-do { \
- if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_ERROR)) \
- dev_err(priv2dev(priv), "%s %d: " fmt, \
- __func__, __LINE__, ##args); \
-} while (0)
-
-#define LOG_WARNING(priv, src, fmt, args...) \
-do { \
- if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_WARNING)) \
- dev_warn(priv2dev(priv), "%s %d: " fmt, \
- __func__, __LINE__, ##args); \
-} while (0)
-
-#define LOG_INFO(priv, src, fmt, args...) \
-do { \
- if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFO)) \
- dev_info(priv2dev(priv), "%s %d: " fmt, \
- __func__, __LINE__, ##args); \
-} while (0)
-
-#define LOG_TRACE(priv, src, fmt, args...) \
-do { \
- if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_TRACE)) \
- dev_dbg(priv2dev(priv), "%s %d: " fmt, \
- __func__, __LINE__, ##args); \
-} while (0)
-
-#define LOG_HEXDUMP(src, ptr, len) \
-do { \
- if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_DUMP)) \
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, \
- 16, 1, ptr, len, false); \
-} while (0)
-
-void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len);
-
-extern u8 iwmct_logdefs[];
-
-int iwmct_log_set_filter(u8 src, u8 logmask);
-int iwmct_log_set_fw_filter(u8 src, u8 logmask);
-
-ssize_t show_iwmct_log_level(struct device *d,
- struct device_attribute *attr, char *buf);
-ssize_t store_iwmct_log_level(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count);
-ssize_t show_iwmct_log_level_fw(struct device *d,
- struct device_attribute *attr, char *buf);
-ssize_t store_iwmct_log_level_fw(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count);
-
-#else
-
-#define LOG_CRITICAL(priv, src, fmt, args...)
-#define LOG_ERROR(priv, src, fmt, args...)
-#define LOG_WARNING(priv, src, fmt, args...)
-#define LOG_INFO(priv, src, fmt, args...)
-#define LOG_TRACE(priv, src, fmt, args...)
-#define LOG_HEXDUMP(src, ptr, len)
-
-static inline void iwmct_log_top_message(struct iwmct_priv *priv,
- u8 *buf, int len) {}
-static inline int iwmct_log_set_filter(u8 src, u8 logmask) { return 0; }
-static inline int iwmct_log_set_fw_filter(u8 src, u8 logmask) { return 0; }
-
-#endif /* CONFIG_IWMC3200TOP_DEBUG */
-
-int log_get_filter_str(char *buf, int size);
-int log_get_fw_filter_str(char *buf, int size);
-
-#endif /* __LOG_H__ */
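log.h wraps every log call in a macro that checks a per-source severity bitmask when CONFIG_IWMC3200TOP_DEBUG is set and compiles to nothing otherwise, so call sites stay unconditional. A compact userspace sketch of that pattern (the macro and array names here are invented, and DEMO_DEBUG stands in for the Kconfig switch):

#include <stdio.h>

#define LOG_SEV_INFO	3
#define BIT(n)		(1u << (n))

/* Compile-time switch: with DEMO_DEBUG the macro checks a runtime
 * bitmask, without it the call site compiles away entirely. */
#define DEMO_DEBUG 1

static unsigned char logdefs[4];

#if DEMO_DEBUG
#define LOG_INFO(src, fmt, args...)					\
do {									\
	if (logdefs[src] & BIT(LOG_SEV_INFO))				\
		printf("[src %d] " fmt, src, ##args);			\
} while (0)
#else
#define LOG_INFO(src, fmt, args...)
#endif

int main(void)
{
	logdefs[0] = BIT(LOG_SEV_INFO);	/* enable INFO for source 0 only */
	LOG_INFO(0, "visible message\n");
	LOG_INFO(1, "filtered out\n");
	return 0;
}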
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
deleted file mode 100644
index 701eb60..0000000
--- a/drivers/misc/iwmc3200top/main.c
+++ /dev/null
@@ -1,662 +0,0 @@
-/*
- * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
- * drivers/misc/iwmc3200top/main.c
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
- * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
- * -
- *
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/debugfs.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio.h>
-
-#include "iwmc3200top.h"
-#include "log.h"
-#include "fw-msg.h"
-#include "debugfs.h"
-
-
-#define DRIVER_DESCRIPTION "Intel(R) IWMC 3200 Top Driver"
-#define DRIVER_COPYRIGHT "Copyright (c) 2008 Intel Corporation."
-
-#define DRIVER_VERSION "0.1.62"
-
-MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
-MODULE_VERSION(DRIVER_VERSION);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR(DRIVER_COPYRIGHT);
-MODULE_FIRMWARE(FW_NAME(FW_API_VER));
-
-
-static inline int __iwmct_tx(struct iwmct_priv *priv, void *src, int count)
-{
- return sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, src, count);
-
-}
-int iwmct_tx(struct iwmct_priv *priv, void *src, int count)
-{
- int ret;
- sdio_claim_host(priv->func);
- ret = __iwmct_tx(priv, src, count);
- sdio_release_host(priv->func);
- return ret;
-}
-/*
- * This worker's main task is to wait for OP_OPR_ALIVE
- * from TOP FW until the ALIVE_MSG_TIMOUT timeout has elapsed.
- * When OP_OPR_ALIVE is received it will issue
- * a call to "bus_rescan_devices".
- */
-static void iwmct_rescan_worker(struct work_struct *ws)
-{
- struct iwmct_priv *priv;
- int ret;
-
- priv = container_of(ws, struct iwmct_priv, bus_rescan_worker);
-
- LOG_INFO(priv, FW_MSG, "Calling bus_rescan\n");
-
- ret = bus_rescan_devices(priv->func->dev.bus);
- if (ret < 0)
- LOG_INFO(priv, INIT, "bus_rescan_devices FAILED!!!\n");
-}
-
-static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg)
-{
- switch (msg->hdr.opcode) {
- case OP_OPR_ALIVE:
- LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n");
- schedule_work(&priv->bus_rescan_worker);
- break;
- default:
- LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n",
- msg->hdr.opcode);
- break;
- }
-}
-
-
-static void handle_top_message(struct iwmct_priv *priv, u8 *buf, int len)
-{
- struct top_msg *msg;
-
- msg = (struct top_msg *)buf;
-
- if (msg->hdr.type != COMM_TYPE_D2H) {
- LOG_ERROR(priv, FW_MSG,
- "Message from TOP with invalid message type 0x%X\n",
- msg->hdr.type);
- return;
- }
-
- if (len < sizeof(msg->hdr)) {
- LOG_ERROR(priv, FW_MSG,
- "Message from TOP is too short for message header "
- "received %d bytes, expected at least %zd bytes\n",
- len, sizeof(msg->hdr));
- return;
- }
-
- if (len < le16_to_cpu(msg->hdr.length) + sizeof(msg->hdr)) {
- LOG_ERROR(priv, FW_MSG,
- "Message length (%d bytes) is shorter than "
- "in header (%d bytes)\n",
- len, le16_to_cpu(msg->hdr.length));
- return;
- }
-
- switch (msg->hdr.category) {
- case COMM_CATEGORY_OPERATIONAL:
- op_top_message(priv, (struct top_msg *)buf);
- break;
-
- case COMM_CATEGORY_DEBUG:
- case COMM_CATEGORY_TESTABILITY:
- case COMM_CATEGORY_DIAGNOSTICS:
- iwmct_log_top_message(priv, buf, len);
- break;
-
- default:
- LOG_ERROR(priv, FW_MSG,
- "Message from TOP with unknown category 0x%X\n",
- msg->hdr.category);
- break;
- }
-}
-
-int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
-{
- int ret;
- u8 *buf;
-
- LOG_TRACE(priv, FW_MSG, "Sending hcmd:\n");
-
- /* add padding to 256 for IWMC */
- ((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256;
-
- LOG_HEXDUMP(FW_MSG, cmd, len);
-
- if (len > FW_HCMD_BLOCK_SIZE) {
- LOG_ERROR(priv, FW_MSG, "size %d exceeded hcmd max size %d\n",
- len, FW_HCMD_BLOCK_SIZE);
- return -1;
- }
-
- buf = kzalloc(FW_HCMD_BLOCK_SIZE, GFP_KERNEL);
- if (!buf) {
- LOG_ERROR(priv, FW_MSG, "kzalloc error, buf size %d\n",
- FW_HCMD_BLOCK_SIZE);
- return -1;
- }
-
- memcpy(buf, cmd, len);
- ret = iwmct_tx(priv, buf, FW_HCMD_BLOCK_SIZE);
-
- kfree(buf);
- return ret;
-}
-
-
-static void iwmct_irq_read_worker(struct work_struct *ws)
-{
- struct iwmct_priv *priv;
- struct iwmct_work_struct *read_req;
- __le32 *buf = NULL;
- int ret;
- int iosize;
- u32 barker;
- bool is_barker;
-
- priv = container_of(ws, struct iwmct_priv, isr_worker);
-
- LOG_TRACE(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);
-
- /* --------------------- Handshake with device -------------------- */
- sdio_claim_host(priv->func);
-
- /* all list manipulations have to be protected by
- * sdio_claim_host/sdio_release_host */
- if (list_empty(&priv->read_req_list)) {
- LOG_ERROR(priv, IRQ, "read_req_list empty in read worker\n");
- goto exit_release;
- }
-
- read_req = list_entry(priv->read_req_list.next,
- struct iwmct_work_struct, list);
-
- list_del(&read_req->list);
- iosize = read_req->iosize;
- kfree(read_req);
-
- buf = kzalloc(iosize, GFP_KERNEL);
- if (!buf) {
- LOG_ERROR(priv, IRQ, "kzalloc error, buf size %d\n", iosize);
- goto exit_release;
- }
-
- LOG_INFO(priv, IRQ, "iosize=%d, buf=%p, func=%d\n",
- iosize, buf, priv->func->num);
-
- /* read from device */
- ret = sdio_memcpy_fromio(priv->func, buf, IWMC_SDIO_DATA_ADDR, iosize);
- if (ret) {
- LOG_ERROR(priv, IRQ, "error %d reading buffer\n", ret);
- goto exit_release;
- }
-
- LOG_HEXDUMP(IRQ, (u8 *)buf, iosize);
-
- barker = le32_to_cpu(buf[0]);
-
- /* Verify whether it's a barker and if not - treat as regular Rx */
- if (barker == IWMC_BARKER_ACK ||
- (barker & BARKER_DNLOAD_BARKER_MSK) == IWMC_BARKER_REBOOT) {
-
- /* Valid Barker is equal on first 4 dwords */
- is_barker = (buf[1] == buf[0]) &&
- (buf[2] == buf[0]) &&
- (buf[3] == buf[0]);
-
- if (!is_barker) {
- LOG_WARNING(priv, IRQ,
- "Potentially inconsistent barker "
- "%08X_%08X_%08X_%08X\n",
- le32_to_cpu(buf[0]), le32_to_cpu(buf[1]),
- le32_to_cpu(buf[2]), le32_to_cpu(buf[3]));
- }
- } else {
- is_barker = false;
- }
-
- /* Handle Top CommHub message */
- if (!is_barker) {
- sdio_release_host(priv->func);
- handle_top_message(priv, (u8 *)buf, iosize);
- goto exit;
- } else if (barker == IWMC_BARKER_ACK) { /* Handle barkers */
- if (atomic_read(&priv->dev_sync) == 0) {
- LOG_ERROR(priv, IRQ,
- "ACK barker arrived out-of-sync\n");
- goto exit_release;
- }
-
- /* Continuing to FW download (after Sync is completed)*/
- atomic_set(&priv->dev_sync, 0);
- LOG_INFO(priv, IRQ, "ACK barker arrived "
- "- starting FW download\n");
- } else { /* REBOOT barker */
- LOG_INFO(priv, IRQ, "Received reboot barker: %x\n", barker);
- priv->barker = barker;
-
- if (barker & BARKER_DNLOAD_SYNC_MSK) {
- /* Send the same barker back */
- ret = __iwmct_tx(priv, buf, iosize);
- if (ret) {
- LOG_ERROR(priv, IRQ,
- "error %d echoing barker\n", ret);
- goto exit_release;
- }
- LOG_INFO(priv, IRQ, "Echoing barker to device\n");
- atomic_set(&priv->dev_sync, 1);
- goto exit_release;
- }
-
- /* Continuing to FW download (without Sync) */
- LOG_INFO(priv, IRQ, "No sync requested "
- "- starting FW download\n");
- }
-
- sdio_release_host(priv->func);
-
- if (priv->dbg.fw_download)
- iwmct_fw_load(priv);
- else
- LOG_ERROR(priv, IRQ, "FW download not allowed\n");
-
- goto exit;
-
-exit_release:
- sdio_release_host(priv->func);
-exit:
- kfree(buf);
- LOG_TRACE(priv, IRQ, "exit iwmct_irq_read_worker\n");
-}
-
-static void iwmct_irq(struct sdio_func *func)
-{
- struct iwmct_priv *priv;
- int val, ret;
- int iosize;
- int addr = IWMC_SDIO_INTR_GET_SIZE_ADDR;
- struct iwmct_work_struct *read_req;
-
- priv = sdio_get_drvdata(func);
-
- LOG_TRACE(priv, IRQ, "enter iwmct_irq\n");
-
- /* read the function's status register */
- val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret);
-
- LOG_TRACE(priv, IRQ, "iir value = %d, ret=%d\n", val, ret);
-
- if (!val) {
- LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n");
- goto exit_clear_intr;
- }
-
-
- /*
- * read 2 bytes of the transaction size
- * IMPORTANT: sdio transaction size has to be read before clearing
- * sdio interrupt!!!
- */
- val = sdio_readb(priv->func, addr++, &ret);
- iosize = val;
- val = sdio_readb(priv->func, addr++, &ret);
- iosize += val << 8;
-
- LOG_INFO(priv, IRQ, "READ size %d\n", iosize);
-
- if (iosize == 0) {
- LOG_ERROR(priv, IRQ, "READ size %d, exiting ISR\n", iosize);
- goto exit_clear_intr;
- }
-
- /* allocate a work structure to pass iosize to the worker */
- read_req = kzalloc(sizeof(struct iwmct_work_struct), GFP_KERNEL);
- if (!read_req) {
- LOG_ERROR(priv, IRQ, "failed to allocate read_req, exit ISR\n");
- goto exit_clear_intr;
- }
-
- INIT_LIST_HEAD(&read_req->list);
- read_req->iosize = iosize;
-
- list_add_tail(&priv->read_req_list, &read_req->list);
-
- /* clear the function's interrupt request bit (write 1 to clear) */
- sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
-
- schedule_work(&priv->isr_worker);
-
- LOG_TRACE(priv, IRQ, "exit iwmct_irq\n");
-
- return;
-
-exit_clear_intr:
- /* clear the function's interrupt request bit (write 1 to clear) */
- sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
-}
-
-
-static int blocks;
-module_param(blocks, int, 0604);
-MODULE_PARM_DESC(blocks, "max_blocks_to_send");
-
-static bool dump;
-module_param(dump, bool, 0604);
-MODULE_PARM_DESC(dump, "dump_hex_content");
-
-static bool jump = 1;
-module_param(jump, bool, 0604);
-
-static bool direct = 1;
-module_param(direct, bool, 0604);
-
-static bool checksum = 1;
-module_param(checksum, bool, 0604);
-
-static bool fw_download = 1;
-module_param(fw_download, bool, 0604);
-
-static int block_size = IWMC_SDIO_BLK_SIZE;
-module_param(block_size, int, 0404);
-
-static int download_trans_blks = IWMC_DEFAULT_TR_BLK;
-module_param(download_trans_blks, int, 0604);
-
-static bool rubbish_barker;
-module_param(rubbish_barker, bool, 0604);
-
-#ifdef CONFIG_IWMC3200TOP_DEBUG
-static int log_level[LOG_SRC_MAX];
-static unsigned int log_level_argc;
-module_param_array(log_level, int, &log_level_argc, 0604);
-MODULE_PARM_DESC(log_level, "log_level");
-
-static int log_level_fw[FW_LOG_SRC_MAX];
-static unsigned int log_level_fw_argc;
-module_param_array(log_level_fw, int, &log_level_fw_argc, 0604);
-MODULE_PARM_DESC(log_level_fw, "log_level_fw");
-#endif
-
-void iwmct_dbg_init_params(struct iwmct_priv *priv)
-{
-#ifdef CONFIG_IWMC3200TOP_DEBUG
- int i;
-
- for (i = 0; i < log_level_argc; i++) {
- dev_notice(&priv->func->dev, "log_level[%d]=0x%X\n",
- i, log_level[i]);
- iwmct_log_set_filter((log_level[i] >> 8) & 0xFF,
- log_level[i] & 0xFF);
- }
- for (i = 0; i < log_level_fw_argc; i++) {
- dev_notice(&priv->func->dev, "log_level_fw[%d]=0x%X\n",
- i, log_level_fw[i]);
- iwmct_log_set_fw_filter((log_level_fw[i] >> 8) & 0xFF,
- log_level_fw[i] & 0xFF);
- }
-#endif
-
- priv->dbg.blocks = blocks;
- LOG_INFO(priv, INIT, "blocks=%d\n", blocks);
- priv->dbg.dump = (bool)dump;
- LOG_INFO(priv, INIT, "dump=%d\n", dump);
- priv->dbg.jump = (bool)jump;
- LOG_INFO(priv, INIT, "jump=%d\n", jump);
- priv->dbg.direct = (bool)direct;
- LOG_INFO(priv, INIT, "direct=%d\n", direct);
- priv->dbg.checksum = (bool)checksum;
- LOG_INFO(priv, INIT, "checksum=%d\n", checksum);
- priv->dbg.fw_download = (bool)fw_download;
- LOG_INFO(priv, INIT, "fw_download=%d\n", fw_download);
- priv->dbg.block_size = block_size;
- LOG_INFO(priv, INIT, "block_size=%d\n", block_size);
- priv->dbg.download_trans_blks = download_trans_blks;
- LOG_INFO(priv, INIT, "download_trans_blks=%d\n", download_trans_blks);
-}
-
-/*****************************************************************************
- *
- * sysfs attributes
- *
- *****************************************************************************/
-static ssize_t show_iwmct_fw_version(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwmct_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "%s\n", priv->dbg.label_fw);
-}
-static DEVICE_ATTR(cc_label_fw, S_IRUGO, show_iwmct_fw_version, NULL);
-
-#ifdef CONFIG_IWMC3200TOP_DEBUG
-static DEVICE_ATTR(log_level, S_IWUSR | S_IRUGO,
- show_iwmct_log_level, store_iwmct_log_level);
-static DEVICE_ATTR(log_level_fw, S_IWUSR | S_IRUGO,
- show_iwmct_log_level_fw, store_iwmct_log_level_fw);
-#endif
-
-static struct attribute *iwmct_sysfs_entries[] = {
- &dev_attr_cc_label_fw.attr,
-#ifdef CONFIG_IWMC3200TOP_DEBUG
- &dev_attr_log_level.attr,
- &dev_attr_log_level_fw.attr,
-#endif
- NULL
-};
-
-static struct attribute_group iwmct_attribute_group = {
- .name = NULL, /* put in device directory */
- .attrs = iwmct_sysfs_entries,
-};
-
-
-static int iwmct_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
-{
- struct iwmct_priv *priv;
- int ret;
- int val = 1;
- int addr = IWMC_SDIO_INTR_ENABLE_ADDR;
-
- dev_dbg(&func->dev, "enter iwmct_probe\n");
-
- dev_dbg(&func->dev, "IRQ polling period id %u msecs, HZ is %d\n",
- jiffies_to_msecs(2147483647), HZ);
-
- priv = kzalloc(sizeof(struct iwmct_priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&func->dev, "kzalloc error\n");
- return -ENOMEM;
- }
- priv->func = func;
- sdio_set_drvdata(func, priv);
-
- INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker);
- INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker);
-
- init_waitqueue_head(&priv->wait_q);
-
- sdio_claim_host(func);
- /* FIXME: Remove after it is fixed in the Boot ROM upgrade */
- func->enable_timeout = 10;
-
- /* In our HW, setting the block size also wakes up the boot rom. */
- ret = sdio_set_block_size(func, priv->dbg.block_size);
- if (ret) {
- LOG_ERROR(priv, INIT,
- "sdio_set_block_size() failure: %d\n", ret);
- goto error_sdio_enable;
- }
-
- ret = sdio_enable_func(func);
- if (ret) {
- LOG_ERROR(priv, INIT, "sdio_enable_func() failure: %d\n", ret);
- goto error_sdio_enable;
- }
-
- /* init reset and dev_sync states */
- atomic_set(&priv->reset, 0);
- atomic_set(&priv->dev_sync, 0);
-
- /* init read req queue */
- INIT_LIST_HEAD(&priv->read_req_list);
-
- /* process configurable parameters */
- iwmct_dbg_init_params(priv);
- ret = sysfs_create_group(&func->dev.kobj, &iwmct_attribute_group);
- if (ret) {
- LOG_ERROR(priv, INIT, "Failed to register attributes and "
- "initialize module_params\n");
- goto error_dev_attrs;
- }
-
- iwmct_dbgfs_register(priv, DRV_NAME);
-
- if (!priv->dbg.direct && priv->dbg.download_trans_blks > 8) {
- LOG_INFO(priv, INIT,
- "Reducing transaction to 8 blocks = 2K (from %d)\n",
- priv->dbg.download_trans_blks);
- priv->dbg.download_trans_blks = 8;
- }
- priv->trans_len = priv->dbg.download_trans_blks * priv->dbg.block_size;
- LOG_INFO(priv, INIT, "Transaction length = %d\n", priv->trans_len);
-
- ret = sdio_claim_irq(func, iwmct_irq);
- if (ret) {
- LOG_ERROR(priv, INIT, "sdio_claim_irq() failure: %d\n", ret);
- goto error_claim_irq;
- }
-
-
- /* Enable function's interrupt */
- sdio_writeb(priv->func, val, addr, &ret);
- if (ret) {
- LOG_ERROR(priv, INIT, "Failure writing to "
- "Interrupt Enable Register (%d): %d\n", addr, ret);
- goto error_enable_int;
- }
-
- sdio_release_host(func);
-
- LOG_INFO(priv, INIT, "exit iwmct_probe\n");
-
- return ret;
-
-error_enable_int:
- sdio_release_irq(func);
-error_claim_irq:
- sdio_disable_func(func);
-error_dev_attrs:
- iwmct_dbgfs_unregister(priv->dbgfs);
- sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
-error_sdio_enable:
- sdio_release_host(func);
- return ret;
-}
-
-static void iwmct_remove(struct sdio_func *func)
-{
- struct iwmct_work_struct *read_req;
- struct iwmct_priv *priv = sdio_get_drvdata(func);
-
- LOG_INFO(priv, INIT, "enter\n");
-
- sdio_claim_host(func);
- sdio_release_irq(func);
- sdio_release_host(func);
-
- /* Make sure works are finished */
- flush_work_sync(&priv->bus_rescan_worker);
- flush_work_sync(&priv->isr_worker);
-
- sdio_claim_host(func);
- sdio_disable_func(func);
- sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
- iwmct_dbgfs_unregister(priv->dbgfs);
- sdio_release_host(func);
-
- /* free read requests */
- while (!list_empty(&priv->read_req_list)) {
- read_req = list_entry(priv->read_req_list.next,
- struct iwmct_work_struct, list);
-
- list_del(&read_req->list);
- kfree(read_req);
- }
-
- kfree(priv);
-}
-
-
-static const struct sdio_device_id iwmct_ids[] = {
- /* Intel Wireless MultiCom 3200 Top Driver */
- { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1404)},
- { }, /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(sdio, iwmct_ids);
-
-static struct sdio_driver iwmct_driver = {
- .probe = iwmct_probe,
- .remove = iwmct_remove,
- .name = DRV_NAME,
- .id_table = iwmct_ids,
-};
-
-static int __init iwmct_init(void)
-{
- int rc;
-
- /* Default log filter settings */
- iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME);
- iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FW_FILTER_ALL);
- iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME);
-
- rc = sdio_register_driver(&iwmct_driver);
-
- return rc;
-}
-
-static void __exit iwmct_exit(void)
-{
- sdio_unregister_driver(&iwmct_driver);
-}
-
-module_init(iwmct_init);
-module_exit(iwmct_exit);
-
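The read worker in the removed main.c treats an incoming frame as a barker only when its first four little-endian dwords carry the same value, and warns otherwise. The sketch below restates just that consistency check over a plain byte buffer in userspace; the sample frame is the 0xfeedbabe ACK pattern repeated four times.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A frame qualifies as a barker only if its first four little-endian
 * dwords are identical. */
static bool is_consistent_barker(const uint8_t *buf)
{
	uint32_t first = 0, word;
	int i, b;

	for (i = 0; i < 4; i++) {
		word = 0;
		for (b = 0; b < 4; b++)
			word |= (uint32_t)buf[i * 4 + b] << (8 * b);
		if (i == 0)
			first = word;
		else if (word != first)
			return false;
	}
	return true;
}

int main(void)
{
	/* 0xfeedbabe repeated four times, little-endian (example data) */
	uint8_t frame[16] = {
		0xbe, 0xba, 0xed, 0xfe, 0xbe, 0xba, 0xed, 0xfe,
		0xbe, 0xba, 0xed, 0xfe, 0xbe, 0xba, 0xed, 0xfe,
	};

	printf("barker consistent: %d\n", is_consistent_barker(frame));
	return 0;
}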
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 93936f1..23f5463 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -835,7 +835,7 @@ static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
struct mei_cl *cl,
struct mei_io_list *cmpl_list)
{
- if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) +
+ if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
sizeof(struct hbm_flow_control))) {
/* return the cancel routine */
list_del(&cb_pos->cb_list);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index c703332..783fcd7 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -982,7 +982,7 @@ static int __devinit mei_probe(struct pci_dev *pdev,
err = request_threaded_irq(pdev->irq,
NULL,
mei_interrupt_thread_handler,
- 0, mei_driver_name, dev);
+ IRQF_ONESHOT, mei_driver_name, dev);
else
err = request_threaded_irq(pdev->irq,
mei_interrupt_quick_handler,
@@ -992,7 +992,7 @@ static int __devinit mei_probe(struct pci_dev *pdev,
if (err) {
dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
pdev->irq);
- goto unmap_memory;
+ goto disable_msi;
}
INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
if (mei_hw_init(dev)) {
@@ -1023,8 +1023,8 @@ release_irq:
mei_disable_interrupts(dev);
flush_scheduled_work();
free_irq(pdev->irq, dev);
+disable_msi:
pci_disable_msi(pdev);
-unmap_memory:
pci_iounmap(pdev, dev->mem_addr);
free_device:
kfree(dev);
@@ -1101,6 +1101,8 @@ static void __devexit mei_remove(struct pci_dev *pdev)
pci_release_regions(pdev);
pci_disable_device(pdev);
+
+ misc_deregister(&mei_misc_device);
}
#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
@@ -1145,7 +1147,7 @@ static int mei_pci_resume(struct device *device)
err = request_threaded_irq(pdev->irq,
NULL,
mei_interrupt_thread_handler,
- 0, mei_driver_name, dev);
+ IRQF_ONESHOT, mei_driver_name, dev);
else
err = request_threaded_irq(pdev->irq,
mei_interrupt_quick_handler,
@@ -1216,7 +1218,6 @@ module_init(mei_init_module);
*/
static void __exit mei_exit_module(void)
{
- misc_deregister(&mei_misc_device);
pci_unregister_driver(&mei_driver);
pr_debug("unloaded successfully.\n");
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 6be5605..e2ec050 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -341,7 +341,7 @@ static const struct watchdog_ops wd_ops = {
};
static const struct watchdog_info wd_info = {
.identity = INTEL_AMT_WATCHDOG_ID,
- .options = WDIOF_KEEPALIVEPING,
+ .options = WDIOF_KEEPALIVEPING | WDIOF_ALARMONLY,
};
static struct watchdog_device amt_wd_dev = {
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 17bbacb..87b251a 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -452,9 +452,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
if (msg->activate_gru_mq_desc_gpa !=
part_uv->activate_gru_mq_desc_gpa) {
- spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+ spin_lock(&part_uv->flags_lock);
part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
- spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+ spin_unlock(&part_uv->flags_lock);
part_uv->activate_gru_mq_desc_gpa =
msg->activate_gru_mq_desc_gpa;
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index dd2d374..f1c84de 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -554,7 +554,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
struct mmc_request mrq = {NULL};
struct mmc_command cmd = {0};
struct mmc_data data = {0};
- unsigned int timeout_us;
struct scatterlist sg;
@@ -574,23 +573,12 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
cmd.arg = 0;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
- data.timeout_ns = card->csd.tacc_ns * 100;
- data.timeout_clks = card->csd.tacc_clks * 100;
-
- timeout_us = data.timeout_ns / 1000;
- timeout_us += data.timeout_clks * 1000 /
- (card->host->ios.clock / 1000);
-
- if (timeout_us > 100000) {
- data.timeout_ns = 100000000;
- data.timeout_clks = 0;
- }
-
data.blksz = 4;
data.blocks = 1;
data.flags = MMC_DATA_READ;
data.sg = &sg;
data.sg_len = 1;
+ mmc_set_data_timeout(&data, card);
mrq.cmd = &cmd;
mrq.data = &data;
@@ -862,9 +850,7 @@ out:
goto retry;
if (!err)
mmc_blk_reset_success(md, type);
- spin_lock_irq(&md->lock);
- __blk_end_request(req, err, blk_rq_bytes(req));
- spin_unlock_irq(&md->lock);
+ blk_end_request(req, err, blk_rq_bytes(req));
return err ? 0 : 1;
}
@@ -946,9 +932,7 @@ out_retry:
if (!err)
mmc_blk_reset_success(md, type);
out:
- spin_lock_irq(&md->lock);
- __blk_end_request(req, err, blk_rq_bytes(req));
- spin_unlock_irq(&md->lock);
+ blk_end_request(req, err, blk_rq_bytes(req));
return err ? 0 : 1;
}
@@ -963,9 +947,7 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
if (ret)
ret = -EIO;
- spin_lock_irq(&md->lock);
- __blk_end_request_all(req, ret);
- spin_unlock_irq(&md->lock);
+ blk_end_request_all(req, ret);
return ret ? 0 : 1;
}
@@ -1264,14 +1246,10 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
blocks = mmc_sd_num_wr_blocks(card);
if (blocks != (u32)-1) {
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, blocks << 9);
- spin_unlock_irq(&md->lock);
+ ret = blk_end_request(req, 0, blocks << 9);
}
} else {
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
- spin_unlock_irq(&md->lock);
+ ret = blk_end_request(req, 0, brq->data.bytes_xfered);
}
return ret;
}
@@ -1323,10 +1301,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
* A block was successfully transferred.
*/
mmc_blk_reset_success(md, type);
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0,
+ ret = blk_end_request(req, 0,
brq->data.bytes_xfered);
- spin_unlock_irq(&md->lock);
/*
* If the blk_end_request function returns non-zero even
* though all data has been transferred and no errors
@@ -1376,10 +1352,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
* time, so we only reach here after trying to
* read a single sector.
*/
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, -EIO,
+ ret = blk_end_request(req, -EIO,
brq->data.blksz);
- spin_unlock_irq(&md->lock);
if (!ret)
goto start_new_req;
break;
@@ -1400,12 +1374,10 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
return 1;
cmd_abort:
- spin_lock_irq(&md->lock);
if (mmc_card_removed(card))
req->cmd_flags |= REQ_QUIET;
while (ret)
- ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
- spin_unlock_irq(&md->lock);
+ ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
start_new_req:
if (rqc) {
@@ -1429,9 +1401,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
ret = mmc_blk_part_switch(card, md);
if (ret) {
if (req) {
- spin_lock_irq(&md->lock);
- __blk_end_request_all(req, -EIO);
- spin_unlock_irq(&md->lock);
+ blk_end_request_all(req, -EIO);
}
ret = 0;
goto out;
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index dca4428..38ed210 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -7,6 +7,6 @@ mmc_core-y := core.o bus.o host.o \
mmc.o mmc_ops.o sd.o sd_ops.o \
sdio.o sdio_ops.o sdio_bus.o \
sdio_cis.o sdio_io.o sdio_irq.o \
- quirks.o cd-gpio.o
+ quirks.o slot-gpio.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/mmc/core/cd-gpio.c b/drivers/mmc/core/cd-gpio.c
deleted file mode 100644
index f13e38d..0000000
--- a/drivers/mmc/core/cd-gpio.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Generic GPIO card-detect helper
- *
- * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/mmc/cd-gpio.h>
-#include <linux/mmc/host.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-struct mmc_cd_gpio {
- unsigned int gpio;
- char label[0];
-};
-
-static irqreturn_t mmc_cd_gpio_irqt(int irq, void *dev_id)
-{
- /* Schedule a card detection after a debounce timeout */
- mmc_detect_change(dev_id, msecs_to_jiffies(100));
- return IRQ_HANDLED;
-}
-
-int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio)
-{
- size_t len = strlen(dev_name(host->parent)) + 4;
- struct mmc_cd_gpio *cd;
- int irq = gpio_to_irq(gpio);
- int ret;
-
- if (irq < 0)
- return irq;
-
- cd = kmalloc(sizeof(*cd) + len, GFP_KERNEL);
- if (!cd)
- return -ENOMEM;
-
- snprintf(cd->label, len, "%s cd", dev_name(host->parent));
-
- ret = gpio_request_one(gpio, GPIOF_DIR_IN, cd->label);
- if (ret < 0)
- goto egpioreq;
-
- ret = request_threaded_irq(irq, NULL, mmc_cd_gpio_irqt,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- cd->label, host);
- if (ret < 0)
- goto eirqreq;
-
- cd->gpio = gpio;
- host->hotplug.irq = irq;
- host->hotplug.handler_priv = cd;
-
- return 0;
-
-eirqreq:
- gpio_free(gpio);
-egpioreq:
- kfree(cd);
- return ret;
-}
-EXPORT_SYMBOL(mmc_cd_gpio_request);
-
-void mmc_cd_gpio_free(struct mmc_host *host)
-{
- struct mmc_cd_gpio *cd = host->hotplug.handler_priv;
-
- if (!cd)
- return;
-
- free_irq(host->hotplug.irq, host);
- gpio_free(cd->gpio);
- kfree(cd);
-}
-EXPORT_SYMBOL(mmc_cd_gpio_free);
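The removed cd-gpio helper allocated its per-slot context and the IRQ label string in a single block by tacking a trailing array onto the struct. A minimal userspace sketch of that allocation pattern using a C99 flexible array member; the device name and GPIO number are made up.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Context and label share one allocation, like the removed mmc_cd_gpio. */
struct cd_ctx {
	unsigned int gpio;
	char label[];		/* C99 flexible array member */
};

int main(void)
{
	const char *parent = "mmc0";		/* example only */
	size_t len = strlen(parent) + 4;	/* " cd" plus NUL */
	struct cd_ctx *cd = malloc(sizeof(*cd) + len);

	if (!cd)
		return 1;

	cd->gpio = 42;				/* made-up GPIO number */
	snprintf(cd->label, len, "%s cd", parent);
	printf("%s (gpio %u)\n", cd->label, cd->gpio);
	free(cd);
	return 0;
}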
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 0b6141d..8ac5246 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -404,6 +404,7 @@ int mmc_interrupt_hpi(struct mmc_card *card)
{
int err;
u32 status;
+ unsigned long prg_wait;
BUG_ON(!card);
@@ -419,30 +420,38 @@ int mmc_interrupt_hpi(struct mmc_card *card)
goto out;
}
- /*
- * If the card status is in PRG-state, we can send the HPI command.
- */
- if (R1_CURRENT_STATE(status) == R1_STATE_PRG) {
- do {
- /*
- * We don't know when the HPI command will finish
- * processing, so we need to resend HPI until out
- * of prg-state, and keep checking the card status
- * with SEND_STATUS. If a timeout error occurs when
- * sending the HPI command, we are already out of
- * prg-state.
- */
- err = mmc_send_hpi_cmd(card, &status);
- if (err)
- pr_debug("%s: abort HPI (%d error)\n",
- mmc_hostname(card->host), err);
+ switch (R1_CURRENT_STATE(status)) {
+ case R1_STATE_IDLE:
+ case R1_STATE_READY:
+ case R1_STATE_STBY:
+ /*
+ * In idle states, HPI is not needed and the caller
+ * can issue the next intended command immediately
+ */
+ goto out;
+ case R1_STATE_PRG:
+ break;
+ default:
+ /* In all other states, it's illegal to issue HPI */
+ pr_debug("%s: HPI cannot be sent. Card state=%d\n",
+ mmc_hostname(card->host), R1_CURRENT_STATE(status));
+ err = -EINVAL;
+ goto out;
+ }
- err = mmc_send_status(card, &status);
- if (err)
- break;
- } while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
- } else
- pr_debug("%s: Left prg-state\n", mmc_hostname(card->host));
+ err = mmc_send_hpi_cmd(card, &status);
+ if (err)
+ goto out;
+
+ prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
+ do {
+ err = mmc_send_status(card, &status);
+
+ if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
+ break;
+ if (time_after(jiffies, prg_wait))
+ err = -ETIMEDOUT;
+ } while (!err);
out:
mmc_release_host(card->host);
@@ -941,7 +950,7 @@ int mmc_regulator_get_ocrmask(struct regulator *supply)
return result;
}
-EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
+EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
/**
* mmc_regulator_set_ocr - set regulator to match host->ios voltage
@@ -1011,7 +1020,30 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
"could not set regulator OCR (%d)\n", result);
return result;
}
-EXPORT_SYMBOL(mmc_regulator_set_ocr);
+EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
+
+int mmc_regulator_get_supply(struct mmc_host *mmc)
+{
+ struct device *dev = mmc_dev(mmc);
+ struct regulator *supply;
+ int ret;
+
+ supply = devm_regulator_get(dev, "vmmc");
+ mmc->supply.vmmc = supply;
+ mmc->supply.vqmmc = devm_regulator_get(dev, "vqmmc");
+
+ if (IS_ERR(supply))
+ return PTR_ERR(supply);
+
+ ret = mmc_regulator_get_ocrmask(supply);
+ if (ret > 0)
+ mmc->ocr_avail = ret;
+ else
+ dev_warn(mmc_dev(mmc), "Failed getting OCR mask: %d\n", ret);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
#endif /* CONFIG_REGULATOR */
@@ -1180,6 +1212,9 @@ static void mmc_power_up(struct mmc_host *host)
host->ios.timing = MMC_TIMING_LEGACY;
mmc_set_ios(host);
+ /* Set signal voltage to 3.3V */
+ mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false);
+
/*
* This delay should be sufficient to allow the power supply
* to reach the minimum voltage.
@@ -1931,9 +1966,6 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
*/
mmc_hw_reset_for_init(host);
- /* Initialization should be done at 3.3 V I/O voltage. */
- mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
-
/*
* sdio_reset sends CMD52 to reset card. Since we do not know
* if the card is being re-initialized, just send it. CMD52
@@ -2075,6 +2107,7 @@ void mmc_rescan(struct work_struct *work)
void mmc_start_host(struct mmc_host *host)
{
host->f_init = max(freqs[0], host->f_min);
+ host->rescan_disable = 0;
mmc_power_up(host);
mmc_detect_change(host, 0);
}
@@ -2088,6 +2121,7 @@ void mmc_stop_host(struct mmc_host *host)
spin_unlock_irqrestore(&host->lock, flags);
#endif
+ host->rescan_disable = 1;
cancel_delayed_work_sync(&host->detect);
mmc_flush_scheduled_work();
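The reworked mmc_interrupt_hpi() above sends HPI once and then polls SEND_STATUS until the card reports the transfer state or an out_of_int_time deadline passes, instead of resending HPI in a loop. A rough userspace sketch of that deadline-polling shape (the status source and timeout value are stand-ins, not the real eMMC interfaces):

#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static int fake_status_reads;

/* Stand-in for SEND_STATUS: pretend the card settles on the 4th poll. */
static bool card_in_tran_state(void)
{
	return ++fake_status_reads > 3;
}

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

int main(void)
{
	long long deadline = now_ms() + 10;	/* ~out_of_int_time, example value */
	int err = 0;

	do {
		if (card_in_tran_state())
			break;
		if (now_ms() > deadline)
			err = -1;		/* -ETIMEDOUT in the driver */
	} while (!err);

	printf("polls=%d err=%d\n", fake_status_reads, err);
	return 0;
}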
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 91c84c7..597f189 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -32,6 +32,7 @@
static void mmc_host_classdev_release(struct device *dev)
{
struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ mutex_destroy(&host->slot.lock);
kfree(host);
}
@@ -312,6 +313,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
if (!host)
return NULL;
+ /* scanning will be enabled when we're ready */
+ host->rescan_disable = 1;
spin_lock(&mmc_host_lock);
err = idr_get_new(&mmc_host_idr, host, &host->index);
spin_unlock(&mmc_host_lock);
@@ -327,6 +330,9 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
mmc_host_clk_init(host);
+ mutex_init(&host->slot.lock);
+ host->slot.cd_irq = -EINVAL;
+
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 2d4a4b7..396b258 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -717,10 +717,6 @@ static int mmc_select_powerclass(struct mmc_card *card,
card->ext_csd.generic_cmd6_time);
}
- if (err)
- pr_err("%s: power class selection for ext_csd_bus_width %d"
- " failed\n", mmc_hostname(card->host), bus_width);
-
return err;
}
@@ -822,9 +818,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
if (!mmc_host_is_spi(host))
mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
- /* Initialization should be done at 3.3 V I/O voltage. */
- mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
-
/*
* Since we're changing the OCR value, we seem to
* need to tell some cards to go back to the idle
@@ -1104,7 +1097,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
if (err)
- goto err;
+ pr_warning("%s: power class selection to bus width %d"
+ " failed\n", mmc_hostname(card->host),
+ 1 << bus_width);
}
/*
@@ -1136,7 +1131,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_select_powerclass(card, ext_csd_bits[idx][0],
ext_csd);
if (err)
- goto err;
+ pr_warning("%s: power class selection to "
+ "bus width %d failed\n",
+ mmc_hostname(card->host),
+ 1 << bus_width);
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
@@ -1164,7 +1162,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_select_powerclass(card, ext_csd_bits[idx][1],
ext_csd);
if (err)
- goto err;
+ pr_warning("%s: power class selection to "
+ "bus width %d ddr %d failed\n",
+ mmc_hostname(card->host),
+ 1 << bus_width, ddr);
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
@@ -1326,7 +1327,7 @@ static int mmc_suspend(struct mmc_host *host)
if (!err)
mmc_card_set_sleep(host->card);
} else if (!mmc_host_is_spi(host))
- mmc_deselect_cards(host);
+ err = mmc_deselect_cards(host);
host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
mmc_release_host(host);
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 69370f4..0ed2cc5 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -569,7 +569,6 @@ int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
cmd.opcode = opcode;
cmd.arg = card->rca << 16 | 1;
- cmd.cmd_timeout_ms = card->ext_csd.out_of_int_time;
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index c272c686..74972c2 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -244,7 +244,7 @@ static int mmc_read_ssr(struct mmc_card *card)
* bitfield positions accordingly.
*/
au = UNSTUFF_BITS(ssr, 428 - 384, 4);
- if (au > 0 || au <= 9) {
+ if (au > 0 && au <= 9) {
card->ssr.au = 1 << (au + 4);
es = UNSTUFF_BITS(ssr, 408 - 384, 16);
et = UNSTUFF_BITS(ssr, 402 - 384, 6);
@@ -290,8 +290,12 @@ static int mmc_read_switch(struct mmc_card *card)
return -ENOMEM;
}
- /* Find out the supported Bus Speed Modes. */
- err = mmc_sd_switch(card, 0, 0, 1, status);
+ /*
+ * Find out the card's support bits with a mode 0 operation.
+ * The argument does not matter, as the support bits do not
+ * change with the arguments.
+ */
+ err = mmc_sd_switch(card, 0, 0, 0, status);
if (err) {
/*
* If the host or the card can't do the switch,
@@ -312,46 +316,8 @@ static int mmc_read_switch(struct mmc_card *card)
if (card->scr.sda_spec3) {
card->sw_caps.sd3_bus_mode = status[13];
-
- /* Find out Driver Strengths supported by the card */
- err = mmc_sd_switch(card, 0, 2, 1, status);
- if (err) {
- /*
- * If the host or the card can't do the switch,
- * fail more gracefully.
- */
- if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
- goto out;
-
- pr_warning("%s: problem reading "
- "Driver Strength.\n",
- mmc_hostname(card->host));
- err = 0;
-
- goto out;
- }
-
+ /* Driver Strengths supported by the card */
card->sw_caps.sd3_drv_type = status[9];
-
- /* Find out Current Limits supported by the card */
- err = mmc_sd_switch(card, 0, 3, 1, status);
- if (err) {
- /*
- * If the host or the card can't do the switch,
- * fail more gracefully.
- */
- if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
- goto out;
-
- pr_warning("%s: problem reading "
- "Current Limit.\n",
- mmc_hostname(card->host));
- err = 0;
-
- goto out;
- }
-
- card->sw_caps.sd3_curr_limit = status[7];
}
out:
@@ -551,60 +517,80 @@ static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
return 0;
}
+/* Get host's max current setting at its current voltage */
+static u32 sd_get_host_max_current(struct mmc_host *host)
+{
+ u32 voltage, max_current;
+
+ voltage = 1 << host->ios.vdd;
+ switch (voltage) {
+ case MMC_VDD_165_195:
+ max_current = host->max_current_180;
+ break;
+ case MMC_VDD_29_30:
+ case MMC_VDD_30_31:
+ max_current = host->max_current_300;
+ break;
+ case MMC_VDD_32_33:
+ case MMC_VDD_33_34:
+ max_current = host->max_current_330;
+ break;
+ default:
+ max_current = 0;
+ }
+
+ return max_current;
+}
+
static int sd_set_current_limit(struct mmc_card *card, u8 *status)
{
- int current_limit = 0;
+ int current_limit = SD_SET_CURRENT_NO_CHANGE;
int err;
+ u32 max_current;
/*
* Current limit switch is only defined for SDR50, SDR104, and DDR50
- * bus speed modes. For other bus speed modes, we set the default
- * current limit of 200mA.
+ * bus speed modes. For other bus speed modes, we do not change the
+ * current limit.
*/
- if ((card->sd_bus_speed == UHS_SDR50_BUS_SPEED) ||
- (card->sd_bus_speed == UHS_SDR104_BUS_SPEED) ||
- (card->sd_bus_speed == UHS_DDR50_BUS_SPEED)) {
- if (card->host->caps & MMC_CAP_MAX_CURRENT_800) {
- if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_800)
- current_limit = SD_SET_CURRENT_LIMIT_800;
- else if (card->sw_caps.sd3_curr_limit &
- SD_MAX_CURRENT_600)
- current_limit = SD_SET_CURRENT_LIMIT_600;
- else if (card->sw_caps.sd3_curr_limit &
- SD_MAX_CURRENT_400)
- current_limit = SD_SET_CURRENT_LIMIT_400;
- else if (card->sw_caps.sd3_curr_limit &
- SD_MAX_CURRENT_200)
- current_limit = SD_SET_CURRENT_LIMIT_200;
- } else if (card->host->caps & MMC_CAP_MAX_CURRENT_600) {
- if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_600)
- current_limit = SD_SET_CURRENT_LIMIT_600;
- else if (card->sw_caps.sd3_curr_limit &
- SD_MAX_CURRENT_400)
- current_limit = SD_SET_CURRENT_LIMIT_400;
- else if (card->sw_caps.sd3_curr_limit &
- SD_MAX_CURRENT_200)
- current_limit = SD_SET_CURRENT_LIMIT_200;
- } else if (card->host->caps & MMC_CAP_MAX_CURRENT_400) {
- if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_400)
- current_limit = SD_SET_CURRENT_LIMIT_400;
- else if (card->sw_caps.sd3_curr_limit &
- SD_MAX_CURRENT_200)
- current_limit = SD_SET_CURRENT_LIMIT_200;
- } else if (card->host->caps & MMC_CAP_MAX_CURRENT_200) {
- if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_200)
- current_limit = SD_SET_CURRENT_LIMIT_200;
- }
- } else
+ if ((card->sd_bus_speed != UHS_SDR50_BUS_SPEED) &&
+ (card->sd_bus_speed != UHS_SDR104_BUS_SPEED) &&
+ (card->sd_bus_speed != UHS_DDR50_BUS_SPEED))
+ return 0;
+
+ /*
+ * Host has different current capabilities when operating at
+ * different voltages, so find out its max current first.
+ */
+ max_current = sd_get_host_max_current(card->host);
+
+ /*
+ * We only check the host's capability here; if we set a limit that is
+ * higher than the card's maximum current, the card will simply draw its
+ * own maximum. E.g. if the card's maximum current is 300mA: when we set
+ * the current limit to 200mA the card draws 200mA, and when we set it to
+ * 400/600/800mA the card still draws its 300mA maximum from the host.
+ */
+ if (max_current >= 800)
+ current_limit = SD_SET_CURRENT_LIMIT_800;
+ else if (max_current >= 600)
+ current_limit = SD_SET_CURRENT_LIMIT_600;
+ else if (max_current >= 400)
+ current_limit = SD_SET_CURRENT_LIMIT_400;
+ else if (max_current >= 200)
current_limit = SD_SET_CURRENT_LIMIT_200;
- err = mmc_sd_switch(card, 1, 3, current_limit, status);
- if (err)
- return err;
+ if (current_limit != SD_SET_CURRENT_NO_CHANGE) {
+ err = mmc_sd_switch(card, 1, 3, current_limit, status);
+ if (err)
+ return err;
- if (((status[15] >> 4) & 0x0F) != current_limit)
- pr_warning("%s: Problem setting current limit!\n",
- mmc_hostname(card->host));
+ if (((status[15] >> 4) & 0x0F) != current_limit)
+ pr_warning("%s: Problem setting current limit!\n",
+ mmc_hostname(card->host));
+
+ }
return 0;
}
@@ -726,6 +712,7 @@ struct device_type sd_type = {
int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
{
int err;
+ u32 max_current;
/*
* Since we're changing the OCR value, we seem to
@@ -753,9 +740,12 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))
ocr |= SD_OCR_S18R;
- /* If the host can supply more than 150mA, XPC should be set to 1. */
- if (host->caps & (MMC_CAP_SET_XPC_330 | MMC_CAP_SET_XPC_300 |
- MMC_CAP_SET_XPC_180))
+ /*
+ * If the host can supply more than 150mA at its current voltage,
+ * XPC should be set to 1.
+ */
+ max_current = sd_get_host_max_current(host);
+ if (max_current > 150)
ocr |= SD_OCR_XPC;
try_again:
@@ -911,9 +901,6 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
BUG_ON(!host);
WARN_ON(!host->claimed);
- /* The initialization should be done at 3.3 V I/O voltage. */
- mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
-
err = mmc_sd_get_cid(host, ocr, cid, &rocr);
if (err)
return err;
@@ -1075,16 +1062,18 @@ static void mmc_sd_detect(struct mmc_host *host)
*/
static int mmc_sd_suspend(struct mmc_host *host)
{
+ int err = 0;
+
BUG_ON(!host);
BUG_ON(!host->card);
mmc_claim_host(host);
if (!mmc_host_is_spi(host))
- mmc_deselect_cards(host);
+ err = mmc_deselect_cards(host);
host->card->state &= ~MMC_STATE_HIGHSPEED;
mmc_release_host(host);
- return 0;
+ return err;
}
/*
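
The current-limit selection above now depends only on the host's capability. A minimal standalone sketch of that cascade (not the kernel function itself; the SD_SET_CURRENT_LIMIT_* encodings 0..3 and the -1 "no change" value are assumed to mirror include/linux/mmc/sd.h):

/* Sketch only: values assumed to mirror include/linux/mmc/sd.h. */
enum {
        LIMIT_NO_CHANGE = -1,   /* SD_SET_CURRENT_NO_CHANGE */
        LIMIT_200 = 0,          /* SD_SET_CURRENT_LIMIT_200 */
        LIMIT_400 = 1,
        LIMIT_600 = 2,
        LIMIT_800 = 3,
};

static int pick_current_limit(unsigned int host_max_ma)
{
        if (host_max_ma >= 800)
                return LIMIT_800;
        if (host_max_ma >= 600)
                return LIMIT_600;
        if (host_max_ma >= 400)
                return LIMIT_400;
        if (host_max_ma >= 200)
                return LIMIT_200;
        return LIMIT_NO_CHANGE; /* host below 200mA: leave the card's default */
}

For instance, a host reporting 600mA at 3.3V selects the 600mA limit even if the card's own maximum is 300mA; the card then still draws no more than its 300mA.
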
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 13d0e95..d4619e2 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -218,6 +218,12 @@ static int sdio_enable_wide(struct mmc_card *card)
if (ret)
return ret;
+ if ((ctrl & SDIO_BUS_WIDTH_MASK) == SDIO_BUS_WIDTH_RESERVED)
+ pr_warning("%s: SDIO_CCCR_IF is invalid: 0x%02x\n",
+ mmc_hostname(card->host), ctrl);
+
+ /* set as 4-bit bus width */
+ ctrl &= ~SDIO_BUS_WIDTH_MASK;
ctrl |= SDIO_BUS_WIDTH_4BIT;
ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
@@ -585,9 +591,6 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
* Inform the card of the voltage
*/
if (!powered_resume) {
- /* The initialization should be done at 3.3 V I/O voltage. */
- mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
-
err = mmc_send_io_op_cond(host, host->ocr, &ocr);
if (err)
goto err;
@@ -1000,10 +1003,6 @@ static int mmc_sdio_power_restore(struct mmc_host *host)
* restore the correct voltage setting of the card.
*/
- /* The initialization should be done at 3.3 V I/O voltage. */
- if (!mmc_card_keep_power(host))
- mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
-
sdio_reset(host);
mmc_go_idle(host);
mmc_send_if_cond(host, host->ocr_avail);
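
The sdio_enable_wide() hunk above is a read-modify-write fix: the old code only OR-ed the 4-bit flag into SDIO_CCCR_IF, so stale or invalid width bits read back from the card could survive. A hedged sketch of the intended update, assuming the bus-width field encodings from include/linux/mmc/sdio.h (mask 0x03, reserved 0x01, 4-bit 0x02):

/* Sketch only: field encodings assumed from include/linux/mmc/sdio.h. */
#define BUS_WIDTH_MASK          0x03
#define BUS_WIDTH_RESERVED      0x01
#define BUS_WIDTH_4BIT          0x02

static unsigned char cccr_if_request_4bit(unsigned char ctrl, int *invalid)
{
        /* A reserved encoding read back from the card is suspicious */
        *invalid = (ctrl & BUS_WIDTH_MASK) == BUS_WIDTH_RESERVED;

        ctrl &= ~BUS_WIDTH_MASK;        /* drop whatever width the card reported */
        return ctrl | BUS_WIDTH_4BIT;   /* and request 4-bit mode */
}
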
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index f1c7ed8..8e94e55 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -313,7 +313,7 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
if (ret == -ENOENT) {
/* warn about unknown tuples */
- pr_warning("%s: queuing unknown"
+ pr_warn_ratelimited("%s: queuing unknown"
" CIS tuple 0x%02x (%u bytes)\n",
mmc_hostname(card->host),
tpl_code, tpl_link);
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
new file mode 100644
index 0000000..0582429
--- /dev/null
+++ b/drivers/mmc/core/slot-gpio.c
@@ -0,0 +1,188 @@
+/*
+ * Generic GPIO card-detect helper
+ *
+ * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+struct mmc_gpio {
+ int ro_gpio;
+ int cd_gpio;
+ char *ro_label;
+ char cd_label[0];
+};
+
+static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
+{
+ /* Schedule a card detection after a debounce timeout */
+ mmc_detect_change(dev_id, msecs_to_jiffies(100));
+ return IRQ_HANDLED;
+}
+
+static int mmc_gpio_alloc(struct mmc_host *host)
+{
+ size_t len = strlen(dev_name(host->parent)) + 4;
+ struct mmc_gpio *ctx;
+
+ mutex_lock(&host->slot.lock);
+
+ ctx = host->slot.handler_priv;
+ if (!ctx) {
+ /*
+ * devm_kzalloc() can be called after device_initialize(), even
+ * before device_add(), i.e., between mmc_alloc_host() and
+ * mmc_add_host()
+ */
+ ctx = devm_kzalloc(&host->class_dev, sizeof(*ctx) + 2 * len,
+ GFP_KERNEL);
+ if (ctx) {
+ ctx->ro_label = ctx->cd_label + len;
+ snprintf(ctx->cd_label, len, "%s cd", dev_name(host->parent));
+ snprintf(ctx->ro_label, len, "%s ro", dev_name(host->parent));
+ ctx->cd_gpio = -EINVAL;
+ ctx->ro_gpio = -EINVAL;
+ host->slot.handler_priv = ctx;
+ }
+ }
+
+ mutex_unlock(&host->slot.lock);
+
+ return ctx ? 0 : -ENOMEM;
+}
+
+int mmc_gpio_get_ro(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ if (!ctx || !gpio_is_valid(ctx->ro_gpio))
+ return -ENOSYS;
+
+ return !gpio_get_value_cansleep(ctx->ro_gpio) ^
+ !!(host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
+}
+EXPORT_SYMBOL(mmc_gpio_get_ro);
+
+int mmc_gpio_get_cd(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ if (!ctx || !gpio_is_valid(ctx->cd_gpio))
+ return -ENOSYS;
+
+ return !gpio_get_value_cansleep(ctx->cd_gpio) ^
+ !!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
+}
+EXPORT_SYMBOL(mmc_gpio_get_cd);
+
+int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio)
+{
+ struct mmc_gpio *ctx;
+ int ret;
+
+ if (!gpio_is_valid(gpio))
+ return -EINVAL;
+
+ ret = mmc_gpio_alloc(host);
+ if (ret < 0)
+ return ret;
+
+ ctx = host->slot.handler_priv;
+
+ return gpio_request_one(gpio, GPIOF_DIR_IN, ctx->ro_label);
+}
+EXPORT_SYMBOL(mmc_gpio_request_ro);
+
+int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
+{
+ struct mmc_gpio *ctx;
+ int irq = gpio_to_irq(gpio);
+ int ret;
+
+ ret = mmc_gpio_alloc(host);
+ if (ret < 0)
+ return ret;
+
+ ctx = host->slot.handler_priv;
+
+ ret = gpio_request_one(gpio, GPIOF_DIR_IN, ctx->cd_label);
+ if (ret < 0)
+ /*
+ * Don't bother freeing the memory: it might still be used by other
+ * slot functions, and in any case it will be freed when the device
+ * is destroyed.
+ */
+ return ret;
+
+ /*
+ * Even if gpio_to_irq() returns a valid IRQ number, the platform might
+ * still prefer to poll, e.g., because that IRQ number is already used
+ * by another unit and cannot be shared.
+ */
+ if (irq >= 0 && host->caps & MMC_CAP_NEEDS_POLL)
+ irq = -EINVAL;
+
+ if (irq >= 0) {
+ ret = request_threaded_irq(irq, NULL, mmc_gpio_cd_irqt,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ ctx->cd_label, host);
+ if (ret < 0)
+ irq = ret;
+ }
+
+ host->slot.cd_irq = irq;
+
+ if (irq < 0)
+ host->caps |= MMC_CAP_NEEDS_POLL;
+
+ ctx->cd_gpio = gpio;
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_gpio_request_cd);
+
+void mmc_gpio_free_ro(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+ int gpio;
+
+ if (!ctx || !gpio_is_valid(ctx->ro_gpio))
+ return;
+
+ gpio = ctx->ro_gpio;
+ ctx->ro_gpio = -EINVAL;
+
+ gpio_free(gpio);
+}
+EXPORT_SYMBOL(mmc_gpio_free_ro);
+
+void mmc_gpio_free_cd(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+ int gpio;
+
+ if (!ctx || !gpio_is_valid(ctx->cd_gpio))
+ return;
+
+ if (host->slot.cd_irq >= 0) {
+ free_irq(host->slot.cd_irq, host);
+ host->slot.cd_irq = -EINVAL;
+ }
+
+ gpio = ctx->cd_gpio;
+ ctx->cd_gpio = -EINVAL;
+
+ gpio_free(gpio);
+}
+EXPORT_SYMBOL(mmc_gpio_free_cd);
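
A hypothetical host driver would wire these helpers up roughly as follows (the foo_ names and error handling are illustrative only, not part of the patch):

#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

static int foo_mmc_get_cd(struct mmc_host *mmc)
{
        return mmc_gpio_get_cd(mmc);    /* -ENOSYS if no CD GPIO was set up */
}

static int foo_mmc_get_ro(struct mmc_host *mmc)
{
        return mmc_gpio_get_ro(mmc);    /* honours MMC_CAP2_RO_ACTIVE_HIGH */
}

static int foo_mmc_setup_slot(struct mmc_host *mmc, int cd_gpio, int ro_gpio)
{
        int ret;

        /* Requests the GPIO and, if possible, a card-detect IRQ;
         * otherwise the core falls back to MMC_CAP_NEEDS_POLL. */
        ret = mmc_gpio_request_cd(mmc, cd_gpio);
        if (ret)
                return ret;

        ret = mmc_gpio_request_ro(mmc, ro_gpio);
        if (ret)
                mmc_gpio_free_cd(mmc);
        return ret;
}

On teardown the driver calls mmc_gpio_free_ro() and mmc_gpio_free_cd() before freeing the host.
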
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h
index 787aba1..ab56f7d 100644
--- a/drivers/mmc/host/atmel-mci-regs.h
+++ b/drivers/mmc/host/atmel-mci-regs.h
@@ -140,4 +140,18 @@
#define atmci_writel(port,reg,value) \
__raw_writel((value), (port)->regs + reg)
+/*
+ * Fix sconfig's burst size according to the Atmel MCI. We need to
+ * convert them as: 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
+ *
+ * This can be done by finding the most significant bit set.
+ */
+static inline unsigned int atmci_convert_chksize(unsigned int maxburst)
+{
+ if (maxburst > 1)
+ return fls(maxburst) - 2;
+ else
+ return 0;
+}
+
#endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */
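
Worked values for atmci_convert_chksize(): fls() returns the 1-based index of the highest set bit, so fls(4) = 3 gives 4 -> 1, fls(8) = 4 gives 8 -> 2, and fls(16) = 5 gives 16 -> 3, while maxburst = 1 takes the early return and yields 0: exactly the 1/4/8/16 -> 0/1/2/3 mapping the comment describes.
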
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 420aca6..322412c 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -391,11 +391,17 @@ static int atmci_regs_show(struct seq_file *s, void *v)
clk_disable(host->mck);
spin_unlock_bh(&host->lock);
- seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n",
+ seq_printf(s, "MR:\t0x%08x%s%s ",
buf[ATMCI_MR / 4],
buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
- buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "",
- buf[ATMCI_MR / 4] & 0xff);
+ buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "");
+ if (host->caps.has_odd_clk_div)
+ seq_printf(s, "{CLKDIV,CLKODD}=%u\n",
+ ((buf[ATMCI_MR / 4] & 0xff) << 1)
+ | ((buf[ATMCI_MR / 4] >> 16) & 1));
+ else
+ seq_printf(s, "CLKDIV=%u\n",
+ (buf[ATMCI_MR / 4] & 0xff));
seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]);
seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]);
seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]);
@@ -910,6 +916,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
enum dma_data_direction direction;
enum dma_transfer_direction slave_dirn;
unsigned int sglen;
+ u32 maxburst;
u32 iflags;
data->error = -EINPROGRESS;
@@ -943,17 +950,18 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
if (!chan)
return -ENODEV;
- if (host->caps.has_dma)
- atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN);
-
if (data->flags & MMC_DATA_READ) {
direction = DMA_FROM_DEVICE;
host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
+ maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst);
} else {
direction = DMA_TO_DEVICE;
host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
+ maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
}
+ atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | ATMCI_DMAEN);
+
sglen = dma_map_sg(chan->device->dev, data->sg,
data->sg_len, direction);
@@ -1683,7 +1691,6 @@ static void atmci_tasklet_func(unsigned long priv)
dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
host->cmd = NULL;
- host->data = NULL;
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
atmci_command_complete(host, mrq->stop);
@@ -1697,6 +1704,7 @@ static void atmci_tasklet_func(unsigned long priv)
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
state = STATE_WAITING_NOTBUSY;
}
+ host->data = NULL;
break;
case STATE_END_REQUEST:
@@ -2314,6 +2322,8 @@ static int __init atmci_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
+ setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
+
/* We need at least one slot to succeed */
nr_slots = 0;
ret = -ENODEV;
@@ -2352,8 +2362,6 @@ static int __init atmci_probe(struct platform_device *pdev)
}
}
- setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
-
dev_info(&pdev->dev,
"Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
host->mapbase, irq, nr_slots);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 9bbf45f..72dc3cd 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -405,11 +405,23 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
static int dw_mci_idmac_init(struct dw_mci *host)
{
struct idmac_desc *p;
- int i;
+ int i, dma_support;
/* Number of descriptors in the ring buffer */
host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
+ /* Check if Hardware Configuration Register has support for DMA */
+ dma_support = (mci_readl(host, HCON) >> 16) & 0x3;
+
+ if (!dma_support || dma_support > 2) {
+ dev_err(&host->dev,
+ "Host Controller does not support IDMA Tx.\n");
+ host->dma_ops = NULL;
+ return -ENODEV;
+ }
+
+ dev_info(&host->dev, "Using internal DMA controller.\n");
+
/* Forward link the descriptor list */
for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
@@ -418,6 +430,8 @@ static int dw_mci_idmac_init(struct dw_mci *host)
p->des3 = host->sg_dma;
p->des0 = IDMAC_DES0_ER;
+ mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);
+
/* Mask out interrupts - get Tx & Rx complete only */
mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
SDMMC_IDMAC_INT_TI);
@@ -615,14 +629,15 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
u32 div;
if (slot->clock != host->current_speed) {
- if (host->bus_hz % slot->clock)
+ div = host->bus_hz / slot->clock;
+ if (host->bus_hz % slot->clock && host->bus_hz > slot->clock)
/*
* move the + 1 after the divide to prevent
* over-clocking the card.
*/
- div = ((host->bus_hz / slot->clock) >> 1) + 1;
- else
- div = (host->bus_hz / slot->clock) >> 1;
+ div += 1;
+
+ div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0;
dev_info(&slot->mmc->class_dev,
"Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
@@ -939,8 +954,8 @@ static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd
mdelay(20);
if (cmd->data) {
- host->data = NULL;
dw_mci_stop_dma(host);
+ host->data = NULL;
}
}
}
@@ -1623,7 +1638,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
- set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
host->dma_ops->complete(host);
}
#endif
@@ -1725,7 +1739,8 @@ static void dw_mci_work_routine_card(struct work_struct *work)
#ifdef CONFIG_MMC_DW_IDMAC
ctrl = mci_readl(host, BMOD);
- ctrl |= 0x01; /* Software reset of DMA */
+ /* Software reset of DMA */
+ ctrl |= SDMMC_IDMAC_SWRESET;
mci_writel(host, BMOD, ctrl);
#endif
@@ -1873,7 +1888,6 @@ static void dw_mci_init_dma(struct dw_mci *host)
/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
host->dma_ops = &dw_mci_idmac_ops;
- dev_info(&host->dev, "Using internal DMA controller.\n");
#endif
if (!host->dma_ops)
@@ -1950,10 +1964,6 @@ int dw_mci_probe(struct dw_mci *host)
spin_lock_init(&host->lock);
INIT_LIST_HEAD(&host->queue);
-
- host->dma_ops = host->pdata->dma_ops;
- dw_mci_init_dma(host);
-
/*
* Get the host data width - this assumes that HCON has been set with
* the correct values.
@@ -1981,10 +1991,11 @@ int dw_mci_probe(struct dw_mci *host)
}
/* Reset all blocks */
- if (!mci_wait_reset(&host->dev, host)) {
- ret = -ENODEV;
- goto err_dmaunmap;
- }
+ if (!mci_wait_reset(&host->dev, host))
+ return -ENODEV;
+
+ host->dma_ops = host->pdata->dma_ops;
+ dw_mci_init_dma(host);
/* Clear the interrupts for the host controller */
mci_writel(host, RINTSTS, 0xFFFFFFFF);
@@ -2170,14 +2181,14 @@ int dw_mci_resume(struct dw_mci *host)
if (host->vmmc)
regulator_enable(host->vmmc);
- if (host->dma_ops->init)
- host->dma_ops->init(host);
-
if (!mci_wait_reset(&host->dev, host)) {
ret = -ENODEV;
return ret;
}
+ if (host->use_dma && host->dma_ops->init)
+ host->dma_ops->init(host);
+
/* Restore the old value at FIFOTH register */
mci_writel(host, FIFOTH, host->fifoth_val);
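
A worked example of the reworked divider in dw_mci_setup_bus(), assuming the usual dw_mmc convention that the card clock is bus_hz / (2 * CLKDIV) with CLKDIV = 0 meaning divider bypass:

#include <linux/kernel.h>

/* Sketch of the arithmetic only, not the driver function. */
static unsigned int dw_clkdiv_sketch(unsigned int bus_hz, unsigned int want_hz)
{
        unsigned int div = bus_hz / want_hz;

        if (bus_hz % want_hz && bus_hz > want_hz)
                div += 1;       /* round up so the card is never overclocked */

        return (bus_hz != want_hz) ? DIV_ROUND_UP(div, 2) : 0;
}

/*
 * dw_clkdiv_sketch(50000000, 400000)    -> 63: ~396.8kHz at the card
 * dw_clkdiv_sketch(100000000, 25000000) ->  2: exactly 25MHz
 * dw_clkdiv_sketch(50000000, 50000000)  ->  0: bypass, full 50MHz
 */
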
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index f0fcce4..50ff19a 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1216,12 +1216,7 @@ static void mmci_dt_populate_generic_pdata(struct device_node *np,
int bus_width = 0;
pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
- if (!pdata->gpio_wp)
- pdata->gpio_wp = -1;
-
pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);
- if (!pdata->gpio_cd)
- pdata->gpio_cd = -1;
if (of_get_property(np, "cd-inverted", NULL))
pdata->cd_invert = true;
@@ -1276,6 +1271,12 @@ static int __devinit mmci_probe(struct amba_device *dev,
return -EINVAL;
}
+ if (!plat) {
+ plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return -ENOMEM;
+ }
+
if (np)
mmci_dt_populate_generic_pdata(np, plat);
@@ -1424,6 +1425,10 @@ static int __devinit mmci_probe(struct amba_device *dev,
writel(0, host->base + MMCIMASK1);
writel(0xfff, host->base + MMCICLEAR);
+ if (plat->gpio_cd == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_gpio_cd;
+ }
if (gpio_is_valid(plat->gpio_cd)) {
ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
if (ret == 0)
@@ -1447,6 +1452,10 @@ static int __devinit mmci_probe(struct amba_device *dev,
if (ret >= 0)
host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
}
+ if (plat->gpio_wp == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_gpio_wp;
+ }
if (gpio_is_valid(plat->gpio_wp)) {
ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
if (ret == 0)
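
The -EPROBE_DEFER checks above exist because of_get_named_gpio() can return that code while the GPIO controller is not yet bound; propagating it lets the driver core retry the probe later instead of silently running without the GPIO. A hedged sketch of the pattern (property name and wrapper are illustrative only):

#include <linux/errno.h>
#include <linux/of_gpio.h>

static int foo_get_cd_gpio(struct device_node *np, int *gpio)
{
        int ret = of_get_named_gpio(np, "cd-gpios", 0);

        if (ret == -EPROBE_DEFER)
                return ret;     /* let the driver core retry probe later */

        /* May still be a negative "not present" value, which a later
         * gpio_is_valid() check filters out. */
        *gpio = ret;
        return 0;
}
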
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 34a9026..a51f930 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -164,16 +164,23 @@ struct mxs_mmc_host {
spinlock_t lock;
int sdio_irq_en;
int wp_gpio;
+ bool wp_inverted;
};
static int mxs_mmc_get_ro(struct mmc_host *mmc)
{
struct mxs_mmc_host *host = mmc_priv(mmc);
+ int ret;
if (!gpio_is_valid(host->wp_gpio))
return -EINVAL;
- return gpio_get_value(host->wp_gpio);
+ ret = gpio_get_value(host->wp_gpio);
+
+ if (host->wp_inverted)
+ ret = !ret;
+
+ return ret;
}
static int mxs_mmc_get_cd(struct mmc_host *mmc)
@@ -707,6 +714,8 @@ static int mxs_mmc_probe(struct platform_device *pdev)
struct pinctrl *pinctrl;
int ret = 0, irq_err, irq_dma;
dma_cap_mask_t mask;
+ struct regulator *reg_vmmc;
+ enum of_gpio_flags flags;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -747,6 +756,16 @@ static int mxs_mmc_probe(struct platform_device *pdev)
host->mmc = mmc;
host->sdio_irq_en = 0;
+ reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc");
+ if (!IS_ERR(reg_vmmc)) {
+ ret = regulator_enable(reg_vmmc);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to enable vmmc regulator: %d\n", ret);
+ goto out_mmc_free;
+ }
+ }
+
pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(pinctrl)) {
ret = PTR_ERR(pinctrl);
@@ -785,7 +804,10 @@ static int mxs_mmc_probe(struct platform_device *pdev)
mmc->caps |= MMC_CAP_4_BIT_DATA;
else if (bus_width == 8)
mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
- host->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
+ host->wp_gpio = of_get_named_gpio_flags(np, "wp-gpios", 0,
+ &flags);
+ if (flags & OF_GPIO_ACTIVE_LOW)
+ host->wp_inverted = 1;
} else {
if (pdata->flags & SLOTF_8_BIT_CAPABLE)
mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
@@ -894,8 +916,8 @@ static struct platform_driver mxs_mmc_driver = {
.owner = THIS_MODULE,
#ifdef CONFIG_PM
.pm = &mxs_mmc_pm_ops,
- .of_match_table = mxs_mmc_dt_ids,
#endif
+ .of_match_table = mxs_mmc_dt_ids,
},
};
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 552196c..3e8dcf8 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1300,7 +1300,7 @@ static const struct mmc_host_ops mmc_omap_ops = {
.set_ios = mmc_omap_set_ios,
};
-static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)
+static int __devinit mmc_omap_new_slot(struct mmc_omap_host *host, int id)
{
struct mmc_omap_slot *slot = NULL;
struct mmc_host *mmc;
@@ -1485,24 +1485,26 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
}
host->nr_slots = pdata->nr_slots;
+ host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
+
+ host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
+ if (!host->mmc_omap_wq)
+ goto err_plat_cleanup;
+
for (i = 0; i < pdata->nr_slots; i++) {
ret = mmc_omap_new_slot(host, i);
if (ret < 0) {
while (--i >= 0)
mmc_omap_remove_slot(host->slots[i]);
- goto err_plat_cleanup;
+ goto err_destroy_wq;
}
}
- host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
-
- host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
- if (!host->mmc_omap_wq)
- goto err_plat_cleanup;
-
return 0;
+err_destroy_wq:
+ destroy_workqueue(host->mmc_omap_wq);
err_plat_cleanup:
if (pdata->cleanup)
pdata->cleanup(&pdev->dev);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 9a7a60a..bc28627 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -85,7 +85,6 @@
#define BRR_ENABLE (1 << 5)
#define DTO_ENABLE (1 << 20)
#define INIT_STREAM (1 << 1)
-#define ACEN_ACMD12 (1 << 2)
#define DP_SELECT (1 << 21)
#define DDIR (1 << 4)
#define DMA_EN 0x1
@@ -117,7 +116,6 @@
#define OMAP_MMC_MAX_CLOCK 52000000
#define DRIVER_NAME "omap_hsmmc"
-#define AUTO_CMD12 (1 << 0) /* Auto CMD12 support */
/*
* One controller can have multiple slots, like on some omap boards using
* omap.c controller driver. Luckily this is not currently done on any known
@@ -177,7 +175,6 @@ struct omap_hsmmc_host {
int reqs_blocked;
int use_reg;
int req_in_progress;
- unsigned int flags;
struct omap_hsmmc_next next_data;
struct omap_mmc_platform_data *pdata;
@@ -773,8 +770,6 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
cmdtype = 0x3;
cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);
- if ((host->flags & AUTO_CMD12) && mmc_op_multi(cmd->opcode))
- cmdreg |= ACEN_ACMD12;
if (data) {
cmdreg |= DP_SELECT | MSBS | BCE;
@@ -847,14 +842,11 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
else
data->bytes_xfered = 0;
- if (data->stop && ((!(host->flags & AUTO_CMD12)) || data->error)) {
- omap_hsmmc_start_command(host, data->stop, NULL);
- } else {
- if (data->stop)
- data->stop->resp[0] = OMAP_HSMMC_READ(host->base,
- RSP76);
+ if (!data->stop) {
omap_hsmmc_request_done(host, data->mrq);
+ return;
}
+ omap_hsmmc_start_command(host, data->stop, NULL);
}
/*
@@ -1097,7 +1089,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
/* Disable the clocks */
pm_runtime_put_sync(host->dev);
if (host->dbclk)
- clk_disable(host->dbclk);
+ clk_disable_unprepare(host->dbclk);
/* Turn the power off */
ret = mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);
@@ -1108,7 +1100,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
vdd);
pm_runtime_get_sync(host->dev);
if (host->dbclk)
- clk_enable(host->dbclk);
+ clk_prepare_enable(host->dbclk);
if (ret != 0)
goto err;
@@ -1859,7 +1851,6 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
host->mapbase = res->start + pdata->reg_offset;
host->base = ioremap(host->mapbase, SZ_4K);
host->power_mode = MMC_POWER_OFF;
- host->flags = AUTO_CMD12;
host->next_data.cookie = 1;
platform_set_drvdata(pdev, host);
@@ -1908,7 +1899,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
if (IS_ERR(host->dbclk)) {
dev_warn(mmc_dev(host->mmc), "Failed to get debounce clk\n");
host->dbclk = NULL;
- } else if (clk_enable(host->dbclk) != 0) {
+ } else if (clk_prepare_enable(host->dbclk) != 0) {
dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
clk_put(host->dbclk);
host->dbclk = NULL;
@@ -1940,6 +1931,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
if (!res) {
dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
+ ret = -ENXIO;
goto err_irq;
}
host->dma_line_tx = res->start;
@@ -1947,6 +1939,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
if (!res) {
dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
+ ret = -ENXIO;
goto err_irq;
}
host->dma_line_rx = res->start;
@@ -2032,7 +2025,7 @@ err_irq:
pm_runtime_disable(host->dev);
clk_put(host->fclk);
if (host->dbclk) {
- clk_disable(host->dbclk);
+ clk_disable_unprepare(host->dbclk);
clk_put(host->dbclk);
}
err1:
@@ -2067,7 +2060,7 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
pm_runtime_disable(host->dev);
clk_put(host->fclk);
if (host->dbclk) {
- clk_disable(host->dbclk);
+ clk_disable_unprepare(host->dbclk);
clk_put(host->dbclk);
}
@@ -2125,7 +2118,7 @@ static int omap_hsmmc_suspend(struct device *dev)
}
if (host->dbclk)
- clk_disable(host->dbclk);
+ clk_disable_unprepare(host->dbclk);
err:
pm_runtime_put_sync(host->dev);
return ret;
@@ -2146,7 +2139,7 @@ static int omap_hsmmc_resume(struct device *dev)
pm_runtime_get_sync(host->dev);
if (host->dbclk)
- clk_enable(host->dbclk);
+ clk_prepare_enable(host->dbclk);
if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
omap_hsmmc_conf_bus_power(host);
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index c3622a6..bd5a5cc 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -26,7 +26,6 @@
#include <mach/dma.h>
#include <mach/regs-sdi.h>
-#include <mach/regs-gpio.h>
#include <plat/mci.h>
@@ -1237,12 +1236,9 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->power_mode) {
case MMC_POWER_ON:
case MMC_POWER_UP:
- s3c2410_gpio_cfgpin(S3C2410_GPE(5), S3C2410_GPE5_SDCLK);
- s3c2410_gpio_cfgpin(S3C2410_GPE(6), S3C2410_GPE6_SDCMD);
- s3c2410_gpio_cfgpin(S3C2410_GPE(7), S3C2410_GPE7_SDDAT0);
- s3c2410_gpio_cfgpin(S3C2410_GPE(8), S3C2410_GPE8_SDDAT1);
- s3c2410_gpio_cfgpin(S3C2410_GPE(9), S3C2410_GPE9_SDDAT2);
- s3c2410_gpio_cfgpin(S3C2410_GPE(10), S3C2410_GPE10_SDDAT3);
+ /* Configure GPE5...GPE10 pins in SD mode */
+ s3c_gpio_cfgall_range(S3C2410_GPE(5), 6, S3C_GPIO_SFN(2),
+ S3C_GPIO_PULL_NONE);
if (host->pdata->set_power)
host->pdata->set_power(ios->power_mode, ios->vdd);
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index 177f697..a6e53a1 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -20,11 +20,17 @@
*/
#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/mmc/host.h>
#include "sdhci-pltfm.h"
+struct sdhci_dove_priv {
+ struct clk *clk;
+};
+
static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
{
u16 ret;
@@ -66,16 +72,57 @@ static struct sdhci_pltfm_data sdhci_dove_pdata = {
.quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
SDHCI_QUIRK_NO_BUSY_IRQ |
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
- SDHCI_QUIRK_FORCE_DMA,
+ SDHCI_QUIRK_FORCE_DMA |
+ SDHCI_QUIRK_NO_HISPD_BIT,
};
static int __devinit sdhci_dove_probe(struct platform_device *pdev)
{
- return sdhci_pltfm_register(pdev, &sdhci_dove_pdata);
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_dove_priv *priv;
+ int ret;
+
+ ret = sdhci_pltfm_register(pdev, &sdhci_dove_pdata);
+ if (ret)
+ goto sdhci_dove_register_fail;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_dove_priv),
+ GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "unable to allocate private data");
+ ret = -ENOMEM;
+ goto sdhci_dove_allocate_fail;
+ }
+
+ host = platform_get_drvdata(pdev);
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->priv = priv;
+
+ priv->clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(priv->clk))
+ clk_prepare_enable(priv->clk);
+ return 0;
+
+sdhci_dove_allocate_fail:
+ sdhci_pltfm_unregister(pdev);
+sdhci_dove_register_fail:
+ return ret;
}
static int __devexit sdhci_dove_remove(struct platform_device *pdev)
{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_dove_priv *priv = pltfm_host->priv;
+
+ if (priv->clk) {
+ if (!IS_ERR(priv->clk)) {
+ clk_disable_unprepare(priv->clk);
+ clk_put(priv->clk);
+ }
+ devm_kfree(&pdev->dev, priv->clk);
+ }
return sdhci_pltfm_unregister(pdev);
}
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index ebbe984..e23f813 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -299,6 +299,8 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
u32 new_val;
switch (reg) {
@@ -315,8 +317,11 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
SDHCI_CTRL_D3CD);
/* ensure the endianness */
new_val |= ESDHC_HOST_CONTROL_LE;
- /* DMA mode bits are shifted */
- new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5;
+ /* bits 8&9 are reserved on mx25 */
+ if (!is_imx25_esdhc(imx_data)) {
+ /* DMA mode bits are shifted */
+ new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5;
+ }
esdhc_clrset_le(host, 0xffff, new_val, reg);
return;
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 69ef0be..504da71 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -157,6 +157,7 @@ static const struct sdhci_pci_fixes sdhci_ene_714 = {
static const struct sdhci_pci_fixes sdhci_cafe = {
.quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
SDHCI_QUIRK_NO_BUSY_IRQ |
+ SDHCI_QUIRK_BROKEN_CARD_DETECTION |
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
};
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index dbb75bf..b6ee885 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -28,6 +28,9 @@
#include <linux/mmc/host.h>
#include <linux/platform_data/pxa_sdhci.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
#include "sdhci.h"
#include "sdhci-pltfm.h"
@@ -121,6 +124,48 @@ static struct sdhci_ops pxav2_sdhci_ops = {
.platform_8bit_width = pxav2_mmc_set_width,
};
+#ifdef CONFIG_OF
+static const struct of_device_id sdhci_pxav2_of_match[] = {
+ {
+ .compatible = "mrvl,pxav2-mmc",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sdhci_pxav2_of_match);
+
+static struct sdhci_pxa_platdata *pxav2_get_mmc_pdata(struct device *dev)
+{
+ struct sdhci_pxa_platdata *pdata;
+ struct device_node *np = dev->of_node;
+ u32 bus_width;
+ u32 clk_delay_cycles;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ if (of_find_property(np, "non-removable", NULL))
+ pdata->flags |= PXA_FLAG_CARD_PERMANENT;
+
+ of_property_read_u32(np, "bus-width", &bus_width);
+ if (bus_width == 8)
+ pdata->flags |= PXA_FLAG_SD_8_BIT_CAPABLE_SLOT;
+
+ of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles);
+ if (clk_delay_cycles > 0) {
+ pdata->clk_delay_sel = 1;
+ pdata->clk_delay_cycles = clk_delay_cycles;
+ }
+
+ return pdata;
+}
+#else
+static inline struct sdhci_pxa_platdata *pxav2_get_mmc_pdata(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
static int __devinit sdhci_pxav2_probe(struct platform_device *pdev)
{
struct sdhci_pltfm_host *pltfm_host;
@@ -128,6 +173,8 @@ static int __devinit sdhci_pxav2_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct sdhci_host *host = NULL;
struct sdhci_pxa *pxa = NULL;
+ const struct of_device_id *match;
+
int ret;
struct clk *clk;
@@ -156,6 +203,10 @@ static int __devinit sdhci_pxav2_probe(struct platform_device *pdev)
| SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
| SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
+ match = of_match_device(of_match_ptr(sdhci_pxav2_of_match), &pdev->dev);
+ if (match) {
+ pdata = pxav2_get_mmc_pdata(dev);
+ }
if (pdata) {
if (pdata->flags & PXA_FLAG_CARD_PERMANENT) {
/* on-chip device */
@@ -218,6 +269,9 @@ static struct platform_driver sdhci_pxav2_driver = {
.driver = {
.name = "sdhci-pxav2",
.owner = THIS_MODULE,
+#ifdef CONFIG_OF
+ .of_match_table = sdhci_pxav2_of_match,
+#endif
.pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_pxav2_probe,
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index f296956..07fe383 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -28,6 +28,9 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
#include "sdhci.h"
#include "sdhci-pltfm.h"
@@ -164,6 +167,46 @@ static struct sdhci_ops pxav3_sdhci_ops = {
.platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
};
+#ifdef CONFIG_OF
+static const struct of_device_id sdhci_pxav3_of_match[] = {
+ {
+ .compatible = "mrvl,pxav3-mmc",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sdhci_pxav3_of_match);
+
+static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
+{
+ struct sdhci_pxa_platdata *pdata;
+ struct device_node *np = dev->of_node;
+ u32 bus_width;
+ u32 clk_delay_cycles;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ if (of_find_property(np, "non-removable", NULL))
+ pdata->flags |= PXA_FLAG_CARD_PERMANENT;
+
+ of_property_read_u32(np, "bus-width", &bus_width);
+ if (bus_width == 8)
+ pdata->flags |= PXA_FLAG_SD_8_BIT_CAPABLE_SLOT;
+
+ of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles);
+ if (clk_delay_cycles > 0)
+ pdata->clk_delay_cycles = clk_delay_cycles;
+
+ return pdata;
+}
+#else
+static inline struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
{
struct sdhci_pltfm_host *pltfm_host;
@@ -171,6 +214,8 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct sdhci_host *host = NULL;
struct sdhci_pxa *pxa = NULL;
+ const struct of_device_id *match;
+
int ret;
struct clk *clk;
@@ -202,6 +247,10 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
/* enable 1/8V DDR capable */
host->mmc->caps |= MMC_CAP_1_8V_DDR;
+ match = of_match_device(of_match_ptr(sdhci_pxav3_of_match), &pdev->dev);
+ if (match)
+ pdata = pxav3_get_mmc_pdata(dev);
+
if (pdata) {
if (pdata->flags & PXA_FLAG_CARD_PERMANENT) {
/* on-chip device */
@@ -263,6 +312,9 @@ static int __devexit sdhci_pxav3_remove(struct platform_device *pdev)
static struct platform_driver sdhci_pxav3_driver = {
.driver = {
.name = "sdhci-pxav3",
+#ifdef CONFIG_OF
+ .of_match_table = sdhci_pxav3_of_match,
+#endif
.owner = THIS_MODULE,
.pm = SDHCI_PLTFM_PMOPS,
},
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 55a164f..a50c205 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -404,7 +404,7 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc)
if (sc->ext_cd_irq &&
request_threaded_irq(sc->ext_cd_irq, NULL,
sdhci_s3c_gpio_card_detect_thread,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
dev_name(dev), sc) == 0) {
int status = gpio_get_value(sc->ext_cd_gpio);
if (pdata->ext_cd_gpio_invert)
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 1fe32df..423da81 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -4,7 +4,7 @@
* Support of SDHCI platform devices for spear soc family
*
* Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* Inspired by sdhci-pltfm.c
*
@@ -289,5 +289,5 @@ static struct platform_driver sdhci_driver = {
module_platform_driver(sdhci_driver);
MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index b38d8a7..0810ccc 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -223,6 +223,7 @@ static struct tegra_sdhci_platform_data * __devinit sdhci_tegra_dt_parse_pdata(
{
struct tegra_sdhci_platform_data *plat;
struct device_node *np = pdev->dev.of_node;
+ u32 bus_width;
if (!np)
return NULL;
@@ -236,7 +237,9 @@ static struct tegra_sdhci_platform_data * __devinit sdhci_tegra_dt_parse_pdata(
plat->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
plat->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
plat->power_gpio = of_get_named_gpio(np, "power-gpios", 0);
- if (of_find_property(np, "support-8bit", NULL))
+
+ if (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
+ bus_width == 8)
plat->is_8bit = 1;
return plat;
@@ -334,7 +337,7 @@ static int __devinit sdhci_tegra_probe(struct platform_device *pdev)
rc = PTR_ERR(clk);
goto err_clk_get;
}
- clk_enable(clk);
+ clk_prepare_enable(clk);
pltfm_host->clk = clk;
host->mmc->pm_caps = plat->pm_flags;
@@ -349,7 +352,7 @@ static int __devinit sdhci_tegra_probe(struct platform_device *pdev)
return 0;
err_add_host:
- clk_disable(pltfm_host->clk);
+ clk_disable_unprepare(pltfm_host->clk);
clk_put(pltfm_host->clk);
err_clk_get:
if (gpio_is_valid(plat->wp_gpio))
@@ -390,7 +393,7 @@ static int __devexit sdhci_tegra_remove(struct platform_device *pdev)
if (gpio_is_valid(plat->power_gpio))
gpio_free(plat->power_gpio);
- clk_disable(pltfm_host->clk);
+ clk_disable_unprepare(pltfm_host->clk);
clk_put(pltfm_host->clk);
sdhci_pltfm_free(pdev);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index e626732..9a11dc3 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -27,6 +27,7 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
#include "sdhci.h"
@@ -244,6 +245,19 @@ static void sdhci_init(struct sdhci_host *host, int soft)
static void sdhci_reinit(struct sdhci_host *host)
{
sdhci_init(host, 0);
+ /*
+ * Retuning state is specific to the inserted card and only applies to
+ * UHS-I cards, so reset these fields to their initial values when the
+ * card is removed.
+ */
+ if (host->flags & SDHCI_USING_RETUNING_TIMER) {
+ host->flags &= ~SDHCI_USING_RETUNING_TIMER;
+
+ del_timer_sync(&host->tuning_timer);
+ host->flags &= ~SDHCI_NEEDS_RETUNING;
+ host->mmc->max_blk_count =
+ (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
+ }
sdhci_enable_card_detection(host);
}
@@ -680,8 +694,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
}
if (count >= 0xF) {
- pr_warning("%s: Too large timeout 0x%x requested for CMD%d!\n",
- mmc_hostname(host->mmc), count, cmd->opcode);
+ DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
+ mmc_hostname(host->mmc), count, cmd->opcode);
count = 0xE;
}
@@ -1245,6 +1259,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
struct sdhci_host *host;
bool present;
unsigned long flags;
+ u32 tuning_opcode;
host = mmc_priv(mmc);
@@ -1292,8 +1307,12 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
*/
if ((host->flags & SDHCI_NEEDS_RETUNING) &&
!(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
+ /* eMMC uses CMD21 while SD and SDIO use CMD19 */
+ tuning_opcode = mmc->card->type == MMC_TYPE_MMC ?
+ MMC_SEND_TUNING_BLOCK_HS200 :
+ MMC_SEND_TUNING_BLOCK;
spin_unlock_irqrestore(&host->lock, flags);
- sdhci_execute_tuning(mmc, mrq->cmd->opcode);
+ sdhci_execute_tuning(mmc, tuning_opcode);
spin_lock_irqsave(&host->lock, flags);
/* Restore original mmc_request structure */
@@ -1663,11 +1682,15 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
pwr = sdhci_readb(host, SDHCI_POWER_CONTROL);
pwr &= ~SDHCI_POWER_ON;
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ if (host->vmmc)
+ regulator_disable(host->vmmc);
/* Wait for 1ms as per the spec */
usleep_range(1000, 1500);
pwr |= SDHCI_POWER_ON;
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ if (host->vmmc)
+ regulator_enable(host->vmmc);
pr_info(DRIVER_NAME ": Switching to 1.8V signalling "
"voltage failed, retrying with S18R set to 0\n");
@@ -1855,6 +1878,7 @@ out:
*/
if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
(host->tuning_mode == SDHCI_TUNING_MODE_1)) {
+ host->flags |= SDHCI_USING_RETUNING_TIMER;
mod_timer(&host->tuning_timer, jiffies +
host->tuning_count * HZ);
/* Tuning mode 1 limits the maximum data length to 4MB */
@@ -1872,10 +1896,10 @@ out:
* try tuning again at a later time, when the re-tuning timer expires.
* So for these controllers, we return 0. Since there might be other
* controllers who do not have this capability, we return error for
- * them.
+ * them. SDHCI_USING_RETUNING_TIMER means the host is currently using
+ * a retuning timer to do the retuning for the card.
*/
- if (err && host->tuning_count &&
- host->tuning_mode == SDHCI_TUNING_MODE_1)
+ if (err && (host->flags & SDHCI_USING_RETUNING_TIMER))
err = 0;
sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
@@ -2382,7 +2406,6 @@ out:
int sdhci_suspend_host(struct sdhci_host *host)
{
int ret;
- bool has_tuning_timer;
if (host->ops->platform_suspend)
host->ops->platform_suspend(host);
@@ -2390,16 +2413,14 @@ int sdhci_suspend_host(struct sdhci_host *host)
sdhci_disable_card_detection(host);
/* Disable tuning since we are suspending */
- has_tuning_timer = host->version >= SDHCI_SPEC_300 &&
- host->tuning_count && host->tuning_mode == SDHCI_TUNING_MODE_1;
- if (has_tuning_timer) {
+ if (host->flags & SDHCI_USING_RETUNING_TIMER) {
del_timer_sync(&host->tuning_timer);
host->flags &= ~SDHCI_NEEDS_RETUNING;
}
ret = mmc_suspend_host(host->mmc);
if (ret) {
- if (has_tuning_timer) {
+ if (host->flags & SDHCI_USING_RETUNING_TIMER) {
host->flags |= SDHCI_NEEDS_RETUNING;
mod_timer(&host->tuning_timer, jiffies +
host->tuning_count * HZ);
@@ -2450,8 +2471,7 @@ int sdhci_resume_host(struct sdhci_host *host)
host->ops->platform_resume(host);
/* Set the re-tuning expiration flag */
- if ((host->version >= SDHCI_SPEC_300) && host->tuning_count &&
- (host->tuning_mode == SDHCI_TUNING_MODE_1))
+ if (host->flags & SDHCI_USING_RETUNING_TIMER)
host->flags |= SDHCI_NEEDS_RETUNING;
return ret;
@@ -2490,8 +2510,7 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host)
int ret = 0;
/* Disable tuning since we are suspending */
- if (host->version >= SDHCI_SPEC_300 &&
- host->tuning_mode == SDHCI_TUNING_MODE_1) {
+ if (host->flags & SDHCI_USING_RETUNING_TIMER) {
del_timer_sync(&host->tuning_timer);
host->flags &= ~SDHCI_NEEDS_RETUNING;
}
@@ -2532,8 +2551,7 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
sdhci_do_enable_preset_value(host, true);
/* Set the re-tuning expiration flag */
- if ((host->version >= SDHCI_SPEC_300) && host->tuning_count &&
- (host->tuning_mode == SDHCI_TUNING_MODE_1))
+ if (host->flags & SDHCI_USING_RETUNING_TIMER)
host->flags |= SDHCI_NEEDS_RETUNING;
spin_lock_irqsave(&host->lock, flags);
@@ -2584,7 +2602,7 @@ EXPORT_SYMBOL_GPL(sdhci_alloc_host);
int sdhci_add_host(struct sdhci_host *host)
{
struct mmc_host *mmc;
- u32 caps[2];
+ u32 caps[2] = {0, 0};
u32 max_current_caps;
unsigned int ocr_avail;
int ret;
@@ -2614,8 +2632,10 @@ int sdhci_add_host(struct sdhci_host *host)
caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
sdhci_readl(host, SDHCI_CAPABILITIES);
- caps[1] = (host->version >= SDHCI_SPEC_300) ?
- sdhci_readl(host, SDHCI_CAPABILITIES_1) : 0;
+ if (host->version >= SDHCI_SPEC_300)
+ caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
+ host->caps1 :
+ sdhci_readl(host, SDHCI_CAPABILITIES_1);
if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
host->flags |= SDHCI_USE_SDMA;
@@ -2779,7 +2799,7 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
- mmc_card_is_removable(mmc))
+ !(host->mmc->caps & MMC_CAP_NONREMOVABLE))
mmc->caps |= MMC_CAP_NEEDS_POLL;
/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
@@ -2837,6 +2857,30 @@ int sdhci_add_host(struct sdhci_host *host)
SDHCI_RETUNING_MODE_SHIFT;
ocr_avail = 0;
+
+ host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
+ if (IS_ERR(host->vmmc)) {
+ pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
+ host->vmmc = NULL;
+ }
+
+#ifdef CONFIG_REGULATOR
+ if (host->vmmc) {
+ ret = regulator_is_supported_voltage(host->vmmc, 3300000,
+ 3300000);
+ if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_330)))
+ caps[0] &= ~SDHCI_CAN_VDD_330;
+ ret = regulator_is_supported_voltage(host->vmmc, 3000000,
+ 3000000);
+ if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_300)))
+ caps[0] &= ~SDHCI_CAN_VDD_300;
+ ret = regulator_is_supported_voltage(host->vmmc, 1800000,
+ 1800000);
+ if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_180)))
+ caps[0] &= ~SDHCI_CAN_VDD_180;
+ }
+#endif /* CONFIG_REGULATOR */
+
/*
* According to SD Host Controller spec v3.00, if the Host System
* can afford more than 150mA, Host Driver should set XPC to 1. Also
@@ -2845,55 +2889,45 @@ int sdhci_add_host(struct sdhci_host *host)
* value.
*/
max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
+ if (!max_current_caps && host->vmmc) {
+ u32 curr = regulator_get_current_limit(host->vmmc);
+ if (curr > 0) {
+
+ /* convert to SDHCI_MAX_CURRENT format */
+ curr = curr/1000; /* convert to mA */
+ curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
+
+ curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
+ max_current_caps =
+ (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
+ (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
+ (curr << SDHCI_MAX_CURRENT_180_SHIFT);
+ }
+ }
if (caps[0] & SDHCI_CAN_VDD_330) {
- int max_current_330;
-
ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
- max_current_330 = ((max_current_caps &
+ mmc->max_current_330 = ((max_current_caps &
SDHCI_MAX_CURRENT_330_MASK) >>
SDHCI_MAX_CURRENT_330_SHIFT) *
SDHCI_MAX_CURRENT_MULTIPLIER;
-
- if (max_current_330 > 150)
- mmc->caps |= MMC_CAP_SET_XPC_330;
}
if (caps[0] & SDHCI_CAN_VDD_300) {
- int max_current_300;
-
ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
- max_current_300 = ((max_current_caps &
+ mmc->max_current_300 = ((max_current_caps &
SDHCI_MAX_CURRENT_300_MASK) >>
SDHCI_MAX_CURRENT_300_SHIFT) *
SDHCI_MAX_CURRENT_MULTIPLIER;
-
- if (max_current_300 > 150)
- mmc->caps |= MMC_CAP_SET_XPC_300;
}
if (caps[0] & SDHCI_CAN_VDD_180) {
- int max_current_180;
-
ocr_avail |= MMC_VDD_165_195;
- max_current_180 = ((max_current_caps &
+ mmc->max_current_180 = ((max_current_caps &
SDHCI_MAX_CURRENT_180_MASK) >>
SDHCI_MAX_CURRENT_180_SHIFT) *
SDHCI_MAX_CURRENT_MULTIPLIER;
-
- if (max_current_180 > 150)
- mmc->caps |= MMC_CAP_SET_XPC_180;
-
- /* Maximum current capabilities of the host at 1.8V */
- if (max_current_180 >= 800)
- mmc->caps |= MMC_CAP_MAX_CURRENT_800;
- else if (max_current_180 >= 600)
- mmc->caps |= MMC_CAP_MAX_CURRENT_600;
- else if (max_current_180 >= 400)
- mmc->caps |= MMC_CAP_MAX_CURRENT_400;
- else
- mmc->caps |= MMC_CAP_MAX_CURRENT_200;
}
mmc->ocr_avail = ocr_avail;
@@ -2992,13 +3026,10 @@ int sdhci_add_host(struct sdhci_host *host)
ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
mmc_hostname(mmc), host);
- if (ret)
+ if (ret) {
+ pr_err("%s: Failed to request IRQ %d: %d\n",
+ mmc_hostname(mmc), host->irq, ret);
goto untasklet;
-
- host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
- if (IS_ERR(host->vmmc)) {
- pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
- host->vmmc = NULL;
}
sdhci_init(host, 0);
@@ -3016,8 +3047,11 @@ int sdhci_add_host(struct sdhci_host *host)
host->led.brightness_set = sdhci_led_control;
ret = led_classdev_register(mmc_dev(mmc), &host->led);
- if (ret)
+ if (ret) {
+ pr_err("%s: Failed to register LED device: %d\n",
+ mmc_hostname(mmc), ret);
goto reset;
+ }
#endif
mmiowb();
@@ -3081,8 +3115,6 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
free_irq(host->irq, host);
del_timer_sync(&host->timer);
- if (host->version >= SDHCI_SPEC_300)
- del_timer_sync(&host->tuning_timer);
tasklet_kill(&host->card_tasklet);
tasklet_kill(&host->finish_tasklet);
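
The regulator fallback above derives SDHCI_MAX_CURRENT fields when the controller reports none. A worked example, assuming the 4mA step of SDHCI_MAX_CURRENT_MULTIPLIER and the new 0xFF ceiling:

#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch of the uA -> register-field conversion only. */
static u32 sdhci_curr_field_sketch(int limit_ua)
{
        u32 curr = limit_ua / 1000;     /* uA -> mA */

        curr /= 4;                      /* SDHCI_MAX_CURRENT_MULTIPLIER (4mA/step) */
        return min_t(u32, curr, 0xFF);  /* SDHCI_MAX_CURRENT_LIMIT */
}

/*
 * sdhci_curr_field_sketch(1000000) -> 250: host advertises 1000mA
 * sdhci_curr_field_sketch(2000000) -> 255: clamped, i.e. 1020mA
 */

The same value is then shifted into the 3.3V, 3.0V and 1.8V fields, so the host advertises the regulator limit at every supported voltage.
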
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index f761f23..97653ea 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -205,6 +205,7 @@
#define SDHCI_CAPABILITIES_1 0x44
#define SDHCI_MAX_CURRENT 0x48
+#define SDHCI_MAX_CURRENT_LIMIT 0xFF
#define SDHCI_MAX_CURRENT_330_MASK 0x0000FF
#define SDHCI_MAX_CURRENT_330_SHIFT 0
#define SDHCI_MAX_CURRENT_300_MASK 0x00FF00
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 724b35e..b2af713 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -54,6 +54,8 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/mod_devicetable.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
@@ -384,6 +386,9 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
struct sh_dmae_slave *tx, *rx;
host->dma_active = false;
+ if (!pdata)
+ return;
+
/* We can only either use DMA for both Tx and Rx or not use it at all */
if (pdata->dma) {
dev_warn(&host->pd->dev,
@@ -444,13 +449,14 @@ static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
+ bool sup_pclk = p ? p->sup_pclk : false;
sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);
if (!clk)
return;
- if (p->sup_pclk && clk == host->clk)
+ if (sup_pclk && clk == host->clk)
sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
else
sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
@@ -892,21 +898,15 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
switch (mrq->cmd->opcode) {
/* MMCIF does not support SD/SDIO command */
- case SD_IO_SEND_OP_COND:
+ case MMC_SLEEP_AWAKE: /* = SD_IO_SEND_OP_COND (5) */
+ case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
+ if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR)
+ break;
case MMC_APP_CMD:
host->state = STATE_IDLE;
mrq->cmd->error = -ETIMEDOUT;
mmc_request_done(mmc, mrq);
return;
- case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
- if (!mrq->data) {
- /* send_if_cond cmd (not support) */
- host->state = STATE_IDLE;
- mrq->cmd->error = -ETIMEDOUT;
- mmc_request_done(mmc, mrq);
- return;
- }
- break;
default:
break;
}
@@ -916,10 +916,35 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
sh_mmcif_start_cmd(host, mrq);
}
+static int sh_mmcif_clk_update(struct sh_mmcif_host *host)
+{
+ int ret = clk_enable(host->hclk);
+
+ if (!ret) {
+ host->clk = clk_get_rate(host->hclk);
+ host->mmc->f_max = host->clk / 2;
+ host->mmc->f_min = host->clk / 512;
+ }
+
+ return ret;
+}
+
+static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
+{
+ struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
+ struct mmc_host *mmc = host->mmc;
+
+ if (pd && pd->set_pwr)
+ pd->set_pwr(host->pd, ios->power_mode != MMC_POWER_OFF);
+ if (!IS_ERR(mmc->supply.vmmc))
+ /* Errors ignored... */
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
+ ios->power_mode ? ios->vdd : 0);
+}
+
static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sh_mmcif_host *host = mmc_priv(mmc);
- struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
@@ -937,6 +962,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
sh_mmcif_request_dma(host, host->pd->dev.platform_data);
host->card_present = true;
}
+ sh_mmcif_set_power(host, ios);
} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
/* clock stop */
sh_mmcif_clock_control(host, 0);
@@ -948,9 +974,10 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
if (host->power) {
pm_runtime_put(&host->pd->dev);
+ clk_disable(host->hclk);
host->power = false;
- if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
- p->down_pwr(host->pd);
+ if (ios->power_mode == MMC_POWER_OFF)
+ sh_mmcif_set_power(host, ios);
}
host->state = STATE_IDLE;
return;
@@ -958,8 +985,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->clock) {
if (!host->power) {
- if (p->set_pwr)
- p->set_pwr(host->pd, ios->power_mode);
+ sh_mmcif_clk_update(host);
pm_runtime_get_sync(&host->pd->dev);
host->power = true;
sh_mmcif_sync_reset(host);
@@ -975,8 +1001,12 @@ static int sh_mmcif_get_cd(struct mmc_host *mmc)
{
struct sh_mmcif_host *host = mmc_priv(mmc);
struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
+ int ret = mmc_gpio_get_cd(mmc);
+
+ if (ret >= 0)
+ return ret;
- if (!p->get_cd)
+ if (!p || !p->get_cd)
return -ENOSYS;
else
return p->get_cd(host->pd);
@@ -1242,12 +1272,28 @@ static void mmcif_timeout_work(struct work_struct *work)
mmc_request_done(host->mmc, mrq);
}
+static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
+{
+ struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_get_supply(mmc);
+
+ if (!pd)
+ return;
+
+ if (!mmc->ocr_avail)
+ mmc->ocr_avail = pd->ocr;
+ else if (pd->ocr)
+ dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
+}
+
static int __devinit sh_mmcif_probe(struct platform_device *pdev)
{
int ret = 0, irq[2];
struct mmc_host *mmc;
struct sh_mmcif_host *host;
- struct sh_mmcif_plat_data *pd;
+ struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
struct resource *res;
void __iomem *reg;
char clk_name[8];
@@ -1268,42 +1314,26 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "ioremap error.\n");
return -ENOMEM;
}
- pd = pdev->dev.platform_data;
- if (!pd) {
- dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
- ret = -ENXIO;
- goto clean_up;
- }
+
mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
if (!mmc) {
ret = -ENOMEM;
- goto clean_up;
+ goto ealloch;
}
host = mmc_priv(mmc);
host->mmc = mmc;
host->addr = reg;
host->timeout = 1000;
- snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
- host->hclk = clk_get(&pdev->dev, clk_name);
- if (IS_ERR(host->hclk)) {
- dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
- ret = PTR_ERR(host->hclk);
- goto clean_up1;
- }
- clk_enable(host->hclk);
- host->clk = clk_get_rate(host->hclk);
host->pd = pdev;
spin_lock_init(&host->lock);
mmc->ops = &sh_mmcif_ops;
- mmc->f_max = host->clk / 2;
- mmc->f_min = host->clk / 512;
- if (pd->ocr)
- mmc->ocr_avail = pd->ocr;
+ sh_mmcif_init_ocr(host);
+
mmc->caps = MMC_CAP_MMC_HIGHSPEED;
- if (pd->caps)
+ if (pd && pd->caps)
mmc->caps |= pd->caps;
mmc->max_segs = 32;
mmc->max_blk_size = 512;
@@ -1311,34 +1341,52 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
mmc->max_seg_size = mmc->max_req_size;
- sh_mmcif_sync_reset(host);
platform_set_drvdata(pdev, host);
pm_runtime_enable(&pdev->dev);
host->power = false;
+ snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
+ host->hclk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(host->hclk)) {
+ ret = PTR_ERR(host->hclk);
+ dev_err(&pdev->dev, "cannot get clock \"%s\": %d\n", clk_name, ret);
+ goto eclkget;
+ }
+ ret = sh_mmcif_clk_update(host);
+ if (ret < 0)
+ goto eclkupdate;
+
ret = pm_runtime_resume(&pdev->dev);
if (ret < 0)
- goto clean_up2;
+ goto eresume;
INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);
+ sh_mmcif_sync_reset(host);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host);
if (ret) {
dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
- goto clean_up3;
+ goto ereqirq0;
}
ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host);
if (ret) {
dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
- goto clean_up4;
+ goto ereqirq1;
+ }
+
+ if (pd && pd->use_cd_gpio) {
+ ret = mmc_gpio_request_cd(mmc, pd->cd_gpio);
+ if (ret < 0)
+ goto erqcd;
}
+ clk_disable(host->hclk);
ret = mmc_add_host(mmc);
if (ret < 0)
- goto clean_up5;
+ goto emmcaddh;
dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
@@ -1347,33 +1395,42 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
return ret;
-clean_up5:
+emmcaddh:
+ if (pd && pd->use_cd_gpio)
+ mmc_gpio_free_cd(mmc);
+erqcd:
free_irq(irq[1], host);
-clean_up4:
+ereqirq1:
free_irq(irq[0], host);
-clean_up3:
+ereqirq0:
pm_runtime_suspend(&pdev->dev);
-clean_up2:
- pm_runtime_disable(&pdev->dev);
+eresume:
clk_disable(host->hclk);
-clean_up1:
+eclkupdate:
+ clk_put(host->hclk);
+eclkget:
+ pm_runtime_disable(&pdev->dev);
mmc_free_host(mmc);
-clean_up:
- if (reg)
- iounmap(reg);
+ealloch:
+ iounmap(reg);
return ret;
}
static int __devexit sh_mmcif_remove(struct platform_device *pdev)
{
struct sh_mmcif_host *host = platform_get_drvdata(pdev);
+ struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
int irq[2];
host->dying = true;
+ clk_enable(host->hclk);
pm_runtime_get_sync(&pdev->dev);
dev_pm_qos_hide_latency_limit(&pdev->dev);
+ if (pd && pd->use_cd_gpio)
+ mmc_gpio_free_cd(host->mmc);
+
mmc_remove_host(host->mmc);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
@@ -1395,9 +1452,9 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- clk_disable(host->hclk);
mmc_free_host(host->mmc);
pm_runtime_put_sync(&pdev->dev);
+ clk_disable(host->hclk);
pm_runtime_disable(&pdev->dev);
return 0;
@@ -1406,24 +1463,18 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int sh_mmcif_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct sh_mmcif_host *host = platform_get_drvdata(pdev);
+ struct sh_mmcif_host *host = dev_get_drvdata(dev);
int ret = mmc_suspend_host(host->mmc);
- if (!ret) {
+ if (!ret)
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
- clk_disable(host->hclk);
- }
return ret;
}
static int sh_mmcif_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct sh_mmcif_host *host = platform_get_drvdata(pdev);
-
- clk_enable(host->hclk);
+ struct sh_mmcif_host *host = dev_get_drvdata(dev);
return mmc_resume_host(host->mmc);
}
@@ -1432,6 +1483,12 @@ static int sh_mmcif_resume(struct device *dev)
#define sh_mmcif_resume NULL
#endif /* CONFIG_PM */
+static const struct of_device_id mmcif_of_match[] = {
+ { .compatible = "renesas,sh-mmcif" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mmcif_of_match);
+
static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
.suspend = sh_mmcif_suspend,
.resume = sh_mmcif_resume,
@@ -1443,6 +1500,8 @@ static struct platform_driver sh_mmcif_driver = {
.driver = {
.name = DRIVER_NAME,
.pm = &sh_mmcif_dev_pm_ops,
+ .owner = THIS_MODULE,
+ .of_match_table = mmcif_of_match,
},
};
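
The OCR setup above asks the regulator framework first and only falls back to the board-supplied mask when no vmmc regulator is present. A minimal sketch of that ordering, with the platform mask passed in as a plain argument (pd_ocr is an assumed name, not a field from this driver):

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mmc/host.h>

static void example_init_ocr(struct mmc_host *mmc, u32 pd_ocr)
{
        /* fills mmc->ocr_avail from the vmmc regulator, when one exists */
        mmc_regulator_get_supply(mmc);

        if (!mmc->ocr_avail)
                mmc->ocr_avail = pd_ocr;                /* board fallback */
        else if (pd_ocr)
                dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
}
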
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 934b68e..a842939 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mmc/host.h>
@@ -39,22 +40,39 @@ struct sh_mobile_sdhi {
struct tmio_mmc_dma dma_priv;
};
+static int sh_mobile_sdhi_clk_enable(struct platform_device *pdev, unsigned int *f)
+{
+ struct mmc_host *mmc = dev_get_drvdata(&pdev->dev);
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
+ int ret = clk_enable(priv->clk);
+ if (ret < 0)
+ return ret;
+
+ *f = clk_get_rate(priv->clk);
+ return 0;
+}
+
+static void sh_mobile_sdhi_clk_disable(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(&pdev->dev);
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
+ clk_disable(priv->clk);
+}
+
static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state)
{
struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
- if (p && p->set_pwr)
- p->set_pwr(pdev, state);
+ p->set_pwr(pdev, state);
}
static int sh_mobile_sdhi_get_cd(struct platform_device *pdev)
{
struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
- if (p && p->get_cd)
- return p->get_cd(pdev);
- else
- return -ENOSYS;
+ return p->get_cd(pdev);
}
static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
@@ -116,12 +134,14 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
}
mmc_data = &priv->mmc_data;
- p->pdata = mmc_data;
- if (p->init) {
- ret = p->init(pdev, &sdhi_ops);
- if (ret)
- goto einit;
+ if (p) {
+ p->pdata = mmc_data;
+ if (p->init) {
+ ret = p->init(pdev, &sdhi_ops);
+ if (ret)
+ goto einit;
+ }
}
snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id);
@@ -132,9 +152,8 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
goto eclkget;
}
- mmc_data->hclk = clk_get_rate(priv->clk);
- mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
- mmc_data->get_cd = sh_mobile_sdhi_get_cd;
+ mmc_data->clk_enable = sh_mobile_sdhi_clk_enable;
+ mmc_data->clk_disable = sh_mobile_sdhi_clk_disable;
mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
if (p) {
mmc_data->flags = p->tmio_flags;
@@ -142,7 +161,12 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
mmc_data->ocr_mask = p->tmio_ocr_mask;
mmc_data->capabilities |= p->tmio_caps;
+ mmc_data->capabilities2 |= p->tmio_caps2;
mmc_data->cd_gpio = p->cd_gpio;
+ if (p->set_pwr)
+ mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
+ if (p->get_cd)
+ mmc_data->get_cd = sh_mobile_sdhi_get_cd;
if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) {
priv->param_tx.slave_id = p->dma_slave_tx;
@@ -248,7 +272,7 @@ eirq_card_detect:
eprobe:
clk_put(priv->clk);
eclkget:
- if (p->cleanup)
+ if (p && p->cleanup)
p->cleanup(pdev);
einit:
kfree(priv);
@@ -263,7 +287,8 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
int i = 0, irq;
- p->pdata = NULL;
+ if (p)
+ p->pdata = NULL;
tmio_mmc_host_remove(host);
@@ -276,7 +301,7 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
clk_put(priv->clk);
- if (p->cleanup)
+ if (p && p->cleanup)
p->cleanup(pdev);
kfree(priv);
@@ -291,11 +316,18 @@ static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
.runtime_resume = tmio_mmc_host_runtime_resume,
};
+static const struct of_device_id sh_mobile_sdhi_of_match[] = {
+ { .compatible = "renesas,shmobile-sdhi" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
+
static struct platform_driver sh_mobile_sdhi_driver = {
.driver = {
.name = "sh_mobile_sdhi",
.owner = THIS_MODULE,
.pm = &tmio_mmc_dev_pm_ops,
+ .of_match_table = sh_mobile_sdhi_of_match,
},
.probe = sh_mobile_sdhi_probe,
.remove = __devexit_p(sh_mobile_sdhi_remove),
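
The new clk_enable/clk_disable callbacks recover the driver-private structure from the tmio_mmc_data pointer they were handed, using container_of(). A self-contained illustration of that idiom with simplified stand-in structures (shared_data and sdhi_priv are assumed names):

#include <stddef.h>

/* simplified version of the kernel's container_of() */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct shared_data { int hz; };                 /* what the core sees */

struct sdhi_priv {
        void *clk;                              /* driver-only state */
        struct shared_data mmc_data;            /* embedded, handed out as pdata */
};

/* the core only ever holds &priv->mmc_data; the callback gets priv back */
static struct sdhi_priv *priv_from_pdata(struct shared_data *pdata)
{
        return container_of(pdata, struct sdhi_priv, mmc_data);
}
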
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 9a7996a..0d8a9bb 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -34,8 +34,9 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
-#include <linux/mmc/cd-gpio.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/slot-gpio.h>
#include <linux/mmc/tmio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
@@ -305,8 +306,8 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command
int c = cmd->opcode;
u32 irq_mask = TMIO_MASK_CMD;
- /* Command 12 is handled by hardware */
- if (cmd->opcode == 12 && !cmd->arg) {
+ /* CMD12 is handled by hardware */
+ if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) {
sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
return 0;
}
@@ -449,7 +450,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
}
if (stop) {
- if (stop->opcode == 12 && !stop->arg)
+ if (stop->opcode == MMC_STOP_TRANSMISSION && !stop->arg)
sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
else
BUG();
@@ -751,6 +752,34 @@ fail:
mmc_request_done(mmc, mrq);
}
+static int tmio_mmc_clk_update(struct mmc_host *mmc)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct tmio_mmc_data *pdata = host->pdata;
+ int ret;
+
+ if (!pdata->clk_enable)
+ return -ENOTSUPP;
+
+ ret = pdata->clk_enable(host->pdev, &mmc->f_max);
+ if (!ret)
+ mmc->f_min = mmc->f_max / 512;
+
+ return ret;
+}
+
+static void tmio_mmc_set_power(struct tmio_mmc_host *host, struct mmc_ios *ios)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ if (host->set_pwr)
+ host->set_pwr(host->pdev, ios->power_mode != MMC_POWER_OFF);
+ if (!IS_ERR(mmc->supply.vmmc))
+ /* Errors ignored... */
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
+ ios->power_mode ? ios->vdd : 0);
+}
+
/* Set MMC clock / power.
* Note: This controller uses a simple divider scheme therefore it cannot
* run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
@@ -797,32 +826,37 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
*/
if (ios->power_mode == MMC_POWER_ON && ios->clock) {
if (!host->power) {
+ tmio_mmc_clk_update(mmc);
pm_runtime_get_sync(dev);
host->power = true;
}
tmio_mmc_set_clock(host, ios->clock);
/* power up SD bus */
- if (host->set_pwr)
- host->set_pwr(host->pdev, 1);
+ tmio_mmc_set_power(host, ios);
/* start bus clock */
tmio_mmc_clk_start(host);
} else if (ios->power_mode != MMC_POWER_UP) {
- if (host->set_pwr && ios->power_mode == MMC_POWER_OFF)
- host->set_pwr(host->pdev, 0);
+ if (ios->power_mode == MMC_POWER_OFF)
+ tmio_mmc_set_power(host, ios);
if (host->power) {
+ struct tmio_mmc_data *pdata = host->pdata;
+ tmio_mmc_clk_stop(host);
host->power = false;
pm_runtime_put(dev);
+ if (pdata->clk_disable)
+ pdata->clk_disable(host->pdev);
}
- tmio_mmc_clk_stop(host);
}
- switch (ios->bus_width) {
- case MMC_BUS_WIDTH_1:
- sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
- break;
- case MMC_BUS_WIDTH_4:
- sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
- break;
+ if (host->power) {
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
+ break;
+ case MMC_BUS_WIDTH_4:
+ sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
+ break;
+ }
}
/* Let things settle. delay taken from winCE driver */
@@ -841,6 +875,9 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct tmio_mmc_data *pdata = host->pdata;
+ int ret = mmc_gpio_get_ro(mmc);
+ if (ret >= 0)
+ return ret;
return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
@@ -850,6 +887,9 @@ static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct tmio_mmc_data *pdata = host->pdata;
+ int ret = mmc_gpio_get_cd(mmc);
+ if (ret >= 0)
+ return ret;
if (!pdata->get_cd)
return -ENOSYS;
@@ -865,6 +905,19 @@ static const struct mmc_host_ops tmio_mmc_ops = {
.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
+static void tmio_mmc_init_ocr(struct tmio_mmc_host *host)
+{
+ struct tmio_mmc_data *pdata = host->pdata;
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_get_supply(mmc);
+
+ if (!mmc->ocr_avail)
+ mmc->ocr_avail = pdata->ocr_mask ? : MMC_VDD_32_33 | MMC_VDD_33_34;
+ else if (pdata->ocr_mask)
+ dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
+}
+
int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
struct platform_device *pdev,
struct tmio_mmc_data *pdata)
@@ -904,18 +957,14 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
mmc->ops = &tmio_mmc_ops;
mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
- mmc->f_max = pdata->hclk;
- mmc->f_min = mmc->f_max / 512;
+ mmc->caps2 = pdata->capabilities2;
mmc->max_segs = 32;
mmc->max_blk_size = 512;
mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
mmc->max_segs;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
- if (pdata->ocr_mask)
- mmc->ocr_avail = pdata->ocr_mask;
- else
- mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ tmio_mmc_init_ocr(_host);
_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
mmc->caps & MMC_CAP_NEEDS_POLL ||
@@ -927,6 +976,11 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
if (ret < 0)
goto pm_disable;
+ if (tmio_mmc_clk_update(mmc) < 0) {
+ mmc->f_max = pdata->hclk;
+ mmc->f_min = mmc->f_max / 512;
+ }
+
/*
* There are 4 different scenarios for the card detection:
* 1) an external gpio irq handles the cd (best for power savings)
@@ -937,7 +991,6 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
* While we increment the runtime PM counter for all scenarios when
* the mmc core activates us by calling an appropriate set_ios(), we
* must additionally ensure that in case 2) the tmio mmc hardware stays
- * additionally ensure that in case 2) the tmio mmc hardware stays
* powered on during runtime for the card detection to work.
*/
if (_host->native_hotplug)
@@ -948,6 +1001,17 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
_host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK);
tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
+
+ /* Unmask the IRQs we want to know about */
+ if (!_host->chan_rx)
+ irq_mask |= TMIO_MASK_READOP;
+ if (!_host->chan_tx)
+ irq_mask |= TMIO_MASK_WRITEOP;
+ if (!_host->native_hotplug)
+ irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
+
+ _host->sdcard_irq_mask &= ~irq_mask;
+
if (pdata->flags & TMIO_MMC_SDIO_IRQ)
tmio_mmc_enable_sdio_irq(mmc, 0);
@@ -961,22 +1025,18 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
/* See if we also get DMA */
tmio_mmc_request_dma(_host, pdata);
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (pdata->clk_disable)
+ pdata->clk_disable(pdev);
+ if (ret < 0) {
+ tmio_mmc_host_remove(_host);
+ return ret;
+ }
dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
- /* Unmask the IRQs we want to know about */
- if (!_host->chan_rx)
- irq_mask |= TMIO_MASK_READOP;
- if (!_host->chan_tx)
- irq_mask |= TMIO_MASK_WRITEOP;
- if (!_host->native_hotplug)
- irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
-
- tmio_mmc_enable_mmc_irqs(_host, irq_mask);
-
if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
- ret = mmc_cd_gpio_request(mmc, pdata->cd_gpio);
+ ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio);
if (ret < 0) {
tmio_mmc_host_remove(_host);
return ret;
@@ -1008,7 +1068,7 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
* This means we can miss a card-eject, but this is anyway
* possible, because of delayed processing of hotplug events.
*/
- mmc_cd_gpio_free(mmc);
+ mmc_gpio_free_cd(mmc);
if (!host->native_hotplug)
pm_runtime_get_sync(&pdev->dev);
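
The get_cd/get_ro rework follows one shape across these hosts: ask the slot-gpio helper first and fall back to the platform callback only when it returns a negative errno (meaning no CD/WP GPIO was registered). A minimal sketch, with the fallback passed as a bare function pointer for brevity:

#include <linux/errno.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

static int example_get_cd(struct mmc_host *mmc, int (*pdata_get_cd)(void))
{
        int ret = mmc_gpio_get_cd(mmc);         /* >= 0 when a CD GPIO exists */

        if (ret >= 0)
                return ret;

        if (!pdata_get_cd)
                return -ENOSYS;                 /* nothing left to ask */

        return pdata_get_cd();                  /* board-specific fallback */
}
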
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 5760c1a..27143e0 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -128,7 +128,7 @@ config MTD_AFS_PARTS
config MTD_OF_PARTS
tristate "OpenFirmware partitioning information support"
- default Y
+ default y
depends on OF
help
This provides a partition parsing function which derives
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
index 608321e..63d2a64 100644
--- a/drivers/mtd/bcm63xxpart.c
+++ b/drivers/mtd/bcm63xxpart.c
@@ -4,7 +4,7 @@
* Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
* Mike Albon <malbon@openwrt.org>
* Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
- * Copyright © 2011 Jonas Gorski <jonas.gorski@gmail.com>
+ * Copyright © 2011-2012 Jonas Gorski <jonas.gorski@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -82,6 +82,7 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
int namelen = 0;
int i;
u32 computed_crc;
+ bool rootfs_first = false;
if (bcm63xx_detect_cfe(master))
return -EINVAL;
@@ -109,6 +110,7 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
char *boardid = &(buf->board_id[0]);
char *tagversion = &(buf->tag_version[0]);
+ sscanf(buf->flash_image_start, "%u", &rootfsaddr);
sscanf(buf->kernel_address, "%u", &kerneladdr);
sscanf(buf->kernel_length, "%u", &kernellen);
sscanf(buf->total_length, "%u", &totallen);
@@ -117,10 +119,19 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
tagversion, boardid);
kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
- rootfsaddr = kerneladdr + kernellen;
+ rootfsaddr = rootfsaddr - BCM63XX_EXTENDED_SIZE;
spareaddr = roundup(totallen, master->erasesize) + cfelen;
sparelen = master->size - spareaddr - nvramlen;
- rootfslen = spareaddr - rootfsaddr;
+
+ if (rootfsaddr < kerneladdr) {
+ /* default Broadcom layout */
+ rootfslen = kerneladdr - rootfsaddr;
+ rootfs_first = true;
+ } else {
+ /* OpenWrt layout */
+ rootfsaddr = kerneladdr + kernellen;
+ rootfslen = spareaddr - rootfsaddr;
+ }
} else {
pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n",
buf->header_crc, computed_crc);
@@ -156,18 +167,26 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
curpart++;
if (kernellen > 0) {
- parts[curpart].name = "kernel";
- parts[curpart].offset = kerneladdr;
- parts[curpart].size = kernellen;
+ int kernelpart = curpart;
+
+ if (rootfslen > 0 && rootfs_first)
+ kernelpart++;
+ parts[kernelpart].name = "kernel";
+ parts[kernelpart].offset = kerneladdr;
+ parts[kernelpart].size = kernellen;
curpart++;
}
if (rootfslen > 0) {
- parts[curpart].name = "rootfs";
- parts[curpart].offset = rootfsaddr;
- parts[curpart].size = rootfslen;
- if (sparelen > 0)
- parts[curpart].size += sparelen;
+ int rootfspart = curpart;
+
+ if (kernellen > 0 && rootfs_first)
+ rootfspart--;
+ parts[rootfspart].name = "rootfs";
+ parts[rootfspart].offset = rootfsaddr;
+ parts[rootfspart].size = rootfslen;
+ if (sparelen > 0 && !rootfs_first)
+ parts[rootfspart].size += sparelen;
curpart++;
}
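
The partition ordering above comes down to comparing the two start addresses from the image tag: if rootfs starts below the kernel it is the stock Broadcom layout, otherwise the rootfs start is re-derived from the end of the kernel (OpenWrt layout). A standalone example with made-up addresses:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        /* made-up values as read from the CFE image tag */
        unsigned rootfsaddr = 0x010100, kerneladdr = 0x080000;
        unsigned kernellen = 0x300000, spareaddr = 0x780000;
        unsigned rootfslen;
        bool rootfs_first = rootfsaddr < kerneladdr;

        if (rootfs_first) {                     /* stock Broadcom layout */
                rootfslen = kerneladdr - rootfsaddr;
        } else {                                /* OpenWrt layout */
                rootfsaddr = kerneladdr + kernellen;
                rootfslen = spareaddr - rootfsaddr;
        }
        printf("rootfs @0x%x, len 0x%x, %s first\n",
               rootfsaddr, rootfslen, rootfs_first ? "rootfs" : "kernel");
        return 0;
}
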
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index d02592e..22d0493 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -317,7 +317,7 @@ static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
- pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
+ pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name);
}
}
@@ -328,10 +328,23 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
- pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
+ pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name);
}
}
+static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+
+ /*
+ * S29NS512P flash uses more than 8 bits to report the number of sectors,
+ * which is not permitted by CFI.
+ */
+ cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
+ pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name);
+}
+
/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
@@ -362,6 +375,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
+ { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index ddf9ec6..4558e0f 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -70,7 +70,7 @@ struct cmdline_mtd_partition {
/* mtdpart_setup() parses into here */
static struct cmdline_mtd_partition *partitions;
-/* the command line passed to mtdpart_setupd() */
+/* the command line passed to mtdpart_setup() */
static char *cmdline;
static int cmdline_parsed = 0;
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index a4a80b7..681e2ee 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -52,8 +52,6 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
while (pages) {
page = page_read(mapping, index);
- if (!page)
- return -ENOMEM;
if (IS_ERR(page))
return PTR_ERR(page);
@@ -112,8 +110,6 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
len = len - cpylen;
page = page_read(dev->blkdev->bd_inode->i_mapping, index);
- if (!page)
- return -ENOMEM;
if (IS_ERR(page))
return PTR_ERR(page);
@@ -148,8 +144,6 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
len = len - cpylen;
page = page_read(mapping, index);
- if (!page)
- return -ENOMEM;
if (IS_ERR(page))
return PTR_ERR(page);
@@ -271,7 +265,6 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
dev->mtd.flags = MTD_CAP_RAM;
dev->mtd._erase = block2mtd_erase;
dev->mtd._write = block2mtd_write;
- dev->mtd._writev = mtd_writev;
dev->mtd._sync = block2mtd_sync;
dev->mtd._read = block2mtd_read;
dev->mtd.priv = dev;
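
The dropped NULL checks rely on page_read() signalling failure through the ERR_PTR convention rather than returning NULL, so one IS_ERR() test covers every error path. The idiom in isolation (example_lookup() is a made-up callee):

#include <linux/err.h>
#include <linux/errno.h>

static int example_value;

/* the callee encodes an errno in the pointer instead of returning NULL */
static void *example_lookup(int ok)
{
        return ok ? (void *)&example_value : ERR_PTR(-ENOMEM);
}

static int example_caller(void)
{
        void *p = example_lookup(0);

        if (IS_ERR(p))                  /* one test covers every failure */
                return PTR_ERR(p);      /* propagate the encoded errno */
        return 0;
}
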
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 50aa90a..f70854d 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -227,7 +227,7 @@ static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
u8 data8, *dst8;
doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len);
- cdr = len & 0x3;
+ cdr = len & 0x1;
len4 = len - cdr;
if (first)
@@ -732,12 +732,24 @@ err:
* @len: the number of bytes to be read (must be a multiple of 4)
* @buf: the buffer to be filled in (or NULL to discard the bytes)
* @first: 1 if first time read, DOC_READADDRESS should be set
+ * @last_odd: 1 if last read ended up on an odd byte
+ *
+ * Reads bytes from a prepared page. There is a subtlety here: if the last read
+ * ended on an odd offset in the 1024-byte double page, i.e. between the two
+ * planes, the first byte must be read separately. If a 16-bit word read were
+ * used there, it would return the plane 2 byte in both the low and the high
+ * byte, corrupting the read.
*
*/
static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf,
- int first)
+ int first, int last_odd)
{
- doc_read_data_area(docg3, buf, len, first);
+ if (last_odd && len > 0) {
+ doc_read_data_area(docg3, buf, 1, first);
+ doc_read_data_area(docg3, buf ? buf + 1 : buf, len - 1, 0);
+ } else {
+ doc_read_data_area(docg3, buf, len, first);
+ }
doc_delay(docg3, 2);
return len;
}
@@ -850,6 +862,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
u8 *buf = ops->datbuf;
size_t len, ooblen, nbdata, nboob;
u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1;
+ int max_bitflips = 0;
if (buf)
len = ops->len;
@@ -876,7 +889,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
ret = 0;
skip = from % DOC_LAYOUT_PAGE_SIZE;
mutex_lock(&docg3->cascade->lock);
- while (!ret && (len > 0 || ooblen > 0)) {
+ while (ret >= 0 && (len > 0 || ooblen > 0)) {
calc_block_sector(from - skip, &block0, &block1, &page, &ofs,
docg3->reliable);
nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip);
@@ -887,20 +900,20 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
if (ret < 0)
goto err_in_read;
- ret = doc_read_page_getbytes(docg3, skip, NULL, 1);
+ ret = doc_read_page_getbytes(docg3, skip, NULL, 1, 0);
if (ret < skip)
goto err_in_read;
- ret = doc_read_page_getbytes(docg3, nbdata, buf, 0);
+ ret = doc_read_page_getbytes(docg3, nbdata, buf, 0, skip % 2);
if (ret < nbdata)
goto err_in_read;
doc_read_page_getbytes(docg3,
DOC_LAYOUT_PAGE_SIZE - nbdata - skip,
- NULL, 0);
- ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0);
+ NULL, 0, (skip + nbdata) % 2);
+ ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0, 0);
if (ret < nboob)
goto err_in_read;
doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob,
- NULL, 0);
+ NULL, 0, nboob % 2);
doc_get_bch_hw_ecc(docg3, hwecc);
eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
@@ -936,7 +949,8 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
}
if (ret > 0) {
mtd->ecc_stats.corrected += ret;
- ret = -EUCLEAN;
+ max_bitflips = max(max_bitflips, ret);
+ ret = max_bitflips;
}
}
@@ -1004,7 +1018,7 @@ static int doc_reload_bbt(struct docg3 *docg3)
DOC_LAYOUT_PAGE_SIZE);
if (!ret)
doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE,
- buf, 1);
+ buf, 1, 0);
buf += DOC_LAYOUT_PAGE_SIZE;
}
doc_read_page_finish(docg3);
@@ -1064,10 +1078,10 @@ static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
ret = doc_reset_seq(docg3);
if (!ret)
ret = doc_read_page_prepare(docg3, block0, block1, page,
- ofs + DOC_LAYOUT_WEAR_OFFSET);
+ ofs + DOC_LAYOUT_WEAR_OFFSET, 0);
if (!ret)
ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_WEAR_SIZE,
- buf, 1);
+ buf, 1, 0);
doc_read_page_finish(docg3);
if (ret || (buf[0] != DOC_ERASE_MARK) || (buf[2] != DOC_ERASE_MARK))
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 1924d24..5d0d68c 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -639,12 +639,16 @@ static const struct spi_device_id m25p_ids[] = {
{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
+ /* Everspin */
+ { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) },
+
/* Intel/Numonyx -- xxxs33b */
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
{ "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
{ "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
/* Macronix */
+ { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
{ "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
{ "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
{ "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
@@ -728,6 +732,7 @@ static const struct spi_device_id m25p_ids[] = {
{ "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+ { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
/* Catalyst / On Semiconductor -- non-JEDEC */
{ "cat25c11", CAT25_INFO( 16, 8, 16, 1) },
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 797d43c..6796036 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -990,9 +990,9 @@ static int __devinit spear_smi_probe(struct platform_device *pdev)
goto err_clk;
}
- ret = clk_enable(dev->clk);
+ ret = clk_prepare_enable(dev->clk);
if (ret)
- goto err_clk_enable;
+ goto err_clk_prepare_enable;
ret = request_irq(irq, spear_smi_int_handler, 0, pdev->name, dev);
if (ret) {
@@ -1020,8 +1020,8 @@ err_bank_setup:
free_irq(irq, dev);
platform_set_drvdata(pdev, NULL);
err_irq:
- clk_disable(dev->clk);
-err_clk_enable:
+ clk_disable_unprepare(dev->clk);
+err_clk_prepare_enable:
clk_put(dev->clk);
err_clk:
iounmap(dev->io_base);
@@ -1074,7 +1074,7 @@ static int __devexit spear_smi_remove(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
free_irq(irq, dev);
- clk_disable(dev->clk);
+ clk_disable_unprepare(dev->clk);
clk_put(dev->clk);
iounmap(dev->io_base);
kfree(dev);
@@ -1091,7 +1091,7 @@ int spear_smi_suspend(struct platform_device *pdev, pm_message_t state)
struct spear_smi *dev = platform_get_drvdata(pdev);
if (dev && dev->clk)
- clk_disable(dev->clk);
+ clk_disable_unprepare(dev->clk);
return 0;
}
@@ -1102,7 +1102,7 @@ int spear_smi_resume(struct platform_device *pdev)
int ret = -EPERM;
if (dev && dev->clk)
- ret = clk_enable(dev->clk);
+ ret = clk_prepare_enable(dev->clk);
if (!ret)
spear_smi_hw_init(dev);
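
The clock API conversion pairs preparation and enabling in a single call; every clk_prepare_enable() needs a matching clk_disable_unprepare() on the corresponding teardown path. A minimal sketch, assuming the clock was already obtained with clk_get():

#include <linux/clk.h>

static int example_power_up(struct clk *clk)
{
        /* clk_prepare() may sleep, clk_enable() may not; this does both */
        return clk_prepare_enable(clk);
}

static void example_power_down(struct clk *clk)
{
        /* mirror image of the call above */
        clk_disable_unprepare(clk);
}
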
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
index dbfe17b..45abed6 100644
--- a/drivers/mtd/lpddr/qinfo_probe.c
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -57,7 +57,7 @@ static struct qinfo_query_info qinfo_array[] = {
static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str)
{
- int qinfo_lines = sizeof(qinfo_array)/sizeof(struct qinfo_query_info);
+ int qinfo_lines = ARRAY_SIZE(qinfo_array);
int i;
int bankwidth = map_bankwidth(map) * 8;
int major, minor;
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 8af67cf..5ba2458 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -224,7 +224,7 @@ config MTD_CK804XROM
config MTD_SCB2_FLASH
tristate "BIOS flash chip on Intel SCB2 boards"
- depends on X86 && MTD_JEDECPROBE
+ depends on X86 && MTD_JEDECPROBE && PCI
help
Support for treating the BIOS flash chip on Intel SCB2 boards
as an MTD device - with this you can reprogram your BIOS.
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index 92e1f41..93f0317 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -260,18 +260,7 @@ static struct pci_driver vr_nor_pci_driver = {
.id_table = vr_nor_pci_ids,
};
-static int __init vr_nor_mtd_init(void)
-{
- return pci_register_driver(&vr_nor_pci_driver);
-}
-
-static void __exit vr_nor_mtd_exit(void)
-{
- pci_unregister_driver(&vr_nor_pci_driver);
-}
-
-module_init(vr_nor_mtd_init);
-module_exit(vr_nor_mtd_exit);
+module_pci_driver(vr_nor_pci_driver);
MODULE_AUTHOR("Andy Lowe");
MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range");
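
module_pci_driver() removes the registration boilerplate deleted here; it expands to roughly the module_init()/module_exit() pair shown below for a hypothetical driver:

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_driver example_pci_driver = {
        .name = "example",
        /* .id_table, .probe and .remove omitted */
};

/* what module_pci_driver(example_pci_driver) boils down to: */
static int __init example_init(void)
{
        return pci_register_driver(&example_pci_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
        pci_unregister_driver(&example_pci_driver);
}
module_exit(example_exit);
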
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index b5401e3..c03456f 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -19,9 +19,9 @@
#include <linux/mtd/cfi.h>
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
+#include <linux/of.h>
#include <lantiq_soc.h>
-#include <lantiq_platform.h>
/*
* The NOR flash is connected to the same external bus unit (EBU) as PCI.
@@ -44,8 +44,9 @@ struct ltq_mtd {
struct map_info *map;
};
-static char ltq_map_name[] = "ltq_nor";
-static const char *ltq_probe_types[] __devinitconst = { "cmdlinepart", NULL };
+static const char ltq_map_name[] = "ltq_nor";
+static const char *ltq_probe_types[] __devinitconst = {
+ "cmdlinepart", "ofpart", NULL };
static map_word
ltq_read16(struct map_info *map, unsigned long adr)
@@ -108,42 +109,38 @@ ltq_copy_to(struct map_info *map, unsigned long to,
spin_unlock_irqrestore(&ebu_lock, flags);
}
-static int __init
+static int __devinit
ltq_mtd_probe(struct platform_device *pdev)
{
- struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev);
+ struct mtd_part_parser_data ppdata;
struct ltq_mtd *ltq_mtd;
- struct resource *res;
struct cfi_private *cfi;
int err;
+ if (of_machine_is_compatible("lantiq,falcon") &&
+ (ltq_boot_select() != BS_FLASH)) {
+ dev_err(&pdev->dev, "invalid bootstrap options\n");
+ return -ENODEV;
+ }
+
ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL);
platform_set_drvdata(pdev, ltq_mtd);
ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!ltq_mtd->res) {
- dev_err(&pdev->dev, "failed to get memory resource");
+ dev_err(&pdev->dev, "failed to get memory resource\n");
err = -ENOENT;
goto err_out;
}
- res = devm_request_mem_region(&pdev->dev, ltq_mtd->res->start,
- resource_size(ltq_mtd->res), dev_name(&pdev->dev));
- if (!ltq_mtd->res) {
- dev_err(&pdev->dev, "failed to request mem resource");
- err = -EBUSY;
- goto err_out;
- }
-
ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
- ltq_mtd->map->phys = res->start;
- ltq_mtd->map->size = resource_size(res);
- ltq_mtd->map->virt = devm_ioremap_nocache(&pdev->dev,
- ltq_mtd->map->phys, ltq_mtd->map->size);
+ ltq_mtd->map->phys = ltq_mtd->res->start;
+ ltq_mtd->map->size = resource_size(ltq_mtd->res);
+ ltq_mtd->map->virt = devm_request_and_ioremap(&pdev->dev, ltq_mtd->res);
if (!ltq_mtd->map->virt) {
- dev_err(&pdev->dev, "failed to ioremap!\n");
- err = -ENOMEM;
- goto err_free;
+ dev_err(&pdev->dev, "failed to remap mem resource\n");
+ err = -EBUSY;
+ goto err_out;
}
ltq_mtd->map->name = ltq_map_name;
@@ -169,9 +166,9 @@ ltq_mtd_probe(struct platform_device *pdev)
cfi->addr_unlock1 ^= 1;
cfi->addr_unlock2 ^= 1;
- err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types, NULL,
- ltq_mtd_data->parts,
- ltq_mtd_data->nr_parts);
+ ppdata.of_node = pdev->dev.of_node;
+ err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types,
+ &ppdata, NULL, 0);
if (err) {
dev_err(&pdev->dev, "failed to add partitions\n");
goto err_destroy;
@@ -204,32 +201,23 @@ ltq_mtd_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ltq_mtd_match[] = {
+ { .compatible = "lantiq,nor" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_mtd_match);
+
static struct platform_driver ltq_mtd_driver = {
+ .probe = ltq_mtd_probe,
.remove = __devexit_p(ltq_mtd_remove),
.driver = {
- .name = "ltq_nor",
+ .name = "ltq-nor",
.owner = THIS_MODULE,
+ .of_match_table = ltq_mtd_match,
},
};
-static int __init
-init_ltq_mtd(void)
-{
- int ret = platform_driver_probe(&ltq_mtd_driver, ltq_mtd_probe);
-
- if (ret)
- pr_err("ltq_nor: error registering platform driver");
- return ret;
-}
-
-static void __exit
-exit_ltq_mtd(void)
-{
- platform_driver_unregister(&ltq_mtd_driver);
-}
-
-module_init(init_ltq_mtd);
-module_exit(exit_ltq_mtd);
+module_platform_driver(ltq_mtd_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
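
With the platform-data path gone, partition information is expected from the parsers: "ofpart" is added to the probe list and the device-tree node is handed over via mtd_part_parser_data. In isolation, assuming mtd and the node come from the surrounding probe:

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>

static const char *example_probe_types[] = { "cmdlinepart", "ofpart", NULL };

static int example_register(struct mtd_info *mtd, struct device_node *np)
{
        struct mtd_part_parser_data ppdata = { .of_node = np };

        /* no static table: the parsers (cmdline, then DT) provide partitions */
        return mtd_device_parse_register(mtd, example_probe_types,
                                         &ppdata, NULL, 0);
}
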
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 1d005a3..f14ce0a 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -352,18 +352,7 @@ static struct pci_driver mtd_pci_driver = {
.id_table = mtd_pci_ids,
};
-static int __init mtd_pci_maps_init(void)
-{
- return pci_register_driver(&mtd_pci_driver);
-}
-
-static void __exit mtd_pci_maps_exit(void)
-{
- pci_unregister_driver(&mtd_pci_driver);
-}
-
-module_init(mtd_pci_maps_init);
-module_exit(mtd_pci_maps_exit);
+module_pci_driver(mtd_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 934a72c..9dcbc68 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -234,20 +234,7 @@ static struct pci_driver scb2_flash_driver = {
.remove = __devexit_p(scb2_flash_remove),
};
-static int __init
-scb2_flash_init(void)
-{
- return pci_register_driver(&scb2_flash_driver);
-}
-
-static void __exit
-scb2_flash_exit(void)
-{
- pci_unregister_driver(&scb2_flash_driver);
-}
-
-module_init(scb2_flash_init);
-module_exit(scb2_flash_exit);
+module_pci_driver(scb2_flash_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Tim Hockin <thockin@sun.com>");
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index 71b0ba7..e7534c8 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -59,7 +59,7 @@ static struct mtd_partition bigflash_parts[] = {
}
};
-static const char *part_probes[] __initdata = {"cmdlinepart", "RedBoot", NULL};
+static const char *part_probes[] __initconst = {"cmdlinepart", "RedBoot", NULL};
#define init_sbc82xx_one_flash(map, br, or) \
do { \
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index c837507..5757307 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -250,6 +250,43 @@ static ssize_t mtd_name_show(struct device *dev,
}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
+static ssize_t mtd_ecc_strength_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
+}
+static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
+
+static ssize_t mtd_bitflip_threshold_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
+}
+
+static ssize_t mtd_bitflip_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ unsigned int bitflip_threshold;
+ int retval;
+
+ retval = kstrtouint(buf, 0, &bitflip_threshold);
+ if (retval)
+ return retval;
+
+ mtd->bitflip_threshold = bitflip_threshold;
+ return count;
+}
+static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
+ mtd_bitflip_threshold_show,
+ mtd_bitflip_threshold_store);
+
static struct attribute *mtd_attrs[] = {
&dev_attr_type.attr,
&dev_attr_flags.attr,
@@ -260,6 +297,8 @@ static struct attribute *mtd_attrs[] = {
&dev_attr_oobsize.attr,
&dev_attr_numeraseregions.attr,
&dev_attr_name.attr,
+ &dev_attr_ecc_strength.attr,
+ &dev_attr_bitflip_threshold.attr,
NULL,
};
@@ -322,6 +361,10 @@ int add_mtd_device(struct mtd_info *mtd)
mtd->index = i;
mtd->usecount = 0;
+ /* default value if not set by driver */
+ if (mtd->bitflip_threshold == 0)
+ mtd->bitflip_threshold = mtd->ecc_strength;
+
if (is_power_of_2(mtd->erasesize))
mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
else
@@ -757,12 +800,24 @@ EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
u_char *buf)
{
+ int ret_code;
*retlen = 0;
if (from < 0 || from > mtd->size || len > mtd->size - from)
return -EINVAL;
if (!len)
return 0;
- return mtd->_read(mtd, from, len, retlen, buf);
+
+ /*
+ * In the absence of an error, drivers return a non-negative integer
+ * representing the maximum number of bitflips that were corrected on
+ * any one ecc region (if applicable; zero otherwise).
+ */
+ ret_code = mtd->_read(mtd, from, len, retlen, buf);
+ if (unlikely(ret_code < 0))
+ return ret_code;
+ if (mtd->ecc_strength == 0)
+ return 0; /* device lacks ecc */
+ return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read);
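
This hunk splits the contract in two: the low-level _read() implementations now report the worst-case number of corrected bitflips per ECC region, and mtd_read() alone decides whether that count reaches bitflip_threshold and is reported as -EUCLEAN. For callers the handling stays the usual three-way check, sketched here with a hypothetical wrapper:

#include <linux/mtd/mtd.h>

static int example_read(struct mtd_info *mtd, loff_t from, size_t len,
                        u_char *buf)
{
        size_t retlen;
        int err = mtd_read(mtd, from, len, &retlen, buf);

        if (mtd_is_bitflip(err))        /* -EUCLEAN: data is good, block wears */
                return 0;               /* a caller might scrub the block here */
        if (mtd_is_eccerr(err))         /* -EBADMSG: uncorrectable */
                return err;
        return err;                     /* 0 or another error code */
}
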
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index ae36d7e..551e316 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -304,32 +304,17 @@ static void find_next_position(struct mtdoops_context *cxt)
}
static void mtdoops_do_dump(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason, const char *s1, unsigned long l1,
- const char *s2, unsigned long l2)
+ enum kmsg_dump_reason reason)
{
struct mtdoops_context *cxt = container_of(dumper,
struct mtdoops_context, dump);
- unsigned long s1_start, s2_start;
- unsigned long l1_cpy, l2_cpy;
- char *dst;
-
- if (reason != KMSG_DUMP_OOPS &&
- reason != KMSG_DUMP_PANIC)
- return;
/* Only dump oopses if dump_oops is set */
if (reason == KMSG_DUMP_OOPS && !dump_oops)
return;
- dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */
- l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE);
- l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy);
-
- s2_start = l2 - l2_cpy;
- s1_start = l1 - l1_cpy;
-
- memcpy(dst, s1 + s1_start, l1_cpy);
- memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
+ kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
+ record_size - MTDOOPS_HEADER_SIZE, NULL);
/* Panics must be written immediately */
if (reason != KMSG_DUMP_OOPS)
@@ -375,6 +360,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
return;
}
+ cxt->dump.max_reason = KMSG_DUMP_OOPS;
cxt->dump.dump = mtdoops_do_dump;
err = kmsg_dump_register(&cxt->dump);
if (err) {
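
The mtdoops conversion illustrates the reworked kmsg_dump interface: reason filtering moves into the dumper's max_reason field, and the callback pulls the tail of the kernel log itself with kmsg_dump_get_buffer() instead of receiving two string fragments. A bare-bones dumper along those lines (buffer size and storage backend are placeholders):

#include <linux/kmsg_dump.h>

static char example_buf[4096];

static void example_do_dump(struct kmsg_dumper *dumper,
                            enum kmsg_dump_reason reason)
{
        size_t len;

        /* copy as much of the tail of the kernel log as fits */
        kmsg_dump_get_buffer(dumper, true, example_buf,
                             sizeof(example_buf), &len);
        /* ... hand example_buf / len to the storage backend ... */
}

static struct kmsg_dumper example_dumper = {
        .max_reason = KMSG_DUMP_OOPS,   /* ignore anything milder */
        .dump = example_do_dump,
};

/* registered once, e.g. at init time, with kmsg_dump_register(&example_dumper) */
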
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 9651c06..d518e4d 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -67,12 +67,12 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
stats = part->master->ecc_stats;
res = part->master->_read(part->master, from + part->offset, len,
retlen, buf);
- if (unlikely(res)) {
- if (mtd_is_bitflip(res))
- mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
- if (mtd_is_eccerr(res))
- mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
- }
+ if (unlikely(mtd_is_eccerr(res)))
+ mtd->ecc_stats.failed +=
+ part->master->ecc_stats.failed - stats.failed;
+ else
+ mtd->ecc_stats.corrected +=
+ part->master->ecc_stats.corrected - stats.corrected;
return res;
}
@@ -517,6 +517,8 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
slave->mtd.ecclayout = master->ecclayout;
slave->mtd.ecc_strength = master->ecc_strength;
+ slave->mtd.bitflip_threshold = master->bitflip_threshold;
+
if (master->_block_isbad) {
uint64_t offs = 0;
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index a90bfe7..334da5f 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -63,7 +63,7 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
struct super_block *sb;
int ret;
- sb = sget(fs_type, get_sb_mtd_compare, get_sb_mtd_set, mtd);
+ sb = sget(fs_type, get_sb_mtd_compare, get_sb_mtd_set, flags, mtd);
if (IS_ERR(sb))
goto out_error;
@@ -74,8 +74,6 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
pr_debug("MTDSB: New superblock for device %d (\"%s\")\n",
mtd->index, mtd->name);
- sb->s_flags = flags;
-
ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
if (ret < 0) {
deactivate_locked_super(sb);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 7d17cec..31bb7e5 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -115,6 +115,46 @@ config MTD_NAND_OMAP2
Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
platforms.
+config MTD_NAND_OMAP_BCH
+ depends on MTD_NAND && MTD_NAND_OMAP2 && ARCH_OMAP3
+ bool "Enable support for hardware BCH error correction"
+ default n
+ select BCH
+ select BCH_CONST_PARAMS
+ help
+ Support for hardware BCH error correction.
+
+choice
+ prompt "BCH error correction capability"
+ depends on MTD_NAND_OMAP_BCH
+
+config MTD_NAND_OMAP_BCH8
+ bool "8 bits / 512 bytes (recommended)"
+ help
+ Support correcting up to 8 bitflips per 512-byte block.
+ This will use 13 bytes of spare area per 512 bytes of page data.
+ This is the recommended mode, as 4-bit mode does not work
+ on some OMAP3 revisions, due to a hardware bug.
+
+config MTD_NAND_OMAP_BCH4
+ bool "4 bits / 512 bytes"
+ help
+ Support correcting up to 4 bitflips per 512-byte block.
+ This will use 7 bytes of spare area per 512 bytes of page data.
+ Note that this mode does not work on some OMAP3 revisions, due to a
+ hardware bug. Please check your OMAP datasheet before selecting this
+ mode.
+
+endchoice
+
+if MTD_NAND_OMAP_BCH
+config BCH_CONST_M
+ default 13
+config BCH_CONST_T
+ default 4 if MTD_NAND_OMAP_BCH4
+ default 8 if MTD_NAND_OMAP_BCH8
+endif
+
config MTD_NAND_IDS
tristate
@@ -440,7 +480,7 @@ config MTD_NAND_NANDSIM
config MTD_NAND_GPMI_NAND
bool "GPMI NAND Flash Controller driver"
- depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28)
+ depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q)
help
Enables NAND Flash support for IMX23 or IMX28.
The GPMI controller is very powerful, with the help of BCH
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 4f20e1d..60a0dfd 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -414,7 +414,7 @@ static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len,
}
err = 0;
if (corrected)
- err = -EUCLEAN;
+ err = 1; /* return max_bitflips per ecc step */
if (uncorrected)
err = -EBADMSG;
out:
@@ -446,7 +446,7 @@ static int alauda_read(struct mtd_info *mtd, loff_t from, size_t len,
}
err = 0;
if (corrected)
- err = -EUCLEAN;
+ err = 1; /* return max_bitflips per ecc step */
if (uncorrected)
err = -EBADMSG;
return err;
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 2165576..97ac671 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -324,9 +324,10 @@ static int atmel_nand_calculate(struct mtd_info *mtd,
* mtd: mtd info structure
* chip: nand chip info structure
* buf: buffer to store read data
+ * oob_required: caller expects OOB data read to chip->oob_poi
*/
-static int atmel_nand_read_page(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf, int page)
+static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
{
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -335,6 +336,7 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
uint8_t *oob = chip->oob_poi;
uint8_t *ecc_pos;
int stat;
+ unsigned int max_bitflips = 0;
/*
* Errata: ALE is incorrectly wired up to the ECC controller
@@ -371,10 +373,12 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
/* check if there's an error */
stat = chip->ecc.correct(mtd, p, oob, NULL);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
/* get back to oob start (end of page) */
chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
@@ -382,7 +386,7 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
/* read the oob */
chip->read_buf(mtd, oob, mtd->oobsize);
- return 0;
+ return max_bitflips;
}
/*
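
Several read_page conversions in this series share the same accumulation pattern: the per-ECC-step correction count feeds both ecc_stats and a running maximum, and that maximum becomes the return value. Distilled, with correct_step() standing in for the per-step ECC correction:

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>

static int example_read_page(struct mtd_info *mtd, int eccsteps,
                             int (*correct_step)(struct mtd_info *, int))
{
        unsigned int max_bitflips = 0;
        int step, stat;

        for (step = 0; step < eccsteps; step++) {
                stat = correct_step(mtd, step); /* < 0 means uncorrectable */
                if (stat < 0) {
                        mtd->ecc_stats.failed++;
                } else {
                        mtd->ecc_stats.corrected += stat;
                        max_bitflips = max_t(unsigned int, max_bitflips, stat);
                }
        }
        /* mtd_read() compares this against mtd->bitflip_threshold */
        return max_bitflips;
}
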
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 73abbc3..9f609d2 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -508,8 +508,6 @@ static int __devinit au1550nd_probe(struct platform_device *pdev)
this->chip_delay = 30;
this->ecc.mode = NAND_ECC_SOFT;
- this->options = NAND_NO_AUTOINCR;
-
if (pd->devwidth)
this->options |= NAND_BUSWIDTH_16;
diff --git a/drivers/mtd/nand/bcm_umi_bch.c b/drivers/mtd/nand/bcm_umi_bch.c
index a930666..5914bb3 100644
--- a/drivers/mtd/nand/bcm_umi_bch.c
+++ b/drivers/mtd/nand/bcm_umi_bch.c
@@ -22,9 +22,9 @@
/* ---- Private Function Prototypes -------------------------------------- */
static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf, int page);
+ struct nand_chip *chip, uint8_t *buf, int oob_required, int page);
static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf);
+ struct nand_chip *chip, const uint8_t *buf, int oob_required);
/* ---- Private Variables ------------------------------------------------ */
@@ -103,11 +103,12 @@ static struct nand_ecclayout nand_hw_eccoob_4096 = {
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
+* @oob_required: caller expects OOB data read to chip->oob_poi
*
***************************************************************************/
static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
struct nand_chip *chip, uint8_t * buf,
- int page)
+ int oob_required, int page)
{
int sectorIdx = 0;
int eccsize = chip->ecc.size;
@@ -116,6 +117,7 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
uint8_t eccCalc[NAND_ECC_NUM_BYTES];
int sectorOobSize = mtd->oobsize / eccsteps;
int stat;
+ unsigned int max_bitflips = 0;
for (sectorIdx = 0; sectorIdx < eccsteps;
sectorIdx++, datap += eccsize) {
@@ -177,9 +179,10 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
}
#endif
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
}
}
- return 0;
+ return max_bitflips;
}
/****************************************************************************
@@ -188,10 +191,11 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: data buffer
+* @oob_required: must write chip->oob_poi to OOB
*
***************************************************************************/
static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf)
+ struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
int sectorIdx = 0;
int eccsize = chip->ecc.size;
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index 6908cdd..c855e7c 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -341,7 +341,7 @@ static int bcm_umi_nand_verify_buf(struct mtd_info *mtd, const u_char * buf,
* for MLC parts which may have permanently stuck bits.
*/
struct nand_chip *chip = mtd->priv;
- int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0);
+ int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0, 0);
if (ret < 0)
return -EFAULT;
else {
@@ -476,12 +476,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
this->badblock_pattern = &largepage_bbt;
}
- /*
- * FIXME: ecc strength value of 6 bits per 512 bytes of data is a
- * conservative guess, given 13 ecc bytes and using bch alg.
- * (Assume Galois field order m=15 to allow a margin of error.)
- */
- this->ecc.strength = 6;
+ this->ecc.strength = 8;
#endif
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index d7b86b9..3f1c185 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -558,7 +558,7 @@ static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
}
static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
bf5xx_nand_read_buf(mtd, buf, mtd->writesize);
bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -567,7 +567,7 @@ static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip
}
static void bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 2a96e1a..f3f6cfe 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -102,7 +102,7 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
static int cafe_device_ready(struct mtd_info *mtd)
{
struct cafe_priv *cafe = mtd->priv;
- int result = !!(cafe_readl(cafe, NAND_STATUS) | 0x40000000);
+ int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000);
uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
cafe_writel(cafe, irqs, NAND_IRQ);
@@ -364,25 +364,27 @@ static int cafe_nand_write_oob(struct mtd_info *mtd,
/* Don't use -- use nand_read_oob_std for now */
static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int sndcmd)
+ int page)
{
chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return 1;
+ return 0;
}
/**
* cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
+ * @oob_required: caller expects OOB data read to chip->oob_poi
*
* The hw generator calculates the error syndrome automatically. Therefore
* we need a special oob layout and handling.
*/
static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
struct cafe_priv *cafe = mtd->priv;
+ unsigned int max_bitflips = 0;
cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n",
cafe_readl(cafe, NAND_ECC_RESULT),
@@ -449,10 +451,11 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
} else {
dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n);
mtd->ecc_stats.corrected += n;
+ max_bitflips = max_t(unsigned int, max_bitflips, n);
}
}
- return 0;
+ return max_bitflips;
}
static struct nand_ecclayout cafe_oobinfo_2048 = {
@@ -518,7 +521,8 @@ static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf)
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
{
struct cafe_priv *cafe = mtd->priv;
@@ -530,16 +534,17 @@ static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
}
static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int page, int cached, int raw)
+ const uint8_t *buf, int oob_required, int page,
+ int cached, int raw)
{
int status;
chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
if (unlikely(raw))
- chip->ecc.write_page_raw(mtd, chip, buf);
+ chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
else
- chip->ecc.write_page(mtd, chip, buf);
+ chip->ecc.write_page(mtd, chip, buf, oob_required);
/*
* Cached programming disabled for now. Not sure if it's worth the
@@ -685,7 +690,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
/* Enable the following for a flash based bad block table */
cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
- cafe->nand.options = NAND_NO_AUTOINCR | NAND_OWN_BUFFERS;
+ cafe->nand.options = NAND_OWN_BUFFERS;
if (skipbbt) {
cafe->nand.options |= NAND_SKIP_BBTSCAN;
@@ -888,17 +893,7 @@ static struct pci_driver cafe_nand_pci_driver = {
.resume = cafe_nand_resume,
};
-static int __init cafe_nand_init(void)
-{
- return pci_register_driver(&cafe_nand_pci_driver);
-}
-
-static void __exit cafe_nand_exit(void)
-{
- pci_unregister_driver(&cafe_nand_pci_driver);
-}
-module_init(cafe_nand_init);
-module_exit(cafe_nand_exit);
+module_pci_driver(cafe_nand_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index 821c34c..adb6c3e 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -240,7 +240,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
/* Enable the following for a flash based bad block table */
this->bbt_options = NAND_BBT_USE_FLASH;
- this->options = NAND_NO_AUTOINCR;
/* Scan to find existence of the device */
if (nand_scan(new_mtd, 1)) {
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index a9e57d6..0650aaf 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -924,9 +924,10 @@ bool is_erased(uint8_t *buf, int len)
#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
- uint32_t irq_status)
+ uint32_t irq_status, unsigned int *max_bitflips)
{
bool check_erased_page = false;
+ unsigned int bitflips = 0;
if (irq_status & INTR_STATUS__ECC_ERR) {
/* read the ECC errors. we'll ignore them for now */
@@ -965,6 +966,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
/* correct the ECC error */
buf[offset] ^= err_correction_value;
denali->mtd.ecc_stats.corrected++;
+ bitflips++;
}
} else {
/* if the error is not correctable, need to
@@ -984,6 +986,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
clear_interrupts(denali);
denali_set_intr_modes(denali, true);
}
+ *max_bitflips = bitflips;
return check_erased_page;
}
@@ -1084,7 +1087,7 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
* by write_page above.
* */
static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
/* for regular page writes, we let HW handle all the ECC
* data written to the device. */
@@ -1096,7 +1099,7 @@ static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
* write_page() function above.
*/
static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
/* for raw page writes, we want to disable ECC and simply write
whatever data is in the buffer. */
@@ -1110,17 +1113,17 @@ static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
}
static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int sndcmd)
+ int page)
{
read_oob_data(mtd, chip->oob_poi, page);
- return 0; /* notify NAND core to send command to
- NAND device. */
+ return 0;
}
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
+ unsigned int max_bitflips;
struct denali_nand_info *denali = mtd_to_denali(mtd);
dma_addr_t addr = denali->buf.dma_buf;
@@ -1153,7 +1156,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
memcpy(buf, denali->buf.buf, mtd->writesize);
- check_erased_page = handle_ecc(denali, buf, irq_status);
+ check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips);
denali_enable_dma(denali, false);
if (check_erased_page) {
@@ -1167,11 +1170,11 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
denali->mtd.ecc_stats.failed++;
}
}
- return 0;
+ return max_bitflips;
}
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
struct denali_nand_info *denali = mtd_to_denali(mtd);
@@ -1702,17 +1705,4 @@ static struct pci_driver denali_pci_driver = {
.remove = denali_pci_remove,
};
-static int __devinit denali_init(void)
-{
- printk(KERN_INFO "Spectra MTD driver\n");
- return pci_register_driver(&denali_pci_driver);
-}
-
-/* Free memory */
-static void __devexit denali_exit(void)
-{
- pci_unregister_driver(&denali_pci_driver);
-}
-
-module_init(denali_init);
-module_exit(denali_exit);
+module_pci_driver(denali_pci_driver);
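The denali read path above now returns the maximum number of bitflips corrected in any single ECC step instead of always returning 0. A minimal sketch of how a caller can act on that convention, assuming the mtd core's bitflip_threshold comparison (illustrative only, not the actual core code):

#include <linux/errno.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

static int read_and_classify(struct mtd_info *mtd, struct nand_chip *chip,
			     uint8_t *buf, int page)
{
	int max_bitflips = chip->ecc.read_page(mtd, chip, buf, 1, page);

	if (max_bitflips < 0)
		return max_bitflips;			/* hard failure */
	if (max_bitflips >= mtd->bitflip_threshold)
		return -EUCLEAN;			/* data OK, block is wearing out */
	return 0;
}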
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index b082026..a225e49 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -720,6 +720,7 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
struct docg4_priv *doc = nand->priv;
void __iomem *docptr = doc->virtadr;
uint16_t status, edc_err, *buf16;
+ int bits_corrected = 0;
dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
@@ -772,7 +773,7 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
/* If bitflips are reported, attempt to correct with ecc */
if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) {
- int bits_corrected = correct_data(mtd, buf, page);
+ bits_corrected = correct_data(mtd, buf, page);
if (bits_corrected == -EBADMSG)
mtd->ecc_stats.failed++;
else
@@ -781,24 +782,24 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
}
writew(0, docptr + DOC_DATAEND);
- return 0;
+ return bits_corrected;
}
static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
return read_page(mtd, nand, buf, page, false);
}
static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
return read_page(mtd, nand, buf, page, true);
}
static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
- int page, int sndcmd)
+ int page)
{
struct docg4_priv *doc = nand->priv;
void __iomem *docptr = doc->virtadr;
@@ -952,13 +953,13 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *nand,
}
static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
return write_page(mtd, nand, buf, false);
}
static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
return write_page(mtd, nand, buf, true);
}
@@ -1002,7 +1003,7 @@ static int __init read_factory_bbt(struct mtd_info *mtd)
return -ENOMEM;
read_page_prologue(mtd, g4_addr);
- status = docg4_read_page(mtd, nand, buf, DOCG4_FACTORY_BBT_PAGE);
+ status = docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE);
if (status)
goto exit;
@@ -1079,7 +1080,7 @@ static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
/* write first page of block */
write_page_prologue(mtd, g4_addr);
- docg4_write_page(mtd, nand, buf);
+ docg4_write_page(mtd, nand, buf, 1);
ret = pageprog(mtd);
if (!ret)
mtd->ecc_stats.badblocks++;
@@ -1192,8 +1193,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
nand->ecc.prepad = 8;
nand->ecc.bytes = 8;
nand->ecc.strength = DOCG4_T;
- nand->options =
- NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE | NAND_NO_AUTOINCR;
+ nand->options = NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE;
nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
nand->controller = &nand->hwcontrol;
spin_lock_init(&nand->controller->lock);
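The new oob_required argument threaded through these hooks tells the driver whether the spare area actually has to be transferred into chip->oob_poi. A rough sketch of the calling side, assuming the mtd_oob_ops convention that OOB is only wanted when the caller supplied an OOB buffer:

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

static int sketch_read_one_page(struct mtd_info *mtd, struct nand_chip *chip,
				struct mtd_oob_ops *ops, uint8_t *buf, int page)
{
	int oob_required = ops->oobbuf ? 1 : 0;

	/* drivers may skip the spare-area transfer when oob_required == 0 */
	return chip->ecc.read_page(mtd, chip, buf, oob_required, page);
}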
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 80b5264..7842938 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -75,6 +75,7 @@ struct fsl_elbc_fcm_ctrl {
unsigned int use_mdr; /* Non zero if the MDR is to be set */
unsigned int oob; /* Non zero if operating on OOB data */
unsigned int counter; /* counter for the initializations */
+ unsigned int max_bitflips; /* Saved during READ0 cmd */
};
/* These map to the positions used by the FCM hardware ECC generator */
@@ -253,6 +254,8 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
if (chip->ecc.mode != NAND_ECC_HW)
return 0;
+ elbc_fcm_ctrl->max_bitflips = 0;
+
if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) {
uint32_t lteccr = in_be32(&lbc->lteccr);
/*
@@ -262,11 +265,16 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
* bits 28-31 are uncorrectable errors, marked elsewhere.
* for small page nand only 1 bit is used.
* if the ELBC doesn't have the lteccr register it reads 0
+ * FIXME: 4 bits can be corrected on NANDs with 2k pages, so
+ * count the number of sub-pages with bitflips and update
+ * ecc_stats.corrected accordingly.
*/
if (lteccr & 0x000F000F)
out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */
- if (lteccr & 0x000F0000)
+ if (lteccr & 0x000F0000) {
mtd->ecc_stats.corrected++;
+ elbc_fcm_ctrl->max_bitflips = 1;
+ }
}
return 0;
@@ -738,26 +746,28 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
return 0;
}
-static int fsl_elbc_read_page(struct mtd_info *mtd,
- struct nand_chip *chip,
- uint8_t *buf,
- int page)
+static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
{
+ struct fsl_elbc_mtd *priv = chip->priv;
+ struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+ struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+
fsl_elbc_read_buf(mtd, buf, mtd->writesize);
- fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ if (oob_required)
+ fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL)
mtd->ecc_stats.failed++;
- return 0;
+ return elbc_fcm_ctrl->max_bitflips;
}
/* ECC will be calculated automatically, and errors will be detected in
* waitfunc.
*/
-static void fsl_elbc_write_page(struct mtd_info *mtd,
- struct nand_chip *chip,
- const uint8_t *buf)
+static void fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
{
fsl_elbc_write_buf(mtd, buf, mtd->writesize);
fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -795,7 +805,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
chip->bbt_md = &bbt_mirror_descr;
/* set up nand options */
- chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
+ chip->options = NAND_NO_READRDY;
chip->bbt_options = NAND_BBT_USE_FLASH;
chip->controller = &elbc_fcm_ctrl->controller;
@@ -814,11 +824,6 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
chip->ecc.size = 512;
chip->ecc.bytes = 3;
chip->ecc.strength = 1;
- /*
- * FIXME: can hardware ecc correct 4 bitflips if page size is
- * 2k? Then does hardware report number of corrections for this
- * case? If so, ecc_stats reporting needs to be fixed as well.
- */
} else {
/* otherwise fall back to default software ECC */
chip->ecc.mode = NAND_ECC_SOFT;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index c30ac7b..9602c1b 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -63,6 +63,7 @@ struct fsl_ifc_nand_ctrl {
unsigned int oob; /* Non zero if operating on OOB data */
unsigned int eccread; /* Non zero for a full-page ECC read */
unsigned int counter; /* counter for the initializations */
+ unsigned int max_bitflips; /* Saved during READ0 cmd */
};
static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;
@@ -262,6 +263,8 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER)
dev_err(priv->dev, "NAND Flash Write Protect Error\n");
+ nctrl->max_bitflips = 0;
+
if (nctrl->eccread) {
int errors;
int bufnum = nctrl->page & priv->bufnum_mask;
@@ -290,6 +293,9 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
}
mtd->ecc_stats.corrected += errors;
+ nctrl->max_bitflips = max_t(unsigned int,
+ nctrl->max_bitflips,
+ errors);
}
nctrl->eccread = 0;
@@ -375,21 +381,31 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
return;
- /* READID must read all 8 possible bytes */
case NAND_CMD_READID:
+ case NAND_CMD_PARAM: {
+ int timing = IFC_FIR_OP_RB;
+ if (command == NAND_CMD_PARAM)
+ timing = IFC_FIR_OP_RBCD;
+
out_be32(&ifc->ifc_nand.nand_fir0,
(IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
+ (timing << IFC_NAND_FIR0_OP2_SHIFT));
out_be32(&ifc->ifc_nand.nand_fcr0,
- NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT);
- /* 8 bytes for manuf, device and exts */
- out_be32(&ifc->ifc_nand.nand_fbcr, 8);
- ifc_nand_ctrl->read_bytes = 8;
+ command << IFC_NAND_FCR0_CMD0_SHIFT);
+ out_be32(&ifc->ifc_nand.row3, column);
+
+ /*
+ * Although currently it's 8 bytes for READID, we always read
+ * the maximum 256 bytes (for PARAM)
+ */
+ out_be32(&ifc->ifc_nand.nand_fbcr, 256);
+ ifc_nand_ctrl->read_bytes = 256;
set_addr(mtd, 0, 0, 0);
fsl_ifc_run_command(mtd);
return;
+ }
/* ERASE1 stores the block and page address */
case NAND_CMD_ERASE1:
@@ -682,15 +698,16 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
return nand_fsr | NAND_STATUS_WP;
}
-static int fsl_ifc_read_page(struct mtd_info *mtd,
- struct nand_chip *chip,
- uint8_t *buf, int page)
+static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
{
struct fsl_ifc_mtd *priv = chip->priv;
struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+ struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
fsl_ifc_read_buf(mtd, buf, mtd->writesize);
- fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ if (oob_required)
+ fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER)
dev_err(priv->dev, "NAND Flash ECC Uncorrectable Error\n");
@@ -698,15 +715,14 @@ static int fsl_ifc_read_page(struct mtd_info *mtd,
if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
mtd->ecc_stats.failed++;
- return 0;
+ return nctrl->max_bitflips;
}
/* ECC will be calculated automatically, and errors will be detected in
* waitfunc.
*/
-static void fsl_ifc_write_page(struct mtd_info *mtd,
- struct nand_chip *chip,
- const uint8_t *buf)
+static void fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
{
fsl_ifc_write_buf(mtd, buf, mtd->writesize);
fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -789,7 +805,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
out_be32(&ifc->ifc_nand.ncfgr, 0x0);
/* set up nand options */
- chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
+ chip->options = NAND_NO_READRDY;
chip->bbt_options = NAND_BBT_USE_FLASH;
@@ -811,6 +827,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
/* Hardware generates ECC per 512 Bytes */
chip->ecc.size = 512;
chip->ecc.bytes = 8;
+ chip->ecc.strength = 4;
switch (csor & CSOR_NAND_PGS_MASK) {
case CSOR_NAND_PGS_512:
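The new NAND_CMD_PARAM case above exists so the core's ONFI detection can fetch the parameter page through the IFC. A minimal sketch of that style of access, assuming byte-wise reads via chip->read_byte (illustrative, not the exact nand_base code):

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

static void sketch_read_param_page(struct mtd_info *mtd, struct nand_chip *chip,
				   uint8_t *buf, size_t len)
{
	size_t i;

	/* issue the ONFI parameter-page read, then clock out the bytes */
	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
	for (i = 0; i < len; i++)
		buf[i] = chip->read_byte(mtd);
}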
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 1b8330e..38d2624 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -692,6 +692,7 @@ static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
+ * @oob_required: caller expects OOB data read to chip->oob_poi
* @page: page number to read
*
* This routine is needed for fsmc version 8 as reading from NAND chip has to be
@@ -701,7 +702,7 @@ static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
* max of 8 bits)
*/
static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
struct fsmc_nand_data *host = container_of(mtd,
struct fsmc_nand_data, mtd);
@@ -720,6 +721,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
*/
uint16_t ecc_oob[7];
uint8_t *oob = (uint8_t *)&ecc_oob[0];
+ unsigned int max_bitflips = 0;
for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
@@ -748,13 +750,15 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
}
- return 0;
+ return max_bitflips;
}
/*
@@ -994,9 +998,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
return PTR_ERR(host->clk);
}
- ret = clk_enable(host->clk);
+ ret = clk_prepare_enable(host->clk);
if (ret)
- goto err_clk_enable;
+ goto err_clk_prepare_enable;
/*
* This device ID is actually a common AMBA ID as used on the
@@ -1176,8 +1180,8 @@ err_req_write_chnl:
if (host->mode == USE_DMA_ACCESS)
dma_release_channel(host->read_dma_chan);
err_req_read_chnl:
- clk_disable(host->clk);
-err_clk_enable:
+ clk_disable_unprepare(host->clk);
+err_clk_prepare_enable:
clk_put(host->clk);
return ret;
}
@@ -1198,7 +1202,7 @@ static int fsmc_nand_remove(struct platform_device *pdev)
dma_release_channel(host->write_dma_chan);
dma_release_channel(host->read_dma_chan);
}
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
clk_put(host->clk);
}
@@ -1210,7 +1214,7 @@ static int fsmc_nand_suspend(struct device *dev)
{
struct fsmc_nand_data *host = dev_get_drvdata(dev);
if (host)
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
return 0;
}
@@ -1218,7 +1222,7 @@ static int fsmc_nand_resume(struct device *dev)
{
struct fsmc_nand_data *host = dev_get_drvdata(dev);
if (host) {
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
fsmc_nand_setup(host->regs_va, host->bank,
host->nand.options & NAND_BUSWIDTH_16,
host->dev_timings);
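The clk_enable()/clk_disable() calls above are converted to the prepare+enable pairs required by the common clock framework; every clk_prepare_enable() must be balanced by clk_disable_unprepare(). A minimal sketch of the pairing (error handling trimmed):

#include <linux/clk.h>

static int sketch_hw_init(struct clk *clk)
{
	int ret = clk_prepare_enable(clk);	/* prepare (may sleep) + enable */

	if (ret)
		return ret;
	/* ... program the controller ... */
	return 0;
}

static void sketch_hw_exit(struct clk *clk)
{
	clk_disable_unprepare(clk);		/* reverse order: disable + unprepare */
}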
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h
index 4effb8c..a092451 100644
--- a/drivers/mtd/nand/gpmi-nand/bch-regs.h
+++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h
@@ -51,15 +51,26 @@
#define BP_BCH_FLASH0LAYOUT0_ECC0 12
#define BM_BCH_FLASH0LAYOUT0_ECC0 (0xf << BP_BCH_FLASH0LAYOUT0_ECC0)
-#define BF_BCH_FLASH0LAYOUT0_ECC0(v) \
- (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) & BM_BCH_FLASH0LAYOUT0_ECC0)
+#define MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0 11
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0 (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)
+#define BF_BCH_FLASH0LAYOUT0_ECC0(v, x) \
+ (GPMI_IS_MX6Q(x) \
+ ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0) \
+ : (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) \
+ & BM_BCH_FLASH0LAYOUT0_ECC0) \
+ )
#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0
#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
(0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
-#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v) \
- (((v) << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)\
- & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
+ (0x3ff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v, x) \
+ (GPMI_IS_MX6Q(x) \
+ ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
+ : ((v) & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
+ )
#define HW_BCH_FLASH0LAYOUT1 0x00000090
@@ -72,13 +83,24 @@
#define BP_BCH_FLASH0LAYOUT1_ECCN 12
#define BM_BCH_FLASH0LAYOUT1_ECCN (0xf << BP_BCH_FLASH0LAYOUT1_ECCN)
-#define BF_BCH_FLASH0LAYOUT1_ECCN(v) \
- (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) & BM_BCH_FLASH0LAYOUT1_ECCN)
+#define MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN 11
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)
+#define BF_BCH_FLASH0LAYOUT1_ECCN(v, x) \
+ (GPMI_IS_MX6Q(x) \
+ ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN) \
+ : (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) \
+ & BM_BCH_FLASH0LAYOUT1_ECCN) \
+ )
#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0
#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
(0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
-#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v) \
- (((v) << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
- & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
+ (0x3ff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v, x) \
+ (GPMI_IS_MX6Q(x) \
+ ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
+ : ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
+ )
#endif
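The MX6Q variants of these macros exist because the ECC-strength field grows from 4 bits at bit 12 to 5 bits at bit 11, and the data-size field appears to be expressed in 32-bit words rather than bytes (hence the '>> 2'). A stand-alone sketch of the packing with GPMI_IS_MX6Q() reduced to a plain flag:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_ecc0(uint32_t strength, int is_mx6q)
{
	if (is_mx6q)
		return (strength << 11) & (0x1f << 11);	/* 5-bit field at bit 11 */
	return (strength << 12) & (0xf << 12);		/* 4-bit field at bit 12 */
}

static uint32_t pack_data0_size(uint32_t bytes, int is_mx6q)
{
	if (is_mx6q)
		return (bytes >> 2) & 0x3ff;	/* size in 32-bit words */
	return bytes & 0xfff;			/* size in bytes */
}

int main(void)
{
	printf("ecc0  mx28=%#x mx6q=%#x\n",
	       (unsigned)pack_ecc0(8, 0), (unsigned)pack_ecc0(8, 1));
	printf("data0 mx28=%#x mx6q=%#x\n",
	       (unsigned)pack_data0_size(512, 0), (unsigned)pack_data0_size(512, 1));
	return 0;
}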
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index e8ea710..a1f4332 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -21,7 +21,6 @@
#include <linux/mtd/gpmi-nand.h>
#include <linux/delay.h>
#include <linux/clk.h>
-#include <mach/mxs.h>
#include "gpmi-nand.h"
#include "gpmi-regs.h"
@@ -37,6 +36,8 @@ struct timing_threshod timing_default_threshold = {
.max_dll_delay_in_ns = 16,
};
+#define MXS_SET_ADDR 0x4
+#define MXS_CLR_ADDR 0x8
/*
* Clear the bit and poll it cleared. This is usually called with
* a reset address and mask being either SFTRST(bit 31) or CLKGATE
@@ -47,7 +48,7 @@ static int clear_poll_bit(void __iomem *addr, u32 mask)
int timeout = 0x400;
/* clear the bit */
- __mxs_clrl(mask, addr);
+ writel(mask, addr + MXS_CLR_ADDR);
/*
* SFTRST needs 3 GPMI clocks to settle, the reference manual
@@ -92,11 +93,11 @@ static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
goto error;
/* clear CLKGATE */
- __mxs_clrl(MODULE_CLKGATE, reset_addr);
+ writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
if (!just_enable) {
/* set SFTRST to reset the block */
- __mxs_setl(MODULE_SFTRST, reset_addr);
+ writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
udelay(1);
/* poll CLKGATE becoming set */
@@ -223,13 +224,13 @@ int bch_set_geometry(struct gpmi_nand_data *this)
/* Configure layout 0. */
writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
| BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
- | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength)
- | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size),
+ | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
+ | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
r->bch_regs + HW_BCH_FLASH0LAYOUT0);
writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
- | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength)
- | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size),
+ | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
+ | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
r->bch_regs + HW_BCH_FLASH0LAYOUT1);
/* Set *all* chip selects to use layout 0. */
@@ -255,11 +256,12 @@ static unsigned int ns_to_cycles(unsigned int time,
return max(k, min);
}
+#define DEF_MIN_PROP_DELAY 5
+#define DEF_MAX_PROP_DELAY 9
/* Apply timing to current hardware conditions. */
static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
struct gpmi_nfc_hardware_timing *hw)
{
- struct gpmi_nand_platform_data *pdata = this->pdata;
struct timing_threshod *nfc = &timing_default_threshold;
struct nand_chip *nand = &this->nand;
struct nand_timing target = this->timing;
@@ -276,8 +278,8 @@ static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
int ideal_sample_delay_in_ns;
unsigned int sample_delay_factor;
int tEYE;
- unsigned int min_prop_delay_in_ns = pdata->min_prop_delay_in_ns;
- unsigned int max_prop_delay_in_ns = pdata->max_prop_delay_in_ns;
+ unsigned int min_prop_delay_in_ns = DEF_MIN_PROP_DELAY;
+ unsigned int max_prop_delay_in_ns = DEF_MAX_PROP_DELAY;
/*
* If there are multiple chips, we need to relax the timings to allow
@@ -803,7 +805,8 @@ int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
if (GPMI_IS_MX23(this)) {
mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
- } else if (GPMI_IS_MX28(this)) {
+ } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6Q(this)) {
+ /* MX28 shares the same R/B register as MX6Q. */
mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
reg = readl(r->gpmi_regs + HW_GPMI_STAT);
} else
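The writel() calls above rely on the MXS register convention of shadow addresses for bit manipulation: writing a mask to reg + 0x4 sets those bits and writing it to reg + 0x8 clears them, with no read-modify-write cycle. A minimal sketch of helpers built on that convention (offsets match the MXS_SET_ADDR/MXS_CLR_ADDR defines above):

#include <linux/io.h>
#include <linux/types.h>

#define SKETCH_MXS_SET_ADDR	0x4
#define SKETCH_MXS_CLR_ADDR	0x8

static inline void sketch_mxs_set(void __iomem *reg, u32 mask)
{
	writel(mask, reg + SKETCH_MXS_SET_ADDR);	/* set the masked bits */
}

static inline void sketch_mxs_clr(void __iomem *reg, u32 mask)
{
	writel(mask, reg + SKETCH_MXS_CLR_ADDR);	/* clear the masked bits */
}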
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index b68e043..a6cad5c 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -25,6 +25,8 @@
#include <linux/mtd/gpmi-nand.h>
#include <linux/mtd/partitions.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include "gpmi-nand.h"
/* add our own bbt descriptor */
@@ -387,7 +389,7 @@ static void release_bch_irq(struct gpmi_nand_data *this)
static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
{
struct gpmi_nand_data *this = param;
- struct resource *r = this->private;
+ int dma_channel = (int)this->private;
if (!mxs_dma_is_apbh(chan))
return false;
@@ -399,7 +401,7 @@ static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
* for mx28 : MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7
* (These eight channels share the same IRQ!)
*/
- if (r->start <= chan->chan_id && chan->chan_id <= r->end) {
+ if (dma_channel == chan->chan_id) {
chan->private = &this->dma_data;
return true;
}
@@ -419,57 +421,45 @@ static void release_dma_channels(struct gpmi_nand_data *this)
static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
{
struct platform_device *pdev = this->pdev;
- struct gpmi_nand_platform_data *pdata = this->pdata;
- struct resources *res = &this->resources;
- struct resource *r, *r_dma;
- unsigned int i;
+ struct resource *r_dma;
+ struct device_node *dn;
+ int dma_channel;
+ unsigned int ret;
+ struct dma_chan *dma_chan;
+ dma_cap_mask_t mask;
+
+ /* dma channel, we only use the first one. */
+ dn = pdev->dev.of_node;
+ ret = of_property_read_u32(dn, "fsl,gpmi-dma-channel", &dma_channel);
+ if (ret) {
+ pr_err("unable to get DMA channel from dt.\n");
+ goto acquire_err;
+ }
+ this->private = (void *)dma_channel;
- r = platform_get_resource_byname(pdev, IORESOURCE_DMA,
- GPMI_NAND_DMA_CHANNELS_RES_NAME);
+ /* gpmi dma interrupt */
r_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
GPMI_NAND_DMA_INTERRUPT_RES_NAME);
- if (!r || !r_dma) {
+ if (!r_dma) {
pr_err("Can't get resource for DMA\n");
- return -ENXIO;
+ goto acquire_err;
}
+ this->dma_data.chan_irq = r_dma->start;
- /* used in gpmi_dma_filter() */
- this->private = r;
+ /* request dma channel */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
- for (i = r->start; i <= r->end; i++) {
- struct dma_chan *dma_chan;
- dma_cap_mask_t mask;
-
- if (i - r->start >= pdata->max_chip_count)
- break;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- /* get the DMA interrupt */
- if (r_dma->start == r_dma->end) {
- /* only register the first. */
- if (i == r->start)
- this->dma_data.chan_irq = r_dma->start;
- else
- this->dma_data.chan_irq = NO_IRQ;
- } else
- this->dma_data.chan_irq = r_dma->start + (i - r->start);
-
- dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
- if (!dma_chan)
- goto acquire_err;
-
- /* fill the first empty item */
- this->dma_chans[i - r->start] = dma_chan;
+ dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
+ if (!dma_chan) {
+ pr_err("dma_request_channel failed.\n");
+ goto acquire_err;
}
- res->dma_low_channel = r->start;
- res->dma_high_channel = i;
+ this->dma_chans[0] = dma_chan;
return 0;
acquire_err:
- pr_err("Can't acquire DMA channel %u\n", i);
release_dma_channels(this);
return -EINVAL;
}
@@ -851,7 +841,7 @@ static void block_mark_swapping(struct gpmi_nand_data *this,
}
static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
struct gpmi_nand_data *this = chip->priv;
struct bch_geometry *nfc_geo = &this->bch_geometry;
@@ -917,17 +907,20 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
mtd->ecc_stats.corrected += corrected;
}
- /*
- * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() for
- * details about our policy for delivering the OOB.
- *
- * We fill the caller's buffer with set bits, and then copy the block
- * mark to th caller's buffer. Note that, if block mark swapping was
- * necessary, it has already been done, so we can rely on the first
- * byte of the auxiliary buffer to contain the block mark.
- */
- memset(chip->oob_poi, ~0, mtd->oobsize);
- chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
+ if (oob_required) {
+ /*
+ * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
+ * for details about our policy for delivering the OOB.
+ *
+ * We fill the caller's buffer with set bits, and then copy the
+ * block mark to the caller's buffer. Note that, if block mark
+ * swapping was necessary, it has already been done, so we can
+ * rely on the first byte of the auxiliary buffer to contain
+ * the block mark.
+ */
+ memset(chip->oob_poi, ~0, mtd->oobsize);
+ chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
+ }
read_page_swap_end(this, buf, mtd->writesize,
this->payload_virt, this->payload_phys,
@@ -937,8 +930,8 @@ exit_nfc:
return ret;
}
-static void gpmi_ecc_write_page(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf)
+static void gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
{
struct gpmi_nand_data *this = chip->priv;
struct bch_geometry *nfc_geo = &this->bch_geometry;
@@ -1077,7 +1070,7 @@ exit_auxiliary:
* this driver.
*/
static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int sndcmd)
+ int page)
{
struct gpmi_nand_data *this = chip->priv;
@@ -1100,11 +1093,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
chip->oob_poi[0] = chip->read_byte(mtd);
}
- /*
- * Return true, indicating that the next call to this function must send
- * a command.
- */
- return true;
+ return 0;
}
static int
@@ -1318,7 +1307,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
/* Write the first page of the current stride. */
dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
- chip->ecc.write_page_raw(mtd, chip, buffer);
+ chip->ecc.write_page_raw(mtd, chip, buffer, 0);
chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
/* Wait for the write to finish. */
@@ -1444,6 +1433,10 @@ static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this)
if (ret)
return ret;
+ /* Adjust the ECC strength according to the chip. */
+ this->nand.ecc.strength = this->bch_geometry.ecc_strength;
+ this->mtd.ecc_strength = this->bch_geometry.ecc_strength;
+
/* NAND boot init, depends on the gpmi_set_geometry(). */
return nand_boot_init(this);
}
@@ -1471,9 +1464,9 @@ void gpmi_nfc_exit(struct gpmi_nand_data *this)
static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
{
- struct gpmi_nand_platform_data *pdata = this->pdata;
struct mtd_info *mtd = &this->mtd;
struct nand_chip *chip = &this->nand;
+ struct mtd_part_parser_data ppdata = {};
int ret;
/* init current chip */
@@ -1502,6 +1495,7 @@ static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
chip->options |= NAND_NO_SUBPAGE_WRITE;
chip->ecc.mode = NAND_ECC_HW;
chip->ecc.size = 1;
+ chip->ecc.strength = 8;
chip->ecc.layout = &gpmi_hw_ecclayout;
/* Allocate a temporary DMA buffer for reading ID in the nand_scan() */
@@ -1511,14 +1505,14 @@ static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
if (ret)
goto err_out;
- ret = nand_scan(mtd, pdata->max_chip_count);
+ ret = nand_scan(mtd, 1);
if (ret) {
pr_err("Chip scan failed\n");
goto err_out;
}
- ret = mtd_device_parse_register(mtd, NULL, NULL,
- pdata->partitions, pdata->partition_count);
+ ppdata.of_node = this->pdev->dev.of_node;
+ ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
if (ret)
goto err_out;
return 0;
@@ -1528,12 +1522,41 @@ err_out:
return ret;
}
+static const struct platform_device_id gpmi_ids[] = {
+ { .name = "imx23-gpmi-nand", .driver_data = IS_MX23, },
+ { .name = "imx28-gpmi-nand", .driver_data = IS_MX28, },
+ { .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, },
+ {},
+};
+
+static const struct of_device_id gpmi_nand_id_table[] = {
+ {
+ .compatible = "fsl,imx23-gpmi-nand",
+ .data = (void *)&gpmi_ids[IS_MX23]
+ }, {
+ .compatible = "fsl,imx28-gpmi-nand",
+ .data = (void *)&gpmi_ids[IS_MX28]
+ }, {
+ .compatible = "fsl,imx6q-gpmi-nand",
+ .data = (void *)&gpmi_ids[IS_MX6Q]
+ }, {}
+};
+MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
+
static int __devinit gpmi_nand_probe(struct platform_device *pdev)
{
- struct gpmi_nand_platform_data *pdata = pdev->dev.platform_data;
struct gpmi_nand_data *this;
+ const struct of_device_id *of_id;
int ret;
+ of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
+ if (of_id) {
+ pdev->id_entry = of_id->data;
+ } else {
+ pr_err("Failed to find the right device id.\n");
+ return -ENOMEM;
+ }
+
this = kzalloc(sizeof(*this), GFP_KERNEL);
if (!this) {
pr_err("Failed to allocate per-device memory\n");
@@ -1543,13 +1566,6 @@ static int __devinit gpmi_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, this);
this->pdev = pdev;
this->dev = &pdev->dev;
- this->pdata = pdata;
-
- if (pdata->platform_init) {
- ret = pdata->platform_init();
- if (ret)
- goto platform_init_error;
- }
ret = acquire_resources(this);
if (ret)
@@ -1567,7 +1583,6 @@ static int __devinit gpmi_nand_probe(struct platform_device *pdev)
exit_nfc_init:
release_resources(this);
-platform_init_error:
exit_acquire_resources:
platform_set_drvdata(pdev, NULL);
kfree(this);
@@ -1585,19 +1600,10 @@ static int __exit gpmi_nand_remove(struct platform_device *pdev)
return 0;
}
-static const struct platform_device_id gpmi_ids[] = {
- {
- .name = "imx23-gpmi-nand",
- .driver_data = IS_MX23,
- }, {
- .name = "imx28-gpmi-nand",
- .driver_data = IS_MX28,
- }, {},
-};
-
static struct platform_driver gpmi_nand_driver = {
.driver = {
.name = "gpmi-nand",
+ .of_match_table = gpmi_nand_id_table,
},
.probe = gpmi_nand_probe,
.remove = __exit_p(gpmi_nand_remove),
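The probe path above maps the OF compatible string back onto the existing platform_device_id table so the GPMI_IS_MX*() macros keep working unchanged. A reduced sketch of that lookup pattern (error handling trimmed; names follow the driver):

#include <linux/of_device.h>
#include <linux/platform_device.h>

static int sketch_identify(struct platform_device *pdev,
			   const struct of_device_id *table)
{
	const struct of_device_id *of_id = of_match_device(table, &pdev->dev);

	if (!of_id)
		return -ENODEV;

	/* reuse the platform_device_id entry stored in .data */
	pdev->id_entry = of_id->data;
	return 0;
}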
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index ec6180d..ce5daa1 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -266,8 +266,10 @@ extern int gpmi_read_page(struct gpmi_nand_data *,
#define STATUS_UNCORRECTABLE 0xfe
/* Use the platform_id to distinguish different Archs. */
-#define IS_MX23 0x1
-#define IS_MX28 0x2
+#define IS_MX23 0x0
+#define IS_MX28 0x1
+#define IS_MX6Q 0x2
#define GPMI_IS_MX23(x) ((x)->pdev->id_entry->driver_data == IS_MX23)
#define GPMI_IS_MX28(x) ((x)->pdev->id_entry->driver_data == IS_MX28)
+#define GPMI_IS_MX6Q(x) ((x)->pdev->id_entry->driver_data == IS_MX6Q)
#endif
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index 9bf5ce5..50166e9 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -124,7 +124,6 @@ static int __init h1910_init(void)
/* 15 us command delay time */
this->chip_delay = 50;
this->ecc.mode = NAND_ECC_SOFT;
- this->options = NAND_NO_AUTOINCR;
/* Scan to find existence of the device */
if (nand_scan(h1910_nand_mtd, 1)) {
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index e4147e8..a6fa884 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -332,11 +332,7 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
chip->ecc.mode = NAND_ECC_HW_OOB_FIRST;
chip->ecc.size = 512;
chip->ecc.bytes = 9;
- chip->ecc.strength = 2;
- /*
- * FIXME: ecc_strength value of 2 bits per 512 bytes of data is a
- * conservative guess, given 9 ecc bytes and reed-solomon alg.
- */
+ chip->ecc.strength = 4;
if (pdata)
chip->ecc.layout = pdata->ecc_layout;
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index c240cf1..c259c24 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -734,7 +734,6 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
chip->write_buf = mpc5121_nfc_write_buf;
chip->verify_buf = mpc5121_nfc_verify_buf;
chip->select_chip = mpc5121_nfc_select_chip;
- chip->options = NAND_NO_AUTOINCR;
chip->bbt_options = NAND_BBT_USE_FLASH;
chip->ecc.mode = NAND_ECC_SOFT;
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 9e374e9..6acc790 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -32,6 +32,8 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/completion.h>
+#include <linux/of_device.h>
+#include <linux/of_mtd.h>
#include <asm/mach/flash.h>
#include <mach/mxc_nand.h>
@@ -140,13 +142,47 @@
#define NFC_V3_DELAY_LINE (host->regs_ip + 0x34)
+struct mxc_nand_host;
+
+struct mxc_nand_devtype_data {
+ void (*preset)(struct mtd_info *);
+ void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
+ void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
+ void (*send_page)(struct mtd_info *, unsigned int);
+ void (*send_read_id)(struct mxc_nand_host *);
+ uint16_t (*get_dev_status)(struct mxc_nand_host *);
+ int (*check_int)(struct mxc_nand_host *);
+ void (*irq_control)(struct mxc_nand_host *, int);
+ u32 (*get_ecc_status)(struct mxc_nand_host *);
+ struct nand_ecclayout *ecclayout_512, *ecclayout_2k, *ecclayout_4k;
+ void (*select_chip)(struct mtd_info *mtd, int chip);
+ int (*correct_data)(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc);
+
+ /*
+ * On i.MX21 the CONFIG2:INT bit cannot be read if interrupts are masked
+ * (CONFIG1:INT_MSK is set). To handle this, the driver uses
+ * enable_irq/disable_irq_nosync instead of CONFIG1:INT_MSK
+ */
+ int irqpending_quirk;
+ int needs_ip;
+
+ size_t regs_offset;
+ size_t spare0_offset;
+ size_t axi_offset;
+
+ int spare_len;
+ int eccbytes;
+ int eccsize;
+};
+
struct mxc_nand_host {
struct mtd_info mtd;
struct nand_chip nand;
struct device *dev;
- void *spare0;
- void *main_area0;
+ void __iomem *spare0;
+ void __iomem *main_area0;
void __iomem *base;
void __iomem *regs;
@@ -163,16 +199,9 @@ struct mxc_nand_host {
uint8_t *data_buf;
unsigned int buf_start;
- int spare_len;
-
- void (*preset)(struct mtd_info *);
- void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
- void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
- void (*send_page)(struct mtd_info *, unsigned int);
- void (*send_read_id)(struct mxc_nand_host *);
- uint16_t (*get_dev_status)(struct mxc_nand_host *);
- int (*check_int)(struct mxc_nand_host *);
- void (*irq_control)(struct mxc_nand_host *, int);
+
+ const struct mxc_nand_devtype_data *devtype_data;
+ struct mxc_nand_platform_data pdata;
};
/* OOB placement block for use with hardware ecc generation */
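The devtype_data indirection introduced above replaces per-instance function pointers and cpu_is_*() checks with one const per-variant table selected at probe time. A small stand-alone sketch of the pattern (all names here are illustrative, not the driver's):

/* Variant-specific behaviour lives in a const ops table; the host only
 * keeps a pointer to whichever table was chosen at probe time. */
struct variant_ops {
	int  (*check_int)(void *ctx);
	void (*irq_control)(void *ctx, int activate);
};

struct host_sketch {
	const struct variant_ops *ops;	/* selected once, at probe */
	void *ctx;
};

static void host_mask_irq(struct host_sketch *host)
{
	host->ops->irq_control(host->ctx, 0);
}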
@@ -242,20 +271,26 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
}
};
-static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL };
+static const char *part_probes[] = { "RedBoot", "cmdlinepart", "ofpart", NULL };
-static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
+static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size)
{
- struct mxc_nand_host *host = dev_id;
-
- if (!host->check_int(host))
- return IRQ_NONE;
+ int i;
+ u32 *t = trg;
+ const __iomem u32 *s = src;
- host->irq_control(host, 0);
+ for (i = 0; i < (size >> 2); i++)
+ *t++ = __raw_readl(s++);
+}
- complete(&host->op_completion);
+static void memcpy32_toio(void __iomem *trg, const void *src, int size)
+{
+ int i;
+ u32 __iomem *t = trg;
+ const u32 *s = src;
- return IRQ_HANDLED;
+ for (i = 0; i < (size >> 2); i++)
+ __raw_writel(*s++, t++);
}
static int check_int_v3(struct mxc_nand_host *host)
@@ -280,26 +315,12 @@ static int check_int_v1_v2(struct mxc_nand_host *host)
if (!(tmp & NFC_V1_V2_CONFIG2_INT))
return 0;
- if (!cpu_is_mx21())
+ if (!host->devtype_data->irqpending_quirk)
writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
return 1;
}
-/*
- * It has been observed that the i.MX21 cannot read the CONFIG2:INT bit
- * if interrupts are masked (CONFIG1:INT_MSK is set). To handle this, the
- * driver can enable/disable the irq line rather than simply masking the
- * interrupts.
- */
-static void irq_control_mx21(struct mxc_nand_host *host, int activate)
-{
- if (activate)
- enable_irq(host->irq);
- else
- disable_irq_nosync(host->irq);
-}
-
static void irq_control_v1_v2(struct mxc_nand_host *host, int activate)
{
uint16_t tmp;
@@ -328,6 +349,47 @@ static void irq_control_v3(struct mxc_nand_host *host, int activate)
writel(tmp, NFC_V3_CONFIG2);
}
+static void irq_control(struct mxc_nand_host *host, int activate)
+{
+ if (host->devtype_data->irqpending_quirk) {
+ if (activate)
+ enable_irq(host->irq);
+ else
+ disable_irq_nosync(host->irq);
+ } else {
+ host->devtype_data->irq_control(host, activate);
+ }
+}
+
+static u32 get_ecc_status_v1(struct mxc_nand_host *host)
+{
+ return readw(NFC_V1_V2_ECC_STATUS_RESULT);
+}
+
+static u32 get_ecc_status_v2(struct mxc_nand_host *host)
+{
+ return readl(NFC_V1_V2_ECC_STATUS_RESULT);
+}
+
+static u32 get_ecc_status_v3(struct mxc_nand_host *host)
+{
+ return readl(NFC_V3_ECC_STATUS_RESULT);
+}
+
+static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
+{
+ struct mxc_nand_host *host = dev_id;
+
+ if (!host->devtype_data->check_int(host))
+ return IRQ_NONE;
+
+ irq_control(host, 0);
+
+ complete(&host->op_completion);
+
+ return IRQ_HANDLED;
+}
+
/* This function polls the NANDFC to wait for the basic operation to
* complete by checking the INT bit of config2 register.
*/
@@ -336,14 +398,14 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq)
int max_retries = 8000;
if (useirq) {
- if (!host->check_int(host)) {
+ if (!host->devtype_data->check_int(host)) {
INIT_COMPLETION(host->op_completion);
- host->irq_control(host, 1);
+ irq_control(host, 1);
wait_for_completion(&host->op_completion);
}
} else {
while (max_retries-- > 0) {
- if (host->check_int(host))
+ if (host->devtype_data->check_int(host))
break;
udelay(1);
@@ -374,7 +436,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
writew(cmd, NFC_V1_V2_FLASH_CMD);
writew(NFC_CMD, NFC_V1_V2_CONFIG2);
- if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) {
+ if (host->devtype_data->irqpending_quirk && (cmd == NAND_CMD_RESET)) {
int max_retries = 100;
/* Reset completion is indicated by NFC_CONFIG2 */
/* being set to 0 */
@@ -433,13 +495,27 @@ static void send_page_v3(struct mtd_info *mtd, unsigned int ops)
wait_op_done(host, false);
}
-static void send_page_v1_v2(struct mtd_info *mtd, unsigned int ops)
+static void send_page_v2(struct mtd_info *mtd, unsigned int ops)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+
+ /* NANDFC buffer 0 is used for page read/write */
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+
+ writew(ops, NFC_V1_V2_CONFIG2);
+
+ /* Wait for operation to complete */
+ wait_op_done(host, true);
+}
+
+static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
{
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
int bufs, i;
- if (nfc_is_v1() && mtd->writesize > 512)
+ if (mtd->writesize > 512)
bufs = 4;
else
bufs = 1;
@@ -463,7 +539,7 @@ static void send_read_id_v3(struct mxc_nand_host *host)
wait_op_done(host, true);
- memcpy(host->data_buf, host->main_area0, 16);
+ memcpy32_fromio(host->data_buf, host->main_area0, 16);
}
/* Request the NANDFC to perform a read of the NAND device ID. */
@@ -479,7 +555,7 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
/* Wait for operation to complete */
wait_op_done(host, true);
- memcpy(host->data_buf, host->main_area0, 16);
+ memcpy32_fromio(host->data_buf, host->main_area0, 16);
if (this->options & NAND_BUSWIDTH_16) {
/* compress the ID info */
@@ -555,7 +631,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
* additional correction. 2-Bit errors cannot be corrected by
* HW ECC, so we need to return failure
*/
- uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT);
+ uint16_t ecc_status = get_ecc_status_v1(host);
if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
@@ -580,10 +656,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
no_subpages = mtd->writesize >> 9;
- if (nfc_is_v21())
- ecc_stat = readl(NFC_V1_V2_ECC_STATUS_RESULT);
- else
- ecc_stat = readl(NFC_V3_ECC_STATUS_RESULT);
+ ecc_stat = host->devtype_data->get_ecc_status(host);
do {
err = ecc_stat & ecc_bit_mask;
@@ -616,7 +689,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
/* Check for status request */
if (host->status_request)
- return host->get_dev_status(host) & 0xFF;
+ return host->devtype_data->get_dev_status(host) & 0xFF;
ret = *(uint8_t *)(host->data_buf + host->buf_start);
host->buf_start++;
@@ -682,7 +755,7 @@ static int mxc_nand_verify_buf(struct mtd_info *mtd,
/* This function is used by upper layer for select and
* deselect of the NAND chip */
-static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
+static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip)
{
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
@@ -701,11 +774,30 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
clk_prepare_enable(host->clk);
host->clk_act = 1;
}
+}
+
+static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+
+ if (chip == -1) {
+ /* Disable the NFC clock */
+ if (host->clk_act) {
+ clk_disable(host->clk);
+ host->clk_act = 0;
+ }
+ return;
+ }
- if (nfc_is_v21()) {
- host->active_cs = chip;
- writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+ if (!host->clk_act) {
+ /* Enable the NFC clock */
+ clk_enable(host->clk);
+ host->clk_act = 1;
}
+
+ host->active_cs = chip;
+ writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
}
/*
@@ -718,23 +810,23 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom)
u16 i, j;
u16 n = mtd->writesize >> 9;
u8 *d = host->data_buf + mtd->writesize;
- u8 *s = host->spare0;
- u16 t = host->spare_len;
+ u8 __iomem *s = host->spare0;
+ u16 t = host->devtype_data->spare_len;
j = (mtd->oobsize / n >> 1) << 1;
if (bfrom) {
for (i = 0; i < n - 1; i++)
- memcpy(d + i * j, s + i * t, j);
+ memcpy32_fromio(d + i * j, s + i * t, j);
/* the last section */
- memcpy(d + i * j, s + i * t, mtd->oobsize - i * j);
+ memcpy32_fromio(d + i * j, s + i * t, mtd->oobsize - i * j);
} else {
for (i = 0; i < n - 1; i++)
- memcpy(&s[i * t], &d[i * j], j);
+ memcpy32_toio(&s[i * t], &d[i * j], j);
/* the last section */
- memcpy(&s[i * t], &d[i * j], mtd->oobsize - i * j);
+ memcpy32_toio(&s[i * t], &d[i * j], mtd->oobsize - i * j);
}
}
@@ -751,34 +843,44 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
* perform a read/write buf operation, the saved column
* address is used to index into the full page.
*/
- host->send_addr(host, 0, page_addr == -1);
+ host->devtype_data->send_addr(host, 0, page_addr == -1);
if (mtd->writesize > 512)
/* another col addr cycle for 2k page */
- host->send_addr(host, 0, false);
+ host->devtype_data->send_addr(host, 0, false);
}
/* Write out page address, if necessary */
if (page_addr != -1) {
/* paddr_0 - p_addr_7 */
- host->send_addr(host, (page_addr & 0xff), false);
+ host->devtype_data->send_addr(host, (page_addr & 0xff), false);
if (mtd->writesize > 512) {
if (mtd->size >= 0x10000000) {
/* paddr_8 - paddr_15 */
- host->send_addr(host, (page_addr >> 8) & 0xff, false);
- host->send_addr(host, (page_addr >> 16) & 0xff, true);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff,
+ false);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 16) & 0xff,
+ true);
} else
/* paddr_8 - paddr_15 */
- host->send_addr(host, (page_addr >> 8) & 0xff, true);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff, true);
} else {
/* One more address cycle for higher density devices */
if (mtd->size >= 0x4000000) {
/* paddr_8 - paddr_15 */
- host->send_addr(host, (page_addr >> 8) & 0xff, false);
- host->send_addr(host, (page_addr >> 16) & 0xff, true);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff,
+ false);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 16) & 0xff,
+ true);
} else
/* paddr_8 - paddr_15 */
- host->send_addr(host, (page_addr >> 8) & 0xff, true);
+ host->devtype_data->send_addr(host,
+ (page_addr >> 8) & 0xff, true);
}
}
}
@@ -800,7 +902,35 @@ static int get_eccsize(struct mtd_info *mtd)
return 8;
}
-static void preset_v1_v2(struct mtd_info *mtd)
+static void preset_v1(struct mtd_info *mtd)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+ struct mxc_nand_host *host = nand_chip->priv;
+ uint16_t config1 = 0;
+
+ if (nand_chip->ecc.mode == NAND_ECC_HW)
+ config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
+ if (!host->devtype_data->irqpending_quirk)
+ config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+ host->eccsize = 1;
+
+ writew(config1, NFC_V1_V2_CONFIG1);
+ /* preset operation */
+
+ /* Unlock the internal RAM Buffer */
+ writew(0x2, NFC_V1_V2_CONFIG);
+
+ /* Blocks to be unlocked */
+ writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
+ writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
+
+ /* Unlock Block Command for given address range */
+ writew(0x4, NFC_V1_V2_WRPROT);
+}
+
+static void preset_v2(struct mtd_info *mtd)
{
struct nand_chip *nand_chip = mtd->priv;
struct mxc_nand_host *host = nand_chip->priv;
@@ -809,13 +939,12 @@ static void preset_v1_v2(struct mtd_info *mtd)
if (nand_chip->ecc.mode == NAND_ECC_HW)
config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
- if (nfc_is_v21())
- config1 |= NFC_V2_CONFIG1_FP_INT;
+ config1 |= NFC_V2_CONFIG1_FP_INT;
- if (!cpu_is_mx21())
+ if (!host->devtype_data->irqpending_quirk)
config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
- if (nfc_is_v21() && mtd->writesize) {
+ if (mtd->writesize) {
uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
host->eccsize = get_eccsize(mtd);
@@ -834,20 +963,14 @@ static void preset_v1_v2(struct mtd_info *mtd)
writew(0x2, NFC_V1_V2_CONFIG);
/* Blocks to be unlocked */
- if (nfc_is_v21()) {
- writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
- writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
- writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
- writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
- writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
- writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
- writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
- writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
- } else if (nfc_is_v1()) {
- writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
- writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
- } else
- BUG();
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
+ writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
+ writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
/* Unlock Block Command for given address range */
writew(0x4, NFC_V1_V2_WRPROT);
@@ -937,15 +1060,15 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
/* Command pre-processing step */
switch (command) {
case NAND_CMD_RESET:
- host->preset(mtd);
- host->send_cmd(host, command, false);
+ host->devtype_data->preset(mtd);
+ host->devtype_data->send_cmd(host, command, false);
break;
case NAND_CMD_STATUS:
host->buf_start = 0;
host->status_request = true;
- host->send_cmd(host, command, true);
+ host->devtype_data->send_cmd(host, command, true);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
@@ -958,15 +1081,17 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
command = NAND_CMD_READ0; /* only READ0 is valid */
- host->send_cmd(host, command, false);
+ host->devtype_data->send_cmd(host, command, false);
mxc_do_addr_cycle(mtd, column, page_addr);
if (mtd->writesize > 512)
- host->send_cmd(host, NAND_CMD_READSTART, true);
+ host->devtype_data->send_cmd(host,
+ NAND_CMD_READSTART, true);
- host->send_page(mtd, NFC_OUTPUT);
+ host->devtype_data->send_page(mtd, NFC_OUTPUT);
- memcpy(host->data_buf, host->main_area0, mtd->writesize);
+ memcpy32_fromio(host->data_buf, host->main_area0,
+ mtd->writesize);
copy_spare(mtd, true);
break;
@@ -977,28 +1102,28 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
host->buf_start = column;
- host->send_cmd(host, command, false);
+ host->devtype_data->send_cmd(host, command, false);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
case NAND_CMD_PAGEPROG:
- memcpy(host->main_area0, host->data_buf, mtd->writesize);
+ memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
copy_spare(mtd, false);
- host->send_page(mtd, NFC_INPUT);
- host->send_cmd(host, command, true);
+ host->devtype_data->send_page(mtd, NFC_INPUT);
+ host->devtype_data->send_cmd(host, command, true);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
case NAND_CMD_READID:
- host->send_cmd(host, command, true);
+ host->devtype_data->send_cmd(host, command, true);
mxc_do_addr_cycle(mtd, column, page_addr);
- host->send_read_id(host);
+ host->devtype_data->send_read_id(host);
host->buf_start = column;
break;
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
- host->send_cmd(host, command, false);
+ host->devtype_data->send_cmd(host, command, false);
mxc_do_addr_cycle(mtd, column, page_addr);
break;
@@ -1032,15 +1157,191 @@ static struct nand_bbt_descr bbt_mirror_descr = {
.pattern = mirror_pattern,
};
+/* v1 + irqpending_quirk: i.MX21 */
+static const struct mxc_nand_devtype_data imx21_nand_devtype_data = {
+ .preset = preset_v1,
+ .send_cmd = send_cmd_v1_v2,
+ .send_addr = send_addr_v1_v2,
+ .send_page = send_page_v1,
+ .send_read_id = send_read_id_v1_v2,
+ .get_dev_status = get_dev_status_v1_v2,
+ .check_int = check_int_v1_v2,
+ .irq_control = irq_control_v1_v2,
+ .get_ecc_status = get_ecc_status_v1,
+ .ecclayout_512 = &nandv1_hw_eccoob_smallpage,
+ .ecclayout_2k = &nandv1_hw_eccoob_largepage,
+ .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .correct_data = mxc_nand_correct_data_v1,
+ .irqpending_quirk = 1,
+ .needs_ip = 0,
+ .regs_offset = 0xe00,
+ .spare0_offset = 0x800,
+ .spare_len = 16,
+ .eccbytes = 3,
+ .eccsize = 1,
+};
+
+/* v1 + !irqpending_quirk: i.MX27, i.MX31 */
+static const struct mxc_nand_devtype_data imx27_nand_devtype_data = {
+ .preset = preset_v1,
+ .send_cmd = send_cmd_v1_v2,
+ .send_addr = send_addr_v1_v2,
+ .send_page = send_page_v1,
+ .send_read_id = send_read_id_v1_v2,
+ .get_dev_status = get_dev_status_v1_v2,
+ .check_int = check_int_v1_v2,
+ .irq_control = irq_control_v1_v2,
+ .get_ecc_status = get_ecc_status_v1,
+ .ecclayout_512 = &nandv1_hw_eccoob_smallpage,
+ .ecclayout_2k = &nandv1_hw_eccoob_largepage,
+ .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .correct_data = mxc_nand_correct_data_v1,
+ .irqpending_quirk = 0,
+ .needs_ip = 0,
+ .regs_offset = 0xe00,
+ .spare0_offset = 0x800,
+ .axi_offset = 0,
+ .spare_len = 16,
+ .eccbytes = 3,
+ .eccsize = 1,
+};
+
+/* v21: i.MX25, i.MX35 */
+static const struct mxc_nand_devtype_data imx25_nand_devtype_data = {
+ .preset = preset_v2,
+ .send_cmd = send_cmd_v1_v2,
+ .send_addr = send_addr_v1_v2,
+ .send_page = send_page_v2,
+ .send_read_id = send_read_id_v1_v2,
+ .get_dev_status = get_dev_status_v1_v2,
+ .check_int = check_int_v1_v2,
+ .irq_control = irq_control_v1_v2,
+ .get_ecc_status = get_ecc_status_v2,
+ .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
+ .ecclayout_2k = &nandv2_hw_eccoob_largepage,
+ .ecclayout_4k = &nandv2_hw_eccoob_4k,
+ .select_chip = mxc_nand_select_chip_v2,
+ .correct_data = mxc_nand_correct_data_v2_v3,
+ .irqpending_quirk = 0,
+ .needs_ip = 0,
+ .regs_offset = 0x1e00,
+ .spare0_offset = 0x1000,
+ .axi_offset = 0,
+ .spare_len = 64,
+ .eccbytes = 9,
+ .eccsize = 0,
+};
+
+/* v3: i.MX51, i.MX53 */
+static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
+ .preset = preset_v3,
+ .send_cmd = send_cmd_v3,
+ .send_addr = send_addr_v3,
+ .send_page = send_page_v3,
+ .send_read_id = send_read_id_v3,
+ .get_dev_status = get_dev_status_v3,
+ .check_int = check_int_v3,
+ .irq_control = irq_control_v3,
+ .get_ecc_status = get_ecc_status_v3,
+ .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
+ .ecclayout_2k = &nandv2_hw_eccoob_largepage,
+ .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
+ .select_chip = mxc_nand_select_chip_v1_v3,
+ .correct_data = mxc_nand_correct_data_v2_v3,
+ .irqpending_quirk = 0,
+ .needs_ip = 1,
+ .regs_offset = 0,
+ .spare0_offset = 0x1000,
+ .axi_offset = 0x1e00,
+ .spare_len = 64,
+ .eccbytes = 0,
+ .eccsize = 0,
+};
+
+#ifdef CONFIG_OF_MTD
+static const struct of_device_id mxcnd_dt_ids[] = {
+ {
+ .compatible = "fsl,imx21-nand",
+ .data = &imx21_nand_devtype_data,
+ }, {
+ .compatible = "fsl,imx27-nand",
+ .data = &imx27_nand_devtype_data,
+ }, {
+ .compatible = "fsl,imx25-nand",
+ .data = &imx25_nand_devtype_data,
+ }, {
+ .compatible = "fsl,imx51-nand",
+ .data = &imx51_nand_devtype_data,
+ },
+ { /* sentinel */ }
+};
+
+static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
+{
+ struct device_node *np = host->dev->of_node;
+ struct mxc_nand_platform_data *pdata = &host->pdata;
+ const struct of_device_id *of_id =
+ of_match_device(mxcnd_dt_ids, host->dev);
+ int buswidth;
+
+ if (!np)
+ return 1;
+
+ if (of_get_nand_ecc_mode(np) >= 0)
+ pdata->hw_ecc = 1;
+
+ pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
+
+ buswidth = of_get_nand_bus_width(np);
+ if (buswidth < 0)
+ return buswidth;
+
+ pdata->width = buswidth / 8;
+
+ host->devtype_data = of_id->data;
+
+ return 0;
+}
+#else
+static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
+{
+ return 1;
+}
+#endif
+
+static int __init mxcnd_probe_pdata(struct mxc_nand_host *host)
+{
+ struct mxc_nand_platform_data *pdata = host->dev->platform_data;
+
+ if (!pdata)
+ return -ENODEV;
+
+ host->pdata = *pdata;
+
+ if (nfc_is_v1()) {
+ if (cpu_is_mx21())
+ host->devtype_data = &imx21_nand_devtype_data;
+ else
+ host->devtype_data = &imx27_nand_devtype_data;
+ } else if (nfc_is_v21()) {
+ host->devtype_data = &imx25_nand_devtype_data;
+ } else if (nfc_is_v3_2()) {
+ host->devtype_data = &imx51_nand_devtype_data;
+ } else
+ BUG();
+
+ return 0;
+}
+
static int __init mxcnd_probe(struct platform_device *pdev)
{
struct nand_chip *this;
struct mtd_info *mtd;
- struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
struct mxc_nand_host *host;
struct resource *res;
int err = 0;
- struct nand_ecclayout *oob_smallpage, *oob_largepage;
/* Allocate memory for MTD device structure and private data */
host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE +
@@ -1065,7 +1366,6 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->priv = host;
this->dev_ready = mxc_nand_dev_ready;
this->cmdfunc = mxc_nand_command;
- this->select_chip = mxc_nand_select_chip;
this->read_byte = mxc_nand_read_byte;
this->read_word = mxc_nand_read_word;
this->write_buf = mxc_nand_write_buf;
@@ -1095,36 +1395,26 @@ static int __init mxcnd_probe(struct platform_device *pdev)
host->main_area0 = host->base;
- if (nfc_is_v1() || nfc_is_v21()) {
- host->preset = preset_v1_v2;
- host->send_cmd = send_cmd_v1_v2;
- host->send_addr = send_addr_v1_v2;
- host->send_page = send_page_v1_v2;
- host->send_read_id = send_read_id_v1_v2;
- host->get_dev_status = get_dev_status_v1_v2;
- host->check_int = check_int_v1_v2;
- if (cpu_is_mx21())
- host->irq_control = irq_control_mx21;
- else
- host->irq_control = irq_control_v1_v2;
- }
+ err = mxcnd_probe_dt(host);
+ if (err > 0)
+ err = mxcnd_probe_pdata(host);
+ if (err < 0)
+ goto eirq;
- if (nfc_is_v21()) {
- host->regs = host->base + 0x1e00;
- host->spare0 = host->base + 0x1000;
- host->spare_len = 64;
- oob_smallpage = &nandv2_hw_eccoob_smallpage;
- oob_largepage = &nandv2_hw_eccoob_largepage;
- this->ecc.bytes = 9;
- } else if (nfc_is_v1()) {
- host->regs = host->base + 0xe00;
- host->spare0 = host->base + 0x800;
- host->spare_len = 16;
- oob_smallpage = &nandv1_hw_eccoob_smallpage;
- oob_largepage = &nandv1_hw_eccoob_largepage;
- this->ecc.bytes = 3;
- host->eccsize = 1;
- } else if (nfc_is_v3_2()) {
+ if (host->devtype_data->regs_offset)
+ host->regs = host->base + host->devtype_data->regs_offset;
+ host->spare0 = host->base + host->devtype_data->spare0_offset;
+ if (host->devtype_data->axi_offset)
+ host->regs_axi = host->base + host->devtype_data->axi_offset;
+
+ this->ecc.bytes = host->devtype_data->eccbytes;
+ host->eccsize = host->devtype_data->eccsize;
+
+ this->select_chip = host->devtype_data->select_chip;
+ this->ecc.size = 512;
+ this->ecc.layout = host->devtype_data->ecclayout_512;
+
+ if (host->devtype_data->needs_ip) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res) {
err = -ENODEV;
@@ -1135,42 +1425,22 @@ static int __init mxcnd_probe(struct platform_device *pdev)
err = -ENOMEM;
goto eirq;
}
- host->regs_axi = host->base + 0x1e00;
- host->spare0 = host->base + 0x1000;
- host->spare_len = 64;
- host->preset = preset_v3;
- host->send_cmd = send_cmd_v3;
- host->send_addr = send_addr_v3;
- host->send_page = send_page_v3;
- host->send_read_id = send_read_id_v3;
- host->check_int = check_int_v3;
- host->get_dev_status = get_dev_status_v3;
- host->irq_control = irq_control_v3;
- oob_smallpage = &nandv2_hw_eccoob_smallpage;
- oob_largepage = &nandv2_hw_eccoob_largepage;
- } else
- BUG();
-
- this->ecc.size = 512;
- this->ecc.layout = oob_smallpage;
+ }
- if (pdata->hw_ecc) {
+ if (host->pdata.hw_ecc) {
this->ecc.calculate = mxc_nand_calculate_ecc;
this->ecc.hwctl = mxc_nand_enable_hwecc;
- if (nfc_is_v1())
- this->ecc.correct = mxc_nand_correct_data_v1;
- else
- this->ecc.correct = mxc_nand_correct_data_v2_v3;
+ this->ecc.correct = host->devtype_data->correct_data;
this->ecc.mode = NAND_ECC_HW;
} else {
this->ecc.mode = NAND_ECC_SOFT;
}
- /* NAND bus width determines access funtions used by upper layer */
- if (pdata->width == 2)
+ /* NAND bus width determines access functions used by upper layer */
+ if (host->pdata.width == 2)
this->options |= NAND_BUSWIDTH_16;
- if (pdata->flash_bbt) {
+ if (host->pdata.flash_bbt) {
this->bbt_td = &bbt_main_descr;
this->bbt_md = &bbt_mirror_descr;
/* update flash based bbt */
@@ -1182,28 +1452,25 @@ static int __init mxcnd_probe(struct platform_device *pdev)
host->irq = platform_get_irq(pdev, 0);
/*
- * mask the interrupt. For i.MX21 explicitely call
- * irq_control_v1_v2 to use the mask bit. We can't call
- * disable_irq_nosync() for an interrupt we do not own yet.
+ * Use host->devtype_data->irq_control() here instead of irq_control()
+ * because we must not call disable_irq_nosync() without having
+ * requested the irq.
*/
- if (cpu_is_mx21())
- irq_control_v1_v2(host, 0);
- else
- host->irq_control(host, 0);
+ host->devtype_data->irq_control(host, 0);
err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
if (err)
goto eirq;
- host->irq_control(host, 0);
-
/*
- * Now that the interrupt is disabled make sure the interrupt
- * mask bit is cleared on i.MX21. Otherwise we can't read
- * the interrupt status bit on this machine.
+ * Now that we "own" the interrupt make sure the interrupt mask bit is
+ * cleared on i.MX21. Otherwise we can't read the interrupt status bit
+ * on this machine.
*/
- if (cpu_is_mx21())
- irq_control_v1_v2(host, 1);
+ if (host->devtype_data->irqpending_quirk) {
+ disable_irq_nosync(host->irq);
+ host->devtype_data->irq_control(host, 1);
+ }
/* first scan to find the device and get the page size */
if (nand_scan_ident(mtd, nfc_is_v21() ? 4 : 1, NULL)) {
@@ -1212,18 +1479,12 @@ static int __init mxcnd_probe(struct platform_device *pdev)
}
/* Call preset again, with correct writesize this time */
- host->preset(mtd);
+ host->devtype_data->preset(mtd);
if (mtd->writesize == 2048)
- this->ecc.layout = oob_largepage;
- if (nfc_is_v21() && mtd->writesize == 4096)
- this->ecc.layout = &nandv2_hw_eccoob_4k;
-
- /* second phase scan */
- if (nand_scan_tail(mtd)) {
- err = -ENXIO;
- goto escan;
- }
+ this->ecc.layout = host->devtype_data->ecclayout_2k;
+ else if (mtd->writesize == 4096)
+ this->ecc.layout = host->devtype_data->ecclayout_4k;
if (this->ecc.mode == NAND_ECC_HW) {
if (nfc_is_v1())
@@ -1232,9 +1493,19 @@ static int __init mxcnd_probe(struct platform_device *pdev)
this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
}
+ /* second phase scan */
+ if (nand_scan_tail(mtd)) {
+ err = -ENXIO;
+ goto escan;
+ }
+
/* Register the partitions */
- mtd_device_parse_register(mtd, part_probes, NULL, pdata->parts,
- pdata->nr_parts);
+ mtd_device_parse_register(mtd, part_probes,
+ &(struct mtd_part_parser_data){
+ .of_node = pdev->dev.of_node,
+ },
+ host->pdata.parts,
+ host->pdata.nr_parts);
platform_set_drvdata(pdev, host);
@@ -1275,6 +1546,8 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
static struct platform_driver mxcnd_driver = {
.driver = {
.name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(mxcnd_dt_ids),
},
.remove = __devexit_p(mxcnd_remove),
};
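The mxc_nand changes above fold all nfc_is_v1()/nfc_is_v21()/nfc_is_v3_2() run-time branches into one const devtype_data structure per controller variant, selected either from the device-tree match table or from the legacy platform-data path. The fragment below is a minimal userspace sketch of that table-driven dispatch, not driver code; every name in it (fake_v1, select_devtype, the offsets) is invented for illustration.

/*
 * Minimal userspace sketch of the table-driven "devtype_data" pattern:
 * per-variant constants and callbacks live in one const structure, and
 * probe-like code only dereferences the selected entry.
 */
#include <stdio.h>

struct devtype_data {
	const char *name;
	unsigned spare0_offset;
	unsigned spare_len;
	void (*send_cmd)(unsigned cmd);
};

static void send_cmd_v1(unsigned cmd) { printf("v1 cmd %#x\n", cmd); }
static void send_cmd_v3(unsigned cmd) { printf("v3 cmd %#x\n", cmd); }

static const struct devtype_data fake_v1 = {
	.name = "v1", .spare0_offset = 0x800, .spare_len = 16,
	.send_cmd = send_cmd_v1,
};
static const struct devtype_data fake_v3 = {
	.name = "v3", .spare0_offset = 0x1000, .spare_len = 64,
	.send_cmd = send_cmd_v3,
};

/* stands in for the DT/platform-data lookup in the probe path */
static const struct devtype_data *select_devtype(int is_v3)
{
	return is_v3 ? &fake_v3 : &fake_v1;
}

int main(void)
{
	const struct devtype_data *d = select_devtype(1);

	printf("%s: spare0 at %#x, %u spare bytes\n",
	       d->name, d->spare0_offset, d->spare_len);
	d->send_cmd(0x90);	/* READID on a real controller */
	return 0;
}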
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 47b19c0..a11253a0 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1066,15 +1066,17 @@ EXPORT_SYMBOL(nand_lock);
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* Not for syndrome calculating ECC controllers, which use a special oob layout.
*/
static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
chip->read_buf(mtd, buf, mtd->writesize);
- chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+ if (oob_required)
+ chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
return 0;
}
@@ -1083,13 +1085,14 @@ static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* We need a special oob layout and handling even when OOB isn't used.
*/
static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
- struct nand_chip *chip,
- uint8_t *buf, int page)
+ struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1126,10 +1129,11 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*/
static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1138,8 +1142,9 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *ecc_calc = chip->buffers->ecccalc;
uint8_t *ecc_code = chip->buffers->ecccode;
uint32_t *eccpos = chip->ecc.layout->eccpos;
+ unsigned int max_bitflips = 0;
- chip->ecc.read_page_raw(mtd, chip, buf, page);
+ chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
@@ -1154,12 +1159,14 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
int stat;
stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
}
- return 0;
+ return max_bitflips;
}
/**
@@ -1180,6 +1187,7 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
int index = 0;
+ unsigned int max_bitflips = 0;
/* Column address within the page aligned to ECC size (256bytes) */
start_step = data_offs / chip->ecc.size;
@@ -1244,12 +1252,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
stat = chip->ecc.correct(mtd, p,
&chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
}
- return 0;
+ return max_bitflips;
}
/**
@@ -1257,12 +1267,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* Not for syndrome calculating ECC controllers which need a special oob layout.
*/
static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1271,6 +1282,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *ecc_calc = chip->buffers->ecccalc;
uint8_t *ecc_code = chip->buffers->ecccode;
uint32_t *eccpos = chip->ecc.layout->eccpos;
+ unsigned int max_bitflips = 0;
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
chip->ecc.hwctl(mtd, NAND_ECC_READ);
@@ -1289,12 +1301,14 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
int stat;
stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
}
- return 0;
+ return max_bitflips;
}
/**
@@ -1302,6 +1316,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* Hardware ECC for large page chips, require OOB to be read first. For this
@@ -1311,7 +1326,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
* the data area, by overwriting the NAND manufacturer bad block markings.
*/
static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf, int page)
+ struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1320,6 +1335,7 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
uint8_t *ecc_code = chip->buffers->ecccode;
uint32_t *eccpos = chip->ecc.layout->eccpos;
uint8_t *ecc_calc = chip->buffers->ecccalc;
+ unsigned int max_bitflips = 0;
/* Read the OOB area first */
chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
@@ -1337,12 +1353,14 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
chip->ecc.calculate(mtd, p, &ecc_calc[i]);
stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
}
- return 0;
+ return max_bitflips;
}
/**
@@ -1350,19 +1368,21 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
* @page: page number to read
*
* The hw generator calculates the error syndrome automatically. Therefore we
* need a special oob layout and handling.
*/
static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
uint8_t *oob = chip->oob_poi;
+ unsigned int max_bitflips = 0;
for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
int stat;
@@ -1379,10 +1399,12 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
chip->read_buf(mtd, oob, eccbytes);
stat = chip->ecc.correct(mtd, p, oob, NULL);
- if (stat < 0)
+ if (stat < 0) {
mtd->ecc_stats.failed++;
- else
+ } else {
mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
oob += eccbytes;
@@ -1397,7 +1419,7 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
if (i)
chip->read_buf(mtd, oob, i);
- return 0;
+ return max_bitflips;
}
/**
@@ -1459,11 +1481,9 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
- int chipnr, page, realpage, col, bytes, aligned;
+ int chipnr, page, realpage, col, bytes, aligned, oob_required;
struct nand_chip *chip = mtd->priv;
struct mtd_ecc_stats stats;
- int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
- int sndcmd = 1;
int ret = 0;
uint32_t readlen = ops->len;
uint32_t oobreadlen = ops->ooblen;
@@ -1471,6 +1491,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
mtd->oobavail : mtd->oobsize;
uint8_t *bufpoi, *oob, *buf;
+ unsigned int max_bitflips = 0;
stats = mtd->ecc_stats;
@@ -1484,6 +1505,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
buf = ops->datbuf;
oob = ops->oobbuf;
+ oob_required = oob ? 1 : 0;
while (1) {
bytes = min(mtd->writesize - col, readlen);
@@ -1493,21 +1515,22 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
if (realpage != chip->pagebuf || oob) {
bufpoi = aligned ? buf : chip->buffers->databuf;
- if (likely(sndcmd)) {
- chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
- sndcmd = 0;
- }
+ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
- /* Now read the page into the buffer */
+ /*
+ * Now read the page into the buffer. Absent an error,
+ * the read methods return max bitflips per ecc step.
+ */
if (unlikely(ops->mode == MTD_OPS_RAW))
- ret = chip->ecc.read_page_raw(mtd, chip,
- bufpoi, page);
+ ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
+ oob_required,
+ page);
else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
ret = chip->ecc.read_subpage(mtd, chip,
col, bytes, bufpoi);
else
ret = chip->ecc.read_page(mtd, chip, bufpoi,
- page);
+ oob_required, page);
if (ret < 0) {
if (!aligned)
/* Invalidate page cache */
@@ -1515,22 +1538,25 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
break;
}
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+
/* Transfer not aligned data */
if (!aligned) {
if (!NAND_SUBPAGE_READ(chip) && !oob &&
!(mtd->ecc_stats.failed - stats.failed) &&
- (ops->mode != MTD_OPS_RAW))
+ (ops->mode != MTD_OPS_RAW)) {
chip->pagebuf = realpage;
- else
+ chip->pagebuf_bitflips = ret;
+ } else {
/* Invalidate page cache */
chip->pagebuf = -1;
+ }
memcpy(buf, chip->buffers->databuf + col, bytes);
}
buf += bytes;
if (unlikely(oob)) {
-
int toread = min(oobreadlen, max_oobsize);
if (toread) {
@@ -1541,13 +1567,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
}
if (!(chip->options & NAND_NO_READRDY)) {
- /*
- * Apply delay or wait for ready/busy pin. Do
- * this before the AUTOINCR check, so no
- * problems arise if a chip which does auto
- * increment is marked as NOAUTOINCR by the
- * board driver.
- */
+ /* Apply delay or wait for ready/busy pin */
if (!chip->dev_ready)
udelay(chip->chip_delay);
else
@@ -1556,6 +1576,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
} else {
memcpy(buf, chip->buffers->databuf + col, bytes);
buf += bytes;
+ max_bitflips = max_t(unsigned int, max_bitflips,
+ chip->pagebuf_bitflips);
}
readlen -= bytes;
@@ -1575,26 +1597,19 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
chip->select_chip(mtd, -1);
chip->select_chip(mtd, chipnr);
}
-
- /*
- * Check, if the chip supports auto page increment or if we
- * have hit a block boundary.
- */
- if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
- sndcmd = 1;
}
ops->retlen = ops->len - (size_t) readlen;
if (oob)
ops->oobretlen = ops->ooblen - oobreadlen;
- if (ret)
+ if (ret < 0)
return ret;
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
- return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+ return max_bitflips;
}
/**
@@ -1630,17 +1645,13 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @page: page number to read
- * @sndcmd: flag whether to issue read command or not
*/
static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int sndcmd)
+ int page)
{
- if (sndcmd) {
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- sndcmd = 0;
- }
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return sndcmd;
+ return 0;
}
/**
@@ -1649,10 +1660,9 @@ static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @page: page number to read
- * @sndcmd: flag whether to issue read command or not
*/
static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int sndcmd)
+ int page)
{
uint8_t *buf = chip->oob_poi;
int length = mtd->oobsize;
@@ -1679,7 +1689,7 @@ static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
if (length > 0)
chip->read_buf(mtd, bufpoi, length);
- return 1;
+ return 0;
}
/**
@@ -1775,13 +1785,13 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd,
static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
- int page, realpage, chipnr, sndcmd = 1;
+ int page, realpage, chipnr;
struct nand_chip *chip = mtd->priv;
struct mtd_ecc_stats stats;
- int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
int readlen = ops->ooblen;
int len;
uint8_t *buf = ops->oobbuf;
+ int ret = 0;
pr_debug("%s: from = 0x%08Lx, len = %i\n",
__func__, (unsigned long long)from, readlen);
@@ -1817,20 +1827,18 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
while (1) {
if (ops->mode == MTD_OPS_RAW)
- sndcmd = chip->ecc.read_oob_raw(mtd, chip, page, sndcmd);
+ ret = chip->ecc.read_oob_raw(mtd, chip, page);
else
- sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
+ ret = chip->ecc.read_oob(mtd, chip, page);
+
+ if (ret < 0)
+ break;
len = min(len, readlen);
buf = nand_transfer_oob(chip, buf, ops, len);
if (!(chip->options & NAND_NO_READRDY)) {
- /*
- * Apply delay or wait for ready/busy pin. Do this
- * before the AUTOINCR check, so no problems arise if a
- * chip which does auto increment is marked as
- * NOAUTOINCR by the board driver.
- */
+ /* Apply delay or wait for ready/busy pin */
if (!chip->dev_ready)
udelay(chip->chip_delay);
else
@@ -1851,16 +1859,12 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
chip->select_chip(mtd, -1);
chip->select_chip(mtd, chipnr);
}
-
- /*
- * Check, if the chip supports auto page increment or if we
- * have hit a block boundary.
- */
- if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
- sndcmd = 1;
}
- ops->oobretlen = ops->ooblen;
+ ops->oobretlen = ops->ooblen - readlen;
+
+ if (ret < 0)
+ return ret;
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
@@ -1919,14 +1923,16 @@ out:
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
*
* Not for syndrome calculating ECC controllers, which use a special oob layout.
*/
static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
chip->write_buf(mtd, buf, mtd->writesize);
- chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ if (oob_required)
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
}
/**
@@ -1934,12 +1940,13 @@ static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
*
* We need a special oob layout and handling even when ECC isn't checked.
*/
static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
struct nand_chip *chip,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1973,9 +1980,10 @@ static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
*/
static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -1991,7 +1999,7 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
for (i = 0; i < chip->ecc.total; i++)
chip->oob_poi[eccpos[i]] = ecc_calc[i];
- chip->ecc.write_page_raw(mtd, chip, buf);
+ chip->ecc.write_page_raw(mtd, chip, buf, 1);
}
/**
@@ -1999,9 +2007,10 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
*/
static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -2027,12 +2036,14 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
* @mtd: mtd info structure
* @chip: nand chip info structure
* @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
*
* The hw generator calculates the error syndrome automatically. Therefore we
* need a special oob layout and handling.
*/
static void nand_write_page_syndrome(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf)
+ struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -2071,21 +2082,23 @@ static void nand_write_page_syndrome(struct mtd_info *mtd,
* @mtd: MTD device structure
* @chip: NAND chip descriptor
* @buf: the data to write
+ * @oob_required: must write chip->oob_poi to OOB
* @page: page number to write
* @cached: cached programming
* @raw: use _raw version of write_page
*/
static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf, int page, int cached, int raw)
+ const uint8_t *buf, int oob_required, int page,
+ int cached, int raw)
{
int status;
chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
if (unlikely(raw))
- chip->ecc.write_page_raw(mtd, chip, buf);
+ chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
else
- chip->ecc.write_page(mtd, chip, buf);
+ chip->ecc.write_page(mtd, chip, buf, oob_required);
/*
* Cached progamming disabled for now. Not sure if it's worth the
@@ -2118,6 +2131,9 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
if (chip->verify_buf(mtd, buf, mtd->writesize))
return -EIO;
+
+ /* Make sure the next page prog is preceded by a status read */
+ chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
#endif
return 0;
}
@@ -2202,6 +2218,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
uint8_t *oob = ops->oobbuf;
uint8_t *buf = ops->datbuf;
int ret, subpage;
+ int oob_required = oob ? 1 : 0;
ops->retlen = 0;
if (!writelen)
@@ -2264,8 +2281,8 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
memset(chip->oob_poi, 0xff, mtd->oobsize);
}
- ret = chip->write_page(mtd, chip, wbuf, page, cached,
- (ops->mode == MTD_OPS_RAW));
+ ret = chip->write_page(mtd, chip, wbuf, oob_required, page,
+ cached, (ops->mode == MTD_OPS_RAW));
if (ret)
break;
@@ -2898,8 +2915,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
*busw = NAND_BUSWIDTH_16;
chip->options &= ~NAND_CHIPOPTIONS_MSK;
- chip->options |= (NAND_NO_READRDY |
- NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK;
+ chip->options |= NAND_NO_READRDY & NAND_CHIPOPTIONS_MSK;
pr_info("ONFI flash detected\n");
return 1;
@@ -3076,11 +3092,6 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
ident_done:
- /*
- * Set chip as a default. Board drivers can override it, if necessary.
- */
- chip->options |= NAND_NO_AUTOINCR;
-
/* Try to identify manufacturer */
for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
if (nand_manuf_ids[maf_idx].id == *maf_id)
@@ -3154,10 +3165,11 @@ ident_done:
if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
chip->cmdfunc = nand_command_lp;
- pr_info("NAND device: Manufacturer ID:"
- " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id,
- nand_manuf_ids[maf_idx].name,
- chip->onfi_version ? chip->onfi_params.model : type->name);
+ pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s),"
+ " page size: %d, OOB size: %d\n",
+ *maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
+ chip->onfi_version ? chip->onfi_params.model : type->name,
+ mtd->writesize, mtd->oobsize);
return type;
}
@@ -3329,8 +3341,13 @@ int nand_scan_tail(struct mtd_info *mtd)
if (!chip->ecc.write_oob)
chip->ecc.write_oob = nand_write_oob_syndrome;
- if (mtd->writesize >= chip->ecc.size)
+ if (mtd->writesize >= chip->ecc.size) {
+ if (!chip->ecc.strength) {
+ pr_warn("Driver must set ecc.strength when using hardware ECC\n");
+ BUG();
+ }
break;
+ }
pr_warn("%d byte HW ECC not possible on "
"%d byte page size, fallback to SW ECC\n",
chip->ecc.size, mtd->writesize);
@@ -3385,7 +3402,7 @@ int nand_scan_tail(struct mtd_info *mtd)
BUG();
}
chip->ecc.strength =
- chip->ecc.bytes*8 / fls(8*chip->ecc.size);
+ chip->ecc.bytes * 8 / fls(8 * chip->ecc.size);
break;
case NAND_ECC_NONE:
@@ -3483,7 +3500,14 @@ int nand_scan_tail(struct mtd_info *mtd)
/* propagate ecc info to mtd_info */
mtd->ecclayout = chip->ecc.layout;
- mtd->ecc_strength = chip->ecc.strength * chip->ecc.steps;
+ mtd->ecc_strength = chip->ecc.strength;
+ /*
+ * Initialize bitflip_threshold to its default before the scan_bbt()
+ * call: scan_bbt() might invoke mtd_read(), so bitflip_threshold must
+ * already be set properly.
+ */
+ if (!mtd->bitflip_threshold)
+ mtd->bitflip_threshold = mtd->ecc_strength;
/* Check, if we should skip the bad block table scan */
if (chip->options & NAND_SKIP_BBTSCAN)
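The nand_base.c changes give the read_page/read_subpage methods a new contract: on success they return the maximum number of bitflips corrected in any single ECC step, and nand_do_read_ops() keeps the worst value across all pages so the MTD layer can compare it against mtd->bitflip_threshold. The following is a self-contained sketch of that bookkeeping with fabricated per-step counts, assuming four ECC steps per page; it is not kernel code.

/*
 * Sketch of the max-bitflips bookkeeping: each "ECC step" reports how many
 * bits it corrected (or -1 for an uncorrectable step), and the caller keeps
 * the worst step of the worst page.
 */
#include <stdio.h>

#define ECC_STEPS	4

/* returns corrected bits for the worst step, or -1 if any step failed */
static int read_page(const int corrected[ECC_STEPS])
{
	int max_bitflips = 0;

	for (int i = 0; i < ECC_STEPS; i++) {
		if (corrected[i] < 0)
			return -1;		/* ecc_stats.failed++ in the driver */
		if (corrected[i] > max_bitflips)
			max_bitflips = corrected[i];
	}
	return max_bitflips;
}

int main(void)
{
	const int page0[ECC_STEPS] = { 0, 1, 0, 0 };
	const int page1[ECC_STEPS] = { 0, 0, 3, 1 };
	const int bitflip_threshold = 3;	/* defaults to the ECC strength */
	const int *pages[] = { page0, page1 };
	int max_bitflips = 0;

	for (unsigned p = 0; p < 2; p++) {
		int ret = read_page(pages[p]);
		if (ret < 0)
			return 1;		/* the -EBADMSG case */
		if (ret > max_bitflips)
			max_bitflips = ret;
	}

	/* mtd_read() turns this comparison into -EUCLEAN for its caller */
	printf("max bitflips %d -> %s\n", max_bitflips,
	       max_bitflips >= bitflip_threshold ? "scrub advised" : "ok");
	return 0;
}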
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 20a112f..30d1319 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -324,6 +324,7 @@ static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
buf += mtd->oobsize + mtd->writesize;
len -= mtd->writesize;
+ offs += mtd->writesize;
}
return 0;
}
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index af4fe8c..621b70b 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -70,7 +70,7 @@ struct nand_flash_dev nand_flash_ids[] = {
* These are the new chips with large page size. The pagesize and the
* erasesize is determined from the extended id bytes
*/
-#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR)
+#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY)
#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
/* 512 Megabit */
@@ -157,9 +157,7 @@ struct nand_flash_dev nand_flash_ids[] = {
* writes possible, but not implemented now
*/
{"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000,
- NAND_IS_AND | NAND_NO_AUTOINCR |NAND_NO_READRDY | NAND_4PAGE_ARRAY |
- BBT_AUTO_REFRESH
- },
+ NAND_IS_AND | NAND_NO_READRDY | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH},
{NULL,}
};
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 261f478..cf0cd31 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -28,7 +28,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/string.h>
@@ -268,7 +268,6 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
-#define OPT_AUTOINCR 0x00000020 /* page number auto incrementation is possible */
#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
@@ -547,12 +546,6 @@ static char *get_partition_name(int i)
return kstrdup(buf, GFP_KERNEL);
}
-static uint64_t divide(uint64_t n, uint32_t d)
-{
- do_div(n, d);
- return n;
-}
-
/*
* Initialize the nandsim structure.
*
@@ -581,7 +574,7 @@ static int init_nandsim(struct mtd_info *mtd)
ns->geom.oobsz = mtd->oobsize;
ns->geom.secsz = mtd->erasesize;
ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
- ns->geom.pgnum = divide(ns->geom.totsz, ns->geom.pgsz);
+ ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz);
ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
ns->geom.secshift = ffs(ns->geom.secsz) - 1;
ns->geom.pgshift = chip->page_shift;
@@ -594,7 +587,7 @@ static int init_nandsim(struct mtd_info *mtd)
ns->options |= OPT_PAGE256;
}
else if (ns->geom.pgsz == 512) {
- ns->options |= (OPT_PAGE512 | OPT_AUTOINCR);
+ ns->options |= OPT_PAGE512;
if (ns->busw == 8)
ns->options |= OPT_PAGE512_8BIT;
} else if (ns->geom.pgsz == 2048) {
@@ -663,8 +656,6 @@ static int init_nandsim(struct mtd_info *mtd)
for (i = 0; nand_flash_ids[i].name != NULL; i++) {
if (second_id_byte != nand_flash_ids[i].id)
continue;
- if (!(nand_flash_ids[i].options & NAND_NO_AUTOINCR))
- ns->options |= OPT_AUTOINCR;
}
if (ns->busw == 16)
@@ -924,7 +915,7 @@ static int setup_wear_reporting(struct mtd_info *mtd)
if (!rptwear)
return 0;
- wear_eb_count = divide(mtd->size, mtd->erasesize);
+ wear_eb_count = div_u64(mtd->size, mtd->erasesize);
mem = wear_eb_count * sizeof(unsigned long);
if (mem / sizeof(unsigned long) != wear_eb_count) {
NS_ERR("Too many erase blocks for wear reporting\n");
@@ -1936,20 +1927,8 @@ static u_char ns_nand_read_byte(struct mtd_info *mtd)
if (ns->regs.count == ns->regs.num) {
NS_DBG("read_byte: all bytes were read\n");
- /*
- * The OPT_AUTOINCR allows to read next consecutive pages without
- * new read operation cycle.
- */
- if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
- ns->regs.count = 0;
- if (ns->regs.row + 1 < ns->geom.pgnum)
- ns->regs.row += 1;
- NS_DBG("read_byte: switch to the next page (%#x)\n", ns->regs.row);
- do_state_action(ns, ACTION_CPY);
- }
- else if (NS_STATE(ns->nxstate) == STATE_READY)
+ if (NS_STATE(ns->nxstate) == STATE_READY)
switch_state(ns);
-
}
return outb;
@@ -2203,14 +2182,7 @@ static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
ns->regs.count += len;
if (ns->regs.count == ns->regs.num) {
- if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
- ns->regs.count = 0;
- if (ns->regs.row + 1 < ns->geom.pgnum)
- ns->regs.row += 1;
- NS_DBG("read_buf: switch to the next page (%#x)\n", ns->regs.row);
- do_state_action(ns, ACTION_CPY);
- }
- else if (NS_STATE(ns->nxstate) == STATE_READY)
+ if (NS_STATE(ns->nxstate) == STATE_READY)
switch_state(ns);
}
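The nandsim cleanup drops the local divide() wrapper because do_div() and div_u64() have different calling conventions: do_div() divides its first argument in place and evaluates to the remainder, whereas div_u64() simply returns the 64-by-32 quotient. The snippet below only mimics those interfaces in userspace to show the difference; the real helpers are architecture-optimized, and the device size used here is made up.

/*
 * Userspace approximation of the two kernel division helpers, interface
 * only; not the real implementations.
 */
#include <stdio.h>
#include <stdint.h>

/* do_div(n, d): n becomes the quotient, the macro yields the remainder */
#define do_div(n, d) ({ uint32_t __rem = (n) % (d); (n) /= (d); __rem; })

/* div_u64(n, d): plain 64-by-32 division, n is left untouched */
static inline uint64_t div_u64(uint64_t n, uint32_t d)
{
	return n / d;
}

int main(void)
{
	uint64_t totsz = 256ULL * 1024 * 1024;	/* invented device size */
	uint32_t pgsz = 2048;
	uint64_t tmp = totsz;
	uint32_t rem = do_div(tmp, pgsz);	/* tmp is now the page count */

	printf("do_div:  %llu pages, remainder %u\n",
	       (unsigned long long)tmp, rem);
	printf("div_u64: %llu pages\n",
	       (unsigned long long)div_u64(totsz, pgsz));
	return 0;
}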
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index c2b0bba..d7f681d 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -21,6 +21,10 @@
#include <linux/io.h>
#include <linux/slab.h>
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+#include <linux/bch.h>
+#endif
+
#include <plat/dma.h>
#include <plat/gpmc.h>
#include <plat/nand.h>
@@ -127,6 +131,11 @@ struct omap_nand_info {
} iomode;
u_char *buf;
int buf_len;
+
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+ struct bch_control *bch;
+ struct nand_ecclayout ecclayout;
+#endif
};
/**
@@ -402,7 +411,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
if (ret)
/* PFPW engine is busy, use cpu copy method */
- goto out_copy;
+ goto out_copy_unmap;
init_completion(&info->comp);
@@ -421,6 +430,8 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
return 0;
+out_copy_unmap:
+ dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
out_copy:
if (info->nand.options & NAND_BUSWIDTH_16)
is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
@@ -879,7 +890,7 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
unsigned long timeo = jiffies;
- int status = NAND_STATUS_FAIL, state = this->state;
+ int status, state = this->state;
if (state == FL_ERASING)
timeo += (HZ * 400) / 1000;
@@ -894,6 +905,8 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
break;
cond_resched();
}
+
+ status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
return status;
}
@@ -925,6 +938,226 @@ static int omap_dev_ready(struct mtd_info *mtd)
return 1;
}
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+
+/**
+ * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
+ * @mtd: MTD device structure
+ * @mode: Read/Write mode
+ */
+static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
+{
+ int nerrors;
+ unsigned int dev_width;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ struct nand_chip *chip = mtd->priv;
+
+ nerrors = (info->nand.ecc.bytes == 13) ? 8 : 4;
+ dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+ /*
+ * Program GPMC to perform correction on one 512-byte sector at a time.
+ * Using 4 sectors at a time (i.e. ecc.size = 2048) is also possible and
+ * gives a slight (5%) performance gain (but requires additional code).
+ */
+ (void)gpmc_enable_hwecc_bch(info->gpmc_cs, mode, dev_width, 1, nerrors);
+}
+
+/**
+ * omap3_calculate_ecc_bch4 - Generate 7 bytes of ECC bytes
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ */
+static int omap3_calculate_ecc_bch4(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ return gpmc_calculate_ecc_bch4(info->gpmc_cs, dat, ecc_code);
+}
+
+/**
+ * omap3_calculate_ecc_bch8 - Generate 13 bytes of ECC bytes
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ */
+static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ return gpmc_calculate_ecc_bch8(info->gpmc_cs, dat, ecc_code);
+}
+
+/**
+ * omap3_correct_data_bch - Decode received data and correct errors
+ * @mtd: MTD device structure
+ * @data: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ */
+static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ int i, count;
+ /* cannot correct more than 8 errors */
+ unsigned int errloc[8];
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+
+ count = decode_bch(info->bch, NULL, 512, read_ecc, calc_ecc, NULL,
+ errloc);
+ if (count > 0) {
+ /* correct errors */
+ for (i = 0; i < count; i++) {
+ /* correct data only, not ecc bytes */
+ if (errloc[i] < 8*512)
+ data[errloc[i]/8] ^= 1 << (errloc[i] & 7);
+ pr_debug("corrected bitflip %u\n", errloc[i]);
+ }
+ } else if (count < 0) {
+ pr_err("ecc unrecoverable error\n");
+ }
+ return count;
+}
+
+/**
+ * omap3_free_bch - Release BCH ecc resources
+ * @mtd: MTD device structure
+ */
+static void omap3_free_bch(struct mtd_info *mtd)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ if (info->bch) {
+ free_bch(info->bch);
+ info->bch = NULL;
+ }
+}
+
+/**
+ * omap3_init_bch - Initialize BCH ECC
+ * @mtd: MTD device structure
+ * @ecc_opt: OMAP ECC mode (OMAP_ECC_BCH4_CODE_HW or OMAP_ECC_BCH8_CODE_HW)
+ */
+static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
+{
+ int ret, max_errors;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+#ifdef CONFIG_MTD_NAND_OMAP_BCH8
+ const int hw_errors = 8;
+#else
+ const int hw_errors = 4;
+#endif
+ info->bch = NULL;
+
+ max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? 8 : 4;
+ if (max_errors != hw_errors) {
+ pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported",
+ max_errors, hw_errors);
+ goto fail;
+ }
+
+ /* initialize GPMC BCH engine */
+ ret = gpmc_init_hwecc_bch(info->gpmc_cs, 1, max_errors);
+ if (ret)
+ goto fail;
+
+ /* software bch library is only used to detect and locate errors */
+ info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */);
+ if (!info->bch)
+ goto fail;
+
+ info->nand.ecc.size = 512;
+ info->nand.ecc.hwctl = omap3_enable_hwecc_bch;
+ info->nand.ecc.correct = omap3_correct_data_bch;
+ info->nand.ecc.mode = NAND_ECC_HW;
+
+ /*
+ * The number of corrected errors in an ecc block that will trigger
+ * block scrubbing defaults to the ecc strength (4 or 8).
+ * Set mtd->bitflip_threshold here to define a custom threshold.
+ */
+
+ if (max_errors == 8) {
+ info->nand.ecc.strength = 8;
+ info->nand.ecc.bytes = 13;
+ info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
+ } else {
+ info->nand.ecc.strength = 4;
+ info->nand.ecc.bytes = 7;
+ info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
+ }
+
+ pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
+ return 0;
+fail:
+ omap3_free_bch(mtd);
+ return -1;
+}
+
+/**
+ * omap3_init_bch_tail - Build an oob layout for BCH ECC correction.
+ * @mtd: MTD device structure
+ */
+static int omap3_init_bch_tail(struct mtd_info *mtd)
+{
+ int i, steps;
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ struct nand_ecclayout *layout = &info->ecclayout;
+
+ /* build oob layout */
+ steps = mtd->writesize/info->nand.ecc.size;
+ layout->eccbytes = steps*info->nand.ecc.bytes;
+
+ /* do not bother creating special oob layouts for small page devices */
+ if (mtd->oobsize < 64) {
+ pr_err("BCH ecc is not supported on small page devices\n");
+ goto fail;
+ }
+
+ /* reserve 2 bytes for bad block marker */
+ if (layout->eccbytes+2 > mtd->oobsize) {
+ pr_err("no oob layout available for oobsize %d eccbytes %u\n",
+ mtd->oobsize, layout->eccbytes);
+ goto fail;
+ }
+
+ /* put ecc bytes at oob tail */
+ for (i = 0; i < layout->eccbytes; i++)
+ layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
+
+ layout->oobfree[0].offset = 2;
+ layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
+ info->nand.ecc.layout = layout;
+
+ if (!(info->nand.options & NAND_BUSWIDTH_16))
+ info->nand.badblock_pattern = &bb_descrip_flashbased;
+ return 0;
+fail:
+ omap3_free_bch(mtd);
+ return -1;
+}
+
+#else
+static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
+{
+ pr_err("CONFIG_MTD_NAND_OMAP_BCH is not enabled\n");
+ return -1;
+}
+static int omap3_init_bch_tail(struct mtd_info *mtd)
+{
+ return -1;
+}
+static void omap3_free_bch(struct mtd_info *mtd)
+{
+}
+#endif /* CONFIG_MTD_NAND_OMAP_BCH */
+
static int __devinit omap_nand_probe(struct platform_device *pdev)
{
struct omap_nand_info *info;
@@ -1063,6 +1296,13 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->nand.ecc.hwctl = omap_enable_hwecc;
info->nand.ecc.correct = omap_correct_data;
info->nand.ecc.mode = NAND_ECC_HW;
+ } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
+ (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
+ err = omap3_init_bch(&info->mtd, pdata->ecc_opt);
+ if (err) {
+ err = -EINVAL;
+ goto out_release_mem_region;
+ }
}
/* DIP switches on some boards change between 8 and 16 bit
@@ -1094,6 +1334,14 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
(offset + omap_oobinfo.eccbytes);
info->nand.ecc.layout = &omap_oobinfo;
+ } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
+ (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
+ /* build OOB layout for BCH ECC correction */
+ err = omap3_init_bch_tail(&info->mtd);
+ if (err) {
+ err = -EINVAL;
+ goto out_release_mem_region;
+ }
}
/* second phase scan */
@@ -1122,6 +1370,7 @@ static int omap_nand_remove(struct platform_device *pdev)
struct mtd_info *mtd = platform_get_drvdata(pdev);
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
+ omap3_free_bch(&info->mtd);
platform_set_drvdata(pdev, NULL);
if (info->dma_ch != -1)
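In the OMAP BCH path above, decode_bch() only reports error locations (bit indices within the 512-byte sector plus its ECC bytes); omap3_correct_data_bch() then flips the affected bits itself and skips locations that fall inside the ECC bytes rather than the data area. The sketch below isolates that correction step with a stubbed decoder and fabricated error locations; it is an illustration, not the driver.

/*
 * Sketch of the bit-flip correction loop. decode_bch() from lib/bch is
 * replaced by a stub returning fabricated error locations, so only the
 * "turn a bit index into a byte/bit flip" part is real.
 */
#include <stdio.h>
#include <string.h>

#define SECTOR_SIZE	512		/* bytes covered per BCH code word */

/* pretend decoder: reports two flipped bits, returns their count */
static int fake_decode_bch(unsigned int *errloc, int max)
{
	errloc[0] = 10;			 /* bit 2 of data byte 1 */
	errloc[1] = 8 * SECTOR_SIZE + 3; /* falls inside the ECC bytes */
	return max >= 2 ? 2 : 0;
}

int main(void)
{
	unsigned char data[SECTOR_SIZE];
	unsigned int errloc[8];		/* at most 8 correctable errors */
	int i, count;

	memset(data, 0xff, sizeof(data));
	data[1] ^= 1 << 2;		/* inject the error we will correct */

	count = fake_decode_bch(errloc, 8);
	for (i = 0; i < count; i++) {
		/* correct data only, not ecc bytes */
		if (errloc[i] < 8 * SECTOR_SIZE)
			data[errloc[i] / 8] ^= 1 << (errloc[i] & 7);
	}

	printf("byte 1 restored to %#x after %d reported error(s)\n",
	       data[1], count);
	return 0;
}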
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 974dbf8..1440e51 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -155,7 +155,6 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
chip->ecc.mode = NAND_ECC_SOFT;
/* Enable the following for a flash based bad block table */
- chip->options = NAND_NO_AUTOINCR;
chip->bbt_options = NAND_BBT_USE_FLASH;
/* Scan to find existence of the device */
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 6404e6e..1bcb520 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -23,14 +23,18 @@ struct plat_nand_data {
void __iomem *io_base;
};
+static const char *part_probe_types[] = { "cmdlinepart", NULL };
+
/*
* Probe for the NAND device.
*/
static int __devinit plat_nand_probe(struct platform_device *pdev)
{
struct platform_nand_data *pdata = pdev->dev.platform_data;
+ struct mtd_part_parser_data ppdata;
struct plat_nand_data *data;
struct resource *res;
+ const char **part_types;
int err = 0;
if (pdata->chip.nr_chips < 1) {
@@ -75,6 +79,7 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
data->chip.select_chip = pdata->ctrl.select_chip;
data->chip.write_buf = pdata->ctrl.write_buf;
data->chip.read_buf = pdata->ctrl.read_buf;
+ data->chip.read_byte = pdata->ctrl.read_byte;
data->chip.chip_delay = pdata->chip.chip_delay;
data->chip.options |= pdata->chip.options;
data->chip.bbt_options |= pdata->chip.bbt_options;
@@ -98,8 +103,10 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
goto out;
}
- err = mtd_device_parse_register(&data->mtd,
- pdata->chip.part_probe_types, NULL,
+ part_types = pdata->chip.part_probe_types ? : part_probe_types;
+
+ ppdata.of_node = pdev->dev.of_node;
+ err = mtd_device_parse_register(&data->mtd, part_types, &ppdata,
pdata->chip.partitions,
pdata->chip.nr_partitions);
@@ -140,12 +147,19 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id plat_nand_match[] = {
+ { .compatible = "gen_nand" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, plat_nand_match);
+
static struct platform_driver plat_nand_driver = {
- .probe = plat_nand_probe,
- .remove = __devexit_p(plat_nand_remove),
- .driver = {
- .name = "gen_nand",
- .owner = THIS_MODULE,
+ .probe = plat_nand_probe,
+ .remove = __devexit_p(plat_nand_remove),
+ .driver = {
+ .name = "gen_nand",
+ .owner = THIS_MODULE,
+ .of_match_table = plat_nand_match,
},
};
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index def50ca..252aaef 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -682,14 +682,15 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
}
static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, const uint8_t *buf)
+ struct nand_chip *chip, const uint8_t *buf, int oob_required)
{
chip->write_buf(mtd, buf, mtd->writesize);
chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
}
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
- struct nand_chip *chip, uint8_t *buf, int page)
+ struct nand_chip *chip, uint8_t *buf, int oob_required,
+ int page)
{
struct pxa3xx_nand_host *host = mtd->priv;
struct pxa3xx_nand_info *info = host->info_data;
@@ -1004,7 +1005,6 @@ KEEP_CONFIG:
chip->ecc.size = host->page_size;
chip->ecc.strength = 1;
- chip->options = NAND_NO_AUTOINCR;
chip->options |= NAND_NO_READRDY;
if (host->reg_ndcr & NDCR_DWIDTH_M)
chip->options |= NAND_BUSWIDTH_16;
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index c204018..8cb6277 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -539,14 +539,11 @@ exit:
* nand_read_oob_syndrome assumes we can send column address - we can't
*/
static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
- int page, int sndcmd)
+ int page)
{
- if (sndcmd) {
- chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
- sndcmd = 0;
- }
+ chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
- return sndcmd;
+ return 0;
}
/*
@@ -1104,18 +1101,7 @@ static struct pci_driver r852_pci_driver = {
.driver.pm = &r852_pm_ops,
};
-static __init int r852_module_init(void)
-{
- return pci_register_driver(&r852_pci_driver);
-}
-
-static void __exit r852_module_exit(void)
-{
- pci_unregister_driver(&r852_pci_driver);
-}
-
-module_init(r852_module_init);
-module_exit(r852_module_exit);
+module_pci_driver(r852_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index e9b2b26..aa9b8a5 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -344,7 +344,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
}
static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- uint8_t *buf, int page)
+ uint8_t *buf, int oob_required, int page)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -359,14 +359,14 @@ static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
if (flctl->hwecc_cant_correct[i])
mtd->ecc_stats.failed++;
else
- mtd->ecc_stats.corrected += 0;
+ mtd->ecc_stats.corrected += 0; /* FIXME */
}
return 0;
}
static void flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
- const uint8_t *buf)
+ const uint8_t *buf, int oob_required)
{
int i, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
@@ -881,8 +881,6 @@ static int __devinit flctl_probe(struct platform_device *pdev)
flctl->hwecc = pdata->has_hwecc;
flctl->holden = pdata->use_holden;
- nand->options = NAND_NO_AUTOINCR;
-
/* Set address of hardware control function */
/* 20 us command delay time */
nand->chip_delay = 20;
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index 774c3c2..082bcdc 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -94,17 +94,16 @@ static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
{NULL,}
};
-#define XD_TYPEM (NAND_NO_AUTOINCR | NAND_BROKEN_XD)
static struct nand_flash_dev nand_xd_flash_ids[] = {
{"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
{"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
{"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
{"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
- {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, XD_TYPEM},
- {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, XD_TYPEM},
- {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, XD_TYPEM},
- {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, XD_TYPEM},
+ {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, NAND_BROKEN_XD},
+ {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, NAND_BROKEN_XD},
+ {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, NAND_BROKEN_XD},
+ {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, NAND_BROKEN_XD},
{NULL,}
};
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index b3ce12e..7153e0d2 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1201,7 +1201,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
- return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+ /* return max bitflips per ecc step; ONENANDs correct 1 bit only */
+ return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
}
/**
@@ -1333,7 +1334,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
if (mtd->ecc_stats.failed - stats.failed)
return -EBADMSG;
- return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+ /* return max bitflips per ecc step; ONENANDs correct 1 bit only */
+ return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
}
/**
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 738ee8d..ea4b95b 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -29,7 +29,7 @@ config MTD_UBI_WL_THRESHOLD
config MTD_UBI_BEB_RESERVE
int "Percentage of reserved eraseblocks for bad eraseblocks handling"
- default 1
+ default 2
range 0 25
help
If the MTD device admits of bad eraseblocks (e.g. NAND flash), UBI
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index acec85d..fb55678 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -1026,7 +1026,7 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
{
int ubi_num;
- dbg_gen("dettach MTD device");
+ dbg_gen("detach MTD device");
err = get_user(ubi_num, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 9f957c2..7c13803 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -264,6 +264,9 @@ static struct dentry *dfs_rootdir;
*/
int ubi_debugfs_init(void)
{
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
dfs_rootdir = debugfs_create_dir("ubi", NULL);
if (IS_ERR_OR_NULL(dfs_rootdir)) {
int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
@@ -281,7 +284,8 @@ int ubi_debugfs_init(void)
*/
void ubi_debugfs_exit(void)
{
- debugfs_remove(dfs_rootdir);
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove(dfs_rootdir);
}
/* Read an UBI debugfs file */
@@ -403,6 +407,9 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
struct dentry *dent;
struct ubi_debug_info *d = ubi->dbg;
+ if (!IS_ENABLED(CONFIG_DEBUG_FS))
+ return 0;
+
n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
ubi->ubi_num);
if (n == UBI_DFS_DIR_LEN) {
@@ -470,5 +477,6 @@ out:
*/
void ubi_debugfs_exit_dev(struct ubi_device *ubi)
{
- debugfs_remove_recursive(ubi->dbg->dfs_dir);
+ if (IS_ENABLED(CONFIG_DEBUG_FS))
+ debugfs_remove_recursive(ubi->dbg->dfs_dir);
}
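The UBI debugfs hooks now guard themselves with IS_ENABLED(CONFIG_DEBUG_FS) instead of #ifdef blocks, so the code is always compiled and type-checked but collapses to a constant-false branch the compiler can discard when debugfs is off. Below is a rough userspace imitation of the idiom with a made-up config symbol; the real kernel macro decodes CONFIG_* symbols that are either "1" or undefined.

/*
 * Rough imitation of the IS_ENABLED() idiom: the option is just a plain
 * compile-time constant here.
 */
#include <stdio.h>

#define CONFIG_FAKE_DEBUG_FS	1	/* flip to 0 to compile the feature out */
#define IS_ENABLED(option)	(option)

static int debugfs_init(void)
{
	if (!IS_ENABLED(CONFIG_FAKE_DEBUG_FS))
		return 0;	/* still type-checked, but dead code when off */

	printf("creating debugfs entries\n");
	return 0;
}

int main(void)
{
	return debugfs_init();
}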
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index f6a7d7a..8bbfb44 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -92,7 +92,30 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
}
/**
- * ubi_calculate_rsvd_pool - calculate how many PEBs must be reserved for bad
+ * ubi_update_reserved - update bad eraseblock handling accounting data.
+ * @ubi: UBI device description object
+ *
+ * This function calculates the gap between the current number of PEBs reserved
+ * for bad eraseblock handling and the required level, and, if necessary,
+ * reserves more PEBs to close that gap, limited by the number of available
+ * PEBs. Should be called with ubi->volumes_lock held.
+ */
+void ubi_update_reserved(struct ubi_device *ubi)
+{
+ int need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
+
+ if (need <= 0 || ubi->avail_pebs == 0)
+ return;
+
+ need = min_t(int, need, ubi->avail_pebs);
+ ubi->avail_pebs -= need;
+ ubi->rsvd_pebs += need;
+ ubi->beb_rsvd_pebs += need;
+ ubi_msg("reserved more %d PEBs for bad PEB handling", need);
+}
+
+/**
+ * ubi_calculate_reserved - calculate how many PEBs must be reserved for bad
* eraseblock handling.
* @ubi: UBI device description object
*/
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index a1a81c9..84f66e3 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -647,6 +647,7 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
int length);
int ubi_check_volume(struct ubi_device *ubi, int vol_id);
+void ubi_update_reserved(struct ubi_device *ubi);
void ubi_calculate_reserved(struct ubi_device *ubi);
int ubi_check_pattern(const void *buf, uint8_t patt, int size);
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 0669cff..9169e58 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -443,15 +443,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= reserved_pebs;
ubi->avail_pebs += reserved_pebs;
- i = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
- if (i > 0) {
- i = ubi->avail_pebs >= i ? i : ubi->avail_pebs;
- ubi->avail_pebs -= i;
- ubi->rsvd_pebs += i;
- ubi->beb_rsvd_pebs += i;
- if (i > 0)
- ubi_msg("reserve more %d PEBs", i);
- }
+ ubi_update_reserved(ubi);
ubi->vol_count -= 1;
spin_unlock(&ubi->volumes_lock);
@@ -558,15 +550,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs += pebs;
ubi->avail_pebs -= pebs;
- pebs = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
- if (pebs > 0) {
- pebs = ubi->avail_pebs >= pebs ? pebs : ubi->avail_pebs;
- ubi->avail_pebs -= pebs;
- ubi->rsvd_pebs += pebs;
- ubi->beb_rsvd_pebs += pebs;
- if (pebs > 0)
- ubi_msg("reserve more %d PEBs", pebs);
- }
+ ubi_update_reserved(ubi);
for (i = 0; i < reserved_pebs; i++)
new_mapping[i] = vol->eba_tbl[i];
kfree(vol->eba_tbl);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 9df100a..b6be644 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1262,11 +1262,11 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
vol_id, lnum, ubi->works_count);
- down_write(&ubi->work_sem);
while (found) {
struct ubi_work *wrk;
found = 0;
+ down_read(&ubi->work_sem);
spin_lock(&ubi->wl_lock);
list_for_each_entry(wrk, &ubi->works, list) {
if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
@@ -1277,18 +1277,27 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
spin_unlock(&ubi->wl_lock);
err = wrk->func(ubi, wrk, 0);
- if (err)
- goto out;
+ if (err) {
+ up_read(&ubi->work_sem);
+ return err;
+ }
+
spin_lock(&ubi->wl_lock);
found = 1;
break;
}
}
spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->work_sem);
}
-out:
+ /*
+ * Make sure all the work items that may have run in parallel have
+ * finished.
+ */
+ down_write(&ubi->work_sem);
up_write(&ubi->work_sem);
+
return err;
}
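The reworked ubi_wl_flush() holds work_sem only for reading while it runs matching work items, then performs a down_write()/up_write() pair that cannot be granted until every reader running work in parallel has dropped the semaphore: an empty write section acts as a barrier. The pthread sketch below illustrates that idiom; the worker body and timings are invented.

/*
 * Sketch of the flush barrier, built on a pthread rwlock: the final
 * write-lock/unlock pair only serves to wait until all read-side holders
 * (parallel workers) are gone.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t work_sem = PTHREAD_RWLOCK_INITIALIZER;

static void *worker(void *arg)
{
	(void)arg;
	pthread_rwlock_rdlock(&work_sem);	/* like down_read(&ubi->work_sem) */
	usleep(100 * 1000);			/* pretend to erase a block */
	pthread_rwlock_unlock(&work_sem);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	usleep(10 * 1000);			/* let the worker grab the lock */

	/*
	 * Barrier: cannot be granted while any reader still holds the lock,
	 * so returning from it means all parallel work has finished.
	 */
	pthread_rwlock_wrlock(&work_sem);
	pthread_rwlock_unlock(&work_sem);

	puts("all parallel work finished");
	pthread_join(t, NULL);
	return 0;
}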
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index dd5e048..545c09e 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -936,7 +936,7 @@ static int cops_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct cops_local *lp = netdev_priv(dev);
struct sockaddr_at *sa = (struct sockaddr_at *)&ifr->ifr_addr;
- struct atalk_addr *aa = (struct atalk_addr *)&lp->node_addr;
+ struct atalk_addr *aa = &lp->node_addr;
switch(cmd)
{
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 3463b46..a030e63 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2454,24 +2454,27 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
out:
if (res) {
/* no suitable interface, frame not sent */
- dev_kfree_skb(skb);
+ kfree_skb(skb);
}
return NETDEV_TX_OK;
}
-int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
- struct slave *slave)
+int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
{
int ret = RX_HANDLER_ANOTHER;
+ struct lacpdu *lacpdu, _lacpdu;
+
if (skb->protocol != PKT_TYPE_LACPDU)
return ret;
- if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
+ lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu);
+ if (!lacpdu)
return ret;
read_lock(&bond->lock);
- ret = bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
+ ret = bond_3ad_rx_indication(lacpdu, slave, skb->len);
read_unlock(&bond->lock);
return ret;
}
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 5ee7e3c..0cfaa4a 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -274,8 +274,8 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave);
void bond_3ad_handle_link_change(struct slave *slave, char link);
int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
-int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
- struct slave *slave);
+int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave);
int bond_3ad_set_carrier(struct bonding *bond);
void bond_3ad_update_lacp_rate(struct bonding *bond);
#endif //__BOND_3AD_H__
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 0f59c15..e15cc11 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -342,27 +342,17 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
_unlock_rx_hashtbl_bh(bond);
}
-static int rlb_arp_recv(struct sk_buff *skb, struct bonding *bond,
- struct slave *slave)
+static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
{
- struct arp_pkt *arp;
+ struct arp_pkt *arp, _arp;
if (skb->protocol != cpu_to_be16(ETH_P_ARP))
goto out;
- arp = (struct arp_pkt *) skb->data;
- if (!arp) {
- pr_debug("Packet has no ARP data\n");
+ arp = skb_header_pointer(skb, 0, sizeof(_arp), &_arp);
+ if (!arp)
goto out;
- }
-
- if (!pskb_may_pull(skb, arp_hdr_len(bond->dev)))
- goto out;
-
- if (skb->len < sizeof(struct arp_pkt)) {
- pr_debug("Packet is too small to be an ARP\n");
- goto out;
- }
if (arp->op_code == htons(ARPOP_REPLY)) {
/* update rx hash table for this ARP */
@@ -1356,12 +1346,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
}
}
+ read_unlock(&bond->curr_slave_lock);
+
if (res) {
/* no suitable interface, frame not sent */
- dev_kfree_skb(skb);
+ kfree_skb(skb);
}
- read_unlock(&bond->curr_slave_lock);
-
return NETDEV_TX_OK;
}
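In bond_alb_xmit() the read_unlock() is moved ahead of the drop path because freeing the frame needs no protection from curr_slave_lock; only the slave selection and transmit do. A pthreads sketch of that narrower lock scope, with hypothetical helper names standing in for the bonding code:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_rwlock_t curr_slave_lock = PTHREAD_RWLOCK_INITIALIZER;

static int try_transmit(void *frame)
{
        (void)frame;
        return -1;              /* pretend no suitable interface was found */
}

int main(void)
{
        void *frame = malloc(64);
        int res;

        pthread_rwlock_rdlock(&curr_slave_lock);
        res = try_transmit(frame);              /* needs the lock */
        pthread_rwlock_unlock(&curr_slave_lock);

        if (res) {
                free(frame);    /* dropping the frame needs no lock */
                printf("frame not sent, freed outside the lock\n");
        }
        return 0;
}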
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 3680aa2..2cf084e 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -6,7 +6,7 @@
#include "bonding.h"
#include "bond_alb.h"
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
#include <linux/debugfs.h>
#include <linux/seq_file.h>
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2ee8cf9..6fae5f3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -76,6 +76,7 @@
#include <net/route.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <net/pkt_sched.h>
#include "bonding.h"
#include "bond_3ad.h"
#include "bond_alb.h"
@@ -381,8 +382,6 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
return next;
}
-#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
-
/**
* bond_dev_queue_xmit - Prepare skb for xmit.
*
@@ -395,7 +394,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
{
skb->dev = slave_dev;
- skb->queue_mapping = bond_queue_mapping(skb);
+ BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
+ sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
+ skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
if (unlikely(netpoll_tx_running(slave_dev)))
bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
@@ -1239,9 +1240,7 @@ static inline int slave_enable_netpoll(struct slave *slave)
if (!np)
goto out;
- np->dev = slave->dev;
- strlcpy(np->dev_name, slave->dev->name, IFNAMSIZ);
- err = __netpoll_setup(np);
+ err = __netpoll_setup(np, slave->dev);
if (err) {
kfree(np);
goto out;
@@ -1383,6 +1382,7 @@ static void bond_compute_features(struct bonding *bond)
netdev_features_t vlan_features = BOND_VLAN_FEATURES;
unsigned short max_hard_header_len = ETH_HLEN;
int i;
+ unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
read_lock(&bond->lock);
@@ -1393,6 +1393,7 @@ static void bond_compute_features(struct bonding *bond)
vlan_features = netdev_increment_features(vlan_features,
slave->dev->vlan_features, BOND_VLAN_FEATURES);
+ dst_release_flag &= slave->dev->priv_flags;
if (slave->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = slave->dev->hard_header_len;
}
@@ -1401,6 +1402,9 @@ done:
bond_dev->vlan_features = vlan_features;
bond_dev->hard_header_len = max_hard_header_len;
+ flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
+ bond_dev->priv_flags = flags | dst_release_flag;
+
read_unlock(&bond->lock);
netdev_change_features(bond_dev);
@@ -1444,8 +1448,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
struct sk_buff *skb = *pskb;
struct slave *slave;
struct bonding *bond;
- int (*recv_probe)(struct sk_buff *, struct bonding *,
- struct slave *);
+ int (*recv_probe)(const struct sk_buff *, struct bonding *,
+ struct slave *);
int ret = RX_HANDLER_ANOTHER;
skb = skb_share_check(skb, GFP_ATOMIC);
@@ -1462,15 +1466,10 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
recv_probe = ACCESS_ONCE(bond->recv_probe);
if (recv_probe) {
- struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
-
- if (likely(nskb)) {
- ret = recv_probe(nskb, bond, slave);
- dev_kfree_skb(nskb);
- if (ret == RX_HANDLER_CONSUMED) {
- consume_skb(skb);
- return ret;
- }
+ ret = recv_probe(skb, bond, slave);
+ if (ret == RX_HANDLER_CONSUMED) {
+ consume_skb(skb);
+ return ret;
}
}
@@ -2737,25 +2736,31 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
}
}
-static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
- struct slave *slave)
+static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
{
- struct arphdr *arp;
+ struct arphdr *arp = (struct arphdr *)skb->data;
unsigned char *arp_ptr;
__be32 sip, tip;
+ int alen;
if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
return RX_HANDLER_ANOTHER;
read_lock(&bond->lock);
+ alen = arp_hdr_len(bond->dev);
pr_debug("bond_arp_rcv: bond %s skb->dev %s\n",
bond->dev->name, skb->dev->name);
- if (!pskb_may_pull(skb, arp_hdr_len(bond->dev)))
- goto out_unlock;
+ if (alen > skb_headlen(skb)) {
+ arp = kmalloc(alen, GFP_ATOMIC);
+ if (!arp)
+ goto out_unlock;
+ if (skb_copy_bits(skb, 0, arp, alen) < 0)
+ goto out_unlock;
+ }
- arp = arp_hdr(skb);
if (arp->ar_hln != bond->dev->addr_len ||
skb->pkt_type == PACKET_OTHERHOST ||
skb->pkt_type == PACKET_LOOPBACK ||
@@ -2790,6 +2795,8 @@ static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
out_unlock:
read_unlock(&bond->lock);
+ if (arp != (struct arphdr *)skb->data)
+ kfree(arp);
return RX_HANDLER_ANOTHER;
}
@@ -3226,6 +3233,12 @@ static int bond_master_netdev_event(unsigned long event,
switch (event) {
case NETDEV_CHANGENAME:
return bond_event_changename(event_bond);
+ case NETDEV_UNREGISTER:
+ bond_remove_proc_entry(event_bond);
+ break;
+ case NETDEV_REGISTER:
+ bond_create_proc_entry(event_bond);
+ break;
default:
break;
}
@@ -3986,7 +3999,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
out:
if (res) {
/* no suitable interface, frame not sent */
- dev_kfree_skb(skb);
+ kfree_skb(skb);
}
return NETDEV_TX_OK;
@@ -4008,11 +4021,11 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
res = bond_dev_queue_xmit(bond, skb,
bond->curr_active_slave->dev);
+ read_unlock(&bond->curr_slave_lock);
+
if (res)
/* no suitable interface, frame not sent */
- dev_kfree_skb(skb);
-
- read_unlock(&bond->curr_slave_lock);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -4051,7 +4064,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
if (res) {
/* no suitable interface, frame not sent */
- dev_kfree_skb(skb);
+ kfree_skb(skb);
}
return NETDEV_TX_OK;
@@ -4089,7 +4102,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
res = bond_dev_queue_xmit(bond, skb2, tx_dev);
if (res) {
- dev_kfree_skb(skb2);
+ kfree_skb(skb2);
continue;
}
}
@@ -4103,7 +4116,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
out:
if (res)
/* no suitable interface, frame not sent */
- dev_kfree_skb(skb);
+ kfree_skb(skb);
/* frame sent to all suitable interfaces */
return NETDEV_TX_OK;
@@ -4171,7 +4184,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
/*
* Save the original txq to restore before passing to the driver
*/
- bond_queue_mapping(skb) = skb->queue_mapping;
+ qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
if (unlikely(txq >= dev->real_num_tx_queues)) {
do {
@@ -4209,7 +4222,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
pr_err("%s: Error: Unknown bonding mode %d\n",
dev->name, bond->params.mode);
WARN_ON_ONCE(1);
- dev_kfree_skb(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
}
@@ -4231,7 +4244,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (bond->slave_cnt)
ret = __bond_start_xmit(skb, dev);
else
- dev_kfree_skb(skb);
+ kfree_skb(skb);
read_unlock(&bond->lock);
@@ -4410,8 +4423,6 @@ static void bond_uninit(struct net_device *bond_dev)
bond_work_cancel_all(bond);
- bond_remove_proc_entry(bond);
-
bond_debug_unregister(bond);
__hw_addr_flush(&bond->mc_list);
@@ -4813,7 +4824,6 @@ static int bond_init(struct net_device *bond_dev)
bond_set_lockdep_class(bond_dev);
- bond_create_proc_entry(bond);
list_add_tail(&bond->bond_list, &bn->dev_list);
bond_prepare_sysfs_group(bond);
@@ -4835,17 +4845,19 @@ static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
return 0;
}
-static int bond_get_tx_queues(struct net *net, struct nlattr *tb[])
+static unsigned int bond_get_num_tx_queues(void)
{
return tx_queues;
}
static struct rtnl_link_ops bond_link_ops __read_mostly = {
- .kind = "bond",
- .priv_size = sizeof(struct bonding),
- .setup = bond_setup,
- .validate = bond_validate,
- .get_tx_queues = bond_get_tx_queues,
+ .kind = "bond",
+ .priv_size = sizeof(struct bonding),
+ .setup = bond_setup,
+ .validate = bond_validate,
+ .get_num_tx_queues = bond_get_num_tx_queues,
+ .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
+ as for TX queues */
};
/* Create a new bond based on the specified name and bonding parameters.
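The queue-mapping change above stashes the original txq in qdisc_skb_cb instead of raw skb->cb bytes, and guards the assignment with BUILD_BUG_ON() so the two fields can never silently diverge in width. Below is a standalone sketch of that compile-time check, using placeholder structs rather than the real sk_buff/qdisc_skb_cb layout.

#include <stdint.h>
#include <stdio.h>

/* Compile-time assertion: the array size becomes negative if cond is true. */
#define MY_BUILD_BUG_ON(cond)   ((void)sizeof(char[1 - 2 * !!(cond)]))

struct cb_area {                        /* stand-in for qdisc_skb_cb */
        uint16_t slave_dev_queue_mapping;
};

struct packet {                         /* stand-in for sk_buff */
        uint16_t queue_mapping;
        struct cb_area cb;
};

int main(void)
{
        struct packet p = { .cb = { .slave_dev_queue_mapping = 3 } };

        /* Refuses to compile if the two fields ever diverge in width. */
        MY_BUILD_BUG_ON(sizeof(p.queue_mapping) !=
                        sizeof(p.cb.slave_dev_queue_mapping));

        p.queue_mapping = p.cb.slave_dev_queue_mapping;
        printf("restored queue mapping: %u\n", p.queue_mapping);
        return 0;
}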
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index ad284ba..3cea38d 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -150,14 +150,25 @@ static void bond_info_show_master(struct seq_file *seq)
}
}
+static const char *bond_slave_link_status(s8 link)
+{
+ static const char * const status[] = {
+ [BOND_LINK_UP] = "up",
+ [BOND_LINK_FAIL] = "going down",
+ [BOND_LINK_DOWN] = "down",
+ [BOND_LINK_BACK] = "going back",
+ };
+
+ return status[link];
+}
+
static void bond_info_show_slave(struct seq_file *seq,
const struct slave *slave)
{
struct bonding *bond = seq->private;
seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
- seq_printf(seq, "MII Status: %s\n",
- (slave->link == BOND_LINK_UP) ? "up" : "down");
+ seq_printf(seq, "MII Status: %s\n", bond_slave_link_status(slave->link));
if (slave->speed == SPEED_UNKNOWN)
seq_printf(seq, "Speed: %s\n", "Unknown");
else
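bond_slave_link_status() replaces the old up/down ternary with a designated-initializer string table indexed by link state, so the transitional "going down"/"going back" states are reported too. A self-contained sketch of the same construct, using placeholder enum values rather than the kernel's BOND_LINK_* constants:

#include <stdio.h>

enum link_state { LINK_UP, LINK_FAIL, LINK_DOWN, LINK_BACK };

static const char *link_status(enum link_state link)
{
        static const char * const status[] = {
                [LINK_UP]   = "up",
                [LINK_FAIL] = "going down",
                [LINK_DOWN] = "down",
                [LINK_BACK] = "going back",
        };

        return status[link];
}

int main(void)
{
        printf("MII Status: %s\n", link_status(LINK_FAIL));
        return 0;
}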
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index aef42f0..dc15d24 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1082,8 +1082,12 @@ static ssize_t bonding_store_primary(struct device *d,
}
}
- pr_info("%s: Unable to set %.*s as primary slave.\n",
- bond->dev->name, (int)strlen(buf) - 1, buf);
+ strncpy(bond->params.primary, ifname, IFNAMSIZ);
+ bond->params.primary[IFNAMSIZ - 1] = 0;
+
+ pr_info("%s: Recording %s as primary, "
+ "but it has not been enslaved to %s yet.\n",
+ bond->dev->name, ifname, bond->dev->name);
out:
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
@@ -1491,7 +1495,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
/* Check buffer length, valid ifname and queue id */
if (strlen(buffer) > IFNAMSIZ ||
!dev_valid_name(buffer) ||
- qid > bond->params.tx_queues)
+ qid > bond->dev->real_num_tx_queues)
goto err_no_cmd;
/* Get the pointer to that interface if it exists */
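The primary-slave change above records the requested name with strncpy() and then forces NUL termination, since strncpy() leaves the buffer unterminated when the source is at least IFNAMSIZ bytes long. A tiny standalone illustration of that pattern (IFNAMSIZ is redefined locally for the example):

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16                     /* local definition for the example */

int main(void)
{
        char primary[IFNAMSIZ];
        const char *ifname = "a-very-long-interface-name";

        strncpy(primary, ifname, IFNAMSIZ);
        primary[IFNAMSIZ - 1] = '\0';   /* guarantee NUL termination */

        printf("recorded primary: %s\n", primary);
        return 0;
}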
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 4581aa5..f8af2fc 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -218,8 +218,8 @@ struct bonding {
struct slave *primary_slave;
bool force_primary;
s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
- int (*recv_probe)(struct sk_buff *, struct bonding *,
- struct slave *);
+ int (*recv_probe)(const struct sk_buff *, struct bonding *,
+ struct slave *);
rwlock_t lock;
rwlock_t curr_slave_lock;
u8 send_peer_notif;
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 1520814..0def8b3 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
@@ -20,7 +19,7 @@
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
-#include <linux/rtnetlink.h>
+#include <net/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>
@@ -33,59 +32,46 @@ MODULE_DESCRIPTION("CAIF HSI driver");
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
(((pow)-((x)&((pow)-1)))))
-static int inactivity_timeout = 1000;
-module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");
+static const struct cfhsi_config hsi_default_config = {
-static int aggregation_timeout = 1;
-module_param(aggregation_timeout, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(aggregation_timeout, "Aggregation timeout on HSI, ms.");
+ /* Inactivity timeout on HSI, ms */
+ .inactivity_timeout = HZ,
-/*
- * HSI padding options.
- * Warning: must be a base of 2 (& operation used) and can not be zero !
- */
-static int hsi_head_align = 4;
-module_param(hsi_head_align, int, S_IRUGO);
-MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");
+ /* Aggregation timeout (ms) of zero means no aggregation is done */
+ .aggregation_timeout = 1,
-static int hsi_tail_align = 4;
-module_param(hsi_tail_align, int, S_IRUGO);
-MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");
-
-/*
- * HSI link layer flowcontrol thresholds.
- * Warning: A high threshold value migth increase throughput but it will at
- * the same time prevent channel prioritization and increase the risk of
- * flooding the modem. The high threshold should be above the low.
- */
-static int hsi_high_threshold = 100;
-module_param(hsi_high_threshold, int, S_IRUGO);
-MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");
+ /*
+ * HSI link layer flow-control thresholds.
+ * Threshold values for the HSI packet queue. Flow-control will be
+ * asserted when the number of packets exceeds q_high_mark. It will
+ * not be de-asserted before the number of packets drops below
+ * q_low_mark.
+ * Warning: A high threshold value might increase throughput but it
+ * will at the same time prevent channel prioritization and increase
+ * the risk of flooding the modem. The high threshold should be above
+ * the low.
+ */
+ .q_high_mark = 100,
+ .q_low_mark = 50,
-static int hsi_low_threshold = 50;
-module_param(hsi_low_threshold, int, S_IRUGO);
-MODULE_PARM_DESC(hsi_low_threshold, "HSI high threshold (FLOW ON).");
+ /*
+ * HSI padding options.
+ * Warning: must be a power of 2 (& operation used) and cannot be zero!
+ */
+ .head_align = 4,
+ .tail_align = 4,
+};
#define ON 1
#define OFF 0
-/*
- * Threshold values for the HSI packet queue. Flowcontrol will be asserted
- * when the number of packets exceeds HIGH_WATER_MARK. It will not be
- * de-asserted before the number of packets drops below LOW_WATER_MARK.
- */
-#define LOW_WATER_MARK hsi_low_threshold
-#define HIGH_WATER_MARK hsi_high_threshold
-
static LIST_HEAD(cfhsi_list);
-static spinlock_t cfhsi_list_lock;
static void cfhsi_inactivity_tout(unsigned long arg)
{
struct cfhsi *cfhsi = (struct cfhsi *)arg;
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
/* Schedule power down work queue. */
@@ -101,8 +87,8 @@ static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
int hpad, tpad, len;
info = (struct caif_payload_info *)&skb->cb;
- hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
- tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
+ hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
+ tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
len = skb->len + hpad + tpad;
if (direction > 0)
@@ -115,7 +101,7 @@ static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
{
int i;
- if (cfhsi->aggregation_timeout < 0)
+ if (cfhsi->cfg.aggregation_timeout == 0)
return true;
for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
@@ -171,7 +157,7 @@ static void cfhsi_abort_tx(struct cfhsi *cfhsi)
cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
mod_timer(&cfhsi->inactivity_timer,
- jiffies + cfhsi->inactivity_timeout);
+ jiffies + cfhsi->cfg.inactivity_timeout);
spin_unlock_bh(&cfhsi->lock);
}
@@ -181,14 +167,14 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
size_t fifo_occupancy;
int ret;
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
do {
- ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
+ ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
&fifo_occupancy);
if (ret) {
- dev_warn(&cfhsi->ndev->dev,
+ netdev_warn(cfhsi->ndev,
"%s: can't get FIFO occupancy: %d.\n",
__func__, ret);
break;
@@ -198,11 +184,11 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
- ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy,
- cfhsi->dev);
+ ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
+ cfhsi->ops);
if (ret) {
clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
- dev_warn(&cfhsi->ndev->dev,
+ netdev_warn(cfhsi->ndev,
"%s: can't read data: %d.\n",
__func__, ret);
break;
@@ -213,13 +199,13 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
!test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
if (ret < 0) {
- dev_warn(&cfhsi->ndev->dev,
+ netdev_warn(cfhsi->ndev,
"%s: can't wait for flush complete: %d.\n",
__func__, ret);
break;
} else if (!ret) {
ret = -ETIMEDOUT;
- dev_warn(&cfhsi->ndev->dev,
+ netdev_warn(cfhsi->ndev,
"%s: timeout waiting for flush complete.\n",
__func__);
break;
@@ -246,14 +232,14 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Check if we can embed a CAIF frame. */
if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
struct caif_payload_info *info;
- int hpad = 0;
- int tpad = 0;
+ int hpad;
+ int tpad;
/* Calculate needed head alignment and tail alignment. */
info = (struct caif_payload_info *)&skb->cb;
- hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
- tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
+ hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
+ tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
/* Check if frame still fits with added alignment. */
if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
@@ -282,8 +268,8 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
while (nfrms < CFHSI_MAX_PKTS) {
struct caif_payload_info *info;
- int hpad = 0;
- int tpad = 0;
+ int hpad;
+ int tpad;
if (!skb)
skb = cfhsi_dequeue(cfhsi);
@@ -294,8 +280,8 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Calculate needed head alignment and tail alignment. */
info = (struct caif_payload_info *)&skb->cb;
- hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
- tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
+ hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
+ tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
/* Fill in CAIF frame length in descriptor. */
desc->cffrm_len[nfrms] = hpad + skb->len + tpad;
@@ -348,7 +334,7 @@ static void cfhsi_start_tx(struct cfhsi *cfhsi)
struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
int len, res;
- dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
+ netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
@@ -366,22 +352,22 @@ static void cfhsi_start_tx(struct cfhsi *cfhsi)
cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
/* Start inactivity timer. */
mod_timer(&cfhsi->inactivity_timer,
- jiffies + cfhsi->inactivity_timeout);
+ jiffies + cfhsi->cfg.inactivity_timeout);
spin_unlock_bh(&cfhsi->lock);
break;
}
/* Set up new transfer. */
- res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
+ res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
if (WARN_ON(res < 0))
- dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
+ netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
__func__, res);
} while (res < 0);
}
static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
- dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
+ netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
@@ -392,7 +378,7 @@ static void cfhsi_tx_done(struct cfhsi *cfhsi)
*/
spin_lock_bh(&cfhsi->lock);
if (cfhsi->flow_off_sent &&
- cfhsi_tx_queue_len(cfhsi) <= cfhsi->q_low_mark &&
+ cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
cfhsi->cfdev.flowctrl) {
cfhsi->flow_off_sent = 0;
@@ -404,19 +390,19 @@ static void cfhsi_tx_done(struct cfhsi *cfhsi)
cfhsi_start_tx(cfhsi);
} else {
mod_timer(&cfhsi->aggregation_timer,
- jiffies + cfhsi->aggregation_timeout);
+ jiffies + cfhsi->cfg.aggregation_timeout);
spin_unlock_bh(&cfhsi->lock);
}
return;
}
-static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
+static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
struct cfhsi *cfhsi;
- cfhsi = container_of(drv, struct cfhsi, drv);
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
@@ -433,7 +419,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
if ((desc->header & ~CFHSI_PIGGY_DESC) ||
(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
- dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
+ netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
__func__);
return -EPROTO;
}
@@ -455,7 +441,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Sanity check length of CAIF frame. */
if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
- dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
+ netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
__func__);
return -EPROTO;
}
@@ -463,7 +449,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Allocate SKB (OK even in IRQ context). */
skb = alloc_skb(len + 1, GFP_ATOMIC);
if (!skb) {
- dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
+ netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
__func__);
return -ENOMEM;
}
@@ -477,8 +463,8 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
skb->dev = cfhsi->ndev;
/*
- * We are called from a arch specific platform device.
- * Unfortunately we don't know what context we're
+ * We are in a callback handler and
+ * unfortunately we don't know what context we're
* running in.
*/
if (in_interrupt())
@@ -504,7 +490,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
xfer_sz += CFHSI_DESC_SZ;
if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
- dev_err(&cfhsi->ndev->dev,
+ netdev_err(cfhsi->ndev,
"%s: Invalid payload len: %d, ignored.\n",
__func__, xfer_sz);
return -EPROTO;
@@ -551,7 +537,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Sanity check header and offset. */
if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
- dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
+ netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
__func__);
return -EPROTO;
}
@@ -573,7 +559,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
struct sk_buff *skb;
u8 *dst = NULL;
u8 *pcffrm = NULL;
- int len = 0;
+ int len;
/* CAIF frame starts after head padding. */
pcffrm = pfrm + *pfrm + 1;
@@ -585,7 +571,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Sanity check length of CAIF frames. */
if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
- dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
+ netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
__func__);
return -EPROTO;
}
@@ -593,7 +579,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Allocate SKB (OK even in IRQ context). */
skb = alloc_skb(len + 1, GFP_ATOMIC);
if (!skb) {
- dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
+ netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
__func__);
cfhsi->rx_state.nfrms = nfrms;
return -ENOMEM;
@@ -608,7 +594,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
skb->dev = cfhsi->ndev;
/*
- * We're called from a platform device,
+ * We're called in a callback from HSI
* and don't know the context we're running in.
*/
if (in_interrupt())
@@ -639,7 +625,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
desc = (struct cfhsi_desc *)cfhsi->rx_buf;
- dev_dbg(&cfhsi->ndev->dev, "%s\n", __func__);
+ netdev_dbg(cfhsi->ndev, "%s\n", __func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
@@ -647,7 +633,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
/* Update inactivity timer if pending. */
spin_lock_bh(&cfhsi->lock);
mod_timer_pending(&cfhsi->inactivity_timer,
- jiffies + cfhsi->inactivity_timeout);
+ jiffies + cfhsi->cfg.inactivity_timeout);
spin_unlock_bh(&cfhsi->lock);
if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
@@ -680,12 +666,11 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
if (desc_pld_len < 0)
goto out_of_sync;
- if (desc_pld_len > 0)
+ if (desc_pld_len > 0) {
rx_len = desc_pld_len;
-
- if (desc_pld_len > 0 &&
- (piggy_desc->header & CFHSI_PIGGY_DESC))
- rx_len += CFHSI_DESC_SZ;
+ if (piggy_desc->header & CFHSI_PIGGY_DESC)
+ rx_len += CFHSI_DESC_SZ;
+ }
/*
* Copy needed information from the piggy-backed
@@ -693,10 +678,6 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
*/
memcpy(rx_buf, (u8 *)piggy_desc,
CFHSI_DESC_SHORT_SZ);
- /* Mark no embedded frame here */
- piggy_desc->offset = 0;
- if (desc_pld_len == -EPROTO)
- goto out_of_sync;
}
}
@@ -712,13 +693,13 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
/* Initiate next read */
if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
/* Set up new transfer. */
- dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
__func__);
- res = cfhsi->dev->cfhsi_rx(rx_ptr, rx_len,
- cfhsi->dev);
+ res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
+ cfhsi->ops);
if (WARN_ON(res < 0)) {
- dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
+ netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
__func__, res);
cfhsi->ndev->stats.rx_errors++;
cfhsi->ndev->stats.rx_dropped++;
@@ -737,6 +718,8 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
/* Extract any payload in piggyback descriptor. */
if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
goto out_of_sync;
+ /* Mark no embedded frame after extracting it */
+ piggy_desc->offset = 0;
}
}
@@ -753,7 +736,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
return;
out_of_sync:
- dev_err(&cfhsi->ndev->dev, "%s: Out of sync.\n", __func__);
+ netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
cfhsi->rx_buf, CFHSI_DESC_SZ);
schedule_work(&cfhsi->out_of_sync_work);
@@ -763,18 +746,18 @@ static void cfhsi_rx_slowpath(unsigned long arg)
{
struct cfhsi *cfhsi = (struct cfhsi *)arg;
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
cfhsi_rx_done(cfhsi);
}
-static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
+static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
struct cfhsi *cfhsi;
- cfhsi = container_of(drv, struct cfhsi, drv);
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
@@ -807,9 +790,9 @@ static void cfhsi_wake_up(struct work_struct *work)
}
/* Activate wake line. */
- cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
+ cfhsi->ops->cfhsi_wake_up(cfhsi->ops);
- dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
__func__);
/* Wait for acknowledge. */
@@ -819,33 +802,33 @@ static void cfhsi_wake_up(struct work_struct *work)
&cfhsi->bits), ret);
if (unlikely(ret < 0)) {
/* Interrupted by signal. */
- dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
+ netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
__func__, ret);
clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
- cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
+ cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
return;
} else if (!ret) {
bool ca_wake = false;
size_t fifo_occupancy = 0;
/* Wakeup timeout */
- dev_dbg(&cfhsi->ndev->dev, "%s: Timeout.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
__func__);
/* Check FIFO to check if modem has sent something. */
- WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
+ WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
&fifo_occupancy));
- dev_dbg(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
__func__, (unsigned) fifo_occupancy);
/* Check if we missed the interrupt. */
- WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
+ WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
&ca_wake));
if (ca_wake) {
- dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n",
+ netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
__func__);
/* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
@@ -856,11 +839,11 @@ static void cfhsi_wake_up(struct work_struct *work)
}
clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
- cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
+ cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
return;
}
wake_ack:
- dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
__func__);
/* Clear power up bit. */
@@ -868,11 +851,11 @@ wake_ack:
clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
/* Resume read operation. */
- dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", __func__);
- res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->dev);
+ netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
+ res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);
if (WARN_ON(res < 0))
- dev_err(&cfhsi->ndev->dev, "%s: RX err %d.\n", __func__, res);
+ netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);
/* Clear power up acknowledgment. */
clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
@@ -881,16 +864,16 @@ wake_ack:
/* Resume transmit if queues are not empty. */
if (!cfhsi_tx_queue_len(cfhsi)) {
- dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
__func__);
/* Start inactivity timer. */
mod_timer(&cfhsi->inactivity_timer,
- jiffies + cfhsi->inactivity_timeout);
+ jiffies + cfhsi->cfg.inactivity_timeout);
spin_unlock_bh(&cfhsi->lock);
return;
}
- dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
__func__);
spin_unlock_bh(&cfhsi->lock);
@@ -900,14 +883,14 @@ wake_ack:
if (likely(len > 0)) {
/* Set up new transfer. */
- res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
+ res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
if (WARN_ON(res < 0)) {
- dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
+ netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
__func__, res);
cfhsi_abort_tx(cfhsi);
}
} else {
- dev_err(&cfhsi->ndev->dev,
+ netdev_err(cfhsi->ndev,
"%s: Failed to create HSI frame: %d.\n",
__func__, len);
}
@@ -921,13 +904,13 @@ static void cfhsi_wake_down(struct work_struct *work)
int retry = CFHSI_WAKE_TOUT;
cfhsi = container_of(work, struct cfhsi, wake_down_work);
- dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
+ netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
/* Deactivate wake line. */
- cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
+ cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
/* Wait for acknowledge. */
ret = CFHSI_WAKE_TOUT;
@@ -936,26 +919,26 @@ static void cfhsi_wake_down(struct work_struct *work)
&cfhsi->bits), ret);
if (ret < 0) {
/* Interrupted by signal. */
- dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
+ netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
__func__, ret);
return;
} else if (!ret) {
bool ca_wake = true;
/* Timeout */
- dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", __func__);
+ netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);
/* Check if we missed the interrupt. */
- WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
+ WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
&ca_wake));
if (!ca_wake)
- dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n",
+ netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
__func__);
}
/* Check FIFO occupancy. */
while (retry) {
- WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
+ WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
&fifo_occupancy));
if (!fifo_occupancy)
@@ -967,14 +950,13 @@ static void cfhsi_wake_down(struct work_struct *work)
}
if (!retry)
- dev_err(&cfhsi->ndev->dev, "%s: FIFO Timeout.\n", __func__);
+ netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);
/* Clear AWAKE condition. */
clear_bit(CFHSI_AWAKE, &cfhsi->bits);
/* Cancel pending RX requests. */
- cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
-
+ cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
}
static void cfhsi_out_of_sync(struct work_struct *work)
@@ -988,12 +970,12 @@ static void cfhsi_out_of_sync(struct work_struct *work)
rtnl_unlock();
}
-static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
+static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
{
struct cfhsi *cfhsi = NULL;
- cfhsi = container_of(drv, struct cfhsi, drv);
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
@@ -1007,12 +989,12 @@ static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}
-static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
+static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
{
struct cfhsi *cfhsi = NULL;
- cfhsi = container_of(drv, struct cfhsi, drv);
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
/* Initiating low power is only permitted by the host (us). */
@@ -1024,7 +1006,7 @@ static void cfhsi_aggregation_tout(unsigned long arg)
{
struct cfhsi *cfhsi = (struct cfhsi *)arg;
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
cfhsi_start_tx(cfhsi);
@@ -1077,7 +1059,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
/* Send flow off if number of packets is above high water mark. */
if (!cfhsi->flow_off_sent &&
- cfhsi_tx_queue_len(cfhsi) > cfhsi->q_high_mark &&
+ cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
cfhsi->cfdev.flowctrl) {
cfhsi->flow_off_sent = 1;
cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
@@ -1114,9 +1096,9 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
WARN_ON(!len);
/* Set up new transfer. */
- res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
+ res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
if (WARN_ON(res < 0)) {
- dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
+ netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
__func__, res);
cfhsi_abort_tx(cfhsi);
}
@@ -1129,19 +1111,19 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
-static const struct net_device_ops cfhsi_ops;
+static const struct net_device_ops cfhsi_netdevops;
static void cfhsi_setup(struct net_device *dev)
{
int i;
struct cfhsi *cfhsi = netdev_priv(dev);
dev->features = 0;
- dev->netdev_ops = &cfhsi_ops;
dev->type = ARPHRD_CAIF;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
dev->tx_queue_len = 0;
dev->destructor = free_netdev;
+ dev->netdev_ops = &cfhsi_netdevops;
for (i = 0; i < CFHSI_PRIO_LAST; ++i)
skb_queue_head_init(&cfhsi->qhead[i]);
cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
@@ -1149,42 +1131,7 @@ static void cfhsi_setup(struct net_device *dev)
cfhsi->cfdev.use_stx = false;
cfhsi->cfdev.use_fcs = false;
cfhsi->ndev = dev;
-}
-
-int cfhsi_probe(struct platform_device *pdev)
-{
- struct cfhsi *cfhsi = NULL;
- struct net_device *ndev;
-
- int res;
-
- ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
- if (!ndev)
- return -ENODEV;
-
- cfhsi = netdev_priv(ndev);
- cfhsi->ndev = ndev;
- cfhsi->pdev = pdev;
-
- /* Assign the HSI device. */
- cfhsi->dev = pdev->dev.platform_data;
-
- /* Assign the driver to this HSI device. */
- cfhsi->dev->drv = &cfhsi->drv;
-
- /* Register network device. */
- res = register_netdev(ndev);
- if (res) {
- dev_err(&ndev->dev, "%s: Registration error: %d.\n",
- __func__, res);
- free_netdev(ndev);
- }
- /* Add CAIF HSI device to list. */
- spin_lock(&cfhsi_list_lock);
- list_add_tail(&cfhsi->list, &cfhsi_list);
- spin_unlock(&cfhsi_list_lock);
-
- return res;
+ cfhsi->cfg = hsi_default_config;
}
static int cfhsi_open(struct net_device *ndev)
@@ -1200,9 +1147,6 @@ static int cfhsi_open(struct net_device *ndev)
/* Set flow info */
cfhsi->flow_off_sent = 0;
- cfhsi->q_low_mark = LOW_WATER_MARK;
- cfhsi->q_high_mark = HIGH_WATER_MARK;
-
/*
* Allocate a TX buffer with the size of a HSI packet descriptors
@@ -1230,20 +1174,8 @@ static int cfhsi_open(struct net_device *ndev)
goto err_alloc_rx_flip;
}
- /* Pre-calculate inactivity timeout. */
- if (inactivity_timeout != -1) {
- cfhsi->inactivity_timeout =
- inactivity_timeout * HZ / 1000;
- if (!cfhsi->inactivity_timeout)
- cfhsi->inactivity_timeout = 1;
- else if (cfhsi->inactivity_timeout > NEXT_TIMER_MAX_DELTA)
- cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
- } else {
- cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
- }
-
/* Initialize aggregation timeout */
- cfhsi->aggregation_timeout = aggregation_timeout;
+ cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout;
/* Initialize receive variables. */
cfhsi->rx_ptr = cfhsi->rx_buf;
@@ -1253,10 +1185,10 @@ static int cfhsi_open(struct net_device *ndev)
spin_lock_init(&cfhsi->lock);
/* Set up the driver. */
- cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
- cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;
- cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
- cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;
+ cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
+ cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
+ cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
+ cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;
/* Initialize the work queues. */
INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
@@ -1270,9 +1202,9 @@ static int cfhsi_open(struct net_device *ndev)
clear_bit(CFHSI_AWAKE, &cfhsi->bits);
/* Create work thread. */
- cfhsi->wq = create_singlethread_workqueue(cfhsi->pdev->name);
+ cfhsi->wq = create_singlethread_workqueue(cfhsi->ndev->name);
if (!cfhsi->wq) {
- dev_err(&cfhsi->ndev->dev, "%s: Failed to create work queue.\n",
+ netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
__func__);
res = -ENODEV;
goto err_create_wq;
@@ -1297,9 +1229,9 @@ static int cfhsi_open(struct net_device *ndev)
cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;
/* Activate HSI interface. */
- res = cfhsi->dev->cfhsi_up(cfhsi->dev);
+ res = cfhsi->ops->cfhsi_up(cfhsi->ops);
if (res) {
- dev_err(&cfhsi->ndev->dev,
+ netdev_err(cfhsi->ndev,
"%s: can't activate HSI interface: %d.\n",
__func__, res);
goto err_activate;
@@ -1308,14 +1240,14 @@ static int cfhsi_open(struct net_device *ndev)
/* Flush FIFO */
res = cfhsi_flush_fifo(cfhsi);
if (res) {
- dev_err(&cfhsi->ndev->dev, "%s: Can't flush FIFO: %d.\n",
+ netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
__func__, res);
goto err_net_reg;
}
return res;
err_net_reg:
- cfhsi->dev->cfhsi_down(cfhsi->dev);
+ cfhsi->ops->cfhsi_down(cfhsi->ops);
err_activate:
destroy_workqueue(cfhsi->wq);
err_create_wq:
@@ -1345,7 +1277,7 @@ static int cfhsi_close(struct net_device *ndev)
del_timer_sync(&cfhsi->aggregation_timer);
/* Cancel pending RX request (if any) */
- cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
+ cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
/* Destroy workqueue */
destroy_workqueue(cfhsi->wq);
@@ -1358,7 +1290,7 @@ static int cfhsi_close(struct net_device *ndev)
cfhsi_abort_tx(cfhsi);
/* Deactivate interface */
- cfhsi->dev->cfhsi_down(cfhsi->dev);
+ cfhsi->ops->cfhsi_down(cfhsi->ops);
/* Free buffers. */
kfree(tx_buf);
@@ -1367,85 +1299,184 @@ static int cfhsi_close(struct net_device *ndev)
return 0;
}
-static const struct net_device_ops cfhsi_ops = {
+static void cfhsi_uninit(struct net_device *dev)
+{
+ struct cfhsi *cfhsi = netdev_priv(dev);
+ ASSERT_RTNL();
+ symbol_put(cfhsi_get_device);
+ list_del(&cfhsi->list);
+}
+
+static const struct net_device_ops cfhsi_netdevops = {
+ .ndo_uninit = cfhsi_uninit,
.ndo_open = cfhsi_open,
.ndo_stop = cfhsi_close,
.ndo_start_xmit = cfhsi_xmit
};
-int cfhsi_remove(struct platform_device *pdev)
+static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
{
- struct list_head *list_node;
- struct list_head *n;
- struct cfhsi *cfhsi = NULL;
- struct cfhsi_dev *dev;
+ int i;
- dev = (struct cfhsi_dev *)pdev->dev.platform_data;
- spin_lock(&cfhsi_list_lock);
- list_for_each_safe(list_node, n, &cfhsi_list) {
- cfhsi = list_entry(list_node, struct cfhsi, list);
- /* Find the corresponding device. */
- if (cfhsi->dev == dev) {
- /* Remove from list. */
- list_del(list_node);
- spin_unlock(&cfhsi_list_lock);
- return 0;
- }
+ if (!data) {
+ pr_debug("no params data found\n");
+ return;
}
- spin_unlock(&cfhsi_list_lock);
- return -ENODEV;
+
+ i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
+ /*
+ * Inactivity timeout in millisecs. Lowest possible value is 1,
+ * and highest possible is NEXT_TIMER_MAX_DELTA.
+ */
+ if (data[i]) {
+ u32 inactivity_timeout = nla_get_u32(data[i]);
+ /* Pre-calculate inactivity timeout. */
+ cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000;
+ if (cfhsi->cfg.inactivity_timeout == 0)
+ cfhsi->cfg.inactivity_timeout = 1;
+ else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
+ cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
+ }
+
+ i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
+ if (data[i])
+ cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);
+
+ i = __IFLA_CAIF_HSI_HEAD_ALIGN;
+ if (data[i])
+ cfhsi->cfg.head_align = nla_get_u32(data[i]);
+
+ i = __IFLA_CAIF_HSI_TAIL_ALIGN;
+ if (data[i])
+ cfhsi->cfg.tail_align = nla_get_u32(data[i]);
+
+ i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
+ if (data[i])
+ cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);
+
+ i = __IFLA_CAIF_HSI_QLOW_WATERMARK;
+ if (data[i])
+ cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
+}
+
+static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[])
+{
+ cfhsi_netlink_parms(data, netdev_priv(dev));
+ netdev_state_change(dev);
+ return 0;
}
-struct platform_driver cfhsi_plat_drv = {
- .probe = cfhsi_probe,
- .remove = cfhsi_remove,
- .driver = {
- .name = "cfhsi",
- .owner = THIS_MODULE,
- },
+static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
+ [__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
+ [__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
+ [__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
+ [__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
+ [__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
+ [__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
};
-static void __exit cfhsi_exit_module(void)
+static size_t caif_hsi_get_size(const struct net_device *dev)
+{
+ int i;
+ size_t s = 0;
+ for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
+ s += nla_total_size(caif_hsi_policy[i].len);
+ return s;
+}
+
+static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct cfhsi *cfhsi = netdev_priv(dev);
+
+ if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
+ cfhsi->cfg.inactivity_timeout) ||
+ nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
+ cfhsi->cfg.aggregation_timeout) ||
+ nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
+ cfhsi->cfg.head_align) ||
+ nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
+ cfhsi->cfg.tail_align) ||
+ nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
+ cfhsi->cfg.q_high_mark) ||
+ nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
+ cfhsi->cfg.q_low_mark))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
{
- struct list_head *list_node;
- struct list_head *n;
struct cfhsi *cfhsi = NULL;
+ struct cfhsi_ops *(*get_ops)(void);
- spin_lock(&cfhsi_list_lock);
- list_for_each_safe(list_node, n, &cfhsi_list) {
- cfhsi = list_entry(list_node, struct cfhsi, list);
+ ASSERT_RTNL();
- /* Remove from list. */
- list_del(list_node);
- spin_unlock(&cfhsi_list_lock);
+ cfhsi = netdev_priv(dev);
+ cfhsi_netlink_parms(data, cfhsi);
+ dev_net_set(cfhsi->ndev, src_net);
+
+ get_ops = symbol_get(cfhsi_get_ops);
+ if (!get_ops) {
+ pr_err("%s: failed to get the cfhsi_ops\n", __func__);
+ return -ENODEV;
+ }
- unregister_netdevice(cfhsi->ndev);
+ /* Assign the HSI device. */
+ cfhsi->ops = (*get_ops)();
+ if (!cfhsi->ops) {
+ pr_err("%s: failed to get the cfhsi_ops\n", __func__);
+ goto err;
+ }
- spin_lock(&cfhsi_list_lock);
+ /* Assign the driver to this HSI device. */
+ cfhsi->ops->cb_ops = &cfhsi->cb_ops;
+ if (register_netdevice(dev)) {
+ pr_warn("%s: caif_hsi device registration failed\n", __func__);
+ goto err;
}
- spin_unlock(&cfhsi_list_lock);
+ /* Add CAIF HSI device to list. */
+ list_add_tail(&cfhsi->list, &cfhsi_list);
- /* Unregister platform driver. */
- platform_driver_unregister(&cfhsi_plat_drv);
+ return 0;
+err:
+ symbol_put(cfhsi_get_ops);
+ return -ENODEV;
}
-static int __init cfhsi_init_module(void)
+static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
+ .kind = "cfhsi",
+ .priv_size = sizeof(struct cfhsi),
+ .setup = cfhsi_setup,
+ .maxtype = __IFLA_CAIF_HSI_MAX,
+ .policy = caif_hsi_policy,
+ .newlink = caif_hsi_newlink,
+ .changelink = caif_hsi_changelink,
+ .get_size = caif_hsi_get_size,
+ .fill_info = caif_hsi_fill_info,
+};
+
+static void __exit cfhsi_exit_module(void)
{
- int result;
+ struct list_head *list_node;
+ struct list_head *n;
+ struct cfhsi *cfhsi;
- /* Initialize spin lock. */
- spin_lock_init(&cfhsi_list_lock);
+ rtnl_link_unregister(&caif_hsi_link_ops);
- /* Register platform driver. */
- result = platform_driver_register(&cfhsi_plat_drv);
- if (result) {
- printk(KERN_ERR "Could not register platform HSI driver: %d.\n",
- result);
- goto err_dev_register;
+ rtnl_lock();
+ list_for_each_safe(list_node, n, &cfhsi_list) {
+ cfhsi = list_entry(list_node, struct cfhsi, list);
+ unregister_netdev(cfhsi->ndev);
}
+ rtnl_unlock();
+}
- err_dev_register:
- return result;
+static int __init cfhsi_init_module(void)
+{
+ return rtnl_link_register(&caif_hsi_link_ops);
}
module_init(cfhsi_init_module);
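The caif_hsi configuration now lives in cfhsi->cfg, but the head/tail padding math is unchanged and still requires power-of-two alignment values because PAD_POW2() masks with (pow - 1). A small standalone check of that arithmetic, with arbitrary example lengths:

#include <stdio.h>

#define PAD_POW2(x, pow) ((((x) & ((pow) - 1)) == 0) ? 0 : \
                          (((pow) - ((x) & ((pow) - 1)))))

int main(void)
{
        unsigned int head_align = 4, tail_align = 4;    /* defaults above */
        unsigned int hdr_len = 2, skb_len = 21;         /* arbitrary example */

        /*
         * Mirrors cfhsi_tx_frm(): one marker byte plus padding up to
         * head_align, then pad the whole frame up to tail_align.
         */
        unsigned int hpad = 1 + PAD_POW2(hdr_len + 1, head_align);
        unsigned int tpad = PAD_POW2(skb_len + hpad, tail_align);

        printf("hpad=%u tpad=%u total=%u\n",
               hpad, tpad, skb_len + hpad + tpad);      /* total is 4-aligned */
        return 0;
}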
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 6ea905c..fcff73a 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -170,7 +170,7 @@ static const struct at91_devtype_data at91_devtype_data[] __devinitconst = {
},
};
-static struct can_bittiming_const at91_bittiming_const = {
+static const struct can_bittiming_const at91_bittiming_const = {
.name = KBUILD_MODNAME,
.tseg1_min = 4,
.tseg1_max = 16,
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 3f88473..f2d6d25 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -44,7 +44,7 @@ struct bfin_can_priv {
/*
* bfin can timing parameters
*/
-static struct can_bittiming_const bfin_can_bittiming_const = {
+static const struct can_bittiming_const bfin_can_bittiming_const = {
.name = DRV_NAME,
.tseg1_min = 1,
.tseg1_max = 16,
@@ -597,7 +597,7 @@ static int __devinit bfin_can_probe(struct platform_device *pdev)
dev_info(&pdev->dev,
"%s device registered"
"(&reg_base=%p, rx_irq=%d, tx_irq=%d, err_irq=%d, sclk=%d)\n",
- DRV_NAME, (void *)priv->membase, priv->rx_irq,
+ DRV_NAME, priv->membase, priv->rx_irq,
priv->tx_irq, priv->err_irq, priv->can.clock.freq);
return 0;
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
index ffb9773..3b83baf 100644
--- a/drivers/net/can/c_can/Kconfig
+++ b/drivers/net/can/c_can/Kconfig
@@ -1,15 +1,23 @@
menuconfig CAN_C_CAN
- tristate "Bosch C_CAN devices"
+ tristate "Bosch C_CAN/D_CAN devices"
depends on CAN_DEV && HAS_IOMEM
if CAN_C_CAN
config CAN_C_CAN_PLATFORM
- tristate "Generic Platform Bus based C_CAN driver"
+ tristate "Generic Platform Bus based C_CAN/D_CAN driver"
---help---
- This driver adds support for the C_CAN chips connected to
- the "platform bus" (Linux abstraction for directly to the
+ This driver adds support for the C_CAN/D_CAN chips connected
+ to the "platform bus" (Linux abstraction for directly to the
processor attached devices) which can be found on various
- boards from ST Microelectronics (http://www.st.com)
- like the SPEAr1310 and SPEAr320 evaluation boards.
+ boards from ST Microelectronics (http://www.st.com) like the
+ SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
+ boards like am335x, dm814x, dm813x and dm811x.
+
+config CAN_C_CAN_PCI
+ tristate "Generic PCI Bus based C_CAN/D_CAN driver"
+ depends on PCI
+ ---help---
+ This driver adds support for the C_CAN/D_CAN chips connected
+ to the PCI bus.
endif
diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile
index 9273f6d..ad1cc84 100644
--- a/drivers/net/can/c_can/Makefile
+++ b/drivers/net/can/c_can/Makefile
@@ -4,5 +4,6 @@
obj-$(CONFIG_CAN_C_CAN) += c_can.o
obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
+obj-$(CONFIG_CAN_C_CAN_PCI) += c_can_pci.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 536bda0..4c538e3 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -41,6 +41,10 @@
#include "c_can.h"
+/* Number of interface registers */
+#define IF_ENUM_REG_LEN 11
+#define C_CAN_IFACE(reg, iface) (C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)
+
/* control register */
#define CONTROL_TEST BIT(7)
#define CONTROL_CCE BIT(6)
@@ -185,7 +189,7 @@ enum c_can_bus_error_types {
C_CAN_ERROR_PASSIVE,
};
-static struct can_bittiming_const c_can_bittiming_const = {
+static const struct can_bittiming_const c_can_bittiming_const = {
.name = KBUILD_MODNAME,
.tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
.tseg1_max = 16,
@@ -209,10 +213,10 @@ static inline int get_tx_echo_msg_obj(const struct c_can_priv *priv)
C_CAN_MSG_OBJ_TX_FIRST;
}
-static u32 c_can_read_reg32(struct c_can_priv *priv, void *reg)
+static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index)
{
- u32 val = priv->read_reg(priv, reg);
- val |= ((u32) priv->read_reg(priv, reg + 2)) << 16;
+ u32 val = priv->read_reg(priv, index);
+ val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
return val;
}
@@ -220,14 +224,14 @@ static void c_can_enable_all_interrupts(struct c_can_priv *priv,
int enable)
{
unsigned int cntrl_save = priv->read_reg(priv,
- &priv->regs->control);
+ C_CAN_CTRL_REG);
if (enable)
cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
else
cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
- priv->write_reg(priv, &priv->regs->control, cntrl_save);
+ priv->write_reg(priv, C_CAN_CTRL_REG, cntrl_save);
}
static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
@@ -235,7 +239,7 @@ static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
int count = MIN_TIMEOUT_VALUE;
while (count && priv->read_reg(priv,
- &priv->regs->ifregs[iface].com_req) &
+ C_CAN_IFACE(COMREQ_REG, iface)) &
IF_COMR_BUSY) {
count--;
udelay(1);
@@ -258,9 +262,9 @@ static inline void c_can_object_get(struct net_device *dev,
* register and message RAM must be complete in 6 CAN-CLK
* period.
*/
- priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
+ priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
IFX_WRITE_LOW_16BIT(mask));
- priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
+ priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
IFX_WRITE_LOW_16BIT(objno));
if (c_can_msg_obj_is_busy(priv, iface))
@@ -278,9 +282,9 @@ static inline void c_can_object_put(struct net_device *dev,
* register and message RAM must be complete in 6 CAN-CLK
* period.
*/
- priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
+ priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
(IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
- priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
+ priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
IFX_WRITE_LOW_16BIT(objno));
if (c_can_msg_obj_is_busy(priv, iface))
@@ -306,18 +310,18 @@ static void c_can_write_msg_object(struct net_device *dev,
flags |= IF_ARB_MSGVAL;
- priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+ priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
IFX_WRITE_LOW_16BIT(id));
- priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, flags |
+ priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), flags |
IFX_WRITE_HIGH_16BIT(id));
for (i = 0; i < frame->can_dlc; i += 2) {
- priv->write_reg(priv, &priv->regs->ifregs[iface].data[i / 2],
+ priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
frame->data[i] | (frame->data[i + 1] << 8));
}
/* enable interrupt for this message object */
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
frame->can_dlc);
c_can_object_put(dev, iface, objno, IF_COMM_ALL);
@@ -329,7 +333,7 @@ static inline void c_can_mark_rx_msg_obj(struct net_device *dev,
{
struct c_can_priv *priv = netdev_priv(dev);
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND));
c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
@@ -343,7 +347,7 @@ static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
struct c_can_priv *priv = netdev_priv(dev);
for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
ctrl_mask & ~(IF_MCONT_MSGLST |
IF_MCONT_INTPND | IF_MCONT_NEWDAT));
c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
@@ -356,7 +360,7 @@ static inline void c_can_activate_rx_msg_obj(struct net_device *dev,
{
struct c_can_priv *priv = netdev_priv(dev);
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
ctrl_mask & ~(IF_MCONT_MSGLST |
IF_MCONT_INTPND | IF_MCONT_NEWDAT));
c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
@@ -374,7 +378,7 @@ static void c_can_handle_lost_msg_obj(struct net_device *dev,
c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
IF_MCONT_CLR_MSGLST);
c_can_object_put(dev, 0, objno, IF_COMM_CONTROL);
@@ -410,8 +414,8 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
frame->can_dlc = get_can_dlc(ctrl & 0x0F);
- flags = priv->read_reg(priv, &priv->regs->ifregs[iface].arb2);
- val = priv->read_reg(priv, &priv->regs->ifregs[iface].arb1) |
+ flags = priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface));
+ val = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface)) |
(flags << 16);
if (flags & IF_ARB_MSGXTD)
@@ -424,7 +428,7 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
else {
for (i = 0; i < frame->can_dlc; i += 2) {
data = priv->read_reg(priv,
- &priv->regs->ifregs[iface].data[i / 2]);
+ C_CAN_IFACE(DATA1_REG, iface) + i / 2);
frame->data[i] = data;
frame->data[i + 1] = data >> 8;
}
@@ -444,40 +448,40 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
{
struct c_can_priv *priv = netdev_priv(dev);
- priv->write_reg(priv, &priv->regs->ifregs[iface].mask1,
+ priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
IFX_WRITE_LOW_16BIT(mask));
- priv->write_reg(priv, &priv->regs->ifregs[iface].mask2,
+ priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
IFX_WRITE_HIGH_16BIT(mask));
- priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+ priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
IFX_WRITE_LOW_16BIT(id));
- priv->write_reg(priv, &priv->regs->ifregs[iface].arb2,
+ priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface),
(IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, mcont);
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
- c_can_read_reg32(priv, &priv->regs->msgval1));
+ c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
}
static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
{
struct c_can_priv *priv = netdev_priv(dev);
- priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 0);
- priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, 0);
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 0);
+ priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
+ priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
- c_can_read_reg32(priv, &priv->regs->msgval1));
+ c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
}
static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
{
- int val = c_can_read_reg32(priv, &priv->regs->txrqst1);
+ int val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
/*
* as transmission request register's bit n-1 corresponds to
@@ -540,12 +544,12 @@ static int c_can_set_bittiming(struct net_device *dev)
netdev_info(dev,
"setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
- ctrl_save = priv->read_reg(priv, &priv->regs->control);
- priv->write_reg(priv, &priv->regs->control,
+ ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG);
+ priv->write_reg(priv, C_CAN_CTRL_REG,
ctrl_save | CONTROL_CCE | CONTROL_INIT);
- priv->write_reg(priv, &priv->regs->btr, reg_btr);
- priv->write_reg(priv, &priv->regs->brp_ext, reg_brpe);
- priv->write_reg(priv, &priv->regs->control, ctrl_save);
+ priv->write_reg(priv, C_CAN_BTR_REG, reg_btr);
+ priv->write_reg(priv, C_CAN_BRPEXT_REG, reg_brpe);
+ priv->write_reg(priv, C_CAN_CTRL_REG, ctrl_save);
return 0;
}
@@ -587,36 +591,36 @@ static void c_can_chip_config(struct net_device *dev)
struct c_can_priv *priv = netdev_priv(dev);
/* enable automatic retransmission */
- priv->write_reg(priv, &priv->regs->control,
+ priv->write_reg(priv, C_CAN_CTRL_REG,
CONTROL_ENABLE_AR);
- if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY &
- CAN_CTRLMODE_LOOPBACK)) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
+ (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
/* loopback + silent mode : useful for hot self-test */
- priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
- priv->write_reg(priv, &priv->regs->test,
+ priv->write_reg(priv, C_CAN_TEST_REG,
TEST_LBACK | TEST_SILENT);
} else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
/* loopback mode : useful for self-test function */
- priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
- priv->write_reg(priv, &priv->regs->test, TEST_LBACK);
+ priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK);
} else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
/* silent mode : bus-monitoring mode */
- priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
- priv->write_reg(priv, &priv->regs->test, TEST_SILENT);
+ priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT);
} else
/* normal mode */
- priv->write_reg(priv, &priv->regs->control,
+ priv->write_reg(priv, C_CAN_CTRL_REG,
CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
/* configure message objects */
c_can_configure_msg_objects(dev);
/* set a `lec` value so that we can check for updates later */
- priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
+ priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
/* set bittiming params */
c_can_set_bittiming(dev);
@@ -669,7 +673,7 @@ static int c_can_get_berr_counter(const struct net_device *dev,
unsigned int reg_err_counter;
struct c_can_priv *priv = netdev_priv(dev);
- reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
+ reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
ERR_CNT_REC_SHIFT;
bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
@@ -686,7 +690,7 @@ static int c_can_get_berr_counter(const struct net_device *dev,
*
* We iterate from priv->tx_echo to priv->tx_next and check if the
* packet has been transmitted, echo it back to the CAN framework.
- * If we discover a not yet transmitted package, stop looking for more.
+ * If we discover a not yet transmitted packet, stop looking for more.
*/
static void c_can_do_tx(struct net_device *dev)
{
@@ -697,15 +701,17 @@ static void c_can_do_tx(struct net_device *dev)
for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
msg_obj_no = get_tx_echo_msg_obj(priv);
- val = c_can_read_reg32(priv, &priv->regs->txrqst1);
- if (!(val & (1 << msg_obj_no))) {
+ val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
+ if (!(val & (1 << (msg_obj_no - 1)))) {
can_get_echo_skb(dev,
msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
stats->tx_bytes += priv->read_reg(priv,
- &priv->regs->ifregs[0].msg_cntrl)
+ C_CAN_IFACE(MSGCTRL_REG, 0))
& IF_MCONT_DLC_MASK;
stats->tx_packets++;
c_can_inval_msg_object(dev, 0, msg_obj_no);
+ } else {
+ break;
}
}
@@ -742,11 +748,11 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
u32 num_rx_pkts = 0;
unsigned int msg_obj, msg_ctrl_save;
struct c_can_priv *priv = netdev_priv(dev);
- u32 val = c_can_read_reg32(priv, &priv->regs->intpnd1);
+ u32 val = c_can_read_reg32(priv, C_CAN_INTPND1_REG);
for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
- val = c_can_read_reg32(priv, &priv->regs->intpnd1),
+ val = c_can_read_reg32(priv, C_CAN_INTPND1_REG),
msg_obj++) {
/*
* as interrupt pending register's bit n-1 corresponds to
@@ -756,7 +762,7 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
~IF_COMM_TXRQST);
msg_ctrl_save = priv->read_reg(priv,
- &priv->regs->ifregs[0].msg_cntrl);
+ C_CAN_IFACE(MSGCTRL_REG, 0));
if (msg_ctrl_save & IF_MCONT_EOB)
return num_rx_pkts;
@@ -817,7 +823,7 @@ static int c_can_handle_state_change(struct net_device *dev,
return 0;
c_can_get_berr_counter(dev, &bec);
- reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
+ reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
ERR_CNT_RP_SHIFT;
@@ -933,7 +939,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
}
/* set a `lec` value so that we can check for updates later */
- priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
+ priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
netif_receive_skb(skb);
stats->rx_packets++;
@@ -950,22 +956,22 @@ static int c_can_poll(struct napi_struct *napi, int quota)
struct net_device *dev = napi->dev;
struct c_can_priv *priv = netdev_priv(dev);
- irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+ irqstatus = priv->irqstatus;
if (!irqstatus)
goto end;
/* status events have the highest priority */
if (irqstatus == STATUS_INTERRUPT) {
priv->current_status = priv->read_reg(priv,
- &priv->regs->status);
+ C_CAN_STS_REG);
/* handle Tx/Rx events */
if (priv->current_status & STATUS_TXOK)
- priv->write_reg(priv, &priv->regs->status,
+ priv->write_reg(priv, C_CAN_STS_REG,
priv->current_status & ~STATUS_TXOK);
if (priv->current_status & STATUS_RXOK)
- priv->write_reg(priv, &priv->regs->status,
+ priv->write_reg(priv, C_CAN_STS_REG,
priv->current_status & ~STATUS_RXOK);
/* handle state changes */
@@ -1028,12 +1034,11 @@ end:
static irqreturn_t c_can_isr(int irq, void *dev_id)
{
- u16 irqstatus;
struct net_device *dev = (struct net_device *)dev_id;
struct c_can_priv *priv = netdev_priv(dev);
- irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
- if (!irqstatus)
+ priv->irqstatus = priv->read_reg(priv, C_CAN_INT_REG);
+ if (!priv->irqstatus)
return IRQ_NONE;
/* disable all interrupts and schedule the NAPI */
@@ -1063,10 +1068,11 @@ static int c_can_open(struct net_device *dev)
goto exit_irq_fail;
}
+ napi_enable(&priv->napi);
+
/* start the c_can controller */
c_can_start(dev);
- napi_enable(&priv->napi);
netif_start_queue(dev);
return 0;
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 9b7fbef..01a7049 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -22,43 +22,129 @@
#ifndef C_CAN_H
#define C_CAN_H
-/* c_can IF registers */
-struct c_can_if_regs {
- u16 com_req;
- u16 com_mask;
- u16 mask1;
- u16 mask2;
- u16 arb1;
- u16 arb2;
- u16 msg_cntrl;
- u16 data[4];
- u16 _reserved[13];
+enum reg {
+ C_CAN_CTRL_REG = 0,
+ C_CAN_STS_REG,
+ C_CAN_ERR_CNT_REG,
+ C_CAN_BTR_REG,
+ C_CAN_INT_REG,
+ C_CAN_TEST_REG,
+ C_CAN_BRPEXT_REG,
+ C_CAN_IF1_COMREQ_REG,
+ C_CAN_IF1_COMMSK_REG,
+ C_CAN_IF1_MASK1_REG,
+ C_CAN_IF1_MASK2_REG,
+ C_CAN_IF1_ARB1_REG,
+ C_CAN_IF1_ARB2_REG,
+ C_CAN_IF1_MSGCTRL_REG,
+ C_CAN_IF1_DATA1_REG,
+ C_CAN_IF1_DATA2_REG,
+ C_CAN_IF1_DATA3_REG,
+ C_CAN_IF1_DATA4_REG,
+ C_CAN_IF2_COMREQ_REG,
+ C_CAN_IF2_COMMSK_REG,
+ C_CAN_IF2_MASK1_REG,
+ C_CAN_IF2_MASK2_REG,
+ C_CAN_IF2_ARB1_REG,
+ C_CAN_IF2_ARB2_REG,
+ C_CAN_IF2_MSGCTRL_REG,
+ C_CAN_IF2_DATA1_REG,
+ C_CAN_IF2_DATA2_REG,
+ C_CAN_IF2_DATA3_REG,
+ C_CAN_IF2_DATA4_REG,
+ C_CAN_TXRQST1_REG,
+ C_CAN_TXRQST2_REG,
+ C_CAN_NEWDAT1_REG,
+ C_CAN_NEWDAT2_REG,
+ C_CAN_INTPND1_REG,
+ C_CAN_INTPND2_REG,
+ C_CAN_MSGVAL1_REG,
+ C_CAN_MSGVAL2_REG,
};
-/* c_can hardware registers */
-struct c_can_regs {
- u16 control;
- u16 status;
- u16 err_cnt;
- u16 btr;
- u16 interrupt;
- u16 test;
- u16 brp_ext;
- u16 _reserved1;
- struct c_can_if_regs ifregs[2]; /* [0] = IF1 and [1] = IF2 */
- u16 _reserved2[8];
- u16 txrqst1;
- u16 txrqst2;
- u16 _reserved3[6];
- u16 newdat1;
- u16 newdat2;
- u16 _reserved4[6];
- u16 intpnd1;
- u16 intpnd2;
- u16 _reserved5[6];
- u16 msgval1;
- u16 msgval2;
- u16 _reserved6[6];
+static const u16 reg_map_c_can[] = {
+ [C_CAN_CTRL_REG] = 0x00,
+ [C_CAN_STS_REG] = 0x02,
+ [C_CAN_ERR_CNT_REG] = 0x04,
+ [C_CAN_BTR_REG] = 0x06,
+ [C_CAN_INT_REG] = 0x08,
+ [C_CAN_TEST_REG] = 0x0A,
+ [C_CAN_BRPEXT_REG] = 0x0C,
+ [C_CAN_IF1_COMREQ_REG] = 0x10,
+ [C_CAN_IF1_COMMSK_REG] = 0x12,
+ [C_CAN_IF1_MASK1_REG] = 0x14,
+ [C_CAN_IF1_MASK2_REG] = 0x16,
+ [C_CAN_IF1_ARB1_REG] = 0x18,
+ [C_CAN_IF1_ARB2_REG] = 0x1A,
+ [C_CAN_IF1_MSGCTRL_REG] = 0x1C,
+ [C_CAN_IF1_DATA1_REG] = 0x1E,
+ [C_CAN_IF1_DATA2_REG] = 0x20,
+ [C_CAN_IF1_DATA3_REG] = 0x22,
+ [C_CAN_IF1_DATA4_REG] = 0x24,
+ [C_CAN_IF2_COMREQ_REG] = 0x40,
+ [C_CAN_IF2_COMMSK_REG] = 0x42,
+ [C_CAN_IF2_MASK1_REG] = 0x44,
+ [C_CAN_IF2_MASK2_REG] = 0x46,
+ [C_CAN_IF2_ARB1_REG] = 0x48,
+ [C_CAN_IF2_ARB2_REG] = 0x4A,
+ [C_CAN_IF2_MSGCTRL_REG] = 0x4C,
+ [C_CAN_IF2_DATA1_REG] = 0x4E,
+ [C_CAN_IF2_DATA2_REG] = 0x50,
+ [C_CAN_IF2_DATA3_REG] = 0x52,
+ [C_CAN_IF2_DATA4_REG] = 0x54,
+ [C_CAN_TXRQST1_REG] = 0x80,
+ [C_CAN_TXRQST2_REG] = 0x82,
+ [C_CAN_NEWDAT1_REG] = 0x90,
+ [C_CAN_NEWDAT2_REG] = 0x92,
+ [C_CAN_INTPND1_REG] = 0xA0,
+ [C_CAN_INTPND2_REG] = 0xA2,
+ [C_CAN_MSGVAL1_REG] = 0xB0,
+ [C_CAN_MSGVAL2_REG] = 0xB2,
+};
+
+static const u16 reg_map_d_can[] = {
+ [C_CAN_CTRL_REG] = 0x00,
+ [C_CAN_STS_REG] = 0x04,
+ [C_CAN_ERR_CNT_REG] = 0x08,
+ [C_CAN_BTR_REG] = 0x0C,
+ [C_CAN_BRPEXT_REG] = 0x0E,
+ [C_CAN_INT_REG] = 0x10,
+ [C_CAN_TEST_REG] = 0x14,
+ [C_CAN_TXRQST1_REG] = 0x88,
+ [C_CAN_TXRQST2_REG] = 0x8A,
+ [C_CAN_NEWDAT1_REG] = 0x9C,
+ [C_CAN_NEWDAT2_REG] = 0x9E,
+ [C_CAN_INTPND1_REG] = 0xB0,
+ [C_CAN_INTPND2_REG] = 0xB2,
+ [C_CAN_MSGVAL1_REG] = 0xC4,
+ [C_CAN_MSGVAL2_REG] = 0xC6,
+ [C_CAN_IF1_COMREQ_REG] = 0x100,
+ [C_CAN_IF1_COMMSK_REG] = 0x102,
+ [C_CAN_IF1_MASK1_REG] = 0x104,
+ [C_CAN_IF1_MASK2_REG] = 0x106,
+ [C_CAN_IF1_ARB1_REG] = 0x108,
+ [C_CAN_IF1_ARB2_REG] = 0x10A,
+ [C_CAN_IF1_MSGCTRL_REG] = 0x10C,
+ [C_CAN_IF1_DATA1_REG] = 0x110,
+ [C_CAN_IF1_DATA2_REG] = 0x112,
+ [C_CAN_IF1_DATA3_REG] = 0x114,
+ [C_CAN_IF1_DATA4_REG] = 0x116,
+ [C_CAN_IF2_COMREQ_REG] = 0x120,
+ [C_CAN_IF2_COMMSK_REG] = 0x122,
+ [C_CAN_IF2_MASK1_REG] = 0x124,
+ [C_CAN_IF2_MASK2_REG] = 0x126,
+ [C_CAN_IF2_ARB1_REG] = 0x128,
+ [C_CAN_IF2_ARB2_REG] = 0x12A,
+ [C_CAN_IF2_MSGCTRL_REG] = 0x12C,
+ [C_CAN_IF2_DATA1_REG] = 0x130,
+ [C_CAN_IF2_DATA2_REG] = 0x132,
+ [C_CAN_IF2_DATA3_REG] = 0x134,
+ [C_CAN_IF2_DATA4_REG] = 0x136,
+};
+
+enum c_can_dev_id {
+ C_CAN_DEVTYPE,
+ D_CAN_DEVTYPE,
};
/* c_can private data structure */
@@ -69,13 +155,15 @@ struct c_can_priv {
int tx_object;
int current_status;
int last_status;
- u16 (*read_reg) (struct c_can_priv *priv, void *reg);
- void (*write_reg) (struct c_can_priv *priv, void *reg, u16 val);
- struct c_can_regs __iomem *regs;
+ u16 (*read_reg) (struct c_can_priv *priv, enum reg index);
+ void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val);
+ void __iomem *base;
+ const u16 *regs;
unsigned long irq_flags; /* for request_irq() */
unsigned int tx_next;
unsigned int tx_echo;
void *priv; /* for board-specific data */
+ u16 irqstatus;
};
struct net_device *alloc_c_can_dev(void);
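The C_CAN_IFACE() helper used throughout the c_can.c hunks above is not part of this header hunk; it lives in c_can.c. Judging from the enum layout, where the C_CAN_IF2_* block starts exactly eleven entries after the C_CAN_IF1_* block, its definition is presumably along these lines (a sketch derived from the enum ordering, not the literal source):

/* assumed definition, inferred from the register enum above */
#define IF_ENUM_REG_LEN		11	/* registers per IFx block in enum reg */
#define C_CAN_IFACE(reg, iface)	(C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)

With that, C_CAN_IFACE(ARB1_REG, 0) resolves to C_CAN_IF1_ARB1_REG and C_CAN_IFACE(ARB1_REG, 1) to C_CAN_IF2_ARB1_REG, so both interfaces index reg_map_c_can/reg_map_d_can the same way.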
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
new file mode 100644
index 0000000..1011146
--- /dev/null
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -0,0 +1,221 @@
+/*
+ * PCI bus driver for Bosch C_CAN/D_CAN controller
+ *
+ * Copyright (C) 2012 Federico Vaga <federico.vaga@gmail.com>
+ *
+ * Borrowed from c_can_platform.c
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+
+#include <linux/can/dev.h>
+
+#include "c_can.h"
+
+enum c_can_pci_reg_align {
+ C_CAN_REG_ALIGN_16,
+ C_CAN_REG_ALIGN_32,
+};
+
+struct c_can_pci_data {
+	/* Specify whether the controller is C_CAN or D_CAN */
+	enum c_can_dev_id type;
+	/* Register alignment of the controller in memory */
+	enum c_can_pci_reg_align reg_align;
+	/* Clock frequency */
+ unsigned int freq;
+};
+
+/*
+ * 16-bit c_can registers can be arranged differently in the memory
+ * architecture of different implementations. For example, 16-bit
+ * registers can be aligned to a 16-bit boundary or to a 32-bit boundary.
+ * Handle this by providing a common read/write interface.
+ */
+static u16 c_can_pci_read_reg_aligned_to_16bit(struct c_can_priv *priv,
+ enum reg index)
+{
+ return readw(priv->base + priv->regs[index]);
+}
+
+static void c_can_pci_write_reg_aligned_to_16bit(struct c_can_priv *priv,
+ enum reg index, u16 val)
+{
+ writew(val, priv->base + priv->regs[index]);
+}
+
+static u16 c_can_pci_read_reg_aligned_to_32bit(struct c_can_priv *priv,
+ enum reg index)
+{
+ return readw(priv->base + 2 * priv->regs[index]);
+}
+
+static void c_can_pci_write_reg_aligned_to_32bit(struct c_can_priv *priv,
+ enum reg index, u16 val)
+{
+ writew(val, priv->base + 2 * priv->regs[index]);
+}
+
+static int __devinit c_can_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct c_can_pci_data *c_can_pci_data = (void *)ent->driver_data;
+ struct c_can_priv *priv;
+ struct net_device *dev;
+ void __iomem *addr;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_enable_device FAILED\n");
+ goto out;
+ }
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_request_regions FAILED\n");
+ goto out_disable_device;
+ }
+
+ pci_set_master(pdev);
+ pci_enable_msi(pdev);
+
+ addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ if (!addr) {
+ dev_err(&pdev->dev,
+ "device has no PCI memory resources, "
+ "failing adapter\n");
+ ret = -ENOMEM;
+ goto out_release_regions;
+ }
+
+ /* allocate the c_can device */
+ dev = alloc_c_can_dev();
+ if (!dev) {
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ priv = netdev_priv(dev);
+ pci_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ dev->irq = pdev->irq;
+ priv->base = addr;
+
+ if (!c_can_pci_data->freq) {
+ dev_err(&pdev->dev, "no clock frequency defined\n");
+ ret = -ENODEV;
+ goto out_free_c_can;
+ } else {
+ priv->can.clock.freq = c_can_pci_data->freq;
+ }
+
+ /* Configure CAN type */
+ switch (c_can_pci_data->type) {
+ case C_CAN_DEVTYPE:
+ priv->regs = reg_map_c_can;
+ break;
+ case D_CAN_DEVTYPE:
+ priv->regs = reg_map_d_can;
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_free_c_can;
+ }
+
+ /* Configure access to registers */
+ switch (c_can_pci_data->reg_align) {
+ case C_CAN_REG_ALIGN_32:
+ priv->read_reg = c_can_pci_read_reg_aligned_to_32bit;
+ priv->write_reg = c_can_pci_write_reg_aligned_to_32bit;
+ break;
+ case C_CAN_REG_ALIGN_16:
+ priv->read_reg = c_can_pci_read_reg_aligned_to_16bit;
+ priv->write_reg = c_can_pci_write_reg_aligned_to_16bit;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_free_c_can;
+ }
+
+ ret = register_c_can_dev(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
+ KBUILD_MODNAME, ret);
+ goto out_free_c_can;
+ }
+
+ dev_dbg(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
+		KBUILD_MODNAME, priv->base, dev->irq);
+
+ return 0;
+
+out_free_c_can:
+ pci_set_drvdata(pdev, NULL);
+ free_c_can_dev(dev);
+out_iounmap:
+ pci_iounmap(pdev, addr);
+out_release_regions:
+ pci_disable_msi(pdev);
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+out_disable_device:
+ pci_disable_device(pdev);
+out:
+ return ret;
+}
+
+static void __devexit c_can_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ unregister_c_can_dev(dev);
+
+ pci_set_drvdata(pdev, NULL);
+ free_c_can_dev(dev);
+
+ pci_iounmap(pdev, priv->base);
+ pci_disable_msi(pdev);
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct c_can_pci_data c_can_sta2x11 = {
+ .type = C_CAN_DEVTYPE,
+ .reg_align = C_CAN_REG_ALIGN_32,
+	.freq = 52000000, /* 52 MHz */
+};
+
+#define C_CAN_ID(_vend, _dev, _driverdata) { \
+ PCI_DEVICE(_vend, _dev), \
+ .driver_data = (unsigned long)&_driverdata, \
+}
+static DEFINE_PCI_DEVICE_TABLE(c_can_pci_tbl) = {
+ C_CAN_ID(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_CAN,
+ c_can_sta2x11),
+ {},
+};
+static struct pci_driver c_can_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = c_can_pci_tbl,
+ .probe = c_can_pci_probe,
+ .remove = __devexit_p(c_can_pci_remove),
+};
+
+module_pci_driver(c_can_pci_driver);
+
+MODULE_AUTHOR("Federico Vaga <federico.vaga@gmail.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PCI CAN bus driver for Bosch C_CAN/D_CAN controller");
+MODULE_DEVICE_TABLE(pci, c_can_pci_tbl);
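To make the alignment handling above concrete: with reg_map_c_can, C_CAN_STS_REG carries the offset 0x02, so the 16-bit-aligned accessors read it at base + 0x02 while the 32-bit-aligned accessors read it at base + 0x04. A minimal sketch of the same logic in one place (illustration only; the helper name is made up and is not part of the patch):

static u16 c_can_read_reg_example(struct c_can_priv *priv, enum reg index,
				  bool aligned_to_32bit)
{
	unsigned long offset = priv->regs[index];	/* e.g. 0x02 for C_CAN_STS_REG */

	if (aligned_to_32bit)
		offset *= 2;	/* 16-bit registers spaced on 32-bit slots */
	return readw(priv->base + offset);
}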
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 5e1a5ff..f0921d1 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -42,27 +42,27 @@
* Handle the same by providing a common read/write interface.
*/
static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
- void *reg)
+ enum reg index)
{
- return readw(reg);
+ return readw(priv->base + priv->regs[index]);
}
static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
- void *reg, u16 val)
+ enum reg index, u16 val)
{
- writew(val, reg);
+ writew(val, priv->base + priv->regs[index]);
}
static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
- void *reg)
+ enum reg index)
{
- return readw(reg + (long)reg - (long)priv->regs);
+ return readw(priv->base + 2 * priv->regs[index]);
}
static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
- void *reg, u16 val)
+ enum reg index, u16 val)
{
- writew(val, reg + (long)reg - (long)priv->regs);
+ writew(val, priv->base + 2 * priv->regs[index]);
}
static int __devinit c_can_plat_probe(struct platform_device *pdev)
@@ -71,6 +71,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
void __iomem *addr;
struct net_device *dev;
struct c_can_priv *priv;
+ const struct platform_device_id *id;
struct resource *mem;
int irq;
#ifdef CONFIG_HAVE_CLK
@@ -115,26 +116,40 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
}
priv = netdev_priv(dev);
+ id = platform_get_device_id(pdev);
+ switch (id->driver_data) {
+ case C_CAN_DEVTYPE:
+ priv->regs = reg_map_c_can;
+ switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
+ case IORESOURCE_MEM_32BIT:
+ priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
+ priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
+ break;
+ case IORESOURCE_MEM_16BIT:
+ default:
+ priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
+ priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+ break;
+ }
+ break;
+ case D_CAN_DEVTYPE:
+ priv->regs = reg_map_d_can;
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
+ priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+ break;
+ default:
+ ret = -EINVAL;
+ goto exit_free_device;
+ }
dev->irq = irq;
- priv->regs = addr;
+ priv->base = addr;
#ifdef CONFIG_HAVE_CLK
priv->can.clock.freq = clk_get_rate(clk);
priv->priv = clk;
#endif
- switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
- case IORESOURCE_MEM_32BIT:
- priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
- priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
- break;
- case IORESOURCE_MEM_16BIT:
- default:
- priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
- priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
- break;
- }
-
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -146,7 +161,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
- KBUILD_MODNAME, priv->regs, dev->irq);
+ KBUILD_MODNAME, priv->base, dev->irq);
return 0;
exit_free_device:
@@ -176,7 +191,7 @@ static int __devexit c_can_plat_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
free_c_can_dev(dev);
- iounmap(priv->regs);
+ iounmap(priv->base);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
@@ -188,6 +203,20 @@ static int __devexit c_can_plat_remove(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id c_can_id_table[] = {
+ {
+ .name = KBUILD_MODNAME,
+ .driver_data = C_CAN_DEVTYPE,
+ }, {
+ .name = "c_can",
+ .driver_data = C_CAN_DEVTYPE,
+ }, {
+ .name = "d_can",
+ .driver_data = D_CAN_DEVTYPE,
+ }, {
+ }
+};
+
static struct platform_driver c_can_plat_driver = {
.driver = {
.name = KBUILD_MODNAME,
@@ -195,6 +224,7 @@ static struct platform_driver c_can_plat_driver = {
},
.probe = c_can_plat_probe,
.remove = __devexit_p(c_can_plat_remove),
+ .id_table = c_can_id_table,
};
module_platform_driver(c_can_plat_driver);
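With this id table in place, board code picks the register map purely by platform device name. A hypothetical (non-upstream) board file could instantiate a D_CAN controller roughly as below; the base address, size and interrupt number are placeholders, not values taken from the patch:

static struct resource board_d_can_res[] = {
	DEFINE_RES_MEM(0x481cc000, SZ_4K),	/* placeholder MMIO window */
	DEFINE_RES_IRQ(52),			/* placeholder interrupt line */
};

static int __init board_add_d_can(void)
{
	struct platform_device *pdev;

	/* the name "d_can" matches c_can_id_table and selects reg_map_d_can */
	pdev = platform_device_register_simple("d_can", 0, board_d_can_res,
					       ARRAY_SIZE(board_d_can_res));
	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}

For a "c_can" device the memory resource may additionally carry IORESOURCE_MEM_16BIT or IORESOURCE_MEM_32BIT in its flags to select the matching accessor pair, as the probe switch above shows.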
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index d42a6a7..0f12abf 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -90,7 +90,7 @@ static unsigned char cc770_obj_flags[CC770_OBJ_MAX] = {
[CC770_OBJ_TX] = 0,
};
-static struct can_bittiming_const cc770_bittiming_const = {
+static const struct can_bittiming_const cc770_bittiming_const = {
.name = KBUILD_MODNAME,
.tseg1_min = 1,
.tseg1_max = 16,
@@ -695,7 +695,7 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
netif_wake_queue(dev);
}
-irqreturn_t cc770_interrupt(int irq, void *dev_id)
+static irqreturn_t cc770_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct cc770_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 53115ee..688371c 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -154,7 +154,7 @@ static int __devinit cc770_get_platform_data(struct platform_device *pdev,
struct cc770_platform_data *pdata = pdev->dev.platform_data;
priv->can.clock.freq = pdata->osc_freq;
- if (priv->cpu_interface | CPUIF_DSC)
+ if (priv->cpu_interface & CPUIF_DSC)
priv->can.clock.freq /= 2;
priv->clkout = pdata->cor;
priv->bus_config = pdata->bcr;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index f03d7a4..963e2cc 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -33,6 +33,39 @@ MODULE_DESCRIPTION(MOD_DESC);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+/* CAN DLC to real data length conversion helpers */
+
+static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 12, 16, 20, 24, 32, 48, 64};
+
+/* get data length from can_dlc with sanitized can_dlc */
+u8 can_dlc2len(u8 can_dlc)
+{
+ return dlc2len[can_dlc & 0x0F];
+}
+EXPORT_SYMBOL_GPL(can_dlc2len);
+
+static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
+ 9, 9, 9, 9, /* 9 - 12 */
+ 10, 10, 10, 10, /* 13 - 16 */
+ 11, 11, 11, 11, /* 17 - 20 */
+ 12, 12, 12, 12, /* 21 - 24 */
+ 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */
+ 15, 15, 15, 15, 15, 15, 15, 15}; /* 57 - 64 */
+
+/* map the sanitized data length to an appropriate data length code */
+u8 can_len2dlc(u8 len)
+{
+ if (unlikely(len > 64))
+ return 0xF;
+
+ return len2dlc[len];
+}
+EXPORT_SYMBOL_GPL(can_len2dlc);
+
#ifdef CONFIG_CAN_CALC_BITTIMING
#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
@@ -368,7 +401,7 @@ EXPORT_SYMBOL_GPL(can_free_echo_skb);
/*
* CAN device restart for bus-off recovery
*/
-void can_restart(unsigned long data)
+static void can_restart(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct can_priv *priv = netdev_priv(dev);
@@ -454,7 +487,7 @@ EXPORT_SYMBOL_GPL(can_bus_off);
static void can_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
- dev->mtu = sizeof(struct can_frame);
+ dev->mtu = CAN_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 10;
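A quick illustration of the new helpers, with values read straight off the two tables above (the CAN FD DLC encoding rounds payload lengths up to the next valid size):

	u8 dlc = can_len2dlc(47);	/* lengths 41..48 share DLC 14 */
	u8 len = can_dlc2len(dlc);	/* DLC 14 maps back to 48 bytes */
	u8 max = can_len2dlc(200);	/* anything above 64 is capped at DLC 0xF (64 bytes) */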
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 38c0690..c5f1431 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -34,6 +34,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
@@ -165,10 +166,21 @@ struct flexcan_regs {
u32 imask1; /* 0x28 */
u32 iflag2; /* 0x2c */
u32 iflag1; /* 0x30 */
- u32 _reserved2[19];
+ u32 crl2; /* 0x34 */
+ u32 esr2; /* 0x38 */
+ u32 imeur; /* 0x3c */
+ u32 lrfr; /* 0x40 */
+ u32 crcr; /* 0x44 */
+ u32 rxfgmask; /* 0x48 */
+ u32 rxfir; /* 0x4c */
+ u32 _reserved3[12];
struct flexcan_mb cantxfg[64];
};
+struct flexcan_devtype_data {
+ u32 hw_ver; /* hardware controller version */
+};
+
struct flexcan_priv {
struct can_priv can;
struct net_device *dev;
@@ -178,11 +190,21 @@ struct flexcan_priv {
u32 reg_esr;
u32 reg_ctrl_default;
- struct clk *clk;
+ struct clk *clk_ipg;
+ struct clk *clk_per;
struct flexcan_platform_data *pdata;
+ const struct flexcan_devtype_data *devtype_data;
+};
+
+static struct flexcan_devtype_data fsl_p1010_devtype_data = {
+ .hw_ver = 3,
+};
+
+static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
+ .hw_ver = 10,
};
-static struct can_bittiming_const flexcan_bittiming_const = {
+static const struct can_bittiming_const flexcan_bittiming_const = {
.name = DRV_NAME,
.tseg1_min = 4,
.tseg1_max = 16,
@@ -750,6 +772,9 @@ static int flexcan_chip_start(struct net_device *dev)
flexcan_write(0x0, &regs->rx14mask);
flexcan_write(0x0, &regs->rx15mask);
+ if (priv->devtype_data->hw_ver >= 10)
+ flexcan_write(0x0, &regs->rxfgmask);
+
flexcan_transceiver_switch(priv, 1);
/* synchronize with the can bus */
@@ -804,7 +829,8 @@ static int flexcan_open(struct net_device *dev)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
- clk_prepare_enable(priv->clk);
+ clk_prepare_enable(priv->clk_ipg);
+ clk_prepare_enable(priv->clk_per);
err = open_candev(dev);
if (err)
@@ -826,7 +852,8 @@ static int flexcan_open(struct net_device *dev)
out_close:
close_candev(dev);
out:
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk_per);
+ clk_disable_unprepare(priv->clk_ipg);
return err;
}
@@ -840,7 +867,8 @@ static int flexcan_close(struct net_device *dev)
flexcan_chip_stop(dev);
free_irq(dev->irq, dev);
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk_per);
+ clk_disable_unprepare(priv->clk_ipg);
close_candev(dev);
@@ -879,7 +907,8 @@ static int __devinit register_flexcandev(struct net_device *dev)
struct flexcan_regs __iomem *regs = priv->base;
u32 reg, err;
- clk_prepare_enable(priv->clk);
+ clk_prepare_enable(priv->clk_ipg);
+ clk_prepare_enable(priv->clk_per);
/* select "bus clock", chip must be disabled */
flexcan_chip_disable(priv);
@@ -912,7 +941,8 @@ static int __devinit register_flexcandev(struct net_device *dev)
out:
/* disable core and turn off clocks */
flexcan_chip_disable(priv);
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk_per);
+ clk_disable_unprepare(priv->clk_ipg);
return err;
}
@@ -922,12 +952,25 @@ static void __devexit unregister_flexcandev(struct net_device *dev)
unregister_candev(dev);
}
+static const struct of_device_id flexcan_of_match[] = {
+ { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
+ { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
+ { /* sentinel */ },
+};
+
+static const struct platform_device_id flexcan_id_table[] = {
+ { .name = "flexcan", .driver_data = (kernel_ulong_t)&fsl_p1010_devtype_data, },
+ { /* sentinel */ },
+};
+
static int __devinit flexcan_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id;
+ const struct flexcan_devtype_data *devtype_data;
struct net_device *dev;
struct flexcan_priv *priv;
struct resource *mem;
- struct clk *clk = NULL;
+ struct clk *clk_ipg = NULL, *clk_per = NULL;
struct pinctrl *pinctrl;
void __iomem *base;
resource_size_t mem_size;
@@ -938,23 +981,25 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
if (IS_ERR(pinctrl))
return PTR_ERR(pinctrl);
- if (pdev->dev.of_node) {
- const u32 *clock_freq_p;
-
- clock_freq_p = of_get_property(pdev->dev.of_node,
- "clock-frequency", NULL);
- if (clock_freq_p)
- clock_freq = *clock_freq_p;
- }
+ if (pdev->dev.of_node)
+ of_property_read_u32(pdev->dev.of_node,
+ "clock-frequency", &clock_freq);
if (!clock_freq) {
- clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "no clock defined\n");
- err = PTR_ERR(clk);
+ clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(clk_ipg)) {
+ dev_err(&pdev->dev, "no ipg clock defined\n");
+ err = PTR_ERR(clk_ipg);
+ goto failed_clock;
+ }
+ clock_freq = clk_get_rate(clk_ipg);
+
+ clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(clk_per)) {
+ dev_err(&pdev->dev, "no per clock defined\n");
+ err = PTR_ERR(clk_per);
goto failed_clock;
}
- clock_freq = clk_get_rate(clk);
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -982,6 +1027,17 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
goto failed_alloc;
}
+ of_id = of_match_device(flexcan_of_match, &pdev->dev);
+ if (of_id) {
+ devtype_data = of_id->data;
+ } else if (pdev->id_entry->driver_data) {
+ devtype_data = (struct flexcan_devtype_data *)
+ pdev->id_entry->driver_data;
+ } else {
+ err = -ENODEV;
+ goto failed_devtype;
+ }
+
dev->netdev_ops = &flexcan_netdev_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
@@ -996,8 +1052,10 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
CAN_CTRLMODE_BERR_REPORTING;
priv->base = base;
priv->dev = dev;
- priv->clk = clk;
+ priv->clk_ipg = clk_ipg;
+ priv->clk_per = clk_per;
priv->pdata = pdev->dev.platform_data;
+ priv->devtype_data = devtype_data;
netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
@@ -1016,14 +1074,13 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
return 0;
failed_register:
+ failed_devtype:
free_candev(dev);
failed_alloc:
iounmap(base);
failed_map:
release_mem_region(mem->start, mem_size);
failed_get:
- if (clk)
- clk_put(clk);
failed_clock:
return err;
}
@@ -1041,20 +1098,46 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
- if (priv->clk)
- clk_put(priv->clk);
-
free_candev(dev);
return 0;
}
-static struct of_device_id flexcan_of_match[] = {
- {
- .compatible = "fsl,p1010-flexcan",
- },
- {},
-};
+#ifdef CONFIG_PM
+static int flexcan_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct flexcan_priv *priv = netdev_priv(dev);
+
+ flexcan_chip_disable(priv);
+
+ if (netif_running(dev)) {
+ netif_stop_queue(dev);
+ netif_device_detach(dev);
+ }
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ return 0;
+}
+
+static int flexcan_resume(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct flexcan_priv *priv = netdev_priv(dev);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ if (netif_running(dev)) {
+ netif_device_attach(dev);
+ netif_start_queue(dev);
+ }
+ flexcan_chip_enable(priv);
+
+ return 0;
+}
+#else
+#define flexcan_suspend NULL
+#define flexcan_resume NULL
+#endif
static struct platform_driver flexcan_driver = {
.driver = {
@@ -1064,6 +1147,9 @@ static struct platform_driver flexcan_driver = {
},
.probe = flexcan_probe,
.remove = __devexit_p(flexcan_remove),
+ .suspend = flexcan_suspend,
+ .resume = flexcan_resume,
+ .id_table = flexcan_id_table,
};
module_platform_driver(flexcan_driver);
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 08c893c..98ee438 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -116,6 +116,7 @@
#define ICAN3_BUSERR_QUOTA_MAX 255
/* Janz ICAN3 CAN Frame Conversion */
+#define ICAN3_SNGL 0x02
#define ICAN3_ECHO 0x10
#define ICAN3_EFF_RTR 0x40
#define ICAN3_SFF_RTR 0x10
@@ -220,6 +221,9 @@ struct ican3_dev {
/* old and new style host interface */
unsigned int iftype;
+ /* queue for echo packets */
+ struct sk_buff_head echoq;
+
/*
* Any function which changes the current DPM page must hold this
* lock while it is performing data accesses. This ensures that the
@@ -235,7 +239,6 @@ struct ican3_dev {
/* fast host interface */
unsigned int fastrx_start;
- unsigned int fastrx_int;
unsigned int fastrx_num;
unsigned int fasttx_start;
unsigned int fasttx_num;
@@ -454,7 +457,6 @@ static void __devinit ican3_init_fast_host_interface(struct ican3_dev *mod)
/* save the start recv page */
mod->fastrx_start = mod->free_page;
mod->fastrx_num = 0;
- mod->fastrx_int = 0;
/* build a single fast tohost queue descriptor */
memset(&desc, 0, sizeof(desc));
@@ -813,10 +815,10 @@ static void ican3_to_can_frame(struct ican3_dev *mod,
cf->can_id |= desc->data[0] << 3;
cf->can_id |= (desc->data[1] & 0xe0) >> 5;
- cf->can_dlc = desc->data[1] & ICAN3_CAN_DLC_MASK;
- memcpy(cf->data, &desc->data[2], sizeof(cf->data));
+ cf->can_dlc = get_can_dlc(desc->data[1] & ICAN3_CAN_DLC_MASK);
+ memcpy(cf->data, &desc->data[2], cf->can_dlc);
} else {
- cf->can_dlc = desc->data[0] & ICAN3_CAN_DLC_MASK;
+ cf->can_dlc = get_can_dlc(desc->data[0] & ICAN3_CAN_DLC_MASK);
if (desc->data[0] & ICAN3_EFF_RTR)
cf->can_id |= CAN_RTR_FLAG;
@@ -831,7 +833,7 @@ static void ican3_to_can_frame(struct ican3_dev *mod,
cf->can_id |= desc->data[3] >> 5; /* 2-0 */
}
- memcpy(cf->data, &desc->data[6], sizeof(cf->data));
+ memcpy(cf->data, &desc->data[6], cf->can_dlc);
}
}
@@ -847,6 +849,10 @@ static void can_frame_to_ican3(struct ican3_dev *mod,
desc->data[0] |= cf->can_dlc;
desc->data[1] |= ICAN3_ECHO;
+ /* support single transmission (no retries) mode */
+ if (mod->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ desc->data[1] |= ICAN3_SNGL;
+
if (cf->can_id & CAN_RTR_FLAG)
desc->data[0] |= ICAN3_EFF_RTR;
@@ -863,7 +869,7 @@ static void can_frame_to_ican3(struct ican3_dev *mod,
}
/* copy the data bits into the descriptor */
- memcpy(&desc->data[6], cf->data, sizeof(cf->data));
+ memcpy(&desc->data[6], cf->data, cf->can_dlc);
}
/*
@@ -909,8 +915,8 @@ static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg)
if (skb) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_over_errors++;
stats->rx_errors++;
- stats->rx_bytes += cf->can_dlc;
netif_rx(skb);
}
}
@@ -927,7 +933,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
struct net_device *dev = mod->ndev;
struct net_device_stats *stats = &dev->stats;
enum can_state state = mod->can.state;
- u8 status, isrc, rxerr, txerr;
+ u8 isrc, ecc, status, rxerr, txerr;
struct can_frame *cf;
struct sk_buff *skb;
@@ -943,15 +949,53 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
return -EINVAL;
}
- skb = alloc_can_err_skb(dev, &cf);
- if (skb == NULL)
- return -ENOMEM;
-
isrc = msg->data[0];
+ ecc = msg->data[2];
status = msg->data[3];
rxerr = msg->data[4];
txerr = msg->data[5];
+ /*
+ * This hardware lacks any support other than bus error messages to
+ * determine if packet transmission has failed.
+ *
+ * When TX errors happen, one echo skb needs to be dropped from the
+ * front of the queue.
+ *
+ * A small bit of code is duplicated here and below, to avoid error
+ * skb allocation when it will just be freed immediately.
+ */
+ if (isrc == CEVTIND_BEI) {
+ int ret;
+ dev_dbg(mod->dev, "bus error interrupt\n");
+
+ /* TX error */
+ if (!(ecc & ECC_DIR)) {
+ kfree_skb(skb_dequeue(&mod->echoq));
+ stats->tx_errors++;
+ } else {
+ stats->rx_errors++;
+ }
+
+ /*
+ * The controller automatically disables bus-error interrupts
+ * and therefore we must re-enable them.
+ */
+ ret = ican3_set_buserror(mod, 1);
+ if (ret) {
+ dev_err(mod->dev, "unable to re-enable bus-error\n");
+ return ret;
+ }
+
+ /* bus error reporting is off, return immediately */
+ if (!(mod->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
+ return 0;
+ }
+
+ skb = alloc_can_err_skb(dev, &cf);
+ if (skb == NULL)
+ return -ENOMEM;
+
/* data overrun interrupt */
if (isrc == CEVTIND_DOI || isrc == CEVTIND_LOST) {
dev_dbg(mod->dev, "data overrun interrupt\n");
@@ -980,11 +1024,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
/* bus error interrupt */
if (isrc == CEVTIND_BEI) {
- u8 ecc = msg->data[2];
-
- dev_dbg(mod->dev, "bus error interrupt\n");
mod->can.can_stats.bus_error++;
- stats->rx_errors++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
switch (ecc & ECC_MASK) {
@@ -1003,7 +1043,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
break;
}
- if ((ecc & ECC_DIR) == 0)
+ if (!(ecc & ECC_DIR))
cf->data[2] |= CAN_ERR_PROT_TX;
cf->data[6] = txerr;
@@ -1030,8 +1070,6 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
}
mod->can.state = state;
- stats->rx_errors++;
- stats->rx_bytes += cf->can_dlc;
netif_rx(skb);
return 0;
}
@@ -1091,6 +1129,88 @@ static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
}
/*
+ * The ican3 needs to store all echo skbs, and therefore cannot
+ * use the generic infrastructure for this.
+ */
+static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb)
+{
+ struct sock *srcsk = skb->sk;
+
+ if (atomic_read(&skb->users) != 1) {
+ struct sk_buff *old_skb = skb;
+
+ skb = skb_clone(old_skb, GFP_ATOMIC);
+ kfree_skb(old_skb);
+ if (!skb)
+ return;
+ } else {
+ skb_orphan(skb);
+ }
+
+ skb->sk = srcsk;
+
+ /* save this skb for tx interrupt echo handling */
+ skb_queue_tail(&mod->echoq, skb);
+}
+
+static unsigned int ican3_get_echo_skb(struct ican3_dev *mod)
+{
+ struct sk_buff *skb = skb_dequeue(&mod->echoq);
+ struct can_frame *cf;
+ u8 dlc;
+
+ /* this should never trigger unless there is a driver bug */
+ if (!skb) {
+ netdev_err(mod->ndev, "BUG: echo skb not occupied\n");
+ return 0;
+ }
+
+ cf = (struct can_frame *)skb->data;
+ dlc = cf->can_dlc;
+
+ /* check flag whether this packet has to be looped back */
+ if (skb->pkt_type != PACKET_LOOPBACK) {
+ kfree_skb(skb);
+ return dlc;
+ }
+
+ skb->protocol = htons(ETH_P_CAN);
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->dev = mod->ndev;
+ netif_receive_skb(skb);
+ return dlc;
+}
+
+/*
+ * Compare an skb with an existing echo skb
+ *
+ * This function will be used on devices which have a hardware loopback.
+ * On these devices, this function can be used to compare a received skb
+ * with the saved echo skbs so that the hardware echo skb can be dropped.
+ *
+ * Returns true if the skbs are identical, false otherwise.
+ */
+static bool ican3_echo_skb_matches(struct ican3_dev *mod, struct sk_buff *skb)
+{
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ struct sk_buff *echo_skb = skb_peek(&mod->echoq);
+ struct can_frame *echo_cf;
+
+ if (!echo_skb)
+ return false;
+
+ echo_cf = (struct can_frame *)echo_skb->data;
+ if (cf->can_id != echo_cf->can_id)
+ return false;
+
+ if (cf->can_dlc != echo_cf->can_dlc)
+ return false;
+
+ return memcmp(cf->data, echo_cf->data, cf->can_dlc) == 0;
+}
+
+/*
* Check that there is room in the TX ring to transmit another skb
*
* LOCKING: must hold mod->lock
@@ -1100,6 +1220,10 @@ static bool ican3_txok(struct ican3_dev *mod)
struct ican3_fast_desc __iomem *desc;
u8 control;
+ /* check that we have echo queue space */
+ if (skb_queue_len(&mod->echoq) >= ICAN3_TX_BUFFERS)
+ return false;
+
/* copy the control bits of the descriptor */
ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16));
desc = mod->dpm + ((mod->fasttx_num % 16) * sizeof(*desc));
@@ -1150,10 +1274,27 @@ static int ican3_recv_skb(struct ican3_dev *mod)
/* convert the ICAN3 frame into Linux CAN format */
ican3_to_can_frame(mod, &desc, cf);
- /* receive the skb, update statistics */
- netif_receive_skb(skb);
+ /*
+ * If this is an ECHO frame received from the hardware loopback
+ * feature, use the skb saved in the ECHO stack instead. This allows
+ * the Linux CAN core to support CAN_RAW_RECV_OWN_MSGS correctly.
+ *
+ * Since this is a confirmation of a successfully transmitted packet
+ * sent from this host, update the transmit statistics.
+ *
+ * Also, the netdevice queue needs to be allowed to send packets again.
+ */
+ if (ican3_echo_skb_matches(mod, skb)) {
+ stats->tx_packets++;
+ stats->tx_bytes += ican3_get_echo_skb(mod);
+ kfree_skb(skb);
+ goto err_noalloc;
+ }
+
+ /* update statistics, receive the skb */
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
err_noalloc:
/* toggle the valid bit and return the descriptor to the ring */
@@ -1176,13 +1317,13 @@ err_noalloc:
static int ican3_napi(struct napi_struct *napi, int budget)
{
struct ican3_dev *mod = container_of(napi, struct ican3_dev, napi);
- struct ican3_msg msg;
unsigned long flags;
int received = 0;
int ret;
/* process all communication messages */
while (true) {
+ struct ican3_msg msg;
ret = ican3_recv_msg(mod, &msg);
if (ret)
break;
@@ -1325,7 +1466,7 @@ static int __devinit ican3_startup_module(struct ican3_dev *mod)
}
/* default to "bus errors enabled" */
- ret = ican3_set_buserror(mod, ICAN3_BUSERR_QUOTA_MAX);
+ ret = ican3_set_buserror(mod, 1);
if (ret) {
dev_err(mod->dev, "unable to set bus-error\n");
return ret;
@@ -1354,7 +1495,6 @@ static int __devinit ican3_startup_module(struct ican3_dev *mod)
static int ican3_open(struct net_device *ndev)
{
struct ican3_dev *mod = netdev_priv(ndev);
- u8 quota;
int ret;
/* open the CAN layer */
@@ -1364,19 +1504,6 @@ static int ican3_open(struct net_device *ndev)
return ret;
}
- /* set the bus error generation state appropriately */
- if (mod->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
- quota = ICAN3_BUSERR_QUOTA_MAX;
- else
- quota = 0;
-
- ret = ican3_set_buserror(mod, quota);
- if (ret) {
- dev_err(mod->dev, "unable to set bus-error\n");
- close_candev(ndev);
- return ret;
- }
-
/* bring the bus online */
ret = ican3_set_bus_state(mod, true);
if (ret) {
@@ -1408,6 +1535,9 @@ static int ican3_stop(struct net_device *ndev)
return ret;
}
+ /* drop all outstanding echo skbs */
+ skb_queue_purge(&mod->echoq);
+
/* close the CAN layer */
close_candev(ndev);
return 0;
@@ -1416,18 +1546,19 @@ static int ican3_stop(struct net_device *ndev)
static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ican3_dev *mod = netdev_priv(ndev);
- struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf = (struct can_frame *)skb->data;
struct ican3_fast_desc desc;
void __iomem *desc_addr;
unsigned long flags;
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
spin_lock_irqsave(&mod->lock, flags);
/* check that we can actually transmit */
if (!ican3_txok(mod)) {
- dev_err(mod->dev, "no free descriptors, stopping queue\n");
- netif_stop_queue(ndev);
+ dev_err(mod->dev, "BUG: no free descriptors\n");
spin_unlock_irqrestore(&mod->lock, flags);
return NETDEV_TX_BUSY;
}
@@ -1442,6 +1573,14 @@ static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
can_frame_to_ican3(mod, cf, &desc);
/*
+ * This hardware doesn't have TX-done notifications, so we'll try and
+ * emulate it the best we can using ECHO skbs. Add the skb to the ECHO
+ * stack. Upon packet reception, check if the ECHO skb and received
+ * skb match, and use that to wake the queue.
+ */
+ ican3_put_echo_skb(mod, skb);
+
+ /*
* the programming manual says that you must set the IVALID bit, then
* interrupt, then set the valid bit. Quite weird, but it seems to be
* required for this to work
@@ -1459,19 +1598,7 @@ static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
mod->fasttx_num = (desc.control & DESC_WRAP) ? 0
: (mod->fasttx_num + 1);
- /* update statistics */
- stats->tx_packets++;
- stats->tx_bytes += cf->can_dlc;
- kfree_skb(skb);
-
- /*
- * This hardware doesn't have TX-done notifications, so we'll try and
- * emulate it the best we can using ECHO skbs. Get the next TX
- * descriptor, and see if we have room to send. If not, stop the queue.
- * It will be woken when the ECHO skb for the current packet is recv'd.
- */
-
- /* copy the control bits of the descriptor */
+ /* if there is no free descriptor space, stop the transmit queue */
if (!ican3_txok(mod))
netif_stop_queue(ndev);
@@ -1490,7 +1617,7 @@ static const struct net_device_ops ican3_netdev_ops = {
*/
/* This structure was stolen from drivers/net/can/sja1000/sja1000.c */
-static struct can_bittiming_const ican3_bittiming_const = {
+static const struct can_bittiming_const ican3_bittiming_const = {
.name = DRV_NAME,
.tseg1_min = 1,
.tseg1_max = 16,
@@ -1667,6 +1794,7 @@ static int __devinit ican3_probe(struct platform_device *pdev)
mod->dev = &pdev->dev;
mod->num = pdata->modno;
netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS);
+ skb_queue_head_init(&mod->echoq);
spin_lock_init(&mod->lock);
init_completion(&mod->termination_comp);
init_completion(&mod->buserror_comp);
@@ -1687,7 +1815,8 @@ static int __devinit ican3_probe(struct platform_device *pdev)
mod->can.do_set_mode = ican3_set_mode;
mod->can.do_get_berr_counter = ican3_get_berr_counter;
mod->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES
- | CAN_CTRLMODE_BERR_REPORTING;
+ | CAN_CTRLMODE_BERR_REPORTING
+ | CAN_CTRLMODE_ONE_SHOT;
/* find our IRQ number */
mod->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 346785c..a580db2 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -214,7 +214,7 @@ static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
module_param(mcp251x_enable_dma, int, S_IRUGO);
MODULE_PARM_DESC(mcp251x_enable_dma, "Enable SPI DMA. Default: 0 (Off)");
-static struct can_bittiming_const mcp251x_bittiming_const = {
+static const struct can_bittiming_const mcp251x_bittiming_const = {
.name = DEVICE_NAME,
.tseg1_min = 3,
.tseg1_max = 16,
@@ -1020,8 +1020,7 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
GFP_DMA);
if (priv->spi_tx_buf) {
- priv->spi_rx_buf = (u8 *)(priv->spi_tx_buf +
- (PAGE_SIZE / 2));
+ priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2));
priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
(PAGE_SIZE / 2));
} else {
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 5caa572..06adf88 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -251,7 +251,7 @@ static struct of_device_id mpc5xxx_can_table[];
static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
{
const struct of_device_id *match;
- struct mpc5xxx_can_data *data;
+ const struct mpc5xxx_can_data *data;
struct device_node *np = ofdev->dev.of_node;
struct net_device *dev;
struct mscan_priv *priv;
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 41a2a2d..2b104d5 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -34,7 +34,7 @@
#include "mscan.h"
-static struct can_bittiming_const mscan_bittiming_const = {
+static const struct can_bittiming_const mscan_bittiming_const = {
.name = "mscan",
.tseg1_min = 4,
.tseg1_max = 16,
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 1226297..48b3d62 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -184,7 +184,7 @@ struct pch_can_priv {
int use_msi;
};
-static struct can_bittiming_const pch_can_bittiming_const = {
+static const struct can_bittiming_const pch_can_bittiming_const = {
.name = KBUILD_MODNAME,
.tseg1_min = 2,
.tseg1_max = 16,
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 5e10472..4c4f33d 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -69,7 +69,7 @@ MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION(DRV_NAME "CAN netdevice driver");
-static struct can_bittiming_const sja1000_bittiming_const = {
+static const struct can_bittiming_const sja1000_bittiming_const = {
.name = DRV_NAME,
.tseg1_min = 1,
.tseg1_max = 16,
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index a7c77c74..f2a221e 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -826,12 +826,12 @@ static __devinit int softing_pdev_probe(struct platform_device *pdev)
goto sysfs_failed;
}
- ret = -ENOMEM;
for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
card->net[j] = netdev =
softing_netdev_create(card, card->id.chip[j]);
if (!netdev) {
dev_alert(&pdev->dev, "failed to make can[%i]", j);
+ ret = -ENOMEM;
goto netdev_failed;
}
priv = netdev_priv(card->net[j]);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 4accd7e..527dbcf 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -196,7 +196,7 @@ MODULE_VERSION(HECC_MODULE_VERSION);
#define HECC_CANGIM_SIL BIT(2) /* system interrupts to int line 1 */
/* CAN Bittiming constants as per HECC specs */
-static struct can_bittiming_const ti_hecc_bittiming_const = {
+static const struct can_bittiming_const ti_hecc_bittiming_const = {
.name = DRV_NAME,
.tseg1_min = 1,
.tseg1_max = 16,
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 7ae65fc..086fa32 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -889,7 +889,7 @@ static const struct net_device_ops ems_usb_netdev_ops = {
.ndo_start_xmit = ems_usb_start_xmit,
};
-static struct can_bittiming_const ems_usb_bittiming_const = {
+static const struct can_bittiming_const ems_usb_bittiming_const = {
.name = "ems_usb",
.tseg1_min = 1,
.tseg1_max = 16,
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 09b1da5..bd36e55 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -871,7 +871,7 @@ static const struct net_device_ops esd_usb2_netdev_ops = {
.ndo_start_xmit = esd_usb2_start_xmit,
};
-static struct can_bittiming_const esd_usb2_bittiming_const = {
+static const struct can_bittiming_const esd_usb2_bittiming_const = {
.name = "esd_usb2",
.tseg1_min = ESD_USB2_TSEG1_MIN,
.tseg1_max = ESD_USB2_TSEG1_MAX,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index a948c5a..4c775b6 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -45,7 +45,7 @@ struct peak_usb_adapter {
char *name;
u32 device_id;
struct can_clock clock;
- struct can_bittiming_const bittiming_const;
+ const struct can_bittiming_const bittiming_const;
unsigned int ctrl_count;
int (*intf_probe)(struct usb_interface *intf);
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index ea2d942..4f93c0b 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -70,13 +70,12 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
{
- struct can_frame *cf = (struct can_frame *)skb->data;
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cfd->len;
- skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
skb->dev = dev;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -86,7 +85,7 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
- struct can_frame *cf = (struct can_frame *)skb->data;
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
int loop;
@@ -94,7 +93,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
stats->tx_packets++;
- stats->tx_bytes += cf->can_dlc;
+ stats->tx_bytes += cfd->len;
/* set flag whether this packet has to be looped back */
loop = skb->pkt_type == PACKET_LOOPBACK;
@@ -108,7 +107,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
* CAN core already did the echo for us
*/
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cfd->len;
}
kfree_skb(skb);
return NETDEV_TX_OK;
@@ -133,14 +132,28 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static int vcan_change_mtu(struct net_device *dev, int new_mtu)
+{
+ /* Do not allow changing the MTU while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
static const struct net_device_ops vcan_netdev_ops = {
.ndo_start_xmit = vcan_tx,
+ .ndo_change_mtu = vcan_change_mtu,
};
static void vcan_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
- dev->mtu = sizeof(struct can_frame);
+ dev->mtu = CAN_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 0;
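Since vcan now starts at CAN_MTU and accepts CANFD_MTU through the new ndo_change_mtu, a userspace sketch of pushing a CAN FD frame through it might look as follows. This assumes the SocketCAN raw API of the same era (CAN_RAW_FD_FRAMES) and that CANFD_MTU equals sizeof(struct canfd_frame), i.e. 72 bytes; the interface must first be switched over, e.g. "ip link set vcan0 mtu 72". Error handling is omitted for brevity.

#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

static int send_fd_frame_on_vcan0(void)
{
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
	int enable_fd = 1;
	struct ifreq ifr;
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct canfd_frame cfd = { .can_id = 0x123, .len = 48 };

	memset(&ifr, 0, sizeof(ifr));
	strcpy(ifr.ifr_name, "vcan0");
	ioctl(s, SIOCGIFINDEX, &ifr);
	addr.can_ifindex = ifr.ifr_ifindex;
	bind(s, (struct sockaddr *)&addr, sizeof(addr));

	/* opt the raw socket into struct canfd_frame I/O */
	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FD_FRAMES, &enable_fd, sizeof(enable_fd));

	write(s, &cfd, CANFD_MTU);	/* 72 bytes, matching the new vcan MTU */
	return close(s);
}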
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 9c755db..f0c8bd5 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1008,7 +1008,7 @@ e100_send_mdio_bit(unsigned char bit)
}
static unsigned char
-e100_receive_mdio_bit()
+e100_receive_mdio_bit(void)
{
unsigned char bit;
*R_NETWORK_MGM_CTRL = 0;
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 442d91a..c260af5 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -40,18 +40,6 @@
static int numdummies = 1;
-static int dummy_set_address(struct net_device *dev, void *p)
-{
- struct sockaddr *sa = p;
-
- if (!is_valid_ether_addr(sa->sa_data))
- return -EADDRNOTAVAIL;
-
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
- memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
- return 0;
-}
-
/* fake multicast ability */
static void set_multicast_list(struct net_device *dev)
{
@@ -75,10 +63,10 @@ static struct rtnl_link_stats64 *dummy_get_stats64(struct net_device *dev,
dstats = per_cpu_ptr(dev->dstats, i);
do {
- start = u64_stats_fetch_begin(&dstats->syncp);
+ start = u64_stats_fetch_begin_bh(&dstats->syncp);
tbytes = dstats->tx_bytes;
tpackets = dstats->tx_packets;
- } while (u64_stats_fetch_retry(&dstats->syncp, start));
+ } while (u64_stats_fetch_retry_bh(&dstats->syncp, start));
stats->tx_bytes += tbytes;
stats->tx_packets += tpackets;
}
@@ -118,7 +106,7 @@ static const struct net_device_ops dummy_netdev_ops = {
.ndo_start_xmit = dummy_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = set_multicast_list,
- .ndo_set_mac_address = dummy_set_address,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = dummy_get_stats64,
};
@@ -134,6 +122,7 @@ static void dummy_setup(struct net_device *dev)
dev->tx_queue_len = 0;
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
eth_hw_addr_random(dev);
@@ -187,8 +176,10 @@ static int __init dummy_init_module(void)
rtnl_lock();
err = __rtnl_link_register(&dummy_link_ops);
- for (i = 0; i < numdummies && !err; i++)
+ for (i = 0; i < numdummies && !err; i++) {
err = dummy_init_one();
+ cond_resched();
+ }
if (err < 0)
__rtnl_link_unregister(&dummy_link_ops);
rtnl_unlock();
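The dummy driver changes replace the open-coded MAC address setter with the generic eth_mac_addr() (enabled by IFF_LIVE_ADDR_CHANGE) and move the per-CPU stats readers to u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh(), which on 32-bit also disables bottom halves so a reader cannot deadlock against a writer running in softirq context on the same CPU. For orientation only, a minimal sketch of the matching writer side of that pattern, using a made-up per-CPU counter struct, is:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct pcpu_dstats {
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
};

/* Called from the transmit path (BH context) for every sent skb. */
static void dstats_tx_account(struct pcpu_dstats *dstats, unsigned int len)
{
	u64_stats_update_begin(&dstats->syncp);
	dstats->tx_packets++;
	dstats->tx_bytes += len;
	u64_stats_update_end(&dstats->syncp);
}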
diff --git a/drivers/net/ethernet/3com/3c501.c b/drivers/net/ethernet/3com/3c501.c
index bf73e1a..2038eaa 100644
--- a/drivers/net/ethernet/3com/3c501.c
+++ b/drivers/net/ethernet/3com/3c501.c
@@ -143,7 +143,7 @@ static int irq = 5;
static int mem_start;
/**
- * el1_probe: - probe for a 3c501
+ * el1_probe - probe for a 3c501
* @dev: The device structure passed in to probe.
*
* This can be called from two places. The network layer will probe using
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index 2e53867..e1219e0 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -162,6 +162,20 @@ config MAC8390
and read the Ethernet-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
+config MCF8390
+ tristate "ColdFire NS8390 based Ethernet support"
+ depends on COLDFIRE
+ select CRC32
+ ---help---
+ This driver is for Ethernet devices using an NS8390-compatible
+ chipset on many common ColdFire CPU based boards. Many of the older
+ Freescale dev boards use this, and some other common boards like
+ some SnapGear routers do as well.
+
+ If you have one of these boards and want to use the network interface
+	  on them, then choose Y. To compile this driver as a module, choose M
+	  here; the module will be called mcf8390.
+
config NE2000
tristate "NE2000/NE1000 support"
depends on (ISA || (Q40 && m) || M32R || MACH_TX49XX)
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
index d13790b..f43038b 100644
--- a/drivers/net/ethernet/8390/Makefile
+++ b/drivers/net/ethernet/8390/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o
obj-$(CONFIG_HPLAN) += hp.o 8390p.o
obj-$(CONFIG_HYDRA) += hydra.o 8390.o
obj-$(CONFIG_LNE390) += lne390.o 8390.o
+obj-$(CONFIG_MCF8390) += mcf8390.o 8390.o
obj-$(CONFIG_NE2000) += ne.o 8390p.o
obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o
obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 9239592..912ed7a 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -454,7 +454,7 @@ apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int rin
buf[count-1] = inb(NE_BASE + NE_DATAPORT);
}
} else {
- ptrc = (char*)buf;
+ ptrc = buf;
for (cnt = 0; cnt < count; cnt++)
*ptrc++ = inb(NE_BASE + NE_DATAPORT);
}
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
new file mode 100644
index 0000000..230efd6
--- /dev/null
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -0,0 +1,480 @@
+/*
+ * Support for ColdFire CPU based boards using a NS8390 Ethernet device.
+ *
+ * Derived from the many other 8390 drivers.
+ *
+ * (C) Copyright 2012, Greg Ungerer <gerg@uclinux.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+#include <asm/mcf8390.h>
+
+static const char version[] =
+ "mcf8390.c: (15-06-2012) Greg Ungerer <gerg@uclinux.org>";
+
+#define NE_CMD 0x00
+#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset */
+#define NE_RESET	0x1f		/* Issue a read to reset, a write to clear */
+#define NE_EN0_ISR 0x07
+#define NE_EN0_DCFG 0x0e
+#define NE_EN0_RSARLO 0x08
+#define NE_EN0_RSARHI 0x09
+#define NE_EN0_RCNTLO 0x0a
+#define NE_EN0_RXCR 0x0c
+#define NE_EN0_TXCR 0x0d
+#define NE_EN0_RCNTHI 0x0b
+#define NE_EN0_IMR 0x0f
+
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+#ifdef NE2000_ODDOFFSET
+/*
+ * A lot of the ColdFire boards use a separate address region for odd offset
+ * register addresses. The following functions convert and map as required.
+ * Note that the data port accesses are treated a little differently, and
+ * always accessed via the insX/outsX functions.
+ */
+static inline u32 NE_PTR(u32 addr)
+{
+ if (addr & 1)
+ return addr - 1 + NE2000_ODDOFFSET;
+ return addr;
+}
+
+static inline u32 NE_DATA_PTR(u32 addr)
+{
+ return addr;
+}
+
+void ei_outb(u32 val, u32 addr)
+{
+ NE2000_BYTE *rp;
+
+ rp = (NE2000_BYTE *) NE_PTR(addr);
+ *rp = RSWAP(val);
+}
+
+#define ei_inb ei_inb
+u8 ei_inb(u32 addr)
+{
+ NE2000_BYTE *rp, val;
+
+ rp = (NE2000_BYTE *) NE_PTR(addr);
+ val = *rp;
+ return (u8) (RSWAP(val) & 0xff);
+}
+
+void ei_insb(u32 addr, void *vbuf, int len)
+{
+ NE2000_BYTE *rp, val;
+ u8 *buf;
+
+ buf = (u8 *) vbuf;
+ rp = (NE2000_BYTE *) NE_DATA_PTR(addr);
+ for (; (len > 0); len--) {
+ val = *rp;
+ *buf++ = RSWAP(val);
+ }
+}
+
+void ei_insw(u32 addr, void *vbuf, int len)
+{
+ volatile u16 *rp;
+ u16 w, *buf;
+
+ buf = (u16 *) vbuf;
+ rp = (volatile u16 *) NE_DATA_PTR(addr);
+ for (; (len > 0); len--) {
+ w = *rp;
+ *buf++ = BSWAP(w);
+ }
+}
+
+void ei_outsb(u32 addr, const void *vbuf, int len)
+{
+ NE2000_BYTE *rp, val;
+ u8 *buf;
+
+ buf = (u8 *) vbuf;
+ rp = (NE2000_BYTE *) NE_DATA_PTR(addr);
+ for (; (len > 0); len--) {
+ val = *buf++;
+ *rp = RSWAP(val);
+ }
+}
+
+void ei_outsw(u32 addr, const void *vbuf, int len)
+{
+ volatile u16 *rp;
+ u16 w, *buf;
+
+ buf = (u16 *) vbuf;
+ rp = (volatile u16 *) NE_DATA_PTR(addr);
+ for (; (len > 0); len--) {
+ w = *buf++;
+ *rp = BSWAP(w);
+ }
+}
+
+#else /* !NE2000_ODDOFFSET */
+
+#define ei_inb inb
+#define ei_outb outb
+#define ei_insb insb
+#define ei_insw insw
+#define ei_outsb outsb
+#define ei_outsw outsw
+
+#endif /* !NE2000_ODDOFFSET */
+
+#define ei_inb_p ei_inb
+#define ei_outb_p ei_outb
+
+#include "lib8390.c"
+
+/*
+ * Hard reset the card. This used to pause for the same period that a
+ * 8390 reset command required, but that shouldn't be necessary.
+ */
+static void mcf8390_reset_8390(struct net_device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+ u32 addr = dev->base_addr;
+
+ if (ei_debug > 1)
+ netdev_dbg(dev, "resetting the 8390 t=%ld...\n", jiffies);
+
+ ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((ei_inb(addr + NE_EN0_ISR) & ENISR_RESET) == 0) {
+ if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) {
+ netdev_warn(dev, "%s: did not complete\n", __func__);
+ break;
+ }
+ }
+
+ ei_outb(ENISR_RESET, addr + NE_EN0_ISR);
+}
+
+/*
+ * This *shouldn't* happen.
+ * If it does, it's the last thing you'll see
+ */
+static void mcf8390_dmaing_err(const char *func, struct net_device *dev,
+ struct ei_device *ei_local)
+{
+ netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
+ func, ei_local->dmaing, ei_local->irqlock);
+}
+
+/*
+ * Grab the 8390 specific header. Similar to the block_input routine, but
+ * we don't need to be concerned with ring wrap as the header will be at
+ * the start of a page, so we optimize accordingly.
+ */
+static void mcf8390_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ u32 addr = dev->base_addr;
+
+ if (ei_local->dmaing) {
+ mcf8390_dmaing_err(__func__, dev, ei_local);
+ return;
+ }
+
+ ei_local->dmaing |= 0x01;
+ ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, addr + NE_CMD);
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR);
+ ei_outb(sizeof(struct e8390_pkt_hdr), addr + NE_EN0_RCNTLO);
+ ei_outb(0, addr + NE_EN0_RCNTHI);
+ ei_outb(0, addr + NE_EN0_RSARLO); /* On page boundary */
+ ei_outb(ring_page, addr + NE_EN0_RSARHI);
+ ei_outb(E8390_RREAD + E8390_START, addr + NE_CMD);
+
+ ei_insw(addr + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr) >> 1);
+
+	ei_outb(ENISR_RDC, addr + NE_EN0_ISR);	/* Ack intr */
+ ei_local->dmaing &= ~0x01;
+
+ hdr->count = cpu_to_le16(hdr->count);
+}
+
+/*
+ * Block input and output, similar to the Crynwr packet driver.
+ * If you are porting to a new ethercard, look at the packet driver source
+ * for hints. The NEx000 doesn't share the on-board packet memory --
+ * you have to put the packet out through the "remote DMA" dataport
+ * using the dataport I/O helpers.
+ */
+static void mcf8390_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ u32 addr = dev->base_addr;
+ char *buf = skb->data;
+
+ if (ei_local->dmaing) {
+ mcf8390_dmaing_err(__func__, dev, ei_local);
+ return;
+ }
+
+ ei_local->dmaing |= 0x01;
+ ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, addr + NE_CMD);
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR);
+ ei_outb(count & 0xff, addr + NE_EN0_RCNTLO);
+ ei_outb(count >> 8, addr + NE_EN0_RCNTHI);
+ ei_outb(ring_offset & 0xff, addr + NE_EN0_RSARLO);
+ ei_outb(ring_offset >> 8, addr + NE_EN0_RSARHI);
+ ei_outb(E8390_RREAD + E8390_START, addr + NE_CMD);
+
+ ei_insw(addr + NE_DATAPORT, buf, count >> 1);
+ if (count & 1)
+ buf[count - 1] = ei_inb(addr + NE_DATAPORT);
+
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR); /* Ack intr */
+ ei_local->dmaing &= ~0x01;
+}
+
+static void mcf8390_block_output(struct net_device *dev, int count,
+ const unsigned char *buf,
+ const int start_page)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ u32 addr = dev->base_addr;
+ unsigned long dma_start;
+
+	/* Round the count up so 16-bit I/O writes transfer every byte */
+ if (count & 0x1)
+ count++;
+
+ if (ei_local->dmaing) {
+ mcf8390_dmaing_err(__func__, dev, ei_local);
+ return;
+ }
+
+ ei_local->dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ ei_outb(E8390_PAGE0 + E8390_START + E8390_NODMA, addr + NE_CMD);
+
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR);
+
+ /* Now the normal output. */
+ ei_outb(count & 0xff, addr + NE_EN0_RCNTLO);
+ ei_outb(count >> 8, addr + NE_EN0_RCNTHI);
+ ei_outb(0x00, addr + NE_EN0_RSARLO);
+ ei_outb(start_page, addr + NE_EN0_RSARHI);
+ ei_outb(E8390_RWRITE + E8390_START, addr + NE_CMD);
+
+ ei_outsw(addr + NE_DATAPORT, buf, count >> 1);
+
+ dma_start = jiffies;
+ while ((ei_inb(addr + NE_EN0_ISR) & ENISR_RDC) == 0) {
+ if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */
+ netdev_err(dev, "timeout waiting for Tx RDC\n");
+ mcf8390_reset_8390(dev);
+ __NS8390_init(dev, 1);
+ break;
+ }
+ }
+
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR); /* Ack intr */
+ ei_local->dmaing &= ~0x01;
+}
+
+static const struct net_device_ops mcf8390_netdev_ops = {
+ .ndo_open = __ei_open,
+ .ndo_stop = __ei_close,
+ .ndo_start_xmit = __ei_start_xmit,
+ .ndo_tx_timeout = __ei_tx_timeout,
+ .ndo_get_stats = __ei_get_stats,
+ .ndo_set_rx_mode = __ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = __ei_poll,
+#endif
+};
+
+static int mcf8390_init(struct net_device *dev)
+{
+ static u32 offsets[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ };
+ struct ei_device *ei_local = netdev_priv(dev);
+ unsigned char SA_prom[32];
+ u32 addr = dev->base_addr;
+ int start_page, stop_page;
+ int i, ret;
+
+ mcf8390_reset_8390(dev);
+
+ /*
+ * Read the 16 bytes of station address PROM.
+ * We must first initialize registers,
+ * similar to NS8390_init(eifdev, 0).
+ * We can't reliably read the SAPROM address without this.
+ * (I learned the hard way!).
+ */
+ {
+ static const struct {
+ u32 value;
+ u32 offset;
+ } program_seq[] = {
+ {E8390_NODMA + E8390_PAGE0 + E8390_STOP, NE_CMD},
+ /* Select page 0 */
+ {0x48, NE_EN0_DCFG}, /* 0x48: Set byte-wide access */
+ {0x00, NE_EN0_RCNTLO}, /* Clear the count regs */
+ {0x00, NE_EN0_RCNTHI},
+ {0x00, NE_EN0_IMR}, /* Mask completion irq */
+ {0xFF, NE_EN0_ISR},
+ {E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode */
+ {32, NE_EN0_RCNTLO},
+ {0x00, NE_EN0_RCNTHI},
+ {0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000 */
+ {0x00, NE_EN0_RSARHI},
+ {E8390_RREAD + E8390_START, NE_CMD},
+ };
+ for (i = 0; i < ARRAY_SIZE(program_seq); i++) {
+ ei_outb(program_seq[i].value,
+ addr + program_seq[i].offset);
+ }
+ }
+
+ for (i = 0; i < 16; i++) {
+ SA_prom[i] = ei_inb(addr + NE_DATAPORT);
+ ei_inb(addr + NE_DATAPORT);
+ }
+
+ /* We must set the 8390 for word mode. */
+ ei_outb(0x49, addr + NE_EN0_DCFG);
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+
+ /* Install the Interrupt handler */
+ ret = request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = SA_prom[i];
+
+ netdev_dbg(dev, "Found ethernet address: %pM\n", dev->dev_addr);
+
+ ei_local->name = "mcf8390";
+ ei_local->tx_start_page = start_page;
+ ei_local->stop_page = stop_page;
+ ei_local->word16 = 1;
+ ei_local->rx_start_page = start_page + TX_PAGES;
+ ei_local->reset_8390 = mcf8390_reset_8390;
+ ei_local->block_input = mcf8390_block_input;
+ ei_local->block_output = mcf8390_block_output;
+ ei_local->get_8390_hdr = mcf8390_get_8390_hdr;
+ ei_local->reg_offset = offsets;
+
+ dev->netdev_ops = &mcf8390_netdev_ops;
+ __NS8390_init(dev, 0);
+ ret = register_netdev(dev);
+ if (ret) {
+ free_irq(dev->irq, dev);
+ return ret;
+ }
+
+ netdev_info(dev, "addr=0x%08x irq=%d, Ethernet Address %pM\n",
+ addr, dev->irq, dev->dev_addr);
+ return 0;
+}
+
+static int mcf8390_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct ei_device *ei_local;
+ struct resource *mem, *irq;
+ resource_size_t msize;
+ int ret;
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (irq == NULL) {
+ dev_err(&pdev->dev, "no IRQ specified?\n");
+ return -ENXIO;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem == NULL) {
+ dev_err(&pdev->dev, "no memory address specified?\n");
+ return -ENXIO;
+ }
+ msize = resource_size(mem);
+ if (!request_mem_region(mem->start, msize, pdev->name))
+ return -EBUSY;
+
+ dev = ____alloc_ei_netdev(0);
+ if (dev == NULL) {
+ release_mem_region(mem->start, msize);
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ platform_set_drvdata(pdev, dev);
+ ei_local = netdev_priv(dev);
+
+ dev->irq = irq->start;
+ dev->base_addr = mem->start;
+
+ ret = mcf8390_init(dev);
+ if (ret) {
+ release_mem_region(mem->start, msize);
+ free_netdev(dev);
+ return ret;
+ }
+ return 0;
+}
+
+static int mcf8390_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct resource *mem;
+
+ unregister_netdev(dev);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem)
+ release_mem_region(mem->start, resource_size(mem));
+ free_netdev(dev);
+ return 0;
+}
+
+static struct platform_driver mcf8390_drv = {
+ .driver = {
+ .name = "mcf8390",
+ .owner = THIS_MODULE,
+ },
+ .probe = mcf8390_probe,
+ .remove = mcf8390_remove,
+};
+
+module_platform_driver(mcf8390_drv);
+
+MODULE_DESCRIPTION("MCF8390 ColdFire NS8390 driver");
+MODULE_AUTHOR("Greg Ungerer <gerg@uclinux.org>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mcf8390");
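Because mcf8390 is a plain platform driver matched by name, the ColdFire board support code is expected to register a device carrying the chip's memory window and interrupt. The sketch below is illustrative only; the addresses and IRQ number are invented, and real boards define these resources in their arch/m68k platform files:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource board_mcf8390_resources[] = {
	{
		.start	= 0x30000000,			/* hypothetical NS8390 window */
		.end	= 0x30000000 + 0x800 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 36,				/* hypothetical IRQ line */
		.end	= 36,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device board_mcf8390_device = {
	.name		= "mcf8390",	/* must match mcf8390_drv.driver.name */
	.id		= -1,
	.resource	= board_mcf8390_resources,
	.num_resources	= ARRAY_SIZE(board_mcf8390_resources),
};

/* Board init code would then call platform_device_register(&board_mcf8390_device). */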
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 3485011..9c77c73 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1014,7 +1014,7 @@ static int greth_set_mac_add(struct net_device *dev, void *p)
struct greth_regs *regs;
greth = netdev_priv(dev);
- regs = (struct greth_regs *) greth->regs;
+ regs = greth->regs;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -1036,7 +1036,7 @@ static void greth_set_hash_filter(struct net_device *dev)
{
struct netdev_hw_addr *ha;
struct greth_private *greth = netdev_priv(dev);
- struct greth_regs *regs = (struct greth_regs *) greth->regs;
+ struct greth_regs *regs = greth->regs;
u32 mc_filter[2];
unsigned int bitnr;
@@ -1055,7 +1055,7 @@ static void greth_set_multicast_list(struct net_device *dev)
{
int cfg;
struct greth_private *greth = netdev_priv(dev);
- struct greth_regs *regs = (struct greth_regs *) greth->regs;
+ struct greth_regs *regs = greth->regs;
cfg = GRETH_REGLOAD(regs->control);
if (dev->flags & IFF_PROMISC)
@@ -1414,7 +1414,7 @@ static int __devinit greth_of_probe(struct platform_device *ofdev)
goto error1;
}
- regs = (struct greth_regs *) greth->regs;
+ regs = greth->regs;
greth->irq = ofdev->archdata.irqs[0];
dev_set_drvdata(greth->dev, dev);
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 75299f5..7203b52 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -623,7 +623,7 @@ static int lance_rx(struct net_device *dev)
skb_put(skb, len); /* make room */
cp_from_buf(lp->type, skb->data,
- (char *)lp->rx_buf_ptr_cpu[entry], len);
+ lp->rx_buf_ptr_cpu[entry], len);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
@@ -919,7 +919,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
*lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
*lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
- cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data, len);
+ cp_to_buf(lp->type, lp->tx_buf_ptr_cpu[entry], skb->data, len);
/* Now, give the packet to the lance */
*lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index a6e2e84..5c72843 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -873,10 +873,9 @@ lance_init_ring(struct net_device *dev, gfp_t gfp)
skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
lp->rx_skbuff[i] = skb;
- if (skb) {
- skb->dev = dev;
+ if (skb)
rx_buff = skb->data;
- } else
+ else
rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
if (rx_buff == NULL)
lp->rx_ring[i].base = 0;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index ab7ff86..a92ddee7 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -228,7 +228,7 @@ static int __devinit mace_probe(struct platform_device *pdev)
* bits are reversed.
*/
- addr = (void *)MACE_PROM;
+ addr = MACE_PROM;
for (j = 0; j < 6; ++j) {
u8 v = bitrev8(addr[j<<4]);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index ff9c738..21e261f 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -199,7 +199,7 @@ int atl1c_read_mac_addr(struct atl1c_hw *hw)
err = atl1c_get_permanent_address(hw);
if (err)
- random_ether_addr(hw->perm_mac_addr);
+ eth_random_addr(hw->perm_mac_addr);
memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr));
return err;
@@ -602,7 +602,7 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
int atl1c_phy_init(struct atl1c_hw *hw)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
int ret_val;
u16 mii_bmcr_data = BMCR_RESET;
@@ -696,7 +696,7 @@ int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex)
/* select one link mode to get lower power consumption */
int atl1c_phy_to_ps_link(struct atl1c_hw *hw)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
int ret = 0;
u16 autoneg_advertised = ADVERTISED_10baseT_Half;
@@ -768,7 +768,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw)
int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
u32 master_ctrl, mac_ctrl, phy_ctrl;
u32 wol_ctrl, speed;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
index 17d935b..21d8c4d 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
@@ -74,6 +74,8 @@ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
#define PCI_DEVICE_ID_ATHEROS_L1D_2_0 0x1083 /* AR8151 v2.0 Gigabit 1000 */
#define L2CB_V10 0xc0
#define L2CB_V11 0xc1
+#define L2CB_V20 0xc0
+#define L2CB_V21 0xc1
/* register definition */
#define REG_DEVICE_CAP 0x5C
@@ -87,6 +89,9 @@ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
#define LINK_CTRL_L1_EN 0x02
#define LINK_CTRL_EXT_SYNC 0x80
+#define REG_PCIE_IND_ACC_ADDR 0x80
+#define REG_PCIE_IND_ACC_DATA 0x84
+
#define REG_DEV_SERIALNUM_CTRL 0x200
#define REG_DEV_MAC_SEL_MASK 0x0 /* 0:EUI; 1:MAC */
#define REG_DEV_MAC_SEL_SHIFT 0
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 9cc1570..1bf5bbf 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -166,7 +166,7 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
msleep(5);
}
-/*
+/**
* atl1c_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
*/
@@ -179,7 +179,7 @@ static inline void atl1c_irq_enable(struct atl1c_adapter *adapter)
}
}
-/*
+/**
* atl1c_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
*/
@@ -192,7 +192,7 @@ static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
synchronize_irq(adapter->pdev->irq);
}
-/*
+/**
 * atl1c_irq_reset - reset interrupt configuration on the NIC
* @adapter: board private structure
*/
@@ -220,7 +220,7 @@ static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
return data;
}
-/*
+/**
* atl1c_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -261,7 +261,6 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
if ((phy_data & BMSR_LSTATUS) == 0) {
/* link down */
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
hw->hibernate = true;
if (atl1c_reset_mac(hw) != 0)
if (netif_msg_hw(adapter))
@@ -361,7 +360,7 @@ static void atl1c_del_timer(struct atl1c_adapter *adapter)
}
-/*
+/**
* atl1c_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
@@ -374,7 +373,7 @@ static void atl1c_tx_timeout(struct net_device *netdev)
schedule_work(&adapter->common_task);
}
-/*
+/**
* atl1c_set_multi - Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -453,7 +452,7 @@ static void atl1c_restore_vlan(struct atl1c_adapter *adapter)
atl1c_vlan_mode(adapter->netdev, adapter->netdev->features);
}
-/*
+/**
* atl1c_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -518,7 +517,7 @@ static int atl1c_set_features(struct net_device *netdev,
return 0;
}
-/*
+/**
* atl1c_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -577,12 +576,6 @@ static void atl1c_mdio_write(struct net_device *netdev, int phy_id,
atl1c_write_phy_reg(&adapter->hw, reg_num, val);
}
-/*
- * atl1c_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl1c_mii_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
@@ -633,12 +626,6 @@ out:
return retval;
}
-/*
- * atl1c_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -651,7 +638,7 @@ static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
-/*
+/**
* atl1c_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
*
@@ -740,6 +727,8 @@ static const struct atl1c_platform_patch plats[] __devinitdata = {
static void __devinit atl1c_patch_assign(struct atl1c_hw *hw)
{
+ struct pci_dev *pdev = hw->adapter->pdev;
+ u32 misc_ctrl;
int i = 0;
hw->msi_lnkpatch = false;
@@ -754,8 +743,20 @@ static void __devinit atl1c_patch_assign(struct atl1c_hw *hw)
}
i++;
}
+
+ if (hw->device_id == PCI_DEVICE_ID_ATHEROS_L2C_B2 &&
+ hw->revision_id == L2CB_V21) {
+		/* config access mode */
+ pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR,
+ REG_PCIE_DEV_MISC_CTRL);
+ pci_read_config_dword(pdev, REG_PCIE_IND_ACC_DATA, &misc_ctrl);
+ misc_ctrl &= ~0x100;
+ pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR,
+ REG_PCIE_DEV_MISC_CTRL);
+ pci_write_config_dword(pdev, REG_PCIE_IND_ACC_DATA, misc_ctrl);
+ }
}
-/*
+/**
* atl1c_sw_init - Initialize general software structures (struct atl1c_adapter)
* @adapter: board private structure to initialize
*
@@ -781,7 +782,7 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
hw->device_id = pdev->device;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_id = pdev->subsystem_device;
- AT_READ_REG(hw, PCI_CLASS_REVISION, &revision);
+ pci_read_config_dword(pdev, PCI_CLASS_REVISION, &revision);
hw->revision_id = revision & 0xFF;
/* before link up, we assume hibernate is true */
hw->hibernate = true;
@@ -853,7 +854,7 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
buffer_info->skb = NULL;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
}
-/*
+/**
* atl1c_clean_tx_ring - Free Tx-skb
* @adapter: board private structure
*/
@@ -878,7 +879,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
tpd_ring->next_to_use = 0;
}
-/*
+/**
* atl1c_clean_rx_ring - Free rx-reservation skbs
* @adapter: board private structure
*/
@@ -931,7 +932,7 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
}
}
-/*
+/**
* atl1c_free_ring_resources - Free Tx / RX descriptor Resources
* @adapter: board private structure
*
@@ -954,7 +955,7 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
}
}
-/*
+/**
* atl1c_setup_mem_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
@@ -989,12 +990,12 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
}
for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
tpd_ring[i].buffer_info =
- (struct atl1c_buffer *) (tpd_ring->buffer_info + count);
+ (tpd_ring->buffer_info + count);
count += tpd_ring[i].count;
}
rfd_ring->buffer_info =
- (struct atl1c_buffer *) (tpd_ring->buffer_info + count);
+ (tpd_ring->buffer_info + count);
count += rfd_ring->count;
rx_desc_count += rfd_ring->count;
@@ -1227,7 +1228,7 @@ static void atl1c_start_mac(struct atl1c_adapter *adapter)
*/
static int atl1c_reset_mac(struct atl1c_hw *hw)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
u32 ctrl_data = 0;
@@ -1363,7 +1364,7 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed)
return;
}
-/*
+/**
* atl1c_configure - Configure Transmit&Receive Unit after Reset
* @adapter: board private structure
*
@@ -1477,7 +1478,7 @@ static void atl1c_update_hw_stats(struct atl1c_adapter *adapter)
}
}
-/*
+/**
* atl1c_get_stats - Get System Network Statistics
* @netdev: network interface device structure
*
@@ -1531,8 +1532,7 @@ static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter)
static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
enum atl1c_trans_queue type)
{
- struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
- &adapter->tpd_ring[type];
+ struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
struct atl1c_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
@@ -1559,11 +1559,10 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
return true;
}
-/*
+/**
* atl1c_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
- * @pt_regs: CPU registers structure
*/
static irqreturn_t atl1c_intr(int irq, void *data)
{
@@ -1814,9 +1813,8 @@ rrs_checked:
atl1c_alloc_rx_buffer(adapter);
}
-/*
+/**
* atl1c_clean - NAPI Rx polling callback
- * @adapter: board private structure
*/
static int atl1c_clean(struct napi_struct *napi, int budget)
{
@@ -2271,7 +2269,7 @@ static void atl1c_down(struct atl1c_adapter *adapter)
atl1c_reset_dma_ring(adapter);
}
-/*
+/**
* atl1c_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -2310,7 +2308,7 @@ err_up:
return err;
}
-/*
+/**
* atl1c_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -2433,7 +2431,7 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
return 0;
}
-/*
+/**
* atl1c_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in atl1c_pci_tbl
@@ -2580,7 +2578,7 @@ err_dma:
return err;
}
-/*
+/**
* atl1c_remove - Device Removal Routine
* @pdev: PCI device information struct
*
@@ -2606,7 +2604,7 @@ static void __devexit atl1c_remove(struct pci_dev *pdev)
free_netdev(netdev);
}
-/*
+/**
* atl1c_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
* @state: The current pci connection state
@@ -2634,7 +2632,7 @@ static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_NEED_RESET;
}
-/*
+/**
* atl1c_io_slot_reset - called after the pci bus has been reset.
* @pdev: Pointer to PCI device
*
@@ -2662,7 +2660,7 @@ static pci_ers_result_t atl1c_io_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-/*
+/**
* atl1c_io_resume - called when traffic can start flowing again.
* @pdev: Pointer to PCI device
*
@@ -2705,7 +2703,7 @@ static struct pci_driver atl1c_driver = {
.driver.pm = &atl1c_pm_ops,
};
-/*
+/**
* atl1c_init_module - Driver Registration Routine
*
* atl1c_init_module is the first routine called when the driver is
@@ -2716,7 +2714,7 @@ static int __init atl1c_init_module(void)
return pci_register_driver(&atl1c_driver);
}
-/*
+/**
* atl1c_exit_module - Driver Exit Cleanup Routine
*
* atl1c_exit_module is called just before the driver is removed
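Most of the remaining atl1c/atl1e/atlx churn promotes ordinary block comments to kernel-doc headers (the /** opener) and drops parameters such as @pt_regs that no longer exist. For reference, the expected kernel-doc shape, shown here on a hypothetical function rather than one from this patch, is:

/**
 * atlx_example_configure - bring the (hypothetical) unit out of reset
 * @adapter: board private structure
 * @timeout_ms: how long to wait for the hardware to settle
 *
 * Return: 0 on success, a negative errno value on failure.
 */
static int atlx_example_configure(struct atlx_adapter *adapter, int timeout_ms);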
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 6e61f9f..82b2386 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -268,7 +268,7 @@ static int atl1e_set_eeprom(struct net_device *netdev,
if (eeprom_buff == NULL)
return -ENOMEM;
- ptr = (u32 *)eeprom_buff;
+ ptr = eeprom_buff;
if (eeprom->offset & 3) {
/* need read/modify/write of first changed EEPROM word */
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 1220e51..a98acc8 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -89,7 +89,7 @@ static const u16 atl1e_pay_load_size[] = {
128, 256, 512, 1024, 2048, 4096,
};
-/*
+/**
* atl1e_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
*/
@@ -102,7 +102,7 @@ static inline void atl1e_irq_enable(struct atl1e_adapter *adapter)
}
}
-/*
+/**
* atl1e_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
*/
@@ -114,7 +114,7 @@ static inline void atl1e_irq_disable(struct atl1e_adapter *adapter)
synchronize_irq(adapter->pdev->irq);
}
-/*
+/**
 * atl1e_irq_reset - reset interrupt configuration on the NIC
* @adapter: board private structure
*/
@@ -126,7 +126,7 @@ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
AT_WRITE_FLUSH(&adapter->hw);
}
-/*
+/**
* atl1e_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -210,7 +210,7 @@ static int atl1e_check_link(struct atl1e_adapter *adapter)
return 0;
}
-/*
+/**
* atl1e_link_chg_task - deal with link change event Out of interrupt context
* @netdev: network interface device structure
*/
@@ -259,7 +259,7 @@ static void atl1e_cancel_work(struct atl1e_adapter *adapter)
cancel_work_sync(&adapter->link_chg_task);
}
-/*
+/**
* atl1e_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
@@ -271,7 +271,7 @@ static void atl1e_tx_timeout(struct net_device *netdev)
schedule_work(&adapter->reset_task);
}
-/*
+/**
* atl1e_set_multi - Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -345,7 +345,7 @@ static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
atl1e_vlan_mode(adapter->netdev, adapter->netdev->features);
}
-/*
+/**
* atl1e_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -397,7 +397,7 @@ static int atl1e_set_features(struct net_device *netdev,
return 0;
}
-/*
+/**
* atl1e_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -449,12 +449,6 @@ static void atl1e_mdio_write(struct net_device *netdev, int phy_id,
atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
}
-/*
- * atl1e_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl1e_mii_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
@@ -505,12 +499,6 @@ out:
}
-/*
- * atl1e_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -541,7 +529,7 @@ static void atl1e_setup_pcicmd(struct pci_dev *pdev)
msleep(1);
}
-/*
+/**
* atl1e_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
*
@@ -551,7 +539,7 @@ static int __devinit atl1e_alloc_queues(struct atl1e_adapter *adapter)
return 0;
}
-/*
+/**
* atl1e_sw_init - Initialize general software structures (struct atl1e_adapter)
* @adapter: board private structure to initialize
*
@@ -635,14 +623,13 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
return 0;
}
-/*
+/**
* atl1e_clean_tx_ring - Free Tx-skb
* @adapter: board private structure
*/
static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
{
- struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
- &adapter->tx_ring;
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
struct atl1e_tx_buffer *tx_buffer = NULL;
struct pci_dev *pdev = adapter->pdev;
u16 index, ring_count;
@@ -679,14 +666,14 @@ static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
ring_count);
}
-/*
+/**
* atl1e_clean_rx_ring - Free rx-reservation skbs
* @adapter: board private structure
*/
static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter)
{
struct atl1e_rx_ring *rx_ring =
- (struct atl1e_rx_ring *)&adapter->rx_ring;
+ &adapter->rx_ring;
struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc;
u16 i, j;
@@ -762,7 +749,7 @@ static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter)
}
}
-/*
+/**
* atl1e_free_ring_resources - Free Tx / RX descriptor Resources
* @adapter: board private structure
*
@@ -787,7 +774,7 @@ static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
}
}
-/*
+/**
* atl1e_setup_mem_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
@@ -884,14 +871,12 @@ failed:
return err;
}
-static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
+static inline void atl1e_configure_des_ring(struct atl1e_adapter *adapter)
{
- struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
- struct atl1e_rx_ring *rx_ring =
- (struct atl1e_rx_ring *)&adapter->rx_ring;
- struct atl1e_tx_ring *tx_ring =
- (struct atl1e_tx_ring *)&adapter->tx_ring;
+ struct atl1e_hw *hw = &adapter->hw;
+ struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
struct atl1e_rx_page_desc *rx_page_desc = NULL;
int i, j;
@@ -932,7 +917,7 @@ static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
{
- struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
+ struct atl1e_hw *hw = &adapter->hw;
u32 dev_ctrl_data = 0;
u32 max_pay_load = 0;
u32 jumbo_thresh = 0;
@@ -975,7 +960,7 @@ static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
{
- struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
+ struct atl1e_hw *hw = &adapter->hw;
u32 rxf_len = 0;
u32 rxf_low = 0;
u32 rxf_high = 0;
@@ -1078,7 +1063,7 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
AT_WRITE_REG(hw, REG_MAC_CTRL, value);
}
-/*
+/**
* atl1e_configure - Configure Transmit&Receive Unit after Reset
* @adapter: board private structure
*
@@ -1148,7 +1133,7 @@ static int atl1e_configure(struct atl1e_adapter *adapter)
return 0;
}
-/*
+/**
* atl1e_get_stats - Get System Network Statistics
* @netdev: network interface device structure
*
@@ -1224,8 +1209,7 @@ static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter)
static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
{
- struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
- &adapter->tx_ring;
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
struct atl1e_tx_buffer *tx_buffer = NULL;
u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX);
u16 next_to_clean = atomic_read(&tx_ring->next_to_clean);
@@ -1261,11 +1245,10 @@ static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
return true;
}
-/*
+/**
* atl1e_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
- * @pt_regs: CPU registers structure
*/
static irqreturn_t atl1e_intr(int irq, void *data)
{
@@ -1384,15 +1367,14 @@ static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter,
(struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc;
u8 rx_using = rx_page_desc[que].rx_using;
- return (struct atl1e_rx_page *)&(rx_page_desc[que].rx_page[rx_using]);
+ return &(rx_page_desc[que].rx_page[rx_using]);
}
static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
int *work_done, int work_to_do)
{
struct net_device *netdev = adapter->netdev;
- struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *)
- &adapter->rx_ring;
+ struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
struct atl1e_rx_page_desc *rx_page_desc =
(struct atl1e_rx_page_desc *) rx_ring->rx_page_desc;
struct sk_buff *skb = NULL;
@@ -1494,9 +1476,8 @@ fatal_err:
schedule_work(&adapter->reset_task);
}
-/*
+/**
* atl1e_clean - NAPI Rx polling callback
- * @adapter: board private structure
*/
static int atl1e_clean(struct napi_struct *napi, int budget)
{
@@ -1576,7 +1557,7 @@ static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter)
tx_ring->next_to_use = 0;
memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc));
- return (struct atl1e_tpd_desc *)&tx_ring->desc[next_to_use];
+ return &tx_ring->desc[next_to_use];
}
static struct atl1e_tx_buffer *
@@ -1961,7 +1942,7 @@ void atl1e_down(struct atl1e_adapter *adapter)
atl1e_clean_rx_ring(adapter);
}
-/*
+/**
* atl1e_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -2007,7 +1988,7 @@ err_req_irq:
return err;
}
-/*
+/**
* atl1e_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -2061,8 +2042,8 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
if (wufc) {
/* get link status */
- atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
- atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
+ atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
+ atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
mii_advertise_data = ADVERTISE_10HALF;
@@ -2086,7 +2067,7 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
msleep(100);
atl1e_read_phy_reg(hw, MII_BMSR,
- (u16 *)&mii_bmsr_data);
+ &mii_bmsr_data);
if (mii_bmsr_data & BMSR_LSTATUS)
break;
}
@@ -2243,7 +2224,7 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
return 0;
}
-/*
+/**
* atl1e_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in atl1e_pci_tbl
@@ -2397,7 +2378,7 @@ err_dma:
return err;
}
-/*
+/**
* atl1e_remove - Device Removal Routine
* @pdev: PCI device information struct
*
@@ -2429,7 +2410,7 @@ static void __devexit atl1e_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-/*
+/**
* atl1e_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
* @state: The current pci connection state
@@ -2457,7 +2438,7 @@ atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
return PCI_ERS_RESULT_NEED_RESET;
}
-/*
+/**
* atl1e_io_slot_reset - called after the pci bus has been reset.
* @pdev: Pointer to PCI device
*
@@ -2484,7 +2465,7 @@ static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-/*
+/**
* atl1e_io_resume - called when traffic can start flowing again.
* @pdev: Pointer to PCI device
*
@@ -2528,7 +2509,7 @@ static struct pci_driver atl1e_driver = {
.err_handler = &atl1e_err_handler
};
-/*
+/**
* atl1e_init_module - Driver Registration Routine
*
* atl1e_init_module is the first routine called when the driver is
@@ -2539,7 +2520,7 @@ static int __init atl1e_init_module(void)
return pci_register_driver(&atl1e_driver);
}
-/*
+/**
* atl1e_exit_module - Driver Exit Cleanup Routine
*
* atl1e_exit_module is called just before the driver is removed
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_param.c b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
index 0ce60b6..b5086f1 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
@@ -168,7 +168,7 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
return -1;
}
-/*
+/**
* atl1e_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 5d10884..7bae2ad 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -195,7 +195,7 @@ static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
return -1;
}
-/*
+/**
* atl1_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
@@ -538,7 +538,7 @@ static s32 atl1_read_mac_addr(struct atl1_hw *hw)
u16 i;
if (atl1_get_permanent_address(hw)) {
- random_ether_addr(hw->perm_mac_addr);
+ eth_random_addr(hw->perm_mac_addr);
ret = 1;
}
@@ -937,7 +937,7 @@ static void atl1_set_mac_addr(struct atl1_hw *hw)
iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
}
-/*
+/**
* atl1_sw_init - Initialize general software structures (struct atl1_adapter)
* @adapter: board private structure to initialize
*
@@ -1014,12 +1014,6 @@ static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
atl1_write_phy_reg(&adapter->hw, reg_num, val);
}
-/*
- * atl1_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
@@ -1036,7 +1030,7 @@ static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return retval;
}
-/*
+/**
* atl1_setup_mem_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
@@ -1061,7 +1055,7 @@ static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
goto err_nomem;
}
rfd_ring->buffer_info =
- (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);
+ (tpd_ring->buffer_info + tpd_ring->count);
/*
* real ring DMA buffer
@@ -1147,7 +1141,7 @@ static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
atomic_set(&rrd_ring->next_to_clean, 0);
}
-/*
+/**
* atl1_clean_rx_ring - Free RFD Buffers
* @adapter: board private structure
*/
@@ -1187,7 +1181,7 @@ static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
atomic_set(&rrd_ring->next_to_clean, 0);
}
-/*
+/**
* atl1_clean_tx_ring - Free Tx Buffers
* @adapter: board private structure
*/
@@ -1227,7 +1221,7 @@ static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
atomic_set(&tpd_ring->next_to_clean, 0);
}
-/*
+/**
* atl1_free_ring_resources - Free Tx / RX descriptor Resources
* @adapter: board private structure
*
@@ -1470,7 +1464,7 @@ static void set_flow_ctrl_new(struct atl1_hw *hw)
iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}
-/*
+/**
* atl1_configure - Configure Transmit&Receive Unit after Reset
* @adapter: board private structure
*
@@ -1844,7 +1838,7 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
}
}
-/*
+/**
* atl1_alloc_rx_buffers - Replace used receive buffers
* @adapter: address of board private structure
*/
@@ -2489,11 +2483,10 @@ static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter)
return 1;
}
-/*
+/**
* atl1_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
- * @pt_regs: CPU registers structure
*/
static irqreturn_t atl1_intr(int irq, void *data)
{
@@ -2574,7 +2567,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
}
-/*
+/**
* atl1_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -2693,7 +2686,7 @@ static void atl1_reset_dev_task(struct work_struct *work)
netif_device_attach(netdev);
}
-/*
+/**
* atl1_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -2727,7 +2720,7 @@ static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-/*
+/**
* atl1_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -2762,7 +2755,7 @@ err_up:
return err;
}
-/*
+/**
* atl1_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -2930,7 +2923,7 @@ static const struct net_device_ops atl1_netdev_ops = {
#endif
};
-/*
+/**
* atl1_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in atl1_pci_tbl
@@ -3111,7 +3104,7 @@ err_request_regions:
return err;
}
-/*
+/**
* atl1_remove - Device Removal Routine
* @pdev: PCI device information struct
*
@@ -3158,7 +3151,7 @@ static struct pci_driver atl1_driver = {
.driver.pm = ATL1_PM_OPS,
};
-/*
+/**
* atl1_exit_module - Driver Exit Cleanup Routine
*
* atl1_exit_module is called just before the driver is removed
@@ -3169,7 +3162,7 @@ static void __exit atl1_exit_module(void)
pci_unregister_driver(&atl1_driver);
}
-/*
+/**
* atl1_init_module - Driver Registration Routine
*
* atl1_init_module is the first routine called when the driver is
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 6762dc4..57d64b8 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -75,7 +75,7 @@ static void atl2_set_ethtool_ops(struct net_device *netdev);
static void atl2_check_options(struct atl2_adapter *adapter);
-/*
+/**
* atl2_sw_init - Initialize general software structures (struct atl2_adapter)
* @adapter: board private structure to initialize
*
@@ -123,7 +123,7 @@ static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
return 0;
}
-/*
+/**
* atl2_set_multi - Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -177,7 +177,7 @@ static void init_ring_ptrs(struct atl2_adapter *adapter)
adapter->txs_next_clear = 0;
}
-/*
+/**
* atl2_configure - Configure Transmit&Receive Unit after Reset
* @adapter: board private structure
*
@@ -283,7 +283,7 @@ static int atl2_configure(struct atl2_adapter *adapter)
return value;
}
-/*
+/**
* atl2_setup_ring_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
@@ -340,7 +340,7 @@ static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
return 0;
}
-/*
+/**
* atl2_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
*/
@@ -350,7 +350,7 @@ static inline void atl2_irq_enable(struct atl2_adapter *adapter)
ATL2_WRITE_FLUSH(&adapter->hw);
}
-/*
+/**
* atl2_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
*/
@@ -599,11 +599,10 @@ static inline void atl2_clear_phy_int(struct atl2_adapter *adapter)
spin_unlock(&adapter->stats_lock);
}
-/*
+/**
* atl2_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
- * @pt_regs: CPU registers structure
*/
static irqreturn_t atl2_intr(int irq, void *data)
{
@@ -679,7 +678,7 @@ static int atl2_request_irq(struct atl2_adapter *adapter)
netdev);
}
-/*
+/**
* atl2_free_ring_resources - Free Tx / RX descriptor Resources
* @adapter: board private structure
*
@@ -692,7 +691,7 @@ static void atl2_free_ring_resources(struct atl2_adapter *adapter)
adapter->ring_dma);
}
-/*
+/**
* atl2_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -798,7 +797,7 @@ static void atl2_free_irq(struct atl2_adapter *adapter)
#endif
}
-/*
+/**
* atl2_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -918,7 +917,7 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-/*
+/**
* atl2_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -943,7 +942,7 @@ static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-/*
+/**
* atl2_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -969,12 +968,6 @@ static int atl2_set_mac(struct net_device *netdev, void *p)
return 0;
}
-/*
- * atl2_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
@@ -1011,12 +1004,6 @@ static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return 0;
}
-/*
- * atl2_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -1033,7 +1020,7 @@ static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
-/*
+/**
* atl2_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
@@ -1045,7 +1032,7 @@ static void atl2_tx_timeout(struct net_device *netdev)
schedule_work(&adapter->reset_task);
}
-/*
+/**
* atl2_watchdog - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -1070,7 +1057,7 @@ static void atl2_watchdog(unsigned long data)
}
}
-/*
+/**
* atl2_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -1274,9 +1261,8 @@ static int atl2_check_link(struct atl2_adapter *adapter)
return 0;
}
-/*
+/**
* atl2_link_chg_task - deal with link change event Out of interrupt context
- * @netdev: network interface device structure
*/
static void atl2_link_chg_task(struct work_struct *work)
{
@@ -1341,7 +1327,7 @@ static const struct net_device_ops atl2_netdev_ops = {
#endif
};
-/*
+/**
* atl2_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in atl2_pci_tbl
@@ -1501,7 +1487,7 @@ err_dma:
return err;
}
-/*
+/**
* atl2_remove - Device Removal Routine
* @pdev: PCI device information struct
*
@@ -1728,7 +1714,7 @@ static struct pci_driver atl2_driver = {
.shutdown = atl2_shutdown,
};
-/*
+/**
* atl2_init_module - Driver Registration Routine
*
* atl2_init_module is the first routine called when the driver is
@@ -1743,7 +1729,7 @@ static int __init atl2_init_module(void)
}
module_init(atl2_init_module);
-/*
+/**
* atl2_exit_module - Driver Exit Cleanup Routine
*
* atl2_exit_module is called just before the driver is removed
@@ -2360,7 +2346,7 @@ static s32 atl2_read_mac_addr(struct atl2_hw *hw)
{
if (get_permanent_address(hw)) {
/* for test */
- /* FIXME: shouldn't we use random_ether_addr() here? */
+ /* FIXME: shouldn't we use eth_random_addr() here? */
hw->perm_mac_addr[0] = 0x00;
hw->perm_mac_addr[1] = 0x13;
hw->perm_mac_addr[2] = 0x74;
@@ -2997,7 +2983,7 @@ static int __devinit atl2_validate_option(int *value, struct atl2_option *opt)
return -1;
}
-/*
+/**
* atl2_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index b4f3aa4..77ffbc4 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -64,7 +64,7 @@ static int atlx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
-/*
+/**
* atlx_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -115,7 +115,7 @@ static void atlx_check_for_link(struct atlx_adapter *adapter)
schedule_work(&adapter->link_chg_task);
}
-/*
+/**
* atlx_set_multi - Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -162,7 +162,7 @@ static inline void atlx_imr_set(struct atlx_adapter *adapter,
ioread32(adapter->hw.hw_addr + REG_IMR);
}
-/*
+/**
* atlx_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
*/
@@ -172,7 +172,7 @@ static void atlx_irq_enable(struct atlx_adapter *adapter)
adapter->int_enabled = true;
}
-/*
+/**
* atlx_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
*/
@@ -193,7 +193,7 @@ static void atlx_clear_phy_int(struct atlx_adapter *adapter)
spin_unlock_irqrestore(&adapter->lock, flags);
}
-/*
+/**
* atlx_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 46b8b7d..9786c0e 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -483,9 +483,11 @@ out:
static void b44_stats_update(struct b44 *bp)
{
unsigned long reg;
- u32 *val;
+ u64 *val;
val = &bp->hw_stats.tx_good_octets;
+ u64_stats_update_begin(&bp->hw_stats.syncp);
+
for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
*val++ += br32(bp, reg);
}
@@ -496,6 +498,8 @@ static void b44_stats_update(struct b44 *bp)
for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
*val++ += br32(bp, reg);
}
+
+ u64_stats_update_end(&bp->hw_stats.syncp);
}
static void b44_link_report(struct b44 *bp)
@@ -656,7 +660,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
dma_unmap_single(bp->sdev->dma_dev, mapping,
RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
- skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
+ skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
if (skb == NULL)
return -ENOMEM;
mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
@@ -967,7 +971,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_unmap_single(bp->sdev->dma_dev, mapping, len,
DMA_TO_DEVICE);
- bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
+ bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb)
goto err_out;
@@ -1635,44 +1639,49 @@ static int b44_close(struct net_device *dev)
return 0;
}
-static struct net_device_stats *b44_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *nstat)
{
struct b44 *bp = netdev_priv(dev);
- struct net_device_stats *nstat = &dev->stats;
struct b44_hw_stats *hwstat = &bp->hw_stats;
-
- /* Convert HW stats into netdevice stats. */
- nstat->rx_packets = hwstat->rx_pkts;
- nstat->tx_packets = hwstat->tx_pkts;
- nstat->rx_bytes = hwstat->rx_octets;
- nstat->tx_bytes = hwstat->tx_octets;
- nstat->tx_errors = (hwstat->tx_jabber_pkts +
- hwstat->tx_oversize_pkts +
- hwstat->tx_underruns +
- hwstat->tx_excessive_cols +
- hwstat->tx_late_cols);
- nstat->multicast = hwstat->tx_multicast_pkts;
- nstat->collisions = hwstat->tx_total_cols;
-
- nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
- hwstat->rx_undersize);
- nstat->rx_over_errors = hwstat->rx_missed_pkts;
- nstat->rx_frame_errors = hwstat->rx_align_errs;
- nstat->rx_crc_errors = hwstat->rx_crc_errs;
- nstat->rx_errors = (hwstat->rx_jabber_pkts +
- hwstat->rx_oversize_pkts +
- hwstat->rx_missed_pkts +
- hwstat->rx_crc_align_errs +
- hwstat->rx_undersize +
- hwstat->rx_crc_errs +
- hwstat->rx_align_errs +
- hwstat->rx_symbol_errs);
-
- nstat->tx_aborted_errors = hwstat->tx_underruns;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&hwstat->syncp);
+
+ /* Convert HW stats into rtnl_link_stats64 stats. */
+ nstat->rx_packets = hwstat->rx_pkts;
+ nstat->tx_packets = hwstat->tx_pkts;
+ nstat->rx_bytes = hwstat->rx_octets;
+ nstat->tx_bytes = hwstat->tx_octets;
+ nstat->tx_errors = (hwstat->tx_jabber_pkts +
+ hwstat->tx_oversize_pkts +
+ hwstat->tx_underruns +
+ hwstat->tx_excessive_cols +
+ hwstat->tx_late_cols);
+ nstat->multicast = hwstat->tx_multicast_pkts;
+ nstat->collisions = hwstat->tx_total_cols;
+
+ nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
+ hwstat->rx_undersize);
+ nstat->rx_over_errors = hwstat->rx_missed_pkts;
+ nstat->rx_frame_errors = hwstat->rx_align_errs;
+ nstat->rx_crc_errors = hwstat->rx_crc_errs;
+ nstat->rx_errors = (hwstat->rx_jabber_pkts +
+ hwstat->rx_oversize_pkts +
+ hwstat->rx_missed_pkts +
+ hwstat->rx_crc_align_errs +
+ hwstat->rx_undersize +
+ hwstat->rx_crc_errs +
+ hwstat->rx_align_errs +
+ hwstat->rx_symbol_errs);
+
+ nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
- /* Carrier lost counter seems to be broken for some devices */
- nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
+ /* Carrier lost counter seems to be broken for some devices */
+ nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif
+ } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
return nstat;
}
@@ -1993,17 +2002,24 @@ static void b44_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct b44 *bp = netdev_priv(dev);
- u32 *val = &bp->hw_stats.tx_good_octets;
+ struct b44_hw_stats *hwstat = &bp->hw_stats;
+ u64 *data_src, *data_dst;
+ unsigned int start;
u32 i;
spin_lock_irq(&bp->lock);
-
b44_stats_update(bp);
+ spin_unlock_irq(&bp->lock);
- for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
- *data++ = *val++;
+ do {
+ data_src = &hwstat->tx_good_octets;
+ data_dst = data;
+ start = u64_stats_fetch_begin_bh(&hwstat->syncp);
- spin_unlock_irq(&bp->lock);
+ for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
+ *data_dst++ = *data_src++;
+
+ } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
}
static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -2113,7 +2129,7 @@ static const struct net_device_ops b44_netdev_ops = {
.ndo_open = b44_open,
.ndo_stop = b44_close,
.ndo_start_xmit = b44_start_xmit,
- .ndo_get_stats = b44_get_stats,
+ .ndo_get_stats64 = b44_get_stats64,
.ndo_set_rx_mode = b44_set_rx_mode,
.ndo_set_mac_address = b44_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
index e1905a4..8993d72 100644
--- a/drivers/net/ethernet/broadcom/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -338,9 +338,10 @@ struct ring_info {
* the layout
*/
struct b44_hw_stats {
-#define _B44(x) u32 x;
+#define _B44(x) u64 x;
B44_STAT_REG_DECLARE
#undef _B44
+ struct u64_stats_sync syncp;
};
struct ssb_device;
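
The b44 changes widen the hardware counters to u64 and add a u64_stats_sync so 32-bit readers never see a torn 64-bit value: b44_stats_update() brackets its accumulation with u64_stats_update_begin()/u64_stats_update_end(), and b44_get_stats64()/b44_get_ethtool_stats() retry with the fetch_begin_bh/fetch_retry_bh pair until they get a consistent snapshot. A minimal sketch of that pattern for a hypothetical driver-private structure (foo_stats, foo_update and foo_read are illustrative names; the _bh fetch variants match this era of the API):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct foo_stats {
	u64 rx_packets;
	u64 rx_bytes;
	struct u64_stats_sync syncp;
};

/* Writer side: runs under the driver's own serialization. */
static void foo_update(struct foo_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->rx_packets++;
	s->rx_bytes += len;
	u64_stats_update_end(&s->syncp);
}

/* Reader side: loops until the sequence count is stable. */
static void foo_read(struct foo_stats *s, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&s->syncp);
		*pkts = s->rx_packets;
		*bytes = s->rx_bytes;
	} while (u64_stats_fetch_retry_bh(&s->syncp, start));
}
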
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index ac7b744..79cebd8 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
@@ -57,8 +58,8 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.2.1"
-#define DRV_MODULE_RELDATE "Dec 18, 2011"
+#define DRV_MODULE_VERSION "2.2.3"
+#define DRV_MODULE_RELDATE "June 27, 2012"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -872,8 +873,7 @@ bnx2_alloc_mem(struct bnx2 *bp)
bnapi = &bp->bnx2_napi[i];
- sblk = (void *) (status_blk +
- BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+ sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
bnapi->status_blk.msix = sblk;
bnapi->hw_tx_cons_ptr =
&sblk->status_tx_quick_consumer_index;
@@ -1972,22 +1972,26 @@ bnx2_remote_phy_event(struct bnx2 *bp)
switch (speed) {
case BNX2_LINK_STATUS_10HALF:
bp->duplex = DUPLEX_HALF;
+ /* fall through */
case BNX2_LINK_STATUS_10FULL:
bp->line_speed = SPEED_10;
break;
case BNX2_LINK_STATUS_100HALF:
bp->duplex = DUPLEX_HALF;
+ /* fall through */
case BNX2_LINK_STATUS_100BASE_T4:
case BNX2_LINK_STATUS_100FULL:
bp->line_speed = SPEED_100;
break;
case BNX2_LINK_STATUS_1000HALF:
bp->duplex = DUPLEX_HALF;
+ /* fall through */
case BNX2_LINK_STATUS_1000FULL:
bp->line_speed = SPEED_1000;
break;
case BNX2_LINK_STATUS_2500HALF:
bp->duplex = DUPLEX_HALF;
+ /* fall through */
case BNX2_LINK_STATUS_2500FULL:
bp->line_speed = SPEED_2500;
break;
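
The added /* fall through */ comments document that each HALF case is meant to drop into the FULL case below it: the half-duplex case only overrides the duplex setting and then shares the line-speed assignment. A small standalone illustration of the same decode shape, with invented constants:

#include <stdio.h>

enum { LS_10HALF, LS_10FULL, LS_100HALF, LS_100FULL };

static void decode(int status, int *speed, int *duplex)
{
	*duplex = 1;			/* assume full duplex */
	switch (status) {
	case LS_10HALF:
		*duplex = 0;
		/* fall through */
	case LS_10FULL:
		*speed = 10;
		break;
	case LS_100HALF:
		*duplex = 0;
		/* fall through */
	case LS_100FULL:
		*speed = 100;
		break;
	default:
		*speed = 0;
	}
}

int main(void)
{
	int speed, duplex;

	decode(LS_100HALF, &speed, &duplex);
	printf("%d Mb/s, %s duplex\n", speed, duplex ? "full" : "half");
	return 0;
}
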
@@ -2473,6 +2477,7 @@ bnx2_dump_mcp_state(struct bnx2 *bp)
bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
pr_cont(" condition[%08x]\n",
bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
+ DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
DP_SHMEM_LINE(bp, 0x3cc);
DP_SHMEM_LINE(bp, 0x3dc);
DP_SHMEM_LINE(bp, 0x3ec);
@@ -5372,7 +5377,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
int k, last;
if (skb == NULL) {
- j++;
+ j = NEXT_TX_BD(j);
continue;
}
@@ -5384,8 +5389,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
tx_buf->skb = NULL;
last = tx_buf->nr_frags;
- j++;
- for (k = 0; k < last; k++, j++) {
+ j = NEXT_TX_BD(j);
+ for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(tx_buf, mapping),
@@ -6245,7 +6250,7 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
- int cpus = num_online_cpus();
+ int cpus = netif_get_num_default_rss_queues();
int msix_vecs;
if (!bp->num_req_rx_rings)
@@ -6383,6 +6388,7 @@ bnx2_reset_task(struct work_struct *work)
{
struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
int rc;
+ u16 pcicmd;
rtnl_lock();
if (!netif_running(bp->dev)) {
@@ -6392,6 +6398,12 @@ bnx2_reset_task(struct work_struct *work)
bnx2_netif_stop(bp, true);
+ pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
+ if (!(pcicmd & PCI_COMMAND_MEMORY)) {
+ /* in case PCI block has reset */
+ pci_restore_state(bp->pdev);
+ pci_save_state(bp->pdev);
+ }
rc = bnx2_init_nic(bp, 1);
if (rc) {
netdev_err(bp->dev, "failed to reset NIC, closing\n");
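
The reset task now peeks at PCI_COMMAND first: if memory decoding is off, the PCI block itself was likely reset, so the previously saved config space is written back and then re-saved before the NIC is reinitialized. A sketch of just that check in isolation, assuming a copy was stored earlier with pci_save_state(), which the restore relies on:

#include <linux/pci.h>

/* Re-apply saved config space if the device lost its memory-space
 * enable bit, a sign the PCI block went through a reset. */
static void example_fixup_after_reset(struct pci_dev *pdev)
{
	u16 pcicmd;

	pci_read_config_word(pdev, PCI_COMMAND, &pcicmd);
	if (!(pcicmd & PCI_COMMAND_MEMORY)) {
		pci_restore_state(pdev);	/* write the saved copy back */
		pci_save_state(pdev);		/* refresh the copy for next time */
	}
}
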
@@ -6406,6 +6418,75 @@ bnx2_reset_task(struct work_struct *work)
rtnl_unlock();
}
+#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
+
+static void
+bnx2_dump_ftq(struct bnx2 *bp)
+{
+ int i;
+ u32 reg, bdidx, cid, valid;
+ struct net_device *dev = bp->dev;
+ static const struct ftq_reg {
+ char *name;
+ u32 off;
+ } ftq_arr[] = {
+ BNX2_FTQ_ENTRY(RV2P_P),
+ BNX2_FTQ_ENTRY(RV2P_T),
+ BNX2_FTQ_ENTRY(RV2P_M),
+ BNX2_FTQ_ENTRY(TBDR_),
+ BNX2_FTQ_ENTRY(TDMA_),
+ BNX2_FTQ_ENTRY(TXP_),
+ BNX2_FTQ_ENTRY(TXP_),
+ BNX2_FTQ_ENTRY(TPAT_),
+ BNX2_FTQ_ENTRY(RXP_C),
+ BNX2_FTQ_ENTRY(RXP_),
+ BNX2_FTQ_ENTRY(COM_COMXQ_),
+ BNX2_FTQ_ENTRY(COM_COMTQ_),
+ BNX2_FTQ_ENTRY(COM_COMQ_),
+ BNX2_FTQ_ENTRY(CP_CPQ_),
+ };
+
+ netdev_err(dev, "<--- start FTQ dump --->\n");
+ for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
+ netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
+ bnx2_reg_rd_ind(bp, ftq_arr[i].off));
+
+ netdev_err(dev, "CPU states:\n");
+ for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
+ netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
+ reg, bnx2_reg_rd_ind(bp, reg),
+ bnx2_reg_rd_ind(bp, reg + 4),
+ bnx2_reg_rd_ind(bp, reg + 8),
+ bnx2_reg_rd_ind(bp, reg + 0x1c),
+ bnx2_reg_rd_ind(bp, reg + 0x1c),
+ bnx2_reg_rd_ind(bp, reg + 0x20));
+
+ netdev_err(dev, "<--- end FTQ dump --->\n");
+ netdev_err(dev, "<--- start TBDC dump --->\n");
+ netdev_err(dev, "TBDC free cnt: %ld\n",
+ REG_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
+ netdev_err(dev, "LINE CID BIDX CMD VALIDS\n");
+ for (i = 0; i < 0x20; i++) {
+ int j = 0;
+
+ REG_WR(bp, BNX2_TBDC_BD_ADDR, i);
+ REG_WR(bp, BNX2_TBDC_CAM_OPCODE,
+ BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
+ REG_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
+ while ((REG_RD(bp, BNX2_TBDC_COMMAND) &
+ BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
+ j++;
+
+ cid = REG_RD(bp, BNX2_TBDC_CID);
+ bdidx = REG_RD(bp, BNX2_TBDC_BIDX);
+ valid = REG_RD(bp, BNX2_TBDC_CAM_OPCODE);
+ netdev_err(dev, "%02x %06x %04lx %02x [%x]\n",
+ i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
+ bdidx >> 24, (valid >> 8) & 0x0ff);
+ }
+ netdev_err(dev, "<--- end TBDC dump --->\n");
+}
+
static void
bnx2_dump_state(struct bnx2 *bp)
{
@@ -6435,6 +6516,7 @@ bnx2_tx_timeout(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
+ bnx2_dump_ftq(bp);
bnx2_dump_state(bp);
bnx2_dump_mcp_state(bp);
@@ -6628,6 +6710,7 @@ bnx2_close(struct net_device *dev)
bnx2_disable_int_sync(bp);
bnx2_napi_disable(bp);
+ netif_tx_disable(dev);
del_timer_sync(&bp->timer);
bnx2_shutdown_chip(bp);
bnx2_free_irq(bp);
@@ -7832,7 +7915,7 @@ bnx2_get_5709_media(struct bnx2 *bp)
else
strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
- if (PCI_FUNC(bp->pdev->devfn) == 0) {
+ if (bp->func == 0) {
switch (strap) {
case 0x4:
case 0x5:
@@ -8131,9 +8214,12 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
+ if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
+ bp->func = 1;
+
if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
BNX2_SHM_HDR_SIGNATURE_SIG) {
- u32 off = PCI_FUNC(pdev->devfn) << 2;
+ u32 off = bp->func << 2;
bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
} else
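
The new BNX2_FTQ_ENTRY() macro (together with the <linux/stringify.h> include added above) builds each dump-table row from a single argument: token pasting forms both a bare register name and the BNX2_-prefixed constant, and because the bare pasted name is not itself a macro, __stringify() keeps it as a literal string while the prefixed token resolves to the register offset. A standalone sketch of the same trick with made-up registers:

#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)	/* same two-step expansion the kernel uses */

#define REG_FOO_CTL	0x100
#define REG_BAR_CTL	0x140

/* One argument yields both a printable name and the offset constant:
 * FOO_CTL is not a macro, so it stringifies literally, while
 * REG_FOO_CTL expands to its value. */
#define REG_ENTRY(r)	{ __stringify(r##_CTL), REG_##r##_CTL }

static const struct { const char *name; unsigned int off; } regs[] = {
	REG_ENTRY(FOO),
	REG_ENTRY(BAR),
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
		printf("%s at 0x%03x\n", regs[i].name, regs[i].off);
	return 0;
}
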
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index dc06bda..af6451d 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -4642,6 +4642,47 @@ struct l2_fhdr {
#define BNX2_TBDR_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
+/*
+ * tbdc definition
+ * offset: 0x5400
+ */
+#define BNX2_TBDC_COMMAND 0x5400
+#define BNX2_TBDC_COMMAND_CMD_ENABLED (1UL<<0)
+#define BNX2_TBDC_COMMAND_CMD_FLUSH (1UL<<1)
+#define BNX2_TBDC_COMMAND_CMD_SOFT_RST (1UL<<2)
+#define BNX2_TBDC_COMMAND_CMD_REG_ARB (1UL<<3)
+#define BNX2_TBDC_COMMAND_WRCHK_RANGE_ERROR (1UL<<4)
+#define BNX2_TBDC_COMMAND_WRCHK_ALL_ONES_ERROR (1UL<<5)
+#define BNX2_TBDC_COMMAND_WRCHK_ALL_ZEROS_ERROR (1UL<<6)
+#define BNX2_TBDC_COMMAND_WRCHK_ANY_ONES_ERROR (1UL<<7)
+#define BNX2_TBDC_COMMAND_WRCHK_ANY_ZEROS_ERROR (1UL<<8)
+
+#define BNX2_TBDC_STATUS 0x5404
+#define BNX2_TBDC_STATUS_FREE_CNT (0x3fUL<<0)
+
+#define BNX2_TBDC_BD_ADDR 0x5424
+
+#define BNX2_TBDC_BIDX 0x542c
+#define BNX2_TBDC_BDIDX_BDIDX (0xffffUL<<0)
+#define BNX2_TBDC_BDIDX_CMD (0xffUL<<24)
+
+#define BNX2_TBDC_CID 0x5430
+
+#define BNX2_TBDC_CAM_OPCODE 0x5434
+#define BNX2_TBDC_CAM_OPCODE_OPCODE (0x7UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_SEARCH (0UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_CACHE_WRITE (1UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_INVALIDATE (2UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_WRITE (4UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ (5UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_RAM_WRITE (6UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_RAM_READ (7UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_SMASK_BDIDX (1UL<<4)
+#define BNX2_TBDC_CAM_OPCODE_SMASK_CID (1UL<<5)
+#define BNX2_TBDC_CAM_OPCODE_SMASK_CMD (1UL<<6)
+#define BNX2_TBDC_CAM_OPCODE_WMT_FAILED (1UL<<7)
+#define BNX2_TBDC_CAM_OPCODE_CAM_VALIDS (0xffUL<<8)
+
/*
* tdma_reg definition
@@ -6930,6 +6971,8 @@ struct bnx2 {
struct bnx2_irq irq_tbl[BNX2_MAX_MSIX_VEC];
int irq_nvecs;
+ u8 func;
+
u8 num_tx_rings;
u8 num_rx_rings;
@@ -7314,6 +7357,8 @@ struct bnx2_rv2p_fw_file {
#define BNX2_BC_STATE_RESET_TYPE_VALUE(msg) (BNX2_BC_STATE_RESET_TYPE_SIG | \
(msg))
+#define BNX2_BC_RESET_TYPE 0x000001c0
+
#define BNX2_BC_STATE 0x000001c4
#define BNX2_BC_STATE_ERR_MASK 0x0000ff00
#define BNX2_BC_STATE_SIGN 0x42530000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e30e2a2..77bcd4c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,8 +23,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.72.50-0"
-#define DRV_MODULE_RELDATE "2012/04/23"
+#define DRV_MODULE_VERSION "1.72.51-0"
+#define DRV_MODULE_RELDATE "2012/06/18"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -51,6 +51,7 @@
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
+#include "bnx2x_mfw_req.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x_sp.h"
@@ -248,13 +249,12 @@ enum {
BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
};
-#define BNX2X_CNIC_START_ETH_CID 48
-enum {
+#define BNX2X_CNIC_START_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) *\
+ (bp)->max_cos)
/* iSCSI L2 */
- BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID,
+#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp))
/* FCoE L2 */
- BNX2X_FCOE_ETH_CID,
-};
+#define BNX2X_FCOE_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp) + 1)
/** Additional rings budgeting */
#ifdef BCM_CNIC
@@ -276,29 +276,30 @@ enum {
#define FIRST_TX_ONLY_COS_INDEX 1
#define FIRST_TX_COS_INDEX 0
-/* defines for decodeing the fastpath index and the cos index out of the
- * transmission queue index
- */
-#define MAX_TXQS_PER_COS FP_SB_MAX_E1x
-
-#define TXQ_TO_FP(txq_index) ((txq_index) % MAX_TXQS_PER_COS)
-#define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS)
-
/* rules for calculating the cids of tx-only connections */
-#define CID_TO_FP(cid) ((cid) % MAX_TXQS_PER_COS)
-#define CID_COS_TO_TX_ONLY_CID(cid, cos) (cid + cos * MAX_TXQS_PER_COS)
+#define CID_TO_FP(cid, bp) ((cid) % BNX2X_NUM_NON_CNIC_QUEUES(bp))
+#define CID_COS_TO_TX_ONLY_CID(cid, cos, bp) \
+ (cid + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
/* fp index inside class of service range */
-#define FP_COS_TO_TXQ(fp, cos) ((fp)->index + cos * MAX_TXQS_PER_COS)
-
-/*
- * 0..15 eth cos0
- * 16..31 eth cos1 if applicable
- * 32..47 eth cos2 If applicable
- * fcoe queue follows eth queues (16, 32, 48 depending on cos)
+#define FP_COS_TO_TXQ(fp, cos, bp) \
+ ((fp)->index + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
+
+/* Indexes for transmission queues array:
+ * txdata for RSS i CoS j is at location i + (j * num of RSS)
+ * txdata for FCoE (if exist) is at location max cos * num of RSS
+ * txdata for FWD (if exist) is one location after FCoE
+ * txdata for OOO (if exist) is one location after FWD
*/
-#define MAX_ETH_TXQ_IDX(bp) (MAX_TXQS_PER_COS * (bp)->max_cos)
-#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp))
+enum {
+ FCOE_TXQ_IDX_OFFSET,
+ FWD_TXQ_IDX_OFFSET,
+ OOO_TXQ_IDX_OFFSET,
+};
+#define MAX_ETH_TXQ_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos)
+#ifdef BCM_CNIC
+#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET)
+#endif
/* fast path */
/*
@@ -453,6 +454,7 @@ struct bnx2x_agg_info {
u16 vlan_tag;
u16 len_on_bd;
u32 rxhash;
+ bool l4_rxhash;
u16 gro_size;
u16 full_page;
};
@@ -481,6 +483,8 @@ struct bnx2x_fp_txdata {
__le16 *tx_cons_sb;
int txq_index;
+ struct bnx2x_fastpath *parent_fp;
+ int tx_ring_size;
};
enum bnx2x_tpa_mode_t {
@@ -507,7 +511,7 @@ struct bnx2x_fastpath {
enum bnx2x_tpa_mode_t mode;
u8 max_cos; /* actual number of active tx coses */
- struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS];
+ struct bnx2x_fp_txdata *txdata_ptr[BNX2X_MULTI_TX_COS];
struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */
struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */
@@ -547,51 +551,45 @@ struct bnx2x_fastpath {
rx_calls;
/* TPA related */
- struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2];
+ struct bnx2x_agg_info *tpa_info;
u8 disable_tpa;
#ifdef BNX2X_STOP_ON_ERROR
u64 tpa_queue_used;
#endif
-
- struct tstorm_per_queue_stats old_tclient;
- struct ustorm_per_queue_stats old_uclient;
- struct xstorm_per_queue_stats old_xclient;
- struct bnx2x_eth_q_stats eth_q_stats;
- struct bnx2x_eth_q_stats_old eth_q_stats_old;
-
/* The size is calculated using the following:
sizeof name field from netdev structure +
4 ('-Xx-' string) +
4 (for the digits and to make it DWORD aligned) */
#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
char name[FP_NAME_SIZE];
-
- /* MACs object */
- struct bnx2x_vlan_mac_obj mac_obj;
-
- /* Queue State object */
- struct bnx2x_queue_sp_obj q_obj;
-
};
-#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
+#define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var)
+#define bnx2x_sp_obj(bp, fp) ((bp)->sp_objs[(fp)->index])
+#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
+#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
/* Use 2500 as a mini-jumbo MTU for FCoE */
#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
-/* FCoE L2 `fastpath' entry is right after the eth entries */
-#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp)
-#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX])
-#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
-#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \
- txdata[FIRST_TX_COS_INDEX].var)
+#define FCOE_IDX_OFFSET 0
+
+#define FCOE_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) + \
+ FCOE_IDX_OFFSET)
+#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX(bp)])
+#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
+#define bnx2x_fcoe_inner_sp_obj(bp) (&bp->sp_objs[FCOE_IDX(bp)])
+#define bnx2x_fcoe_sp_obj(bp, var) (bnx2x_fcoe_inner_sp_obj(bp)->var)
+#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \
+ txdata_ptr[FIRST_TX_COS_INDEX] \
+ ->var)
#define IS_ETH_FP(fp) (fp->index < \
BNX2X_NUM_ETH_QUEUES(fp->bp))
#ifdef BCM_CNIC
-#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX)
-#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX)
+#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX(fp->bp))
+#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp))
#else
#define IS_FCOE_FP(fp) false
#define IS_FCOE_IDX(idx) false
@@ -616,6 +614,22 @@ struct bnx2x_fastpath {
#define TX_BD(x) ((x) & MAX_TX_BD)
#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
+/* number of NEXT_PAGE descriptors may be required during placement */
+#define NEXT_CNT_PER_TX_PKT(bds) \
+ (((bds) + MAX_TX_DESC_CNT - 1) / \
+ MAX_TX_DESC_CNT * NEXT_PAGE_TX_DESC_CNT)
+/* max BDs per tx packet w/o next_pages:
+ * START_BD - describes the packet
+ * START_BD(split) - includes unpaged data segment for GSO
+ * PARSING_BD - for TSO and CSUM data
+ * Frag BDs - describe pages for frags
+ */
+#define BDS_PER_TX_PKT 3
+#define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT)
+/* max BDs per tx packet including next pages */
+#define MAX_DESC_PER_TX_PKT (MAX_BDS_PER_TX_PKT + \
+ NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))
+
/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
#define NUM_RX_RINGS 8
#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
@@ -747,21 +761,6 @@ struct bnx2x_fastpath {
#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
-#define BNX2X_IP_CSUM_ERR(cqe) \
- (!((cqe)->fast_path_cqe.status_flags & \
- ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
- ((cqe)->fast_path_cqe.type_error_flags & \
- ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
-
-#define BNX2X_L4_CSUM_ERR(cqe) \
- (!((cqe)->fast_path_cqe.status_flags & \
- ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
- ((cqe)->fast_path_cqe.type_error_flags & \
- ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
-
-#define BNX2X_RX_CSUM_OK(cqe) \
- (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
-
#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
(((le16_to_cpu(flags) & \
PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
@@ -820,8 +819,11 @@ struct bnx2x_common {
#define CHIP_NUM_57810_MF 0x16ae
#define CHIP_NUM_57811 0x163d
#define CHIP_NUM_57811_MF 0x163e
-#define CHIP_NUM_57840 0x168d
-#define CHIP_NUM_57840_MF 0x16ab
+#define CHIP_NUM_57840_OBSOLETE 0x168d
+#define CHIP_NUM_57840_MF_OBSOLETE 0x16ab
+#define CHIP_NUM_57840_4_10 0x16a1
+#define CHIP_NUM_57840_2_20 0x16a2
+#define CHIP_NUM_57840_MF 0x16a4
#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
@@ -833,8 +835,12 @@ struct bnx2x_common {
#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF)
#define CHIP_IS_57811(bp) (CHIP_NUM(bp) == CHIP_NUM_57811)
#define CHIP_IS_57811_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_MF)
-#define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840)
-#define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF)
+#define CHIP_IS_57840(bp) \
+ ((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || \
+ (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || \
+ (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE))
+#define CHIP_IS_57840_MF(bp) ((CHIP_NUM(bp) == CHIP_NUM_57840_MF) || \
+ (CHIP_NUM(bp) == CHIP_NUM_57840_MF_OBSOLETE))
#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
CHIP_IS_57711E(bp))
#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
@@ -993,8 +999,8 @@ union cdu_context {
};
/* CDU host DB constants */
-#define CDU_ILT_PAGE_SZ_HW 3
-#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */
+#define CDU_ILT_PAGE_SZ_HW 2
+#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */
#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
#ifdef BCM_CNIC
@@ -1197,11 +1203,31 @@ struct bnx2x_prev_path_list {
struct list_head list;
};
+struct bnx2x_sp_objs {
+ /* MACs object */
+ struct bnx2x_vlan_mac_obj mac_obj;
+
+ /* Queue State object */
+ struct bnx2x_queue_sp_obj q_obj;
+};
+
+struct bnx2x_fp_stats {
+ struct tstorm_per_queue_stats old_tclient;
+ struct ustorm_per_queue_stats old_uclient;
+ struct xstorm_per_queue_stats old_xclient;
+ struct bnx2x_eth_q_stats eth_q_stats;
+ struct bnx2x_eth_q_stats_old eth_q_stats_old;
+};
+
struct bnx2x {
/* Fields used in the tx and intr/napi performance paths
* are grouped together in the beginning of the structure
*/
struct bnx2x_fastpath *fp;
+ struct bnx2x_sp_objs *sp_objs;
+ struct bnx2x_fp_stats *fp_stats;
+ struct bnx2x_fp_txdata *bnx2x_txq;
+ int bnx2x_txq_size;
void __iomem *regview;
void __iomem *doorbells;
u16 db_size;
@@ -1316,7 +1342,9 @@ struct bnx2x {
#define NO_ISCSI_FLAG (1 << 14)
#define NO_FCOE_FLAG (1 << 15)
#define BC_SUPPORTS_PFC_STATS (1 << 17)
+#define BC_SUPPORTS_FCOE_FEATURES (1 << 19)
#define USING_SINGLE_MSIX_FLAG (1 << 20)
+#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1392,6 +1420,7 @@ struct bnx2x {
#define BNX2X_MAX_COS 3
#define BNX2X_MAX_TX_COS 2
int num_queues;
+ int num_napi_queues;
int disable_tpa;
u32 rx_mode;
@@ -1404,6 +1433,7 @@ struct bnx2x {
u8 igu_dsb_id;
u8 igu_base_sb;
u8 igu_sb_cnt;
+
dma_addr_t def_status_blk_mapping;
struct bnx2x_slowpath *slowpath;
@@ -1435,7 +1465,11 @@ struct bnx2x {
dma_addr_t fw_stats_data_mapping;
int fw_stats_data_sz;
- struct hw_context context;
+ /* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB
+ * context size we need 8 ILT entries.
+ */
+#define ILT_MAX_L2_LINES 8
+ struct hw_context context[ILT_MAX_L2_LINES];
struct bnx2x_ilt *ilt;
#define BP_ILT(bp) ((bp)->ilt)
@@ -1448,13 +1482,14 @@ struct bnx2x {
/*
* Maximum CID count that might be required by the bnx2x:
- * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related)
+ * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
*/
-#define BNX2X_L2_CID_COUNT(bp) (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\
- NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
+ + NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
+ + NON_ETH_CONTEXT_USE + CNIC_PRESENT)
#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
ILT_PAGE_CIDS))
-#define BNX2X_DB_SIZE(bp) (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT))
int qm_cid_count;
@@ -1613,6 +1648,8 @@ struct bnx2x {
extern int num_queues;
#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
+#define BNX2X_NUM_NON_CNIC_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - \
+ NON_ETH_CONTEXT_USE)
#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp)
#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
@@ -1671,6 +1708,9 @@ struct bnx2x_func_init_params {
continue; \
else
+#define for_each_napi_rx_queue(bp, var) \
+ for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
+
/* Skip OOO FP */
#define for_each_tx_queue(bp, var) \
for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
@@ -1724,15 +1764,6 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
struct bnx2x_vlan_mac_obj *obj, bool set,
int mac_type, unsigned long *ramrod_flags);
/**
- * Deletes all MACs configured for the specific MAC object.
- *
- * @param bp Function driver instance
- * @param mac_obj MAC object to cleanup
- *
- * @return zero if all MACs were cleaned
- */
-
-/**
* bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
*
* @bp: driver handle
@@ -1832,6 +1863,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define LOAD_NORMAL 0
#define LOAD_OPEN 1
#define LOAD_DIAG 2
+#define LOAD_LOOPBACK_EXT 3
#define UNLOAD_NORMAL 0
#define UNLOAD_CLOSE 1
#define UNLOAD_RECOVERY 2
@@ -1914,13 +1946,17 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define PCICFG_LINK_SPEED 0xf0000
#define PCICFG_LINK_SPEED_SHIFT 16
-
-#define BNX2X_NUM_TESTS 7
+#define BNX2X_NUM_TESTS_SF 7
+#define BNX2X_NUM_TESTS_MF 3
+#define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
+ BNX2X_NUM_TESTS_SF)
#define BNX2X_PHY_LOOPBACK 0
#define BNX2X_MAC_LOOPBACK 1
+#define BNX2X_EXT_LOOPBACK 2
#define BNX2X_PHY_LOOPBACK_FAILED 1
#define BNX2X_MAC_LOOPBACK_FAILED 2
+#define BNX2X_EXT_LOOPBACK_FAILED 3
#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \
BNX2X_PHY_LOOPBACK_FAILED)
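
The new NEXT_CNT_PER_TX_PKT()/MAX_DESC_PER_TX_PKT macros replace the bare MAX_SKB_FRAGS + 3 stop/wake threshold in the xmit path: the worst case is the BDs the packet itself needs (start, split and parsing BDs plus one per fragment) plus the next-page link BDs that such a chain can cross. A small standalone calculation with illustrative stand-ins (the descriptor-per-page and link-BD counts below are assumptions for the example, not the driver's exact constants):

#include <stdio.h>

/* illustrative stand-ins, not the bnx2x values */
#define MAX_SKB_FRAGS		17
#define BDS_PER_TX_PKT		3	/* start BD + split BD + parsing BD */
#define MAX_TX_DESC_CNT		255	/* usable BDs per ring page */
#define NEXT_PAGE_TX_DESC_CNT	1	/* link BDs consumed per page crossed */

#define NEXT_CNT_PER_TX_PKT(bds) \
	(((bds) + MAX_TX_DESC_CNT - 1) / MAX_TX_DESC_CNT * NEXT_PAGE_TX_DESC_CNT)

#define MAX_BDS_PER_TX_PKT	(MAX_SKB_FRAGS + BDS_PER_TX_PKT)
#define MAX_DESC_PER_TX_PKT	(MAX_BDS_PER_TX_PKT + \
				 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))

int main(void)
{
	printf("worst-case BDs per packet: %d\n", MAX_BDS_PER_TX_PKT);
	printf("queue stop threshold:      %d\n", MAX_DESC_PER_TX_PKT);
	return 0;
}
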
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ad0743b..e879e19 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -40,12 +40,19 @@
* Makes sure the contents of the bp->fp[to].napi are kept
* intact. This is done by first copying the napi struct from
* the target to the source, and then mem copying the entire
- * source onto the target
+ * source onto the target. Update txdata pointers and related
+ * content.
*/
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
struct bnx2x_fastpath *from_fp = &bp->fp[from];
struct bnx2x_fastpath *to_fp = &bp->fp[to];
+ struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
+ struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
+ struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
+ struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
+ int old_max_eth_txqs, new_max_eth_txqs;
+ int old_txdata_index = 0, new_txdata_index = 0;
/* Copy the NAPI object as it has been already initialized */
from_fp->napi = to_fp->napi;
@@ -53,6 +60,30 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
/* Move bnx2x_fastpath contents */
memcpy(to_fp, from_fp, sizeof(*to_fp));
to_fp->index = to;
+
+ /* move sp_objs contents as well, as their indices match fp ones */
+ memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
+
+ /* move fp_stats contents as well, as their indices match fp ones */
+ memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
+
+ /* Update txdata pointers in fp and move txdata content accordingly:
+ * Each fp consumes 'max_cos' txdata structures, so the index should be
+ * decremented by max_cos x delta.
+ */
+
+ old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
+ new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
+ (bp)->max_cos;
+ if (from == FCOE_IDX(bp)) {
+ old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
+ new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
+ }
+
+ memcpy(&bp->bnx2x_txq[old_txdata_index],
+ &bp->bnx2x_txq[new_txdata_index],
+ sizeof(struct bnx2x_fp_txdata));
+ to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
@@ -190,7 +221,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
if ((netif_tx_queue_stopped(txq)) &&
(bp->state == BNX2X_STATE_OPEN) &&
- (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
+ (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
netif_tx_wake_queue(txq);
__netif_tx_unlock(txq);
@@ -264,12 +295,20 @@ static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
* CQE (calculated by HW).
*/
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
- const struct eth_fast_path_rx_cqe *cqe)
+ const struct eth_fast_path_rx_cqe *cqe,
+ bool *l4_rxhash)
{
/* Set Toeplitz hash from CQE */
if ((bp->dev->features & NETIF_F_RXHASH) &&
- (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+ (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
+ enum eth_rss_hash_type htype;
+
+ htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
+ *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
+ (htype == TCP_IPV6_HASH_TYPE);
return le32_to_cpu(cqe->rss_hash_result);
+ }
+ *l4_rxhash = false;
return 0;
}
@@ -323,7 +362,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->tpa_state = BNX2X_TPA_START;
tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
tpa_info->placement_offset = cqe->placement_offset;
- tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
+ tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
if (fp->mode == TPA_MODE_GRO) {
u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
tpa_info->full_page =
@@ -479,7 +518,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
where we are and drop the whole packet */
err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
if (unlikely(err)) {
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
return err;
}
@@ -558,6 +597,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb_reserve(skb, pad + NET_SKB_PAD);
skb_put(skb, len);
skb->rxhash = tpa_info->rxhash;
+ skb->l4_rxhash = tpa_info->l4_rxhash;
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -584,7 +624,7 @@ drop:
/* drop the packet and keep the buffer in the bin */
DP(NETIF_MSG_RX_STATUS,
"Failed to allocate or map a new skb - dropping packet!\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}
static int bnx2x_alloc_rx_data(struct bnx2x *bp,
@@ -617,6 +657,27 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
return 0;
}
+static
+void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+ struct bnx2x_fastpath *fp,
+ struct bnx2x_eth_q_stats *qstats)
+{
+ /* Do nothing if no IP/L4 csum validation was done */
+
+ if (cqe->fast_path_cqe.status_flags &
+ (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
+ ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
+ return;
+
+ /* If both IP/L4 validation were done, check if an error was found. */
+
+ if (cqe->fast_path_cqe.type_error_flags &
+ (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
+ ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+ qstats->hw_csum_err++;
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
@@ -660,6 +721,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
enum eth_rx_cqe_type cqe_fp_type;
u16 len, pad, queue;
u8 *data;
+ bool l4_rxhash;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -757,7 +819,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR flags %x rx packet %u\n",
cqe_fp_flags, sw_comp_cons);
- fp->eth_q_stats.rx_err_discard_pkt++;
+ bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
goto reuse_rx;
}
@@ -770,7 +832,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
if (skb == NULL) {
DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR packet dropped because of alloc failure\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
goto reuse_rx;
}
memcpy(skb->data, data + pad, len);
@@ -784,14 +846,15 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
skb = build_skb(data, 0);
if (unlikely(!skb)) {
kfree(data);
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_qstats(bp, fp)->
+ rx_skb_alloc_failed++;
goto next_rx;
}
skb_reserve(skb, pad);
} else {
DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR packet dropped because of alloc failure\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
goto next_rx;
@@ -802,17 +865,14 @@ reuse_rx:
skb->protocol = eth_type_trans(skb, bp->dev);
/* Set Toeplitz hash for a none-LRO skb */
- skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
+ skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
+ skb->l4_rxhash = l4_rxhash;
skb_checksum_none_assert(skb);
- if (bp->dev->features & NETIF_F_RXCSUM) {
-
- if (likely(BNX2X_RX_CSUM_OK(cqe)))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- fp->eth_q_stats.hw_csum_err++;
- }
+ if (bp->dev->features & NETIF_F_RXCSUM)
+ bnx2x_csum_validate(skb, cqe, fp,
+ bnx2x_fp_qstats(bp, fp));
skb_record_rx_queue(skb, fp->rx_queue);
@@ -873,7 +933,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
prefetch(fp->rx_cons_sb);
for_each_cos_in_tx_queue(fp, cos)
- prefetch(fp->txdata[cos].tx_cons_sb);
+ prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
prefetch(&fp->sb_running_index[SM_RX_ID]);
napi_schedule(&bnx2x_fp(bp, fp->index, napi));
@@ -1190,7 +1250,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
for_each_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
unsigned pkts_compl = 0, bytes_compl = 0;
u16 sw_prod = txdata->tx_pkt_prod;
@@ -1202,7 +1262,8 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
sw_cons++;
}
netdev_tx_reset_queue(
- netdev_get_tx_queue(bp->dev, txdata->txq_index));
+ netdev_get_tx_queue(bp->dev,
+ txdata->txq_index));
}
}
}
@@ -1310,7 +1371,7 @@ void bnx2x_free_irq(struct bnx2x *bp)
free_irq(bp->dev->irq, bp->dev);
}
-int __devinit bnx2x_enable_msix(struct bnx2x *bp)
+int bnx2x_enable_msix(struct bnx2x *bp)
{
int msix_vec = 0, i, rc, req_cnt;
@@ -1564,6 +1625,8 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
#endif
/* Add special queues */
bp->num_queues += NON_ETH_CONTEXT_USE;
+
+ BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
}
/**
@@ -1592,8 +1655,8 @@ static int bnx2x_set_real_num_queues(struct bnx2x *bp)
{
int rc, tx, rx;
- tx = MAX_TXQS_PER_COS * bp->max_cos;
- rx = BNX2X_NUM_ETH_QUEUES(bp);
+ tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
+ rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE;
/* account for fcoe queue */
#ifdef BCM_CNIC
@@ -1651,14 +1714,13 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
static int bnx2x_init_rss_pf(struct bnx2x *bp)
{
int i;
- u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
/* Prepare the initial contents of the indirection table if RSS is
* enabled
*/
- for (i = 0; i < sizeof(ind_table); i++)
- ind_table[i] =
+ for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
+ bp->rss_conf_obj.ind_table[i] =
bp->fp->cl_id +
ethtool_rxfh_indir_default(i, num_eth_queues);
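
bnx2x_init_rss_pf() now fills the indirection table kept inside rss_conf_obj instead of a local array, still spreading slots across the ethernet queues with ethtool_rxfh_indir_default(), which is effectively the slot index modulo the number of RX rings, offset here by the first client id. A standalone sketch of that default spread (the 128-entry size and queue count are illustrative values):

#include <stdio.h>

#define TABLE_SIZE 128	/* illustrative; stands in for the chip's indirection table size */

/* mirrors what ethtool_rxfh_indir_default() computes */
static unsigned int indir_default(unsigned int index, unsigned int n_rx_rings)
{
	return index % n_rx_rings;
}

int main(void)
{
	unsigned char ind_table[TABLE_SIZE];
	unsigned int base_cl_id = 0;	/* stands in for bp->fp->cl_id */
	unsigned int num_eth_queues = 4;
	unsigned int i;

	for (i = 0; i < TABLE_SIZE; i++)
		ind_table[i] = base_cl_id + indir_default(i, num_eth_queues);

	for (i = 0; i < 8; i++)	/* first slots cycle 0,1,2,3,0,1,... */
		printf("slot %u -> client %u\n", i, ind_table[i]);
	return 0;
}
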
@@ -1670,12 +1732,11 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp)
* For 57712 and newer on the other hand it's a per-function
* configuration.
*/
- return bnx2x_config_rss_eth(bp, ind_table,
- bp->port.pmf || !CHIP_IS_E1x(bp));
+ return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
}
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
- u8 *ind_table, bool config_hash)
+ bool config_hash)
{
struct bnx2x_config_rss_params params = {NULL};
int i;
@@ -1698,11 +1759,15 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+ if (rss_obj->udp_rss_v4)
+ __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
+ if (rss_obj->udp_rss_v6)
+ __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
/* Hash bits */
params.rss_result_mask = MULTI_MASK;
- memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
+ memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
if (config_hash) {
/* RSS keys */
@@ -1739,7 +1804,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
int rc;
unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
struct bnx2x_mcast_ramrod_params rparam = {NULL};
- struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+ struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
/***************** Cleanup MACs' object first *************************/
@@ -1750,7 +1815,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
/* Clean ETH primary MAC */
__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
- rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
+ rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
&ramrod_flags);
if (rc != 0)
BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
@@ -1836,11 +1901,16 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
static void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
struct bnx2x_fastpath *fp = &bp->fp[index];
+ struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
+
+ int cos;
struct napi_struct orig_napi = fp->napi;
+ struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
/* bzero bnx2x_fastpath contents */
- if (bp->stats_init)
+ if (bp->stats_init) {
+ memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
memset(fp, 0, sizeof(*fp));
- else {
+ } else {
/* Keep Queue statistics */
struct bnx2x_eth_q_stats *tmp_eth_q_stats;
struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
@@ -1848,26 +1918,27 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
GFP_KERNEL);
if (tmp_eth_q_stats)
- memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
+ memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
sizeof(struct bnx2x_eth_q_stats));
tmp_eth_q_stats_old =
kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
GFP_KERNEL);
if (tmp_eth_q_stats_old)
- memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
+ memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
sizeof(struct bnx2x_eth_q_stats_old));
+ memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
memset(fp, 0, sizeof(*fp));
if (tmp_eth_q_stats) {
- memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
- sizeof(struct bnx2x_eth_q_stats));
+ memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
+ sizeof(struct bnx2x_eth_q_stats));
kfree(tmp_eth_q_stats);
}
if (tmp_eth_q_stats_old) {
- memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
+ memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
sizeof(struct bnx2x_eth_q_stats_old));
kfree(tmp_eth_q_stats_old);
}
@@ -1876,7 +1947,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
/* Restore the NAPI object as it has been already initialized */
fp->napi = orig_napi;
-
+ fp->tpa_info = orig_tpa_info;
fp->bp = bp;
fp->index = index;
if (IS_ETH_FP(fp))
@@ -1885,6 +1956,16 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
/* Special queues support only one CoS */
fp->max_cos = 1;
+ /* Init txdata pointers */
+#ifdef BCM_CNIC
+ if (IS_FCOE_FP(fp))
+ fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
+#endif
+ if (IS_ETH_FP(fp))
+ for_each_cos_in_tx_queue(fp, cos)
+ fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
+ BNX2X_NUM_ETH_QUEUES(bp) + index];
+
/*
* set the tpa flag for each queue. The tpa flag determines the queue
* minimal size so it must be set prior to queue memory allocation
@@ -1934,11 +2015,13 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/*
* Zero fastpath structures preserving invariants like napi, which are
* allocated only once, fp index, max_cos, bp pointer.
- * Also set fp->disable_tpa.
+ * Also set fp->disable_tpa and txdata_ptr.
*/
DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
for_each_queue(bp, i)
bnx2x_bz_fp(bp, i);
+ memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size *
+ sizeof(struct bnx2x_fp_txdata));
/* Set the receive queues buffer size */
@@ -2161,6 +2244,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
break;
case LOAD_DIAG:
+ case LOAD_LOOPBACK_EXT:
bp->state = BNX2X_STATE_DIAG;
break;
@@ -2180,6 +2264,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* re-read iscsi info */
bnx2x_get_iscsi_info(bp);
bnx2x_setup_cnic_irq_info(bp);
+ bnx2x_setup_cnic_info(bp);
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
@@ -2200,7 +2285,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
return -EBUSY;
}
- bnx2x_dcbx_init(bp);
+ /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
+ if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
+ bnx2x_dcbx_init(bp, false);
+
return 0;
#ifndef BNX2X_STOP_ON_ERROR
@@ -2283,6 +2371,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
/* Stop Tx */
bnx2x_tx_disable(bp);
+ netdev_reset_tc(bp->dev);
#ifdef BCM_CNIC
bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
@@ -2441,8 +2530,8 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
#endif
for_each_cos_in_tx_queue(fp, cos)
- if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
- bnx2x_tx_int(bp, &fp->txdata[cos]);
+ if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
+ bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
if (bnx2x_has_rx_work(fp)) {
@@ -2501,8 +2590,6 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
/* we split the first BD into headers and data BDs
* to ease the pain of our fellow microcode engineers
* we use one mapping for both BDs
- * So far this has only been observed to happen
- * in Other Operating Systems(TM)
*/
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
struct bnx2x_fp_txdata *txdata,
@@ -2821,7 +2908,6 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- struct bnx2x_fastpath *fp;
struct netdev_queue *txq;
struct bnx2x_fp_txdata *txdata;
struct sw_tx_bd *tx_buf;
@@ -2831,7 +2917,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
u32 pbd_e2_parsing_data = 0;
u16 pkt_prod, bd_prod;
- int nbd, txq_index, fp_index, txdata_index;
+ int nbd, txq_index;
dma_addr_t mapping;
u32 xmit_type = bnx2x_xmit_type(bp, skb);
int i;
@@ -2850,39 +2936,22 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
- /* decode the fastpath index and the cos index from the txq */
- fp_index = TXQ_TO_FP(txq_index);
- txdata_index = TXQ_TO_COS(txq_index);
-
-#ifdef BCM_CNIC
- /*
- * Override the above for the FCoE queue:
- * - FCoE fp entry is right after the ETH entries.
- * - FCoE L2 queue uses bp->txdata[0] only.
- */
- if (unlikely(!NO_FCOE(bp) && (txq_index ==
- bnx2x_fcoe_tx(bp, txq_index)))) {
- fp_index = FCOE_IDX;
- txdata_index = 0;
- }
-#endif
+ txdata = &bp->bnx2x_txq[txq_index];
/* enable this debug print to view the transmission queue being used
DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
txq_index, fp_index, txdata_index); */
- /* locate the fastpath and the txdata */
- fp = &bp->fp[fp_index];
- txdata = &fp->txdata[txdata_index];
-
/* enable this debug print to view the transmission details
DP(NETIF_MSG_TX_QUEUED,
"transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
txdata->cid, fp_index, txdata_index, txdata, fp); */
if (unlikely(bnx2x_tx_avail(bp, txdata) <
- (skb_shinfo(skb)->nr_frags + 3))) {
- fp->eth_q_stats.driver_xoff++;
+ skb_shinfo(skb)->nr_frags +
+ BDS_PER_TX_PKT +
+ NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
+ bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
netif_tx_stop_queue(txq);
BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
return NETDEV_TX_BUSY;
@@ -3156,7 +3225,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
txdata->tx_bd_prod += nbd;
- if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
+ if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
netif_tx_stop_queue(txq);
/* paired memory barrier is in bnx2x_tx_int(), we have to keep
@@ -3164,8 +3233,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
* fp->bd_tx_cons */
smp_mb();
- fp->eth_q_stats.driver_xoff++;
- if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
+ bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
+ if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
netif_tx_wake_queue(txq);
}
txdata->tx_pkt++;
@@ -3230,7 +3299,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
/* configure traffic class to transmission queue mapping */
for (cos = 0; cos < bp->max_cos; cos++) {
count = BNX2X_NUM_ETH_QUEUES(bp);
- offset = cos * MAX_TXQS_PER_COS;
+ offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
netdev_set_tc_queue(dev, cos, count, offset);
DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
"mapping tc %d to offset %d count %d\n",
@@ -3329,7 +3398,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
if (!skip_tx_queue(bp, fp_index)) {
/* fastpath tx rings: tx_buf tx_desc */
for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
DP(NETIF_MSG_IFDOWN,
"freeing tx memory of fp %d cos %d cid %d\n",
@@ -3401,7 +3470,7 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
cqe_ring_prod);
fp->rx_pkt = fp->rx_calls = 0;
- fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
+ bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
return i - failure_cnt;
}
@@ -3486,7 +3555,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
if (!skip_tx_queue(bp, index)) {
/* fastpath tx rings: tx_buf tx_desc */
for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
DP(NETIF_MSG_IFUP,
"allocating tx memory of fp %d cos %d\n",
@@ -3569,7 +3638,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
#ifdef BCM_CNIC
if (!NO_FCOE(bp))
/* FCoE */
- if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
+ if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
/* we will fail load process instead of mark
* NO_FCOE_FLAG
*/
@@ -3594,7 +3663,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
*/
/* move FCoE fp even NO_FCOE_FLAG is on */
- bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
+ bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
#endif
bp->num_queues -= delta;
BNX2X_ERR("Adjusted num of queues from %d to %d\n",
@@ -3606,7 +3675,11 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
+ kfree(bp->fp->tpa_info);
kfree(bp->fp);
+ kfree(bp->sp_objs);
+ kfree(bp->fp_stats);
+ kfree(bp->bnx2x_txq);
kfree(bp->msix_table);
kfree(bp->ilt);
}
@@ -3617,6 +3690,8 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
struct msix_entry *tbl;
struct bnx2x_ilt *ilt;
int msix_table_size = 0;
+ int fp_array_size;
+ int i;
/*
* The biggest MSI-X table we might need is as a maximum number of fast
@@ -3625,12 +3700,44 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
msix_table_size = bp->igu_sb_cnt + 1;
/* fp array: RSS plus CNIC related L2 queues */
- fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
- sizeof(*fp), GFP_KERNEL);
+ fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE;
+ BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
+
+ fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
if (!fp)
goto alloc_err;
+ for (i = 0; i < fp_array_size; i++) {
+ fp[i].tpa_info =
+ kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
+ sizeof(struct bnx2x_agg_info), GFP_KERNEL);
+ if (!(fp[i].tpa_info))
+ goto alloc_err;
+ }
+
bp->fp = fp;
+ /* allocate sp objs */
+ bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
+ GFP_KERNEL);
+ if (!bp->sp_objs)
+ goto alloc_err;
+
+ /* allocate fp_stats */
+ bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
+ GFP_KERNEL);
+ if (!bp->fp_stats)
+ goto alloc_err;
+
+ /* Allocate memory for the transmission queues array */
+ bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS;
+#ifdef BCM_CNIC
+ bp->bnx2x_txq_size++;
+#endif
+ bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size,
+ sizeof(struct bnx2x_fp_txdata), GFP_KERNEL);
+ if (!bp->bnx2x_txq)
+ goto alloc_err;
+
/* msix table */
tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
if (!tbl)
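
bnx2x_alloc_mem_bp() now carves out the fp array, the per-queue tpa_info arrays, sp_objs, fp_stats and the flat transmission-queue array, each with kcalloc() and a jump to alloc_err on failure so a single error path unwinds whatever was already allocated. A generic, hedged sketch of that idiom with invented structures:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct foo_queue { int id; };
struct foo_stats { u64 rx_packets; };

struct foo_ctx {
	struct foo_queue *queues;
	struct foo_stats *stats;
	int nr;
};

static int foo_alloc_arrays(struct foo_ctx *ctx, int nr)
{
	ctx->nr = nr;
	ctx->queues = NULL;	/* let the error path free unconditionally */
	ctx->stats = NULL;

	ctx->queues = kcalloc(nr, sizeof(*ctx->queues), GFP_KERNEL);
	if (!ctx->queues)
		goto alloc_err;

	ctx->stats = kcalloc(nr, sizeof(*ctx->stats), GFP_KERNEL);
	if (!ctx->stats)
		goto alloc_err;

	return 0;

alloc_err:
	kfree(ctx->stats);	/* kfree(NULL) is a no-op, so this is safe */
	kfree(ctx->queues);
	return -ENOMEM;
}
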
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 7cd99b7..dfa757e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -29,6 +29,7 @@
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
extern int num_queues;
+extern int int_mode;
/************************ Macros ********************************/
#define BNX2X_PCI_FREE(x, y, size) \
@@ -89,12 +90,12 @@ void bnx2x_send_unload_done(struct bnx2x *bp);
* bnx2x_config_rss_pf - configure RSS parameters in a PF.
*
* @bp: driver handle
- * @rss_obj RSS object to use
+ * @rss_obj: RSS object to use
* @ind_table: indirection table to configure
* @config_hash: re-configure RSS hash keys configuration
*/
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
- u8 *ind_table, bool config_hash);
+ bool config_hash);
/**
* bnx2x__init_func_obj - init function object
@@ -244,6 +245,14 @@ int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
* @bp: driver handle
*/
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
+
+/**
+ * bnx2x_setup_cnic_info - provides cnic with updated info
+ *
+ * @bp: driver handle
+ */
+void bnx2x_setup_cnic_info(struct bnx2x *bp);
+
#endif
/**
@@ -409,7 +418,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp);
*
* @bp: driver handle
*/
-void bnx2x_dcbx_init(struct bnx2x *bp);
+void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem);
/**
* bnx2x_set_power_state - set power state to the requested value.
@@ -487,7 +496,7 @@ void bnx2x_netif_start(struct bnx2x *bp);
* fills msix_table, requests vectors, updates num_queues
* according to number of available vectors.
*/
-int __devinit bnx2x_enable_msix(struct bnx2x *bp);
+int bnx2x_enable_msix(struct bnx2x *bp);
/**
* bnx2x_enable_msi - request msi mode from OS, updated internals accordingly
@@ -728,7 +737,7 @@ static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
u8 cos;
for_each_cos_in_tx_queue(fp, cos)
- if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
+ if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
return true;
return false;
}
@@ -780,8 +789,10 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
int i;
+ bp->num_napi_queues = bp->num_queues;
+
/* Add NAPI objects */
- for_each_rx_queue(bp, i)
+ for_each_napi_rx_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
bnx2x_poll, BNX2X_NAPI_WEIGHT);
}
@@ -790,10 +801,12 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
int i;
- for_each_rx_queue(bp, i)
+ for_each_napi_rx_queue(bp, i)
netif_napi_del(&bnx2x_fp(bp, i, napi));
}
+void bnx2x_set_int_mode(struct bnx2x *bp);
+
static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
if (bp->flags & USING_MSIX_FLAG) {
@@ -809,7 +822,8 @@ static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
return num_queues ?
min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
- min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
+ min_t(int, netif_get_num_default_rss_queues(),
+ BNX2X_MAX_QUEUES(bp));
}
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
@@ -865,11 +879,9 @@ static inline int func_by_vn(struct bnx2x *bp, int vn)
return 2 * vn + BP_PORT(bp);
}
-static inline int bnx2x_config_rss_eth(struct bnx2x *bp, u8 *ind_table,
- bool config_hash)
+static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, ind_table,
- config_hash);
+ return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash);
}
/**
@@ -975,8 +987,8 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
struct bnx2x *bp = fp->bp;
/* Configure classification DBs */
- bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
- BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
+ bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
+ fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
bnx2x_sp_mapping(bp, mac_rdata),
BNX2X_FILTER_MAC_PENDING,
&bp->sp_state, obj_type,
@@ -1068,12 +1080,14 @@ static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
}
static inline void bnx2x_init_txdata(struct bnx2x *bp,
- struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index,
- __le16 *tx_cons_sb)
+ struct bnx2x_fp_txdata *txdata, u32 cid,
+ int txq_index, __le16 *tx_cons_sb,
+ struct bnx2x_fastpath *fp)
{
txdata->cid = cid;
txdata->txq_index = txq_index;
txdata->tx_cons_sb = tx_cons_sb;
+ txdata->parent_fp = fp;
DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
txdata->cid, txdata->txq_index);
@@ -1107,18 +1121,13 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
BNX2X_FCOE_ETH_CL_ID_IDX);
- /** Current BNX2X_FCOE_ETH_CID deffinition implies not more than
- * 16 ETH clients per function when CNIC is enabled!
- *
- * Fix it ASAP!!!
- */
- bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
+ bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
-
- bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
- fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);
+ bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
+ fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
+ fp);
DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
@@ -1135,8 +1144,8 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
/* No multi-CoS for FCoE L2 client */
BUG_ON(fp->max_cos != 1);
- bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1,
- BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
+ &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
bnx2x_sp_mapping(bp, q_rdata), q_type);
DP(NETIF_MSG_IFUP,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 4f9244b..8a73374 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -972,23 +972,26 @@ void bnx2x_dcbx_init_params(struct bnx2x *bp)
bp->dcbx_config_params.admin_default_priority = 0;
}
-void bnx2x_dcbx_init(struct bnx2x *bp)
+void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem)
{
u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;
+ /* only PMF can send ADMIN msg to MFW in old MFW versions */
+ if ((!bp->port.pmf) && (!(bp->flags & BC_SUPPORTS_DCBX_MSG_NON_PMF)))
+ return;
+
if (bp->dcbx_enabled <= 0)
return;
/* validate:
* chip of good for dcbx version,
* dcb is wanted
- * the function is pmf
* shmem2 contains DCBX support fields
*/
DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n",
bp->dcb_state, bp->port.pmf);
- if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
+ if (bp->dcb_state == BNX2X_DCB_STATE_ON &&
SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
dcbx_lldp_params_offset =
SHMEM2_RD(bp, dcbx_lldp_params_offset);
@@ -999,12 +1002,23 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
- bnx2x_dcbx_admin_mib_updated_params(bp,
- dcbx_lldp_params_offset);
+ /* need HW lock to avoid scenario of two drivers
+ * writing in parallel to shmem
+ */
+ bnx2x_acquire_hw_lock(bp,
+ HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
+ if (update_shmem)
+ bnx2x_dcbx_admin_mib_updated_params(bp,
+ dcbx_lldp_params_offset);
/* Let HW start negotiation */
bnx2x_fw_command(bp,
DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
+ /* release HW lock only after MFW acks that it finished
+ * reading values from shmem
+ */
+ bnx2x_release_hw_lock(bp,
+ HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
}
}
}
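
The hunk above reworks bnx2x_dcbx_init() so that any function (not only the PMF, on new enough MFW) can kick DCBX negotiation, and it serializes the shmem admin-MIB write against a second driver instance by holding a hardware lock across the MFW notification. A minimal standalone sketch of that acquire / conditional-write / notify / release shape, with hypothetical helpers standing in for bnx2x_acquire_hw_lock(), bnx2x_dcbx_admin_mib_updated_params() and bnx2x_fw_command():

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the HW lock and MFW mailbox used above. */
static void hw_lock_acquire(int res)      { printf("acquire lock %d\n", res); }
static void hw_lock_release(int res)      { printf("release lock %d\n", res); }
static void shmem_write_admin_mib(void)   { puts("write admin MIB to shmem"); }
static void mfw_start_negotiation(void)   { puts("DCBX_ADMIN_PMF_MSG"); }

#define LOCK_DCBX_ADMIN_MIB 1	/* placeholder for the real resource id */

static void dcbx_kick(bool update_shmem)
{
	/* Two PCI functions may race on the shared admin MIB, so both the
	 * write and the "start negotiation" message happen under one lock;
	 * the lock drops only once the MFW has read the values back. */
	hw_lock_acquire(LOCK_DCBX_ADMIN_MIB);
	if (update_shmem)
		shmem_write_admin_mib();
	mfw_start_negotiation();
	hw_lock_release(LOCK_DCBX_ADMIN_MIB);
}

int main(void)
{
	dcbx_kick(true);	/* dcbnl set_all path: push new admin params */
	dcbx_kick(false);	/* re-negotiate with what is already in shmem */
	return 0;
}

The dcbnl hunk below shows the update_shmem=true caller; the false path is presumably the regular load flow, which this excerpt does not show.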
@@ -2063,10 +2077,8 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
"Handling parity error recovery. Try again later\n");
return 1;
}
- if (netif_running(bp->dev)) {
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
- rc = bnx2x_nic_load(bp, LOAD_NORMAL);
- }
+ if (netif_running(bp->dev))
+ bnx2x_dcbx_init(bp, true);
DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc);
if (rc)
return 1;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index ddc18ee..fc4e0e3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -177,6 +177,8 @@ static const struct {
4, STATS_FLAGS_FUNC, "recoverable_errors" },
{ STATS_OFFSET32(unrecoverable_error),
4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
+ { STATS_OFFSET32(eee_tx_lpi),
+ 4, STATS_FLAGS_PORT, "Tx LPI entry count"}
};
#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
@@ -185,7 +187,8 @@ static int bnx2x_get_port_type(struct bnx2x *bp)
int port_type;
u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
switch (bp->link_params.phy[phy_idx].media_type) {
- case ETH_PHY_SFP_FIBER:
+ case ETH_PHY_SFPP_10G_FIBER:
+ case ETH_PHY_SFP_1G_FIBER:
case ETH_PHY_XFP_FIBER:
case ETH_PHY_KR:
case ETH_PHY_CX4:
@@ -218,6 +221,11 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
(bp->port.supported[cfg_idx ^ 1] &
(SUPPORTED_TP | SUPPORTED_FIBRE));
cmd->advertising = bp->port.advertising[cfg_idx];
+ if (bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type ==
+ ETH_PHY_SFP_1G_FIBER) {
+ cmd->supported &= ~(SUPPORTED_10000baseT_Full);
+ cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
+ }
if ((bp->state == BNX2X_STATE_OPEN) && (bp->link_vars.link_up)) {
if (!(bp->flags & MF_FUNC_DIS)) {
@@ -293,7 +301,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
- u32 speed;
+ u32 speed, phy_idx;
if (IS_MF_SD(bp))
return 0;
@@ -548,9 +556,11 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
"10G half not supported\n");
return -EINVAL;
}
-
+ phy_idx = bnx2x_get_cur_phy_idx(bp);
if (!(bp->port.supported[cfg_idx]
- & SUPPORTED_10000baseT_Full)) {
+ & SUPPORTED_10000baseT_Full) ||
+ (bp->link_params.phy[phy_idx].media_type ==
+ ETH_PHY_SFP_1G_FIBER)) {
DP(BNX2X_MSG_ETHTOOL,
"10G full not supported\n");
return -EINVAL;
@@ -824,7 +834,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNX2X_NUM_STATS;
- info->testinfo_len = BNX2X_NUM_TESTS;
+ info->testinfo_len = BNX2X_NUM_TESTS(bp);
info->eedump_len = bp->common.flash_size;
info->regdump_len = bnx2x_get_regs_len(dev);
}
@@ -1150,6 +1160,65 @@ static int bnx2x_get_eeprom(struct net_device *dev,
return rc;
}
+static int bnx2x_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc = 0, phy_idx;
+ u8 *user_data = data;
+ int remaining_len = ee->len, xfer_size;
+ unsigned int page_off = ee->offset;
+
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
+ return -EAGAIN;
+ }
+
+ phy_idx = bnx2x_get_cur_phy_idx(bp);
+ bnx2x_acquire_phy_lock(bp);
+ while (!rc && remaining_len > 0) {
+ xfer_size = (remaining_len > SFP_EEPROM_PAGE_SIZE) ?
+ SFP_EEPROM_PAGE_SIZE : remaining_len;
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ page_off,
+ xfer_size,
+ user_data);
+ remaining_len -= xfer_size;
+ user_data += xfer_size;
+ page_off += xfer_size;
+ }
+
+ bnx2x_release_phy_lock(bp);
+ return rc;
+}
+
+static int bnx2x_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int phy_idx;
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
+ return -EAGAIN;
+ }
+
+ phy_idx = bnx2x_get_cur_phy_idx(bp);
+ switch (bp->link_params.phy[phy_idx].media_type) {
+ case ETH_PHY_SFPP_10G_FIBER:
+ case ETH_PHY_SFP_1G_FIBER:
+ case ETH_PHY_DA_TWINAX:
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
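
The new get_module_eeprom handler above walks the SFP EEPROM in SFP_EEPROM_PAGE_SIZE chunks under the PHY lock, advancing the user buffer and the page offset together. A standalone sketch of the same chunking loop, with a hypothetical reader callback standing in for bnx2x_read_sfp_module_eeprom() and an assumed page size:

#include <stddef.h>

#define EEPROM_PAGE 16	/* assumed stand-in for SFP_EEPROM_PAGE_SIZE */

/* Hypothetical reader: fills dst with len bytes starting at off, 0 on success. */
typedef int (*eeprom_read_fn)(unsigned int off, int len, unsigned char *dst);

static int read_chunked(eeprom_read_fn rd, unsigned int off, int len,
			unsigned char *dst)
{
	int rc = 0, xfer;

	while (!rc && len > 0) {
		/* Never ask the PHY for more than one page at a time. */
		xfer = (len > EEPROM_PAGE) ? EEPROM_PAGE : len;
		rc = rd(off, xfer, dst);
		len -= xfer;
		dst += xfer;
		off += xfer;
	}
	return rc;
}

Together with get_module_info() this is what backs `ethtool -m <dev>` dumps of the plugged module's SFF-8079 data.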
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
u32 cmd_flags)
{
@@ -1531,18 +1600,146 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
return 0;
}
-static const struct {
- char string[ETH_GSTRING_LEN];
-} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
- { "register_test (offline)" },
- { "memory_test (offline)" },
- { "loopback_test (offline)" },
- { "nvram_test (online)" },
- { "interrupt_test (online)" },
- { "link_test (online)" },
- { "idle check (online)" }
+static char *bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF] = {
+ "register_test (offline) ",
+ "memory_test (offline) ",
+ "int_loopback_test (offline)",
+ "ext_loopback_test (offline)",
+ "nvram_test (online) ",
+ "interrupt_test (online) ",
+ "link_test (online) "
};
+static u32 bnx2x_eee_to_adv(u32 eee_adv)
+{
+ u32 modes = 0;
+
+ if (eee_adv & SHMEM_EEE_100M_ADV)
+ modes |= ADVERTISED_100baseT_Full;
+ if (eee_adv & SHMEM_EEE_1G_ADV)
+ modes |= ADVERTISED_1000baseT_Full;
+ if (eee_adv & SHMEM_EEE_10G_ADV)
+ modes |= ADVERTISED_10000baseT_Full;
+
+ return modes;
+}
+
+static u32 bnx2x_adv_to_eee(u32 modes, u32 shift)
+{
+ u32 eee_adv = 0;
+ if (modes & ADVERTISED_100baseT_Full)
+ eee_adv |= SHMEM_EEE_100M_ADV;
+ if (modes & ADVERTISED_1000baseT_Full)
+ eee_adv |= SHMEM_EEE_1G_ADV;
+ if (modes & ADVERTISED_10000baseT_Full)
+ eee_adv |= SHMEM_EEE_10G_ADV;
+
+ return eee_adv << shift;
+}
+
+static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 eee_cfg;
+
+ if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
+ DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
+ return -EOPNOTSUPP;
+ }
+
+ eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
+
+ edata->supported =
+ bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
+ SHMEM_EEE_SUPPORTED_SHIFT);
+
+ edata->advertised =
+ bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
+ SHMEM_EEE_ADV_STATUS_SHIFT);
+ edata->lp_advertised =
+ bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
+ SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+
+ /* SHMEM value is in 16u units --> Convert to 1u units. */
+ edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4;
+
+ edata->eee_enabled = (eee_cfg & SHMEM_EEE_REQUESTED_BIT) ? 1 : 0;
+ edata->eee_active = (eee_cfg & SHMEM_EEE_ACTIVE_BIT) ? 1 : 0;
+ edata->tx_lpi_enabled = (eee_cfg & SHMEM_EEE_LPI_REQUESTED_BIT) ? 1 : 0;
+
+ return 0;
+}
+
+static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 eee_cfg;
+ u32 advertised;
+
+ if (IS_MF(bp))
+ return 0;
+
+ if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
+ DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
+ return -EOPNOTSUPP;
+ }
+
+ eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
+
+ if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) {
+ DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n");
+ return -EOPNOTSUPP;
+ }
+
+ advertised = bnx2x_adv_to_eee(edata->advertised,
+ SHMEM_EEE_ADV_STATUS_SHIFT);
+ if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Direct manipulation of EEE advertisment is not supported\n");
+ return -EINVAL;
+ }
+
+ if (edata->tx_lpi_timer > EEE_MODE_TIMER_MASK) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Maximal Tx Lpi timer supported is %x(u)\n",
+ EEE_MODE_TIMER_MASK);
+ return -EINVAL;
+ }
+ if (edata->tx_lpi_enabled &&
+ (edata->tx_lpi_timer < EEE_MODE_NVRAM_AGGRESSIVE_TIME)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Minimal Tx Lpi timer supported is %d(u)\n",
+ EEE_MODE_NVRAM_AGGRESSIVE_TIME);
+ return -EINVAL;
+ }
+
+ /* All is well; Apply changes*/
+ if (edata->eee_enabled)
+ bp->link_params.eee_mode |= EEE_MODE_ADV_LPI;
+ else
+ bp->link_params.eee_mode &= ~EEE_MODE_ADV_LPI;
+
+ if (edata->tx_lpi_enabled)
+ bp->link_params.eee_mode |= EEE_MODE_ENABLE_LPI;
+ else
+ bp->link_params.eee_mode &= ~EEE_MODE_ENABLE_LPI;
+
+ bp->link_params.eee_mode &= ~EEE_MODE_TIMER_MASK;
+ bp->link_params.eee_mode |= (edata->tx_lpi_timer &
+ EEE_MODE_TIMER_MASK) |
+ EEE_MODE_OVERRIDE_NVRAM |
+ EEE_MODE_OUTPUT_TIME;
+
+ /* Restart link to propagate changes */
+ if (netif_running(dev)) {
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_link_set(bp);
+ }
+
+ return 0;
+}
+
+
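
bnx2x_set_eee() above folds the requested state into link_params.eee_mode: the advertise/LPI bits are toggled individually, then the timer field is cleared and rewritten together with the override flags. A small standalone sketch of that clear-then-set composition; the EEE_MODE_* values here are assumptions for illustration only (the real definitions live in the link header, not in this hunk):

#include <stdint.h>
#include <stdio.h>

/* Assumed values -- placeholders for the real EEE_MODE_* definitions. */
#define EEE_MODE_TIMER_MASK	0x0000ffffu
#define EEE_MODE_OUTPUT_TIME	(1u << 28)	/* timer field holds a time value */
#define EEE_MODE_OVERRIDE_NVRAM	(1u << 29)	/* ignore the NVRAM default */

static uint32_t set_lpi_timer(uint32_t eee_mode, uint32_t timer)
{
	eee_mode &= ~EEE_MODE_TIMER_MASK;		/* drop any old timer */
	eee_mode |= (timer & EEE_MODE_TIMER_MASK) |
		    EEE_MODE_OVERRIDE_NVRAM |
		    EEE_MODE_OUTPUT_TIME;
	return eee_mode;
}

int main(void)
{
	printf("eee_mode = 0x%08x\n", set_lpi_timer(0, 0x123));
	return 0;
}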
enum {
BNX2X_CHIP_E1_OFST = 0,
BNX2X_CHIP_E1H_OFST,
@@ -1811,6 +2008,14 @@ static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n");
+
+ cnt = 1400;
+ while (!bp->link_vars.link_up && cnt--)
+ msleep(20);
+
+ if (cnt <= 0 && !bp->link_vars.link_up)
+ DP(BNX2X_MSG_ETHTOOL,
+ "Timeout waiting for link init\n");
}
}
@@ -1821,7 +2026,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
unsigned char *packet;
struct bnx2x_fastpath *fp_rx = &bp->fp[0];
struct bnx2x_fastpath *fp_tx = &bp->fp[0];
- struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0];
+ struct bnx2x_fp_txdata *txdata = fp_tx->txdata_ptr[0];
u16 tx_start_idx, tx_idx;
u16 rx_start_idx, rx_idx;
u16 pkt_prod, bd_prod;
@@ -1836,13 +2041,16 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
u16 len;
int rc = -ENODEV;
u8 *data;
- struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
+ struct netdev_queue *txq = netdev_get_tx_queue(bp->dev,
+ txdata->txq_index);
/* check the loopback mode */
switch (loopback_mode) {
case BNX2X_PHY_LOOPBACK:
- if (bp->link_params.loopback_mode != LOOPBACK_XGXS)
+ if (bp->link_params.loopback_mode != LOOPBACK_XGXS) {
+ DP(BNX2X_MSG_ETHTOOL, "PHY loopback not supported\n");
return -EINVAL;
+ }
break;
case BNX2X_MAC_LOOPBACK:
if (CHIP_IS_E3(bp)) {
@@ -1859,6 +2067,13 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
break;
+ case BNX2X_EXT_LOOPBACK:
+ if (bp->link_params.loopback_mode != LOOPBACK_EXT) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't configure external loopback\n");
+ return -EINVAL;
+ }
+ break;
default:
DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EINVAL;
@@ -2030,6 +2245,38 @@ static int bnx2x_test_loopback(struct bnx2x *bp)
return rc;
}
+static int bnx2x_test_ext_loopback(struct bnx2x *bp)
+{
+ int rc;
+ u8 is_serdes =
+ (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
+
+ if (BP_NOMCP(bp))
+ return -ENODEV;
+
+ if (!netif_running(bp->dev))
+ return BNX2X_EXT_LOOPBACK_FAILED;
+
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test, nic_load (for external lb) failed\n");
+ return -ENODEV;
+ }
+ bnx2x_wait_for_link(bp, 1, is_serdes);
+
+ bnx2x_netif_stop(bp, 1);
+
+ rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK);
+ if (rc)
+ DP(BNX2X_MSG_ETHTOOL, "EXT loopback failed (res %d)\n", rc);
+
+ bnx2x_netif_start(bp);
+
+ return rc;
+}
+
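
The external loopback path above is reached from user space through the standard self-test ioctl when ETH_TEST_FL_EXTERNAL_LB is set together with the offline flag (see the self-test hunk further down). A hedged user-space sketch of that request; the interface name and the size of the result array are assumptions:

/* cc -o selftest selftest.c && sudo ./selftest eth0 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(int argc, char **argv)
{
	const char *ifname = (argc > 1) ? argv[1] : "eth0";	/* assumption */
	struct {
		struct ethtool_test hdr;
		__u64 results[16];	/* room for BNX2X_NUM_TESTS entries */
	} test;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	memset(&test, 0, sizeof(test));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	test.hdr.cmd = ETHTOOL_TEST;
	/* External LB only runs inside the offline block, so ask for both. */
	test.hdr.flags = ETH_TEST_FL_OFFLINE | ETH_TEST_FL_EXTERNAL_LB;
	ifr.ifr_data = (void *)&test;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		return 1;
	}
	printf("flags after test: 0x%x (failed bit: 0x%x)\n",
	       test.hdr.flags, ETH_TEST_FL_FAILED);
	return 0;
}

Newer builds of the ethtool utility appear to expose the same flag as `ethtool -t <dev> external_lb`.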
#define CRC32_RESIDUAL 0xdebb20e3
static int bnx2x_test_nvram(struct bnx2x *bp)
@@ -2112,7 +2359,7 @@ static int bnx2x_test_intr(struct bnx2x *bp)
return -ENODEV;
}
- params.q_obj = &bp->fp->q_obj;
+ params.q_obj = &bp->sp_objs->q_obj;
params.cmd = BNX2X_Q_CMD_EMPTY;
__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
@@ -2125,24 +2372,31 @@ static void bnx2x_self_test(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
u8 is_serdes;
+ int rc;
+
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
netdev_err(bp->dev,
"Handling parity error recovery. Try again later\n");
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
+ DP(BNX2X_MSG_ETHTOOL,
+ "Self-test command parameters: offline = %d, external_lb = %d\n",
+ (etest->flags & ETH_TEST_FL_OFFLINE),
+ (etest->flags & ETH_TEST_FL_EXTERNAL_LB)>>2);
- memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
+ memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
- if (!netif_running(dev))
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test when interface is down\n");
return;
+ }
- /* offline tests are not supported in MF mode */
- if (IS_MF(bp))
- etest->flags &= ~ETH_TEST_FL_OFFLINE;
is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
- if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ /* offline tests are not supported in MF mode */
+ if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) {
int port = BP_PORT(bp);
u32 val;
u8 link_up;
@@ -2155,7 +2409,14 @@ static void bnx2x_self_test(struct net_device *dev,
link_up = bp->link_vars.link_up;
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
- bnx2x_nic_load(bp, LOAD_DIAG);
+ rc = bnx2x_nic_load(bp, LOAD_DIAG);
+ if (rc) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test, nic_load (for offline) failed\n");
+ return;
+ }
+
/* wait until link state is restored */
bnx2x_wait_for_link(bp, 1, is_serdes);
@@ -2168,30 +2429,51 @@ static void bnx2x_self_test(struct net_device *dev,
etest->flags |= ETH_TEST_FL_FAILED;
}
- buf[2] = bnx2x_test_loopback(bp);
+ buf[2] = bnx2x_test_loopback(bp); /* internal LB */
if (buf[2] != 0)
etest->flags |= ETH_TEST_FL_FAILED;
+ if (etest->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */
+ if (buf[3] != 0)
+ etest->flags |= ETH_TEST_FL_FAILED;
+ etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+ }
+
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
/* restore input for TX port IF */
REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
-
- bnx2x_nic_load(bp, LOAD_NORMAL);
+ rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+ if (rc) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test, nic_load (for online) failed\n");
+ return;
+ }
/* wait until link state is restored */
bnx2x_wait_for_link(bp, link_up, is_serdes);
}
if (bnx2x_test_nvram(bp) != 0) {
- buf[3] = 1;
+ if (!IS_MF(bp))
+ buf[4] = 1;
+ else
+ buf[0] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
if (bnx2x_test_intr(bp) != 0) {
- buf[4] = 1;
+ if (!IS_MF(bp))
+ buf[5] = 1;
+ else
+ buf[1] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
if (bnx2x_link_test(bp, is_serdes) != 0) {
- buf[5] = 1;
+ if (!IS_MF(bp))
+ buf[6] = 1;
+ else
+ buf[2] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
@@ -2236,7 +2518,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
return num_stats;
case ETH_SS_TEST:
- return BNX2X_NUM_TESTS;
+ return BNX2X_NUM_TESTS(bp);
default:
return -EINVAL;
@@ -2246,7 +2528,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
- int i, j, k;
+ int i, j, k, offset, start;
char queue_name[MAX_QUEUE_NAME_LEN+1];
switch (stringset) {
@@ -2277,7 +2559,17 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
break;
case ETH_SS_TEST:
- memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
+ /* First 4 tests cannot be done in MF mode */
+ if (!IS_MF(bp))
+ start = 0;
+ else
+ start = 4;
+ for (i = 0, j = start; j < (start + BNX2X_NUM_TESTS(bp));
+ i++, j++) {
+ offset = sprintf(buf+32*i, "%s",
+ bnx2x_tests_str_arr[j]);
+ *(buf + 32*i + offset) = '\0';
+ }
break;
}
}
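
The string table and the result buffer now shift by four entries in MF mode: the offline tests are dropped, so buf[0..2] map to nvram/interrupt/link instead of buf[4..6]. A standalone sketch of that mapping, assuming BNX2X_NUM_TESTS(bp) evaluates to 7 in single-function mode and 3 in MF mode (the macro itself is defined outside this hunk):

#include <stdio.h>

/* Strings from the rewritten bnx2x_tests_str_arr[] above (padding trimmed). */
static const char *tests[] = {
	"register_test (offline)", "memory_test (offline)",
	"int_loopback_test (offline)", "ext_loopback_test (offline)",
	"nvram_test (online)", "interrupt_test (online)", "link_test (online)",
};

static void dump(int is_mf)
{
	int start = is_mf ? 4 : 0;	/* first 4 tests are skipped in MF */
	int num = is_mf ? 3 : 7;	/* assumed BNX2X_NUM_TESTS(bp) */
	int i;

	printf("%s:\n", is_mf ? "MF mode" : "SF mode");
	for (i = 0; i < num; i++)
		printf("  buf[%d] -> %s\n", i, tests[start + i]);
}

int main(void)
{
	dump(0);
	dump(1);
	return 0;
}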
@@ -2291,7 +2583,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
if (is_multi(bp)) {
for_each_eth_queue(bp, i) {
- hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
+ hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats;
for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
if (bnx2x_q_stats_arr[j].size == 0) {
/* skip this counter */
@@ -2375,6 +2667,41 @@ static int bnx2x_set_phys_id(struct net_device *dev,
return 0;
}
+static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+{
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ if (bp->rss_conf_obj.udp_rss_v4)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V6_FLOW:
+ if (bp->rss_conf_obj.udp_rss_v6)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ info->data = 0;
+ break;
+ }
+
+ return 0;
+}
+
static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rules __always_unused)
{
@@ -2384,7 +2711,102 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
case ETHTOOL_GRXRINGS:
info->data = BNX2X_NUM_ETH_QUEUES(bp);
return 0;
+ case ETHTOOL_GRXFH:
+ return bnx2x_get_rss_flags(bp, info);
+ default:
+ DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+{
+ int udp_rss_requested;
+
+ DP(BNX2X_MSG_ETHTOOL,
+ "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ info->flow_type, info->data);
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ /* For TCP only 4-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Command parameters not supported\n");
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ /* For UDP either 2-tuple hash or 4-tuple hash is supported */
+ if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ udp_rss_requested = 1;
+ else if (info->data == (RXH_IP_SRC | RXH_IP_DST))
+ udp_rss_requested = 0;
+ else
+ return -EINVAL;
+ if ((info->flow_type == UDP_V4_FLOW) &&
+ (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
+ bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
+ DP(BNX2X_MSG_ETHTOOL,
+ "rss re-configured, UDP 4-tupple %s\n",
+ udp_rss_requested ? "enabled" : "disabled");
+ return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+ } else if ((info->flow_type == UDP_V6_FLOW) &&
+ (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
+ bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
+ DP(BNX2X_MSG_ETHTOOL,
+ "rss re-configured, UDP 4-tuple %s\n",
+ udp_rss_requested ? "enabled" : "disabled");
+ return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+ } else {
+ return 0;
+ }
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ /* For IP only 2-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Command parameters not supported\n");
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IP_USER_FLOW:
+ case ETHER_FLOW:
+ /* RSS is not supported for these protocols */
+ if (info->data) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Command parameters not supported\n");
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+}
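
For UDP flows the SRXFH handler above accepts exactly two hash configurations: the 2-tuple (addresses only) and the 4-tuple (addresses plus ports) RXH_* combinations, and it only reprograms the RSS object when the setting actually flips. A standalone sketch of that classification using the RXH_* bits from linux/ethtool.h:

#include <stdio.h>
#include <linux/ethtool.h>	/* RXH_IP_SRC, RXH_IP_DST, RXH_L4_B_* */

/* 1 = 4-tuple, 0 = 2-tuple, -1 = rejected, mirroring the UDP cases above. */
static int classify_udp_hash(unsigned long long data)
{
	if (data == (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return 1;
	if (data == (RXH_IP_SRC | RXH_IP_DST))
		return 0;
	return -1;
}

int main(void)
{
	printf("4-tuple: %d\n", classify_udp_hash(RXH_IP_SRC | RXH_IP_DST |
						  RXH_L4_B_0_1 | RXH_L4_B_2_3));
	printf("2-tuple: %d\n", classify_udp_hash(RXH_IP_SRC | RXH_IP_DST));
	printf("bogus:   %d\n", classify_udp_hash(RXH_IP_SRC));
	return 0;
}

From user space this corresponds to something like `ethtool -N <dev> rx-flow-hash udp4 sdfn` versus `... rx-flow-hash udp4 sd`.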
+
+static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ return bnx2x_set_rss_flags(bp, info);
default:
DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EOPNOTSUPP;
@@ -2424,7 +2846,6 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
- u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
/*
@@ -2436,10 +2857,88 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
* align the received table to the Client ID of the leading RSS
* queue
*/
- ind_table[i] = indir[i] + bp->fp->cl_id;
+ bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
}
- return bnx2x_config_rss_eth(bp, ind_table, false);
+ return bnx2x_config_rss_eth(bp, false);
+}
+
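
The indirection table now lives inside rss_conf_obj, and each entry ethtool hands over (a queue index) is biased by the client ID of the leading RSS queue before being stored, as the comment above explains. A tiny standalone illustration of that bias, with a made-up table size and cl_id:

#include <stdio.h>

#define IND_TABLE_SIZE 8	/* stand-in for T_ETH_INDIRECTION_TABLE_SIZE */

int main(void)
{
	/* What ethtool delivers: queue indices relative to queue 0. */
	unsigned int indir[IND_TABLE_SIZE] = { 0, 1, 2, 3, 0, 1, 2, 3 };
	unsigned char ind_table[IND_TABLE_SIZE];
	unsigned char leading_cl_id = 17;	/* hypothetical bp->fp->cl_id */
	int i;

	/* Firmware wants client IDs, so align to the leading queue's cl_id. */
	for (i = 0; i < IND_TABLE_SIZE; i++)
		ind_table[i] = indir[i] + leading_cl_id;

	for (i = 0; i < IND_TABLE_SIZE; i++)
		printf("%u ", ind_table[i]);
	printf("\n");
	return 0;
}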
+/**
+ * bnx2x_get_channels - gets the number of RSS queues.
+ *
+ * @dev: net device
+ * @channels: returns the number of max / current queues
+ */
+static void bnx2x_get_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ channels->max_combined = BNX2X_MAX_RSS_COUNT(bp);
+ channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp);
+}
+
+/**
+ * bnx2x_change_num_queues - change the number of RSS queues.
+ *
+ * @bp: bnx2x private structure
+ *
+ * Re-configure interrupt mode to get the new number of MSI-X
+ * vectors and re-add NAPI objects.
+ */
+static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
+{
+ bnx2x_del_all_napi(bp);
+ bnx2x_disable_msi(bp);
+ BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
+ bnx2x_set_int_mode(bp);
+ bnx2x_add_all_napi(bp);
+}
+
+/**
+ * bnx2x_set_channels - sets the number of RSS queues.
+ *
+ * @dev: net device
+ * @channels: includes the number of queues requested
+ */
+static int bnx2x_set_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+
+ DP(BNX2X_MSG_ETHTOOL,
+ "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
+ channels->rx_count, channels->tx_count, channels->other_count,
+ channels->combined_count);
+
+ /* We don't support separate rx / tx channels.
+ * We don't allow setting 'other' channels.
+ */
+ if (channels->rx_count || channels->tx_count || channels->other_count
+ || (channels->combined_count == 0) ||
+ (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) {
+ DP(BNX2X_MSG_ETHTOOL, "command parameters not supported\n");
+ return -EINVAL;
+ }
+
+ /* Check if there was a change in the active parameters */
+ if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) {
+ DP(BNX2X_MSG_ETHTOOL, "No change in active parameters\n");
+ return 0;
+ }
+
+ /* Set the requested number of queues in bp context.
+ * Note that the actual number of queues created during load may be
+ * less than requested if memory is low.
+ */
+ if (unlikely(!netif_running(dev))) {
+ bnx2x_change_num_queues(bp, channels->combined_count);
+ return 0;
+ }
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_change_num_queues(bp, channels->combined_count);
+ return bnx2x_nic_load(bp, LOAD_NORMAL);
}
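
bnx2x_set_channels() accepts only "combined" channels; when the interface is up it tears the NIC down, reprograms the interrupt mode and NAPI contexts for the new count, and loads again. A hedged user-space sketch of driving it through ETHTOOL_SCHANNELS (the interface name and count are assumptions); `ethtool -L <dev> combined N` does the same from the command line:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int set_combined(const char *ifname, unsigned int n)
{
	struct ethtool_channels ch;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	memset(&ch, 0, sizeof(ch));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ch;

	ch.cmd = ETHTOOL_GCHANNELS;		/* read max_combined first */
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return -1;
	if (n == 0 || n > ch.max_combined)
		return -1;			/* the driver would reject it anyway */

	ch.cmd = ETHTOOL_SCHANNELS;
	ch.rx_count = ch.tx_count = ch.other_count = 0;
	ch.combined_count = n;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}

int main(void)
{
	return set_combined("eth0", 4) < 0;	/* "eth0", 4: assumptions */
}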
static const struct ethtool_ops bnx2x_ethtool_ops = {
@@ -2469,9 +2968,17 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.set_phys_id = bnx2x_set_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
.get_rxnfc = bnx2x_get_rxnfc,
+ .set_rxnfc = bnx2x_set_rxnfc,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh_indir = bnx2x_get_rxfh_indir,
.set_rxfh_indir = bnx2x_set_rxfh_indir,
+ .get_channels = bnx2x_get_channels,
+ .set_channels = bnx2x_set_channels,
+ .get_module_info = bnx2x_get_module_info,
+ .get_module_eeprom = bnx2x_get_module_eeprom,
+ .get_eee = bnx2x_get_eee,
+ .set_eee = bnx2x_set_eee,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void bnx2x_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 426f77a..bbc66ce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -321,9 +321,7 @@
#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0
-/**
- * This file defines HSI constants common to all microcode flows
- */
+/* This file defines HSI constants common to all microcode flows */
#define PROTOCOL_STATE_BIT_OFFSET 6
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index a440a8b..76b6e65 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -10,6 +10,7 @@
#define BNX2X_HSI_H
#include "bnx2x_fw_defs.h"
+#include "bnx2x_mfw_req.h"
#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
@@ -33,12 +34,6 @@ struct license_key {
u32 reserved_b[4];
};
-
-#define PORT_0 0
-#define PORT_1 1
-#define PORT_MAX 2
-#define NVM_PATH_MAX 2
-
/****************************************************************************
* Shared HW configuration *
****************************************************************************/
@@ -1067,8 +1062,18 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
uses the same defines as link_config */
u32 mfw_wol_link_cfg2; /* 0x480 */
- u32 Reserved2[17]; /* 0x484 */
+ /* EEE power saving mode */
+ u32 eee_power_mode; /* 0x484 */
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_MASK 0x000000FF
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT 0
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED 0x00000001
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE 0x00000002
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY 0x00000003
+
+
+ u32 Reserved2[16]; /* 0x488 */
};
@@ -1140,6 +1145,7 @@ struct drv_port_mb {
u32 link_status;
/* Driver should update this field on any link change event */
+ #define LINK_STATUS_NONE (0<<0)
#define LINK_STATUS_LINK_FLAG_MASK 0x00000001
#define LINK_STATUS_LINK_UP 0x00000001
#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E
@@ -1197,6 +1203,7 @@ struct drv_port_mb {
#define LINK_STATUS_PFC_ENABLED 0x20000000
#define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000
+ #define LINK_STATUS_SFP_TX_FAULT 0x80000000
u32 port_stx;
@@ -1240,9 +1247,11 @@ struct drv_func_mb {
#define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002
#define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
#define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201
+ #define REQ_BC_VER_4_FCOE_FEATURES 0x00070209
#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
#define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
+ #define REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF 0x00070401
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
@@ -1255,6 +1264,8 @@ struct drv_func_mb {
#define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000
#define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000
+ #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000
+
#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
#define REQ_BC_VER_4_SET_MF_BW 0x00060202
#define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
@@ -1320,6 +1331,8 @@ struct drv_func_mb {
#define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000
#define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000
+ #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000
+
#define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
#define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
@@ -1383,6 +1396,8 @@ struct drv_func_mb {
#define DRV_STATUS_DRV_INFO_REQ 0x04000000
+ #define DRV_STATUS_EEE_NEGOTIATION_RESULTS 0x08000000
+
u32 virt_mac_upper;
#define VIRT_MAC_SIGN_MASK 0xffff0000
#define VIRT_MAC_SIGNATURE 0x564d0000
@@ -1613,6 +1628,11 @@ struct fw_flr_mb {
struct fw_flr_ack ack;
};
+struct eee_remote_vals {
+ u32 tx_tw;
+ u32 rx_tw;
+};
+
/**** SUPPORT FOR SHMEM ARRRAYS ***
* The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to
* define arrays with storage types smaller then unsigned dwords.
@@ -2053,6 +2073,41 @@ struct shmem2_region {
#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00
#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8
u32 ibft_host_addr; /* initialized by option ROM */
+ struct eee_remote_vals eee_remote_vals[PORT_MAX];
+ u32 reserved[E2_FUNC_MAX];
+
+
+ /* the status of EEE auto-negotiation
+ * bits 15:0 the configured tx-lpi entry timer value. Depends on bit 31.
+ * bits 19:16 the supported modes for EEE.
+ * bits 23:20 the speeds advertised for EEE.
+ * bits 27:24 the speeds the Link partner advertised for EEE.
+ * The supported/adv. modes in bits 27:19 originate from the
+ * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed).
+ * bit 28 when 1'b1 EEE was requested.
+ * bit 29 when 1'b1 tx lpi was requested.
+ * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted iff
+ * 30:29 are 2'b11.
+ * bit 31 when 1'b0 bits 15:0 contain a PORT_FEAT_CFG_EEE_ define as
+ * value. When 1'b1 those bits contain a value in multiples of 16 microseconds.
+ */
+ u32 eee_status[PORT_MAX];
+ #define SHMEM_EEE_TIMER_MASK 0x0000ffff
+ #define SHMEM_EEE_SUPPORTED_MASK 0x000f0000
+ #define SHMEM_EEE_SUPPORTED_SHIFT 16
+ #define SHMEM_EEE_ADV_STATUS_MASK 0x00f00000
+ #define SHMEM_EEE_100M_ADV (1<<0)
+ #define SHMEM_EEE_1G_ADV (1<<1)
+ #define SHMEM_EEE_10G_ADV (1<<2)
+ #define SHMEM_EEE_ADV_STATUS_SHIFT 20
+ #define SHMEM_EEE_LP_ADV_STATUS_MASK 0x0f000000
+ #define SHMEM_EEE_LP_ADV_STATUS_SHIFT 24
+ #define SHMEM_EEE_REQUESTED_BIT 0x10000000
+ #define SHMEM_EEE_LPI_REQUESTED_BIT 0x20000000
+ #define SHMEM_EEE_ACTIVE_BIT 0x40000000
+ #define SHMEM_EEE_TIME_OUTPUT_BIT 0x80000000
+
+ u32 sizeof_port_stats;
};
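
The eee_status layout documented above packs a timer plus three 4-bit speed sets and a few request/active flags into one dword. A standalone decoder using the masks verbatim, fed with an arbitrary example value:

#include <stdio.h>
#include <stdint.h>

/* Masks copied from the shmem2_region hunk above. */
#define SHMEM_EEE_TIMER_MASK		0x0000ffff
#define SHMEM_EEE_SUPPORTED_MASK	0x000f0000
#define SHMEM_EEE_SUPPORTED_SHIFT	16
#define SHMEM_EEE_ADV_STATUS_MASK	0x00f00000
#define SHMEM_EEE_ADV_STATUS_SHIFT	20
#define SHMEM_EEE_LP_ADV_STATUS_MASK	0x0f000000
#define SHMEM_EEE_LP_ADV_STATUS_SHIFT	24
#define SHMEM_EEE_REQUESTED_BIT		0x10000000
#define SHMEM_EEE_LPI_REQUESTED_BIT	0x20000000
#define SHMEM_EEE_ACTIVE_BIT		0x40000000
#define SHMEM_EEE_TIME_OUTPUT_BIT	0x80000000

static void decode(uint32_t eee)
{
	printf("timer=%u %s, sup=0x%x adv=0x%x lp=0x%x, req=%d lpi=%d active=%d\n",
	       (unsigned)(eee & SHMEM_EEE_TIMER_MASK),
	       (eee & SHMEM_EEE_TIME_OUTPUT_BIT) ? "(x16 usec)" : "(hsi mode)",
	       (unsigned)((eee & SHMEM_EEE_SUPPORTED_MASK) >> SHMEM_EEE_SUPPORTED_SHIFT),
	       (unsigned)((eee & SHMEM_EEE_ADV_STATUS_MASK) >> SHMEM_EEE_ADV_STATUS_SHIFT),
	       (unsigned)((eee & SHMEM_EEE_LP_ADV_STATUS_MASK) >> SHMEM_EEE_LP_ADV_STATUS_SHIFT),
	       !!(eee & SHMEM_EEE_REQUESTED_BIT),
	       !!(eee & SHMEM_EEE_LPI_REQUESTED_BIT),
	       !!(eee & SHMEM_EEE_ACTIVE_BIT));
}

int main(void)
{
	decode(0xd07700c8);	/* arbitrary example value */
	return 0;
}

Note the same x16 scaling that bnx2x_get_eee() applies with its `<< 4` when reporting tx_lpi_timer.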
@@ -2599,6 +2654,9 @@ struct host_port_stats {
u32 pfc_frames_tx_lo;
u32 pfc_frames_rx_hi;
u32 pfc_frames_rx_lo;
+
+ u32 eee_lpi_count_hi;
+ u32 eee_lpi_count_lo;
};
@@ -2638,118 +2696,6 @@ struct host_func_stats {
/* VIC definitions */
#define VICSTATST_UIF_INDEX 2
-/* current drv_info version */
-#define DRV_INFO_CUR_VER 1
-
-/* drv_info op codes supported */
-enum drv_info_opcode {
- ETH_STATS_OPCODE,
- FCOE_STATS_OPCODE,
- ISCSI_STATS_OPCODE
-};
-
-#define ETH_STAT_INFO_VERSION_LEN 12
-/* Per PCI Function Ethernet Statistics required from the driver */
-struct eth_stats_info {
- /* Function's Driver Version. padded to 12 */
- u8 version[ETH_STAT_INFO_VERSION_LEN];
- /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
- u8 mac_local[8];
- u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
- u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
- u32 mtu_size; /* MTU Size. Note : Negotiated MTU */
- u32 feature_flags; /* Feature_Flags. */
-#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
-#define FEATURE_ETH_LSO_MASK 0x02
-#define FEATURE_ETH_BOOTMODE_MASK 0x1C
-#define FEATURE_ETH_BOOTMODE_SHIFT 2
-#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
-#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
-#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
-#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
-#define FEATURE_ETH_TOE_MASK 0x20
- u32 lso_max_size; /* LSO MaxOffloadSize. */
- u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */
- /* Num Offloaded Connections TCP_IPv4. */
- u32 ipv4_ofld_cnt;
- /* Num Offloaded Connections TCP_IPv6. */
- u32 ipv6_ofld_cnt;
- u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */
- u32 txq_size; /* TX Descriptors Queue Size */
- u32 rxq_size; /* RX Descriptors Queue Size */
- /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
- u32 txq_avg_depth;
- /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
- u32 rxq_avg_depth;
- /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/
- u32 iov_offload;
- /* Number of NetQueue/VMQ Config'd. */
- u32 netq_cnt;
- u32 vf_cnt; /* Num VF assigned to this PF. */
-};
-
-/* Per PCI Function FCOE Statistics required from the driver */
-struct fcoe_stats_info {
- u8 version[12]; /* Function's Driver Version. */
- u8 mac_local[8]; /* Locally Admin Addr. */
- u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
- u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
- /* QoS Priority (per 802.1p). 0-7255 */
- u32 qos_priority;
- u32 txq_size; /* FCoE TX Descriptors Queue Size. */
- u32 rxq_size; /* FCoE RX Descriptors Queue Size. */
- /* FCoE TX Descriptor Queue Avg Depth. */
- u32 txq_avg_depth;
- /* FCoE RX Descriptors Queue Avg Depth. */
- u32 rxq_avg_depth;
- u32 rx_frames_lo; /* FCoE RX Frames received. */
- u32 rx_frames_hi; /* FCoE RX Frames received. */
- u32 rx_bytes_lo; /* FCoE RX Bytes received. */
- u32 rx_bytes_hi; /* FCoE RX Bytes received. */
- u32 tx_frames_lo; /* FCoE TX Frames sent. */
- u32 tx_frames_hi; /* FCoE TX Frames sent. */
- u32 tx_bytes_lo; /* FCoE TX Bytes sent. */
- u32 tx_bytes_hi; /* FCoE TX Bytes sent. */
-};
-
-/* Per PCI Function iSCSI Statistics required from the driver*/
-struct iscsi_stats_info {
- u8 version[12]; /* Function's Driver Version. */
- u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
- u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
- /* QoS Priority (per 802.1p). 0-7255 */
- u32 qos_priority;
- u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */
- u8 ww_port_name[64]; /* iSCSI World wide port name */
- u8 boot_target_name[64];/* iSCSI Boot Target Name. */
- u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */
- u32 boot_target_portal; /* iSCSI Boot Target Portal. */
- u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
- u32 max_frame_size; /* Max Frame Size. bytes */
- u32 txq_size; /* PDU TX Descriptors Queue Size. */
- u32 rxq_size; /* PDU RX Descriptors Queue Size. */
- u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
- u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
- u32 rx_pdus_lo; /* iSCSI PDUs received. */
- u32 rx_pdus_hi; /* iSCSI PDUs received. */
- u32 rx_bytes_lo; /* iSCSI RX Bytes received. */
- u32 rx_bytes_hi; /* iSCSI RX Bytes received. */
- u32 tx_pdus_lo; /* iSCSI PDUs sent. */
- u32 tx_pdus_hi; /* iSCSI PDUs sent. */
- u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
- u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
- u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable.
- * 9 nibbles, the position of each nibble
- * represents the C-PCP value, the value
- * of the nibble = S-PCP value.
- */
-};
-
-union drv_info_to_mcp {
- struct eth_stats_info ether_stat;
- struct fcoe_stats_info fcoe_stat;
- struct iscsi_stats_info iscsi_stat;
-};
/* stats collected for afex.
* NOTE: structure is exactly as expected to be received by the switch.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index a3fb721..f4beb46 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -40,6 +40,7 @@
#define I2C_BSC0 0
#define I2C_BSC1 1
#define I2C_WA_RETRY_CNT 3
+#define I2C_WA_PWR_ITER (I2C_WA_RETRY_CNT - 1)
#define MCPR_IMC_COMMAND_READ_OP 1
#define MCPR_IMC_COMMAND_WRITE_OP 2
@@ -284,7 +285,6 @@
#define ETS_E3B0_PBF_MIN_W_VAL (10000)
#define MAX_PACKET_SIZE (9700)
-#define WC_UC_TIMEOUT 100
#define MAX_KR_LINK_RETRY 4
/**********************************************************/
@@ -1305,6 +1305,94 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
return 0;
}
+
+/******************************************************************/
+/* EEE section */
+/******************************************************************/
+static u8 bnx2x_eee_has_cap(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (REG_RD(bp, params->shmem2_base) <=
+ offsetof(struct shmem2_region, eee_status[params->port]))
+ return 0;
+
+ return 1;
+}
+
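
bnx2x_eee_has_cap() just above decides whether the field exists at all by reading the size dword at the start of shmem2 and comparing it with the offset of eee_status[]; firmware that predates the field simply advertises a smaller region. A standalone sketch of that guard with a toy structure (the field layout here is illustrative only):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Toy stand-in for shmem2_region: the first dword is the region size and
 * new fields are only ever appended, so "size > offsetof(field)" means
 * the running firmware actually provides that field. */
struct toy_shmem2 {
	uint32_t size;
	uint32_t older_fields[3];
	uint32_t eee_status[2];		/* per-port, newer firmware only */
};

static int has_eee_status(uint32_t advertised_size)
{
	return advertised_size > offsetof(struct toy_shmem2, eee_status);
}

int main(void)
{
	printf("old fw: %d, new fw: %d\n",
	       has_eee_status(16),			/* stops before eee_status */
	       has_eee_status(sizeof(struct toy_shmem2)));
	return 0;
}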
+static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
+{
+ switch (nvram_mode) {
+ case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
+ *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
+ break;
+ case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
+ *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
+ break;
+ case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
+ *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
+ break;
+ default:
+ *idle_timer = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
+{
+ switch (idle_timer) {
+ case EEE_MODE_NVRAM_BALANCED_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
+ break;
+ case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
+ break;
+ case EEE_MODE_NVRAM_LATENCY_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
+ break;
+ default:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
+ break;
+ }
+
+ return 0;
+}
+
+static u32 bnx2x_eee_calc_timer(struct link_params *params)
+{
+ u32 eee_mode, eee_idle;
+ struct bnx2x *bp = params->bp;
+
+ if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
+ if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+ /* time value in eee_mode --> used directly*/
+ eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
+ } else {
+ /* hsi value in eee_mode --> time */
+ if (bnx2x_eee_nvram_to_time(params->eee_mode &
+ EEE_MODE_NVRAM_MASK,
+ &eee_idle))
+ return 0;
+ }
+ } else {
+ /* hsi values in nvram --> time*/
+ eee_mode = ((REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].
+ eee_power_mode)) &
+ PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+ PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+
+ if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
+ return 0;
+ }
+
+ return eee_idle;
+}
+
+
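
bnx2x_eee_nvram_to_time() and bnx2x_eee_time_to_nvram() above are meant to be inverses over the three named power modes, with everything else collapsing to "disabled". A tiny self-checking sketch; the timer values used are placeholders for the real EEE_MODE_NVRAM_*_TIME constants, which this hunk does not define:

#include <stdio.h>
#include <stdint.h>

/* Mode values match the PORT_FEAT_CFG_EEE_POWER_MODE_* block in the HSI
 * hunk above; the times are placeholders. */
enum { MODE_DISABLED, MODE_BALANCED, MODE_AGGRESSIVE, MODE_LOW_LATENCY };
static const uint32_t mode_time[] = { 0, 0xa00, 0x100, 0x6000 };

static uint32_t nvram_to_time(uint32_t mode)
{
	return (mode <= MODE_LOW_LATENCY) ? mode_time[mode] : 0;
}

static uint32_t time_to_nvram(uint32_t t)
{
	uint32_t m;

	for (m = MODE_BALANCED; m <= MODE_LOW_LATENCY; m++)
		if (mode_time[m] == t)
			return m;
	return MODE_DISABLED;	/* unknown timer -> EEE disabled */
}

int main(void)
{
	uint32_t m;

	for (m = MODE_DISABLED; m <= MODE_LOW_LATENCY; m++)
		printf("mode %u -> time 0x%x -> mode %u\n",
		       (unsigned)m, (unsigned)nvram_to_time(m),
		       (unsigned)time_to_nvram(nvram_to_time(m)));
	return 0;
}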
/******************************************************************/
/* PFC section */
/******************************************************************/
@@ -1539,7 +1627,7 @@ static void bnx2x_umac_enable(struct link_params *params,
/* Reset UMAC */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
@@ -1630,7 +1718,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
* ports of the path
*/
- if ((CHIP_NUM(bp) == CHIP_NUM_57840) &&
+ if ((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) &&
(REG_RD(bp, MISC_REG_RESET_REG_2) &
MISC_REGISTERS_RESET_REG_2_XMAC)) {
DP(NETIF_MSG_LINK,
@@ -1641,7 +1729,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
/* Hard reset */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
MISC_REGISTERS_RESET_REG_2_XMAC);
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
MISC_REGISTERS_RESET_REG_2_XMAC);
@@ -1671,7 +1759,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
/* Soft reset */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
@@ -1729,6 +1817,14 @@ static int bnx2x_xmac_enable(struct link_params *params,
/* update PFC */
bnx2x_update_pfc_xmac(params, vars, 0);
+ if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
+ DP(NETIF_MSG_LINK, "Setting XMAC for EEE\n");
+ REG_WR(bp, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008);
+ REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x1);
+ } else {
+ REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x0);
+ }
+
/* Enable TX and RX */
val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
@@ -1784,11 +1880,6 @@ static int bnx2x_emac_enable(struct link_params *params,
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
EMAC_TX_MODE_RESET);
- if (CHIP_REV_IS_SLOW(bp)) {
- /* config GMII mode */
- val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
- EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
- } else { /* ASIC */
/* pause enable/disable */
bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
EMAC_RX_MODE_FLOW_EN);
@@ -1811,7 +1902,6 @@ static int bnx2x_emac_enable(struct link_params *params,
} else
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
EMAC_TX_MODE_FLOW_EN);
- }
/* KEEP_VLAN_TAG, promiscuous */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
@@ -1850,23 +1940,23 @@ static int bnx2x_emac_enable(struct link_params *params,
val &= ~0x810;
EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
- /* enable emac */
+ /* Enable emac */
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
- /* enable emac for jumbo packets */
+ /* Enable emac for jumbo packets */
EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
(EMAC_RX_MTU_SIZE_JUMBO_ENA |
(ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
- /* strip CRC */
+ /* Strip CRC */
REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
- /* disable the NIG in/out to the bmac */
+ /* Disable the NIG in/out to the bmac */
REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0);
REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
- /* enable the NIG in/out to the emac */
+ /* Enable the NIG in/out to the emac */
REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
val = 0;
if ((params->feature_config_flags &
@@ -1901,7 +1991,7 @@ static void bnx2x_update_pfc_bmac1(struct link_params *params,
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
- /* tx control */
+ /* TX control */
val = 0xc0;
if (!(params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED) &&
@@ -1961,7 +2051,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
wb_data[0] &= ~(1<<2);
} else {
DP(NETIF_MSG_LINK, "PFC is disabled\n");
- /* disable PFC RX & TX & STATS and set 8 COS */
+ /* Disable PFC RX & TX & STATS and set 8 COS */
wb_data[0] = 0x8;
wb_data[1] = 0;
}
@@ -2055,7 +2145,7 @@ static int bnx2x_pfc_brb_get_config_params(
PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
+ /* Non pause able*/
config_val->non_pauseable_th.pause_xoff =
PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
@@ -2083,7 +2173,7 @@ static int bnx2x_pfc_brb_get_config_params(
PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
+ /* Non pause able*/
config_val->non_pauseable_th.pause_xoff =
PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
@@ -2113,7 +2203,7 @@ static int bnx2x_pfc_brb_get_config_params(
PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
+ /* Non pause able*/
config_val->non_pauseable_th.pause_xoff =
PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
@@ -2131,7 +2221,7 @@ static int bnx2x_pfc_brb_get_config_params(
PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
+ /* Non pause able*/
config_val->non_pauseable_th.pause_xoff =
PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
@@ -2188,7 +2278,7 @@ static void bnx2x_pfc_brb_get_e3b0_config_params(
if (pfc_params->cos0_pauseable !=
pfc_params->cos1_pauseable) {
- /* nonpauseable= Lossy + pauseable = Lossless*/
+ /* Nonpauseable= Lossy + pauseable = Lossless*/
e3b0_val->lb_guarantied =
PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
e3b0_val->mac_0_class_t_guarantied =
@@ -2387,9 +2477,9 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
* This function is needed because NIG ARB_CREDIT_WEIGHT_X are
* not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable.
******************************************************************************/
-int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
- u8 cos_entry,
- u32 priority_mask, u8 port)
+static int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
+ u8 cos_entry,
+ u32 priority_mask, u8 port)
{
u32 nig_reg_rx_priority_mask_add = 0;
@@ -2439,6 +2529,16 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
port_mb[params->port].link_status), link_status);
}
+static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (bnx2x_eee_has_cap(params))
+ REG_WR(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ eee_status[params->port]), eee_status);
+}
+
static void bnx2x_update_pfc_nig(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *nig_params)
@@ -2506,7 +2606,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 :
NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
- /* output enable for RX_XCM # IF */
+ /* Output enable for RX_XCM # IF */
REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN :
NIG_REG_XCM0_OUT_EN, xcm_out_en);
@@ -2555,10 +2655,10 @@ int bnx2x_update_pfc(struct link_params *params,
bnx2x_update_mng(params, vars->link_status);
- /* update NIG params */
+ /* Update NIG params */
bnx2x_update_pfc_nig(params, vars, pfc_params);
- /* update BRB params */
+ /* Update BRB params */
bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
if (bnx2x_status)
return bnx2x_status;
@@ -2613,7 +2713,7 @@ static int bnx2x_bmac1_enable(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
wb_data, 2);
- /* tx MAC SA */
+ /* TX MAC SA */
wb_data[0] = ((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
(params->mac_addr[4] << 8) |
@@ -2622,7 +2722,7 @@ static int bnx2x_bmac1_enable(struct link_params *params,
params->mac_addr[1]);
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
- /* mac control */
+ /* MAC control */
val = 0x3;
if (is_lb) {
val |= 0x4;
@@ -2632,24 +2732,24 @@ static int bnx2x_bmac1_enable(struct link_params *params,
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
- /* set rx mtu */
+ /* Set rx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
bnx2x_update_pfc_bmac1(params, vars);
- /* set tx mtu */
+ /* Set tx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
- /* set cnt max size */
+ /* Set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
- /* configure safc */
+ /* Configure SAFC */
wb_data[0] = 0x1000200;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
@@ -2683,7 +2783,7 @@ static int bnx2x_bmac2_enable(struct link_params *params,
udelay(30);
- /* tx MAC SA */
+ /* TX MAC SA */
wb_data[0] = ((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
(params->mac_addr[4] << 8) |
@@ -2702,18 +2802,18 @@ static int bnx2x_bmac2_enable(struct link_params *params,
wb_data, 2);
udelay(30);
- /* set rx mtu */
+ /* Set RX MTU */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
udelay(30);
- /* set tx mtu */
+ /* Set TX MTU */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
udelay(30);
- /* set cnt max size */
+ /* Set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
@@ -2731,15 +2831,15 @@ static int bnx2x_bmac_enable(struct link_params *params,
u8 port = params->port;
struct bnx2x *bp = params->bp;
u32 val;
- /* reset and unreset the BigMac */
+ /* Reset and unreset the BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- msleep(1);
+ usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- /* enable access for bmac registers */
+ /* Enable access for bmac registers */
REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
/* Enable BMAC according to BMAC type*/
@@ -2797,7 +2897,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
BIGMAC_REGISTER_BMAC_CONTROL,
wb_data, 2);
}
- msleep(1);
+ usleep_range(1000, 2000);
}
}
@@ -2809,17 +2909,16 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
u32 init_crd, crd;
u32 count = 1000;
- /* disable port */
+ /* Disable port */
REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
- /* wait for init credit */
+ /* Wait for init credit */
init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
while ((init_crd != crd) && count) {
- msleep(5);
-
+ usleep_range(5000, 10000);
crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
count--;
}
@@ -2836,18 +2935,18 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
line_speed == SPEED_1000 ||
line_speed == SPEED_2500) {
REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
- /* update threshold */
+ /* Update threshold */
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
- /* update init credit */
+ /* Update init credit */
init_crd = 778; /* (800-18-4) */
} else {
u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
ETH_OVREHEAD)/16;
REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
- /* update threshold */
+ /* Update threshold */
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
- /* update init credit */
+ /* Update init credit */
switch (line_speed) {
case SPEED_10000:
init_crd = thresh + 553 - 22;
@@ -2862,12 +2961,12 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
line_speed, init_crd);
- /* probe the credit changes */
+ /* Probe the credit changes */
REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
- msleep(5);
+ usleep_range(5000, 10000);
REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
- /* enable port */
+ /* Enable port */
REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
return 0;
}
@@ -2934,7 +3033,7 @@ static int bnx2x_cl22_write(struct bnx2x *bp,
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
mode & ~EMAC_MDIO_MODE_CLAUSE_45);
- /* address */
+ /* Address */
tmp = ((phy->addr << 21) | (reg << 16) | val |
EMAC_MDIO_COMM_COMMAND_WRITE_22 |
EMAC_MDIO_COMM_START_BUSY);
@@ -2970,7 +3069,7 @@ static int bnx2x_cl22_read(struct bnx2x *bp,
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
mode & ~EMAC_MDIO_MODE_CLAUSE_45);
- /* address */
+ /* Address */
val = ((phy->addr << 21) | (reg << 16) |
EMAC_MDIO_COMM_COMMAND_READ_22 |
EMAC_MDIO_COMM_START_BUSY);
@@ -3008,7 +3107,7 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
EMAC_MDIO_STATUS_10MB);
- /* address */
+ /* Address */
val = ((phy->addr << 21) | (devad << 16) | reg |
EMAC_MDIO_COMM_COMMAND_ADDRESS |
EMAC_MDIO_COMM_START_BUSY);
@@ -3029,7 +3128,7 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
*ret_val = 0;
rc = -EFAULT;
} else {
- /* data */
+ /* Data */
val = ((phy->addr << 21) | (devad << 16) |
EMAC_MDIO_COMM_COMMAND_READ_45 |
EMAC_MDIO_COMM_START_BUSY);
@@ -3077,7 +3176,7 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
EMAC_MDIO_STATUS_10MB);
- /* address */
+ /* Address */
tmp = ((phy->addr << 21) | (devad << 16) | reg |
EMAC_MDIO_COMM_COMMAND_ADDRESS |
EMAC_MDIO_COMM_START_BUSY);
@@ -3097,7 +3196,7 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
netdev_err(bp->dev, "MDC/MDIO access timeout\n");
rc = -EFAULT;
} else {
- /* data */
+ /* Data */
tmp = ((phy->addr << 21) | (devad << 16) | val |
EMAC_MDIO_COMM_COMMAND_WRITE_45 |
EMAC_MDIO_COMM_START_BUSY);
@@ -3187,23 +3286,23 @@ static int bnx2x_bsc_read(struct link_params *params,
xfer_cnt = 16 - lc_addr;
- /* enable the engine */
+ /* Enable the engine */
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
val |= MCPR_IMC_COMMAND_ENABLE;
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
- /* program slave device ID */
+ /* Program slave device ID */
val = (sl_devid << 16) | sl_addr;
REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val);
- /* start xfer with 0 byte to update the address pointer ???*/
+ /* Start xfer with 0 byte to update the address pointer ???*/
val = (MCPR_IMC_COMMAND_ENABLE) |
(MCPR_IMC_COMMAND_WRITE_OP <<
MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
(lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
- /* poll for completion */
+ /* Poll for completion */
i = 0;
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
@@ -3219,7 +3318,7 @@ static int bnx2x_bsc_read(struct link_params *params,
if (rc == -EFAULT)
return rc;
- /* start xfer with read op */
+ /* Start xfer with read op */
val = (MCPR_IMC_COMMAND_ENABLE) |
(MCPR_IMC_COMMAND_READ_OP <<
MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
@@ -3227,7 +3326,7 @@ static int bnx2x_bsc_read(struct link_params *params,
(xfer_cnt);
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
- /* poll for completion */
+ /* Poll for completion */
i = 0;
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
@@ -3330,7 +3429,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
port = port ^ 1;
lane = (port<<1) + path;
- } else { /* two port mode - no port swap */
+ } else { /* Two port mode - no port swap */
/* Figure out path swap value */
path_swap_ovr =
@@ -3408,7 +3507,7 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
val = SERDES_RESET_BITS << (port*16);
- /* reset and unreset the SerDes/XGXS */
+ /* Reset and unreset the SerDes/XGXS */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
udelay(500);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
@@ -3429,7 +3528,7 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
val = XGXS_RESET_BITS << (port*16);
- /* reset and unreset the SerDes/XGXS */
+ /* Reset and unreset the SerDes/XGXS */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
udelay(500);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
@@ -3521,7 +3620,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
{
u16 val;
struct bnx2x *bp = params->bp;
- /* read modify write pause advertizing */
+ /* Read-modify-write pause advertising */
bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
@@ -3656,44 +3755,35 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars) {
- u16 val16 = 0, lane, bam37 = 0;
- struct bnx2x *bp = params->bp;
+ u16 val16 = 0, lane, i;
+ struct bnx2x *bp = params->bp;
+ static struct bnx2x_reg_set reg_set[] = {
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555},
+ {MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190},
+ /* Disable Autoneg: re-enable it after adv is done. */
+ {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0}
+ };
DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
/* Set to default registers that may be overriden by 10G force */
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
- MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_RX66_CONTROL, 0x7415);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190);
- /* Disable Autoneg: re-enable it after adv is done. */
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, 0);
+ for (i = 0; i < ARRAY_SIZE(reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
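The loop above is the table-driven form of the removed write sequence: the per-register devad/reg/val triples move into a static array and one loop issues them. A minimal standalone sketch of that pattern follows (struct name, register numbers and the logging stub are illustrative only, not the driver's; in kernel code the sizeof division is normally spelled ARRAY_SIZE()):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct reg_set {			/* mirrors the {devad, reg, val} triple */
	uint16_t devad;
	uint16_t reg;
	uint16_t val;
};

/* Stand-in for bnx2x_cl45_write(); here it only logs the access. */
static void cl45_write(uint16_t devad, uint16_t reg, uint16_t val)
{
	printf("write devad=0x%x reg=0x%x val=0x%x\n", devad, reg, val);
}

int main(void)
{
	static const struct reg_set init_seq[] = {
		{ 0x3, 0x8370, 0x0007 },	/* placeholder triples */
		{ 0x7, 0x8368, 0x0000 },
	};
	size_t i;

	/* One loop replaces a run of back-to-back write calls. */
	for (i = 0; i < sizeof(init_seq) / sizeof(init_seq[0]); i++)
		cl45_write(init_seq[i].devad, init_seq[i].reg, init_seq[i].val);
	return 0;
}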
/* Check adding advertisement for 1G KX */
if (((vars->line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
(vars->line_speed == SPEED_1000)) {
- u16 sd_digital;
+ u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
val16 |= (1<<5);
/* Enable CL37 1G Parallel Detect */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
- (sd_digital | 0x1));
-
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1);
DP(NETIF_MSG_LINK, "Advertize 1G\n");
}
if (((vars->line_speed == SPEED_AUTO_NEG) &&
@@ -3703,7 +3793,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
val16 |= (1<<7);
/* Enable 10G Parallel Detect */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
+ MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
DP(NETIF_MSG_LINK, "Advertize 10G\n");
}
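Several removed read-then-write pairs in this function collapse into bnx2x_cl45_read_or_write(), i.e. a read/modify/write that only ORs bits in. A self-contained sketch of such a helper over a toy register file (the real driver works through CL45 MDIO accessors, so the names and signatures here are assumptions):

#include <stdint.h>

static uint16_t regs[32];	/* toy register file standing in for the PHY */

static void cl45_read(uint16_t reg, uint16_t *val)  { *val = regs[reg & 31]; }
static void cl45_write(uint16_t reg, uint16_t val)  { regs[reg & 31] = val; }

/* Read-OR-write: set bits without disturbing the rest of the register. */
static void cl45_read_or_write(uint16_t reg, uint16_t or_val)
{
	uint16_t val;

	cl45_read(reg, &val);
	cl45_write(reg, val | or_val);
}

int main(void)
{
	cl45_write(5, 0x0040);
	cl45_read_or_write(5, 0x0001);	/* regs[5] becomes 0x0041 */
	return (regs[5] == 0x0041) ? 0 : 1;
}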
@@ -3737,10 +3827,9 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
offsetof(struct shmem_region, dev_info.
port_hw_config[params->port].default_cfg)) &
PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, &bam37);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, bam37 | 1);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL,
+ 1);
DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
}
@@ -3754,11 +3843,8 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
}
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL5_MISC7, &val16);
-
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
/* Over 1G - AN local device user page 1 */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -3775,50 +3861,35 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 val;
-
- /* Disable Autoneg */
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
-
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
-
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00);
-
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0);
-
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
-
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL3_UP1, 0x1);
-
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL5_MISC7, 0xa);
-
- /* Disable CL36 PCS Tx */
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0);
-
- /* Double Wide Single Data Rate @ pll rate */
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF);
-
- /* Leave cl72 training enable, needed for KR */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+ u16 i;
+ static struct bnx2x_reg_set reg_set[] = {
+ /* Disable Autoneg */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ 0x3f00},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa},
+ /* Disable CL36 PCS Tx */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0},
+ /* Double Wide Single Data Rate @ pll rate */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF},
+ /* Leave cl72 training enable, needed for KR */
+ {MDIO_PMA_DEVAD,
MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
- 0x2);
+ 0x2}
+ };
+
+ for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
/* Leave CL72 enabled */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
- &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
- val | 0x3800);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ 0x3800);
/* Set speed via PMA/PMD register */
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
@@ -3839,7 +3910,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_CONTROL, 0xF9);
- /* set and clear loopback to cause a reset to 64/66 decoder */
+ /* Set and clear loopback to cause a reset to 64/66 decoder */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -3854,16 +3925,12 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 misc1_val, tap_val, tx_driver_val, lane, val;
/* Hold rxSeqStart */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000));
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
/* Hold tx_fifo_reset */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1));
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x1);
/* Disable CL73 AN */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
@@ -3875,10 +3942,8 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA));
/* Disable 100FX Idle detect */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL3, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL3, (val | 0x0080));
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL3, 0x0080);
/* Set Block address to Remote PHY & Clear forced_speed[5] */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -3939,16 +4004,20 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
tx_driver_val);
/* Enable fiber mode, enable and invert sig_det */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0xd);
/* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL4_MISC3, &val);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, 0x8080);
+
+ /* Enable LPI pass through */
+ DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080);
+ MDIO_WC_REG_EEE_COMBO_CONTROL0,
+ 0x7c);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
/* 10G XFI Full Duplex */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -4138,40 +4207,35 @@ static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
u16 lane)
{
struct bnx2x *bp = params->bp;
- u16 val16;
-
+ u16 i;
+ static struct bnx2x_reg_set wc_regs[] = {
+ {MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL1, 0x014a},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL3, 0x0800},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8008},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ 0x0195},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ 0x0007},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
+ 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140}
+ };
/* Set XFI clock comp as default. */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_RX66_CONTROL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13));
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, (3<<13));
+
+ for (i = 0; i < sizeof(wc_regs)/sizeof(struct bnx2x_reg_set); i++)
+ bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg,
+ wc_regs[i].val);
- bnx2x_warpcore_reset_lane(bp, phy, 1);
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL1, 0x014a);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL3, 0x0800);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL4_MISC3, 0x8008);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000);
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_TX_FIR_TAP, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140);
- bnx2x_warpcore_reset_lane(bp, phy, 0);
+
}
static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
@@ -4259,7 +4323,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
if (!vars->turn_to_run_wc_rt)
return;
- /* return if there is no link partner */
+ /* Return if there is no link partner */
if (!(bnx2x_warpcore_get_sigdet(phy, params))) {
DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n");
return;
@@ -4293,7 +4357,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
bnx2x_warpcore_reset_lane(bp, phy, 1);
bnx2x_warpcore_reset_lane(bp, phy, 0);
- /* restart Autoneg */
+ /* Restart Autoneg */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
@@ -4310,6 +4374,23 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
} /*params->rx_tx_asic_rst*/
}
+static void bnx2x_warpcore_config_sfi(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u16 lane = bnx2x_get_warpcore_lane(phy, params);
+ struct bnx2x *bp = params->bp;
+ bnx2x_warpcore_clear_regs(phy, params, lane);
+ if ((params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] ==
+ SPEED_10000) &&
+ (phy->media_type != ETH_PHY_SFP_1G_FIBER)) {
+ DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
+ bnx2x_warpcore_set_10G_XFI(phy, params, 0);
+ } else {
+ DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
+ bnx2x_warpcore_set_sgmii_speed(phy, params, 1, 0);
+ }
+}
+
static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -4370,19 +4451,11 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
break;
case PORT_HW_CFG_NET_SERDES_IF_SFI:
-
- bnx2x_warpcore_clear_regs(phy, params, lane);
- if (vars->line_speed == SPEED_10000) {
- DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
- bnx2x_warpcore_set_10G_XFI(phy, params, 0);
- } else if (vars->line_speed == SPEED_1000) {
- DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
- bnx2x_warpcore_set_sgmii_speed(
- phy, params, 1, 0);
- }
/* Issue Module detection */
if (bnx2x_is_sfp_module_plugged(phy, params))
bnx2x_sfp_module_detection(phy, params);
+
+ bnx2x_warpcore_config_sfi(phy, params);
break;
case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
@@ -4499,12 +4572,9 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, 0);
/* Enable 1G MDIO (1-copy) */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
- &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
- val16 | 0x10);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ 0x10);
/* Set 1G loopback based on lane (1-copy) */
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4517,22 +4587,19 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
bnx2x_set_aer_mmd(params, phy);
} else {
/* 10G & 20G */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
- 0x4000);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
+ 0x4000);
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 | 0x1);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1);
}
}
-void bnx2x_sync_link(struct link_params *params,
- struct link_vars *vars)
+
+static void bnx2x_sync_link(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 link_10g_plus;
@@ -4605,7 +4672,7 @@ void bnx2x_sync_link(struct link_params *params,
USES_WARPCORE(bp) &&
(vars->line_speed == SPEED_1000))
vars->phy_flags |= PHY_SGMII_FLAG;
- /* anything 10 and over uses the bmac */
+ /* Anything 10 and over uses the bmac */
link_10g_plus = (vars->line_speed >= SPEED_10000);
if (link_10g_plus) {
@@ -4619,7 +4686,7 @@ void bnx2x_sync_link(struct link_params *params,
else
vars->mac_type = MAC_TYPE_EMAC;
}
- } else { /* link down */
+ } else { /* Link down */
DP(NETIF_MSG_LINK, "phy link down\n");
vars->phy_link_up = 0;
@@ -4628,10 +4695,12 @@ void bnx2x_sync_link(struct link_params *params,
vars->duplex = DUPLEX_FULL;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- /* indicate no mac active */
+ /* Indicate no mac active */
vars->mac_type = MAC_TYPE_NONE;
if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ if (vars->link_status & LINK_STATUS_SFP_TX_FAULT)
+ vars->phy_flags |= PHY_SFP_TX_FAULT_FLAG;
}
}
@@ -4697,7 +4766,7 @@ static void bnx2x_set_master_ln(struct link_params *params,
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
- /* set the master_ln for AN */
+ /* Set the master_ln for AN */
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
@@ -4720,7 +4789,7 @@ static int bnx2x_reset_unicore(struct link_params *params,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
- /* reset the unicore */
+ /* Reset the unicore */
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
@@ -4729,11 +4798,11 @@ static int bnx2x_reset_unicore(struct link_params *params,
if (set_serdes)
bnx2x_set_serdes_access(bp, params->port);
- /* wait for the reset to self clear */
+ /* Wait for the reset to self clear */
for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
udelay(5);
- /* the reset erased the previous bank value */
+ /* The reset erased the previous bank value */
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
@@ -4951,7 +5020,7 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
}
-/* program SerDes, forced speed */
+/* Program SerDes, forced speed */
static void bnx2x_program_serdes(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -4959,7 +5028,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 reg_val;
- /* program duplex, disable autoneg and sgmii*/
+ /* Program duplex, disable autoneg and sgmii*/
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
@@ -4978,7 +5047,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_MISC1, &reg_val);
- /* clearing the speed value before setting the right speed */
+ /* Clearing the speed value before setting the right speed */
DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
@@ -5007,7 +5076,7 @@ static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 val = 0;
- /* set extended capabilities */
+ /* Set extended capabilities */
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
val |= MDIO_OVER_1G_UP1_2_5G;
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
@@ -5027,7 +5096,7 @@ static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 val;
- /* for AN, we are always publishing full duplex */
+ /* For AN, we are always publishing full duplex */
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
@@ -5089,14 +5158,14 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 control1;
- /* in SGMII mode, the unicore is always slave */
+ /* In SGMII mode, the unicore is always slave */
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
&control1);
control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
- /* set sgmii mode (and not fiber) */
+ /* Set sgmii mode (and not fiber) */
control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
@@ -5105,9 +5174,9 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
control1);
- /* if forced speed */
+ /* If forced speed */
if (!(vars->line_speed == SPEED_AUTO_NEG)) {
- /* set speed, disable autoneg */
+ /* Set speed, disable autoneg */
u16 mii_control;
CL22_RD_OVER_CL45(bp, phy,
@@ -5128,16 +5197,16 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
break;
case SPEED_10:
- /* there is nothing to set for 10M */
+ /* There is nothing to set for 10M */
break;
default:
- /* invalid speed for SGMII */
+ /* Invalid speed for SGMII */
DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
vars->line_speed);
break;
}
- /* setting the full duplex */
+ /* Setting the full duplex */
if (phy->req_duplex == DUPLEX_FULL)
mii_control |=
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
@@ -5147,7 +5216,7 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
mii_control);
} else { /* AN mode */
- /* enable and restart AN */
+ /* Enable and restart AN */
bnx2x_restart_autoneg(phy, params, 0);
}
}
@@ -5243,7 +5312,7 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- /* resolve from gp_status in case of AN complete and not sgmii */
+ /* Resolve from gp_status in case of AN complete and not sgmii */
if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) {
/* Update the advertised flow-controled of LD/LP in AN */
if (phy->req_line_speed == SPEED_AUTO_NEG)
@@ -5467,7 +5536,7 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
bnx2x_xgxs_an_resolve(phy, params, vars,
gp_status);
}
- } else { /* link_down */
+ } else { /* Link_down */
if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
SINGLE_MEDIA_DIRECT(params)) {
/* Check signal is detected */
@@ -5616,12 +5685,12 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
u16 tx_driver;
u16 bank;
- /* read precomp */
+ /* Read precomp */
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_OVER_1G,
MDIO_OVER_1G_LP_UP2, &lp_up2);
- /* bits [10:7] at lp_up2, positioned at [15:12] */
+ /* Bits [10:7] at lp_up2, positioned at [15:12] */
lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
@@ -5635,7 +5704,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
bank,
MDIO_TX0_TX_DRIVER, &tx_driver);
- /* replace tx_driver bits [15:12] */
+ /* Replace tx_driver bits [15:12] */
if (lp_up2 !=
(tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
@@ -5731,16 +5800,16 @@ static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
bnx2x_set_preemphasis(phy, params);
- /* forced speed requested? */
+ /* Forced speed requested? */
if (vars->line_speed != SPEED_AUTO_NEG ||
(SINGLE_MEDIA_DIRECT(params) &&
params->loopback_mode == LOOPBACK_EXT)) {
DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
- /* disable autoneg */
+ /* Disable autoneg */
bnx2x_set_autoneg(phy, params, vars, 0);
- /* program speed and duplex */
+ /* Program speed and duplex */
bnx2x_program_serdes(phy, params, vars);
} else { /* AN_mode */
@@ -5749,14 +5818,14 @@ static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
/* AN enabled */
bnx2x_set_brcm_cl37_advertisement(phy, params);
- /* program duplex & pause advertisement (for aneg) */
+ /* Program duplex & pause advertisement (for aneg) */
bnx2x_set_ieee_aneg_advertisement(phy, params,
vars->ieee_fc);
- /* enable autoneg */
+ /* Enable autoneg */
bnx2x_set_autoneg(phy, params, vars, enable_cl73);
- /* enable and restart AN */
+ /* Enable and restart AN */
bnx2x_restart_autoneg(phy, params, enable_cl73);
}
@@ -5792,12 +5861,12 @@ static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy,
bnx2x_set_master_ln(params, phy);
rc = bnx2x_reset_unicore(params, phy, 0);
- /* reset the SerDes and wait for reset bit return low */
- if (rc != 0)
+ /* Reset the SerDes and wait for reset bit return low */
+ if (rc)
return rc;
bnx2x_set_aer_mmd(params, phy);
- /* setting the masterLn_def again after the reset */
+ /* Setting the masterLn_def again after the reset */
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
bnx2x_set_master_ln(params, phy);
bnx2x_set_swap_lanes(params, phy);
@@ -5822,7 +5891,7 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
MDIO_PMA_REG_CTRL, &ctrl);
if (!(ctrl & (1<<15)))
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
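The msleep(1..15) calls in polling loops become usleep_range() with an explicit min/max window, so short waits are not rounded up to a whole jiffy and the timer code may coalesce wakeups. A user-space analog of the bounded poll loop (nanosleep() stands in for the kernel-only usleep_range(), and the ready flag fakes the PHY status bit):

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

static int reset_done;			/* toy stand-in for the reset status bit */

/* Rough analog of usleep_range(1000, 2000): sleep about 1 ms per poll. */
static void sleep_about_1ms(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };
	nanosleep(&ts, NULL);
}

int main(void)
{
	int cnt;

	for (cnt = 0; cnt < 1000; cnt++) {
		if (cnt == 3)
			reset_done = 1;	/* pretend the hardware finished */
		if (reset_done)
			break;
		sleep_about_1ms();
	}
	printf("reset %s after %d polls\n",
	       reset_done ? "completed" : "timed out", cnt);
	return 0;
}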
if (cnt == 1000)
@@ -6053,7 +6122,7 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
if (!CHIP_IS_E3(bp)) {
- /* change the uni_phy_addr in the nig */
+ /* Change the uni_phy_addr in the nig */
md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
port*0x18));
@@ -6073,11 +6142,11 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
(MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
0x6041);
msleep(200);
- /* set aer mmd back */
+ /* Set aer mmd back */
bnx2x_set_aer_mmd(params, phy);
if (!CHIP_IS_E3(bp)) {
- /* and md_devad */
+ /* And md_devad */
REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
md_devad);
}
@@ -6274,7 +6343,7 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
MDIO_REG_BANK_GP_STATUS,
MDIO_GP_STATUS_TOP_AN_STATUS1,
&gp_status);
- /* link is up only if both local phy and external phy are up */
+ /* Link is up only if both local phy and external phy are up */
if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
return -ESRCH;
}
@@ -6295,7 +6364,9 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
for (phy_index = EXT_PHY1; phy_index < params->num_phys;
phy_index++) {
serdes_phy_type = ((params->phy[phy_index].media_type ==
- ETH_PHY_SFP_FIBER) ||
+ ETH_PHY_SFPP_10G_FIBER) ||
+ (params->phy[phy_index].media_type ==
+ ETH_PHY_SFP_1G_FIBER) ||
(params->phy[phy_index].media_type ==
ETH_PHY_XFP_FIBER) ||
(params->phy[phy_index].media_type ==
@@ -6396,7 +6467,7 @@ static int bnx2x_link_initialize(struct link_params *params,
static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
- /* reset the SerDes/XGXS */
+ /* Reset the SerDes/XGXS */
REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
(0x1ff << (params->port*16)));
}
@@ -6429,10 +6500,10 @@ static int bnx2x_update_link_down(struct link_params *params,
DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG;
- /* indicate no mac active */
+ /* Indicate no mac active */
vars->mac_type = MAC_TYPE_NONE;
- /* update shared memory */
+ /* Update shared memory */
vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
LINK_STATUS_LINK_UP |
LINK_STATUS_PHYSICAL_LINK_FLAG |
@@ -6445,15 +6516,15 @@ static int bnx2x_update_link_down(struct link_params *params,
vars->line_speed = 0;
bnx2x_update_mng(params, vars->link_status);
- /* activate nig drain */
+ /* Activate nig drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
- /* disable emac */
+ /* Disable emac */
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
- msleep(10);
- /* reset BigMac/Xmac */
+ usleep_range(10000, 20000);
+ /* Reset BigMac/Xmac */
if (CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp)) {
bnx2x_bmac_rx_disable(bp, params->port);
@@ -6462,6 +6533,16 @@ static int bnx2x_update_link_down(struct link_params *params,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
}
if (CHIP_IS_E3(bp)) {
+ /* Prevent LPI Generation by chip */
+ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2),
+ 0);
+ REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
+ REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2),
+ 0);
+ vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
+ SHMEM_EEE_ACTIVE_BIT);
+
+ bnx2x_update_mng_eee(params, vars->eee_status);
bnx2x_xmac_disable(params);
bnx2x_umac_disable(params);
}
@@ -6501,6 +6582,16 @@ static int bnx2x_update_link_up(struct link_params *params,
bnx2x_umac_enable(params, vars, 0);
bnx2x_set_led(params, vars,
LED_MODE_OPER, vars->line_speed);
+
+ if ((vars->eee_status & SHMEM_EEE_ACTIVE_BIT) &&
+ (vars->eee_status & SHMEM_EEE_LPI_REQUESTED_BIT)) {
+ DP(NETIF_MSG_LINK, "Enabling LPI assertion\n");
+ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 +
+ (params->port << 2), 1);
+ REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 1);
+ REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 +
+ (params->port << 2), 0xfc20);
+ }
}
if ((CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp))) {
@@ -6533,12 +6624,12 @@ static int bnx2x_update_link_up(struct link_params *params,
rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
vars->line_speed);
- /* disable drain */
+ /* Disable drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
- /* update shared memory */
+ /* Update shared memory */
bnx2x_update_mng(params, vars->link_status);
-
+ bnx2x_update_mng_eee(params, vars->eee_status);
/* Check remote fault */
for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
@@ -6582,6 +6673,8 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
phy_vars[phy_index].phy_link_up = 0;
phy_vars[phy_index].link_up = 0;
phy_vars[phy_index].fault_detected = 0;
+		/* Different consideration, since vars holds inner state */
+ phy_vars[phy_index].eee_status = vars->eee_status;
}
if (USES_WARPCORE(bp))
@@ -6602,7 +6695,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
- /* disable emac */
+ /* Disable emac */
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
@@ -6711,6 +6804,9 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
vars->link_status |= LINK_STATUS_SERDES_LINK;
else
vars->link_status &= ~LINK_STATUS_SERDES_LINK;
+
+ vars->eee_status = phy_vars[active_external_phy].eee_status;
+
DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
active_external_phy);
}
@@ -6744,11 +6840,11 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
} else if (prev_line_speed != vars->line_speed) {
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
0);
- msleep(1);
+ usleep_range(1000, 2000);
}
}
- /* anything 10 and over uses the bmac */
+ /* Anything 10 and over uses the bmac */
link_10g_plus = (vars->line_speed >= SPEED_10000);
bnx2x_link_int_ack(params, vars, link_10g_plus);
@@ -6814,7 +6910,7 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
{
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
- msleep(1);
+ usleep_range(1000, 2000);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
}
@@ -6911,7 +7007,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
MDIO_PMA_REG_GEN_CTRL,
0x0001);
- /* ucode reboot and rst */
+ /* Ucode reboot and rst */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_GEN_CTRL,
@@ -6955,7 +7051,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
- msleep(1);
+ usleep_range(1000, 2000);
} while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
((fw_msgout & 0xff) != 0x03 && (phy->type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
@@ -7049,11 +7145,11 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
"XAUI workaround has completed\n");
return 0;
}
- msleep(3);
+ usleep_range(3000, 6000);
}
break;
}
- msleep(3);
+ usleep_range(3000, 6000);
}
DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n");
return -EINVAL;
@@ -7127,7 +7223,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
- /* enable LASI */
+ /* Enable LASI */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
bnx2x_cl45_write(bp, phy,
@@ -7275,7 +7371,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1);
- /* clear the interrupt LASI status register */
+ /* Clear the interrupt LASI status register */
bnx2x_cl45_read(bp, phy,
MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
bnx2x_cl45_read(bp, phy,
@@ -7600,7 +7696,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 val = 0;
u16 i;
- if (byte_cnt > 16) {
+ if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
DP(NETIF_MSG_LINK,
"Reading from eeprom is limited to 0xf\n");
return -EINVAL;
@@ -7654,11 +7750,33 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
return 0;
- msleep(1);
+ usleep_range(1000, 2000);
}
return -EINVAL;
}
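The hard-coded 16 becomes SFP_EEPROM_PAGE_SIZE in each EEPROM reader, keeping a single request within one page. A caller that needs more than a page would have to chunk the read, roughly as below (read_page() is only a stand-in for bnx2x_read_sfp_module_eeprom(), and the 16-byte page size is an assumption carried over from the check above):

#include <stdint.h>
#include <string.h>

#define SFP_EEPROM_PAGE_SIZE 16		/* assumed per-request limit */

/* Stand-in for the per-PHY EEPROM reader; fills buf with dummy bytes. */
static int read_page(uint16_t addr, uint8_t byte_cnt, uint8_t *buf)
{
	if (byte_cnt > SFP_EEPROM_PAGE_SIZE)
		return -1;
	memset(buf, 0xab, byte_cnt);
	return 0;
}

/* Read an arbitrary length by splitting it into page-sized requests. */
static int read_eeprom(uint16_t addr, uint16_t len, uint8_t *buf)
{
	while (len) {
		uint8_t chunk = (len > SFP_EEPROM_PAGE_SIZE) ?
				SFP_EEPROM_PAGE_SIZE : (uint8_t)len;

		if (read_page(addr, chunk, buf))
			return -1;
		addr += chunk;
		buf  += chunk;
		len  -= chunk;
	}
	return 0;
}

int main(void)
{
	uint8_t data[40];
	return read_eeprom(0, sizeof(data), data);
}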
+static void bnx2x_warpcore_power_module(struct link_params *params,
+ struct bnx2x_phy *phy,
+ u8 power)
+{
+ u32 pin_cfg;
+ struct bnx2x *bp = params->bp;
+
+ pin_cfg = (REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
+ PORT_HW_CFG_E3_PWR_DIS_MASK) >>
+ PORT_HW_CFG_E3_PWR_DIS_SHIFT;
+
+ if (pin_cfg == PIN_CFG_NA)
+ return;
+ DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
+ power, pin_cfg);
+ /* Low ==> corresponding SFP+ module is powered
+ * high ==> the SFP+ module is powered down
+ */
+ bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
+}
static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params,
u16 addr, u8 byte_cnt,
@@ -7669,7 +7787,8 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
u32 data_array[4];
u16 addr32;
struct bnx2x *bp = params->bp;
- if (byte_cnt > 16) {
+
+ if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
DP(NETIF_MSG_LINK,
"Reading from eeprom is limited to 16 bytes\n");
return -EINVAL;
@@ -7678,6 +7797,12 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* 4 byte aligned address */
addr32 = addr & (~0x3);
do {
+ if (cnt == I2C_WA_PWR_ITER) {
+ bnx2x_warpcore_power_module(params, phy, 0);
+			/* Note that 100us is not enough here */
+			usleep_range(1000, 1000);
+ bnx2x_warpcore_power_module(params, phy, 1);
+ }
rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
data_array);
} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
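The retry loop now power-cycles the SFP+ module once the counter reaches I2C_WA_PWR_ITER and keeps retrying, a workaround for a wedged two-wire interface. Reduced to standalone C the shape is as follows (thresholds and the power/read stubs are placeholders, not the driver's values):

#include <stdio.h>

#define I2C_WA_RETRY_CNT 5	/* placeholder retry budget */
#define I2C_WA_PWR_ITER  3	/* placeholder: power-cycle on this attempt */

static int attempts_until_ok = 4;	/* pretend the bus recovers eventually */

static void power_module(int on) { printf("module power %s\n", on ? "on" : "off"); }

static int bsc_read(void)
{
	return (--attempts_until_ok > 0) ? -1 : 0;	/* fail, then succeed */
}

int main(void)
{
	int rc, cnt = 0;

	do {
		if (cnt == I2C_WA_PWR_ITER) {	/* mid-loop power cycle */
			power_module(0);
			power_module(1);
		}
		rc = bsc_read();
	} while (rc != 0 && ++cnt < I2C_WA_RETRY_CNT);

	printf("rc=%d after %d retries\n", rc, cnt);
	return rc;
}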
@@ -7699,7 +7824,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 val, i;
- if (byte_cnt > 16) {
+ if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
DP(NETIF_MSG_LINK,
"Reading from eeprom is limited to 0xf\n");
return -EINVAL;
@@ -7736,7 +7861,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Wait appropriate time for two-wire command to finish before
* polling the status register
*/
- msleep(1);
+ usleep_range(1000, 2000);
/* Wait up to 500us for command complete status */
for (i = 0; i < 100; i++) {
@@ -7772,7 +7897,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
return 0;
- msleep(1);
+ usleep_range(1000, 2000);
}
return -EINVAL;
@@ -7782,7 +7907,7 @@ int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params, u16 addr,
u8 byte_cnt, u8 *o_buf)
{
- int rc = -EINVAL;
+ int rc = -EOPNOTSUPP;
switch (phy->type) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
@@ -7807,7 +7932,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u32 sync_offset = 0, phy_idx, media_types;
- u8 val, check_limiting_mode = 0;
+ u8 val[2], check_limiting_mode = 0;
*edc_mode = EDC_MODE_LIMITING;
phy->media_type = ETH_PHY_UNSPECIFIED;
@@ -7815,13 +7940,13 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
if (bnx2x_read_sfp_module_eeprom(phy,
params,
SFP_EEPROM_CON_TYPE_ADDR,
- 1,
- &val) != 0) {
+ 2,
+ (u8 *)val) != 0) {
DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
return -EINVAL;
}
- switch (val) {
+ switch (val[0]) {
case SFP_EEPROM_CON_TYPE_VAL_COPPER:
{
u8 copper_module_type;
@@ -7859,13 +7984,29 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
break;
}
case SFP_EEPROM_CON_TYPE_VAL_LC:
- phy->media_type = ETH_PHY_SFP_FIBER;
- DP(NETIF_MSG_LINK, "Optic module detected\n");
check_limiting_mode = 1;
+ if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
+ SFP_EEPROM_COMP_CODE_LR_MASK |
+ SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
+ DP(NETIF_MSG_LINK, "1G Optic module detected\n");
+ phy->media_type = ETH_PHY_SFP_1G_FIBER;
+ phy->req_line_speed = SPEED_1000;
+ } else {
+ int idx, cfg_idx = 0;
+ DP(NETIF_MSG_LINK, "10G Optic module detected\n");
+ for (idx = INT_PHY; idx < MAX_PHYS; idx++) {
+ if (params->phy[idx].type == phy->type) {
+ cfg_idx = LINK_CONFIG_IDX(idx);
+ break;
+ }
+ }
+ phy->media_type = ETH_PHY_SFPP_10G_FIBER;
+ phy->req_line_speed = params->req_line_speed[cfg_idx];
+ }
break;
default:
DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
- val);
+ val[0]);
return -EINVAL;
}
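Reading two bytes instead of one yields the connector type in val[0] and the 10G compliance codes in val[1], which is what lets LC modules be split into 1G and 10G optics above. A toy decoder along those lines (the SR/LR/LRM bit positions follow the usual SFF-8472 layout but should be treated as assumptions here):

#include <stdint.h>
#include <stdio.h>

/* 10G Ethernet compliance bits in the SFP EEPROM (assumed positions). */
#define COMP_CODE_SR_MASK  0x10
#define COMP_CODE_LR_MASK  0x20
#define COMP_CODE_LRM_MASK 0x40

/* Classify an LC module: any SR/LR/LRM bit set means a 10G optic. */
static const char *classify_optic(uint8_t comp_code)
{
	if (comp_code & (COMP_CODE_SR_MASK | COMP_CODE_LR_MASK |
			 COMP_CODE_LRM_MASK))
		return "10G optic";
	return "1G optic";
}

int main(void)
{
	printf("0x20 -> %s\n", classify_optic(0x20));	/* 10G optic */
	printf("0x00 -> %s\n", classify_optic(0x00));	/* 1G optic  */
	return 0;
}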
sync_offset = params->shmem_base +
@@ -7951,7 +8092,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
return 0;
}
- /* format the warning message */
+ /* Format the warning message */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
SFP_EEPROM_VENDOR_NAME_ADDR,
@@ -7997,7 +8138,7 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
timeout * 5);
return 0;
}
- msleep(5);
+ usleep_range(5000, 10000);
}
return -EINVAL;
}
@@ -8200,29 +8341,6 @@ static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
bnx2x_set_e1e2_module_fault_led(params, gpio_mode);
}
-static void bnx2x_warpcore_power_module(struct link_params *params,
- struct bnx2x_phy *phy,
- u8 power)
-{
- u32 pin_cfg;
- struct bnx2x *bp = params->bp;
-
- pin_cfg = (REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
- PORT_HW_CFG_E3_PWR_DIS_MASK) >>
- PORT_HW_CFG_E3_PWR_DIS_SHIFT;
-
- if (pin_cfg == PIN_CFG_NA)
- return;
- DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
- power, pin_cfg);
- /* Low ==> corresponding SFP+ module is powered
- * high ==> the SFP+ module is powered down
- */
- bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
-}
-
static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
@@ -8332,7 +8450,7 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
return -EINVAL;
} else if (bnx2x_verify_sfp_module(phy, params) != 0) {
- /* check SFP+ module compatibility */
+ /* Check SFP+ module compatibility */
DP(NETIF_MSG_LINK, "Module verification failed!!\n");
rc = -EINVAL;
/* Turn on fault module-detected led */
@@ -8395,14 +8513,34 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
/* Call the handling function in case module is detected */
if (gpio_val == 0) {
+ bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
+ bnx2x_set_aer_mmd(params, phy);
+
bnx2x_power_sfp_module(params, phy, 1);
bnx2x_set_gpio_int(bp, gpio_num,
MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
gpio_port);
- if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
+ if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) {
bnx2x_sfp_module_detection(phy, params);
- else
+ if (CHIP_IS_E3(bp)) {
+ u16 rx_tx_in_reset;
+ /* In case WC is out of reset, reconfigure the
+ * link speed while taking into account 1G
+ * module limitation.
+ */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6,
+ &rx_tx_in_reset);
+ if (!rx_tx_in_reset) {
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+ bnx2x_warpcore_config_sfi(phy, params);
+ bnx2x_warpcore_reset_lane(bp, phy, 0);
+ }
+ }
+ } else {
DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+ }
} else {
u32 val = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
@@ -8463,7 +8601,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
MDIO_PMA_LASI_TXCTRL);
- /* clear LASI indication*/
+ /* Clear LASI indication*/
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
bnx2x_cl45_read(bp, phy,
@@ -8531,7 +8669,7 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val);
if (val)
break;
- msleep(10);
+ usleep_range(10000, 20000);
}
DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt);
if ((params->feature_config_flags &
@@ -8660,7 +8798,7 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
MDIO_PMA_REG_GEN_CTRL,
MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
- /* wait for 150ms for microcode load */
+ /* Wait for 150ms for microcode load */
msleep(150);
/* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
@@ -8854,6 +8992,63 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}
+static void bnx2x_8727_config_speed(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 tmp1, val;
+ /* Set option 1G speed */
+ if ((phy->req_line_speed == SPEED_1000) ||
+ (phy->media_type == ETH_PHY_SFP_1G_FIBER)) {
+ DP(NETIF_MSG_LINK, "Setting 1G force\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
+ DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
+ /* Power down the XAUI until link is up in case of dual-media
+ * and 1G
+ */
+ if (DUAL_MEDIA(params)) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, &val);
+ val |= (3<<10);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, val);
+ }
+ } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
+ } else {
+ /* Since the 8727 has only single reset pin, need to set the 10G
+ * registers although it is default
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
+ 0x0020);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
+ 0x0008);
+ }
+}
+
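bnx2x_8727_config_speed() lifts the 1G-force / clause-37 / 10G speed setup out of config_init so the module plug/unplug path can rerun it (see the call added in the mod_abs handler further down). The design, boiled down to a sketch with placeholder names:

#include <stdio.h>

/* One shared speed routine, called from both cold init and hot-plug. */
static void config_speed(int is_1g_module)
{
	printf("configuring for %s\n", is_1g_module ? "1G" : "10G");
}

static void phy_init(int is_1g_module)       { config_speed(is_1g_module); }
static void module_plugged(int is_1g_module) { config_speed(is_1g_module); }

int main(void)
{
	phy_init(0);		/* boot with a 10G module */
	module_plugged(1);	/* later hot-plug a 1G-only module */
	return 0;
}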
static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -8871,7 +9066,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
lasi_ctrl_val = 0x0006;
DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
- /* enable LASI */
+ /* Enable LASI */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
rx_alarm_ctrl_val);
@@ -8923,56 +9118,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
- /* Set option 1G speed */
- if (phy->req_line_speed == SPEED_1000) {
- DP(NETIF_MSG_LINK, "Setting 1G force\n");
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
- DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
- /* Power down the XAUI until link is up in case of dual-media
- * and 1G
- */
- if (DUAL_MEDIA(params)) {
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_PCS_GP, &val);
- val |= (3<<10);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_PCS_GP, val);
- }
- } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
- ((phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
- ((phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
-
- DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
- } else {
- /* Since the 8727 has only single reset pin, need to set the 10G
- * registers although it is default
- */
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
- 0x0020);
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
- 0x0008);
- }
-
+ bnx2x_8727_config_speed(phy, params);
/* Set 2-wire transfer rate of SFP+ module EEPROM
* to 100Khz since some DACs(direct attached cables) do
* not work at 400Khz.
@@ -9099,6 +9245,9 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
bnx2x_sfp_module_detection(phy, params);
else
DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+
+ /* Reconfigure link speed based on module type limitations */
+ bnx2x_8727_config_speed(phy, params);
}
DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
@@ -9579,9 +9728,9 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
struct link_params *params,
u16 fw_cmd,
- u16 cmd_args[])
+ u16 cmd_args[], int argc)
{
- u32 idx;
+ int idx;
u16 val;
struct bnx2x *bp = params->bp;
/* Write CMD_OPEN_OVERRIDE to STATUS reg */
@@ -9593,7 +9742,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
MDIO_84833_CMD_HDLR_STATUS, &val);
if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (idx >= PHY84833_CMDHDLR_WAIT) {
DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
@@ -9601,7 +9750,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
}
/* Prepare argument(s) and issue command */
- for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+ for (idx = 0; idx < argc; idx++) {
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
MDIO_84833_CMD_HDLR_DATA1 + idx,
cmd_args[idx]);
@@ -9614,7 +9763,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
(val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if ((idx >= PHY84833_CMDHDLR_WAIT) ||
(val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
@@ -9622,7 +9771,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
return -EINVAL;
}
/* Gather returning data */
- for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+ for (idx = 0; idx < argc; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
MDIO_84833_CMD_HDLR_DATA1 + idx,
&cmd_args[idx]);
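With the new argc parameter the command handler only writes and reads back as many DATA registers as the caller actually supplies; the single-argument EEE calls later in the patch rely on this, while existing callers keep passing PHY84833_CMDHDLR_MAX_ARGS. A toy handler showing the shape (names and the register I/O are placeholders):

#include <stdio.h>

#define MAX_ARGS 4

/* Toy command handler: touch only the first argc slots of cmd_args. */
static int cmd_hdlr(unsigned short fw_cmd, unsigned short *cmd_args, int argc)
{
	int idx;

	for (idx = 0; idx < argc; idx++)	/* write arguments */
		printf("cmd 0x%x arg[%d]=0x%x\n", fw_cmd, idx, cmd_args[idx]);
	for (idx = 0; idx < argc; idx++)	/* read back results */
		cmd_args[idx] = 0;
	return 0;
}

int main(void)
{
	unsigned short one_arg = 1;
	unsigned short many[MAX_ARGS] = { 1, 2, 3, 4 };

	cmd_hdlr(0x1, &one_arg, 1);	/* single-argument caller */
	cmd_hdlr(0x2, many, MAX_ARGS);	/* full-width caller */
	return 0;
}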
@@ -9656,7 +9805,7 @@ static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
data[1] = (u16)pair_swap;
status = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_PAIR_SWAP, data);
+ PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS);
if (status == 0)
DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
@@ -9734,6 +9883,95 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
return 0;
}
+static int bnx2x_8483x_eee_timers(struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 eee_idle = 0, eee_mode;
+ struct bnx2x *bp = params->bp;
+
+ eee_idle = bnx2x_eee_calc_timer(params);
+
+ if (eee_idle) {
+ REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
+ eee_idle);
+ } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
+ (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
+ (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
+ DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
+ return -EINVAL;
+ }
+
+ vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
+ if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+ /* eee_idle in 1u --> eee_status in 16u */
+ eee_idle >>= 4;
+ vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
+ SHMEM_EEE_TIME_OUTPUT_BIT;
+ } else {
+ if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
+ return -EINVAL;
+ vars->eee_status |= eee_mode;
+ }
+
+ return 0;
+}
+
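The "1u --> 16u" comment is the key detail: the idle timer is computed in microseconds but stored in the shared-memory field in 16-microsecond units, hence the right shift by four before masking. A two-line check of that arithmetic (the mask value is a placeholder; 1193 is PHY84833_CONSTANT_LATENCY, defined further down):

#include <assert.h>
#include <stdint.h>

#define EEE_TIMER_MASK 0x00ffffff	/* placeholder for SHMEM_EEE_TIMER_MASK */

int main(void)
{
	uint32_t eee_idle_us = 1193;	/* e.g. the constant-latency timer */
	uint32_t stored = (eee_idle_us >> 4) & EEE_TIMER_MASK;

	assert(stored == 1193 / 16);	/* 74: truncating divide by 16 */
	return 0;
}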
+static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ int rc;
+ struct bnx2x *bp = params->bp;
+ u16 cmd_args = 0;
+
+ DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
+
+	/* Make certain LPI is disabled */
+ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
+ REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
+
+ /* Prevent Phy from working in EEE and advertising it */
+ rc = bnx2x_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "EEE disable failed.\n");
+ return rc;
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0);
+ vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+
+ return 0;
+}
+
+static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ int rc;
+ struct bnx2x *bp = params->bp;
+ u16 cmd_args = 1;
+
+ DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
+
+ rc = bnx2x_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "EEE enable failed.\n");
+ return rc;
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x8);
+
+ /* Mask events preventing LPI generation */
+ REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
+
+ vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+ vars->eee_status |= (SHMEM_EEE_10G_ADV << SHMEM_EEE_ADV_STATUS_SHIFT);
+
+ return 0;
+}
+
#define PHY84833_CONSTANT_LATENCY 1193
static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
struct link_params *params,
@@ -9746,9 +9984,9 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
int rc = 0;
- msleep(1);
+ usleep_range(1000, 2000);
- if (!(CHIP_IS_E1(bp)))
+ if (!(CHIP_IS_E1x(bp)))
port = BP_PATH(bp);
else
port = params->port;
@@ -9833,8 +10071,9 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
cmd_args[3] = PHY84833_CONSTANT_LATENCY;
rc = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_EEE_MODE, cmd_args);
- if (rc != 0)
+ PHY84833_CMD_SET_EEE_MODE, cmd_args,
+ PHY84833_CMDHDLR_MAX_ARGS);
+ if (rc)
DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
}
if (initialize)
@@ -9858,6 +10097,48 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
MDIO_CTL_REG_84823_USER_CTRL_REG, val);
}
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_FW_REV, &val);
+
+ /* Configure EEE support */
+ if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && bnx2x_eee_has_cap(params)) {
+ phy->flags |= FLAGS_EEE_10GBT;
+ vars->eee_status |= SHMEM_EEE_10G_ADV <<
+ SHMEM_EEE_SUPPORTED_SHIFT;
+		/* Propagate params' bits --> vars (for migration exposure) */
+ if (params->eee_mode & EEE_MODE_ENABLE_LPI)
+ vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
+ else
+ vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
+
+ if (params->eee_mode & EEE_MODE_ADV_LPI)
+ vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
+ else
+ vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
+
+ rc = bnx2x_8483x_eee_timers(params, vars);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
+ bnx2x_8483x_disable_eee(phy, params, vars);
+ return rc;
+ }
+
+ if ((params->req_duplex[actual_phy_selection] == DUPLEX_FULL) &&
+ (params->eee_mode & EEE_MODE_ADV_LPI) &&
+ (bnx2x_eee_calc_timer(params) ||
+ !(params->eee_mode & EEE_MODE_ENABLE_LPI)))
+ rc = bnx2x_8483x_enable_eee(phy, params, vars);
+ else
+ rc = bnx2x_8483x_disable_eee(phy, params, vars);
+ if (rc) {
+			DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n");
+ return rc;
+ }
+ } else {
+ phy->flags &= ~FLAGS_EEE_10GBT;
+ vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
+ }
+
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
/* Bring PHY out of super isolate mode as the final step. */
bnx2x_cl45_read(bp, phy,
@@ -9912,17 +10193,19 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n",
legacy_status);
link_up = ((legacy_status & (1<<11)) == (1<<11));
- if (link_up) {
- legacy_speed = (legacy_status & (3<<9));
- if (legacy_speed == (0<<9))
- vars->line_speed = SPEED_10;
- else if (legacy_speed == (1<<9))
- vars->line_speed = SPEED_100;
- else if (legacy_speed == (2<<9))
- vars->line_speed = SPEED_1000;
- else /* Should not happen */
- vars->line_speed = 0;
+ legacy_speed = (legacy_status & (3<<9));
+ if (legacy_speed == (0<<9))
+ vars->line_speed = SPEED_10;
+ else if (legacy_speed == (1<<9))
+ vars->line_speed = SPEED_100;
+ else if (legacy_speed == (2<<9))
+ vars->line_speed = SPEED_1000;
+ else { /* Should not happen: Treat as link down */
+ vars->line_speed = 0;
+ link_up = 0;
+ }
+ if (link_up) {
if (legacy_status & (1<<8))
vars->duplex = DUPLEX_FULL;
else
@@ -9950,7 +10233,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
}
}
if (link_up) {
- DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n",
+ DP(NETIF_MSG_LINK, "BCM848x3: link speed is %d\n",
vars->line_speed);
bnx2x_ext_phy_resolve_fc(phy, params, vars);
@@ -9989,6 +10272,31 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
if (val & (1<<11))
vars->link_status |=
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+ /* Determine if EEE was negotiated */
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ u32 eee_shmem = 0;
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_EEE_ADV, &val1);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_EEE_ADV, &val2);
+ if ((val1 & val2) & 0x8) {
+ DP(NETIF_MSG_LINK, "EEE negotiated\n");
+ vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
+ }
+
+ if (val2 & 0x12)
+ eee_shmem |= SHMEM_EEE_100M_ADV;
+ if (val2 & 0x4)
+ eee_shmem |= SHMEM_EEE_1G_ADV;
+ if (val2 & 0x68)
+ eee_shmem |= SHMEM_EEE_10G_ADV;
+
+ vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
+ vars->eee_status |= (eee_shmem <<
+ SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+ }
}
return link_up;
@@ -10267,7 +10575,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
u32 cfg_pin;
DP(NETIF_MSG_LINK, "54618SE cfg init\n");
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
/* This works with E3 only, no need to check the chip
* before determining the port.
@@ -10336,7 +10644,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
- /* read all advertisement */
+ /* Read all advertisement */
bnx2x_cl22_read(bp, phy,
0x09,
&an_1000_val);
@@ -10373,7 +10681,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
0x09,
&an_1000_val);
- /* set 100 speed advertisement */
+ /* Set 100 speed advertisement */
if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask &
(PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
@@ -10387,7 +10695,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Advertising 100M\n");
}
- /* set 10 speed advertisement */
+ /* Set 10 speed advertisement */
if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask &
(PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
@@ -10526,7 +10834,7 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
/* Get speed operation status */
bnx2x_cl22_read(bp, phy,
- 0x19,
+ MDIO_REG_GPHY_AUX_STATUS,
&legacy_status);
DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status);
@@ -10753,7 +11061,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
val2, val1);
link_up = ((val1 & 4) == 4);
- /* if link is up print the AN outcome of the SFX7101 PHY */
+ /* If link is up print the AN outcome of the SFX7101 PHY */
if (link_up) {
bnx2x_cl45_read(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
@@ -10765,7 +11073,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
bnx2x_ext_phy_resolve_fc(phy, params, vars);
- /* read LP advertised speeds */
+ /* Read LP advertised speeds */
if (val2 & (1<<11))
vars->link_status |=
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
@@ -11084,7 +11392,7 @@ static struct bnx2x_phy phy_8706 = {
SUPPORTED_FIBRE |
SUPPORTED_Pause |
SUPPORTED_Asym_Pause),
- .media_type = ETH_PHY_SFP_FIBER,
+ .media_type = ETH_PHY_SFPP_10G_FIBER,
.ver_addr = 0,
.req_flow_ctrl = 0,
.req_line_speed = 0,
@@ -11243,7 +11551,8 @@ static struct bnx2x_phy phy_84833 = {
.def_md_devad = 0,
.flags = (FLAGS_FAN_FAILURE_DET_REQ |
FLAGS_REARM_LATCH_SIGNAL |
- FLAGS_TX_ERROR_CHECK),
+ FLAGS_TX_ERROR_CHECK |
+ FLAGS_EEE_10GBT),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -11422,7 +11731,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
SUPPORTED_FIBRE |
SUPPORTED_Pause |
SUPPORTED_Asym_Pause);
- phy->media_type = ETH_PHY_SFP_FIBER;
+ phy->media_type = ETH_PHY_SFPP_10G_FIBER;
break;
case PORT_HW_CFG_NET_SERDES_IF_KR:
phy->media_type = ETH_PHY_KR;
@@ -11962,7 +12271,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
vars->mac_type = MAC_TYPE_NONE;
vars->phy_flags = 0;
- /* disable attentions */
+ /* Disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
(NIG_MASK_XGXS0_LINK_STATUS |
NIG_MASK_XGXS0_LINK10G |
@@ -12011,6 +12320,8 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
break;
}
bnx2x_update_mng(params, vars->link_status);
+
+ bnx2x_update_mng_eee(params, vars->eee_status);
return 0;
}
@@ -12020,19 +12331,22 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
struct bnx2x *bp = params->bp;
u8 phy_index, port = params->port, clear_latch_ind = 0;
DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
- /* disable attentions */
+ /* Disable attentions */
vars->link_status = 0;
bnx2x_update_mng(params, vars->link_status);
+ vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
+ SHMEM_EEE_ACTIVE_BIT);
+ bnx2x_update_mng_eee(params, vars->eee_status);
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
(NIG_MASK_XGXS0_LINK_STATUS |
NIG_MASK_XGXS0_LINK10G |
NIG_MASK_SERDES0_LINK_STATUS |
NIG_MASK_MI_INT));
- /* activate nig drain */
+ /* Activate nig drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
- /* disable nig egress interface */
+ /* Disable nig egress interface */
if (!CHIP_IS_E3(bp)) {
REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
@@ -12045,15 +12359,15 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
bnx2x_xmac_disable(params);
bnx2x_umac_disable(params);
}
- /* disable emac */
+ /* Disable emac */
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
- msleep(10);
+ usleep_range(10000, 20000);
/* The PHY reset is controlled by GPIO 1
* Hold it low
*/
- /* clear link led */
+ /* Clear link led */
bnx2x_set_mdio_clk(bp, params->chip_id, port);
bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
@@ -12083,9 +12397,9 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
params->phy[INT_PHY].link_reset(
&params->phy[INT_PHY], params);
- /* disable nig ingress interface */
+ /* Disable nig ingress interface */
if (!CHIP_IS_E3(bp)) {
- /* reset BigMac */
+ /* Reset BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
@@ -12142,7 +12456,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
DP(NETIF_MSG_LINK, "populate_phy failed\n");
return -EINVAL;
}
- /* disable attentions */
+ /* Disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
port_of_path*4,
(NIG_MASK_XGXS0_LINK_STATUS |
@@ -12216,7 +12530,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
bnx2x_cl45_write(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
- msleep(15);
+ usleep_range(15000, 30000);
/* Read modify write the SPI-ROM version select register */
bnx2x_cl45_read(bp, phy_blk[port],
@@ -12248,7 +12562,7 @@ static int bnx2x_8726_common_init_phy(struct bnx2x *bp,
REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
bnx2x_ext_phy_hw_reset(bp, 0);
- msleep(5);
+ usleep_range(5000, 10000);
for (port = 0; port < PORT_MAX; port++) {
u32 shmem_base, shmem2_base;
@@ -12355,11 +12669,11 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
/* Initiate PHY reset*/
bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
port);
- msleep(1);
+ usleep_range(1000, 2000);
bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
port);
- msleep(5);
+ usleep_range(5000, 10000);
/* PART1 - Reset both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
@@ -12453,7 +12767,7 @@ static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
MDIO_PMA_REG_CTRL, &val);
if (!(val & (1<<15)))
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (cnt >= 1500) {
DP(NETIF_MSG_LINK, "84833 reset timeout\n");
@@ -12543,7 +12857,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
break;
}
- if (rc != 0)
+ if (rc)
netdev_err(bp->dev, "Warning: PHY was not initialized,"
" Port %d\n",
0);
@@ -12624,30 +12938,41 @@ static void bnx2x_check_over_curr(struct link_params *params,
vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
}
-static void bnx2x_analyze_link_error(struct link_params *params,
- struct link_vars *vars, u32 lss_status,
- u8 notify)
+/* Returns 0 if no change occurred since last check; 1 otherwise. */
+static u8 bnx2x_analyze_link_error(struct link_params *params,
+ struct link_vars *vars, u32 status,
+ u32 phy_flag, u32 link_flag, u8 notify)
{
struct bnx2x *bp = params->bp;
/* Compare new value with previous value */
u8 led_mode;
- u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0;
+ u32 old_status = (vars->phy_flags & phy_flag) ? 1 : 0;
- if ((lss_status ^ half_open_conn) == 0)
- return;
+ if ((status ^ old_status) == 0)
+ return 0;
/* If values differ */
- DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up,
- half_open_conn, lss_status);
+ switch (phy_flag) {
+ case PHY_HALF_OPEN_CONN_FLAG:
+ DP(NETIF_MSG_LINK, "Analyze Remote Fault\n");
+ break;
+ case PHY_SFP_TX_FAULT_FLAG:
+ DP(NETIF_MSG_LINK, "Analyze TX Fault\n");
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Analyze UNKOWN\n");
+ }
+ DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
+ old_status, status);
/* a. Update shmem->link_status accordingly
* b. Update link_vars->link_up
*/
- if (lss_status) {
- DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n");
+ if (status) {
vars->link_status &= ~LINK_STATUS_LINK_UP;
+ vars->link_status |= link_flag;
vars->link_up = 0;
- vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ vars->phy_flags |= phy_flag;
/* activate nig drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
@@ -12656,10 +12981,10 @@ static void bnx2x_analyze_link_error(struct link_params *params,
*/
led_mode = LED_MODE_OFF;
} else {
- DP(NETIF_MSG_LINK, "Remote Fault cleared\n");
vars->link_status |= LINK_STATUS_LINK_UP;
+ vars->link_status &= ~link_flag;
vars->link_up = 1;
- vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
+ vars->phy_flags &= ~phy_flag;
led_mode = LED_MODE_OPER;
/* Clear nig drain */
@@ -12676,6 +13001,8 @@ static void bnx2x_analyze_link_error(struct link_params *params,
vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
if (notify)
bnx2x_notify_link_changed(bp);
+
+ return 1;
}
/******************************************************************************
@@ -12717,7 +13044,9 @@ int bnx2x_check_half_open_conn(struct link_params *params,
if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
lss_status = 1;
- bnx2x_analyze_link_error(params, vars, lss_status, notify);
+ bnx2x_analyze_link_error(params, vars, lss_status,
+ PHY_HALF_OPEN_CONN_FLAG,
+ LINK_STATUS_NONE, notify);
} else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
/* Check E1X / E2 BMAC */
@@ -12734,11 +13063,55 @@ int bnx2x_check_half_open_conn(struct link_params *params,
REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
lss_status = (wb_data[0] > 0);
- bnx2x_analyze_link_error(params, vars, lss_status, notify);
+ bnx2x_analyze_link_error(params, vars, lss_status,
+ PHY_HALF_OPEN_CONN_FLAG,
+ LINK_STATUS_NONE, notify);
}
return 0;
}
+static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u32 cfg_pin, value = 0;
+ u8 led_change, port = params->port;
+
+ /* Get The SFP+ TX_Fault controlling pin ([eg]pio) */
+ cfg_pin = (REG_RD(bp, params->shmem_base + offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+ PORT_HW_CFG_E3_TX_FAULT_MASK) >>
+ PORT_HW_CFG_E3_TX_FAULT_SHIFT;
+ if (bnx2x_get_cfg_pin(bp, cfg_pin, &value)) {
+ DP(NETIF_MSG_LINK, "Failed to read pin 0x%02x\n", cfg_pin);
+ return;
+ }
+
+ led_change = bnx2x_analyze_link_error(params, vars, value,
+ PHY_SFP_TX_FAULT_FLAG,
+ LINK_STATUS_SFP_TX_FAULT, 1);
+
+ if (led_change) {
+ /* Change TX_Fault led, set link status for further syncs */
+ u8 led_mode;
+
+ if (vars->phy_flags & PHY_SFP_TX_FAULT_FLAG) {
+ led_mode = MISC_REGISTERS_GPIO_HIGH;
+ vars->link_status |= LINK_STATUS_SFP_TX_FAULT;
+ } else {
+ led_mode = MISC_REGISTERS_GPIO_LOW;
+ vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
+ }
+
+ /* If module is unapproved, led should be on regardless */
+ if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) {
+ DP(NETIF_MSG_LINK, "Change TX_Fault LED: ->%x\n",
+ led_mode);
+ bnx2x_set_e3_module_fault_led(params, led_mode);
+ }
+ }
+}
void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
{
u16 phy_idx;
@@ -12757,7 +13130,26 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
struct bnx2x_phy *phy = &params->phy[INT_PHY];
bnx2x_set_aer_mmd(params, phy);
bnx2x_check_over_curr(params, vars);
- bnx2x_warpcore_config_runtime(phy, params, vars);
+ if (vars->rx_tx_asic_rst)
+ bnx2x_warpcore_config_runtime(phy, params, vars);
+
+ if ((REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg))
+ & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
+ PORT_HW_CFG_NET_SERDES_IF_SFI) {
+ if (bnx2x_is_sfp_module_plugged(phy, params)) {
+ bnx2x_sfp_tx_fault_detection(phy, params, vars);
+ } else if (vars->link_status &
+ LINK_STATUS_SFP_TX_FAULT) {
+ /* Clean trail, interrupt corrects the leds */
+ vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
+ vars->phy_flags &= ~PHY_SFP_TX_FAULT_FLAG;
+ /* Update link status in the shared memory */
+ bnx2x_update_mng(params, vars->link_status);
+ }
+ }
+
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index ea4371f..51cac81 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -41,6 +41,7 @@
#define SPEED_AUTO_NEG 0
#define SPEED_20000 20000
+#define SFP_EEPROM_PAGE_SIZE 16
#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
#define SFP_EEPROM_VENDOR_NAME_SIZE 16
#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
@@ -125,6 +126,11 @@ typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
struct link_params *params, u8 mode);
typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy,
struct link_params *params, u32 action);
+struct bnx2x_reg_set {
+ u8 devad;
+ u16 reg;
+ u16 val;
+};
struct bnx2x_phy {
u32 type;
@@ -149,6 +155,7 @@ struct bnx2x_phy {
#define FLAGS_DUMMY_READ (1<<9)
#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
#define FLAGS_TX_ERROR_CHECK (1<<12)
+#define FLAGS_EEE_10GBT (1<<13)
/* preemphasis values for the rx side */
u16 rx_preemphasis[4];
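
The bnx2x_reg_set triplet added in this hunk lets PHY init sequences be expressed as data tables rather than long runs of individual MDIO writes. A minimal sketch of the intended pattern, assuming a hypothetical helper name and relying only on the clause-45 write call already used elsewhere in this patch:

/* Hypothetical helper, not part of the patch: apply a devad/reg/val
 * table to a PHY over clause-45 MDIO.
 */
static void bnx2x_write_reg_set(struct bnx2x *bp, struct bnx2x_phy *phy,
				const struct bnx2x_reg_set *set, int len)
{
	int i;

	for (i = 0; i < len; i++)
		bnx2x_cl45_write(bp, phy, set[i].devad, set[i].reg,
				 set[i].val);
}

A caller would typically pass a static const array of triplets together with ARRAY_SIZE() of it.
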
@@ -162,14 +169,15 @@ struct bnx2x_phy {
u32 supported;
u32 media_type;
-#define ETH_PHY_UNSPECIFIED 0x0
-#define ETH_PHY_SFP_FIBER 0x1
-#define ETH_PHY_XFP_FIBER 0x2
-#define ETH_PHY_DA_TWINAX 0x3
-#define ETH_PHY_BASE_T 0x4
-#define ETH_PHY_KR 0xf0
-#define ETH_PHY_CX4 0xf1
-#define ETH_PHY_NOT_PRESENT 0xff
+#define ETH_PHY_UNSPECIFIED 0x0
+#define ETH_PHY_SFPP_10G_FIBER 0x1
+#define ETH_PHY_XFP_FIBER 0x2
+#define ETH_PHY_DA_TWINAX 0x3
+#define ETH_PHY_BASE_T 0x4
+#define ETH_PHY_SFP_1G_FIBER 0x5
+#define ETH_PHY_KR 0xf0
+#define ETH_PHY_CX4 0xf1
+#define ETH_PHY_NOT_PRESENT 0xff
/* The address in which version is located*/
u32 ver_addr;
@@ -265,6 +273,30 @@ struct link_params {
u8 num_phys;
u8 rsrv;
+
+ /* Used to configure the EEE Tx LPI timer, has several modes of
+ * operation, according to bits 29:28 -
+ * 2'b00: Timer will be configured by nvram, output will be the value
+ * from nvram.
+ * 2'b01: Timer will be configured by nvram, output will be in
+ * microseconds.
+ * 2'b10: bits 1:0 contain an nvram timer selection which will be used
+ * instead of the one located in the nvram. Output will be that value.
+ * 2'b11: bits 19:0 contain the idle timer in microseconds; output
+ * will be in microseconds.
+ * Bits 31:30 should be 2'b11 in order for EEE to be enabled.
+ */
+ u32 eee_mode;
+#define EEE_MODE_NVRAM_BALANCED_TIME (0xa00)
+#define EEE_MODE_NVRAM_AGGRESSIVE_TIME (0x100)
+#define EEE_MODE_NVRAM_LATENCY_TIME (0x6000)
+#define EEE_MODE_NVRAM_MASK (0x3)
+#define EEE_MODE_TIMER_MASK (0xfffff)
+#define EEE_MODE_OUTPUT_TIME (1<<28)
+#define EEE_MODE_OVERRIDE_NVRAM (1<<29)
+#define EEE_MODE_ENABLE_LPI (1<<30)
+#define EEE_MODE_ADV_LPI (1<<31)
+
u16 hw_led_mode; /* part of the hw_config read from the shmem */
u32 multi_phy_config;
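
As a hedged illustration of the eee_mode encoding documented above (not code taken from the patch), the driver-override timer case, bits 29:28 = 2'b11 with the idle time in bits 19:0, could be composed from the new flags as follows; the 500 us value is arbitrary:

/* Illustrative only: enable EEE advertisement and LPI, and supply a
 * 500 us idle timer directly instead of the nvram value. Output is
 * then in microseconds, per the comment above.
 */
u32 eee_mode = EEE_MODE_ADV_LPI | EEE_MODE_ENABLE_LPI |
	       EEE_MODE_OVERRIDE_NVRAM | EEE_MODE_OUTPUT_TIME |
	       (500 & EEE_MODE_TIMER_MASK);

params->eee_mode = eee_mode;	/* params is a struct link_params * */
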
@@ -282,6 +314,7 @@ struct link_vars {
#define PHY_PHYSICAL_LINK_FLAG (1<<2)
#define PHY_HALF_OPEN_CONN_FLAG (1<<3)
#define PHY_OVER_CURRENT_FLAG (1<<4)
+#define PHY_SFP_TX_FAULT_FLAG (1<<5)
u8 mac_type;
#define MAC_TYPE_NONE 0
@@ -301,6 +334,7 @@ struct link_vars {
/* The same definitions as the shmem parameter */
u32 link_status;
+ u32 eee_status;
u8 fault_detected;
u8 rsrv1;
u16 periodic_flags;
@@ -459,8 +493,7 @@ struct bnx2x_ets_params {
struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS];
};
-/**
- * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
+/* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
* when link is already up
*/
int bnx2x_update_pfc(struct link_params *params,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index f755a66..9aaf863 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -74,6 +74,8 @@
#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
+#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
+
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
@@ -104,7 +106,7 @@ MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
#define INT_MODE_INTx 1
#define INT_MODE_MSI 2
-static int int_mode;
+int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
"(1 INT#x; 2 MSI)");
@@ -135,7 +137,10 @@ enum bnx2x_board_type {
BCM57800_MF,
BCM57810,
BCM57810_MF,
- BCM57840,
+ BCM57840_O,
+ BCM57840_4_10,
+ BCM57840_2_20,
+ BCM57840_MFO,
BCM57840_MF,
BCM57811,
BCM57811_MF
@@ -155,6 +160,9 @@ static struct {
{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
{ "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
{ "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
@@ -187,8 +195,17 @@ static struct {
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF
#endif
-#ifndef PCI_DEVICE_ID_NX2_57840
-#define PCI_DEVICE_ID_NX2_57840 CHIP_NUM_57840
+#ifndef PCI_DEVICE_ID_NX2_57840_O
+#define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_4_10
+#define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_2_20
+#define PCI_DEVICE_ID_NX2_57840_2_20 CHIP_NUM_57840_2_20
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_MFO
+#define PCI_DEVICE_ID_NX2_57840_MFO CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
@@ -209,7 +226,10 @@ static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
- { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
@@ -758,7 +778,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
/* Tx */
for_each_cos_in_tx_queue(fp, cos)
{
- txdata = fp->txdata[cos];
+ txdata = *fp->txdata_ptr[cos];
BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
i, txdata.tx_pkt_prod,
txdata.tx_pkt_cons, txdata.tx_bd_prod,
@@ -876,7 +896,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
for_each_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
@@ -1583,7 +1603,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
- struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;
+ struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
DP(BNX2X_MSG_SP,
"fp %d cid %d got ramrod #%d state is %x type is %d\n",
@@ -1710,7 +1730,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
/* Handle Rx or Tx according to SB id */
prefetch(fp->rx_cons_sb);
for_each_cos_in_tx_queue(fp, cos)
- prefetch(fp->txdata[cos].tx_cons_sb);
+ prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
prefetch(&fp->sb_running_index[SM_RX_ID]);
napi_schedule(&bnx2x_fp(bp, fp->index, napi));
status &= ~mask;
@@ -2124,6 +2144,11 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
}
}
+ if (load_mode == LOAD_LOOPBACK_EXT) {
+ struct link_params *lp = &bp->link_params;
+ lp->loopback_mode = LOOPBACK_EXT;
+ }
+
rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
bnx2x_release_phy_lock(bp);
@@ -2916,7 +2941,7 @@ static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
u8 cos)
{
- txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping;
+ txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
txq_init->fw_sb_id = fp->fw_sb_id;
@@ -3030,9 +3055,9 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
memcpy(ether_stat->version, DRV_MODULE_VERSION,
ETH_STAT_INFO_VERSION_LEN - 1);
- bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj,
- DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
- ether_stat->mac_local);
+ bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
+ DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+ ether_stat->mac_local);
ether_stat->mtu_size = bp->dev->mtu;
@@ -3055,7 +3080,8 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
struct fcoe_stats_info *fcoe_stat =
&bp->slowpath->drv_info_to_mcp.fcoe_stat;
- memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN);
+ memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
+ bp->fip_mac, ETH_ALEN);
fcoe_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
@@ -3063,11 +3089,11 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
/* insert FCoE stats from ramrod response */
if (!NO_FCOE(bp)) {
struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
- &bp->fw_stats_data->queue_stats[FCOE_IDX].
+ &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
tstorm_queue_statistics;
struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
- &bp->fw_stats_data->queue_stats[FCOE_IDX].
+ &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
xstorm_queue_statistics;
struct fcoe_statistics_params *fw_fcoe_stat =
@@ -3146,7 +3172,8 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
struct iscsi_stats_info *iscsi_stat =
&bp->slowpath->drv_info_to_mcp.iscsi_stat;
- memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+ memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
+ bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
iscsi_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
@@ -3176,6 +3203,12 @@ static void bnx2x_set_mf_bw(struct bnx2x *bp)
bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}
+static void bnx2x_handle_eee_event(struct bnx2x *bp)
+{
+ DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
+}
+
static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
{
enum drv_info_opcode op_code;
@@ -3742,6 +3775,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
if (val & DRV_STATUS_AFEX_EVENT_MASK)
bnx2x_handle_afex_cmd(bp,
val & DRV_STATUS_AFEX_EVENT_MASK);
+ if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
+ bnx2x_handle_eee_event(bp);
if (bp->link_vars.periodic_flags &
PERIODIC_FLAGS_LINK_EVENT) {
/* sync with link */
@@ -4615,11 +4650,11 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
case BNX2X_FILTER_MAC_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
#ifdef BCM_CNIC
- if (cid == BNX2X_ISCSI_ETH_CID)
+ if (cid == BNX2X_ISCSI_ETH_CID(bp))
vlan_mac_obj = &bp->iscsi_l2_mac_obj;
else
#endif
- vlan_mac_obj = &bp->fp[cid].mac_obj;
+ vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
break;
case BNX2X_FILTER_MCAST_PENDING:
@@ -4717,7 +4752,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
for_each_eth_queue(bp, q) {
/* Set the appropriate Queue object */
fp = &bp->fp[q];
- queue_params.q_obj = &fp->q_obj;
+ queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
/* send the ramrod */
rc = bnx2x_queue_state_change(bp, &queue_params);
@@ -4728,8 +4763,8 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
#ifdef BCM_CNIC
if (!NO_FCOE(bp)) {
- fp = &bp->fp[FCOE_IDX];
- queue_params.q_obj = &fp->q_obj;
+ fp = &bp->fp[FCOE_IDX(bp)];
+ queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
/* clear pending completion bit */
__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
@@ -4761,11 +4796,11 @@ static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
{
DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
#ifdef BCM_CNIC
- if (cid == BNX2X_FCOE_ETH_CID)
- return &bnx2x_fcoe(bp, q_obj);
+ if (cid == BNX2X_FCOE_ETH_CID(bp))
+ return &bnx2x_fcoe_sp_obj(bp, q_obj);
else
#endif
- return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj);
+ return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
}
static void bnx2x_eq_int(struct bnx2x *bp)
@@ -5647,15 +5682,15 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
/* init tx data */
for_each_cos_in_tx_queue(fp, cos) {
- bnx2x_init_txdata(bp, &fp->txdata[cos],
- CID_COS_TO_TX_ONLY_CID(fp->cid, cos),
- FP_COS_TO_TXQ(fp, cos),
- BNX2X_TX_SB_INDEX_BASE + cos);
- cids[cos] = fp->txdata[cos].cid;
+ bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
+ CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
+ FP_COS_TO_TXQ(fp, cos, bp),
+ BNX2X_TX_SB_INDEX_BASE + cos, fp);
+ cids[cos] = fp->txdata_ptr[cos]->cid;
}
- bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos,
- BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
+ fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
bnx2x_sp_mapping(bp, q_rdata), q_type);
/**
@@ -5706,7 +5741,7 @@ static void bnx2x_init_tx_rings(struct bnx2x *bp)
for_each_tx_queue(bp, i)
for_each_cos_in_tx_queue(&bp->fp[i], cos)
- bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
+ bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
}
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
@@ -7055,12 +7090,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
for (i = 0; i < L2_ILT_LINES(bp); i++) {
- ilt->lines[cdu_ilt_start + i].page =
- bp->context.vcxt + (ILT_PAGE_CIDS * i);
+ ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
ilt->lines[cdu_ilt_start + i].page_mapping =
- bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
- /* cdu ilt pages are allocated manually so there's no need to
- set the size */
+ bp->context[i].cxt_mapping;
+ ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
}
bnx2x_ilt_init_op(bp, INITOP_SET);
@@ -7327,6 +7360,8 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
void bnx2x_free_mem(struct bnx2x *bp)
{
+ int i;
+
/* fastpath */
bnx2x_free_fp_mem(bp);
/* end of fastpath */
@@ -7340,9 +7375,9 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath));
- BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
- bp->context.size);
-
+ for (i = 0; i < L2_ILT_LINES(bp); i++)
+ BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
+ bp->context[i].size);
bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
BNX2X_FREE(bp->ilt->lines);
@@ -7428,6 +7463,8 @@ alloc_mem_err:
int bnx2x_alloc_mem(struct bnx2x *bp)
{
+ int i, allocated, context_size;
+
#ifdef BCM_CNIC
if (!CHIP_IS_E1x(bp))
/* size = the status block + ramrod buffers */
@@ -7457,11 +7494,29 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
if (bnx2x_alloc_fw_stats_mem(bp))
goto alloc_mem_err;
- bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
-
- BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
- bp->context.size);
+ /* Allocate memory for CDU context:
+ * This memory is allocated separately and not in the generic ILT
+ * functions because CDU differs in a few aspects:
+ * 1. There are multiple entities allocating memory for context -
+ * 'regular' driver, CNIC and SRIOV driver. Each separately controls
+ * its own ILT lines.
+ * 2. Since CDU page-size is not a single 4KB page (which is the case
+ * for the other ILT clients), to be efficient we want to support
+ * allocation of sub-page-size in the last entry.
+ * 3. Context pointers are used by the driver to pass to FW / update
+ * the context (for the other ILT clients the pointers are used just to
+ * free the memory during unload).
+ */
+ context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
+ for (i = 0, allocated = 0; allocated < context_size; i++) {
+ bp->context[i].size = min(CDU_ILT_PAGE_SZ,
+ (context_size - allocated));
+ BNX2X_PCI_ALLOC(bp->context[i].vcxt,
+ &bp->context[i].cxt_mapping,
+ bp->context[i].size);
+ allocated += bp->context[i].size;
+ }
BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
@@ -7563,8 +7618,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
/* Eth MAC is set on RSS leading client (fp[0]) */
- return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set,
- BNX2X_ETH_MAC, &ramrod_flags);
+ return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
+ set, BNX2X_ETH_MAC, &ramrod_flags);
}
int bnx2x_setup_leading(struct bnx2x *bp)
@@ -7579,7 +7634,7 @@ int bnx2x_setup_leading(struct bnx2x *bp)
*
* In case of MSI-X it will also try to enable MSI-X.
*/
-static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
+void bnx2x_set_int_mode(struct bnx2x *bp)
{
switch (int_mode) {
case INT_MODE_MSI:
@@ -7590,11 +7645,6 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
BNX2X_DEV_INFO("set number of queues to 1\n");
break;
default:
- /* Set number of queues for MSI-X mode */
- bnx2x_set_num_queues(bp);
-
- BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
-
/* if we can't use MSI-X we only need one fp,
* so try to enable MSI-X with the requested number of fp's
* and fallback to MSI or legacy INTx with one fp
@@ -7735,6 +7785,8 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
{
u8 cos;
+ int cxt_index, cxt_offset;
+
/* FCoE Queue uses Default SB, thus has no HC capabilities */
if (!IS_FCOE_FP(fp)) {
__set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
@@ -7771,9 +7823,13 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
fp->index, init_params->max_cos);
/* set the context pointers queue object */
- for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++)
+ for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
+ cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
+ cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
+ ILT_PAGE_CIDS);
init_params->cxts[cos] =
- &bp->context.vcxt[fp->txdata[cos].cid].eth;
+ &bp->context[cxt_index].vcxt[cxt_offset].eth;
+ }
}
int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
@@ -7838,7 +7894,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
IGU_INT_ENABLE, 0);
- q_params.q_obj = &fp->q_obj;
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
/* We want to wait for completion in this context */
__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
@@ -7911,7 +7967,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
- q_params.q_obj = &fp->q_obj;
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
/* We want to wait for completion in this context */
__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
@@ -7922,7 +7978,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
tx_index++){
/* ascertain this is a normal queue*/
- txdata = &fp->txdata[tx_index];
+ txdata = fp->txdata_ptr[tx_index];
DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
txdata->txq_index);
@@ -8289,7 +8345,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos)
- rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]);
+ rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
#ifdef BNX2X_STOP_ON_ERROR
if (rc)
return;
@@ -8300,12 +8356,13 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
usleep_range(1000, 1000);
/* Clean all ETH MACs */
- rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false);
+ rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
+ false);
if (rc < 0)
BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
/* Clean up UC list */
- rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
+ rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
true);
if (rc < 0)
BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
@@ -9697,6 +9754,11 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
BC_SUPPORTS_PFC_STATS : 0;
+ bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
+ BC_SUPPORTS_FCOE_FEATURES : 0;
+
+ bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
+ BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
boot_mode = SHMEM_RD(bp,
dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
@@ -10082,7 +10144,7 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
int port = BP_PORT(bp);
u32 config;
- u32 ext_phy_type, ext_phy_config;
+ u32 ext_phy_type, ext_phy_config, eee_mode;
bp->link_params.bp = bp;
bp->link_params.port = port;
@@ -10149,6 +10211,19 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
bp->common.shmem_base,
bp->common.shmem2_base);
+
+ /* Configure link feature according to nvram value */
+ eee_mode = (((SHMEM_RD(bp, dev_info.
+ port_feature_config[port].eee_power_mode)) &
+ PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+ PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+ if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
+ bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
+ EEE_MODE_ENABLE_LPI |
+ EEE_MODE_OUTPUT_TIME;
+ } else {
+ bp->link_params.eee_mode = 0;
+ }
}
void bnx2x_get_iscsi_info(struct bnx2x *bp)
@@ -10997,7 +11072,7 @@ static int bnx2x_set_uc_list(struct bnx2x *bp)
int rc;
struct net_device *dev = bp->dev;
struct netdev_hw_addr *ha;
- struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+ struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
unsigned long ramrod_flags = 0;
/* First schedule a cleanup up of old configuration */
@@ -11503,8 +11578,7 @@ static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
}
}
-/**
- * IRO array is stored in the following format:
+/* IRO array is stored in the following format:
* {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
*/
static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
@@ -11672,7 +11746,7 @@ void bnx2x__init_func_obj(struct bnx2x *bp)
/* must be called after sriov-enable */
static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
{
- int cid_count = BNX2X_L2_CID_COUNT(bp);
+ int cid_count = BNX2X_L2_MAX_CID(bp);
#ifdef BCM_CNIC
cid_count += CNIC_CID_MAX;
@@ -11717,7 +11791,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
struct bnx2x *bp;
int pcie_width, pcie_speed;
int rc, max_non_def_sbs;
- int rx_count, tx_count, rss_count;
+ int rx_count, tx_count, rss_count, doorbell_size;
/*
* An estimated maximum supported CoS number according to the chip
* version.
@@ -11745,7 +11819,10 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
case BCM57800_MF:
case BCM57810:
case BCM57810_MF:
- case BCM57840:
+ case BCM57840_O:
+ case BCM57840_4_10:
+ case BCM57840_2_20:
+ case BCM57840_MFO:
case BCM57840_MF:
case BCM57811:
case BCM57811_MF:
@@ -11760,13 +11837,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
- /* !!! FIXME !!!
- * Do not allow the maximum SB count to grow above 16
- * since Special CIDs starts from 16*BNX2X_MULTI_TX_COS=48.
- * We will use the FP_SB_MAX_E1x macro for this matter.
- */
- max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs);
-
WARN_ON(!max_non_def_sbs);
/* Maximum number of RSS queues: one IGU SB goes to CNIC */
@@ -11777,9 +11847,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/*
* Maximum number of netdev Tx queues:
- * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
+ * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
*/
- tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT;
+ tx_count = rss_count * max_cos_est + FCOE_PRESENT;
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
@@ -11788,9 +11858,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
bp = netdev_priv(dev);
- BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
- tx_count, rx_count);
-
bp->igu_sb_cnt = max_non_def_sbs;
bp->msg_enable = debug;
pci_set_drvdata(pdev, dev);
@@ -11803,6 +11870,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
+ BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
+ tx_count, rx_count);
+
rc = bnx2x_init_bp(bp);
if (rc)
goto init_one_exit;
@@ -11811,9 +11881,15 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
* Map doorbells here as we need the real value of bp->max_cos which
* is initialized in bnx2x_init_bp().
*/
+ doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
+ if (doorbell_size > pci_resource_len(pdev, 2)) {
+ dev_err(&bp->pdev->dev,
+ "Cannot map doorbells, bar size too small, aborting\n");
+ rc = -ENOMEM;
+ goto init_one_exit;
+ }
bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
- min_t(u64, BNX2X_DB_SIZE(bp),
- pci_resource_len(pdev, 2)));
+ doorbell_size);
if (!bp->doorbells) {
dev_err(&bp->pdev->dev,
"Cannot map doorbell space, aborting\n");
@@ -11831,8 +11907,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
#endif
+
+ /* Set bp->num_queues for MSI-X mode*/
+ bnx2x_set_num_queues(bp);
+
/* Configure interrupt mode: try to enable MSI-X/MSI if
- * needed, set bp->num_queues appropriately.
+ * needed.
*/
bnx2x_set_int_mode(bp);
@@ -12176,6 +12256,7 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
struct eth_spe *spe;
+ int cxt_index, cxt_offset;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -12198,10 +12279,16 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
* ramrod
*/
if (type == ETH_CONNECTION_TYPE) {
- if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
- bnx2x_set_ctx_validation(bp, &bp->context.
- vcxt[BNX2X_ISCSI_ETH_CID].eth,
- BNX2X_ISCSI_ETH_CID);
+ if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
+ cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
+ ILT_PAGE_CIDS;
+ cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
+ (cxt_index * ILT_PAGE_CIDS);
+ bnx2x_set_ctx_validation(bp,
+ &bp->context[cxt_index].
+ vcxt[cxt_offset].eth,
+ BNX2X_ISCSI_ETH_CID(bp));
+ }
}
/*
@@ -12488,21 +12575,45 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
break;
}
case DRV_CTL_ULP_REGISTER_CMD: {
- int ulp_type = ctl->data.ulp_type;
+ int ulp_type = ctl->data.register_data.ulp_type;
if (CHIP_IS_E3(bp)) {
int idx = BP_FW_MB_IDX(bp);
- u32 cap;
+ u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+ int path = BP_PATH(bp);
+ int port = BP_PORT(bp);
+ int i;
+ u32 scratch_offset;
+ u32 *host_addr;
- cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+ /* first write capability to shmem2 */
if (ulp_type == CNIC_ULP_ISCSI)
cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
else if (ulp_type == CNIC_ULP_FCOE)
cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+
+ if ((ulp_type != CNIC_ULP_FCOE) ||
+ (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
+ (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
+ break;
+
+ /* if reached here - should write fcoe capabilities */
+ scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
+ if (!scratch_offset)
+ break;
+ scratch_offset += offsetof(struct glob_ncsi_oem_data,
+ fcoe_features[path][port]);
+ host_addr = (u32 *) &(ctl->data.register_data.
+ fcoe_features);
+ for (i = 0; i < sizeof(struct fcoe_capabilities);
+ i += 4)
+ REG_WR(bp, scratch_offset + i,
+ *(host_addr + i/4));
}
break;
}
+
case DRV_CTL_ULP_UNREGISTER_CMD: {
int ulp_type = ctl->data.ulp_type;
@@ -12554,6 +12665,21 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
cp->num_irq = 2;
}
+void bnx2x_setup_cnic_info(struct bnx2x *bp)
+{
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+
+ cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
+ bnx2x_cid_ilt_lines(bp);
+ cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
+ cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
+ cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
+
+ if (NO_ISCSI_OOO(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+}
+
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
void *data)
{
@@ -12632,10 +12758,10 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
cp->drv_ctl = bnx2x_drv_ctl;
cp->drv_register_cnic = bnx2x_register_cnic;
cp->drv_unregister_cnic = bnx2x_unregister_cnic;
- cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
+ cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
cp->iscsi_l2_client_id =
bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
- cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
+ cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
if (NO_ISCSI_OOO(bp))
cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
new file mode 100644
index 0000000..ddd5106
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
@@ -0,0 +1,168 @@
+/* bnx2x_mfw_req.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNX2X_MFW_REQ_H
+#define BNX2X_MFW_REQ_H
+
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_MAX 2
+#define NVM_PATH_MAX 2
+
+/* FCoE capabilities required from the driver */
+struct fcoe_capabilities {
+ u32 capability1;
+ /* Maximum number of I/Os per connection */
+ #define FCOE_IOS_PER_CONNECTION_MASK 0x0000ffff
+ #define FCOE_IOS_PER_CONNECTION_SHIFT 0
+ /* Maximum number of Logins per port */
+ #define FCOE_LOGINS_PER_PORT_MASK 0xffff0000
+ #define FCOE_LOGINS_PER_PORT_SHIFT 16
+
+ u32 capability2;
+ /* Maximum number of exchanges */
+ #define FCOE_NUMBER_OF_EXCHANGES_MASK 0x0000ffff
+ #define FCOE_NUMBER_OF_EXCHANGES_SHIFT 0
+ /* Maximum NPIV WWN per port */
+ #define FCOE_NPIV_WWN_PER_PORT_MASK 0xffff0000
+ #define FCOE_NPIV_WWN_PER_PORT_SHIFT 16
+
+ u32 capability3;
+ /* Maximum number of targets supported */
+ #define FCOE_TARGETS_SUPPORTED_MASK 0x0000ffff
+ #define FCOE_TARGETS_SUPPORTED_SHIFT 0
+ /* Maximum number of outstanding commands across all connections */
+ #define FCOE_OUTSTANDING_COMMANDS_MASK 0xffff0000
+ #define FCOE_OUTSTANDING_COMMANDS_SHIFT 16
+
+ u32 capability4;
+ #define FCOE_CAPABILITY4_STATEFUL 0x00000001
+ #define FCOE_CAPABILITY4_STATELESS 0x00000002
+ #define FCOE_CAPABILITY4_CAPABILITIES_REPORTED_VALID 0x00000004
+};
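+
+The capability words above use conventional mask/shift packing; a short illustrative sketch (the 64 and 8 values are made up, not from the patch):
+
+/* Illustrative only: report 64 I/Os per connection and 8 logins per
+ * port in capability1 using the masks and shifts defined above.
+ */
+static void fcoe_caps_fill_example(struct fcoe_capabilities *caps)
+{
+	caps->capability1 =
+		((64 << FCOE_IOS_PER_CONNECTION_SHIFT) &
+		 FCOE_IOS_PER_CONNECTION_MASK) |
+		((8 << FCOE_LOGINS_PER_PORT_SHIFT) &
+		 FCOE_LOGINS_PER_PORT_MASK);
+}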
+
+struct glob_ncsi_oem_data {
+ u32 driver_version;
+ u32 unused[3];
+ struct fcoe_capabilities fcoe_features[NVM_PATH_MAX][PORT_MAX];
+};
+
+/* current drv_info version */
+#define DRV_INFO_CUR_VER 2
+
+/* drv_info op codes supported */
+enum drv_info_opcode {
+ ETH_STATS_OPCODE,
+ FCOE_STATS_OPCODE,
+ ISCSI_STATS_OPCODE
+};
+
+#define ETH_STAT_INFO_VERSION_LEN 12
+/* Per PCI Function Ethernet Statistics required from the driver */
+struct eth_stats_info {
+ /* Function's Driver Version. padded to 12 */
+ u8 version[ETH_STAT_INFO_VERSION_LEN];
+ /* Locally Admin Addr. BigEndian EUI48. Actual size is 6 bytes */
+ u8 mac_local[8];
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ u32 mtu_size; /* MTU Size. Note : Negotiated MTU */
+ u32 feature_flags; /* Feature_Flags. */
+#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
+#define FEATURE_ETH_LSO_MASK 0x02
+#define FEATURE_ETH_BOOTMODE_MASK 0x1C
+#define FEATURE_ETH_BOOTMODE_SHIFT 2
+#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
+#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
+#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
+#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
+#define FEATURE_ETH_TOE_MASK 0x20
+ u32 lso_max_size; /* LSO MaxOffloadSize. */
+ u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */
+ /* Num Offloaded Connections TCP_IPv4. */
+ u32 ipv4_ofld_cnt;
+ /* Num Offloaded Connections TCP_IPv6. */
+ u32 ipv6_ofld_cnt;
+ u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */
+ u32 txq_size; /* TX Descriptors Queue Size */
+ u32 rxq_size; /* RX Descriptors Queue Size */
+ /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
+ u32 txq_avg_depth;
+ /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
+ u32 rxq_avg_depth;
+ /* IOV_Offload. 0=none; 1=MultiQueue; 2=VEB; 3=VEPA */
+ u32 iov_offload;
+ /* Number of NetQueue/VMQ Config'd. */
+ u32 netq_cnt;
+ u32 vf_cnt; /* Num VF assigned to this PF. */
+};
+
+/* Per PCI Function FCOE Statistics required from the driver */
+struct fcoe_stats_info {
+ u8 version[12]; /* Function's Driver Version. */
+ u8 mac_local[8]; /* Locally Admin Addr. */
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ u32 qos_priority;
+ u32 txq_size; /* FCoE TX Descriptors Queue Size. */
+ u32 rxq_size; /* FCoE RX Descriptors Queue Size. */
+ /* FCoE TX Descriptor Queue Avg Depth. */
+ u32 txq_avg_depth;
+ /* FCoE RX Descriptors Queue Avg Depth. */
+ u32 rxq_avg_depth;
+ u32 rx_frames_lo; /* FCoE RX Frames received. */
+ u32 rx_frames_hi; /* FCoE RX Frames received. */
+ u32 rx_bytes_lo; /* FCoE RX Bytes received. */
+ u32 rx_bytes_hi; /* FCoE RX Bytes received. */
+ u32 tx_frames_lo; /* FCoE TX Frames sent. */
+ u32 tx_frames_hi; /* FCoE TX Frames sent. */
+ u32 tx_bytes_lo; /* FCoE TX Bytes sent. */
+ u32 tx_bytes_hi; /* FCoE TX Bytes sent. */
+};
+
+/* Per PCI Function iSCSI Statistics required from the driver*/
+struct iscsi_stats_info {
+ u8 version[12]; /* Function's Driver Version. */
+ u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ u32 qos_priority;
+ u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */
+ u8 ww_port_name[64]; /* iSCSI World wide port name */
+ u8 boot_target_name[64];/* iSCSI Boot Target Name. */
+ u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */
+ u32 boot_target_portal; /* iSCSI Boot Target Portal. */
+ u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
+ u32 max_frame_size; /* Max Frame Size. bytes */
+ u32 txq_size; /* PDU TX Descriptors Queue Size. */
+ u32 rxq_size; /* PDU RX Descriptors Queue Size. */
+ u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
+ u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
+ u32 rx_pdus_lo; /* iSCSI PDUs received. */
+ u32 rx_pdus_hi; /* iSCSI PDUs received. */
+ u32 rx_bytes_lo; /* iSCSI RX Bytes received. */
+ u32 rx_bytes_hi; /* iSCSI RX Bytes received. */
+ u32 tx_pdus_lo; /* iSCSI PDUs sent. */
+ u32 tx_pdus_hi; /* iSCSI PDUs sent. */
+ u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
+ u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
+ u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable.
+ * 9 nibbles, the position of each nibble
+ * represents the C-PCP value, the value
+ * of the nibble = S-PCP value.
+ */
+};
+
+union drv_info_to_mcp {
+ struct eth_stats_info ether_stat;
+ struct fcoe_stats_info fcoe_stat;
+ struct iscsi_stats_info iscsi_stat;
+};
+#endif /* BNX2X_MFW_REQ_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index bbd3874..ec62a5c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1488,6 +1488,121 @@
* 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. */
#define MISC_REG_CHIP_TYPE 0xac60
#define MISC_REG_CHIP_TYPE_57811_MASK (1<<1)
+#define MISC_REG_CPMU_LP_DR_ENABLE 0xa858
+/* [RW 1] FW EEE LPI Enable. When 1 indicates that EEE LPI mode is enabled
+ * by FW. When 0 indicates that the EEE LPI mode is disabled by FW. Clk
+ * 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_FW_ENABLE_P0 0xa84c
+/* [RW 32] EEE LPI Idle Threshold. The threshold value for the idle EEE LPI
+ * counter. Timer tick is 1 us. Clock 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_IDLE_THR_P0 0xa8a0
+/* [RW 18] LPI entry events mask. [0] - Vmain SM Mask. When 1 indicates that
+ * the Vmain SM end state is disabled. When 0 indicates that the Vmain SM
+ * end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates that
+ * the FW command that all Queues are empty is disabled. When 0 indicates
+ * that the FW command that all Queues are empty is enabled. [2] - FW Early
+ * Exit Mask / Reserved (Entry mask). When 1 indicates that the FW Early
+ * Exit command is disabled. When 0 indicates that the FW Early Exit command
+ * is enabled. This bit is applicable only in the EXIT Events Mask registers.
+ * [3] - PBF Request Mask. When 1 indicates that the PBF Request indication
+ * is disabled. When 0 indicates that the PBF Request indication is enabled.
+ * [4] - Tx Request Mask. When =1 indicates that the Tx other Than PBF
+ * Request indication is disabled. When 0 indicates that the Tx Other Than
+ * PBF Request indication is enabled. [5] - Rx EEE LPI Status Mask. When 1
+ * indicates that the RX EEE LPI Status indication is disabled. When 0
+ * indicates that the RX EEE LPI Status indication is enabled. In the EXIT
+ * Events Masks registers; this bit masks the falling edge detect of the LPI
+ * Status (Rx LPI is on - off). [6] - Tx Pause Mask. When 1 indicates that
+ * the Tx Pause indication is disabled. When 0 indicates that the Tx Pause
+ * indication is enabled. [7] - BRB1 Empty Mask. When 1 indicates that the
+ * BRB1 EMPTY indication is disabled. When 0 indicates that the BRB1 EMPTY
+ * indication is enabled. [8] - QM Idle Mask. When 1 indicates that the QM
+ * IDLE indication is disabled. When 0 indicates that the QM IDLE indication
+ * is enabled. (One bit for both VOQ0 and VOQ1). [9] - QM LB Idle Mask. When
+ * 1 indicates that the QM IDLE indication for LOOPBACK is disabled. When 0
+ * indicates that the QM IDLE indication for LOOPBACK is enabled. [10] - L1
+ * Status Mask. When 1 indicates that the L1 Status indication from the PCIE
+ * CORE is disabled. When 0 indicates that the RX EEE LPI Status indication
+ * from the PCIE CORE is enabled. In the EXIT Events Masks registers; this
+ * bit masks the falling edge detect of the L1 status (L1 is on - off). [11]
+ * - P0 E0 EEE LPI REQ Mask. When =1 indicates that the P0 E0 EEE
+ * LPI REQ indication is disabled. When =0 indicates that the P0 E0 EEE LPI
+ * REQ indication is enabled. [12] - P1 E0 EEE LPI REQ Mask. When =1
+ * indicates that the P0 EEE LPI REQ indication is disabled. When =0
+ * indicates that the P0 EEE LPI REQ indication is enabled. [13] - P0 E1 EEE
+ * LPI REQ Mask. When =1 indicates that the P0 EEE LPI REQ indication is
+ * disabled. When =0 indicates that the P0 EEE LPI REQ indication is
+ * enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates that the P0 EEE
+ * LPI REQ indication is disabled. When =0 indicates that the P0 EEE LPI REQ
+ * indication is enabled. [15] - L1 REQ Mask. When =1 indicates that the L1
+ * REQ indication is disabled. When =0 indicates that the L1 indication is
+ * enabled. [16] - Rx EEE LPI Status Edge Detect Mask. When =1 indicates
+ * that the RX EEE LPI Status Falling Edge Detect indication is disabled (Rx
+ * EEE LPI is on - off). When =0 indicates that the RX EEE LPI Status
+ * Falling Edge Detect indication is enabled (Rx EEE LPI is on - off). This
+ * bit is applicable only in the EXIT Events Masks registers. [17] - L1
+ * Status Edge Detect Mask. When =1 indicates that the L1 Status Falling
+ * Edge Detect indication from the PCIE CORE is disabled (L1 is on - off).
+ * When =0 indicates that the L1 Status Falling Edge Detect indication from
+ * the PCIE CORE is enabled (L1 is on - off). This bit is applicable only in
+ * the EXIT Events Masks registers. Clock 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_MASK_ENT_P0 0xa880
+/* [RW 18] EEE LPI exit events mask. [0] - Vmain SM Mask. When 1 indicates
+ * that the Vmain SM end state is disabled. When 0 indicates that the Vmain
+ * SM end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates
+ * that the FW command that all Queues are empty is disabled. When 0
+ * indicates that the FW command that all Queues are empty is enabled. [2] -
+ * FW Early Exit Mask / Reserved (Entry mask). When 1 indicates that the FW
+ * Early Exit command is disabled. When 0 indicates that the FW Early Exit
+ * command is enabled. This bit is applicable only in the EXIT Events Mask
+ * registers. [3] - PBF Request Mask. When 1 indicates that the PBF Request
+ * indication is disabled. When 0 indicates that the PBF Request indication
+ * is enabled. [4] - Tx Request Mask. When =1 indicates that the Tx other
+ * Than PBF Request indication is disabled. When 0 indicates that the Tx
+ * Other Than PBF Request indication is enabled. [5] - Rx EEE LPI Status
+ * Mask. When 1 indicates that the RX EEE LPI Status indication is disabled.
+ * When 0 indicates that the RX LPI Status indication is enabled. In the
+ * EXIT Events Masks registers; this bit masks the falling edge detect of
+ * the EEE LPI Status (Rx EEE LPI is on - off). [6] - Tx Pause Mask. When 1
+ * indicates that the Tx Pause indication is disabled. When 0 indicates that
+ * the Tx Pause indication is enabled. [7] - BRB1 Empty Mask. When 1
+ * indicates that the BRB1 EMPTY indication is disabled. When 0 indicates
+ * that the BRB1 EMPTY indication is enabled. [8] - QM Idle Mask. When 1
+ * indicates that the QM IDLE indication is disabled. When 0 indicates that
+ * the QM IDLE indication is enabled. (One bit for both VOQ0 and VOQ1). [9]
+ * - QM LB Idle Mask. When 1 indicates that the QM IDLE indication for
+ * LOOPBACK is disabled. When 0 indicates that the QM IDLE indication for
+ * LOOPBACK is enabled. [10] - L1 Status Mask. When 1 indicates that the L1
+ * Status indication from the PCIE CORE is disabled. When 0 indicates that
+ * the RX EEE LPI Status indication from the PCIE CORE is enabled. In the
+ * EXIT Events Masks registers; this bit masks the falling edge detect of
+ * the L1 status (L1 is on - off). [11] - P0 E0 EEE LPI REQ Mask. When
+ * =1 indicates that the P0 E0 EEE LPI REQ indication is disabled. When
+ * =0 indicates that the P0 E0 EEE LPI REQ indication is enabled. [12] - P1
+ * E0 EEE LPI REQ Mask. When =1 indicates that the P0 EEE LPI REQ indication
+ * is disabled. When =0 indicates that the P0 EEE LPI REQ indication is
+ * enabled. [13] - P0 E1 EEE LPI REQ Mask. When =1 indicates that the P0 EEE
+ * LPI REQ indication is disabled. When =0 indicates that the P0 EEE LPI REQ
+ * indication is enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates
+ * that the P0 EEE LPI REQ indication is disabled. When =0 indicates that
+ * the P0 EEE LPI REQ indication is enabled. [15] - L1 REQ Mask. When =1
+ * indicates that the L1 REQ indication is disabled. When =0 indicates that
+ * the L1 indication is enabled. [16] - Rx EEE LPI Status Edge Detect Mask.
+ * When =1 indicates that the RX EEE LPI Status Falling Edge Detect
+ * indication is disabled (Rx EEE LPI is on - off). When =0 indicates that
+ * the RX EEE LPI Status Falling Edge Detect indication is enabled (Rx EEE
+ * LPI is on - off). This bit is applicable only in the EXIT Events Masks
+ * registers. [17] - L1 Status Edge Detect Mask. When =1 indicates that the
+ * L1 Status Falling Edge Detect indication from the PCIE CORE is disabled
+ * (L1 is on - off). When =0 indicates that the L1 Status Falling Edge
+ * Detect indication from the PCIE CORE is enabled (L1 is on - off). This
+ * bit is applicable only in the EXIT Events Masks registers. Clock 25MHz.
+ * Reset on hard reset. */
+#define MISC_REG_CPMU_LP_MASK_EXT_P0 0xa888
+/* [RW 16] EEE LPI Entry Events Counter. A statistic counter with the number
+ * of counts that the SM entered the EEE LPI state. Clock 25MHz. Read only
+ * register. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_SM_ENT_CNT_P0 0xa8b8
/* [RW 32] The following driver registers(1...16) represent 16 drivers and
32 clients. Each client can be controlled by one driver only. One in each
bit represent that this driver control the appropriate client (Ex: bit 5
@@ -5372,6 +5487,8 @@
/* [RW 32] Lower 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC
* packets transmitted by the MAC */
#define XMAC_REG_CTRL_SA_LO 0x28
+#define XMAC_REG_EEE_CTRL 0xd8
+#define XMAC_REG_EEE_TIMERS_HI 0xe4
#define XMAC_REG_PAUSE_CTRL 0x68
#define XMAC_REG_PFC_CTRL 0x70
#define XMAC_REG_PFC_CTRL_HI 0x74
@@ -5796,6 +5913,7 @@
#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
#define MISC_REGISTERS_SPIO_SET_POS 8
#define HW_LOCK_MAX_RESOURCE_VALUE 31
+#define HW_LOCK_RESOURCE_DCBX_ADMIN_MIB 13
#define HW_LOCK_RESOURCE_DRV_FLAGS 10
#define HW_LOCK_RESOURCE_GPIO 1
#define HW_LOCK_RESOURCE_MDIO 0
@@ -6813,6 +6931,8 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
#define MDIO_AN_REG_LP_AUTO_NEG2 0x0014
#define MDIO_AN_REG_MASTER_STATUS 0x0021
+#define MDIO_AN_REG_EEE_ADV 0x003c
+#define MDIO_AN_REG_LP_EEE_ADV 0x003d
/*bcm*/
#define MDIO_AN_REG_LINK_STATUS 0x8304
#define MDIO_AN_REG_CL37_CL73 0x8370
@@ -6866,6 +6986,8 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
/* BCM84833 only */
+#define MDIO_84833_TOP_CFG_FW_REV 0x400f
+#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1
#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
#define MDIO_84833_SUPER_ISOLATE 0x8000
/* These are mailbox register set used by 84833. */
@@ -6993,11 +7115,13 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_WC_REG_DIGITAL3_UP1 0x8329
#define MDIO_WC_REG_DIGITAL3_LP_UP1 0x832c
#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c
+#define MDIO_WC_REG_DIGITAL4_MISC5 0x833e
#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345
#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349
#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e
#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350
#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368
+#define MDIO_WC_REG_EEE_COMBO_CONTROL0 0x8390
#define MDIO_WC_REG_TX66_CONTROL 0x83b0
#define MDIO_WC_REG_RX66_CONTROL 0x83c0
#define MDIO_WC_REG_RX66_SCW0 0x83c2
@@ -7036,6 +7160,7 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_REG_GPHY_EEE_1G (0x1 << 2)
#define MDIO_REG_GPHY_EEE_100 (0x1 << 1)
#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
+#define MDIO_REG_GPHY_AUX_STATUS 0x19
#define MDIO_REG_INTR_STATUS 0x1a
#define MDIO_REG_INTR_MASK 0x1b
#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
@@ -7150,8 +7275,7 @@ Theotherbitsarereservedandshouldbezero*/
#define CDU_REGION_NUMBER_UCM_AG 4
-/**
- * String-to-compress [31:8] = CID (all 24 bits)
+/* String-to-compress [31:8] = CID (all 24 bits)
* String-to-compress [7:4] = Region
* String-to-compress [3:0] = Type
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 6c14b4a..734fd87 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4107,6 +4107,10 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
+ if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
+
if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
@@ -4115,6 +4119,10 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
+ if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
+
/* Hashing mask */
data->rss_result_mask = p->rss_result_mask;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index efd80bd..f83e033 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -167,9 +167,8 @@ typedef int (*exe_q_remove)(struct bnx2x *bp,
union bnx2x_qable_obj *o,
struct bnx2x_exeq_elem *elem);
-/**
- * @return positive is entry was optimized, 0 - if not, negative
- * in case of an error.
+/* Return positive if entry was optimized, 0 - if not, negative
+ * in case of an error.
*/
typedef int (*exe_q_optimize)(struct bnx2x *bp,
union bnx2x_qable_obj *o,
@@ -694,8 +693,10 @@ enum {
BNX2X_RSS_IPV4,
BNX2X_RSS_IPV4_TCP,
+ BNX2X_RSS_IPV4_UDP,
BNX2X_RSS_IPV6,
BNX2X_RSS_IPV6_TCP,
+ BNX2X_RSS_IPV6_UDP,
};
struct bnx2x_config_rss_params {
@@ -729,6 +730,10 @@ struct bnx2x_rss_config_obj {
/* Last configured indirection table */
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ /* flags for enabling 4-tuple hash on UDP */
+ u8 udp_rss_v4;
+ u8 udp_rss_v6;
+
int (*config_rss)(struct bnx2x *bp,
struct bnx2x_config_rss_params *p);
};
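The new BNX2X_RSS_IPV4_UDP/BNX2X_RSS_IPV6_UDP flag bits and the udp_rss_v4/udp_rss_v6 fields above are meant to be tied together by the configuration path. A minimal sketch of such a caller, using only names visible in this header (the function itself is invented for illustration):

/* Illustrative sketch, not part of this patch. */
static void example_fill_rss_flags(struct bnx2x_rss_config_obj *rss_obj,
				   struct bnx2x_config_rss_params *params)
{
	__set_bit(BNX2X_RSS_IPV4, &params->rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params->rss_flags);
	__set_bit(BNX2X_RSS_IPV6, &params->rss_flags);
	__set_bit(BNX2X_RSS_IPV6_TCP, &params->rss_flags);

	/* 4-tuple UDP hashing is opt-in, mirrored from the object flags */
	if (rss_obj->udp_rss_v4)
		__set_bit(BNX2X_RSS_IPV4_UDP, &params->rss_flags);
	if (rss_obj->udp_rss_v6)
		__set_bit(BNX2X_RSS_IPV6_UDP, &params->rss_flags);
}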
@@ -1280,12 +1285,11 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
struct bnx2x_rx_mode_obj *o);
/**
- * Send and RX_MODE ramrod according to the provided parameters.
+ * bnx2x_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
*
- * @param bp
- * @param p Command parameters
+ * @p: Command parameters
*
- * @return 0 - if operation was successfull and there is no pending completions,
+ * Return: 0 - if operation was successful and there are no pending completions,
* positive number - if there are pending completions,
* negative - if there were errors
*/
@@ -1302,7 +1306,11 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
bnx2x_obj_type type);
/**
- * Configure multicast MACs list. May configure a new list
+ * bnx2x_config_mcast - Configure multicast MACs list.
+ *
+ * @cmd: command to execute: BNX2X_MCAST_CMD_X
+ *
+ * May configure a new list
* provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up
* (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current
* configuration, continue to execute the pending commands
@@ -1313,11 +1321,7 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
* the current command will be enqueued to the tail of the
* pending commands list.
*
- * @param bp
- * @param p
- * @param command to execute: BNX2X_MCAST_CMD_X
- *
- * @return 0 is operation was sucessfull and there are no pending completions,
+ * Return: 0 if operation was successful and there are no pending completions,
* negative if there were errors, positive if there are pending
* completions.
*/
@@ -1342,21 +1346,17 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
bnx2x_obj_type type);
/**
- * Updates RSS configuration according to provided parameters.
- *
- * @param bp
- * @param p
+ * bnx2x_config_rss - Updates RSS configuration according to provided parameters
*
- * @return 0 in case of success
+ * Return: 0 in case of success
*/
int bnx2x_config_rss(struct bnx2x *bp,
struct bnx2x_config_rss_params *p);
/**
- * Return the current ind_table configuration.
+ * bnx2x_get_rss_ind_table - Return the current ind_table configuration.
*
- * @param bp
- * @param ind_table buffer to fill with the current indirection
+ * @ind_table: buffer to fill with the current indirection
* table content. Should be at least
* T_ETH_INDIRECTION_TABLE_SIZE bytes long.
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 1e2785c..667d890 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -785,6 +785,10 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
pstats->host_port_stats_counter++;
+ if (CHIP_IS_E3(bp))
+ estats->eee_tx_lpi += REG_RD(bp,
+ MISC_REG_CPMU_LP_SM_ENT_CNT_P0);
+
if (!BP_NOMCP(bp)) {
u32 nig_timer_max =
SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
@@ -855,17 +859,22 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
struct tstorm_per_queue_stats *tclient =
&bp->fw_stats_data->queue_stats[i].
tstorm_queue_statistics;
- struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
+ struct tstorm_per_queue_stats *old_tclient =
+ &bnx2x_fp_stats(bp, fp)->old_tclient;
struct ustorm_per_queue_stats *uclient =
&bp->fw_stats_data->queue_stats[i].
ustorm_queue_statistics;
- struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
+ struct ustorm_per_queue_stats *old_uclient =
+ &bnx2x_fp_stats(bp, fp)->old_uclient;
struct xstorm_per_queue_stats *xclient =
&bp->fw_stats_data->queue_stats[i].
xstorm_queue_statistics;
- struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
- struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
- struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
+ struct xstorm_per_queue_stats *old_xclient =
+ &bnx2x_fp_stats(bp, fp)->old_xclient;
+ struct bnx2x_eth_q_stats *qstats =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
u32 diff;
@@ -1048,8 +1057,11 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
tmp = estats->mac_discard;
- for_each_rx_queue(bp, i)
- tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
+ for_each_rx_queue(bp, i) {
+ struct tstorm_per_queue_stats *old_tclient =
+ &bp->fp_stats[i].old_tclient;
+ tmp += le32_to_cpu(old_tclient->checksum_discard);
+ }
nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;
nstats->tx_dropped = 0;
@@ -1099,9 +1111,9 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
int i;
for_each_queue(bp, i) {
- struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
+ struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
struct bnx2x_eth_q_stats_old *qstats_old =
- &bp->fp[i].eth_q_stats_old;
+ &bp->fp_stats[i].eth_q_stats_old;
UPDATE_ESTAT_QSTAT(driver_xoff);
UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
@@ -1309,12 +1321,9 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
bnx2x_stats_comp(bp);
}
-/**
- * This function will prepare the statistics ramrod data the way
+/* This function will prepare the statistics ramrod data the way
* we will only have to increment the statistics counter and
* send the ramrod each time we have to.
- *
- * @param bp
*/
static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
@@ -1428,7 +1437,7 @@ static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
query[first_queue_query_index + i];
cur_query_entry->kind = STATS_TYPE_QUEUE;
- cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]);
+ cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
cur_query_entry->address.hi =
cpu_to_le32(U64_HI(cur_data_offset));
@@ -1479,15 +1488,19 @@ void bnx2x_stats_init(struct bnx2x *bp)
/* function stats */
for_each_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
-
- memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
- memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
- memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
+ struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
+
+ memset(&fp_stats->old_tclient, 0,
+ sizeof(fp_stats->old_tclient));
+ memset(&fp_stats->old_uclient, 0,
+ sizeof(fp_stats->old_uclient));
+ memset(&fp_stats->old_xclient, 0,
+ sizeof(fp_stats->old_xclient));
if (bp->stats_init) {
- memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
- memset(&fp->eth_q_stats_old, 0,
- sizeof(fp->eth_q_stats_old));
+ memset(&fp_stats->eth_q_stats, 0,
+ sizeof(fp_stats->eth_q_stats));
+ memset(&fp_stats->eth_q_stats_old, 0,
+ sizeof(fp_stats->eth_q_stats_old));
}
}
@@ -1529,8 +1542,10 @@ void bnx2x_save_statistics(struct bnx2x *bp)
/* save queue statistics */
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
- struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
+ struct bnx2x_eth_q_stats *qstats =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
@@ -1569,7 +1584,7 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
struct bnx2x_eth_stats *estats = &bp->eth_stats;
struct per_queue_stats *fcoe_q_stats =
- &bp->fw_stats_data->queue_stats[FCOE_IDX];
+ &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];
struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
&fcoe_q_stats->tstorm_queue_statistics;
@@ -1586,8 +1601,7 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
memset(afex_stats, 0, sizeof(struct afex_stats));
for_each_eth_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
- struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+ struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
ADD_64(afex_stats->rx_unicast_bytes_hi,
qstats->total_unicast_bytes_received_hi,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 93e689fd..24b8e50 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -203,6 +203,8 @@ struct bnx2x_eth_stats {
/* Recovery */
u32 recoverable_error;
u32 unrecoverable_error;
+ /* src: Clear-on-Read register; Will not survive PMF Migration */
+ u32 eee_tx_lpi;
};
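The eee_tx_lpi counter added here is fed from a clear-on-read register (see the MISC_REG_CPMU_LP_SM_ENT_CNT_P0 read in the bnx2x_hw_stats_update hunk above), so the driver accumulates it rather than overwriting it. The pattern, reduced to a sketch with an invented read helper:

/* Sketch: accumulating a clear-on-read hardware counter.  The function
 * pointer stands in for a REG_RD() of the clear-on-read register; each
 * read returns only the events seen since the previous read.
 */
static void example_accumulate_lpi(struct bnx2x_eth_stats *estats,
				   u32 (*read_cor_reg)(void))
{
	/* += is required: the hardware value resets on every read, so a
	 * plain assignment would discard everything counted so far.
	 */
	estats->eee_tx_lpi += read_cor_reg();
}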
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index c95e7b5..3b4fc61 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -256,11 +256,16 @@ static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
+ struct fcoe_capabilities *fcoe_cap =
+ &info.data.register_data.fcoe_features;
- if (reg)
+ if (reg) {
info.cmd = DRV_CTL_ULP_REGISTER_CMD;
- else
+ if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
+ memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
+ } else {
info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
+ }
info.data.ulp_type = ulp_type;
ethdev->drv_ctl(dev->netdev, &info);
@@ -286,6 +291,9 @@ static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
u32 i;
+ if (!cp->ctx_tbl)
+ return -EINVAL;
+
for (i = 0; i < cp->max_cid_space; i++) {
if (cp->ctx_tbl[i].cid == cid) {
*l5_cid = i;
@@ -534,7 +542,8 @@ int cnic_unregister_driver(int ulp_type)
}
if (atomic_read(&ulp_ops->ref_count) != 0)
- netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
+ pr_warn("%s: Failed waiting for ref count to go to zero\n",
+ __func__);
return 0;
out_unlock:
@@ -611,6 +620,8 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
if (ulp_type == CNIC_ULP_ISCSI)
cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+ else if (ulp_type == CNIC_ULP_FCOE)
+ dev->fcoe_cap = NULL;
synchronize_rcu();
@@ -1053,12 +1064,13 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo = &udev->cnic_uinfo;
- uinfo->mem[0].addr = dev->netdev->base_addr;
+ uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
uinfo->mem[0].internal_addr = dev->regview;
- uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
uinfo->mem[0].memtype = UIO_MEM_PHYS;
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+ uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
+ TX_MAX_TSS_RINGS + 1);
uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
PAGE_MASK;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
@@ -1068,6 +1080,8 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo->name = "bnx2_cnic";
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+ uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
+
uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
PAGE_MASK;
uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
@@ -2585,7 +2599,7 @@ static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
return;
}
- cqes[0] = (struct kcqe *) &kcqe;
+ cqes[0] = &kcqe;
cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
}
@@ -3213,6 +3227,9 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
u32 l5_cid;
struct cnic_local *cp = dev->cnic_priv;
+ if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ break;
+
if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
@@ -3943,6 +3960,15 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
cnic_cm_upcall(cp, csk, opcode);
break;
+ case L5CM_RAMROD_CMD_ID_CLOSE:
+ if (l4kcqe->status != 0) {
+ netdev_warn(dev->netdev, "RAMROD CLOSE compl with "
+ "status 0x%x\n", l4kcqe->status);
+ opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
+ /* Fall through */
+ } else {
+ break;
+ }
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
@@ -4246,8 +4272,6 @@ static int cnic_cm_shutdown(struct cnic_dev *dev)
struct cnic_local *cp = dev->cnic_priv;
int i;
- cp->stop_cm(dev);
-
if (!cp->csk_tbl)
return 0;
@@ -4665,9 +4689,9 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
cp->kcq1.sw_prod_idx = 0;
cp->kcq1.hw_prod_idx_ptr =
- (u16 *) &sblk->status_completion_producer_index;
+ &sblk->status_completion_producer_index;
- cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
+ cp->kcq1.status_idx_ptr = &sblk->status_idx;
/* Initialize the kernel complete queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
@@ -4693,9 +4717,9 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
cp->kcq1.hw_prod_idx_ptr =
- (u16 *) &msblk->status_completion_producer_index;
- cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
- cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
+ &msblk->status_completion_producer_index;
+ cp->kcq1.status_idx_ptr = &msblk->status_idx;
+ cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
@@ -4977,8 +5001,14 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
cp->port_mode = CHIP_PORT_MODE_NONE;
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
- u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
+ u32 val;
+ pci_read_config_dword(dev->pcidev, PCICFG_ME_REGISTER, &val);
+ cp->func = (u8) ((val & ME_REG_ABS_PF_NUM) >>
+ ME_REG_ABS_PF_NUM_SHIFT);
+ func = CNIC_FUNC(cp);
+
+ val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
if (!(val & 1))
val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
else
@@ -5283,6 +5313,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
i++;
}
cnic_shutdown_rings(dev);
+ cp->stop_cm(dev);
clear_bit(CNIC_F_CNIC_UP, &dev->flags);
RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
synchronize_rcu();
@@ -5512,9 +5543,7 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
rcu_read_unlock();
}
-/**
- * netdev event handler
- */
+/* netdev event handler */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
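The cnic_init_uio() hunk above stops deriving the UIO mapping from the net_device address fields and takes it from PCI BAR 0 instead; only the BNX2X branch uses the full BAR length, while the BNX2 branch caps the size as shown in the diff. The generic shape of that mapping, as a sketch:

#include <linux/pci.h>
#include <linux/uio_driver.h>

/* Sketch: describe a physical-memory UIO region straight from PCI BAR 0. */
static void example_fill_uio_bar0(struct uio_info *uinfo, struct pci_dev *pdev,
				  void __iomem *regview)
{
	uinfo->mem[0].addr = pci_resource_start(pdev, 0);
	uinfo->mem[0].size = pci_resource_len(pdev, 0);
	uinfo->mem[0].internal_addr = regview;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;
}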
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 289274e..5cb8888 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -12,8 +12,10 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.5.10"
-#define CNIC_MODULE_RELDATE "March 21, 2012"
+#include "bnx2x/bnx2x_mfw_req.h"
+
+#define CNIC_MODULE_VERSION "2.5.12"
+#define CNIC_MODULE_RELDATE "June 29, 2012"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -131,6 +133,11 @@ struct drv_ctl_l2_ring {
u32 cid;
};
+struct drv_ctl_register_data {
+ int ulp_type;
+ struct fcoe_capabilities fcoe_features;
+};
+
struct drv_ctl_info {
int cmd;
union {
@@ -138,6 +145,7 @@ struct drv_ctl_info {
struct drv_ctl_io io;
struct drv_ctl_l2_ring ring;
int ulp_type;
+ struct drv_ctl_register_data register_data;
char bytes[MAX_DRV_CTL_DATA];
} data;
};
@@ -305,6 +313,7 @@ struct cnic_dev {
int max_rdma_conn;
union drv_info_to_mcp *stats_addr;
+ struct fcoe_capabilities *fcoe_cap;
void *cnic_priv;
};
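The new fcoe_cap pointer in struct cnic_dev and the register_data member of struct drv_ctl_info work together: when the FCoE ULP registers, cnic_ulp_ctl() (see the cnic.c hunk earlier) copies *dev->fcoe_cap into register_data.fcoe_features for the bnx2x side, and the unregister path clears the pointer again. A sketch of the producer side, with invented names and contents:

/* The FCoE ULP publishes its capability block before registering so the
 * copy in cnic_ulp_ctl() has something to forward.
 */
static struct fcoe_capabilities example_fcoe_caps;	/* contents invented */

static void example_publish_fcoe_caps(struct cnic_dev *dev)
{
	dev->fcoe_cap = &example_fcoe_caps;
}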
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index edeeb51..fce4c1e 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -44,6 +44,10 @@
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
+#if IS_ENABLED(CONFIG_HWMON)
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#endif
#include <net/checksum.h>
#include <net/ip.h>
@@ -298,6 +302,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -730,44 +735,131 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
-static void tg3_ape_send_event(struct tg3 *tp, u32 event)
+static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
- int i;
u32 apedata;
- /* NCSI does not support APE events */
- if (tg3_flag(tp, APE_HAS_NCSI))
- return;
+ while (timeout_us) {
+ if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
+ return -EBUSY;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
+ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+ break;
+
+ tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+
+ udelay(10);
+ timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
+ }
+
+ return timeout_us ? 0 : -EBUSY;
+}
+
+static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
+{
+ u32 i, apedata;
+
+ for (i = 0; i < timeout_us / 10; i++) {
+ apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
+
+ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+ break;
+
+ udelay(10);
+ }
+
+ return i == timeout_us / 10;
+}
+
+int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off, u32 len)
+{
+ int err;
+ u32 i, bufoff, msgoff, maxlen, apedata;
+
+ if (!tg3_flag(tp, APE_HAS_NCSI))
+ return 0;
apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
if (apedata != APE_SEG_SIG_MAGIC)
- return;
+ return -ENODEV;
apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
if (!(apedata & APE_FW_STATUS_READY))
- return;
+ return -EAGAIN;
- /* Wait for up to 1 millisecond for APE to service previous event. */
- for (i = 0; i < 10; i++) {
- if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
- return;
+ bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
+ TG3_APE_SHMEM_BASE;
+ msgoff = bufoff + 2 * sizeof(u32);
+ maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
- apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
+ while (len) {
+ u32 length;
- if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
- tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
- event | APE_EVENT_STATUS_EVENT_PENDING);
+ /* Cap xfer sizes to scratchpad limits. */
+ length = (len > maxlen) ? maxlen : len;
+ len -= length;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+ if (!(apedata & APE_FW_STATUS_READY))
+ return -EAGAIN;
+
+ /* Wait for up to 1 msec for APE to service previous event. */
+ err = tg3_ape_event_lock(tp, 1000);
+ if (err)
+ return err;
+
+ apedata = APE_EVENT_STATUS_DRIVER_EVNT |
+ APE_EVENT_STATUS_SCRTCHPD_READ |
+ APE_EVENT_STATUS_EVENT_PENDING;
+ tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
+
+ tg3_ape_write32(tp, bufoff, base_off);
+ tg3_ape_write32(tp, bufoff + sizeof(u32), length);
tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+ tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
- if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
- break;
+ base_off += length;
- udelay(100);
+ if (tg3_ape_wait_for_event(tp, 30000))
+ return -EAGAIN;
+
+ for (i = 0; length; i += 4, length -= 4) {
+ u32 val = tg3_ape_read32(tp, msgoff + i);
+ memcpy(data, &val, sizeof(u32));
+ data++;
+ }
}
- if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
- tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
+ return 0;
+}
+
+static int tg3_ape_send_event(struct tg3 *tp, u32 event)
+{
+ int err;
+ u32 apedata;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
+ if (apedata != APE_SEG_SIG_MAGIC)
+ return -EAGAIN;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+ if (!(apedata & APE_FW_STATUS_READY))
+ return -EAGAIN;
+
+ /* Wait for up to 1 millisecond for APE to service previous event. */
+ err = tg3_ape_event_lock(tp, 1000);
+ if (err)
+ return err;
+
+ tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
+ event | APE_EVENT_STATUS_EVENT_PENDING);
+
+ tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+ tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
+
+ return 0;
}
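tg3_ape_event_lock() and tg3_ape_wait_for_event() above are both instances of a bounded 10 µs polling loop; the core of that pattern, stripped of the APE specifics (all names here are invented):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Poll check(arg) every 10 us until it succeeds or timeout_us expires. */
static int example_poll_us(bool (*check)(void *arg), void *arg, u32 timeout_us)
{
	while (timeout_us) {
		if (check(arg))
			return 0;

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	/* one final look to avoid declaring failure on a late success */
	return check(arg) ? 0 : -EBUSY;
}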
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
@@ -9393,6 +9485,110 @@ static int tg3_init_hw(struct tg3 *tp, int reset_phy)
return tg3_reset_hw(tp, reset_phy);
}
+#if IS_ENABLED(CONFIG_HWMON)
+static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
+{
+ int i;
+
+ for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
+ u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
+
+ tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
+ off += len;
+
+ if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
+ !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
+ memset(ocir, 0, TG3_OCIR_LEN);
+ }
+}
+
+/* sysfs attributes for hwmon */
+static ssize_t tg3_show_temp(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(netdev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ u32 temperature;
+
+ spin_lock_bh(&tp->lock);
+ tg3_ape_scratchpad_read(tp, &temperature, attr->index,
+ sizeof(temperature));
+ spin_unlock_bh(&tp->lock);
+ return sprintf(buf, "%u\n", temperature);
+}
+
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
+ TG3_TEMP_SENSOR_OFFSET);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
+ TG3_TEMP_CAUTION_OFFSET);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
+ TG3_TEMP_MAX_OFFSET);
+
+static struct attribute *tg3_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group tg3_group = {
+ .attrs = tg3_attributes,
+};
+
+#endif
+
+static void tg3_hwmon_close(struct tg3 *tp)
+{
+#if IS_ENABLED(CONFIG_HWMON)
+ if (tp->hwmon_dev) {
+ hwmon_device_unregister(tp->hwmon_dev);
+ tp->hwmon_dev = NULL;
+ sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
+ }
+#endif
+}
+
+static void tg3_hwmon_open(struct tg3 *tp)
+{
+#if IS_ENABLED(CONFIG_HWMON)
+ int i, err;
+ u32 size = 0;
+ struct pci_dev *pdev = tp->pdev;
+ struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
+
+ tg3_sd_scan_scratchpad(tp, ocirs);
+
+ for (i = 0; i < TG3_SD_NUM_RECS; i++) {
+ if (!ocirs[i].src_data_length)
+ continue;
+
+ size += ocirs[i].src_hdr_length;
+ size += ocirs[i].src_data_length;
+ }
+
+ if (!size)
+ return;
+
+ /* Register hwmon sysfs hooks */
+ err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
+ return;
+ }
+
+ tp->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(tp->hwmon_dev)) {
+ tp->hwmon_dev = NULL;
+ dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
+ sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
+ }
+#endif
+}
+
+
#define TG3_STAT_ADD32(PSTAT, REG) \
do { u32 __val = tr32(REG); \
(PSTAT)->low += __val; \
@@ -9908,7 +10104,7 @@ static bool tg3_enable_msix(struct tg3 *tp)
int i, rc;
struct msix_entry msix_ent[tp->irq_max];
- tp->irq_cnt = num_online_cpus();
+ tp->irq_cnt = netif_get_num_default_rss_queues();
if (tp->irq_cnt > 1) {
/* We want as many rx rings enabled as there are cpus.
* In multiqueue MSI-X mode, the first MSI-X vector
@@ -10101,6 +10297,8 @@ static int tg3_open(struct net_device *dev)
tg3_phy_start(tp);
+ tg3_hwmon_open(tp);
+
tg3_full_lock(tp, 0);
tg3_timer_start(tp);
@@ -10150,6 +10348,8 @@ static int tg3_close(struct net_device *dev)
tg3_timer_stop(tp);
+ tg3_hwmon_close(tp);
+
tg3_phy_stop(tp);
tg3_full_lock(tp, 1);
@@ -13857,14 +14057,9 @@ static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
}
}
-static void __devinit tg3_read_dash_ver(struct tg3 *tp)
+static void __devinit tg3_probe_ncsi(struct tg3 *tp)
{
- int vlen;
u32 apedata;
- char *fwtype;
-
- if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
- return;
apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
if (apedata != APE_SEG_SIG_MAGIC)
@@ -13874,14 +14069,22 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
if (!(apedata & APE_FW_STATUS_READY))
return;
+ if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
+ tg3_flag_set(tp, APE_HAS_NCSI);
+}
+
+static void __devinit tg3_read_dash_ver(struct tg3 *tp)
+{
+ int vlen;
+ u32 apedata;
+ char *fwtype;
+
apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
- if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
- tg3_flag_set(tp, APE_HAS_NCSI);
+ if (tg3_flag(tp, APE_HAS_NCSI))
fwtype = "NCSI";
- } else {
+ else
fwtype = "DASH";
- }
vlen = strlen(tp->fw_ver);
@@ -13915,20 +14118,17 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
tg3_read_sb_ver(tp, val);
else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
tg3_read_hwsb_ver(tp);
- else
- return;
-
- if (vpd_vers)
- goto done;
- if (tg3_flag(tp, ENABLE_APE)) {
- if (tg3_flag(tp, ENABLE_ASF))
- tg3_read_dash_ver(tp);
- } else if (tg3_flag(tp, ENABLE_ASF)) {
- tg3_read_mgmtfw_ver(tp);
+ if (tg3_flag(tp, ENABLE_ASF)) {
+ if (tg3_flag(tp, ENABLE_APE)) {
+ tg3_probe_ncsi(tp);
+ if (!vpd_vers)
+ tg3_read_dash_ver(tp);
+ } else if (!vpd_vers) {
+ tg3_read_mgmtfw_ver(tp);
+ }
}
-done:
tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
@@ -14275,7 +14475,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
}
}
- if (tg3_flag(tp, 5755_PLUS))
+ if (tg3_flag(tp, 5755_PLUS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
tg3_flag_set(tp, SHORT_DMA_BUG);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 93865f8..a1b75cd 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2311,10 +2311,11 @@
#define APE_LOCK_REQ_DRIVER 0x00001000
#define TG3_APE_LOCK_GRANT 0x004c
#define APE_LOCK_GRANT_DRIVER 0x00001000
-#define TG3_APE_SEG_SIG 0x4000
-#define APE_SEG_SIG_MAGIC 0x41504521
/* APE shared memory. Accessible through BAR1 */
+#define TG3_APE_SHMEM_BASE 0x4000
+#define TG3_APE_SEG_SIG 0x4000
+#define APE_SEG_SIG_MAGIC 0x41504521
#define TG3_APE_FW_STATUS 0x400c
#define APE_FW_STATUS_READY 0x00000100
#define TG3_APE_FW_FEATURES 0x4010
@@ -2327,6 +2328,8 @@
#define APE_FW_VERSION_REVMSK 0x0000ff00
#define APE_FW_VERSION_REVSFT 8
#define APE_FW_VERSION_BLDMSK 0x000000ff
+#define TG3_APE_SEG_MSG_BUF_OFF 0x401c
+#define TG3_APE_SEG_MSG_BUF_LEN 0x4020
#define TG3_APE_HOST_SEG_SIG 0x4200
#define APE_HOST_SEG_SIG_MAGIC 0x484f5354
#define TG3_APE_HOST_SEG_LEN 0x4204
@@ -2353,6 +2356,8 @@
#define APE_EVENT_STATUS_DRIVER_EVNT 0x00000010
#define APE_EVENT_STATUS_STATE_CHNGE 0x00000500
+#define APE_EVENT_STATUS_SCRTCHPD_READ 0x00001600
+#define APE_EVENT_STATUS_SCRTCHPD_WRITE 0x00001700
#define APE_EVENT_STATUS_STATE_START 0x00010000
#define APE_EVENT_STATUS_STATE_UNLOAD 0x00020000
#define APE_EVENT_STATUS_STATE_WOL 0x00030000
@@ -2671,6 +2676,40 @@ struct tg3_hw_stats {
u8 __reserved4[0xb00-0x9c8];
};
+#define TG3_SD_NUM_RECS 3
+#define TG3_OCIR_LEN (sizeof(struct tg3_ocir))
+#define TG3_OCIR_SIG_MAGIC 0x5253434f
+#define TG3_OCIR_FLAG_ACTIVE 0x00000001
+
+#define TG3_TEMP_CAUTION_OFFSET 0xc8
+#define TG3_TEMP_MAX_OFFSET 0xcc
+#define TG3_TEMP_SENSOR_OFFSET 0xd4
+
+
+struct tg3_ocir {
+ u32 signature;
+ u16 version_flags;
+ u16 refresh_int;
+ u32 refresh_tmr;
+ u32 update_tmr;
+ u32 dst_base_addr;
+ u16 src_hdr_offset;
+ u16 src_hdr_length;
+ u16 src_data_offset;
+ u16 src_data_length;
+ u16 dst_hdr_offset;
+ u16 dst_data_offset;
+ u16 dst_reg_upd_offset;
+ u16 dst_sem_offset;
+ u32 reserved1[2];
+ u32 port0_flags;
+ u32 port1_flags;
+ u32 port2_flags;
+ u32 port3_flags;
+ u32 reserved2[1];
+};
+
+
/* 'mapping' is superfluous as the chip does not write into
* the tx/rx post rings so we could just fetch it from there.
* But the cache behavior is better how we are doing it now.
@@ -3206,6 +3245,10 @@ struct tg3 {
const char *fw_needed;
const struct firmware *fw;
u32 fw_len; /* includes BSS */
+
+#if IS_ENABLED(CONFIG_HWMON)
+ struct device *hwmon_dev;
+#endif
};
#endif /* !(_T3_H) */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index 689e5e1..550d252 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -52,13 +52,7 @@ bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg)
}
/**
- * bfa_cee_attr_meminfo()
- *
- * @brief Returns the size of the DMA memory needed by CEE attributes
- *
- * @param[in] void
- *
- * @return Size of DMA region
+ * bfa_cee_attr_meminfo - Returns the size of the DMA memory needed by CEE attributes
*/
static u32
bfa_cee_attr_meminfo(void)
@@ -66,13 +60,7 @@ bfa_cee_attr_meminfo(void)
return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ);
}
/**
- * bfa_cee_stats_meminfo()
- *
- * @brief Returns the size of the DMA memory needed by CEE stats
- *
- * @param[in] void
- *
- * @return Size of DMA region
+ * bfa_cee_stats_meminfo - Returns the size of the DMA memory needed by CEE stats
*/
static u32
bfa_cee_stats_meminfo(void)
@@ -81,14 +69,10 @@ bfa_cee_stats_meminfo(void)
}
/**
- * bfa_cee_get_attr_isr()
- *
- * @brief CEE ISR for get-attributes responses from f/w
- *
- * @param[in] cee - Pointer to the CEE module
- * status - Return status from the f/w
+ * bfa_cee_get_attr_isr - CEE ISR for get-attributes responses from f/w
*
- * @return void
+ * @cee: Pointer to the CEE module
+ * @status: Return status from the f/w
*/
static void
bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
@@ -105,14 +89,10 @@ bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
}
/**
- * bfa_cee_get_attr_isr()
- *
- * @brief CEE ISR for get-stats responses from f/w
+ * bfa_cee_get_stats_isr - CEE ISR for get-stats responses from f/w
*
- * @param[in] cee - Pointer to the CEE module
- * status - Return status from the f/w
- *
- * @return void
+ * @cee: Pointer to the CEE module
+ * @status: Return status from the f/w
*/
static void
bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status)
@@ -147,13 +127,7 @@ bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
}
/**
- * bfa_nw_cee_meminfo()
- *
- * @brief Returns the size of the DMA memory needed by CEE module
- *
- * @param[in] void
- *
- * @return Size of DMA region
+ * bfa_nw_cee_meminfo - Returns the size of the DMA memory needed by CEE module
*/
u32
bfa_nw_cee_meminfo(void)
@@ -162,15 +136,11 @@ bfa_nw_cee_meminfo(void)
}
/**
- * bfa_nw_cee_mem_claim()
- *
- * @brief Initialized CEE DMA Memory
- *
- * @param[in] cee CEE module pointer
- * dma_kva Kernel Virtual Address of CEE DMA Memory
- * dma_pa Physical Address of CEE DMA Memory
+ * bfa_nw_cee_mem_claim - Initialize CEE DMA Memory
*
- * @return void
+ * @cee: CEE module pointer
+ * @dma_kva: Kernel Virtual Address of CEE DMA Memory
+ * @dma_pa: Physical Address of CEE DMA Memory
*/
void
bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
@@ -185,13 +155,11 @@ bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
}
/**
- * bfa_cee_get_attr()
- *
- * @brief Send the request to the f/w to fetch CEE attributes.
+ * bfa_nw_cee_get_attr - Send the request to the f/w to fetch CEE attributes.
*
- * @param[in] Pointer to the CEE module data structure.
+ * @cee: Pointer to the CEE module data structure.
*
- * @return Status
+ * Return: status
*/
enum bfa_status
bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
@@ -220,13 +188,7 @@ bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
}
/**
- * bfa_cee_isrs()
- *
- * @brief Handles Mail-box interrupts for CEE module.
- *
- * @param[in] Pointer to the CEE module data structure.
- *
- * @return void
+ * bfa_cee_isrs - Handles Mail-box interrupts for CEE module.
*/
static void
@@ -253,14 +215,9 @@ bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
}
/**
- * bfa_cee_notify()
- *
- * @brief CEE module heart-beat failure handler.
- * @brief CEE module IOC event handler.
- *
- * @param[in] IOC event type
+ * bfa_cee_notify - CEE module heart-beat failure handler.
*
- * @return void
+ * @event: IOC event type
*/
static void
@@ -307,17 +264,13 @@ bfa_cee_notify(void *arg, enum bfa_ioc_event event)
}
/**
- * bfa_nw_cee_attach()
- *
- * @brief CEE module-attach API
+ * bfa_nw_cee_attach - CEE module-attach API
*
- * @param[in] cee - Pointer to the CEE module data structure
- * ioc - Pointer to the ioc module data structure
- * dev - Pointer to the device driver module data structure
- * The device driver specific mbox ISR functions have
- * this pointer as one of the parameters.
- *
- * @return void
+ * @cee: Pointer to the CEE module data structure
+ * @ioc: Pointer to the ioc module data structure
+ * @dev: Pointer to the device driver module data structure.
+ * The device driver specific mbox ISR functions have
+ * this pointer as one of the parameters.
*/
void
bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
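bfa_nw_cee_meminfo() and bfa_nw_cee_mem_claim(), documented above, are used as a pair: the caller sizes and allocates one coherent DMA region, then hands both addresses to the CEE module. A rough sketch of that sequence (the dma_alloc_coherent() call is a generic stand-in, not the bna driver's actual allocation path):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_cee_setup(struct bfa_cee *cee, struct device *dmadev)
{
	u32 len = bfa_nw_cee_meminfo();
	dma_addr_t dma_pa;
	u8 *dma_kva;

	dma_kva = dma_alloc_coherent(dmadev, len, &dma_pa, GFP_KERNEL);
	if (!dma_kva)
		return -ENOMEM;

	/* CEE keeps both the kernel virtual and the DMA address */
	bfa_nw_cee_mem_claim(cee, dma_kva, dma_pa);
	return 0;
}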
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h
index 3da1a94..ad004a4 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h
@@ -16,23 +16,18 @@
* www.brocade.com
*/
-/**
- * @file bfa_cs.h BFA common services
- */
+/* BFA common services */
#ifndef __BFA_CS_H__
#define __BFA_CS_H__
#include "cna.h"
-/**
- * @ BFA state machine interfaces
- */
+/* BFA state machine interfaces */
typedef void (*bfa_sm_t)(void *sm, int event);
-/**
- * oc - object class eg. bfa_ioc
+/* oc - object class eg. bfa_ioc
* st - state, eg. reset
* otype - object type, eg. struct bfa_ioc
* etype - object type, eg. enum ioc_event
@@ -45,9 +40,7 @@ typedef void (*bfa_sm_t)(void *sm, int event);
#define bfa_sm_get_state(_sm) ((_sm)->sm)
#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
-/**
- * For converting from state machine function to state encoding.
- */
+/* For converting from state machine function to state encoding. */
struct bfa_sm_table {
bfa_sm_t sm; /*!< state machine function */
int state; /*!< state machine encoding */
@@ -55,13 +48,10 @@ struct bfa_sm_table {
};
#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
-/**
- * State machine with entry actions.
- */
+/* State machine with entry actions. */
typedef void (*bfa_fsm_t)(void *fsm, int event);
-/**
- * oc - object class eg. bfa_ioc
+/* oc - object class eg. bfa_ioc
* st - state, eg. reset
* otype - object type, eg. struct bfa_ioc
* etype - object type, eg. enum ioc_event
@@ -90,9 +80,7 @@ bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
return smt[i].state;
}
-/**
- * @ Generic wait counter.
- */
+/* Generic wait counter. */
typedef void (*bfa_wc_resume_t) (void *cbarg);
@@ -116,9 +104,7 @@ bfa_wc_down(struct bfa_wc *wc)
wc->wc_resume(wc->wc_cbarg);
}
-/**
- * Initialize a waiting counter.
- */
+/* Initialize a waiting counter. */
static inline void
bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
{
@@ -128,9 +114,7 @@ bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
bfa_wc_up(wc);
}
-/**
- * Wait for counter to reach zero
- */
+/* Wait for counter to reach zero */
static inline void
bfa_wc_wait(struct bfa_wc *wc)
{
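The generic wait counter documented above is typically driven like this: one reference is taken by bfa_wc_init(), one per outstanding operation by bfa_wc_up(), and the resume callback fires once every completion has called bfa_wc_down() and bfa_wc_wait() has released the initial reference. A sketch with invented callers:

static void example_all_done(void *cbarg)
{
	/* continue bring-up/teardown once everything has completed */
}

static void example_quiesce(struct bfa_wc *wc, int nr_ops,
			    void (*start_op)(struct bfa_wc *wc))
{
	int i;

	bfa_wc_init(wc, example_all_done, wc);	/* takes one reference */

	for (i = 0; i < nr_ops; i++) {
		bfa_wc_up(wc);			/* one per operation */
		start_op(wc);	/* each completion must call bfa_wc_down(wc) */
	}

	bfa_wc_wait(wc);	/* drop the init reference; may resume at once */
}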
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index 48f8773..e423f82 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -26,13 +26,9 @@
#define BFA_STRING_32 32
#define BFA_VERSION_LEN 64
-/**
- * ---------------------- adapter definitions ------------
- */
+/* ---------------------- adapter definitions ------------ */
-/**
- * BFA adapter level attributes.
- */
+/* BFA adapter level attributes. */
enum {
BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
/*
@@ -74,18 +70,14 @@ struct bfa_adapter_attr {
u8 trunk_capable;
};
-/**
- * ---------------------- IOC definitions ------------
- */
+/* ---------------------- IOC definitions ------------ */
enum {
BFA_IOC_DRIVER_LEN = 16,
BFA_IOC_CHIP_REV_LEN = 8,
};
-/**
- * Driver and firmware versions.
- */
+/* Driver and firmware versions. */
struct bfa_ioc_driver_attr {
char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */
char driver_ver[BFA_VERSION_LEN]; /*!< driver version */
@@ -95,9 +87,7 @@ struct bfa_ioc_driver_attr {
char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */
};
-/**
- * IOC PCI device attributes
- */
+/* IOC PCI device attributes */
struct bfa_ioc_pci_attr {
u16 vendor_id; /*!< PCI vendor ID */
u16 device_id; /*!< PCI device ID */
@@ -108,9 +98,7 @@ struct bfa_ioc_pci_attr {
char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */
};
-/**
- * IOC states
- */
+/* IOC states */
enum bfa_ioc_state {
BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */
BFA_IOC_RESET = 2, /*!< IOC is in reset state */
@@ -127,9 +115,7 @@ enum bfa_ioc_state {
BFA_IOC_HWFAIL = 13, /*!< PCI mapping doesn't exist */
};
-/**
- * IOC firmware stats
- */
+/* IOC firmware stats */
struct bfa_fw_ioc_stats {
u32 enable_reqs;
u32 disable_reqs;
@@ -139,9 +125,7 @@ struct bfa_fw_ioc_stats {
u32 unknown_reqs;
};
-/**
- * IOC driver stats
- */
+/* IOC driver stats */
struct bfa_ioc_drv_stats {
u32 ioc_isrs;
u32 ioc_enables;
@@ -157,9 +141,7 @@ struct bfa_ioc_drv_stats {
u32 rsvd;
};
-/**
- * IOC statistics
- */
+/* IOC statistics */
struct bfa_ioc_stats {
struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */
struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */
@@ -171,9 +153,7 @@ enum bfa_ioc_type {
BFA_IOC_TYPE_LL = 3,
};
-/**
- * IOC attributes returned in queries
- */
+/* IOC attributes returned in queries */
struct bfa_ioc_attr {
enum bfa_ioc_type ioc_type;
enum bfa_ioc_state state; /*!< IOC state */
@@ -187,22 +167,16 @@ struct bfa_ioc_attr {
u8 rsvd[4]; /*!< 64bit align */
};
-/**
- * Adapter capability mask definition
- */
+/* Adapter capability mask definition */
enum {
BFA_CM_HBA = 0x01,
BFA_CM_CNA = 0x02,
BFA_CM_NIC = 0x04,
};
-/**
- * ---------------------- mfg definitions ------------
- */
+/* ---------------------- mfg definitions ------------ */
-/**
- * Checksum size
- */
+/* Checksum size */
#define BFA_MFG_CHKSUM_SIZE 16
#define BFA_MFG_PARTNUM_SIZE 14
@@ -213,8 +187,7 @@ enum {
#pragma pack(1)
-/**
- * @brief BFA adapter manufacturing block definition.
+/* BFA adapter manufacturing block definition.
*
* All numerical fields are in big-endian format.
*/
@@ -256,9 +229,7 @@ struct bfa_mfg_block {
#pragma pack()
-/**
- * ---------------------- pci definitions ------------
- */
+/* ---------------------- pci definitions ------------ */
/*
* PCI device ID information
@@ -275,9 +246,7 @@ enum {
#define bfa_asic_id_ctc(device) \
(bfa_asic_id_ct(device) || bfa_asic_id_ct2(device))
-/**
- * PCI sub-system device and vendor ID information
- */
+/* PCI sub-system device and vendor ID information */
enum {
BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
BFA_PCI_CT2_SSID_FCoE = 0x22,
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
index 8ab33ee..b39c5f2 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
@@ -20,10 +20,7 @@
#include "bfa_defs.h"
-/**
- * @brief
- * FC physical port statistics.
- */
+/* FC physical port statistics. */
struct bfa_port_fc_stats {
u64 secs_reset; /*!< Seconds since stats is reset */
u64 tx_frames; /*!< Tx frames */
@@ -59,10 +56,7 @@ struct bfa_port_fc_stats {
u64 bbsc_link_resets; /*!< Credit Recovery-Link Resets */
};
-/**
- * @brief
- * Eth Physical Port statistics.
- */
+/* Eth Physical Port statistics. */
struct bfa_port_eth_stats {
u64 secs_reset; /*!< Seconds since stats is reset */
u64 frame_64; /*!< Frames 64 bytes */
@@ -108,10 +102,7 @@ struct bfa_port_eth_stats {
u64 tx_iscsi_zero_pause; /*!< Tx iSCSI zero pause */
};
-/**
- * @brief
- * Port statistics.
- */
+/* Port statistics. */
union bfa_port_stats_u {
struct bfa_port_fc_stats fc;
struct bfa_port_eth_stats eth;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
index 6681fe8..7fb396f 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
@@ -20,33 +20,23 @@
#include "bfa_defs.h"
-/**
- * Manufacturing block version
- */
+/* Manufacturing block version */
#define BFA_MFG_VERSION 3
#define BFA_MFG_VERSION_UNINIT 0xFF
-/**
- * Manufacturing block encrypted version
- */
+/* Manufacturing block encrypted version */
#define BFA_MFG_ENC_VER 2
-/**
- * Manufacturing block version 1 length
- */
+/* Manufacturing block version 1 length */
#define BFA_MFG_VER1_LEN 128
-/**
- * Manufacturing block header length
- */
+/* Manufacturing block header length */
#define BFA_MFG_HDR_LEN 4
#define BFA_MFG_SERIALNUM_SIZE 11
#define STRSZ(_n) (((_n) + 4) & ~3)
-/**
- * Manufacturing card type
- */
+/* Manufacturing card type */
enum {
BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */
BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */
@@ -70,9 +60,7 @@ enum {
#pragma pack(1)
-/**
- * Check if Mezz card
- */
+/* Check if Mezz card */
#define bfa_mfg_is_mezz(type) (( \
(type) == BFA_MFG_TYPE_JAYHAWK || \
(type) == BFA_MFG_TYPE_WANCHESE || \
@@ -127,9 +115,7 @@ do { \
} \
} while (0)
-/**
- * VPD data length
- */
+/* VPD data length */
#define BFA_MFG_VPD_LEN 512
#define BFA_MFG_VPD_LEN_INVALID 0
@@ -137,9 +123,7 @@ do { \
#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */
#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */
-/**
- * VPD vendor tag
- */
+/* VPD vendor tag */
enum {
BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */
BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */
@@ -151,8 +135,7 @@ enum {
BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */
};
-/**
- * @brief BFA adapter flash vpd data definition.
+/* BFA adapter flash vpd data definition.
*
* All numerical fields are in big-endian format.
*/
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
index 7c5fe6c..ea9af9a 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
@@ -18,8 +18,7 @@
#ifndef __BFA_DEFS_STATUS_H__
#define __BFA_DEFS_STATUS_H__
-/**
- * API status return values
+/* API status return values
*
* NOTE: The error msgs are auto generated from the comments. Only singe line
* comments are supported
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 0b640fa..959c58e 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -20,13 +20,9 @@
#include "bfi_reg.h"
#include "bfa_defs.h"
-/**
- * IOC local definitions
- */
+/* IOC local definitions */
-/**
- * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
- */
+/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
#define bfa_ioc_firmware_lock(__ioc) \
((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
@@ -96,9 +92,7 @@ static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
-/**
- * IOC state machine definitions/declarations
- */
+/* IOC state machine definitions/declarations */
enum ioc_event {
IOC_E_RESET = 1, /*!< IOC reset request */
IOC_E_ENABLE = 2, /*!< IOC enable request */
@@ -148,9 +142,7 @@ static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);
-/**
- * IOCPF state machine events
- */
+/* IOCPF state machine events */
enum iocpf_event {
IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */
IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */
@@ -166,9 +158,7 @@ enum iocpf_event {
IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */
};
-/**
- * IOCPF states
- */
+/* IOCPF states */
enum bfa_iocpf_state {
BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */
BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
@@ -215,21 +205,15 @@ static struct bfa_sm_table iocpf_sm_table[] = {
{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
-/**
- * IOC State Machine
- */
+/* IOC State Machine */
-/**
- * Beginning state. IOC uninit state.
- */
+/* Beginning state. IOC uninit state. */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
}
-/**
- * IOC is in uninit state.
- */
+/* IOC is in uninit state. */
static void
bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -243,18 +227,14 @@ bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
}
}
-/**
- * Reset entry actions -- initialize state machine
- */
+/* Reset entry actions -- initialize state machine */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
-/**
- * IOC is in reset state.
- */
+/* IOC is in reset state. */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -282,8 +262,7 @@ bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
bfa_iocpf_enable(ioc);
}
-/**
- * Host IOC function is being enabled, awaiting response from firmware.
+/* Host IOC function is being enabled, awaiting response from firmware.
* Semaphore is acquired.
*/
static void
@@ -325,9 +304,7 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
}
}
-/**
- * Semaphore should be acquired for version check.
- */
+/* Semaphore should be acquired for version check. */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
@@ -336,9 +313,7 @@ bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
bfa_ioc_send_getattr(ioc);
}
-/**
- * IOC configuration in progress. Timer is active.
- */
+/* IOC configuration in progress. Timer is active. */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -419,9 +394,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
bfa_iocpf_disable(ioc);
}
-/**
- * IOC is being disabled
- */
+/* IOC is being disabled */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -449,9 +422,7 @@ bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
}
}
-/**
- * IOC disable completion entry.
- */
+/* IOC disable completion entry. */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
@@ -485,9 +456,7 @@ bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
}
-/**
- * Hardware initialization retry.
- */
+/* Hardware initialization retry. */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -534,9 +503,7 @@ bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
}
-/**
- * IOC failure.
- */
+/* IOC failure. */
static void
bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -568,9 +535,7 @@ bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
{
}
-/**
- * IOC failure.
- */
+/* IOC failure. */
static void
bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -593,13 +558,9 @@ bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
}
}
-/**
- * IOCPF State Machine
- */
+/* IOCPF State Machine */
-/**
- * Reset entry actions -- initialize state machine
- */
+/* Reset entry actions -- initialize state machine */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
@@ -607,9 +568,7 @@ bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
iocpf->auto_recover = bfa_nw_auto_recover;
}
-/**
- * Beginning state. IOC is in reset state.
- */
+/* Beginning state. IOC is in reset state. */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -626,9 +585,7 @@ bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
}
-/**
- * Semaphore should be acquired for version check.
- */
+/* Semaphore should be acquired for version check. */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
@@ -636,9 +593,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
- * Awaiting h/w semaphore to continue with version check.
- */
+/* Awaiting h/w semaphore to continue with version check. */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -683,9 +638,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
}
-/**
- * Notify enable completion callback
- */
+/* Notify enable completion callback */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
{
@@ -698,9 +651,7 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
msecs_to_jiffies(BFA_IOC_TOV));
}
-/**
- * Awaiting firmware version match.
- */
+/* Awaiting firmware version match. */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -727,18 +678,14 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
}
-/**
- * Request for semaphore.
- */
+/* Request for semaphore. */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
{
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
- * Awaiting semaphore for h/w initialzation.
- */
+/* Awaiting semaphore for h/w initialization. */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -778,8 +725,7 @@ bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
bfa_ioc_reset(iocpf->ioc, false);
}
-/**
- * Hardware is being initialized. Interrupts are enabled.
+/* Hardware is being initialized. Interrupts are enabled.
* Holding hardware semaphore lock.
*/
static void
@@ -822,8 +768,7 @@ bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
bfa_ioc_send_enable(iocpf->ioc);
}
-/**
- * Host IOC function is being enabled, awaiting response from firmware.
+/* Host IOC function is being enabled, awaiting response from firmware.
* Semaphore is acquired.
*/
static void
@@ -896,9 +841,7 @@ bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
bfa_ioc_send_disable(iocpf->ioc);
}
-/**
- * IOC is being disabled
- */
+/* IOC is being disabled */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -935,9 +878,7 @@ bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
- * IOC hb ack request is being removed.
- */
+/* IOC hb ack request is being removed. */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -963,9 +904,7 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
}
-/**
- * IOC disable completion entry.
- */
+/* IOC disable completion entry. */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
@@ -1000,9 +939,7 @@ bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
- * Hardware initialization failed.
- */
+/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -1046,9 +983,7 @@ bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
}
-/**
- * Hardware initialization failed.
- */
+/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -1084,9 +1019,7 @@ bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
- * IOC is in failed state.
- */
+/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -1134,10 +1067,7 @@ bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
}
-/**
- * @brief
- * IOC is in failed state.
- */
+/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -1151,13 +1081,9 @@ bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
}
-/**
- * BFA IOC private functions
- */
+/* BFA IOC private functions */
-/**
- * Notify common modules registered for notification.
- */
+/* Notify common modules registered for notification. */
static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
@@ -1298,10 +1224,7 @@ bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
del_timer(&ioc->sem_timer);
}
-/**
- * @brief
- * Initialize LPU local memory (aka secondary memory / SRAM)
- */
+/* Initialize LPU local memory (aka secondary memory / SRAM) */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
@@ -1366,9 +1289,7 @@ bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
-/**
- * Get driver and firmware versions.
- */
+/* Get driver and firmware versions. */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
@@ -1388,9 +1309,7 @@ bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
}
}
-/**
- * Returns TRUE if same.
- */
+/* Returns TRUE if same. */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
@@ -1408,8 +1327,7 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
return true;
}
-/**
- * Return true if current running version is valid. Firmware signature and
+/* Return true if current running version is valid. Firmware signature and
* execution context (driver/bios) must match.
*/
static bool
@@ -1430,9 +1348,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}
-/**
- * Conditionally flush any pending message from firmware at start.
- */
+/* Conditionally flush any pending message from firmware at start. */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
@@ -1443,9 +1359,6 @@ bfa_ioc_msgflush(struct bfa_ioc *ioc)
writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
-/**
- * @img ioc_init_logic.jpg
- */
static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
@@ -1603,10 +1516,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
del_timer(&ioc->hb_timer);
}
-/**
- * @brief
- * Initiate a full firmware download.
- */
+/* Initiate a full firmware download. */
static void
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
u32 boot_env)
@@ -1672,9 +1582,7 @@ bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
bfa_ioc_hwinit(ioc, force);
}
-/**
- * BFA ioc enable reply by firmware
- */
+/* BFA ioc enable reply by firmware */
static void
bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
u8 cap_bm)
@@ -1686,10 +1594,7 @@ bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
}
-/**
- * @brief
- * Update BFA configuration from firmware configuration.
- */
+/* Update BFA configuration from firmware configuration. */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
@@ -1702,9 +1607,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
-/**
- * Attach time initialization of mbox logic.
- */
+/* Attach time initialization of mbox logic. */
static void
bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
{
@@ -1718,9 +1621,7 @@ bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
}
}
-/**
- * Mbox poll timer -- restarts any pending mailbox requests.
- */
+/* Mbox poll timer -- restarts any pending mailbox requests. */
static void
bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
{
@@ -1760,9 +1661,7 @@ bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
}
}
-/**
- * Cleanup any pending requests.
- */
+/* Cleanup any pending requests. */
static void
bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
{
@@ -1774,12 +1673,12 @@ bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
}
/**
- * Read data from SMEM to host through PCI memmap
+ * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap
*
- * @param[in] ioc memory for IOC
- * @param[in] tbuf app memory to store data from smem
- * @param[in] soff smem offset
- * @param[in] sz size of smem in bytes
+ * @ioc: memory for IOC
+ * @tbuf: app memory to store data from smem
+ * @soff: smem offset
+ * @sz: size of smem in bytes
*/
static int
bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
@@ -1826,9 +1725,7 @@ bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
return 0;
}
-/**
- * Retrieve saved firmware trace from a prior IOC failure.
- */
+/* Retrieve saved firmware trace from a prior IOC failure. */
int
bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
{
@@ -1844,9 +1741,7 @@ bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
return status;
}
-/**
- * Save firmware trace if configured.
- */
+/* Save firmware trace if configured. */
static void
bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
{
@@ -1861,9 +1756,7 @@ bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
}
}
-/**
- * Retrieve saved firmware trace from a prior IOC failure.
- */
+/* Retrieve saved firmware trace from a prior IOC failure. */
int
bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
{
@@ -1892,9 +1785,7 @@ bfa_ioc_fail_notify(struct bfa_ioc *ioc)
bfa_nw_ioc_debug_save_ftrc(ioc);
}
-/**
- * IOCPF to IOC interface
- */
+/* IOCPF to IOC interface */
static void
bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
{
@@ -1928,9 +1819,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}
-/**
- * IOC public
- */
+/* IOC public */
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
{
@@ -1954,8 +1843,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
return BFA_STATUS_OK;
}
-/**
- * Interface used by diag module to do firmware boot with memory test
+/* Interface used by diag module to do firmware boot with memory test
* as the entry vector.
*/
static void
@@ -1983,9 +1871,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
bfa_ioc_lpu_start(ioc);
}
-/**
- * Enable/disable IOC failure auto recovery.
- */
+/* Enable/disable IOC failure auto recovery. */
void
bfa_nw_ioc_auto_recover(bool auto_recover)
{
@@ -2056,10 +1942,10 @@ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
}
/**
- * IOC attach time initialization and setup.
+ * bfa_nw_ioc_attach - IOC attach time initialization and setup.
*
- * @param[in] ioc memory for IOC
- * @param[in] bfa driver instance structure
+ * @ioc: memory for IOC
+ * @bfa: driver instance structure
*/
void
bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
@@ -2078,9 +1964,7 @@ bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
bfa_fsm_send_event(ioc, IOC_E_RESET);
}
-/**
- * Driver detach time IOC cleanup.
- */
+/* Driver detach time IOC cleanup. */
void
bfa_nw_ioc_detach(struct bfa_ioc *ioc)
{
@@ -2091,9 +1975,9 @@ bfa_nw_ioc_detach(struct bfa_ioc *ioc)
}
/**
- * Setup IOC PCI properties.
+ * bfa_nw_ioc_pci_init - Setup IOC PCI properties.
*
- * @param[in] pcidev PCI device information for this IOC
+ * @pcidev: PCI device information for this IOC
*/
void
bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
@@ -2160,10 +2044,10 @@ bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
}
/**
- * Initialize IOC dma memory
+ * bfa_nw_ioc_mem_claim - Initialize IOC dma memory
*
- * @param[in] dm_kva kernel virtual address of IOC dma memory
- * @param[in] dm_pa physical address of IOC dma memory
+ * @dm_kva: kernel virtual address of IOC dma memory
+ * @dm_pa: physical address of IOC dma memory
*/
void
bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
@@ -2176,9 +2060,7 @@ bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
ioc->attr = (struct bfi_ioc_attr *) dm_kva;
}
-/**
- * Return size of dma memory required.
- */
+/* Return size of dma memory required. */
u32
bfa_nw_ioc_meminfo(void)
{
@@ -2201,9 +2083,7 @@ bfa_nw_ioc_disable(struct bfa_ioc *ioc)
bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
-/**
- * Initialize memory for saving firmware trace.
- */
+/* Initialize memory for saving firmware trace. */
void
bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
{
@@ -2217,9 +2097,7 @@ bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}
-/**
- * Register mailbox message handler function, to be called by common modules
- */
+/* Register mailbox message handler function, to be called by common modules */
void
bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
@@ -2231,11 +2109,12 @@ bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
}
/**
- * Queue a mailbox command request to firmware. Waits if mailbox is busy.
- * Responsibility of caller to serialize
+ * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware.
*
- * @param[in] ioc IOC instance
- * @param[i] cmd Mailbox command
+ * @ioc: IOC instance
+ * @cmd: Mailbox command
+ *
+ * Waits if the mailbox is busy. It is the caller's responsibility to serialize.
*/
bool
bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
@@ -2272,9 +2151,7 @@ bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
return false;
}
-/**
- * Handle mailbox interrupts
- */
+/* Handle mailbox interrupts */
void
bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
{
@@ -2314,9 +2191,7 @@ bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
-/**
- * return true if IOC is disabled
- */
+/* return true if IOC is disabled */
bool
bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
{
@@ -2324,17 +2199,14 @@ bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}
-/**
- * return true if IOC is operational
- */
+/* return true if IOC is operational */
bool
bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
{
return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
-/**
- * Add to IOC heartbeat failure notification queue. To be used by common
+/* Add to IOC heartbeat failure notification queue. To be used by common
* modules such as cee, port, diag.
*/
void
@@ -2518,9 +2390,7 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
-/**
- * WWN public
- */
+/* WWN public */
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
@@ -2533,9 +2403,7 @@ bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
return ioc->attr->mac;
}
-/**
- * Firmware failure detected. Start recovery actions.
- */
+/* Firmware failure detected. Start recovery actions. */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
@@ -2545,10 +2413,7 @@ bfa_ioc_recover(struct bfa_ioc *ioc)
bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
-/**
- * @dg hal_iocpf_pvt BFA IOC PF private functions
- * @{
- */
+/* BFA IOC PF private functions */
static void
bfa_iocpf_enable(struct bfa_ioc *ioc)
@@ -2669,8 +2534,6 @@ bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
/*
* Send flash write request.
- *
- * @param[in] cbarg - callback argument
*/
static void
bfa_flash_write_send(struct bfa_flash *flash)
@@ -2699,10 +2562,10 @@ bfa_flash_write_send(struct bfa_flash *flash)
flash->offset += len;
}
-/*
- * Send flash read request.
+/**
+ * bfa_flash_read_send - Send flash read request.
*
- * @param[in] cbarg - callback argument
+ * @cbarg: callback argument
*/
static void
bfa_flash_read_send(void *cbarg)
@@ -2724,11 +2587,11 @@ bfa_flash_read_send(void *cbarg)
bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
}
-/*
- * Process flash response messages upon receiving interrupts.
+/**
+ * bfa_flash_intr - Process flash response messages upon receiving interrupts.
*
- * @param[in] flasharg - flash structure
- * @param[in] msg - message structure
+ * @flasharg: flash structure
+ * @msg: message structure
*/
static void
bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
@@ -2821,12 +2684,12 @@ bfa_nw_flash_meminfo(void)
return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
-/*
- * Flash attach API.
+/**
+ * bfa_nw_flash_attach - Flash attach API.
*
- * @param[in] flash - flash structure
- * @param[in] ioc - ioc structure
- * @param[in] dev - device structure
+ * @flash: flash structure
+ * @ioc: ioc structure
+ * @dev: device structure
*/
void
bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
@@ -2842,12 +2705,12 @@ bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
}
-/*
- * Claim memory for flash
+/**
+ * bfa_nw_flash_memclaim - Claim memory for flash
*
- * @param[in] flash - flash structure
- * @param[in] dm_kva - pointer to virtual memory address
- * @param[in] dm_pa - physical memory address
+ * @flash: flash structure
+ * @dm_kva: pointer to virtual memory address
+ * @dm_pa: physical memory address
*/
void
bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
@@ -2859,13 +2722,13 @@ bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
-/*
- * Get flash attribute.
+/**
+ * bfa_nw_flash_get_attr - Get flash attribute.
*
- * @param[in] flash - flash structure
- * @param[in] attr - flash attribute structure
- * @param[in] cbfn - callback function
- * @param[in] cbarg - callback argument
+ * @flash: flash structure
+ * @attr: flash attribute structure
+ * @cbfn: callback function
+ * @cbarg: callback argument
*
* Return status.
*/
@@ -2895,17 +2758,17 @@ bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
return BFA_STATUS_OK;
}
-/*
- * Update flash partition.
+/**
+ * bfa_nw_flash_update_part - Update flash partition.
*
- * @param[in] flash - flash structure
- * @param[in] type - flash partition type
- * @param[in] instance - flash partition instance
- * @param[in] buf - update data buffer
- * @param[in] len - data buffer length
- * @param[in] offset - offset relative to the partition starting address
- * @param[in] cbfn - callback function
- * @param[in] cbarg - callback argument
+ * @flash: flash structure
+ * @type: flash partition type
+ * @instance: flash partition instance
+ * @buf: update data buffer
+ * @len: data buffer length
+ * @offset: offset relative to the partition starting address
+ * @cbfn: callback function
+ * @cbarg: callback argument
*
* Return status.
*/
@@ -2944,17 +2807,17 @@ bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
return BFA_STATUS_OK;
}
-/*
- * Read flash partition.
+/**
+ * bfa_nw_flash_read_part - Read flash partition.
*
- * @param[in] flash - flash structure
- * @param[in] type - flash partition type
- * @param[in] instance - flash partition instance
- * @param[in] buf - read data buffer
- * @param[in] len - data buffer length
- * @param[in] offset - offset relative to the partition starting address
- * @param[in] cbfn - callback function
- * @param[in] cbarg - callback argument
+ * @flash: flash structure
+ * @type: flash partition type
+ * @instance: flash partition instance
+ * @buf: read data buffer
+ * @len: data buffer length
+ * @offset: offset relative to the partition starting address
+ * @cbfn: callback function
+ * @cbarg: callback argument
*
* Return status.
*/
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index 3b4460f..63a85e5 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -30,9 +30,7 @@
#define BNA_DBG_FWTRC_LEN (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \
BFI_IOC_TRC_HDR_SZ)
-/**
- * PCI device information required by IOC
- */
+/* PCI device information required by IOC */
struct bfa_pcidev {
int pci_slot;
u8 pci_func;
@@ -41,8 +39,7 @@ struct bfa_pcidev {
void __iomem *pci_bar_kva;
};
-/**
- * Structure used to remember the DMA-able memory block's KVA and Physical
+/* Structure used to remember the DMA-able memory block's KVA and Physical
* Address
*/
struct bfa_dma {
@@ -52,15 +49,11 @@ struct bfa_dma {
#define BFA_DMA_ALIGN_SZ 256
-/**
- * smem size for Crossbow and Catapult
- */
+/* smem size for Crossbow and Catapult */
#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
-/**
- * @brief BFA dma address assignment macro. (big endian format)
- */
+/* BFA dma address assignment macro. (big endian format) */
#define bfa_dma_be_addr_set(dma_addr, pa) \
__bfa_dma_be_addr_set(&dma_addr, (u64)pa)
static inline void
@@ -108,9 +101,7 @@ struct bfa_ioc_regs {
u32 smem_pg0;
};
-/**
- * IOC Mailbox structures
- */
+/* IOC Mailbox structures */
typedef void (*bfa_mbox_cmd_cbfn_t)(void *cbarg);
struct bfa_mbox_cmd {
struct list_head qe;
@@ -119,9 +110,7 @@ struct bfa_mbox_cmd {
u32 msg[BFI_IOC_MSGSZ];
};
-/**
- * IOC mailbox module
- */
+/* IOC mailbox module */
typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m);
struct bfa_ioc_mbox_mod {
struct list_head cmd_q; /*!< pending mbox queue */
@@ -132,9 +121,7 @@ struct bfa_ioc_mbox_mod {
} mbhdlr[BFI_MC_MAX];
};
-/**
- * IOC callback function interfaces
- */
+/* IOC callback function interfaces */
typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
@@ -146,9 +133,7 @@ struct bfa_ioc_cbfn {
bfa_ioc_reset_cbfn_t reset_cbfn;
};
-/**
- * IOC event notification mechanism.
- */
+/* IOC event notification mechanism. */
enum bfa_ioc_event {
BFA_IOC_E_ENABLED = 1,
BFA_IOC_E_DISABLED = 2,
@@ -163,9 +148,7 @@ struct bfa_ioc_notify {
void *cbarg;
};
-/**
- * Initialize a IOC event notification structure
- */
+/* Initialize a IOC event notification structure */
#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \
(__notify)->cbfn = (__cbfn); \
(__notify)->cbarg = (__cbarg); \
@@ -261,9 +244,7 @@ struct bfa_ioc_hwif {
#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
-/**
- * IOC mailbox interface
- */
+/* IOC mailbox interface */
bool bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc,
struct bfa_mbox_cmd *cmd,
bfa_mbox_cmd_cbfn_t cbfn, void *cbarg);
@@ -271,9 +252,7 @@ void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc);
void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
-/**
- * IOC interfaces
- */
+/* IOC interfaces */
#define bfa_ioc_pll_init_asic(__ioc) \
((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index b6b036a..5df0b0c 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -87,9 +87,7 @@ static const struct bfa_ioc_hwif nw_hwif_ct2 = {
.ioc_sync_complete = bfa_ioc_ct_sync_complete,
};
-/**
- * Called from bfa_ioc_attach() to map asic specific calls.
- */
+/* Called from bfa_ioc_attach() to map asic specific calls. */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
@@ -102,9 +100,7 @@ bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
ioc->ioc_hwif = &nw_hwif_ct2;
}
-/**
- * Return true if firmware of current driver matches the running firmware.
- */
+/* Return true if firmware of current driver matches the running firmware. */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
@@ -182,9 +178,7 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}
-/**
- * Notify other functions on HB failure.
- */
+/* Notify other functions on HB failure. */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
@@ -195,9 +189,7 @@ bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
readl(ioc->ioc_regs.alt_ll_halt);
}
-/**
- * Host to LPU mailbox message addresses
- */
+/* Host to LPU mailbox message addresses */
static const struct {
u32 hfn_mbox;
u32 lpu_mbox;
@@ -209,9 +201,7 @@ static const struct {
{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};
-/**
- * Host <-> LPU mailbox command/status registers - port 0
- */
+/* Host <-> LPU mailbox command/status registers - port 0 */
static const struct {
u32 hfn;
u32 lpu;
@@ -222,9 +212,7 @@ static const struct {
{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};
-/**
- * Host <-> LPU mailbox command/status registers - port 1
- */
+/* Host <-> LPU mailbox command/status registers - port 1 */
static const struct {
u32 hfn;
u32 lpu;
@@ -368,9 +356,7 @@ bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}
-/**
- * Initialize IOC to port mapping.
- */
+/* Initialize IOC to port mapping. */
#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
static void
@@ -398,9 +384,7 @@ bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
}
-/**
- * Set interrupt mode for a function: INTX or MSIX
- */
+/* Set interrupt mode for a function: INTX or MSIX */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
@@ -443,9 +427,7 @@ bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
return false;
}
-/**
- * MSI-X resource allocation for 1860 with no asic block
- */
+/* MSI-X resource allocation for 1860 with no asic block */
#define HOSTFN_MSIX_DEFAULT 64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c
@@ -473,9 +455,7 @@ bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}
-/**
- * Cleanup hw semaphore and usecnt registers
- */
+/* Cleanup hw semaphore and usecnt registers */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
@@ -492,9 +472,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
bfa_nw_ioc_hw_sem_release(ioc);
}
-/**
- * Synchronized IOC failure processing routines
- */
+/* Synchronized IOC failure processing routines */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
@@ -518,9 +496,7 @@ bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
return bfa_ioc_ct_sync_complete(ioc);
}
-/**
- * Synchronized IOC failure processing routines
- */
+/* Synchronized IOC failure processing routines */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
index dd36427..55067d0 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_msgq.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
@@ -16,9 +16,7 @@
* www.brocade.com
*/
-/**
- * @file bfa_msgq.c MSGQ module source file.
- */
+/* MSGQ module source file. */
#include "bfi.h"
#include "bfa_msgq.h"
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 0d9df69..1f24c23 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -22,15 +22,11 @@
#pragma pack(1)
-/**
- * BFI FW image type
- */
+/* BFI FW image type */
#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
-/**
- * Msg header common to all msgs
- */
+/* Msg header common to all msgs */
struct bfi_mhdr {
u8 msg_class; /*!< @ref enum bfi_mclass */
u8 msg_id; /*!< msg opcode with in the class */
@@ -65,17 +61,14 @@ struct bfi_mhdr {
#define BFI_I2H_OPCODE_BASE 128
#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
-/**
- ****************************************************************************
+/****************************************************************************
*
* Scatter Gather Element and Page definition
*
****************************************************************************
*/
-/**
- * DMA addresses
- */
+/* DMA addresses */
union bfi_addr_u {
struct {
u32 addr_lo;
@@ -83,9 +76,7 @@ union bfi_addr_u {
} a32;
};
-/**
- * Generic DMA addr-len pair.
- */
+/* Generic DMA addr-len pair. */
struct bfi_alen {
union bfi_addr_u al_addr; /* DMA addr of buffer */
u32 al_len; /* length of buffer */
@@ -98,26 +89,20 @@ struct bfi_alen {
#define BFI_LMSG_PL_WSZ \
((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4)
-/**
- * Mailbox message structure
- */
+/* Mailbox message structure */
#define BFI_MBMSG_SZ 7
struct bfi_mbmsg {
struct bfi_mhdr mh;
u32 pl[BFI_MBMSG_SZ];
};
-/**
- * Supported PCI function class codes (personality)
- */
+/* Supported PCI function class codes (personality) */
enum bfi_pcifn_class {
BFI_PCIFN_CLASS_FC = 0x0c04,
BFI_PCIFN_CLASS_ETH = 0x0200,
};
-/**
- * Message Classes
- */
+/* Message Classes */
enum bfi_mclass {
BFI_MC_IOC = 1, /*!< IO Controller (IOC) */
BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */
@@ -159,15 +144,12 @@ enum bfi_mclass {
#define BFI_FWBOOT_ENV_OS 0
-/**
- *----------------------------------------------------------------------
+/*----------------------------------------------------------------------
* IOC
*----------------------------------------------------------------------
*/
-/**
- * Different asic generations
- */
+/* Different asic generations */
enum bfi_asic_gen {
BFI_ASIC_GEN_CB = 1,
BFI_ASIC_GEN_CT = 2,
@@ -196,9 +178,7 @@ enum bfi_ioc_i2h_msgs {
BFI_IOC_I2H_HBEAT = BFA_I2HM(4),
};
-/**
- * BFI_IOC_H2I_GETATTR_REQ message
- */
+/* BFI_IOC_H2I_GETATTR_REQ message */
struct bfi_ioc_getattr_req {
struct bfi_mhdr mh;
union bfi_addr_u attr_addr;
@@ -231,30 +211,22 @@ struct bfi_ioc_attr {
u32 card_type; /*!< card type */
};
-/**
- * BFI_IOC_I2H_GETATTR_REPLY message
- */
+/* BFI_IOC_I2H_GETATTR_REPLY message */
struct bfi_ioc_getattr_reply {
struct bfi_mhdr mh; /*!< Common msg header */
u8 status; /*!< cfg reply status */
u8 rsvd[3];
};
-/**
- * Firmware memory page offsets
- */
+/* Firmware memory page offsets */
#define BFI_IOC_SMEM_PG0_CB (0x40)
#define BFI_IOC_SMEM_PG0_CT (0x180)
-/**
- * Firmware statistic offset
- */
+/* Firmware statistic offset */
#define BFI_IOC_FWSTATS_OFF (0x6B40)
#define BFI_IOC_FWSTATS_SZ (4096)
-/**
- * Firmware trace offset
- */
+/* Firmware trace offset */
#define BFI_IOC_TRC_OFF (0x4b00)
#define BFI_IOC_TRC_ENTS 256
#define BFI_IOC_TRC_ENT_SZ 16
@@ -299,9 +271,7 @@ struct bfi_ioc_hbeat {
u32 hb_count; /*!< current heart beat count */
};
-/**
- * IOC hardware/firmware state
- */
+/* IOC hardware/firmware state */
enum bfi_ioc_state {
BFI_IOC_UNINIT = 0, /*!< not initialized */
BFI_IOC_INITING = 1, /*!< h/w is being initialized */
@@ -345,9 +315,7 @@ enum {
((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
BFI_ADAPTER_UNSUPP))
-/**
- * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
- */
+/* BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages */
struct bfi_ioc_ctrl_req {
struct bfi_mhdr mh;
u16 clscode;
@@ -355,9 +323,7 @@ struct bfi_ioc_ctrl_req {
u32 tv_sec;
};
-/**
- * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
- */
+/* BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages */
struct bfi_ioc_ctrl_reply {
struct bfi_mhdr mh; /*!< Common msg header */
u8 status; /*!< enable/disable status */
@@ -367,9 +333,7 @@ struct bfi_ioc_ctrl_reply {
};
#define BFI_IOC_MSGSZ 8
-/**
- * H2I Messages
- */
+/* H2I Messages */
union bfi_ioc_h2i_msg_u {
struct bfi_mhdr mh;
struct bfi_ioc_ctrl_req enable_req;
@@ -378,17 +342,14 @@ union bfi_ioc_h2i_msg_u {
u32 mboxmsg[BFI_IOC_MSGSZ];
};
-/**
- * I2H Messages
- */
+/* I2H Messages */
union bfi_ioc_i2h_msg_u {
struct bfi_mhdr mh;
struct bfi_ioc_ctrl_reply fw_event;
u32 mboxmsg[BFI_IOC_MSGSZ];
};
-/**
- *----------------------------------------------------------------------
+/*----------------------------------------------------------------------
* MSGQ
*----------------------------------------------------------------------
*/
diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h
index 4eecabe..6704a43 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h
@@ -37,18 +37,14 @@ enum bfi_port_i2h {
BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
};
-/**
- * Generic REQ type
- */
+/* Generic REQ type */
struct bfi_port_generic_req {
struct bfi_mhdr mh; /*!< msg header */
u32 msgtag; /*!< msgtag for reply */
u32 rsvd;
};
-/**
- * Generic RSP type
- */
+/* Generic RSP type */
struct bfi_port_generic_rsp {
struct bfi_mhdr mh; /*!< common msg header */
u8 status; /*!< port enable status */
@@ -56,44 +52,12 @@ struct bfi_port_generic_rsp {
u32 msgtag; /*!< msgtag for reply */
};
-/**
- * @todo
- * BFI_PORT_H2I_ENABLE_REQ
- */
-
-/**
- * @todo
- * BFI_PORT_I2H_ENABLE_RSP
- */
-
-/**
- * BFI_PORT_H2I_DISABLE_REQ
- */
-
-/**
- * BFI_PORT_I2H_DISABLE_RSP
- */
-
-/**
- * BFI_PORT_H2I_GET_STATS_REQ
- */
+/* BFI_PORT_H2I_GET_STATS_REQ */
struct bfi_port_get_stats_req {
struct bfi_mhdr mh; /*!< common msg header */
union bfi_addr_u dma_addr;
};
-/**
- * BFI_PORT_I2H_GET_STATS_RSP
- */
-
-/**
- * BFI_PORT_H2I_CLEAR_STATS_REQ
- */
-
-/**
- * BFI_PORT_I2H_CLEAR_STATS_RSP
- */
-
union bfi_port_h2i_msg_u {
struct bfi_mhdr mh;
struct bfi_port_generic_req enable_req;
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index a90f1cf..eef6e1f 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -16,12 +16,9 @@
* www.brocade.com
*/
-/**
- * @file bfi_enet.h BNA Hardware and Firmware Interface
- */
+/* BNA Hardware and Firmware Interface */
-/**
- * Skipping statistics collection to avoid clutter.
+/* Skipping statistics collection to avoid clutter.
* Command is no longer needed:
* MTU
* TxQ Stop
@@ -64,9 +61,7 @@ union bfi_addr_be_u {
} a32;
};
-/**
- * T X Q U E U E D E F I N E S
- */
+/* T X Q U E U E D E F I N E S */
/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
/* TxQ Entry Opcodes */
#define BFI_ENET_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
@@ -106,10 +101,7 @@ struct bfi_enet_txq_wi_vector { /* Tx Buffer Descriptor */
union bfi_addr_be_u addr;
};
-/**
- * TxQ Entry Structure
- *
- */
+/* TxQ Entry Structure */
struct bfi_enet_txq_entry {
union {
struct bfi_enet_txq_wi_base base;
@@ -124,16 +116,12 @@ struct bfi_enet_txq_entry {
#define BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
(((_hdr_size) << 10) | ((_offset) & 0x3FF))
-/**
- * R X Q U E U E D E F I N E S
- */
+/* R X Q U E U E D E F I N E S */
struct bfi_enet_rxq_entry {
union bfi_addr_be_u rx_buffer;
};
-/**
- * R X C O M P L E T I O N Q U E U E D E F I N E S
- */
+/* R X C O M P L E T I O N Q U E U E D E F I N E S */
/* CQ Entry Flags */
#define BFI_ENET_CQ_EF_MAC_ERROR (1 << 0)
#define BFI_ENET_CQ_EF_FCS_ERROR (1 << 1)
@@ -174,9 +162,7 @@ struct bfi_enet_cq_entry {
u8 rxq_id;
};
-/**
- * E N E T C O N T R O L P A T H C O M M A N D S
- */
+/* E N E T C O N T R O L P A T H C O M M A N D S */
struct bfi_enet_q {
union bfi_addr_u pg_tbl;
union bfi_addr_u first_entry;
@@ -222,9 +208,7 @@ struct bfi_enet_ib {
u16 rsvd;
};
-/**
- * ENET command messages
- */
+/* ENET command messages */
enum bfi_enet_h2i_msgs {
/* Rx Commands */
BFI_ENET_H2I_RX_CFG_SET_REQ = 1,
@@ -350,9 +334,7 @@ enum bfi_enet_i2h_msgs {
BFI_ENET_I2H_BW_UPDATE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 4),
};
-/**
- * The following error codes can be returned by the enet commands
- */
+/* The following error codes can be returned by the enet commands */
enum bfi_enet_err {
BFI_ENET_CMD_OK = 0,
BFI_ENET_CMD_FAIL = 1,
@@ -364,8 +346,7 @@ enum bfi_enet_err {
BFI_ENET_CMD_PORT_DISABLED = 7, /* !< port in disabled state */
};
-/**
- * Generic Request
+/* Generic Request
*
* bfi_enet_req is used by:
* BFI_ENET_H2I_RX_CFG_CLR_REQ
@@ -375,8 +356,7 @@ struct bfi_enet_req {
struct bfi_msgq_mhdr mh;
};
-/**
- * Enable/Disable Request
+/* Enable/Disable Request
*
* bfi_enet_enable_req is used by:
* BFI_ENET_H2I_RSS_ENABLE_REQ (enet_id must be zero)
@@ -391,9 +371,7 @@ struct bfi_enet_enable_req {
u8 rsvd[3];
};
-/**
- * Generic Response
- */
+/* Generic Response */
struct bfi_enet_rsp {
struct bfi_msgq_mhdr mh;
u8 error; /*!< if error see cmd_offset */
@@ -401,20 +379,16 @@ struct bfi_enet_rsp {
u16 cmd_offset; /*!< offset to invalid parameter */
};
-/**
- * GLOBAL CONFIGURATION
- */
+/* GLOBAL CONFIGURATION */
-/**
- * bfi_enet_attr_req is used by:
+/* bfi_enet_attr_req is used by:
* BFI_ENET_H2I_GET_ATTR_REQ
*/
struct bfi_enet_attr_req {
struct bfi_msgq_mhdr mh;
};
-/**
- * bfi_enet_attr_rsp is used by:
+/* bfi_enet_attr_rsp is used by:
* BFI_ENET_I2H_GET_ATTR_RSP
*/
struct bfi_enet_attr_rsp {
@@ -427,8 +401,7 @@ struct bfi_enet_attr_rsp {
u32 rit_size;
};
-/**
- * Tx Configuration
+/* Tx Configuration
*
* bfi_enet_tx_cfg is used by:
* BFI_ENET_H2I_TX_CFG_SET_REQ
@@ -477,8 +450,7 @@ struct bfi_enet_tx_cfg_rsp {
} q_handles[BFI_ENET_TXQ_PRIO_MAX];
};
-/**
- * Rx Configuration
+/* Rx Configuration
*
* bfi_enet_rx_cfg is used by:
* BFI_ENET_H2I_RX_CFG_SET_REQ
@@ -553,8 +525,7 @@ struct bfi_enet_rx_cfg_rsp {
} q_handles[BFI_ENET_RX_QSET_MAX];
};
-/**
- * RIT
+/* RIT
*
* bfi_enet_rit_req is used by:
* BFI_ENET_H2I_RIT_CFG_REQ
@@ -566,8 +537,7 @@ struct bfi_enet_rit_req {
u8 table[BFI_ENET_RSS_RIT_MAX];
};
-/**
- * RSS
+/* RSS
*
* bfi_enet_rss_cfg_req is used by:
* BFI_ENET_H2I_RSS_CFG_REQ
@@ -591,8 +561,7 @@ struct bfi_enet_rss_cfg_req {
struct bfi_enet_rss_cfg cfg;
};
-/**
- * MAC Unicast
+/* MAC Unicast
*
* bfi_enet_rx_vlan_req is used by:
* BFI_ENET_H2I_MAC_UCAST_SET_REQ
@@ -606,17 +575,14 @@ struct bfi_enet_ucast_req {
u8 rsvd[2];
};
-/**
- * MAC Unicast + VLAN
- */
+/* MAC Unicast + VLAN */
struct bfi_enet_mac_n_vlan_req {
struct bfi_msgq_mhdr mh;
u16 vlan_id;
mac_t mac_addr;
};
-/**
- * MAC Multicast
+/* MAC Multicast
*
* bfi_enet_mac_mfilter_add_req is used by:
* BFI_ENET_H2I_MAC_MCAST_ADD_REQ
@@ -627,8 +593,7 @@ struct bfi_enet_mcast_add_req {
u8 rsvd[2];
};
-/**
- * bfi_enet_mac_mfilter_add_rsp is used by:
+/* bfi_enet_mac_mfilter_add_rsp is used by:
* BFI_ENET_I2H_MAC_MCAST_ADD_RSP
*/
struct bfi_enet_mcast_add_rsp {
@@ -640,8 +605,7 @@ struct bfi_enet_mcast_add_rsp {
u8 rsvd1[2];
};
-/**
- * bfi_enet_mac_mfilter_del_req is used by:
+/* bfi_enet_mac_mfilter_del_req is used by:
* BFI_ENET_H2I_MAC_MCAST_DEL_REQ
*/
struct bfi_enet_mcast_del_req {
@@ -650,8 +614,7 @@ struct bfi_enet_mcast_del_req {
u8 rsvd[2];
};
-/**
- * VLAN
+/* VLAN
*
* bfi_enet_rx_vlan_req is used by:
* BFI_ENET_H2I_RX_VLAN_SET_REQ
@@ -663,8 +626,7 @@ struct bfi_enet_rx_vlan_req {
u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX];
};
-/**
- * PAUSE
+/* PAUSE
*
* bfi_enet_set_pause_req is used by:
* BFI_ENET_H2I_SET_PAUSE_REQ
@@ -676,8 +638,7 @@ struct bfi_enet_set_pause_req {
u8 rx_pause; /* 1 = enable; 0 = disable */
};
-/**
- * DIAGNOSTICS
+/* DIAGNOSTICS
*
* bfi_enet_diag_lb_req is used by:
* BFI_ENET_H2I_DIAG_LOOPBACK
@@ -689,16 +650,13 @@ struct bfi_enet_diag_lb_req {
u8 enable; /* 1 = enable; 0 = disable */
};
-/**
- * enum for Loopback opmodes
- */
+/* enum for Loopback opmodes */
enum {
BFI_ENET_DIAG_LB_OPMODE_EXT = 0,
BFI_ENET_DIAG_LB_OPMODE_CBL = 1,
};
-/**
- * STATISTICS
+/* STATISTICS
*
* bfi_enet_stats_req is used by:
* BFI_ENET_H2I_STATS_GET_REQ
@@ -713,9 +671,7 @@ struct bfi_enet_stats_req {
union bfi_addr_u host_buffer;
};
-/**
- * defines for "stats_mask" above.
- */
+/* defines for "stats_mask" above. */
#define BFI_ENET_STATS_MAC (1 << 0) /* !< MAC Statistics */
#define BFI_ENET_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */
#define BFI_ENET_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */
@@ -881,8 +837,7 @@ struct bfi_enet_stats_mac {
u64 tx_fragments;
};
-/**
- * Complete statistics, DMAed from fw to host followed by
+/* Complete statistics, DMAed from fw to host followed by
* BFI_ENET_I2H_STATS_GET_RSP
*/
struct bfi_enet_stats {
diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h
index 0e094fe..c49fa31 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_reg.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h
@@ -221,9 +221,7 @@ enum {
#define __PMM_1T_RESET_P 0x00000001
#define PMM_1T_RESET_REG_P1 0x00023c1c
-/**
- * Brocade 1860 Adapter specific defines
- */
+/* Brocade 1860 Adapter specific defines */
#define CT2_PCI_CPQ_BASE 0x00030000
#define CT2_PCI_APP_BASE 0x00030100
#define CT2_PCI_ETH_BASE 0x00030400
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index 4d7a5de..ede532b 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -25,11 +25,7 @@
extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
-/**
- *
- * Macros and constants
- *
- */
+/* Macros and constants */
#define BNA_IOC_TIMER_FREQ 200
@@ -356,11 +352,7 @@ do { \
} \
} while (0)
-/**
- *
- * Inline functions
- *
- */
+/* Inline functions */
static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
{
@@ -377,15 +369,9 @@ static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
#define bna_attr(_bna) (&(_bna)->ioceth.attr)
-/**
- *
- * Function prototypes
- *
- */
+/* Function prototypes */
-/**
- * BNA
- */
+/* BNA */
/* FW response handlers */
void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr);
@@ -413,24 +399,19 @@ struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
struct bna_mcam_handle *handle);
-/**
- * MBOX
- */
+/* MBOX */
/* API for BNAD */
void bna_mbox_handler(struct bna *bna, u32 intr_status);
-/**
- * ETHPORT
- */
+/* ETHPORT */
/* Callbacks for RX */
void bna_ethport_cb_rx_started(struct bna_ethport *ethport);
void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport);
-/**
- * TX MODULE AND TX
- */
+/* TX MODULE AND TX */
+
/* FW response handlers */
void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx,
struct bfi_msgq_mhdr *msghdr);
@@ -462,9 +443,7 @@ void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
void bna_tx_cleanup_complete(struct bna_tx *tx);
void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
-/**
- * RX MODULE, RX, RXF
- */
+/* RX MODULE, RX, RXF */
/* FW response handlers */
void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx,
@@ -522,9 +501,7 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
-/**
- * ENET
- */
+/* ENET */
/* API for RX */
int bna_enet_mtu_get(struct bna_enet *enet);
@@ -544,18 +521,14 @@ void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
void (*cbfn)(struct bnad *));
void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);
-/**
- * IOCETH
- */
+/* IOCETH */
/* APIs for BNAD */
void bna_ioceth_enable(struct bna_ioceth *ioceth);
void bna_ioceth_disable(struct bna_ioceth *ioceth,
enum bna_cleanup_type type);
-/**
- * BNAD
- */
+/* BNAD */
/* Callbacks for ENET */
void bnad_cb_ethport_link_status(struct bnad *bnad,
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 9ccc586..db14f69 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -378,9 +378,8 @@ bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
}
}
-/**
- * ETHPORT
- */
+/* ETHPORT */
+
#define call_ethport_stop_cbfn(_ethport) \
do { \
if ((_ethport)->stop_cbfn) { \
@@ -804,9 +803,8 @@ bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
}
}
-/**
- * ENET
- */
+/* ENET */
+
#define bna_enet_chld_start(enet) \
do { \
enum bna_tx_type tx_type = \
@@ -1328,9 +1326,8 @@ bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
*mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
}
-/**
- * IOCETH
- */
+/* IOCETH */
+
#define enable_mbox_intr(_ioceth) \
do { \
u32 intr_status; \
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index 4c6aab2..b8c4e21 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -16,20 +16,15 @@
* www.brocade.com
*/
-/**
- * File for interrupt macros and functions
- */
+/* File for interrupt macros and functions */
#ifndef __BNA_HW_DEFS_H__
#define __BNA_HW_DEFS_H__
#include "bfi_reg.h"
-/**
- *
- * SW imposed limits
- *
- */
+/* SW imposed limits */
+
#define BFI_ENET_DEF_TXQ 1
#define BFI_ENET_DEF_RXP 1
#define BFI_ENET_DEF_UCAM 1
@@ -141,11 +136,8 @@
}
#define bna_port_id_get(_bna) ((_bna)->ioceth.ioc.port_id)
-/**
- *
- * Interrupt related bits, flags and macros
- *
- */
+
+/* Interrupt related bits, flags and macros */
#define IB_STATUS_BITS 0x0000ffff
@@ -280,11 +272,7 @@ do { \
(writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \
(_rcb)->q_dbell));
-/**
- *
- * TxQ, RxQ, CQ related bits, offsets, macros
- *
- */
+/* TxQ, RxQ, CQ related bits, offsets, macros */
/* TxQ Entry Opcodes */
#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
@@ -334,11 +322,7 @@ do { \
#define BNA_CQ_EF_LOCAL (1 << 20)
-/**
- *
- * Data structures
- *
- */
+/* Data structures */
struct bna_reg_offset {
u32 fn_int_status;
@@ -371,8 +355,7 @@ struct bna_txq_wi_vector {
struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */
};
-/**
- * TxQ Entry Structure
+/* TxQ Entry Structure
*
* BEWARE: Load values into this structure with correct endianness.
*/
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 276fcb5..71144b39 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -18,9 +18,7 @@
#include "bna.h"
#include "bfi.h"
-/**
- * IB
- */
+/* IB */
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
@@ -29,9 +27,7 @@ bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
(u32)ib->coalescing_timeo, 0);
}
-/**
- * RXF
- */
+/* RXF */
#define bna_rxf_vlan_cfg_soft_reset(rxf) \
do { \
@@ -1312,9 +1308,7 @@ bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
return 0;
}
-/**
- * RX
- */
+/* RX */
#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
(qcfg)->num_paths : ((qcfg)->num_paths * 2))
@@ -2791,9 +2785,8 @@ const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
{1, 2},
};
-/**
- * TX
- */
+/* TX */
+
#define call_tx_stop_cbfn(tx) \
do { \
if ((tx)->stop_cbfn) { \
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index e8d3ab7..d3eb8bd 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -23,11 +23,7 @@
#include "bfa_cee.h"
#include "bfa_msgq.h"
-/**
- *
- * Forward declarations
- *
- */
+/* Forward declarations */
struct bna_mcam_handle;
struct bna_txq;
@@ -40,11 +36,7 @@ struct bna_enet;
struct bna;
struct bnad;
-/**
- *
- * Enums, primitive data types
- *
- */
+/* Enums, primitive data types */
enum bna_status {
BNA_STATUS_T_DISABLED = 0,
@@ -331,11 +323,7 @@ struct bna_attr {
int max_rit_size;
};
-/**
- *
- * IOCEth
- *
- */
+/* IOCEth */
struct bna_ioceth {
bfa_fsm_t fsm;
@@ -351,11 +339,7 @@ struct bna_ioceth {
struct bna *bna;
};
-/**
- *
- * Enet
- *
- */
+/* Enet */
/* Pause configuration */
struct bna_pause_config {
@@ -390,11 +374,7 @@ struct bna_enet {
struct bna *bna;
};
-/**
- *
- * Ethport
- *
- */
+/* Ethport */
struct bna_ethport {
bfa_fsm_t fsm;
@@ -419,11 +399,7 @@ struct bna_ethport {
struct bna *bna;
};
-/**
- *
- * Interrupt Block
- *
- */
+/* Interrupt Block */
/* Doorbell structure */
struct bna_ib_dbell {
@@ -447,11 +423,7 @@ struct bna_ib {
int interpkt_timeo;
};
-/**
- *
- * Tx object
- *
- */
+/* Tx object */
/* Tx datapath control structure */
#define BNA_Q_NAME_SIZE 16
@@ -585,11 +557,7 @@ struct bna_tx_mod {
struct bna *bna;
};
-/**
- *
- * Rx object
- *
- */
+/* Rx object */
/* Rx datapath control structure */
struct bna_rcb {
@@ -898,11 +866,7 @@ struct bna_rx_mod {
u32 rid_mask;
};
-/**
- *
- * CAM
- *
- */
+/* CAM */
struct bna_ucam_mod {
struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */
@@ -927,11 +891,7 @@ struct bna_mcam_mod {
struct bna *bna;
};
-/**
- *
- * Statistics
- *
- */
+/* Statistics */
struct bna_stats {
struct bna_dma_addr hw_stats_dma;
@@ -949,11 +909,7 @@ struct bna_stats_mod {
struct bfi_enet_stats_req stats_clr;
};
-/**
- *
- * BNA
- *
- */
+/* BNA */
struct bna {
struct bna_ident ident;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 67cd2ed..b441f33 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1302,8 +1302,7 @@ bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
return 0;
}
-/**
- * NOTE: Should be called for MSIX only
+/* NOTE: Should be called for MSIX only
* Unregisters Tx MSIX vector(s) from the kernel
*/
static void
@@ -1322,8 +1321,7 @@ bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
}
}
-/**
- * NOTE: Should be called for MSIX only
+/* NOTE: Should be called for MSIX only
* Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
*/
static int
@@ -1354,8 +1352,7 @@ err_return:
return -1;
}
-/**
- * NOTE: Should be called for MSIX only
+/* NOTE: Should be called for MSIX only
* Unregisters Rx MSIX vector(s) from the kernel
*/
static void
@@ -1375,8 +1372,7 @@ bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
}
}
-/**
- * NOTE: Should be called for MSIX only
+/* NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
*/
static int
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 72742be..d783392 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -389,9 +389,7 @@ extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
void bnad_debugfs_init(struct bnad *bnad);
void bnad_debugfs_uninit(struct bnad *bnad);
-/**
- * MACROS
- */
+/* MACROS */
/* To set & get the stats counters */
#define BNAD_UPDATE_CTR(_bnad, _ctr) \
(((_bnad)->stats.drv_stats._ctr)++)
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
index cfc22a6..6a68e8d 100644
--- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -67,10 +67,10 @@ bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off)
{
switch (asic_gen) {
case BFI_ASIC_GEN_CT:
- return (u32 *)(bfi_image_ct_cna + off);
+ return (bfi_image_ct_cna + off);
break;
case BFI_ASIC_GEN_CT2:
- return (u32 *)(bfi_image_ct2_cna + off);
+ return (bfi_image_ct2_cna + off);
break;
default:
return NULL;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 1466bc4..033064b 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -179,13 +179,16 @@ static void macb_handle_link_change(struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
if (status_change) {
- if (phydev->link)
+ if (phydev->link) {
+ netif_carrier_on(dev);
netdev_info(dev, "link up (%d/%s)\n",
phydev->speed,
phydev->duplex == DUPLEX_FULL ?
"Full" : "Half");
- else
+ } else {
+ netif_carrier_off(dev);
netdev_info(dev, "link down\n");
+ }
}
}
@@ -1033,6 +1036,9 @@ static int macb_open(struct net_device *dev)
netdev_dbg(bp->dev, "open\n");
+ /* carrier starts down */
+ netif_carrier_off(dev);
+
/* if the phy is not yet registered, retry later */
if (!bp->phy_dev)
return -EAGAIN;
@@ -1406,6 +1412,8 @@ static int __init macb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
+ netif_carrier_off(dev);
+
netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
dev->irq, dev->dev_addr);
@@ -1469,6 +1477,7 @@ static int macb_suspend(struct platform_device *pdev, pm_message_t state)
struct net_device *netdev = platform_get_drvdata(pdev);
struct macb *bp = netdev_priv(netdev);
+ netif_carrier_off(netdev);
netif_device_detach(netdev);
clk_disable(bp->hclk);
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 11f667f..2b4b4f5 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -264,7 +264,7 @@
#define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */
#define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */
#define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */
-#define XGMAC_OMR_RTC 0x00000010 /* RX Threshhold Ctrl */
+#define XGMAC_OMR_RTC_256 0x00000018 /* RX Threshhold Ctrl */
#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshhold Ctrl MASK */
/* XGMAC HW Features Register */
@@ -671,26 +671,23 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
p = priv->dma_rx + entry;
- if (priv->rx_skbuff[entry] != NULL)
- continue;
-
- skb = __skb_dequeue(&priv->rx_recycle);
- if (skb == NULL)
- skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
- if (unlikely(skb == NULL))
- break;
-
- priv->rx_skbuff[entry] = skb;
- paddr = dma_map_single(priv->device, skb->data,
- priv->dma_buf_sz, DMA_FROM_DEVICE);
- desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
+ if (priv->rx_skbuff[entry] == NULL) {
+ skb = __skb_dequeue(&priv->rx_recycle);
+ if (skb == NULL)
+ skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+ if (unlikely(skb == NULL))
+ break;
+
+ priv->rx_skbuff[entry] = skb;
+ paddr = dma_map_single(priv->device, skb->data,
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+ desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
+ }
netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
priv->rx_head, priv->rx_tail);
priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
- /* Ensure descriptor is in memory before handing to h/w */
- wmb();
desc_set_rx_owner(p);
}
}
@@ -933,6 +930,7 @@ static void xgmac_tx_err(struct xgmac_priv *priv)
desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
priv->tx_tail = 0;
priv->tx_head = 0;
+ writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
@@ -972,7 +970,7 @@ static int xgmac_hw_init(struct net_device *dev)
writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
/* XGMAC requires AXI bus init. This is a 'magic number' for now */
- writel(0x000100E, ioaddr + XGMAC_DMA_AXI_BUS);
+ writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);
ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
XGMAC_CONTROL_CAR;
@@ -984,7 +982,8 @@ static int xgmac_hw_init(struct net_device *dev)
writel(value, ioaddr + XGMAC_DMA_CONTROL);
/* Set the HW DMA mode and the COE */
- writel(XGMAC_OMR_TSF | XGMAC_OMR_RSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA,
+ writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
+ XGMAC_OMR_RTC_256,
ioaddr + XGMAC_OMR);
/* Reset the MMC counters */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index abb6ce7..6505070 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3050,7 +3050,7 @@ static struct pci_error_handlers t3_err_handler = {
static void set_nqsets(struct adapter *adap)
{
int i, j = 0;
- int num_cpus = num_online_cpus();
+ int num_cpus = netif_get_num_default_rss_queues();
int hwports = adap->params.nports;
int nqsets = adap->msix_nvectors - 1;
@@ -3173,6 +3173,9 @@ static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
pi->iscsic.mac_addr[3] |= 0x80;
}
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
+ NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
static int __devinit init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -3293,6 +3296,7 @@ static int __devinit init_one(struct pci_dev *pdev,
netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX;
netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_TX;
+ netdev->vlan_features |= netdev->features & VLAN_FEAT;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 65e4b28..2dbbcbb 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -62,7 +62,9 @@ static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x10000;
static void cxgb_neigh_update(struct neighbour *neigh);
-static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);
+static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
+ struct dst_entry *new, struct neighbour *new_neigh,
+ const void *daddr);
static inline int offload_activated(struct t3cdev *tdev)
{
@@ -575,7 +577,7 @@ static void t3_process_tid_release_list(struct work_struct *work)
if (!skb) {
spin_lock_bh(&td->tid_release_lock);
p->ctx = (void *)td->tid_release_list;
- td->tid_release_list = (struct t3c_tid_entry *)p;
+ td->tid_release_list = p;
break;
}
mk_tid_release(skb, p - td->tid_maps.tid_tab);
@@ -968,8 +970,10 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
}
case (NETEVENT_REDIRECT):{
struct netevent_redirect *nr = ctx;
- cxgb_redirect(nr->old, nr->new);
- cxgb_neigh_update(dst_get_neighbour_noref(nr->new));
+ cxgb_redirect(nr->old, nr->old_neigh,
+ nr->new, nr->new_neigh,
+ nr->daddr);
+ cxgb_neigh_update(nr->new_neigh);
break;
}
default:
@@ -1107,10 +1111,11 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
tdev->send(tdev, skb);
}
-static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
+static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
+ struct dst_entry *new, struct neighbour *new_neigh,
+ const void *daddr)
{
struct net_device *olddev, *newdev;
- struct neighbour *n;
struct tid_info *ti;
struct t3cdev *tdev;
u32 tid;
@@ -1118,15 +1123,8 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
struct l2t_entry *e;
struct t3c_tid_entry *te;
- n = dst_get_neighbour_noref(old);
- if (!n)
- return;
- olddev = n->dev;
-
- n = dst_get_neighbour_noref(new);
- if (!n)
- return;
- newdev = n->dev;
+ olddev = old_neigh->dev;
+ newdev = new_neigh->dev;
if (!is_offloading(olddev))
return;
@@ -1144,7 +1142,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
}
/* Add new L2T entry */
- e = t3_l2t_get(tdev, new, newdev);
+ e = t3_l2t_get(tdev, new, newdev, daddr);
if (!e) {
printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
__func__);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 3fa3c88..8d53438 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -299,7 +299,7 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
}
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
- struct net_device *dev)
+ struct net_device *dev, const void *daddr)
{
struct l2t_entry *e = NULL;
struct neighbour *neigh;
@@ -311,7 +311,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
int smt_idx;
rcu_read_lock();
- neigh = dst_get_neighbour_noref(dst);
+ neigh = dst_neigh_lookup(dst, daddr);
if (!neigh)
goto done_rcu;
@@ -360,6 +360,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
done_unlock:
write_unlock_bh(&d->lock);
done_rcu:
+ if (neigh)
+ neigh_release(neigh);
rcu_read_unlock();
return e;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index c4e8643..8cffcdf 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -110,7 +110,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
- struct net_device *dev);
+ struct net_device *dev, const void *daddr);
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e);
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index cfb60e1..dd901c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2877,7 +2877,7 @@ static void sge_timer_tx(unsigned long data)
mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
}
-/*
+/**
* sge_timer_rx - perform periodic maintenance of an SGE qset
* @data: the SGE queue set to maintain
*
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 44ac2f4..bff8a3c 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -1076,7 +1076,7 @@ static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
return 0;
}
-/*
+/**
* t3_load_fw - download firmware
* @adapter: the adapter
* @fw_data: the firmware image to write
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index e1f96fb..5ed49af 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3493,8 +3493,8 @@ static void __devinit cfg_queues(struct adapter *adap)
*/
if (n10g)
q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
- if (q10g > num_online_cpus())
- q10g = num_online_cpus();
+ if (q10g > netif_get_num_default_rss_queues())
+ q10g = netif_get_num_default_rss_queues();
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index e111d97..8596aca 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -753,7 +753,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
end = (void *)q->desc + part1;
}
if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
- *(u64 *)end = 0;
+ *end = 0;
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 32e1dd5..fa947df 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2010,7 +2010,7 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
-/*
+/**
* t4_mem_win_read_len - read memory through PCIE memory window
* @adap: the adapter
* @addr: address of first byte requested aligned on 32b.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 25e3308..9dad561 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -418,7 +418,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
* restart a TX Ethernet Queue which was stopped for lack of
* free TX Queue Descriptors ...
*/
- const struct cpl_sge_egr_update *p = (void *)cpl;
+ const struct cpl_sge_egr_update *p = cpl;
unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
struct sge *s = &adapter->sge;
struct sge_txq *tq;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 0bd585b..f2d1ecd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -934,7 +934,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
end = (void *)tq->desc + part1;
}
if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
- *(u64 *)end = 0;
+ *end = 0;
}
/**
@@ -1323,8 +1323,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (unlikely((void *)sgl == (void *)tq->stat)) {
sgl = (void *)tq->desc;
- end = (void *)((void *)tq->desc +
- ((void *)end - (void *)tq->stat));
+ end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
}
write_sgl(skb, tq, sgl, end, 0, addr);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 8132c78..ad1468b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1300,8 +1300,6 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
skb->ip_summed = CHECKSUM_COMPLETE;
}
- skb->dev = netdev;
-
if (vlan_stripped)
__vlan_hwaccel_put_tag(skb, vlan_tci);
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index d3cd489..f879e92 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -3973,7 +3973,7 @@ DevicePresent(struct net_device *dev, u_long aprom_addr)
tmp = srom_rd(aprom_addr, i);
*p++ = cpu_to_le16(tmp);
}
- de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
+ de4x5_dbg_srom(&lp->srom);
}
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index c5c4c0e..d266c86 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "4.2.220u"
+#define DRV_VER "4.4.31.0u"
#define DRV_NAME "be2net"
#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
@@ -389,6 +389,7 @@ struct be_adapter {
struct delayed_work work;
u16 work_counter;
+ struct delayed_work func_recovery_work;
u32 flags;
/* Ethtool knobs and info */
char fw_ver[FW_VER_LEN];
@@ -396,9 +397,10 @@ struct be_adapter {
u32 *pmac_id; /* MAC addr handle used by BE card */
u32 beacon_state; /* for set_phys_id */
- bool eeh_err;
- bool ue_detected;
+ bool eeh_error;
bool fw_timeout;
+ bool hw_error;
+
u32 port_num;
bool promiscuous;
u32 function_mode;
@@ -435,6 +437,7 @@ struct be_adapter {
u32 max_pmac_cnt; /* Max secondary UC MACs programmable */
u32 uc_macs; /* Count of secondary UC MAC programmed */
u32 msg_enable;
+ int be_get_temp_freq;
};
#define be_physfn(adapter) (!adapter->virtfn)
@@ -454,6 +457,9 @@ struct be_adapter {
#define lancer_chip(adapter) ((adapter->pdev->device == OC_DEVICE_ID3) || \
(adapter->pdev->device == OC_DEVICE_ID4))
+#define skyhawk_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID5)
+
+
#define be_roce_supported(adapter) ((adapter->if_type == SLI_INTF_TYPE_3 || \
adapter->sli_family == SKYHAWK_SLI_FAMILY) && \
(adapter->function_mode & RDMA_ENABLED))
@@ -573,6 +579,11 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
return val;
}
+static inline bool is_ipv4_pkt(struct sk_buff *skb)
+{
+ return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
+}
+
static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
u32 addr;
@@ -593,7 +604,19 @@ static inline bool be_multi_rxq(const struct be_adapter *adapter)
static inline bool be_error(struct be_adapter *adapter)
{
- return adapter->eeh_err || adapter->ue_detected || adapter->fw_timeout;
+ return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout;
+}
+
+static inline bool be_crit_error(struct be_adapter *adapter)
+{
+ return adapter->eeh_error || adapter->hw_error;
+}
+
+static inline void be_clear_all_error(struct be_adapter *adapter)
+{
+ adapter->eeh_error = false;
+ adapter->hw_error = false;
+ adapter->fw_timeout = false;
}
static inline bool be_is_wol_excluded(struct be_adapter *adapter)
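The be.h changes above fold the old eeh_err/ue_detected pair into eeh_error/hw_error and hide the combinations behind small inline predicates. A stand-alone sketch of the same pattern, with struct and function names invented for illustration:

#include <stdbool.h>
#include <stdio.h>

struct nic_state {
	bool eeh_error;		/* PCI EEH reported a fault */
	bool hw_error;		/* unrecoverable hardware/SLIPORT error */
	bool fw_timeout;	/* firmware stopped answering */
};

static bool nic_error(const struct nic_state *s)
{
	return s->eeh_error || s->hw_error || s->fw_timeout;
}

static bool nic_crit_error(const struct nic_state *s)
{
	return s->eeh_error || s->hw_error;	/* timeouts may still recover */
}

static void nic_clear_all_error(struct nic_state *s)
{
	s->eeh_error = s->hw_error = s->fw_timeout = false;
}

int main(void)
{
	struct nic_state s = { .fw_timeout = true };

	printf("error=%d crit=%d\n", nic_error(&s), nic_crit_error(&s));
	nic_clear_all_error(&s);
	printf("error=%d\n", nic_error(&s));
	return 0;
}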
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 8d06ea3..7fac97b 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -19,9 +19,6 @@
#include "be.h"
#include "be_cmds.h"
-/* Must be a power of 2 or else MODULO will BUG_ON */
-static int be_get_temp_freq = 64;
-
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
return wrb->payload.embedded_payload;
@@ -115,22 +112,22 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
}
} else {
if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
- be_get_temp_freq = 0;
+ adapter->be_get_temp_freq = 0;
if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
compl_status == MCC_STATUS_ILLEGAL_REQUEST)
goto done;
if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
- dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
- "permitted to execute this cmd (opcode %d)\n",
- opcode);
+ dev_warn(&adapter->pdev->dev,
+ "opcode %d-%d is not permitted\n",
+ opcode, subsystem);
} else {
extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
CQE_STATUS_EXTD_MASK;
- dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
- "status %d, extd-status %d\n",
- opcode, compl_status, extd_status);
+ dev_err(&adapter->pdev->dev,
+ "opcode %d-%d failed:status %d-%d\n",
+ opcode, subsystem, compl_status, extd_status);
}
}
done:
@@ -144,6 +141,11 @@ static void be_async_link_state_process(struct be_adapter *adapter,
/* When link status changes, link speed must be re-queried from FW */
adapter->phy.link_speed = -1;
+ /* Ignore physical link event */
+ if (lancer_chip(adapter) &&
+ !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
+ return;
+
/* For the initial link status do not rely on the ASYNC event as
* it may not be received in some cases.
*/
@@ -352,7 +354,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
if (msecs > 4000) {
dev_err(&adapter->pdev->dev, "FW not responding\n");
adapter->fw_timeout = true;
- be_detect_dump_ue(adapter);
+ be_detect_error(adapter);
return -1;
}
@@ -429,12 +431,65 @@ static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
return 0;
}
-int be_cmd_POST(struct be_adapter *adapter)
+int lancer_wait_ready(struct be_adapter *adapter)
+{
+#define SLIPORT_READY_TIMEOUT 30
+ u32 sliport_status;
+ int status = 0, i;
+
+ for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ if (sliport_status & SLIPORT_STATUS_RDY_MASK)
+ break;
+
+ msleep(1000);
+ }
+
+ if (i == SLIPORT_READY_TIMEOUT)
+ status = -1;
+
+ return status;
+}
+
+int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
+{
+ int status;
+ u32 sliport_status, err, reset_needed;
+ status = lancer_wait_ready(adapter);
+ if (!status) {
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ err = sliport_status & SLIPORT_STATUS_ERR_MASK;
+ reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
+ if (err && reset_needed) {
+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
+ adapter->db + SLIPORT_CONTROL_OFFSET);
+
+ /* check adapter has corrected the error */
+ status = lancer_wait_ready(adapter);
+ sliport_status = ioread32(adapter->db +
+ SLIPORT_STATUS_OFFSET);
+ sliport_status &= (SLIPORT_STATUS_ERR_MASK |
+ SLIPORT_STATUS_RN_MASK);
+ if (status || sliport_status)
+ status = -1;
+ } else if (err || reset_needed) {
+ status = -1;
+ }
+ }
+ return status;
+}
+
+int be_fw_wait_ready(struct be_adapter *adapter)
{
u16 stage;
int status, timeout = 0;
struct device *dev = &adapter->pdev->dev;
+ if (lancer_chip(adapter)) {
+ status = lancer_wait_ready(adapter);
+ return status;
+ }
+
do {
status = be_POST_stage_get(adapter, &stage);
if (status) {
@@ -565,6 +620,9 @@ int be_cmd_fw_init(struct be_adapter *adapter)
u8 *wrb;
int status;
+ if (lancer_chip(adapter))
+ return 0;
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -592,6 +650,9 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
u8 *wrb;
int status;
+ if (lancer_chip(adapter))
+ return 0;
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -610,6 +671,7 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
mutex_unlock(&adapter->mbox_lock);
return status;
}
+
int be_cmd_eq_create(struct be_adapter *adapter,
struct be_queue_info *eq, int eq_delay)
{
@@ -1132,7 +1194,7 @@ err:
* Uses MCCQ
*/
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
- u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain)
+ u32 *if_handle, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_if_create *req;
@@ -1152,17 +1214,13 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
req->hdr.domain = domain;
req->capability_flags = cpu_to_le32(cap_flags);
req->enable_flags = cpu_to_le32(en_flags);
- if (mac)
- memcpy(req->mac_addr, mac, ETH_ALEN);
- else
- req->pmac_invalid = true;
+
+ req->pmac_invalid = true;
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
*if_handle = le32_to_cpu(resp->interface_id);
- if (mac)
- *pmac_id = le32_to_cpu(resp->pmac_id);
}
err:
@@ -1210,9 +1268,6 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
struct be_cmd_req_hdr *hdr;
int status = 0;
- if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
- be_cmd_get_die_temperature(adapter);
-
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -1581,7 +1636,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
/* Reset mcast promisc mode if already set by setting mask
* and not setting flags field
*/
- req->if_flags_mask |=
+ if (!lancer_chip(adapter) || be_physfn(adapter))
+ req->if_flags_mask |=
cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
@@ -1692,6 +1748,20 @@ int be_cmd_reset_function(struct be_adapter *adapter)
struct be_cmd_req_hdr *req;
int status;
+ if (lancer_chip(adapter)) {
+ status = lancer_wait_ready(adapter);
+ if (!status) {
+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
+ adapter->db + SLIPORT_CONTROL_OFFSET);
+ status = lancer_test_and_set_rdy_state(adapter);
+ }
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Adapter in non recoverable error\n");
+ }
+ return status;
+ }
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -1728,6 +1798,13 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
req->if_id = cpu_to_le32(adapter->if_handle);
req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);
+
+ if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
+ req->hdr.version = 1;
+ req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
+ RSS_ENABLE_UDP_IPV6);
+ }
+
req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
memcpy(req->cpu_table, rsstable, table_size);
memcpy(req->hash, myhash, sizeof(myhash));
@@ -1805,8 +1882,9 @@ err:
}
int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset, const char *obj_name,
- u32 *data_written, u8 *addn_status)
+ u32 data_size, u32 data_offset,
+ const char *obj_name, u32 *data_written,
+ u8 *change_status, u8 *addn_status)
{
struct be_mcc_wrb *wrb;
struct lancer_cmd_req_write_object *req;
@@ -1862,10 +1940,12 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
status = adapter->flash_status;
resp = embedded_payload(wrb);
- if (!status)
+ if (!status) {
*data_written = le32_to_cpu(resp->actual_write_len);
- else
+ *change_status = resp->change_status;
+ } else {
*addn_status = resp->additional_status;
+ }
return status;
@@ -2330,8 +2410,8 @@ err:
}
/* Uses synchronous MCCQ */
-int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
- bool *pmac_id_active, u32 *pmac_id, u8 *mac)
+int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
+ bool *pmac_id_active, u32 *pmac_id, u8 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_mac_list *req;
@@ -2376,8 +2456,9 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
get_mac_list_cmd.va;
mac_count = resp->true_mac_count + resp->pseudo_mac_count;
/* Mac list returned could contain one or more active mac_ids
- * or one or more pseudo permanant mac addresses. If an active
- * mac_id is present, return first active mac_id found
+ * or one or more true or pseudo permanent mac addresses.

+ * If an active mac_id is present, return first active mac_id
+ * found.
*/
for (i = 0; i < mac_count; i++) {
struct get_list_macaddr *mac_entry;
@@ -2396,7 +2477,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
goto out;
}
}
- /* If no active mac_id found, return first pseudo mac addr */
+ /* If no active mac_id found, return first mac addr */
*pmac_id_active = false;
memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
ETH_ALEN);
@@ -2648,6 +2729,44 @@ err:
return status;
}
+int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_port_name *req;
+ int status;
+
+ if (!lancer_chip(adapter)) {
+ *port_name = adapter->hba_port_num + '0';
+ return 0;
+ }
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
+ NULL);
+ req->hdr.version = 1;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
+ *port_name = resp->port_name[adapter->hba_port_num];
+ } else {
+ *port_name = adapter->hba_port_num + '0';
+ }
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
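lancer_test_and_set_rdy_state() above attempts an in-band SLIPORT reset only when the status register reports both an error and "reset needed"; any other error combination is treated as fatal. The decision logic, reduced to a compilable sketch with stand-in mask values matching SLIPORT_STATUS_ERR_MASK and SLIPORT_STATUS_RN_MASK:

#include <stdint.h>
#include <stdio.h>

#define STATUS_ERR	0x80000000u
#define STATUS_RN	0x01000000u

static int classify(uint32_t sliport_status)
{
	uint32_t err = sliport_status & STATUS_ERR;
	uint32_t rn  = sliport_status & STATUS_RN;

	if (err && rn)
		return 1;	/* recoverable: write SLIPORT_CONTROL reset, re-poll */
	if (err || rn)
		return -1;	/* unexpected combination: give up */
	return 0;		/* healthy */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(0),
	       classify(STATUS_ERR | STATUS_RN),
	       classify(STATUS_ERR));	/* prints "0 1 -1" */
	return 0;
}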
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 9625bf4..250f19b 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -93,6 +93,7 @@ enum {
LINK_UP = 0x1
};
#define LINK_STATUS_MASK 0x1
+#define LOGICAL_LINK_STATUS_MASK 0x2
/* When the event code of an async trailer is link-state, the mcc_compl
* must be interpreted as follows
@@ -186,6 +187,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
#define OPCODE_COMMON_GET_BEACON_STATE 70
#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
+#define OPCODE_COMMON_GET_PORT_NAME 77
#define OPCODE_COMMON_GET_PHY_DETAILS 102
#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
@@ -1081,13 +1083,25 @@ struct be_cmd_resp_query_fw_cfg {
u32 function_caps;
};
-/******************** RSS Config *******************/
-/* RSS types */
+/******************** RSS Config ****************************************/
+/* RSS type Input parameters used to compute RX hash
+ * RSS_ENABLE_IPV4 SRC IPv4, DST IPv4
+ * RSS_ENABLE_TCP_IPV4 SRC IPv4, DST IPv4, TCP SRC PORT, TCP DST PORT
+ * RSS_ENABLE_IPV6 SRC IPv6, DST IPv6
+ * RSS_ENABLE_TCP_IPV6 SRC IPv6, DST IPv6, TCP SRC PORT, TCP DST PORT
+ * RSS_ENABLE_UDP_IPV4 SRC IPv4, DST IPv4, UDP SRC PORT, UDP DST PORT
+ * RSS_ENABLE_UDP_IPV6 SRC IPv6, DST IPv6, UDP SRC PORT, UDP DST PORT
+ *
+ * When multiple RSS types are enabled, HW picks the best hash policy
+ * based on the type of the received packet.
+ */
#define RSS_ENABLE_NONE 0x0
#define RSS_ENABLE_IPV4 0x1
#define RSS_ENABLE_TCP_IPV4 0x2
#define RSS_ENABLE_IPV6 0x4
#define RSS_ENABLE_TCP_IPV6 0x8
+#define RSS_ENABLE_UDP_IPV4 0x10
+#define RSS_ENABLE_UDP_IPV6 0x20
struct be_cmd_req_rss_config {
struct be_cmd_req_hdr hdr;
@@ -1163,6 +1177,8 @@ struct lancer_cmd_req_write_object {
u32 addr_high;
};
+#define LANCER_NO_RESET_NEEDED 0x00
+#define LANCER_FW_RESET_NEEDED 0x02
struct lancer_cmd_resp_write_object {
u8 opcode;
u8 subsystem;
@@ -1173,6 +1189,8 @@ struct lancer_cmd_resp_write_object {
u32 resp_len;
u32 actual_resp_len;
u32 actual_write_len;
+ u8 change_status;
+ u8 rsvd3[3];
};
/************************ Lancer Read FW info **************/
@@ -1502,6 +1520,17 @@ struct be_cmd_resp_get_hsw_config {
u32 rsvd;
};
+/******************* get port names ***************/
+struct be_cmd_req_get_port_name {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd0;
+};
+
+struct be_cmd_resp_get_port_name {
+ struct be_cmd_req_hdr hdr;
+ u8 port_name[4];
+};
+
/*************** HW Stats Get v1 **********************************/
#define BE_TXP_SW_SZ 48
struct be_port_rxf_stats_v1 {
@@ -1566,7 +1595,7 @@ struct be_hw_stats_v1 {
u32 rsvd0[BE_TXP_SW_SZ];
struct be_erx_stats_v1 erx;
struct be_pmem_stats pmem;
- u32 rsvd1[3];
+ u32 rsvd1[18];
};
struct be_cmd_req_get_stats_v1 {
@@ -1656,7 +1685,7 @@ struct be_cmd_req_set_ext_fat_caps {
};
extern int be_pci_fnum_get(struct be_adapter *adapter);
-extern int be_cmd_POST(struct be_adapter *adapter);
+extern int be_fw_wait_ready(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
u8 type, bool permanent, u32 if_handle, u32 pmac_id);
extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
@@ -1664,8 +1693,7 @@ extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
int pmac_id, u32 domain);
extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
- u32 en_flags, u8 *mac, u32 *if_handle, u32 *pmac_id,
- u32 domain);
+ u32 en_flags, u32 *if_handle, u32 domain);
extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
u32 domain);
extern int be_cmd_eq_create(struct be_adapter *adapter,
@@ -1719,10 +1747,11 @@ extern int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_dma_mem *cmd, u32 flash_oper,
u32 flash_opcode, u32 buf_size);
extern int lancer_cmd_write_object(struct be_adapter *adapter,
- struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset,
- const char *obj_name,
- u32 *data_written, u8 *addn_status);
+ struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset,
+ const char *obj_name,
+ u32 *data_written, u8 *change_status,
+ u8 *addn_status);
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 data_size, u32 data_offset, const char *obj_name,
u32 *data_read, u32 *eof, u8 *addn_status);
@@ -1745,14 +1774,15 @@ extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
u8 loopback_type, u8 enable);
extern int be_cmd_get_phy_info(struct be_adapter *adapter);
extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
-extern void be_detect_dump_ue(struct be_adapter *adapter);
+extern void be_detect_error(struct be_adapter *adapter);
extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
extern int be_cmd_req_native_mode(struct be_adapter *adapter);
extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
-extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
- bool *pmac_id_active, u32 *pmac_id, u8 *mac);
+extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
+ bool *pmac_id_active, u32 *pmac_id,
+ u8 domain);
extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
u8 mac_count, u32 domain);
extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
@@ -1765,4 +1795,7 @@ extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
struct be_dma_mem *cmd,
struct be_fat_conf_params *cfgs);
+extern int lancer_wait_ready(struct be_adapter *adapter);
+extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
+extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
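The comment block above spells out which packet fields feed each RSS hash type, and the be_cmd_rss_config() hunk enables the new UDP types only on Lancer/Skyhawk. A small sketch of composing that enable mask (the capability flag is a stand-in for the lancer_chip()/skyhawk_chip() test):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RSS_ENABLE_IPV4		0x1
#define RSS_ENABLE_TCP_IPV4	0x2
#define RSS_ENABLE_IPV6		0x4
#define RSS_ENABLE_TCP_IPV6	0x8
#define RSS_ENABLE_UDP_IPV4	0x10
#define RSS_ENABLE_UDP_IPV6	0x20

static uint16_t rss_mask(bool udp_capable)
{
	uint16_t m = RSS_ENABLE_IPV4 | RSS_ENABLE_TCP_IPV4 |
		     RSS_ENABLE_IPV6 | RSS_ENABLE_TCP_IPV6;

	if (udp_capable)	/* Lancer / Skyhawk also hash UDP ports */
		m |= RSS_ENABLE_UDP_IPV4 | RSS_ENABLE_UDP_IPV6;
	return m;
}

int main(void)
{
	printf("0x%x 0x%x\n", rss_mask(false), rss_mask(true));	/* 0xf 0x3f */
	return 0;
}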
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 63e51d4..e34be1c 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -648,7 +648,7 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
struct be_adapter *adapter = netdev_priv(netdev);
int status;
- if (ecmd->autoneg != 0)
+ if (ecmd->autoneg != adapter->phy.fc_autoneg)
return -EINVAL;
adapter->tx_fc = ecmd->tx_pause;
adapter->rx_fc = ecmd->rx_pause;
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index d9fb0c5..b755f70 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -45,20 +45,19 @@
#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
-/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
+/* Lancer SLIPORT registers */
#define SLIPORT_STATUS_OFFSET 0x404
#define SLIPORT_CONTROL_OFFSET 0x408
#define SLIPORT_ERROR1_OFFSET 0x40C
#define SLIPORT_ERROR2_OFFSET 0x410
+#define PHYSDEV_CONTROL_OFFSET 0x414
#define SLIPORT_STATUS_ERR_MASK 0x80000000
#define SLIPORT_STATUS_RN_MASK 0x01000000
#define SLIPORT_STATUS_RDY_MASK 0x00800000
-
-
#define SLI_PORT_CONTROL_IP_MASK 0x08000000
-
-#define PCICFG_CUST_SCRATCHPAD_CSR 0x1EC
+#define PHYSDEV_CONTROL_FW_RESET_MASK 0x00000002
+#define PHYSDEV_CONTROL_INP_MASK 0x40000000
/********* Memory BAR register ************/
#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 08efd30..4d96771 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -155,7 +155,7 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
{
u32 reg, enabled;
- if (adapter->eeh_err)
+ if (adapter->eeh_error)
return;
pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
@@ -201,7 +201,7 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
DB_EQ_RING_ID_EXT_MASK_SHIFT);
- if (adapter->eeh_err)
+ if (adapter->eeh_error)
return;
if (arm)
@@ -220,7 +220,7 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
DB_CQ_RING_ID_EXT_MASK_SHIFT);
- if (adapter->eeh_err)
+ if (adapter->eeh_error)
return;
if (arm)
@@ -558,6 +558,7 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
wrb->frag_pa_hi = upper_32_bits(addr);
wrb->frag_pa_lo = addr & 0xFFFFFFFF;
wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
+ wrb->rsvd0 = 0;
}
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
@@ -576,6 +577,11 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
return vlan_tag;
}
+static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
+{
+ return vlan_tx_tag_present(skb) || adapter->pvid;
+}
+
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
@@ -703,39 +709,64 @@ dma_err:
return 0;
}
+static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
+ struct sk_buff *skb)
+{
+ u16 vlan_tag = 0;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return skb;
+
+ if (vlan_tx_tag_present(skb)) {
+ vlan_tag = be_get_tx_vlan_tag(adapter, skb);
+ __vlan_put_tag(skb, vlan_tag);
+ skb->vlan_tci = 0;
+ }
+
+ return skb;
+}
+
static netdev_tx_t be_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
struct be_queue_info *txq = &txo->q;
+ struct iphdr *ip = NULL;
u32 wrb_cnt = 0, copied = 0;
- u32 start = txq->head;
+ u32 start = txq->head, eth_hdr_len;
bool dummy_wrb, stopped = false;
- /* For vlan tagged pkts, BE
- * 1) calculates checksum even when CSO is not requested
- * 2) calculates checksum wrongly for padded pkt less than
- * 60 bytes long.
- * As a workaround disable TX vlan offloading in such cases.
+ eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
+ VLAN_ETH_HLEN : ETH_HLEN;
+
+ /* HW has a bug which considers padding bytes as legal
+ * and modifies the IPv4 hdr's 'tot_len' field
*/
- if (unlikely(vlan_tx_tag_present(skb) &&
- (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (unlikely(!skb))
- goto tx_drop;
+ if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
+ is_ipv4_pkt(skb)) {
+ ip = (struct iphdr *)ip_hdr(skb);
+ pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
+ }
- skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
+ /* HW has a bug wherein it will calculate CSUM for VLAN
+ * pkts even though it is disabled.
+ * Manually insert VLAN in pkt.
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL &&
+ be_vlan_tag_chk(adapter, skb)) {
+ skb = be_insert_vlan_in_pkt(adapter, skb);
if (unlikely(!skb))
goto tx_drop;
-
- skb->vlan_tci = 0;
}
wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
if (copied) {
+ int gso_segs = skb_shinfo(skb)->gso_segs;
+
/* record the sent skb in the sent_skb table */
BUG_ON(txo->sent_skb_list[start]);
txo->sent_skb_list[start] = skb;
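The workaround above trims a short, padded IPv4 frame back to eth_hdr_len + tot_len so the hardware never sees the pad bytes it would otherwise fold into the IP length. A userspace sketch of that length calculation on a 60-byte minimum frame (offsets and sizes are illustrative):

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_HLEN 14

int main(void)
{
	uint8_t frame[60] = { 0 };	/* minimum-size frame, zero padded */
	uint16_t tot_len = htons(28);	/* IPv4 header (20) + 8 payload bytes */
	uint16_t raw;

	/* tot_len lives at offset 2 of the IPv4 header */
	memcpy(frame + ETH_HLEN + 2, &tot_len, sizeof(tot_len));

	memcpy(&raw, frame + ETH_HLEN + 2, sizeof(raw));
	size_t trimmed = ETH_HLEN + ntohs(raw);
	printf("trim %zu -> %zu bytes\n", sizeof(frame), trimmed);	/* 60 -> 42 */
	return 0;
}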
@@ -753,8 +784,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
be_txq_notify(adapter, txq->id, wrb_cnt);
- be_tx_stats_update(txo, wrb_cnt, copied,
- skb_shinfo(skb)->gso_segs, stopped);
+ be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
} else {
txq->head = start;
dev_kfree_skb_any(skb);
@@ -785,19 +815,12 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
* A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
* If the user configures more, place BE in vlan promiscuous mode.
*/
-static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
+static int be_vid_config(struct be_adapter *adapter)
{
- struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
- u16 vtag[BE_NUM_VLANS_SUPPORTED];
- u16 ntags = 0, i;
+ u16 vids[BE_NUM_VLANS_SUPPORTED];
+ u16 num = 0, i;
int status = 0;
- if (vf) {
- vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
- status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
- 1, 1, 0);
- }
-
/* No need to further configure vids if in promiscuous mode */
if (adapter->promiscuous)
return 0;
@@ -808,10 +831,10 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
/* Construct VLAN Table to give to HW */
for (i = 0; i < VLAN_N_VID; i++)
if (adapter->vlan_tag[i])
- vtag[ntags++] = cpu_to_le16(i);
+ vids[num++] = cpu_to_le16(i);
status = be_cmd_vlan_config(adapter, adapter->if_handle,
- vtag, ntags, 1, 0);
+ vids, num, 1, 0);
/* Set to VLAN promisc mode as setting VLAN filter failed */
if (status) {
@@ -840,7 +863,7 @@ static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
adapter->vlan_tag[vid] = 1;
if (adapter->vlans_added <= (adapter->max_vlans + 1))
- status = be_vid_config(adapter, false, 0);
+ status = be_vid_config(adapter);
if (!status)
adapter->vlans_added++;
@@ -862,7 +885,7 @@ static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
adapter->vlan_tag[vid] = 0;
if (adapter->vlans_added <= adapter->max_vlans)
- status = be_vid_config(adapter, false, 0);
+ status = be_vid_config(adapter);
if (!status)
adapter->vlans_added--;
@@ -889,7 +912,7 @@ static void be_set_rx_mode(struct net_device *netdev)
be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
if (adapter->vlans_added)
- be_vid_config(adapter, false, 0);
+ be_vid_config(adapter);
}
/* Enable multicast promisc if num configured exceeds what we support */
@@ -1056,13 +1079,16 @@ static int be_find_vfs(struct be_adapter *adapter, int vf_state)
u16 offset, stride;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return 0;
pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
while (dev) {
vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
- if (dev->is_virtfn && dev->devfn == vf_fn) {
+ if (dev->is_virtfn && dev->devfn == vf_fn &&
+ dev->bus->number == pdev->bus->number) {
vfs++;
if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
assigned_vfs++;
@@ -1202,16 +1228,16 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
/* Copy data in the first descriptor of this completion */
curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
- /* Copy the header portion into skb_data */
- hdr_len = min(BE_HDR_LEN, curr_frag_len);
- memcpy(skb->data, start, hdr_len);
skb->len = curr_frag_len;
if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
+ memcpy(skb->data, start, curr_frag_len);
/* Complete packet has now been moved to data */
put_page(page_info->page);
skb->data_len = 0;
skb->tail += curr_frag_len;
} else {
+ hdr_len = ETH_HLEN;
+ memcpy(skb->data, start, hdr_len);
skb_shinfo(skb)->nr_frags = 1;
skb_frag_set_page(skb, 0, page_info->page);
skb_shinfo(skb)->frags[0].page_offset =
@@ -1708,9 +1734,10 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
int i;
for_all_evt_queues(adapter, eqo, i) {
- be_eq_clean(eqo);
- if (eqo->q.created)
+ if (eqo->q.created) {
+ be_eq_clean(eqo);
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
+ }
be_queue_free(adapter, &eqo->q);
}
}
@@ -1897,6 +1924,12 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
*/
adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
num_irqs(adapter) + 1 : 1;
+ if (adapter->num_rx_qs != MAX_RX_QS) {
+ rtnl_lock();
+ netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_qs);
+ rtnl_unlock();
+ }
adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
for_all_rx_queues(adapter, rxo, i) {
@@ -2066,13 +2099,13 @@ int be_poll(struct napi_struct *napi, int budget)
return max_work;
}
-void be_detect_dump_ue(struct be_adapter *adapter)
+void be_detect_error(struct be_adapter *adapter)
{
u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
u32 i;
- if (adapter->eeh_err || adapter->ue_detected)
+ if (be_crit_error(adapter))
return;
if (lancer_chip(adapter)) {
@@ -2093,16 +2126,24 @@ void be_detect_dump_ue(struct be_adapter *adapter)
pci_read_config_dword(adapter->pdev,
PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
- ue_lo = (ue_lo & (~ue_lo_mask));
- ue_hi = (ue_hi & (~ue_hi_mask));
+ ue_lo = (ue_lo & ~ue_lo_mask);
+ ue_hi = (ue_hi & ~ue_hi_mask);
}
if (ue_lo || ue_hi ||
sliport_status & SLIPORT_STATUS_ERR_MASK) {
- adapter->ue_detected = true;
- adapter->eeh_err = true;
+ adapter->hw_error = true;
+ dev_err(&adapter->pdev->dev,
+ "Error detected in the card\n");
+ }
+
+ if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+ dev_err(&adapter->pdev->dev,
+ "ERR: sliport status 0x%x\n", sliport_status);
dev_err(&adapter->pdev->dev,
- "Unrecoverable error in the card\n");
+ "ERR: sliport error1 0x%x\n", sliport_err1);
+ dev_err(&adapter->pdev->dev,
+ "ERR: sliport error2 0x%x\n", sliport_err2);
}
if (ue_lo) {
@@ -2112,6 +2153,7 @@ void be_detect_dump_ue(struct be_adapter *adapter)
"UE: %s bit set\n", ue_status_low_desc[i]);
}
}
+
if (ue_hi) {
for (i = 0; ue_hi; ue_hi >>= 1, i++) {
if (ue_hi & 1)
@@ -2120,14 +2162,6 @@ void be_detect_dump_ue(struct be_adapter *adapter)
}
}
- if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- dev_err(&adapter->pdev->dev,
- "sliport status 0x%x\n", sliport_status);
- dev_err(&adapter->pdev->dev,
- "sliport error1 0x%x\n", sliport_err1);
- dev_err(&adapter->pdev->dev,
- "sliport error2 0x%x\n", sliport_err2);
- }
}
static void be_msix_disable(struct be_adapter *adapter)
@@ -2140,12 +2174,14 @@ static void be_msix_disable(struct be_adapter *adapter)
static uint be_num_rss_want(struct be_adapter *adapter)
{
+ u32 num = 0;
if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
!sriov_want(adapter) && be_physfn(adapter) &&
- !be_is_mc(adapter))
- return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
- else
- return 0;
+ !be_is_mc(adapter)) {
+ num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
+ num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
+ }
+ return num;
}
static void be_msix_enable(struct be_adapter *adapter)
@@ -2539,11 +2575,7 @@ static int be_clear(struct be_adapter *adapter)
be_tx_queues_destroy(adapter);
be_evt_queues_destroy(adapter);
- /* tell fw we're done with firing cmds */
- be_cmd_fw_clean(adapter);
-
be_msix_disable(adapter);
- pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 0);
return 0;
}
@@ -2601,8 +2633,8 @@ static int be_vf_setup(struct be_adapter *adapter)
cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST;
for_all_vfs(adapter, vf_cfg, vf) {
- status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
- &vf_cfg->if_handle, NULL, vf + 1);
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ &vf_cfg->if_handle, vf + 1);
if (status)
goto err;
}
@@ -2642,29 +2674,43 @@ static void be_setup_init(struct be_adapter *adapter)
adapter->phy.forced_port_speed = -1;
}
-static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
+static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
+ bool *active_mac, u32 *pmac_id)
{
- u32 pmac_id;
- int status;
- bool pmac_id_active;
+ int status = 0;
- status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
- &pmac_id, mac);
- if (status != 0)
- goto do_none;
+ if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
+ memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
+ if (!lancer_chip(adapter) && !be_physfn(adapter))
+ *active_mac = true;
+ else
+ *active_mac = false;
- if (pmac_id_active) {
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK,
- false, adapter->if_handle, pmac_id);
+ return status;
+ }
- if (!status)
- adapter->pmac_id[0] = pmac_id;
+ if (lancer_chip(adapter)) {
+ status = be_cmd_get_mac_from_list(adapter, mac,
+ active_mac, pmac_id, 0);
+ if (*active_mac) {
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK,
+ false, if_handle,
+ *pmac_id);
+ }
+ } else if (be_physfn(adapter)) {
+ /* For BE3, for PF get permanent MAC */
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, true,
+ 0, 0);
+ *active_mac = false;
} else {
- status = be_cmd_pmac_add(adapter, mac,
- adapter->if_handle, &adapter->pmac_id[0], 0);
+ /* For BE3, for VF get soft MAC assigned by PF*/
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, false,
+ if_handle, 0);
+ *active_mac = true;
}
-do_none:
return status;
}
@@ -2685,12 +2731,12 @@ static int be_get_config(struct be_adapter *adapter)
static int be_setup(struct be_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
struct device *dev = &adapter->pdev->dev;
u32 cap_flags, en_flags;
u32 tx_fc, rx_fc;
int status;
u8 mac[ETH_ALEN];
+ bool active_mac;
be_setup_init(adapter);
@@ -2716,14 +2762,6 @@ static int be_setup(struct be_adapter *adapter)
if (status)
goto err;
- memset(mac, 0, ETH_ALEN);
- status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
- true /*permanent */, 0, 0);
- if (status)
- return status;
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
-
en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
@@ -2733,27 +2771,36 @@ static int be_setup(struct be_adapter *adapter)
cap_flags |= BE_IF_FLAGS_RSS;
en_flags |= BE_IF_FLAGS_RSS;
}
+
+ if (lancer_chip(adapter) && !be_physfn(adapter)) {
+ en_flags = BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST;
+ cap_flags = en_flags;
+ }
+
status = be_cmd_if_create(adapter, cap_flags, en_flags,
- netdev->dev_addr, &adapter->if_handle,
- &adapter->pmac_id[0], 0);
+ &adapter->if_handle, 0);
if (status != 0)
goto err;
- /* The VF's permanent mac queried from card is incorrect.
- * For BEx: Query the mac configued by the PF using if_handle
- * For Lancer: Get and use mac_list to obtain mac address.
- */
- if (!be_physfn(adapter)) {
- if (lancer_chip(adapter))
- status = be_add_mac_from_list(adapter, mac);
- else
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK, false,
- adapter->if_handle, 0);
- if (!status) {
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- }
+ memset(mac, 0, ETH_ALEN);
+ active_mac = false;
+ status = be_get_mac_addr(adapter, mac, adapter->if_handle,
+ &active_mac, &adapter->pmac_id[0]);
+ if (status != 0)
+ goto err;
+
+ if (!active_mac) {
+ status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+ &adapter->pmac_id[0], 0);
+ if (status != 0)
+ goto err;
+ }
+
+ if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
}
status = be_tx_qs_create(adapter);
@@ -2762,7 +2809,8 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
- be_vid_config(adapter, false, 0);
+ if (adapter->vlans_added)
+ be_vid_config(adapter);
be_set_rx_mode(adapter->netdev);
@@ -2772,8 +2820,6 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
- pcie_set_readrq(adapter->pdev, 4096);
-
if (be_physfn(adapter) && num_vfs) {
if (adapter->dev_num_vfs)
be_vf_setup(adapter);
@@ -2787,8 +2833,6 @@ static int be_setup(struct be_adapter *adapter)
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
-
- pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 1);
return 0;
err:
be_clear(adapter);
@@ -3032,6 +3076,40 @@ static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
return 0;
}
+static int lancer_wait_idle(struct be_adapter *adapter)
+{
+#define SLIPORT_IDLE_TIMEOUT 30
+ u32 reg_val;
+ int status = 0, i;
+
+ for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
+ reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
+ if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
+ break;
+
+ ssleep(1);
+ }
+
+ if (i == SLIPORT_IDLE_TIMEOUT)
+ status = -1;
+
+ return status;
+}
+
+static int lancer_fw_reset(struct be_adapter *adapter)
+{
+ int status = 0;
+
+ status = lancer_wait_idle(adapter);
+ if (status)
+ return status;
+
+ iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
+ PHYSDEV_CONTROL_OFFSET);
+
+ return status;
+}
+
static int lancer_fw_download(struct be_adapter *adapter,
const struct firmware *fw)
{
@@ -3046,6 +3124,7 @@ static int lancer_fw_download(struct be_adapter *adapter,
u32 offset = 0;
int status = 0;
u8 add_status = 0;
+ u8 change_status;
if (!IS_ALIGNED(fw->size, sizeof(u32))) {
dev_err(&adapter->pdev->dev,
@@ -3078,9 +3157,10 @@ static int lancer_fw_download(struct be_adapter *adapter,
memcpy(dest_image_ptr, data_ptr, chunk_size);
status = lancer_cmd_write_object(adapter, &flash_cmd,
- chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
- &data_written, &add_status);
-
+ chunk_size, offset,
+ LANCER_FW_DOWNLOAD_LOCATION,
+ &data_written, &change_status,
+ &add_status);
if (status)
break;
@@ -3092,8 +3172,10 @@ static int lancer_fw_download(struct be_adapter *adapter,
if (!status) {
/* Commit the FW written */
status = lancer_cmd_write_object(adapter, &flash_cmd,
- 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
- &data_written, &add_status);
+ 0, offset,
+ LANCER_FW_DOWNLOAD_LOCATION,
+ &data_written, &change_status,
+ &add_status);
}
dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
@@ -3106,6 +3188,20 @@ static int lancer_fw_download(struct be_adapter *adapter,
goto lancer_fw_exit;
}
+ if (change_status == LANCER_FW_RESET_NEEDED) {
+ status = lancer_fw_reset(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Adapter busy for FW reset.\n"
+ "New FW will not be active.\n");
+ goto lancer_fw_exit;
+ }
+ } else if (change_status != LANCER_NO_RESET_NEEDED) {
+ dev_err(&adapter->pdev->dev,
+ "System reboot required for new FW"
+ " to be active\n");
+ }
+
dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
return status;
@@ -3236,7 +3332,7 @@ static void be_netdev_init(struct net_device *netdev)
netdev->flags |= IFF_MULTICAST;
- netif_set_gso_max_size(netdev, 65535);
+ netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
netdev->netdev_ops = &be_netdev_ops;
@@ -3434,10 +3530,15 @@ static void __devexit be_remove(struct pci_dev *pdev)
be_roce_dev_remove(adapter);
+ cancel_delayed_work_sync(&adapter->func_recovery_work);
+
unregister_netdev(adapter->netdev);
be_clear(adapter);
+ /* tell fw we're done with firing cmds */
+ be_cmd_fw_clean(adapter);
+
be_stats_cleanup(adapter);
be_ctrl_cleanup(adapter);
@@ -3529,6 +3630,9 @@ static int be_get_initial_config(struct be_adapter *adapter)
if (be_is_wol_supported(adapter))
adapter->wol = true;
+ /* Must be a power of 2 or else MODULO will BUG_ON */
+ adapter->be_get_temp_freq = 64;
+
level = be_get_fw_log_level(adapter);
adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
@@ -3584,101 +3688,68 @@ static int be_dev_type_check(struct be_adapter *adapter)
return 0;
}
-static int lancer_wait_ready(struct be_adapter *adapter)
+static int lancer_recover_func(struct be_adapter *adapter)
{
-#define SLIPORT_READY_TIMEOUT 30
- u32 sliport_status;
- int status = 0, i;
+ int status;
- for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
- sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
- if (sliport_status & SLIPORT_STATUS_RDY_MASK)
- break;
+ status = lancer_test_and_set_rdy_state(adapter);
+ if (status)
+ goto err;
- msleep(1000);
- }
+ if (netif_running(adapter->netdev))
+ be_close(adapter->netdev);
- if (i == SLIPORT_READY_TIMEOUT)
- status = -1;
+ be_clear(adapter);
- return status;
-}
+ adapter->hw_error = false;
+ adapter->fw_timeout = false;
-static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
-{
- int status;
- u32 sliport_status, err, reset_needed;
- status = lancer_wait_ready(adapter);
- if (!status) {
- sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
- err = sliport_status & SLIPORT_STATUS_ERR_MASK;
- reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
- if (err && reset_needed) {
- iowrite32(SLI_PORT_CONTROL_IP_MASK,
- adapter->db + SLIPORT_CONTROL_OFFSET);
-
- /* check adapter has corrected the error */
- status = lancer_wait_ready(adapter);
- sliport_status = ioread32(adapter->db +
- SLIPORT_STATUS_OFFSET);
- sliport_status &= (SLIPORT_STATUS_ERR_MASK |
- SLIPORT_STATUS_RN_MASK);
- if (status || sliport_status)
- status = -1;
- } else if (err || reset_needed) {
- status = -1;
- }
+ status = be_setup(adapter);
+ if (status)
+ goto err;
+
+ if (netif_running(adapter->netdev)) {
+ status = be_open(adapter->netdev);
+ if (status)
+ goto err;
}
+
+ dev_err(&adapter->pdev->dev,
+ "Adapter SLIPORT recovery succeeded\n");
+ return 0;
+err:
+ dev_err(&adapter->pdev->dev,
+ "Adapter SLIPORT recovery failed\n");
+
return status;
}
-static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
+static void be_func_recovery_task(struct work_struct *work)
{
+ struct be_adapter *adapter =
+ container_of(work, struct be_adapter, func_recovery_work.work);
int status;
- u32 sliport_status;
-
- if (adapter->eeh_err || adapter->ue_detected)
- return;
- sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ be_detect_error(adapter);
- if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- dev_err(&adapter->pdev->dev,
- "Adapter in error state."
- "Trying to recover.\n");
+ if (adapter->hw_error && lancer_chip(adapter)) {
- status = lancer_test_and_set_rdy_state(adapter);
- if (status)
- goto err;
+ if (adapter->eeh_error)
+ goto out;
+ rtnl_lock();
netif_device_detach(adapter->netdev);
+ rtnl_unlock();
- if (netif_running(adapter->netdev))
- be_close(adapter->netdev);
-
- be_clear(adapter);
-
- adapter->fw_timeout = false;
-
- status = be_setup(adapter);
- if (status)
- goto err;
-
- if (netif_running(adapter->netdev)) {
- status = be_open(adapter->netdev);
- if (status)
- goto err;
- }
-
- netif_device_attach(adapter->netdev);
+ status = lancer_recover_func(adapter);
- dev_err(&adapter->pdev->dev,
- "Adapter error recovery succeeded\n");
+ if (!status)
+ netif_device_attach(adapter->netdev);
}
- return;
-err:
- dev_err(&adapter->pdev->dev,
- "Adapter error recovery failed\n");
+
+out:
+ schedule_delayed_work(&adapter->func_recovery_work,
+ msecs_to_jiffies(1000));
}
static void be_worker(struct work_struct *work)
@@ -3689,11 +3760,6 @@ static void be_worker(struct work_struct *work)
struct be_eq_obj *eqo;
int i;
- if (lancer_chip(adapter))
- lancer_test_and_recover_fn_err(adapter);
-
- be_detect_dump_ue(adapter);
-
/* when interrupts are not yet enabled, just reap any pending
* mcc completions */
if (!netif_running(adapter->netdev)) {
@@ -3709,6 +3775,9 @@ static void be_worker(struct work_struct *work)
be_cmd_get_stats(adapter, &adapter->stats_cmd);
}
+ if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
+ be_cmd_get_die_temperature(adapter);
+
for_all_rx_queues(adapter, rxo, i) {
if (rxo->rx_post_starved) {
rxo->rx_post_starved = false;
@@ -3726,10 +3795,7 @@ reschedule:
static bool be_reset_required(struct be_adapter *adapter)
{
- u32 reg;
-
- pci_read_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, &reg);
- return reg;
+ return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
}
static int __devinit be_probe(struct pci_dev *pdev,
@@ -3738,6 +3804,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
int status = 0;
struct be_adapter *adapter;
struct net_device *netdev;
+ char port_name;
status = pci_enable_device(pdev);
if (status)
@@ -3748,7 +3815,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto disable_dev;
pci_set_master(pdev);
- netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
+ netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
if (netdev == NULL) {
status = -ENOMEM;
goto rel_reg;
@@ -3779,22 +3846,9 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status)
goto free_netdev;
- if (lancer_chip(adapter)) {
- status = lancer_wait_ready(adapter);
- if (!status) {
- iowrite32(SLI_PORT_CONTROL_IP_MASK,
- adapter->db + SLIPORT_CONTROL_OFFSET);
- status = lancer_test_and_set_rdy_state(adapter);
- }
- if (status) {
- dev_err(&pdev->dev, "Adapter in non recoverable error\n");
- goto ctrl_clean;
- }
- }
-
/* sync up with fw's ready state */
if (be_physfn(adapter)) {
- status = be_cmd_POST(adapter);
+ status = be_fw_wait_ready(adapter);
if (status)
goto ctrl_clean;
}
@@ -3825,6 +3879,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto stats_clean;
INIT_DELAYED_WORK(&adapter->work, be_worker);
+ INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
adapter->rx_fc = adapter->tx_fc = true;
status = be_setup(adapter);
@@ -3838,8 +3893,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
be_roce_dev_add(adapter);
- dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
- adapter->port_num);
+ schedule_delayed_work(&adapter->func_recovery_work,
+ msecs_to_jiffies(1000));
+
+ be_cmd_query_port_name(adapter, &port_name);
+
+ dev_info(&pdev->dev, "%s: %s port %c\n", netdev->name, nic_name(pdev),
+ port_name);
return 0;
@@ -3871,6 +3931,8 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
if (adapter->wol)
be_setup_wol(adapter, true);
+ cancel_delayed_work_sync(&adapter->func_recovery_work);
+
netif_device_detach(netdev);
if (netif_running(netdev)) {
rtnl_lock();
@@ -3911,6 +3973,9 @@ static int be_resume(struct pci_dev *pdev)
be_open(netdev);
rtnl_unlock();
}
+
+ schedule_delayed_work(&adapter->func_recovery_work,
+ msecs_to_jiffies(1000));
netif_device_attach(netdev);
if (adapter->wol)
@@ -3930,6 +3995,7 @@ static void be_shutdown(struct pci_dev *pdev)
return;
cancel_delayed_work_sync(&adapter->work);
+ cancel_delayed_work_sync(&adapter->func_recovery_work);
netif_device_detach(adapter->netdev);
@@ -3949,9 +4015,13 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
dev_err(&adapter->pdev->dev, "EEH error detected\n");
- adapter->eeh_err = true;
+ adapter->eeh_error = true;
+ cancel_delayed_work_sync(&adapter->func_recovery_work);
+
+ rtnl_lock();
netif_device_detach(netdev);
+ rtnl_unlock();
if (netif_running(netdev)) {
rtnl_lock();
@@ -3979,9 +4049,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
int status;
dev_info(&adapter->pdev->dev, "EEH reset\n");
- adapter->eeh_err = false;
- adapter->ue_detected = false;
- adapter->fw_timeout = false;
+ be_clear_all_error(adapter);
status = pci_enable_device(pdev);
if (status)
@@ -3992,7 +4060,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
pci_restore_state(pdev);
/* Check if card is ok and fw is ready */
- status = be_cmd_POST(adapter);
+ status = be_fw_wait_ready(adapter);
if (status)
return PCI_ERS_RESULT_DISCONNECT;
@@ -4014,6 +4082,10 @@ static void be_eeh_resume(struct pci_dev *pdev)
if (status)
goto err;
+ status = be_cmd_reset_function(adapter);
+ if (status)
+ goto err;
+
status = be_setup(adapter);
if (status)
goto err;
@@ -4023,6 +4095,9 @@ static void be_eeh_resume(struct pci_dev *pdev)
if (status)
goto err;
}
+
+ schedule_delayed_work(&adapter->func_recovery_work,
+ msecs_to_jiffies(1000));
netif_device_attach(netdev);
return;
err:
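Much of the be_main.c churn above moves Lancer error recovery out of be_worker() into a dedicated func_recovery_work that re-arms itself every second and is cancelled on the remove, shutdown, suspend and EEH paths. A stand-alone module sketch of that self-rescheduling delayed-work pattern (not the driver's code; the work body is elided):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work recovery_work;

static void recovery_task(struct work_struct *work)
{
	/* ... detect errors and, if needed, recover the function here ... */
	schedule_delayed_work(&recovery_work, msecs_to_jiffies(1000));
}

static int __init recovery_demo_init(void)
{
	INIT_DELAYED_WORK(&recovery_work, recovery_task);
	schedule_delayed_work(&recovery_work, msecs_to_jiffies(1000));
	return 0;
}

static void __exit recovery_demo_exit(void)
{
	/* copes with self-rearming works, so unload cannot race the re-schedule */
	cancel_delayed_work_sync(&recovery_work);
}

module_init(recovery_demo_init);
module_exit(recovery_demo_exit);
MODULE_LICENSE("GPL");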
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index a381678..94b7bfc 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -902,7 +902,7 @@ static const struct net_device_ops ethoc_netdev_ops = {
};
/**
- * ethoc_probe() - initialize OpenCores ethernet MAC
+ * ethoc_probe - initialize OpenCores ethernet MAC
* pdev: platform device
*/
static int __devinit ethoc_probe(struct platform_device *pdev)
@@ -1057,7 +1057,7 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
/* Check the MAC again for validity, if it still isn't choose and
* program a random one. */
if (!is_valid_ether_addr(netdev->dev_addr)) {
- random_ether_addr(netdev->dev_addr);
+ eth_random_addr(netdev->dev_addr);
random_mac = true;
}
@@ -1140,7 +1140,7 @@ out:
}
/**
- * ethoc_remove() - shutdown OpenCores ethernet MAC
+ * ethoc_remove - shutdown OpenCores ethernet MAC
* @pdev: platform device
*/
static int __devexit ethoc_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 16b0704..74d749e 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -479,9 +479,14 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
rxdes = ftgmac100_current_rxdes(priv);
} while (!done);
- if (skb->len <= 64)
+ /* Small frames are copied into linear part of skb to free one page */
+ if (skb->len <= 128) {
skb->truesize -= PAGE_SIZE;
- __pskb_pull_tail(skb, min(skb->len, 64U));
+ __pskb_pull_tail(skb, skb->len);
+ } else {
+ /* We pull the minimum amount into linear part */
+ __pskb_pull_tail(skb, ETH_HLEN);
+ }
skb->protocol = eth_type_trans(skb, netdev);
netdev->stats.rx_packets++;
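The ftgmac100 and ftmac100 hunks switch to a copy-break style decision: frames up to 128 bytes are pulled entirely into the linear area so the attached page can be freed, while larger frames only get the Ethernet header pulled in. The decision itself, as a compilable sketch (the 128-byte threshold mirrors the patch; the function name is invented):

#include <stdio.h>

#define COPYBREAK 128
#define ETH_HLEN  14

static unsigned int bytes_to_pull(unsigned int frame_len)
{
	/* small frame: copy it all and recycle the page; big frame: header only */
	return frame_len <= COPYBREAK ? frame_len : ETH_HLEN;
}

int main(void)
{
	printf("%u %u\n", bytes_to_pull(60), bytes_to_pull(1514));	/* 60 14 */
	return 0;
}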
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 829b109..b901a01 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -441,11 +441,14 @@ static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
skb->len += length;
skb->data_len += length;
- /* page might be freed in __pskb_pull_tail() */
- if (length > 64)
+ if (length > 128) {
skb->truesize += PAGE_SIZE;
- __pskb_pull_tail(skb, min(length, 64));
-
+ /* We pull the minimum amount into linear part */
+ __pskb_pull_tail(skb, ETH_HLEN);
+ } else {
+ /* Small frames are copied into linear part to free one page */
+ __pskb_pull_tail(skb, length);
+ }
ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);
ftmac100_rx_pointer_advance(priv);
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index ff7f4c5..fffd205 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -49,6 +49,7 @@
#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
#include <asm/cacheflush.h>
@@ -1388,8 +1389,8 @@ fec_set_mac_address(struct net_device *ndev, void *p)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * fec_poll_controller: FEC Poll controller function
+/**
+ * fec_poll_controller - FEC Poll controller function
* @dev: The FEC network adapter
*
* Polled functionality used by netconsole and others in non interrupt mode
@@ -1506,18 +1507,25 @@ static int __devinit fec_get_phy_mode_dt(struct platform_device *pdev)
static void __devinit fec_reset_phy(struct platform_device *pdev)
{
int err, phy_reset;
+ int msec = 1;
struct device_node *np = pdev->dev.of_node;
if (!np)
return;
+ of_property_read_u32(np, "phy-reset-duration", &msec);
+ /* A sane reset duration should not be longer than 1s */
+ if (msec > 1000)
+ msec = 1;
+
phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
- err = gpio_request_one(phy_reset, GPIOF_OUT_INIT_LOW, "phy-reset");
+ err = devm_gpio_request_one(&pdev->dev, phy_reset,
+ GPIOF_OUT_INIT_LOW, "phy-reset");
if (err) {
pr_debug("FEC: failed to get gpio phy-reset: %d\n", err);
return;
}
- msleep(1);
+ msleep(msec);
gpio_set_value(phy_reset, 1);
}
#else /* CONFIG_OF */
@@ -1546,6 +1554,7 @@ fec_probe(struct platform_device *pdev)
const struct of_device_id *of_id;
static int dev_id;
struct pinctrl *pinctrl;
+ struct regulator *reg_phy;
of_id = of_match_device(fec_dt_ids, &pdev->dev);
if (of_id)
@@ -1593,8 +1602,6 @@ fec_probe(struct platform_device *pdev)
fep->phy_interface = ret;
}
- fec_reset_phy(pdev);
-
for (i = 0; i < FEC_IRQ_NUM; i++) {
irq = platform_get_irq(pdev, i);
if (irq < 0) {
@@ -1634,6 +1641,18 @@ fec_probe(struct platform_device *pdev)
clk_prepare_enable(fep->clk_ahb);
clk_prepare_enable(fep->clk_ipg);
+ reg_phy = devm_regulator_get(&pdev->dev, "phy");
+ if (!IS_ERR(reg_phy)) {
+ ret = regulator_enable(reg_phy);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to enable phy regulator: %d\n", ret);
+ goto failed_regulator;
+ }
+ }
+
+ fec_reset_phy(pdev);
+
ret = fec_enet_init(ndev);
if (ret)
goto failed_init;
@@ -1655,6 +1674,7 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_init:
+failed_regulator:
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
failed_pin:
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 97f947b..2933d08 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -437,7 +437,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
length = status & BCOM_FEC_RX_BD_LEN_MASK;
skb_put(rskb, length - 4); /* length without CRC32 */
rskb->protocol = eth_type_trans(rskb, dev);
- if (!skb_defer_rx_timestamp(skb))
+ if (!skb_defer_rx_timestamp(rskb))
netif_rx(rskb);
spin_lock(&priv->lock);
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index f7f0bf5..9527b28 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -47,6 +47,9 @@
#include "gianfar.h"
#include "fsl_pq_mdio.h"
+/* Number of microseconds to wait for an MII register to respond */
+#define MII_TIMEOUT 1000
+
struct fsl_pq_mdio_priv {
void __iomem *map;
struct fsl_pq_mdio __iomem *regs;
@@ -64,6 +67,8 @@ struct fsl_pq_mdio_priv {
int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
int regnum, u16 value)
{
+ u32 status;
+
/* Set the PHY address and the register address we want to write */
out_be32(&regs->miimadd, (mii_id << 8) | regnum);
@@ -71,10 +76,10 @@ int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
out_be32(&regs->miimcon, value);
/* Wait for the transaction to finish */
- while (in_be32(&regs->miimind) & MIIMIND_BUSY)
- cpu_relax();
+ status = spin_event_timeout(!(in_be32(&regs->miimind) & MIIMIND_BUSY),
+ MII_TIMEOUT, 0);
- return 0;
+ return status ? 0 : -ETIMEDOUT;
}
/*
@@ -91,6 +96,7 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
int mii_id, int regnum)
{
u16 value;
+ u32 status;
/* Set the PHY address and the register address we want to read */
out_be32(&regs->miimadd, (mii_id << 8) | regnum);
@@ -99,9 +105,12 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
out_be32(&regs->miimcom, 0);
out_be32(&regs->miimcom, MII_READ_COMMAND);
- /* Wait for the transaction to finish */
- while (in_be32(&regs->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY))
- cpu_relax();
+ /* Wait for the transaction to finish, normally less than 100us */
+ status = spin_event_timeout(!(in_be32(&regs->miimind) &
+ (MIIMIND_NOTVALID | MIIMIND_BUSY)),
+ MII_TIMEOUT, 0);
+ if (!status)
+ return -ETIMEDOUT;
/* Grab the value of the register from miimstat */
value = in_be32(&regs->miimstat);
@@ -144,7 +153,7 @@ int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
static int fsl_pq_mdio_reset(struct mii_bus *bus)
{
struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
- int timeout = PHY_INIT_TIMEOUT;
+ u32 status;
mutex_lock(&bus->mdio_lock);
@@ -155,12 +164,12 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
out_be32(&regs->miimcfg, MIIMCFG_INIT_VALUE);
/* Wait until the bus is free */
- while ((in_be32(&regs->miimind) & MIIMIND_BUSY) && timeout--)
- cpu_relax();
+ status = spin_event_timeout(!(in_be32(&regs->miimind) & MIIMIND_BUSY),
+ MII_TIMEOUT, 0);
mutex_unlock(&bus->mdio_lock);
- if (timeout < 0) {
+ if (!status) {
printk(KERN_ERR "%s: The MII Bus is stuck!\n",
bus->name);
return -EBUSY;
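The fsl_pq_mdio changes replace unbounded cpu_relax() loops with spin_event_timeout() so a stuck MII bus yields -ETIMEDOUT instead of a hang. A userspace approximation of that bounded wait, with read_busy() standing in for the MIIMIND_BUSY check:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define MII_TIMEOUT_US 1000

static bool read_busy(void)
{
	static int polls;

	return ++polls < 5;	/* pretend the bus frees up after a few reads */
}

static int wait_not_busy(void)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (read_busy()) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000000L +
		    (now.tv_nsec - start.tv_nsec) / 1000L > MII_TIMEOUT_US)
			return -ETIMEDOUT;	/* bus stuck: report it */
	}
	return 0;
}

int main(void)
{
	printf("wait_not_busy() = %d\n", wait_not_busy());
	return 0;
}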
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 0741ade..4605f72 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1,5 +1,4 @@
-/*
- * drivers/net/ethernet/freescale/gianfar.c
+/* drivers/net/ethernet/freescale/gianfar.c
*
* Gianfar Ethernet Driver
* This driver is designed for the non-CPM ethernet controllers
@@ -114,7 +113,7 @@ static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
- struct sk_buff *skb);
+ struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -266,8 +265,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
tx_queue->tx_bd_dma_base = addr;
tx_queue->dev = ndev;
/* enet DMA only understands physical addresses */
- addr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
- vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
+ addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+ vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
}
/* Start the rx descriptor ring where the tx ring leaves off */
@@ -276,15 +275,16 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
rx_queue->rx_bd_base = vaddr;
rx_queue->rx_bd_dma_base = addr;
rx_queue->dev = ndev;
- addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
- vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
+ addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+ vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
}
/* Setup the skbuff rings */
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
- tx_queue->tx_ring_size, GFP_KERNEL);
+ tx_queue->tx_ring_size,
+ GFP_KERNEL);
if (!tx_queue->tx_skbuff) {
netif_err(priv, ifup, ndev,
"Could not allocate tx_skbuff\n");
@@ -298,7 +298,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
- rx_queue->rx_ring_size, GFP_KERNEL);
+ rx_queue->rx_ring_size,
+ GFP_KERNEL);
if (!rx_queue->rx_skbuff) {
netif_err(priv, ifup, ndev,
@@ -327,15 +328,15 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv)
int i;
baddr = &regs->tbase0;
- for(i = 0; i < priv->num_tx_queues; i++) {
+ for (i = 0; i < priv->num_tx_queues; i++) {
gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
- baddr += 2;
+ baddr += 2;
}
baddr = &regs->rbase0;
- for(i = 0; i < priv->num_rx_queues; i++) {
+ for (i = 0; i < priv->num_rx_queues; i++) {
gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
- baddr += 2;
+ baddr += 2;
}
}
@@ -405,7 +406,8 @@ static void gfar_init_mac(struct net_device *ndev)
gfar_write(&regs->attreli, attrs);
/* Start with defaults, and add stashing or locking
- * depending on the approprate variables */
+ * depending on the appropriate variables
+ */
attrs = ATTR_INIT_SETTINGS;
if (priv->bd_stash_en)
@@ -426,16 +428,16 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
unsigned long tx_packets = 0, tx_bytes = 0;
- int i = 0;
+ int i;
for (i = 0; i < priv->num_rx_queues; i++) {
rx_packets += priv->rx_queue[i]->stats.rx_packets;
- rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
+ rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
}
dev->stats.rx_packets = rx_packets;
- dev->stats.rx_bytes = rx_bytes;
+ dev->stats.rx_bytes = rx_bytes;
dev->stats.rx_dropped = rx_dropped;
for (i = 0; i < priv->num_tx_queues; i++) {
@@ -443,7 +445,7 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
tx_packets += priv->tx_queue[i]->stats.tx_packets;
}
- dev->stats.tx_bytes = tx_bytes;
+ dev->stats.tx_bytes = tx_bytes;
dev->stats.tx_packets = tx_packets;
return &dev->stats;
@@ -468,7 +470,7 @@ static const struct net_device_ops gfar_netdev_ops = {
void lock_rx_qs(struct gfar_private *priv)
{
- int i = 0x0;
+ int i;
for (i = 0; i < priv->num_rx_queues; i++)
spin_lock(&priv->rx_queue[i]->rxlock);
@@ -476,7 +478,7 @@ void lock_rx_qs(struct gfar_private *priv)
void lock_tx_qs(struct gfar_private *priv)
{
- int i = 0x0;
+ int i;
for (i = 0; i < priv->num_tx_queues; i++)
spin_lock(&priv->tx_queue[i]->txlock);
@@ -484,7 +486,7 @@ void lock_tx_qs(struct gfar_private *priv)
void unlock_rx_qs(struct gfar_private *priv)
{
- int i = 0x0;
+ int i;
for (i = 0; i < priv->num_rx_queues; i++)
spin_unlock(&priv->rx_queue[i]->rxlock);
@@ -492,7 +494,7 @@ void unlock_rx_qs(struct gfar_private *priv)
void unlock_tx_qs(struct gfar_private *priv)
{
- int i = 0x0;
+ int i;
for (i = 0; i < priv->num_tx_queues; i++)
spin_unlock(&priv->tx_queue[i]->txlock);
@@ -508,13 +510,13 @@ static bool gfar_is_vlan_on(struct gfar_private *priv)
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
return gfar_is_vlan_on(priv) ||
- (priv->ndev->features & NETIF_F_RXCSUM) ||
- (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
+ (priv->ndev->features & NETIF_F_RXCSUM) ||
+ (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}
static void free_tx_pointers(struct gfar_private *priv)
{
- int i = 0;
+ int i;
for (i = 0; i < priv->num_tx_queues; i++)
kfree(priv->tx_queue[i]);
@@ -522,7 +524,7 @@ static void free_tx_pointers(struct gfar_private *priv)
static void free_rx_pointers(struct gfar_private *priv)
{
- int i = 0;
+ int i;
for (i = 0; i < priv->num_rx_queues; i++)
kfree(priv->rx_queue[i]);
@@ -530,7 +532,7 @@ static void free_rx_pointers(struct gfar_private *priv)
static void unmap_group_regs(struct gfar_private *priv)
{
- int i = 0;
+ int i;
for (i = 0; i < MAXGROUPS; i++)
if (priv->gfargrp[i].regs)
@@ -539,7 +541,7 @@ static void unmap_group_regs(struct gfar_private *priv)
static void disable_napi(struct gfar_private *priv)
{
- int i = 0;
+ int i;
for (i = 0; i < priv->num_grps; i++)
napi_disable(&priv->gfargrp[i].napi);
@@ -547,14 +549,14 @@ static void disable_napi(struct gfar_private *priv)
static void enable_napi(struct gfar_private *priv)
{
- int i = 0;
+ int i;
for (i = 0; i < priv->num_grps; i++)
napi_enable(&priv->gfargrp[i].napi);
}
static int gfar_parse_group(struct device_node *np,
- struct gfar_private *priv, const char *model)
+ struct gfar_private *priv, const char *model)
{
u32 *queue_mask;
@@ -580,15 +582,13 @@ static int gfar_parse_group(struct device_node *np,
priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
priv->gfargrp[priv->num_grps].priv = priv;
spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
- if(priv->mode == MQ_MG_MODE) {
- queue_mask = (u32 *)of_get_property(np,
- "fsl,rx-bit-map", NULL);
- priv->gfargrp[priv->num_grps].rx_bit_map =
- queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps);
- queue_mask = (u32 *)of_get_property(np,
- "fsl,tx-bit-map", NULL);
- priv->gfargrp[priv->num_grps].tx_bit_map =
- queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ if (priv->mode == MQ_MG_MODE) {
+ queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
+ priv->gfargrp[priv->num_grps].rx_bit_map = queue_mask ?
+ *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
+ priv->gfargrp[priv->num_grps].tx_bit_map = queue_mask ?
+ *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
} else {
priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
@@ -652,7 +652,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->num_rx_queues = num_rx_qs;
priv->num_grps = 0x0;
- /* Init Rx queue filer rule set linked list*/
+ /* Init Rx queue filer rule set linked list */
INIT_LIST_HEAD(&priv->rx_list.list);
priv->rx_list.count = 0;
mutex_init(&priv->rx_queue_access);
@@ -673,7 +673,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
} else {
priv->mode = SQ_SG_MODE;
err = gfar_parse_group(np, priv, model);
- if(err)
+ if (err)
goto err_grp_init;
}
@@ -730,27 +730,27 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
mac_addr = of_get_mac_address(np);
+
if (mac_addr)
memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
if (model && !strcasecmp(model, "TSEC"))
- priv->device_flags =
- FSL_GIANFAR_DEV_HAS_GIGABIT |
- FSL_GIANFAR_DEV_HAS_COALESCE |
- FSL_GIANFAR_DEV_HAS_RMON |
- FSL_GIANFAR_DEV_HAS_MULTI_INTR;
+ priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+ FSL_GIANFAR_DEV_HAS_COALESCE |
+ FSL_GIANFAR_DEV_HAS_RMON |
+ FSL_GIANFAR_DEV_HAS_MULTI_INTR;
+
if (model && !strcasecmp(model, "eTSEC"))
- priv->device_flags =
- FSL_GIANFAR_DEV_HAS_GIGABIT |
- FSL_GIANFAR_DEV_HAS_COALESCE |
- FSL_GIANFAR_DEV_HAS_RMON |
- FSL_GIANFAR_DEV_HAS_MULTI_INTR |
- FSL_GIANFAR_DEV_HAS_PADDING |
- FSL_GIANFAR_DEV_HAS_CSUM |
- FSL_GIANFAR_DEV_HAS_VLAN |
- FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
- FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
- FSL_GIANFAR_DEV_HAS_TIMER;
+ priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+ FSL_GIANFAR_DEV_HAS_COALESCE |
+ FSL_GIANFAR_DEV_HAS_RMON |
+ FSL_GIANFAR_DEV_HAS_MULTI_INTR |
+ FSL_GIANFAR_DEV_HAS_PADDING |
+ FSL_GIANFAR_DEV_HAS_CSUM |
+ FSL_GIANFAR_DEV_HAS_VLAN |
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
+ FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
+ FSL_GIANFAR_DEV_HAS_TIMER;
ctype = of_get_property(np, "phy-connection-type", NULL);
@@ -781,7 +781,7 @@ err_grp_init:
}
static int gfar_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
+ struct ifreq *ifr, int cmd)
{
struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
@@ -851,6 +851,7 @@ static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
unsigned int new_bit_map = 0x0;
int mask = 0x1 << (max_qs - 1), i;
+
for (i = 0; i < max_qs; i++) {
if (bit_map & mask)
new_bit_map = new_bit_map + (1 << i);
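A quick worked example of reverse_bitmap() above (not part of the patch): with max_qs = 8, a bit_map of 0x80 — queue 0 in the hardware's MSB-first convention — becomes 0x01, so for_each_set_bit(), which scans upward from bit 0, visits queue 0 first.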
@@ -936,22 +937,22 @@ static void gfar_detect_errata(struct gfar_private *priv)
/* MPC8313 Rev 2.0 and higher; All MPC837x */
if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
- (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
priv->errata |= GFAR_ERRATA_74;
/* MPC8313 and MPC837x all rev */
if ((pvr == 0x80850010 && mod == 0x80b0) ||
- (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
priv->errata |= GFAR_ERRATA_76;
/* MPC8313 and MPC837x all rev */
if ((pvr == 0x80850010 && mod == 0x80b0) ||
- (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
priv->errata |= GFAR_ERRATA_A002;
/* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
- (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
+ (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
priv->errata |= GFAR_ERRATA_12;
if (priv->errata)
@@ -960,7 +961,8 @@ static void gfar_detect_errata(struct gfar_private *priv)
}
/* Set up the ethernet device structure, private data,
- * and anything else we need before we start */
+ * and anything else we need before we start
+ */
static int gfar_probe(struct platform_device *ofdev)
{
u32 tempval;
@@ -991,8 +993,9 @@ static int gfar_probe(struct platform_device *ofdev)
gfar_detect_errata(priv);
- /* Stop the DMA engine now, in case it was running before */
- /* (The firmware could have used it, and left it running). */
+ /* Stop the DMA engine now, in case it was running before
+ * (The firmware could have used it, and left it running).
+ */
gfar_halt(dev);
/* Reset MAC layer */
@@ -1026,13 +1029,14 @@ static int gfar_probe(struct platform_device *ofdev)
/* Register for napi ...We are registering NAPI for each grp */
for (i = 0; i < priv->num_grps; i++)
- netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
+ netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
+ GFAR_DEV_WEIGHT);
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM;
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
+ NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
@@ -1081,7 +1085,7 @@ static int gfar_probe(struct platform_device *ofdev)
priv->padding = 0;
if (dev->features & NETIF_F_IP_CSUM ||
- priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+ priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
dev->needed_headroom = GMAC_FCB_LEN;
/* Program the isrg regs only if number of grps > 1 */
@@ -1098,28 +1102,32 @@ static int gfar_probe(struct platform_device *ofdev)
/* Need to reverse the bit maps as bit_map's MSB is q0
* but, for_each_set_bit parses from right to left, which
- * basically reverses the queue numbers */
+ * basically reverses the queue numbers
+ */
for (i = 0; i< priv->num_grps; i++) {
- priv->gfargrp[i].tx_bit_map = reverse_bitmap(
- priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
- priv->gfargrp[i].rx_bit_map = reverse_bitmap(
- priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
+ priv->gfargrp[i].tx_bit_map =
+ reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
+ priv->gfargrp[i].rx_bit_map =
+ reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
}
/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
- * also assign queues to groups */
+ * also assign queues to groups
+ */
for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
priv->gfargrp[grp_idx].num_rx_queues = 0x0;
+
for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
- priv->num_rx_queues) {
+ priv->num_rx_queues) {
priv->gfargrp[grp_idx].num_rx_queues++;
priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
}
priv->gfargrp[grp_idx].num_tx_queues = 0x0;
+
for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
- priv->num_tx_queues) {
+ priv->num_tx_queues) {
priv->gfargrp[grp_idx].num_tx_queues++;
priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
tstat = tstat | (TSTAT_CLEAR_THALT >> i);
@@ -1149,7 +1157,7 @@ static int gfar_probe(struct platform_device *ofdev)
priv->rx_queue[i]->rxic = DEFAULT_RXIC;
}
- /* always enable rx filer*/
+ /* always enable rx filer */
priv->rx_filer_enable = 1;
/* Enable most messages by default */
priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -1165,7 +1173,8 @@ static int gfar_probe(struct platform_device *ofdev)
}
device_init_wakeup(&dev->dev,
- priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ priv->device_flags &
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
/* fill out IRQ number and name fields */
for (i = 0; i < priv->num_grps; i++) {
@@ -1189,13 +1198,14 @@ static int gfar_probe(struct platform_device *ofdev)
/* Print out the device info */
netdev_info(dev, "mac: %pM\n", dev->dev_addr);
- /* Even more device info helps when determining which kernel */
- /* provided which set of benchmarks. */
+ /* Even more device info helps when determining which kernel
+ * provided which set of benchmarks.
+ */
netdev_info(dev, "Running with NAPI enabled\n");
for (i = 0; i < priv->num_rx_queues; i++)
netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
i, priv->rx_queue[i]->rx_ring_size);
- for(i = 0; i < priv->num_tx_queues; i++)
+ for (i = 0; i < priv->num_tx_queues; i++)
netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
i, priv->tx_queue[i]->tx_ring_size);
@@ -1242,7 +1252,8 @@ static int gfar_suspend(struct device *dev)
u32 tempval;
int magic_packet = priv->wol_en &&
- (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ (priv->device_flags &
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
netif_device_detach(ndev);
@@ -1294,7 +1305,8 @@ static int gfar_resume(struct device *dev)
unsigned long flags;
u32 tempval;
int magic_packet = priv->wol_en &&
- (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ (priv->device_flags &
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
if (!netif_running(ndev)) {
netif_device_attach(ndev);
@@ -1393,13 +1405,13 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
}
if (ecntrl & ECNTRL_REDUCED_MODE) {
- if (ecntrl & ECNTRL_REDUCED_MII_MODE)
+ if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
return PHY_INTERFACE_MODE_RMII;
+ }
else {
phy_interface_t interface = priv->interface;
- /*
- * This isn't autodetected right now, so it must
+ /* This isn't autodetected right now, so it must
* be set by the device tree or platform code.
*/
if (interface == PHY_INTERFACE_MODE_RGMII_ID)
@@ -1453,8 +1465,7 @@ static int init_phy(struct net_device *dev)
return 0;
}
-/*
- * Initialize TBI PHY interface for communicating with the
+/* Initialize TBI PHY interface for communicating with the
* SERDES lynx PHY on the chip. We communicate with this PHY
* through the MDIO bus on each controller, treating it as a
* "normal" PHY at the address found in the TBIPA register. We assume
@@ -1479,8 +1490,7 @@ static void gfar_configure_serdes(struct net_device *dev)
return;
}
- /*
- * If the link is already up, we must already be ok, and don't need to
+ /* If the link is already up, we must already be ok, and don't need to
* configure and reset the TBI<->SerDes link. Maybe U-Boot configured
* everything for us? Resetting it takes the link down and requires
* several seconds for it to come back.
@@ -1492,18 +1502,19 @@ static void gfar_configure_serdes(struct net_device *dev)
phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
phy_write(tbiphy, MII_ADVERTISE,
- ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
- ADVERTISE_1000XPSE_ASYM);
+ ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
+ ADVERTISE_1000XPSE_ASYM);
- phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
- BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
+ phy_write(tbiphy, MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
+ BMCR_SPEED1000);
}
static void init_registers(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = NULL;
- int i = 0;
+ int i;
for (i = 0; i < priv->num_grps; i++) {
regs = priv->gfargrp[i].regs;
@@ -1554,15 +1565,13 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
{
u32 res;
- /*
- * Normaly TSEC should not hang on GRS commands, so we should
+ /* Normally TSEC should not hang on GRS commands, so we should
* actually wait for IEVENT_GRSC flag.
*/
if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
return 0;
- /*
- * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
+ /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
* the same as bits 23-30, the eTSEC Rx is assumed to be idle
* and the Rx can be safely reset.
*/
@@ -1580,7 +1589,7 @@ static void gfar_halt_nodisable(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = NULL;
u32 tempval;
- int i = 0;
+ int i;
for (i = 0; i < priv->num_grps; i++) {
regs = priv->gfargrp[i].regs;
@@ -1594,8 +1603,8 @@ static void gfar_halt_nodisable(struct net_device *dev)
regs = priv->gfargrp[0].regs;
/* Stop the DMA, and wait for it to stop */
tempval = gfar_read(&regs->dmactrl);
- if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
- != (DMACTRL_GRS | DMACTRL_GTS)) {
+ if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
+ (DMACTRL_GRS | DMACTRL_GTS)) {
int ret;
tempval |= (DMACTRL_GRS | DMACTRL_GTS);
@@ -1660,7 +1669,7 @@ void stop_gfar(struct net_device *dev)
} else {
for (i = 0; i < priv->num_grps; i++)
free_irq(priv->gfargrp[i].interruptTransmit,
- &priv->gfargrp[i]);
+ &priv->gfargrp[i]);
}
free_skb_resources(priv);
@@ -1679,13 +1688,13 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
continue;
dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
- txbdp->length, DMA_TO_DEVICE);
+ txbdp->length, DMA_TO_DEVICE);
txbdp->lstatus = 0;
for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
- j++) {
+ j++) {
txbdp++;
dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
- txbdp->length, DMA_TO_DEVICE);
+ txbdp->length, DMA_TO_DEVICE);
}
txbdp++;
dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
@@ -1705,8 +1714,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
for (i = 0; i < rx_queue->rx_ring_size; i++) {
if (rx_queue->rx_skbuff[i]) {
dma_unmap_single(&priv->ofdev->dev,
- rxbdp->bufPtr, priv->rx_buffer_size,
- DMA_FROM_DEVICE);
+ rxbdp->bufPtr, priv->rx_buffer_size,
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
rx_queue->rx_skbuff[i] = NULL;
}
@@ -1718,7 +1727,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
}
/* If there are any tx skbs or rx skbs still around, free them.
- * Then free tx_skbuff and rx_skbuff */
+ * Then free tx_skbuff and rx_skbuff
+ */
static void free_skb_resources(struct gfar_private *priv)
{
struct gfar_priv_tx_q *tx_queue = NULL;
@@ -1728,24 +1738,25 @@ static void free_skb_resources(struct gfar_private *priv)
/* Go through all the buffer descriptors and free their data buffers */
for (i = 0; i < priv->num_tx_queues; i++) {
struct netdev_queue *txq;
+
tx_queue = priv->tx_queue[i];
txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
- if(tx_queue->tx_skbuff)
+ if (tx_queue->tx_skbuff)
free_skb_tx_queue(tx_queue);
netdev_tx_reset_queue(txq);
}
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- if(rx_queue->rx_skbuff)
+ if (rx_queue->rx_skbuff)
free_skb_rx_queue(rx_queue);
}
dma_free_coherent(&priv->ofdev->dev,
- sizeof(struct txbd8) * priv->total_tx_ring_size +
- sizeof(struct rxbd8) * priv->total_rx_ring_size,
- priv->tx_queue[0]->tx_bd_base,
- priv->tx_queue[0]->tx_bd_dma_base);
+ sizeof(struct txbd8) * priv->total_tx_ring_size +
+ sizeof(struct rxbd8) * priv->total_rx_ring_size,
+ priv->tx_queue[0]->tx_bd_base,
+ priv->tx_queue[0]->tx_bd_dma_base);
skb_queue_purge(&priv->rx_recycle);
}
@@ -1784,7 +1795,7 @@ void gfar_start(struct net_device *dev)
}
void gfar_configure_coalescing(struct gfar_private *priv,
- unsigned long tx_mask, unsigned long rx_mask)
+ unsigned long tx_mask, unsigned long rx_mask)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 __iomem *baddr;
@@ -1794,28 +1805,26 @@ void gfar_configure_coalescing(struct gfar_private *priv,
* multiple queues, there's only single reg to program
*/
gfar_write(&regs->txic, 0);
- if(likely(priv->tx_queue[0]->txcoalescing))
+ if (likely(priv->tx_queue[0]->txcoalescing))
gfar_write(&regs->txic, priv->tx_queue[0]->txic);
gfar_write(&regs->rxic, 0);
- if(unlikely(priv->rx_queue[0]->rxcoalescing))
+ if (unlikely(priv->rx_queue[0]->rxcoalescing))
gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
if (priv->mode == MQ_MG_MODE) {
baddr = &regs->txic0;
for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
- if (likely(priv->tx_queue[i]->txcoalescing)) {
- gfar_write(baddr + i, 0);
+ gfar_write(baddr + i, 0);
+ if (likely(priv->tx_queue[i]->txcoalescing))
gfar_write(baddr + i, priv->tx_queue[i]->txic);
- }
}
baddr = &regs->rxic0;
for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
- if (likely(priv->rx_queue[i]->rxcoalescing)) {
- gfar_write(baddr + i, 0);
+ gfar_write(baddr + i, 0);
+ if (likely(priv->rx_queue[i]->rxcoalescing))
gfar_write(baddr + i, priv->rx_queue[i]->rxic);
- }
}
}
}
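Beyond the indentation changes, the hunk above also moves the per-queue clear outside the conditional, so each queue's interrupt-coalescing register is always zeroed and only re-programmed when coalescing is enabled for that queue. A stripped-down sketch of that pattern, with illustrative names and a generic big-endian accessor:

#include <linux/types.h>
#include <asm/io.h>

/* Always clear each per-queue coalescing register first, then program a
 * value only for queues with coalescing enabled, so stale settings from
 * a previous configuration cannot linger on disabled queues.
 */
static void example_program_coalescing(u32 __iomem *baddr, const u32 *ic,
				       const bool *enabled, int nqueues)
{
	int i;

	for (i = 0; i < nqueues; i++) {
		out_be32(baddr + i, 0);
		if (enabled[i])
			out_be32(baddr + i, ic[i]);
	}
}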
@@ -1827,12 +1836,14 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
int err;
/* If the device has multiple interrupts, register for
- * them. Otherwise, only register for the one */
+ * them. Otherwise, only register for the one
+ */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
/* Install our interrupt handlers for Error,
- * Transmit, and Receive */
- if ((err = request_irq(grp->interruptError, gfar_error, 0,
- grp->int_name_er,grp)) < 0) {
+ * Transmit, and Receive
+ */
+ if ((err = request_irq(grp->interruptError, gfar_error,
+ 0, grp->int_name_er, grp)) < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
grp->interruptError);
@@ -1840,21 +1851,21 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
}
if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
- 0, grp->int_name_tx, grp)) < 0) {
+ 0, grp->int_name_tx, grp)) < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
grp->interruptTransmit);
goto tx_irq_fail;
}
- if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
- grp->int_name_rx, grp)) < 0) {
+ if ((err = request_irq(grp->interruptReceive, gfar_receive,
+ 0, grp->int_name_rx, grp)) < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
grp->interruptReceive);
goto rx_irq_fail;
}
} else {
- if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
- grp->int_name_tx, grp)) < 0) {
+ if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
+ 0, grp->int_name_tx, grp)) < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
grp->interruptTransmit);
goto err_irq_fail;
@@ -1914,8 +1925,9 @@ irq_fail:
return err;
}
-/* Called when something needs to use the ethernet device */
-/* Returns 0 for success. */
+/* Called when something needs to use the ethernet device
+ * Returns 0 for success.
+ */
static int gfar_enet_open(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -1960,18 +1972,17 @@ static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
}
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
- int fcb_length)
+ int fcb_length)
{
- u8 flags = 0;
-
/* If we're here, it's a IP packet with a TCP or UDP
* payload. We set it to checksum, using a pseudo-header
* we provide
*/
- flags = TXFCB_DEFAULT;
+ u8 flags = TXFCB_DEFAULT;
- /* Tell the controller what the protocol is */
- /* And provide the already calculated phcs */
+ /* Tell the controller what the protocol is
+ * And provide the already calculated phcs
+ */
if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
flags |= TXFCB_UDP;
fcb->phcs = udp_hdr(skb)->check;
@@ -1981,7 +1992,8 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
/* l3os is the distance between the start of the
* frame (skb->data) and the start of the IP hdr.
* l4os is the distance between the start of the
- * l3 hdr and the l4 hdr */
+ * l3 hdr and the l4 hdr
+ */
fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
fcb->l4os = skb_network_header_len(skb);
@@ -1995,7 +2007,7 @@ void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
}
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
- struct txbd8 *base, int ring_size)
+ struct txbd8 *base, int ring_size)
{
struct txbd8 *new_bd = bdp + stride;
@@ -2003,13 +2015,14 @@ static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
}
static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
- int ring_size)
+ int ring_size)
{
return skip_txbd(bdp, 1, base, ring_size);
}
-/* This is called by the kernel when a frame is ready for transmission. */
-/* It is pointed to by the dev->hard_start_xmit function pointer */
+/* This is called by the kernel when a frame is ready for transmission.
+ * It is pointed to by the dev->hard_start_xmit function pointer
+ */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -2024,13 +2037,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
- /*
- * TOE=1 frames larger than 2500 bytes may see excess delays
+ /* TOE=1 frames larger than 2500 bytes may see excess delays
* before start of transmission.
*/
if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
- skb->ip_summed == CHECKSUM_PARTIAL &&
- skb->len > 2500)) {
+ skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb->len > 2500)) {
int ret;
ret = skb_checksum_help(skb);
@@ -2046,16 +2058,16 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* check if time stamp should be generated */
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- priv->hwts_tx_en)) {
+ priv->hwts_tx_en)) {
do_tstamp = 1;
fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
}
/* make space for additional header when fcb is needed */
if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
- vlan_tx_tag_present(skb) ||
- unlikely(do_tstamp)) &&
- (skb_headroom(skb) < fcb_length)) {
+ vlan_tx_tag_present(skb) ||
+ unlikely(do_tstamp)) &&
+ (skb_headroom(skb) < fcb_length)) {
struct sk_buff *skb_new;
skb_new = skb_realloc_headroom(skb, fcb_length);
@@ -2065,10 +2077,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
- /* Steal sock reference for processing TX time stamps */
- swap(skb_new->sk, skb->sk);
- swap(skb_new->destructor, skb->destructor);
- kfree_skb(skb);
+ if (skb->sk)
+ skb_set_owner_w(skb_new, skb->sk);
+ consume_skb(skb);
skb = skb_new;
}
@@ -2099,12 +2110,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Time stamp insertion requires one additional TxBD */
if (unlikely(do_tstamp))
txbdp_tstamp = txbdp = next_txbd(txbdp, base,
- tx_queue->tx_ring_size);
+ tx_queue->tx_ring_size);
if (nr_frags == 0) {
if (unlikely(do_tstamp))
txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
- TXBD_INTERRUPT);
+ TXBD_INTERRUPT);
else
lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
} else {
@@ -2116,7 +2127,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
length = skb_shinfo(skb)->frags[i].size;
lstatus = txbdp->lstatus | length |
- BD_LFLAG(TXBD_READY);
+ BD_LFLAG(TXBD_READY);
/* Handle the last BD specially */
if (i == nr_frags - 1)
@@ -2146,8 +2157,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (CHECKSUM_PARTIAL == skb->ip_summed) {
fcb = gfar_add_fcb(skb);
/* as specified by errata */
- if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
- && ((unsigned long)fcb % 0x20) > 0x18)) {
+ if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
+ ((unsigned long)fcb % 0x20) > 0x18)) {
__skb_pull(skb, GMAC_FCB_LEN);
skb_checksum_help(skb);
} else {
@@ -2175,10 +2186,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
+ skb_headlen(skb), DMA_TO_DEVICE);
- /*
- * If time stamping is requested one additional TxBD must be set up. The
+ /* If time stamping is requested one additional TxBD must be set up. The
* first TxBD points to the FCB and must have a data length of
* GMAC_FCB_LEN. The second TxBD points to the actual frame data with
* the full frame length.
@@ -2186,7 +2196,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(do_tstamp)) {
txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
- (skb_headlen(skb) - fcb_length);
+ (skb_headlen(skb) - fcb_length);
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
} else {
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
@@ -2194,8 +2204,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(txq, skb->len);
- /*
- * We can work in parallel with gfar_clean_tx_ring(), except
+ /* We can work in parallel with gfar_clean_tx_ring(), except
* when modifying num_txbdfree. Note that we didn't grab the lock
* when we were reading the num_txbdfree and checking for available
* space, that's because outside of this function it can only grow,
@@ -2208,8 +2217,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
spin_lock_irqsave(&tx_queue->txlock, flags);
- /*
- * The powerpc-specific eieio() is used, as wmb() has too strong
+ /* The powerpc-specific eieio() is used, as wmb() has too strong
* semantics (it requires synchronization between cacheable and
* uncacheable mappings, which eieio doesn't provide and which we
* don't need), thus requiring a more expensive sync instruction. At
@@ -2225,9 +2233,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
/* Update the current skb pointer to the next entry we will use
- * (wrapping if necessary) */
+ * (wrapping if necessary)
+ */
tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
- TX_RING_MOD_MASK(tx_queue->tx_ring_size);
+ TX_RING_MOD_MASK(tx_queue->tx_ring_size);
tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
@@ -2235,7 +2244,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->num_txbdfree -= (nr_txbds);
/* If the next BD still needs to be cleaned up, then the bds
- are full. We need to tell the kernel to stop sending us stuff. */
+ * are full. We need to tell the kernel to stop sending us stuff.
+ */
if (!tx_queue->num_txbdfree) {
netif_tx_stop_queue(txq);
@@ -2360,12 +2370,12 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
frame_size += priv->padding;
- tempsize =
- (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
- INCREMENTAL_BUFFER_SIZE;
+ tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
+ INCREMENTAL_BUFFER_SIZE;
/* Only stop and start the controller if it isn't already
- * stopped, and we changed something */
+ * stopped, and we changed something
+ */
if ((oldsize != tempsize) && (dev->flags & IFF_UP))
stop_gfar(dev);
@@ -2378,11 +2388,12 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
/* If the mtu is larger than the max size for standard
* ethernet frames (ie, a jumbo frame), then set maccfg2
- * to allow huge frames, and to check the length */
+ * to allow huge frames, and to check the length
+ */
tempval = gfar_read(&regs->maccfg2);
if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
- gfar_has_errata(priv, GFAR_ERRATA_74))
+ gfar_has_errata(priv, GFAR_ERRATA_74))
tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
else
tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
@@ -2403,7 +2414,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
static void gfar_reset_task(struct work_struct *work)
{
struct gfar_private *priv = container_of(work, struct gfar_private,
- reset_task);
+ reset_task);
struct net_device *dev = priv->ndev;
if (dev->flags & IFF_UP) {
@@ -2430,7 +2441,7 @@ static void gfar_align_skb(struct sk_buff *skb)
* as many bytes as needed to align the data properly
*/
skb_reserve(skb, RXBUF_ALIGNMENT -
- (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
+ (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
}
/* Interrupt Handler for Transmit complete */
@@ -2464,8 +2475,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
frags = skb_shinfo(skb)->nr_frags;
- /*
- * When time stamping, one additional TxBD must be freed.
+ /* When time stamping, one additional TxBD must be freed.
* Also, we need to dma_unmap_single() the TxPAL.
*/
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
@@ -2479,7 +2489,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
/* Only clean completed frames */
if ((lstatus & BD_LFLAG(TXBD_READY)) &&
- (lstatus & BD_LENGTH_MASK))
+ (lstatus & BD_LENGTH_MASK))
break;
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
@@ -2489,11 +2499,12 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
buflen = bdp->length;
dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
- buflen, DMA_TO_DEVICE);
+ buflen, DMA_TO_DEVICE);
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
struct skb_shared_hwtstamps shhwtstamps;
u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(*ns);
skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
@@ -2506,23 +2517,20 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
bdp = next_txbd(bdp, base, tx_ring_size);
for (i = 0; i < frags; i++) {
- dma_unmap_page(&priv->ofdev->dev,
- bdp->bufPtr,
- bdp->length,
- DMA_TO_DEVICE);
+ dma_unmap_page(&priv->ofdev->dev, bdp->bufPtr,
+ bdp->length, DMA_TO_DEVICE);
bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
bdp = next_txbd(bdp, base, tx_ring_size);
}
bytes_sent += skb->len;
- /*
- * If there's room in the queue (limit it to rx_buffer_size)
+ /* If there's room in the queue (limit it to rx_buffer_size)
* we add this skb back into the pool, if it's the right size
*/
if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
- skb_recycle_check(skb, priv->rx_buffer_size +
- RXBUF_ALIGNMENT)) {
+ skb_recycle_check(skb, priv->rx_buffer_size +
+ RXBUF_ALIGNMENT)) {
gfar_align_skb(skb);
skb_queue_head(&priv->rx_recycle, skb);
} else
@@ -2531,7 +2539,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
tx_queue->tx_skbuff[skb_dirtytx] = NULL;
skb_dirtytx = (skb_dirtytx + 1) &
- TX_RING_MOD_MASK(tx_ring_size);
+ TX_RING_MOD_MASK(tx_ring_size);
howmany++;
spin_lock_irqsave(&tx_queue->txlock, flags);
@@ -2561,8 +2569,7 @@ static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
__napi_schedule(&gfargrp->napi);
} else {
- /*
- * Clear IEVENT, so interrupts aren't called again
+ /* Clear IEVENT, so interrupts aren't called again
* because of the packets that have already arrived.
*/
gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
@@ -2579,7 +2586,7 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
}
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct net_device *dev = rx_queue->dev;
struct gfar_private *priv = netdev_priv(dev);
@@ -2590,7 +2597,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
gfar_init_rxbdp(rx_queue, bdp, buf);
}
-static struct sk_buff * gfar_alloc_skb(struct net_device *dev)
+static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct sk_buff *skb = NULL;
@@ -2604,7 +2611,7 @@ static struct sk_buff * gfar_alloc_skb(struct net_device *dev)
return skb;
}
-struct sk_buff * gfar_new_skb(struct net_device *dev)
+struct sk_buff *gfar_new_skb(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct sk_buff *skb = NULL;
@@ -2622,8 +2629,7 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
struct net_device_stats *stats = &dev->stats;
struct gfar_extra_stats *estats = &priv->extra_stats;
- /* If the packet was truncated, none of the other errors
- * matter */
+ /* If the packet was truncated, none of the other errors matter */
if (status & RXBD_TRUNCATED) {
stats->rx_length_errors++;
@@ -2664,7 +2670,8 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
/* If valid headers were found, and valid sums
* were verified, then we tell the kernel that no
- * checksumming is necessary. Otherwise, it is */
+ * checksumming is necessary. Otherwise, it is [FIXME]
+ */
if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
@@ -2672,8 +2679,7 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
}
-/* gfar_process_frame() -- handle one incoming packet if skb
- * isn't NULL. */
+/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
int amount_pull, struct napi_struct *napi)
{
@@ -2685,8 +2691,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
/* fcb is at the beginning if exists */
fcb = (struct rxfcb *)skb->data;
- /* Remove the FCB from the skb */
- /* Remove the padded bytes, if there are any */
+ /* Remove the FCB from the skb
+ * Remove the padded bytes, if there are any
+ */
if (amount_pull) {
skb_record_rx_queue(skb, fcb->rq);
skb_pull(skb, amount_pull);
@@ -2696,6 +2703,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
if (priv->hwts_rx_en) {
struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
u64 *ns = (u64 *) skb->data;
+
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
shhwtstamps->hwtstamp = ns_to_ktime(*ns);
}
@@ -2709,8 +2717,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
/* Tell the skb what kind of packet this is */
skb->protocol = eth_type_trans(skb, dev);
- /*
- * There's need to check for NETIF_F_HW_VLAN_RX here.
+ /* There's need to check for NETIF_F_HW_VLAN_RX here.
* Even if vlan rx accel is disabled, on some chips
* RXFCB_VLN is pseudo randomly set.
*/
@@ -2728,8 +2735,8 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
- * until the budget/quota has been reached. Returns the number
- * of frames handled
+ * until the budget/quota has been reached. Returns the number
+ * of frames handled
*/
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
@@ -2749,6 +2756,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
struct sk_buff *newskb;
+
rmb();
/* Add another skb for the future */
@@ -2757,15 +2765,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
- priv->rx_buffer_size, DMA_FROM_DEVICE);
+ priv->rx_buffer_size, DMA_FROM_DEVICE);
if (unlikely(!(bdp->status & RXBD_ERR) &&
- bdp->length > priv->rx_buffer_size))
+ bdp->length > priv->rx_buffer_size))
bdp->status = RXBD_LARGE;
/* We drop the frame if we failed to allocate a new buffer */
if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
- bdp->status & RXBD_ERR)) {
+ bdp->status & RXBD_ERR)) {
count_errors(bdp->status, dev);
if (unlikely(!newskb))
@@ -2784,7 +2792,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
rx_queue->stats.rx_bytes += pkt_len;
skb_record_rx_queue(skb, rx_queue->qindex);
gfar_process_frame(dev, skb, amount_pull,
- &rx_queue->grp->napi);
+ &rx_queue->grp->napi);
} else {
netif_warn(priv, rx_err, dev, "Missing skb!\n");
@@ -2803,9 +2811,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
/* update to point at the next skb */
- rx_queue->skb_currx =
- (rx_queue->skb_currx + 1) &
- RX_RING_MOD_MASK(rx_queue->rx_ring_size);
+ rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
+ RX_RING_MOD_MASK(rx_queue->rx_ring_size);
}
/* Update the current rxbd pointer to be the next one */
@@ -2816,8 +2823,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
static int gfar_poll(struct napi_struct *napi, int budget)
{
- struct gfar_priv_grp *gfargrp = container_of(napi,
- struct gfar_priv_grp, napi);
+ struct gfar_priv_grp *gfargrp =
+ container_of(napi, struct gfar_priv_grp, napi);
struct gfar_private *priv = gfargrp->priv;
struct gfar __iomem *regs = gfargrp->regs;
struct gfar_priv_tx_q *tx_queue = NULL;
@@ -2831,11 +2838,11 @@ static int gfar_poll(struct napi_struct *napi, int budget)
budget_per_queue = budget/num_queues;
/* Clear IEVENT, so interrupts aren't called again
- * because of the packets that have already arrived */
+ * because of the packets that have already arrived
+ */
gfar_write(&regs->ievent, IEVENT_RTX_MASK);
while (num_queues && left_over_budget) {
-
budget_per_queue = left_over_budget/num_queues;
left_over_budget = 0;
@@ -2846,12 +2853,13 @@ static int gfar_poll(struct napi_struct *napi, int budget)
tx_queue = priv->tx_queue[rx_queue->qindex];
tx_cleaned += gfar_clean_tx_ring(tx_queue);
- rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
- budget_per_queue);
+ rx_cleaned_per_queue =
+ gfar_clean_rx_ring(rx_queue, budget_per_queue);
rx_cleaned += rx_cleaned_per_queue;
- if(rx_cleaned_per_queue < budget_per_queue) {
+ if (rx_cleaned_per_queue < budget_per_queue) {
left_over_budget = left_over_budget +
- (budget_per_queue - rx_cleaned_per_queue);
+ (budget_per_queue -
+ rx_cleaned_per_queue);
set_bit(i, &serviced_queues);
num_queues--;
}
@@ -2869,25 +2877,25 @@ static int gfar_poll(struct napi_struct *napi, int budget)
gfar_write(&regs->imask, IMASK_DEFAULT);
- /* If we are coalescing interrupts, update the timer */
- /* Otherwise, clear it */
- gfar_configure_coalescing(priv,
- gfargrp->rx_bit_map, gfargrp->tx_bit_map);
+ /* If we are coalescing interrupts, update the timer
+ * Otherwise, clear it
+ */
+ gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
+ gfargrp->tx_bit_map);
}
return rx_cleaned;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
+/* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
static void gfar_netpoll(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- int i = 0;
+ int i;
/* If the device has multiple interrupts, run tx/rx */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
@@ -2896,7 +2904,7 @@ static void gfar_netpoll(struct net_device *dev)
disable_irq(priv->gfargrp[i].interruptReceive);
disable_irq(priv->gfargrp[i].interruptError);
gfar_interrupt(priv->gfargrp[i].interruptTransmit,
- &priv->gfargrp[i]);
+ &priv->gfargrp[i]);
enable_irq(priv->gfargrp[i].interruptError);
enable_irq(priv->gfargrp[i].interruptReceive);
enable_irq(priv->gfargrp[i].interruptTransmit);
@@ -2905,7 +2913,7 @@ static void gfar_netpoll(struct net_device *dev)
for (i = 0; i < priv->num_grps; i++) {
disable_irq(priv->gfargrp[i].interruptTransmit);
gfar_interrupt(priv->gfargrp[i].interruptTransmit,
- &priv->gfargrp[i]);
+ &priv->gfargrp[i]);
enable_irq(priv->gfargrp[i].interruptTransmit);
}
}
@@ -2957,7 +2965,8 @@ static void adjust_link(struct net_device *dev)
u32 ecntrl = gfar_read(&regs->ecntrl);
/* Now we make sure that we can be in full duplex mode.
- * If not, we operate in half-duplex mode. */
+ * If not, we operate in half-duplex mode.
+ */
if (phydev->duplex != priv->oldduplex) {
new_state = 1;
if (!(phydev->duplex))
@@ -2983,7 +2992,8 @@ static void adjust_link(struct net_device *dev)
((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
/* Reduced mode distinguishes
- * between 10 and 100 */
+ * between 10 and 100
+ */
if (phydev->speed == SPEED_100)
ecntrl |= ECNTRL_R100;
else
@@ -3022,7 +3032,8 @@ static void adjust_link(struct net_device *dev)
/* Update the hash table based on the current list of multicast
* addresses we subscribe to. Also, change the promiscuity of
* the device based on the flags (this function is called
- * whenever dev->flags is changed */
+ * whenever dev->flags is changed
+ */
static void gfar_set_multi(struct net_device *dev)
{
struct netdev_hw_addr *ha;
@@ -3084,7 +3095,8 @@ static void gfar_set_multi(struct net_device *dev)
/* If we have extended hash tables, we need to
* clear the exact match registers to prepare for
- * setting them */
+ * setting them
+ */
if (priv->extended_hash) {
em_num = GFAR_EM_NUM + 1;
gfar_clear_exact_match(dev);
@@ -3110,13 +3122,14 @@ static void gfar_set_multi(struct net_device *dev)
/* Clears each of the exact match registers to zero, so they
- * don't interfere with normal reception */
+ * don't interfere with normal reception
+ */
static void gfar_clear_exact_match(struct net_device *dev)
{
int idx;
static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
- for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
+ for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
gfar_set_mac_for_addr(dev, idx, zero_arr);
}
@@ -3132,7 +3145,8 @@ static void gfar_clear_exact_match(struct net_device *dev)
* hash index which gaddr register to use, and the 5 other bits
* indicate which bit (assuming an IBM numbering scheme, which
* for PowerPC (tm) is usually the case) in the register holds
- * the entry. */
+ * the entry.
+ */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
u32 tempval;
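As a companion to the comment above, a rough sketch of the indexing it describes: the top byte of the CRC is split into a 3-bit register select and a 5-bit, IBM-numbered bit select. Extended-hash configurations use a wider index, and all names below are illustrative rather than the driver's own:

#include <linux/etherdevice.h>

/* Split the upper 8 bits of the Ethernet CRC into which GADDR register
 * to touch (top 3 bits) and which bit inside it (low 5 bits, where bit
 * 0 is the register's MSB in IBM numbering).
 */
static void example_hash_index(const u8 *addr, unsigned int *whichreg,
			       u32 *regval)
{
	u32 crc = ether_crc(ETH_ALEN, addr);
	u8 idx = crc >> 24;			/* 8-bit hash index */
	unsigned int whichbit = idx & 0x1f;

	*whichreg = idx >> 5;			/* GADDR0..GADDR7 */
	*regval = 1U << (31 - whichbit);	/* IBM bit numbering */
}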
@@ -3164,8 +3178,9 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
macptr += num*2;
- /* Now copy it into the mac registers backwards, cuz */
- /* little endian is silly */
+ /* Now copy it into the mac registers backwards, cuz
+ * little endian is silly
+ */
for (idx = 0; idx < ETH_ALEN; idx++)
tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
@@ -3197,7 +3212,8 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
/* Hmm... */
if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
- netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n",
+ netdev_dbg(dev,
+ "error interrupt (ievent=0x%08x imask=0x%08x)\n",
events, gfar_read(&regs->imask));
/* Update the error counters */
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 8a02557..8971921 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -46,18 +46,24 @@
#include "gianfar.h"
extern void gfar_start(struct net_device *dev);
-extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
+extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
+ int rx_work_limit);
#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
- u64 * buf);
+ u64 *buf);
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
-static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
-static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
-static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
-static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
-static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo);
+static int gfar_gcoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals);
+static int gfar_scoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals);
+static void gfar_gringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals);
+static int gfar_sringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals);
+static void gfar_gdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo);
static const char stat_gstrings[][ETH_GSTRING_LEN] = {
"rx-dropped-by-kernel",
@@ -130,14 +136,15 @@ static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
else
memcpy(buf, stat_gstrings,
- GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
+ GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
}
/* Fill in an array of 64-bit statistics from various sources.
* This array will be appended to the end of the ethtool_stats
* structure, and returned to user space
*/
-static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf)
+static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
+ u64 *buf)
{
int i;
struct gfar_private *priv = netdev_priv(dev);
@@ -174,8 +181,8 @@ static int gfar_sset_count(struct net_device *dev, int sset)
}
/* Fills in the drvinfo structure with some basic info */
-static void gfar_gdrvinfo(struct net_device *dev, struct
- ethtool_drvinfo *drvinfo)
+static void gfar_gdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
{
strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN);
strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN);
@@ -226,7 +233,8 @@ static int gfar_reglen(struct net_device *dev)
}
/* Return a dump of the GFAR register space */
-static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
+static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *regbuf)
{
int i;
struct gfar_private *priv = netdev_priv(dev);
@@ -239,7 +247,8 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, voi
/* Convert microseconds to ethernet clock ticks, which changes
* depending on what speed the controller is running at */
-static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs)
+static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
+ unsigned int usecs)
{
unsigned int count;
@@ -263,7 +272,8 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int use
}
/* Convert ethernet clock ticks to microseconds */
-static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks)
+static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
+ unsigned int ticks)
{
unsigned int count;
@@ -288,7 +298,8 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic
/* Get the coalescing parameters, and put them in the cvals
* structure. */
-static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
+static int gfar_gcoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_rx_q *rx_queue = NULL;
@@ -353,7 +364,8 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
* Both cvals->*_usecs and cvals->*_frames have to be > 0
* in order for coalescing to be active
*/
-static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
+static int gfar_scoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals)
{
struct gfar_private *priv = netdev_priv(dev);
int i = 0;
@@ -364,7 +376,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
/* Set up rx coalescing */
/* As of now, we will enable/disable coalescing for all
* queues together in case of eTSEC2, this will be modified
- * along with the ethtool interface */
+ * along with the ethtool interface
+ */
if ((cvals->rx_coalesce_usecs == 0) ||
(cvals->rx_max_coalesced_frames == 0)) {
for (i = 0; i < priv->num_rx_queues; i++)
@@ -433,7 +446,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
/* Fills in rvals with the current ring parameters. Currently,
* rx, rx_mini, and rx_jumbo rings are the same size, as mini and
* jumbo are ignored by the driver */
-static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
+static void gfar_gringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_tx_q *tx_queue = NULL;
@@ -459,8 +473,10 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv
/* Change the current ring parameters, stopping the controller if
* necessary so that we don't mess things up while we're in
* motion. We wait for the ring to be clean before reallocating
- * the rings. */
-static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
+ * the rings.
+ */
+static int gfar_sringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals)
{
struct gfar_private *priv = netdev_priv(dev);
int err = 0, i = 0;
@@ -486,7 +502,8 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
unsigned long flags;
/* Halt TX and RX, and process the frames which
- * have already been received */
+ * have already been received
+ */
local_irq_save(flags);
lock_tx_qs(priv);
lock_rx_qs(priv);
@@ -499,7 +516,7 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
for (i = 0; i < priv->num_rx_queues; i++)
gfar_clean_rx_ring(priv->rx_queue[i],
- priv->rx_queue[i]->rx_ring_size);
+ priv->rx_queue[i]->rx_ring_size);
/* Now we take down the rings to rebuild them */
stop_gfar(dev);
@@ -509,7 +526,8 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
for (i = 0; i < priv->num_rx_queues; i++) {
priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
- priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size;
+ priv->tx_queue[i]->num_txbdfree =
+ priv->tx_queue[i]->tx_ring_size;
}
/* Rebuild the rings with the new size */
@@ -535,7 +553,8 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
if (dev->flags & IFF_UP) {
/* Halt TX and RX, and process the frames which
- * have already been received */
+ * have already been received
+ */
local_irq_save(flags);
lock_tx_qs(priv);
lock_rx_qs(priv);
@@ -548,7 +567,7 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
for (i = 0; i < priv->num_rx_queues; i++)
gfar_clean_rx_ring(priv->rx_queue[i],
- priv->rx_queue[i]->rx_ring_size);
+ priv->rx_queue[i]->rx_ring_size);
/* Now we take down the rings to rebuild them */
stop_gfar(dev);
@@ -564,12 +583,14 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
static uint32_t gfar_get_msglevel(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
+
return priv->msg_enable;
}
static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
{
struct gfar_private *priv = netdev_priv(dev);
+
priv->msg_enable = data;
}
@@ -614,14 +635,14 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L2DA) {
fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
- RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
- RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -630,7 +651,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_VLAN) {
fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
@@ -639,7 +660,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_IP_SRC) {
fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -648,7 +669,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & (RXH_IP_DST)) {
fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -657,7 +678,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L3_PROTO) {
fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -666,7 +687,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L4_B_0_1) {
fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -675,7 +696,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L4_B_2_3) {
fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -683,7 +704,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
}
}
-static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class)
+static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
+ u64 class)
{
unsigned int last_rule_idx = priv->cur_filer_idx;
unsigned int cmp_rqfpr;
@@ -694,9 +716,9 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
int ret = 1;
local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
- GFP_KERNEL);
+ GFP_KERNEL);
local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!local_rqfpr || !local_rqfcr) {
pr_err("Out of memory\n");
ret = 0;
@@ -726,9 +748,9 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
local_rqfpr[j] = priv->ftp_rqfpr[i];
local_rqfcr[j] = priv->ftp_rqfcr[i];
j--;
- if ((priv->ftp_rqfcr[i] == (RQFCR_PID_PARSE |
- RQFCR_CLE |RQFCR_AND)) &&
- (priv->ftp_rqfpr[i] == cmp_rqfpr))
+ if ((priv->ftp_rqfcr[i] ==
+ (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
+ (priv->ftp_rqfpr[i] == cmp_rqfpr))
break;
}
@@ -743,12 +765,12 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
*/
for (l = i+1; l < MAX_FILER_IDX; l++) {
if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
- !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
+ !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
- RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
+ RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
priv->ftp_rqfpr[l] = FPR_FILER_MASK;
gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
- priv->ftp_rqfpr[l]);
+ priv->ftp_rqfpr[l]);
break;
}
@@ -773,7 +795,7 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
gfar_write_filer(priv, priv->cur_filer_idx,
- local_rqfcr[k], local_rqfpr[k]);
+ local_rqfcr[k], local_rqfpr[k]);
if (!priv->cur_filer_idx)
break;
priv->cur_filer_idx = priv->cur_filer_idx - 1;
@@ -785,7 +807,8 @@ err:
return ret;
}
-static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
+static int gfar_set_hash_opts(struct gfar_private *priv,
+ struct ethtool_rxnfc *cmd)
{
/* write the filer rules here */
if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
@@ -810,10 +833,10 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
netdev_info(priv->ndev,
- "Receive Queue Filtering enabled\n");
+ "Receive Queue Filtering enabled\n");
} else {
netdev_warn(priv->ndev,
- "Receive Queue Filtering disabled\n");
+ "Receive Queue Filtering disabled\n");
return -EOPNOTSUPP;
}
}
@@ -823,16 +846,17 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
i &= RCTRL_PRSDEP_MASK;
if (i == RCTRL_PRSDEP_MASK) {
netdev_info(priv->ndev,
- "Receive Queue Filtering enabled\n");
+ "Receive Queue Filtering enabled\n");
} else {
netdev_warn(priv->ndev,
- "Receive Queue Filtering disabled\n");
+ "Receive Queue Filtering disabled\n");
return -EOPNOTSUPP;
}
}
/* Sets the properties for arbitrary filer rule
- * to the first 4 Layer 4 Bytes */
+ * to the first 4 Layer 4 Bytes
+ */
regs->rbifx = 0xC0C1C2C3;
return 0;
}
@@ -870,14 +894,14 @@ static void gfar_set_mask(u32 mask, struct filer_table *tab)
static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
{
gfar_set_mask(mask, tab);
- tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE
- | RQFCR_AND;
+ tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
+ RQFCR_AND;
tab->fe[tab->index].prop = value;
tab->index++;
}
static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
- struct filer_table *tab)
+ struct filer_table *tab)
{
gfar_set_mask(mask, tab);
tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
@@ -885,8 +909,7 @@ static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
tab->index++;
}
-/*
- * For setting a tuple of value and mask of type flag
+/* For setting a tuple of value and mask of type flag
* Example:
* IP-Src = 10.0.0.0/255.0.0.0
* value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
@@ -901,7 +924,7 @@ static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
* Further the all masks are one-padded for better hardware efficiency.
*/
static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
- struct filer_table *tab)
+ struct filer_table *tab)
{
switch (flag) {
/* 3bit */
@@ -959,7 +982,8 @@ static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
/* Translates value and mask for UDP, TCP or SCTP */
static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
- struct ethtool_tcpip4_spec *mask, struct filer_table *tab)
+ struct ethtool_tcpip4_spec *mask,
+ struct filer_table *tab)
{
gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
@@ -970,97 +994,92 @@ static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
/* Translates value and mask for RAW-IP4 */
static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
- struct ethtool_usrip4_spec *mask, struct filer_table *tab)
+ struct ethtool_usrip4_spec *mask,
+ struct filer_table *tab)
{
gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
- tab);
+ tab);
}
/* Translates value and mask for ETHER spec */
static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
- struct filer_table *tab)
+ struct filer_table *tab)
{
u32 upper_temp_mask = 0;
u32 lower_temp_mask = 0;
+
/* Source address */
if (!is_broadcast_ether_addr(mask->h_source)) {
-
if (is_zero_ether_addr(mask->h_source)) {
upper_temp_mask = 0xFFFFFFFF;
lower_temp_mask = 0xFFFFFFFF;
} else {
- upper_temp_mask = mask->h_source[0] << 16
- | mask->h_source[1] << 8
- | mask->h_source[2];
- lower_temp_mask = mask->h_source[3] << 16
- | mask->h_source[4] << 8
- | mask->h_source[5];
+ upper_temp_mask = mask->h_source[0] << 16 |
+ mask->h_source[1] << 8 |
+ mask->h_source[2];
+ lower_temp_mask = mask->h_source[3] << 16 |
+ mask->h_source[4] << 8 |
+ mask->h_source[5];
}
/* Upper 24bit */
- gfar_set_attribute(
- value->h_source[0] << 16 | value->h_source[1]
- << 8 | value->h_source[2],
- upper_temp_mask, RQFCR_PID_SAH, tab);
+ gfar_set_attribute(value->h_source[0] << 16 |
+ value->h_source[1] << 8 |
+ value->h_source[2],
+ upper_temp_mask, RQFCR_PID_SAH, tab);
/* And the same for the lower part */
- gfar_set_attribute(
- value->h_source[3] << 16 | value->h_source[4]
- << 8 | value->h_source[5],
- lower_temp_mask, RQFCR_PID_SAL, tab);
+ gfar_set_attribute(value->h_source[3] << 16 |
+ value->h_source[4] << 8 |
+ value->h_source[5],
+ lower_temp_mask, RQFCR_PID_SAL, tab);
}
/* Destination address */
if (!is_broadcast_ether_addr(mask->h_dest)) {
-
/* Special for destination is limited broadcast */
- if ((is_broadcast_ether_addr(value->h_dest)
- && is_zero_ether_addr(mask->h_dest))) {
+ if ((is_broadcast_ether_addr(value->h_dest) &&
+ is_zero_ether_addr(mask->h_dest))) {
gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
} else {
-
if (is_zero_ether_addr(mask->h_dest)) {
upper_temp_mask = 0xFFFFFFFF;
lower_temp_mask = 0xFFFFFFFF;
} else {
- upper_temp_mask = mask->h_dest[0] << 16
- | mask->h_dest[1] << 8
- | mask->h_dest[2];
- lower_temp_mask = mask->h_dest[3] << 16
- | mask->h_dest[4] << 8
- | mask->h_dest[5];
+ upper_temp_mask = mask->h_dest[0] << 16 |
+ mask->h_dest[1] << 8 |
+ mask->h_dest[2];
+ lower_temp_mask = mask->h_dest[3] << 16 |
+ mask->h_dest[4] << 8 |
+ mask->h_dest[5];
}
/* Upper 24bit */
- gfar_set_attribute(
- value->h_dest[0] << 16
- | value->h_dest[1] << 8
- | value->h_dest[2],
- upper_temp_mask, RQFCR_PID_DAH, tab);
+ gfar_set_attribute(value->h_dest[0] << 16 |
+ value->h_dest[1] << 8 |
+ value->h_dest[2],
+ upper_temp_mask, RQFCR_PID_DAH, tab);
/* And the same for the lower part */
- gfar_set_attribute(
- value->h_dest[3] << 16
- | value->h_dest[4] << 8
- | value->h_dest[5],
- lower_temp_mask, RQFCR_PID_DAL, tab);
+ gfar_set_attribute(value->h_dest[3] << 16 |
+ value->h_dest[4] << 8 |
+ value->h_dest[5],
+ lower_temp_mask, RQFCR_PID_DAL, tab);
}
}
gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
-
}
/* Convert a rule to binary filter format of gianfar */
static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
- struct filer_table *tab)
+ struct filer_table *tab)
{
u32 vlan = 0, vlan_mask = 0;
u32 id = 0, id_mask = 0;
u32 cfi = 0, cfi_mask = 0;
u32 prio = 0, prio_mask = 0;
-
u32 old_index = tab->index;
/* Check if vlan is wanted */
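The reflowed shift-and-OR expressions in gfar_set_ether() above pack each MAC address into two 24-bit filer attributes (SAH/SAL for the source address, DAH/DAL for the destination). A standalone userspace sketch of that packing, with a made-up example address; this is illustration only, not code from the patch:

#include <stdio.h>
#include <stdint.h>

/* Split a 6-byte MAC address into the upper and lower 24-bit values,
 * mirroring how gfar_set_ether() builds the SAH/SAL and DAH/DAL
 * attributes.  Standalone illustration only.
 */
static void mac_to_halves(const uint8_t mac[6], uint32_t *upper, uint32_t *lower)
{
	*upper = mac[0] << 16 | mac[1] << 8 | mac[2];
	*lower = mac[3] << 16 | mac[4] << 8 | mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
	uint32_t upper, lower;

	mac_to_halves(mac, &upper, &lower);
	printf("upper 24 bits: 0x%06x\n", (unsigned)upper);	/* 0x00049f */
	printf("lower 24 bits: 0x%06x\n", (unsigned)lower);	/* 0x010203 */
	return 0;
}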
@@ -1076,13 +1095,16 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK;
cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK;
cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK;
- prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
- prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+ prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
vlan |= RQFPR_CFI;
vlan_mask |= RQFPR_CFI;
- } else if (cfi != VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
+ } else if (cfi != VLAN_TAG_PRESENT &&
+ cfi_mask == VLAN_TAG_PRESENT) {
vlan_mask |= RQFPR_CFI;
}
}
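The VLAN handling reflowed above pulls the VID, CFI and priority fields out of the 16-bit TCI before they are fed to the filer. A minimal sketch of that decomposition; the mask and shift values are copied from the linux/if_vlan.h definitions, and the example TCI is invented:

#include <stdio.h>
#include <stdint.h>

/* Same mask/shift values as linux/if_vlan.h */
#define VLAN_PRIO_MASK	0xe000
#define VLAN_PRIO_SHIFT	13
#define VLAN_CFI_MASK	0x1000
#define VLAN_VID_MASK	0x0fff

int main(void)
{
	uint16_t tci = 0xa064;	/* made-up example: PCP 5, CFI 0, VID 0x064 */

	printf("vid  = 0x%03x\n", (unsigned)(tci & VLAN_VID_MASK));
	printf("cfi  = %u\n", (tci & VLAN_CFI_MASK) ? 1U : 0U);
	printf("prio = %u\n", (unsigned)((tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT));
	return 0;
}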
@@ -1090,34 +1112,36 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
switch (rule->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
- RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
+ RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
- &rule->m_u.tcp_ip4_spec, tab);
+ &rule->m_u.tcp_ip4_spec, tab);
break;
case UDP_V4_FLOW:
gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
- RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
+ RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
- &rule->m_u.udp_ip4_spec, tab);
+ &rule->m_u.udp_ip4_spec, tab);
break;
case SCTP_V4_FLOW:
gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
- tab);
+ tab);
gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
- gfar_set_basic_ip((struct ethtool_tcpip4_spec *) &rule->h_u,
- (struct ethtool_tcpip4_spec *) &rule->m_u, tab);
+ gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
+ (struct ethtool_tcpip4_spec *)&rule->m_u,
+ tab);
break;
case IP_USER_FLOW:
gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
- tab);
+ tab);
gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
- (struct ethtool_usrip4_spec *) &rule->m_u, tab);
+ (struct ethtool_usrip4_spec *) &rule->m_u,
+ tab);
break;
case ETHER_FLOW:
if (vlan)
gfar_set_parse_bits(vlan, vlan_mask, tab);
gfar_set_ether((struct ethhdr *) &rule->h_u,
- (struct ethhdr *) &rule->m_u, tab);
+ (struct ethhdr *) &rule->m_u, tab);
break;
default:
return -1;
@@ -1152,7 +1176,9 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
}
- /* In rare cases the cache can be full while there is free space in hw */
+ /* In rare cases the cache can be full while there is
+ * free space in hw
+ */
if (tab->index > MAX_FILER_CACHE_IDX - 1)
return -EBUSY;
@@ -1161,7 +1187,7 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
/* Copy size filer entries */
static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
- struct gfar_filer_entry src[0], s32 size)
+ struct gfar_filer_entry src[0], s32 size)
{
while (size > 0) {
size--;
@@ -1171,10 +1197,12 @@ static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
}
/* Delete the contents of the filer-table between start and end
- * and collapse them */
+ * and collapse them
+ */
static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
{
int length;
+
if (end > MAX_FILER_CACHE_IDX || end < begin)
return -EINVAL;
@@ -1200,14 +1228,14 @@ static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
/* Make space on the wanted location */
static int gfar_expand_filer_entries(u32 begin, u32 length,
- struct filer_table *tab)
+ struct filer_table *tab)
{
- if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || begin
- > MAX_FILER_CACHE_IDX)
+ if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
+ begin > MAX_FILER_CACHE_IDX)
return -EINVAL;
gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
- tab->index - length + 1);
+ tab->index - length + 1);
tab->index += length;
return 0;
@@ -1215,9 +1243,10 @@ static int gfar_expand_filer_entries(u32 begin, u32 length,
static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
{
- for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) {
- if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
- == (RQFCR_AND | RQFCR_CLE))
+ for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
+ start++) {
+ if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
+ (RQFCR_AND | RQFCR_CLE))
return start;
}
return -1;
@@ -1225,16 +1254,16 @@ static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
{
- for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) {
- if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
- == (RQFCR_CLE))
+ for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
+ start++) {
+ if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
+ (RQFCR_CLE))
return start;
}
return -1;
}
-/*
- * Uses hardwares clustering option to reduce
+/* Uses the hardware's clustering option to reduce
* the number of filer table entries
*/
static void gfar_cluster_filer(struct filer_table *tab)
@@ -1244,8 +1273,7 @@ static void gfar_cluster_filer(struct filer_table *tab)
while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
j = i;
while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
- /*
- * The cluster entries self and the previous one
+ /* The cluster entries self and the previous one
* (a mask) must be identical!
*/
if (tab->fe[i].ctrl != tab->fe[j].ctrl)
@@ -1260,21 +1288,21 @@ static void gfar_cluster_filer(struct filer_table *tab)
jend = gfar_get_next_cluster_end(j, tab);
if (jend == -1 || iend == -1)
break;
- /*
- * First we make some free space, where our cluster
+
+ /* First we make some free space, where our cluster
* element should be. Then we copy it there and finally
* delete in from its old location.
*/
-
- if (gfar_expand_filer_entries(iend, (jend - j), tab)
- == -EINVAL)
+ if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
+ -EINVAL)
break;
gfar_copy_filer_entries(&(tab->fe[iend + 1]),
- &(tab->fe[jend + 1]), jend - j);
+ &(tab->fe[jend + 1]), jend - j);
if (gfar_trim_filer_entries(jend - 1,
- jend + (jend - j), tab) == -EINVAL)
+ jend + (jend - j),
+ tab) == -EINVAL)
return;
/* Mask out cluster bit */
@@ -1285,8 +1313,9 @@ static void gfar_cluster_filer(struct filer_table *tab)
/* Swaps the masked bits of a1<>a2 and b1<>b2 */
static void gfar_swap_bits(struct gfar_filer_entry *a1,
- struct gfar_filer_entry *a2, struct gfar_filer_entry *b1,
- struct gfar_filer_entry *b2, u32 mask)
+ struct gfar_filer_entry *a2,
+ struct gfar_filer_entry *b1,
+ struct gfar_filer_entry *b2, u32 mask)
{
u32 temp[4];
temp[0] = a1->ctrl & mask;
@@ -1305,13 +1334,12 @@ static void gfar_swap_bits(struct gfar_filer_entry *a1,
b2->ctrl |= temp[2];
}
-/*
- * Generate a list consisting of masks values with their start and
+/* Generate a list consisting of masks values with their start and
* end of validity and block as indicator for parts belonging
* together (glued by ANDs) in mask_table
*/
static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
- struct filer_table *tab)
+ struct filer_table *tab)
{
u32 i, and_index = 0, block_index = 1;
@@ -1327,13 +1355,13 @@ static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
and_index++;
}
/* cluster starts and ends will be separated because they should
- * hold their position */
+ * hold their position
+ */
if (tab->fe[i].ctrl & RQFCR_CLE)
block_index++;
/* A not set AND indicates the end of a depended block */
if (!(tab->fe[i].ctrl & RQFCR_AND))
block_index++;
-
}
mask_table[and_index - 1].end = i - 1;
@@ -1341,14 +1369,13 @@ static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
return and_index;
}
-/*
- * Sorts the entries of mask_table by the values of the masks.
+/* Sorts the entries of mask_table by the values of the masks.
* Important: The 0xFF80 flags of the first and last entry of a
* block must hold their position (which queue, CLusterEnable, ReJEct,
* AND)
*/
static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
- struct filer_table *temp_table, u32 and_index)
+ struct filer_table *temp_table, u32 and_index)
{
/* Pointer to compare function (_asc or _desc) */
int (*gfar_comp)(const void *, const void *);
@@ -1359,16 +1386,16 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
gfar_comp = &gfar_comp_desc;
for (i = 0; i < and_index; i++) {
-
if (prev != mask_table[i].block) {
old_first = mask_table[start].start + 1;
old_last = mask_table[i - 1].end;
sort(mask_table + start, size,
- sizeof(struct gfar_mask_entry),
- gfar_comp, &gfar_swap);
+ sizeof(struct gfar_mask_entry),
+ gfar_comp, &gfar_swap);
/* Toggle order for every block. This makes the
- * thing more efficient! */
+ * thing more efficient!
+ */
if (gfar_comp == gfar_comp_desc)
gfar_comp = &gfar_comp_asc;
else
@@ -1378,12 +1405,11 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
new_last = mask_table[i - 1].end;
gfar_swap_bits(&temp_table->fe[new_first],
- &temp_table->fe[old_first],
- &temp_table->fe[new_last],
- &temp_table->fe[old_last],
- RQFCR_QUEUE | RQFCR_CLE |
- RQFCR_RJE | RQFCR_AND
- );
+ &temp_table->fe[old_first],
+ &temp_table->fe[new_last],
+ &temp_table->fe[old_last],
+ RQFCR_QUEUE | RQFCR_CLE |
+ RQFCR_RJE | RQFCR_AND);
start = i;
size = 0;
@@ -1391,11 +1417,9 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
size++;
prev = mask_table[i].block;
}
-
}
-/*
- * Reduces the number of masks needed in the filer table to save entries
+/* Reduces the number of masks needed in the filer table to save entries
* This is done by sorting the masks of a depended block. A depended block is
* identified by gluing ANDs or CLE. The sorting order toggles after every
* block. Of course entries in scope of a mask must change their location with
@@ -1410,13 +1434,14 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
s32 ret = 0;
/* We need a copy of the filer table because
- * we want to change its order */
+ * we want to change its order
+ */
temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
if (temp_table == NULL)
return -ENOMEM;
mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
- sizeof(struct gfar_mask_entry), GFP_KERNEL);
+ sizeof(struct gfar_mask_entry), GFP_KERNEL);
if (mask_table == NULL) {
ret = -ENOMEM;
@@ -1428,7 +1453,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
gfar_sort_mask_table(mask_table, temp_table, and_index);
/* Now we can copy the data from our duplicated filer table to
- * the real one in the order the mask table says */
+ * the real one in the order the mask table says
+ */
for (i = 0; i < and_index; i++) {
size = mask_table[i].end - mask_table[i].start + 1;
gfar_copy_filer_entries(&(tab->fe[j]),
@@ -1437,7 +1463,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
}
/* And finally we just have to check for duplicated masks and drop the
- * second ones */
+ * second ones
+ */
for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
if (tab->fe[i].ctrl == 0x80) {
previous_mask = i++;
@@ -1448,7 +1475,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
if (tab->fe[i].ctrl == 0x80) {
if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
/* Two identical ones found!
- * So drop the second one! */
+ * So drop the second one!
+ */
gfar_trim_filer_entries(i, i, tab);
} else
/* Not identical! */
@@ -1463,7 +1491,7 @@ end: kfree(temp_table);
/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
- struct filer_table *tab)
+ struct filer_table *tab)
{
u32 i = 0;
if (tab->index > MAX_FILER_IDX - 1)
@@ -1473,13 +1501,15 @@ static int gfar_write_filer_table(struct gfar_private *priv,
lock_rx_qs(priv);
/* Fill regular entries */
- for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); i++)
+ for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
+ i++)
gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
/* Fill the rest with fall-troughs */
for (; i < MAX_FILER_IDX - 1; i++)
gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
/* Last entry must be default accept
- * because that's what people expect */
+ * because that's what people expect
+ */
gfar_write_filer(priv, i, 0x20, 0x0);
unlock_rx_qs(priv);
@@ -1488,21 +1518,21 @@ static int gfar_write_filer_table(struct gfar_private *priv,
}
static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
- struct gfar_private *priv)
+ struct gfar_private *priv)
{
if (flow->flow_type & FLOW_EXT) {
if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
netdev_warn(priv->ndev,
- "User-specific data not supported!\n");
+ "User-specific data not supported!\n");
if (~flow->m_ext.vlan_etype)
netdev_warn(priv->ndev,
- "VLAN-etype not supported!\n");
+ "VLAN-etype not supported!\n");
}
if (flow->flow_type == IP_USER_FLOW)
if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
netdev_warn(priv->ndev,
- "IP-Version differing from IPv4 not supported!\n");
+ "IP-Version differing from IPv4 not supported!\n");
return 0;
}
@@ -1520,15 +1550,18 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
return -ENOMEM;
/* Now convert the existing filer data from flow_spec into
- * filer tables binary format */
+ * filer tables binary format
+ */
list_for_each_entry(j, &priv->rx_list.list, list) {
ret = gfar_convert_to_filer(&j->fs, tab);
if (ret == -EBUSY) {
- netdev_err(priv->ndev, "Rule not added: No free space!\n");
+ netdev_err(priv->ndev,
+ "Rule not added: No free space!\n");
goto end;
}
if (ret == -1) {
- netdev_err(priv->ndev, "Rule not added: Unsupported Flow-type!\n");
+ netdev_err(priv->ndev,
+ "Rule not added: Unsupported Flow-type!\n");
goto end;
}
}
@@ -1540,9 +1573,9 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
gfar_optimize_filer_masks(tab);
pr_debug("\n\tSummary:\n"
- "\tData on hardware: %d\n"
- "\tCompression rate: %d%%\n",
- tab->index, 100 - (100 * tab->index) / i);
+ "\tData on hardware: %d\n"
+ "\tCompression rate: %d%%\n",
+ tab->index, 100 - (100 * tab->index) / i);
/* Write everything to hardware */
ret = gfar_write_filer_table(priv, tab);
@@ -1551,7 +1584,8 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
goto end;
}
-end: kfree(tab);
+end:
+ kfree(tab);
return ret;
}
@@ -1569,7 +1603,7 @@ static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
}
static int gfar_add_cls(struct gfar_private *priv,
- struct ethtool_rx_flow_spec *flow)
+ struct ethtool_rx_flow_spec *flow)
{
struct ethtool_flow_spec_container *temp, *comp;
int ret = 0;
@@ -1591,7 +1625,6 @@ static int gfar_add_cls(struct gfar_private *priv,
list_add(&temp->list, &priv->rx_list.list);
goto process;
} else {
-
list_for_each_entry(comp, &priv->rx_list.list, list) {
if (comp->fs.location > flow->location) {
list_add_tail(&temp->list, &comp->list);
@@ -1599,8 +1632,8 @@ static int gfar_add_cls(struct gfar_private *priv,
}
if (comp->fs.location == flow->location) {
netdev_err(priv->ndev,
- "Rule not added: ID %d not free!\n",
- flow->location);
+ "Rule not added: ID %d not free!\n",
+ flow->location);
ret = -EBUSY;
goto clean_mem;
}
@@ -1642,7 +1675,6 @@ static int gfar_del_cls(struct gfar_private *priv, u32 loc)
}
return ret;
-
}
static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
@@ -1663,7 +1695,7 @@ static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
}
static int gfar_get_cls_all(struct gfar_private *priv,
- struct ethtool_rxnfc *cmd, u32 *rule_locs)
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct ethtool_flow_spec_container *comp;
u32 i = 0;
@@ -1714,7 +1746,7 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
}
static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+ u32 *rule_locs)
{
struct gfar_private *priv = netdev_priv(dev);
int ret = 0;
@@ -1748,23 +1780,19 @@ static int gfar_get_ts_info(struct net_device *dev,
struct gfar_private *priv = netdev_priv(dev);
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
- info->so_timestamping =
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
info->phc_index = -1;
return 0;
}
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = gfar_phc_index;
- info->tx_types =
- (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
- info->rx_filters =
- (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_ALL);
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
return 0;
}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 9ac14f8..21c6574 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -185,7 +185,7 @@ static void mem_disp(u8 *addr, int size)
for (; (u32) i < (u32) addr + size4Aling; i += 4)
printk("%08x ", *((u32 *) (i)));
for (; (u32) i < (u32) addr + size; i++)
- printk("%02x", *((u8 *) (i)));
+ printk("%02x", *((i)));
if (notAlign == 1)
printk("\r\n");
}
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index d496673..3f4391b 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1217,7 +1217,7 @@ static int hp100_init_rxpdl(struct net_device *dev,
ringptr->pdl = pdlptr + 1;
ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr + 1);
- ringptr->skb = (void *) NULL;
+ ringptr->skb = NULL;
/*
* Write address and length of first PDL Fragment (which is used for
@@ -1243,7 +1243,7 @@ static int hp100_init_txpdl(struct net_device *dev,
ringptr->pdl = pdlptr; /* +1; */
ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr); /* +1 */
- ringptr->skb = (void *) NULL;
+ ringptr->skb = NULL;
return roundup(MAX_TX_FRAG * 2 + 2, 4);
}
@@ -1628,7 +1628,7 @@ static void hp100_clean_txring(struct net_device *dev)
/* Conversion to new PCI API : NOP */
pci_unmap_single(lp->pci_dev, (dma_addr_t) lp->txrhead->pdl[1], lp->txrhead->pdl[2], PCI_DMA_TODEVICE);
dev_kfree_skb_any(lp->txrhead->skb);
- lp->txrhead->skb = (void *) NULL;
+ lp->txrhead->skb = NULL;
lp->txrhead = lp->txrhead->next;
lp->txrcommit--;
}
diff --git a/drivers/net/ethernet/i825xx/lp486e.c b/drivers/net/ethernet/i825xx/lp486e.c
index 6c2952c..3735bfa 100644
--- a/drivers/net/ethernet/i825xx/lp486e.c
+++ b/drivers/net/ethernet/i825xx/lp486e.c
@@ -629,10 +629,10 @@ init_i596(struct net_device *dev) {
memcpy ((void *)lp->eth_addr, dev->dev_addr, 6);
lp->set_add.command = CmdIASetup;
- i596_add_cmd(dev, (struct i596_cmd *)&lp->set_add);
+ i596_add_cmd(dev, &lp->set_add);
lp->tdr.command = CmdTDR;
- i596_add_cmd(dev, (struct i596_cmd *)&lp->tdr);
+ i596_add_cmd(dev, &lp->tdr);
if (lp->scb.command && i596_timeout(dev, "i82596 init", 200))
return 1;
@@ -737,7 +737,7 @@ i596_cleanup_cmd(struct net_device *dev) {
lp = netdev_priv(dev);
while (lp->cmd_head) {
- cmd = (struct i596_cmd *)lp->cmd_head;
+ cmd = lp->cmd_head;
lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
lp->cmd_backlog--;
@@ -1281,7 +1281,7 @@ static void set_multicast_list(struct net_device *dev) {
lp->i596_config[8] |= 0x01;
}
- i596_add_cmd(dev, (struct i596_cmd *) &lp->set_conf);
+ i596_add_cmd(dev, &lp->set_conf);
}
}
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index cae17f4..353f57f6 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -571,7 +571,7 @@ static int init586(struct net_device *dev)
}
#endif
- ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */
+ ptr = alloc_rfa(dev,ptr); /* init receive-frame-area */
/*
* alloc xmit-buffs / init xmit_cmds
@@ -584,7 +584,7 @@ static int init586(struct net_device *dev)
ptr = (char *) ptr + XMIT_BUFF_SIZE;
p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */
ptr = (char *) ptr + sizeof(struct tbd_struct);
- if((void *)ptr > (void *)dev->mem_end)
+ if(ptr > (void *)dev->mem_end)
{
printk("%s: not enough shared-mem for your configuration!\n",dev->name);
return 1;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 4fb47f1..cb66f57 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -376,9 +376,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
return 0;
}
-/**
- * allocates memory for a queue and registers pages in phyp
- */
+/* allocates memory for a queue and registers pages in phyp */
static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
int nr_pages, int wqe_size, int act_nr_sges,
struct ehea_adapter *adapter, int h_call_q_selector)
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 79b07ec..0cafe4f 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -122,8 +122,10 @@ config IGB_DCA
config IGB_PTP
bool "PTP Hardware Clock (PHC)"
- default y
- depends on IGB && PTP_1588_CLOCK
+ default n
+ depends on IGB && EXPERIMENTAL
+ select PPS
+ select PTP_1588_CLOCK
---help---
Say Y here if you want to use PTP Hardware Clock (PHC) in the
driver. Only the basic clock operations have been implemented.
@@ -223,7 +225,9 @@ config IXGBE_DCB
config IXGBE_PTP
bool "PTP Clock Support"
default n
- depends on IXGBE && PTP_1588_CLOCK
+ depends on IXGBE && EXPERIMENTAL
+ select PPS
+ select PTP_1588_CLOCK
---help---
Say Y here if you want support for 1588 Timestamping with a
PHC device, using the PTP 1588 Clock support. This is
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index ada720b..535f94f 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1249,20 +1249,35 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
const struct firmware *fw = nic->fw;
u8 timer, bundle, min_size;
int err = 0;
+ bool required = false;
/* do not load u-code for ICH devices */
if (nic->flags & ich)
return NULL;
- /* Search for ucode match against h/w revision */
- if (nic->mac == mac_82559_D101M)
+ /* Search for ucode match against h/w revision
+ *
+ * Based on comments in the source code for the FreeBSD fxp
+ * driver, the FIRMWARE_D102E ucode includes both CPUSaver and
+ *
+ * "fixes for bugs in the B-step hardware (specifically, bugs
+ * with Inline Receive)."
+ *
+ * So we must fail if it cannot be loaded.
+ *
+ * The other microcode files are only required for the optional
+ * CPUSaver feature. Nice to have, but no reason to fail.
+ */
+ if (nic->mac == mac_82559_D101M) {
fw_name = FIRMWARE_D101M;
- else if (nic->mac == mac_82559_D101S)
+ } else if (nic->mac == mac_82559_D101S) {
fw_name = FIRMWARE_D101S;
- else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
+ } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
fw_name = FIRMWARE_D102E;
- else /* No ucode on other devices */
+ required = true;
+ } else { /* No ucode on other devices */
return NULL;
+ }
/* If the firmware has not previously been loaded, request a pointer
* to it. If it was previously loaded, we are reinitializing the
@@ -1273,10 +1288,17 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
err = request_firmware(&fw, fw_name, &nic->pdev->dev);
if (err) {
- netif_err(nic, probe, nic->netdev,
- "Failed to load firmware \"%s\": %d\n",
- fw_name, err);
- return ERR_PTR(err);
+ if (required) {
+ netif_err(nic, probe, nic->netdev,
+ "Failed to load firmware \"%s\": %d\n",
+ fw_name, err);
+ return ERR_PTR(err);
+ } else {
+ netif_info(nic, probe, nic->netdev,
+ "CPUSaver disabled. Needs \"%s\": %d\n",
+ fw_name, err);
+ return NULL;
+ }
}
/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
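The e100 hunk above makes the microcode mandatory only for D102E parts, where the blob carries hardware bug fixes, and merely informational for the CPUSaver-only files. A userspace sketch of the same required-versus-optional pattern, using plain stdio and made-up file names rather than the kernel's request_firmware() interface:

#include <stdio.h>
#include <stdbool.h>

/* A load failure is fatal only when the blob carries bug fixes;
 * otherwise we just note that the optional feature stays off.
 */
static int load_blob(const char *name, bool required)
{
	FILE *f = fopen(name, "rb");

	if (!f) {
		if (required) {
			fprintf(stderr, "failed to load required \"%s\"\n", name);
			return -1;
		}
		fprintf(stderr, "optional \"%s\" missing, feature disabled\n", name);
		return 0;
	}
	/* ... parse and apply the blob here ... */
	fclose(f);
	return 0;
}

int main(void)
{
	load_blob("d101m_ucode.bin", false);	/* CPUSaver only: nice to have */
	return load_blob("d102e_ucode.bin", true) ? 1 : 0;	/* carries bug fixes */
}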
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 3103f0b..736a7d9 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1851,6 +1851,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_sset_count = e1000_get_sset_count,
.get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void e1000_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index c526279e4..3d68395 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -399,7 +399,7 @@ void e1000_set_media_type(struct e1000_hw *hw)
}
/**
- * e1000_reset_hw: reset the hardware completely
+ * e1000_reset_hw - reset the hardware completely
* @hw: Struct containing variables accessed by shared code
*
* Reset the transmit and receive units; mask and clear all interrupts.
@@ -546,7 +546,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
}
/**
- * e1000_init_hw: Performs basic configuration of the adapter.
+ * e1000_init_hw - Performs basic configuration of the adapter.
* @hw: Struct containing variables accessed by shared code
*
* Assumes that the controller has previously been reset and is in a
@@ -2591,7 +2591,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
* @hw: Struct containing variables accessed by shared code
* @speed: Speed of the connection
* @duplex: Duplex setting of the connection
-
+ *
* Detects the current speed and duplex settings of the hardware.
*/
s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
@@ -2959,7 +2959,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
* @hw: Struct containing variables accessed by shared code
* @reg_addr: address of the PHY register to write
* @data: data to write to the PHY
-
+ *
* Writes a value to a PHY register
*/
s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 95731c8..3bfbb8d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -721,9 +721,7 @@ void e1000_reset(struct e1000_adapter *adapter)
e1000_release_manageability(adapter);
}
-/**
- * Dump the eeprom for users having checksum issues
- **/
+/* Dump the eeprom for users having checksum issues */
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -1078,18 +1076,18 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->features |= netdev->hw_features;
- netdev->hw_features |= NETIF_F_RXCSUM;
- netdev->hw_features |= NETIF_F_RXALL;
- netdev->hw_features |= NETIF_F_RXFCS;
+ netdev->hw_features |= (NETIF_F_RXCSUM |
+ NETIF_F_RXALL |
+ NETIF_F_RXFCS);
if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= NETIF_F_HIGHDMA;
}
- netdev->vlan_features |= NETIF_F_TSO;
- netdev->vlan_features |= NETIF_F_HW_CSUM;
- netdev->vlan_features |= NETIF_F_SG;
+ netdev->vlan_features |= (NETIF_F_TSO |
+ NETIF_F_HW_CSUM |
+ NETIF_F_SG);
netdev->priv_flags |= IFF_UNICAST_FLT;
@@ -3056,14 +3054,13 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
mmiowb();
}
-/**
- * 82547 workaround to avoid controller hang in half-duplex environment.
+/* 82547 workaround to avoid controller hang in half-duplex environment.
* The workaround is to avoid queuing a large packet that would span
* the internal Tx FIFO ring boundary by notifying the stack to resend
* the packet at a later time. This gives the Tx FIFO an opportunity to
* flush all packets. When that occurs, we reset the Tx FIFO pointers
* to the beginning of the Tx FIFO.
- **/
+ */
#define E1000_FIFO_HDR 0x10
#define E1000_82547_PAD_LEN 0x3E0
@@ -4080,7 +4077,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
spin_lock_irqsave(&adapter->stats_lock,
irq_flags);
e1000_tbi_adjust_stats(hw, &adapter->stats,
- length, skb->data);
+ length, mapped);
spin_unlock_irqrestore(&adapter->stats_lock,
irq_flags);
length--;
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 36db4df..0b3bade 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1572,6 +1572,9 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
ctrl = er32(CTRL);
status = er32(STATUS);
rxcw = er32(RXCW);
+ /* SYNCH bit and IV bit are sticky */
+ udelay(10);
+ rxcw = er32(RXCW);
if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
@@ -1677,16 +1680,18 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
e_dbg("ANYSTATE -> DOWN\n");
} else {
/*
- * Check several times, if Sync and Config
- * both are consistently 1 then simply ignore
- * the Invalid bit and restart Autoneg
+ * Check several times, if SYNCH bit and CONFIG
+ * bit both are consistently 1 then simply ignore
+ * the IV bit and restart Autoneg
*/
for (i = 0; i < AN_RETRY_COUNT; i++) {
udelay(10);
rxcw = er32(RXCW);
- if ((rxcw & E1000_RXCW_IV) &&
- !((rxcw & E1000_RXCW_SYNCH) &&
- (rxcw & E1000_RXCW_C))) {
+ if ((rxcw & E1000_RXCW_SYNCH) &&
+ (rxcw & E1000_RXCW_C))
+ continue;
+
+ if (rxcw & E1000_RXCW_IV) {
mac->serdes_has_link = false;
mac->serdes_link_state =
e1000_serdes_link_down;
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 351a409..76edbc1 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -103,6 +103,7 @@
#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 6e6fffb..cd15332 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -514,6 +514,7 @@ extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
+extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
extern unsigned int copybreak;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index d863075..0349e24 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -258,7 +258,8 @@ static int e1000_set_settings(struct net_device *netdev,
* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed
*/
- if (hw->phy.ops.check_reset_block(hw)) {
+ if (hw->phy.ops.check_reset_block &&
+ hw->phy.ops.check_reset_block(hw)) {
e_err("Cannot change link characteristics when SoL/IDER is active.\n");
return -EINVAL;
}
@@ -1615,7 +1616,8 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
* PHY loopback cannot be performed if SoL/IDER
* sessions are active
*/
- if (hw->phy.ops.check_reset_block(hw)) {
+ if (hw->phy.ops.check_reset_block &&
+ hw->phy.ops.check_reset_block(hw)) {
e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
*data = 0;
goto out;
@@ -1895,7 +1897,6 @@ static int e1000_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
((ec->rx_coalesce_usecs > 4) &&
@@ -1914,9 +1915,9 @@ static int e1000_set_coalesce(struct net_device *netdev,
}
if (adapter->itr_setting != 0)
- ew32(ITR, 1000000000 / (adapter->itr * 256));
+ e1000e_write_itr(adapter, adapter->itr);
else
- ew32(ITR, 0);
+ e1000e_write_itr(adapter, 0);
return 0;
}
@@ -2060,6 +2061,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce,
.get_rxnfc = e1000_get_rxnfc,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void e1000e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index bbf70ba..e3a7b07 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -165,14 +165,14 @@
#define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */
/* Intel Rapid Start Technology Support */
-#define I217_PROXY_CTRL PHY_REG(BM_WUC_PAGE, 70)
+#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70)
#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
-#define I217_SxCTRL_MASK 0x1000
+#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000
#define I217_CGFREG PHY_REG(772, 29)
-#define I217_CGFREG_MASK 0x0002
+#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
#define I217_MEMPWR PHY_REG(772, 26)
-#define I217_MEMPWR_MASK 0x0010
+#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
/* Strapping Option Register - RO */
#define E1000_STRAP 0x0000C
@@ -325,24 +325,46 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
**/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
- u16 phy_reg;
- u32 phy_id;
+ u16 phy_reg = 0;
+ u32 phy_id = 0;
+ s32 ret_val;
+ u16 retry_count;
+
+ for (retry_count = 0; retry_count < 2; retry_count++) {
+ ret_val = e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
+ if (ret_val || (phy_reg == 0xFFFF))
+ continue;
+ phy_id = (u32)(phy_reg << 16);
- e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
- phy_id = (u32)(phy_reg << 16);
- e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
- phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+ ret_val = e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
+ if (ret_val || (phy_reg == 0xFFFF)) {
+ phy_id = 0;
+ continue;
+ }
+ phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+ break;
+ }
if (hw->phy.id) {
if (hw->phy.id == phy_id)
return true;
- } else {
- if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK))
- hw->phy.id = phy_id;
+ } else if (phy_id) {
+ hw->phy.id = phy_id;
+ hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
return true;
}
- return false;
+ /*
+ * In case the PHY needs to be in mdio slow mode,
+ * set slow mode and try to get the PHY id again.
+ */
+ hw->phy.ops.release(hw);
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (!ret_val)
+ ret_val = e1000e_get_phy_id(hw);
+ hw->phy.ops.acquire(hw);
+
+ return !ret_val;
}
/**
@@ -4089,12 +4111,12 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
* power good.
*/
e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
- phy_reg |= I217_SxCTRL_MASK;
+ phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
/* Disable the SMB release on LCD reset. */
e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
- phy_reg &= ~I217_MEMPWR;
+ phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
}
@@ -4103,7 +4125,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
* Support
*/
e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
- phy_reg |= I217_CGFREG_MASK;
+ phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
release:
@@ -4176,7 +4198,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
if (ret_val)
goto release;
- phy_reg |= I217_MEMPWR_MASK;
+ phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
/* Disable Proxy */
@@ -4186,7 +4208,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
if (ret_val)
goto release;
- phy_reg &= ~I217_CGFREG_MASK;
+ phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
release:
if (ret_val)
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 026e8b3..a134399 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -709,7 +709,7 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw)
* In the case of the phy reset being blocked, we already have a link.
* We do not need to set it up again.
*/
- if (hw->phy.ops.check_reset_block(hw))
+ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
return 0;
/*
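The guard added in the e1000e ethtool.c and mac.c hunks above (and again in netdev.c below) treats phy.ops.check_reset_block as an optional callback: test the pointer, then call it. A self-contained sketch of that pattern with invented types and names, not the e1000e structures:

#include <stdio.h>
#include <stdbool.h>

/* The callback may be absent, so test the pointer before calling it;
 * a missing callback means "reset is never blocked".
 */
struct phy_ops {
	bool (*check_reset_block)(void *hw);
};

static bool reset_is_blocked(const struct phy_ops *ops, void *hw)
{
	return ops->check_reset_block && ops->check_reset_block(hw);
}

int main(void)
{
	struct phy_ops ops = { .check_reset_block = NULL };

	printf("blocked: %d\n", reset_is_blocked(&ops, NULL));	/* 0 */
	return 0;
}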
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a4b0435..95b2453 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -496,7 +496,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
* @sk_buff: socket buffer with received data
**/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
- __le16 csum, struct sk_buff *skb)
+ struct sk_buff *skb)
{
u16 status = (u16)status_err;
u8 errors = (u8)(status_err >> 24);
@@ -511,8 +511,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
if (status & E1000_RXD_STAT_IXSM)
return;
- /* TCP/UDP checksum error bit is set */
- if (errors & E1000_RXD_ERR_TCPE) {
+ /* TCP/UDP checksum error bit or IP checksum error bit is set */
+ if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
/* let the stack verify checksum errors */
adapter->hw_csum_err++;
return;
@@ -523,19 +523,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
return;
/* It must be a TCP or UDP packet with a valid checksum */
- if (status & E1000_RXD_STAT_TCPCS) {
- /* TCP checksum is good */
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- /*
- * IP fragment with UDP payload
- * Hardware complements the payload checksum, so we undo it
- * and then put the value in host order for further stack use.
- */
- __sum16 sum = (__force __sum16)swab16((__force u16)csum);
- skb->csum = csum_unfold(~sum);
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
adapter->hw_csum_good++;
}
@@ -954,8 +942,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
skb_put(skb, length);
/* Receive Checksum Offload */
- e1000_rx_checksum(adapter, staterr,
- rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+ e1000_rx_checksum(adapter, staterr, skb);
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
@@ -1341,8 +1328,7 @@ copydone:
total_rx_bytes += skb->len;
total_rx_packets++;
- e1000_rx_checksum(adapter, staterr,
- rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+ e1000_rx_checksum(adapter, staterr, skb);
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
@@ -1512,9 +1498,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
}
}
- /* Receive Checksum Offload XXX recompute due to CRC strip? */
- e1000_rx_checksum(adapter, staterr,
- rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+ /* Receive Checksum Offload */
+ e1000_rx_checksum(adapter, staterr, skb);
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
@@ -2174,7 +2159,7 @@ void e1000e_release_hw_control(struct e1000_adapter *adapter)
}
/**
- * @e1000_alloc_ring - allocate memory for a ring structure
+ * e1000_alloc_ring_dma - allocate memory for a ring structure
**/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
struct e1000_ring *ring)
@@ -2489,6 +2474,30 @@ set_itr_now:
}
/**
+ * e1000e_write_itr - write the ITR value to the appropriate registers
+ * @adapter: address of board private structure
+ * @itr: new ITR value to program
+ *
+ * e1000e_write_itr determines if the adapter is in MSI-X mode
+ * and, if so, writes the EITR registers with the ITR value.
+ * Otherwise, it writes the ITR value into the ITR register.
+ **/
+void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;
+
+ if (adapter->msix_entries) {
+ int vector;
+
+ for (vector = 0; vector < adapter->num_vectors; vector++)
+ writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector));
+ } else {
+ ew32(ITR, new_itr);
+ }
+}
+
+/**
* e1000_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
**/
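For reference, the conversion inside e1000e_write_itr() above maps an interrupt rate (interrupts per second) to an interval register value; the 256 in the divisor reflects the register's 256 ns granularity, as implied by the expression itself. A small worked example, not driver code:

#include <stdio.h>
#include <stdint.h>

/* "itr" is a rate in interrupts per second, the register takes the
 * interval between interrupts in 256 ns units, and 0 disables
 * throttling entirely: 1e9 ns / itr = ns per interrupt, then / 256.
 */
static uint32_t itr_to_reg(uint32_t itr)
{
	return itr ? 1000000000 / (itr * 256) : 0;
}

int main(void)
{
	printf("itr=20000 int/s -> reg=%u\n", (unsigned)itr_to_reg(20000));	/* 195 */
	printf("itr=8000  int/s -> reg=%u\n", (unsigned)itr_to_reg(8000));	/* 488 */
	printf("itr=0 (off)     -> reg=%u\n", (unsigned)itr_to_reg(0));
	return 0;
}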
@@ -3074,7 +3083,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
/* irq moderation */
ew32(RADV, adapter->rx_abs_int_delay);
if ((adapter->itr_setting != 0) && (adapter->itr != 0))
- ew32(ITR, 1000000000 / (adapter->itr * 256));
+ e1000e_write_itr(adapter, adapter->itr);
ctrl_ext = er32(CTRL_EXT);
/* Auto-Mask interrupts upon ICR access */
@@ -3098,19 +3107,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
/* Enable Receive Checksum Offload for TCP and UDP */
rxcsum = er32(RXCSUM);
- if (adapter->netdev->features & NETIF_F_RXCSUM) {
+ if (adapter->netdev->features & NETIF_F_RXCSUM)
rxcsum |= E1000_RXCSUM_TUOFL;
-
- /*
- * IPv4 payload checksum for UDP fragments must be
- * used in conjunction with packet-split.
- */
- if (adapter->rx_ps_pages)
- rxcsum |= E1000_RXCSUM_IPPCSE;
- } else {
+ else
rxcsum &= ~E1000_RXCSUM_TUOFL;
- /* no need to clear IPPCSE as it defaults to 0 */
- }
ew32(RXCSUM, rxcsum);
if (adapter->hw.mac.type == e1000_pch2lan) {
@@ -3510,14 +3510,14 @@ void e1000e_reset(struct e1000_adapter *adapter)
dev_info(&adapter->pdev->dev,
"Interrupt Throttle Rate turned off\n");
adapter->flags2 |= FLAG2_DISABLE_AIM;
- ew32(ITR, 0);
+ e1000e_write_itr(adapter, 0);
}
} else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
dev_info(&adapter->pdev->dev,
"Interrupt Throttle Rate turned on\n");
adapter->flags2 &= ~FLAG2_DISABLE_AIM;
adapter->itr = 20000;
- ew32(ITR, 1000000000 / (adapter->itr * 256));
+ e1000e_write_itr(adapter, adapter->itr);
}
}
@@ -4600,7 +4600,7 @@ link_up:
adapter->gorc - adapter->gotc) / 10000;
u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
- ew32(ITR, 1000000000 / (itr * 256));
+ e1000e_write_itr(adapter, itr);
}
/* Cause software interrupt to ensure Rx ring is cleaned */
@@ -5241,22 +5241,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
/* Jumbo frame support */
- if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
- if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
- e_err("Jumbo Frames not supported.\n");
- return -EINVAL;
- }
-
- /*
- * IP payload checksum (enabled with jumbos/packet-split when
- * Rx checksum is enabled) and generation of RSS hash is
- * mutually exclusive in the hardware.
- */
- if ((netdev->features & NETIF_F_RXCSUM) &&
- (netdev->features & NETIF_F_RXHASH)) {
- e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled. Disable one of the receive offload features before enabling jumbos.\n");
- return -EINVAL;
- }
+ if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+ !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+ e_err("Jumbo Frames not supported.\n");
+ return -EINVAL;
}
/* Supported frame sizes */
@@ -6030,17 +6018,6 @@ static int e1000_set_features(struct net_device *netdev,
NETIF_F_RXALL)))
return 0;
- /*
- * IP payload checksum (enabled with jumbos/packet-split when Rx
- * checksum is enabled) and generation of RSS hash is mutually
- * exclusive in the hardware.
- */
- if (adapter->rx_ps_pages &&
- (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
- e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames. Disable jumbos or enable only one of the receive offload features.\n");
- return -EINVAL;
- }
-
if (changed & NETIF_F_RXFCS) {
if (features & NETIF_F_RXFCS) {
adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
@@ -6237,8 +6214,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->hw.phy.ms_type = e1000_ms_hw_default;
}
- if (hw->phy.ops.check_reset_block(hw))
- e_info("PHY reset is blocked due to SOL/IDER session.\n");
+ if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
+ dev_info(&pdev->dev,
+ "PHY reset is blocked due to SOL/IDER session.\n");
/* Set initial default active device features */
netdev->features = (NETIF_F_SG |
@@ -6288,7 +6266,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
break;
if (i == 2) {
- e_err("The NVM Checksum Is Not Valid\n");
+ dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
@@ -6298,13 +6276,15 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* copy the MAC address */
if (e1000e_read_mac_addr(&adapter->hw))
- e_err("NVM Read Error while reading MAC address\n");
+ dev_err(&pdev->dev,
+ "NVM Read Error while reading MAC address\n");
memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr)) {
- e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
+ dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
+ netdev->perm_addr);
err = -EIO;
goto err_eeprom;
}
@@ -6404,7 +6384,7 @@ err_register:
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_release_hw_control(adapter);
err_eeprom:
- if (!hw->phy.ops.check_reset_block(hw))
+ if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
kfree(adapter->tx_ring);
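Editor's note: the e1000e hunks above replace each open-coded ew32(ITR, 1000000000 / (itr * 256)) write with the e1000e_write_itr() helper. As a reminder of what that expression does, here is a minimal user-space sketch (plain C, not driver code) of converting a target rate in interrupts per second into the 256 ns units the ITR register counts in; the helper itself is defined outside these hunks and is assumed to perform the same conversion before touching the register.

/* Standalone sketch, not kernel code: convert a desired interrupt rate in
 * interrupts/second into the value the e1000e ITR register expects.  The
 * hardware counts the interval in 256 ns units, which is where the
 * 1000000000 / (rate * 256) expression in the hunks above comes from.
 * A rate of 0 means "throttling off" and is written as 0 directly.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t itr_rate_to_register(uint32_t ints_per_sec)
{
	if (ints_per_sec == 0)
		return 0;			/* AIM disabled, no throttle */
	return 1000000000u / (ints_per_sec * 256u);
}

int main(void)
{
	/* 20000 ints/s is the rate the driver uses for dynamic mode */
	printf("ITR for 20000 ints/s: %u\n", itr_rate_to_register(20000));
	printf("ITR for  8000 ints/s: %u\n", itr_rate_to_register(8000));
	return 0;
}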
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 55cc1565b..dfbfa7f 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -199,16 +199,19 @@ static int __devinit e1000_validate_option(unsigned int *value,
case enable_option:
switch (*value) {
case OPTION_ENABLED:
- e_info("%s Enabled\n", opt->name);
+ dev_info(&adapter->pdev->dev, "%s Enabled\n",
+ opt->name);
return 0;
case OPTION_DISABLED:
- e_info("%s Disabled\n", opt->name);
+ dev_info(&adapter->pdev->dev, "%s Disabled\n",
+ opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- e_info("%s set to %i\n", opt->name, *value);
+ dev_info(&adapter->pdev->dev, "%s set to %i\n",
+ opt->name, *value);
return 0;
}
break;
@@ -220,7 +223,8 @@ static int __devinit e1000_validate_option(unsigned int *value,
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
- e_info("%s\n", ent->str);
+ dev_info(&adapter->pdev->dev, "%s\n",
+ ent->str);
return 0;
}
}
@@ -230,8 +234,8 @@ static int __devinit e1000_validate_option(unsigned int *value,
BUG();
}
- e_info("Invalid %s value specified (%i) %s\n", opt->name, *value,
- opt->err);
+ dev_info(&adapter->pdev->dev, "Invalid %s value specified (%i) %s\n",
+ opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
@@ -251,8 +255,10 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
int bd = adapter->bd_number;
if (bd >= E1000_MAX_NIC) {
- e_notice("Warning: no configuration for board #%i\n", bd);
- e_notice("Using defaults for all values\n");
+ dev_notice(&adapter->pdev->dev,
+ "Warning: no configuration for board #%i\n", bd);
+ dev_notice(&adapter->pdev->dev,
+ "Using defaults for all values\n");
}
{ /* Transmit Interrupt Delay */
@@ -366,27 +372,32 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
* default values
*/
if (adapter->itr > 4)
- e_info("%s set to default %d\n", opt.name,
- adapter->itr);
+ dev_info(&adapter->pdev->dev,
+ "%s set to default %d\n", opt.name,
+ adapter->itr);
}
adapter->itr_setting = adapter->itr;
switch (adapter->itr) {
case 0:
- e_info("%s turned off\n", opt.name);
+ dev_info(&adapter->pdev->dev, "%s turned off\n",
+ opt.name);
break;
case 1:
- e_info("%s set to dynamic mode\n", opt.name);
+ dev_info(&adapter->pdev->dev,
+ "%s set to dynamic mode\n", opt.name);
adapter->itr = 20000;
break;
case 3:
- e_info("%s set to dynamic conservative mode\n",
- opt.name);
+ dev_info(&adapter->pdev->dev,
+ "%s set to dynamic conservative mode\n",
+ opt.name);
adapter->itr = 20000;
break;
case 4:
- e_info("%s set to simplified (2000-8000 ints) mode\n",
- opt.name);
+ dev_info(&adapter->pdev->dev,
+ "%s set to simplified (2000-8000 ints) mode\n",
+ opt.name);
break;
default:
/*
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 0334d01..b860d4f 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2155,9 +2155,11 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
s32 ret_val;
u32 ctrl;
- ret_val = phy->ops.check_reset_block(hw);
- if (ret_val)
- return 0;
+ if (phy->ops.check_reset_block) {
+ ret_val = phy->ops.check_reset_block(hw);
+ if (ret_val)
+ return 0;
+ }
ret_val = phy->ops.acquire(hw);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index e650839..5e84eaa 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -206,8 +206,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
break;
case e1000_i350:
- case e1000_i210:
- case e1000_i211:
mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
break;
default:
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 35d1e4f..10efcd8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -117,6 +117,7 @@
/* TX Rate Limit Registers */
#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
+#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */
#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
/* Split and Replication RX Control - RW */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ae6d3f3..9e572dd 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -65,19 +65,30 @@ struct igb_adapter;
#define MAX_Q_VECTORS 8
/* Transmit and receive queues */
-#define IGB_MAX_RX_QUEUES ((adapter->vfs_allocated_count ? 2 : \
- (hw->mac.type > e1000_82575 ? 8 : 4)))
-#define IGB_MAX_RX_QUEUES_I210 4
+#define IGB_MAX_RX_QUEUES 8
+#define IGB_MAX_RX_QUEUES_82575 4
#define IGB_MAX_RX_QUEUES_I211 2
-#define IGB_MAX_TX_QUEUES 16
-#define IGB_MAX_TX_QUEUES_I210 4
-#define IGB_MAX_TX_QUEUES_I211 2
+#define IGB_MAX_TX_QUEUES 8
#define IGB_MAX_VF_MC_ENTRIES 30
#define IGB_MAX_VF_FUNCTIONS 8
#define IGB_MAX_VFTA_ENTRIES 128
#define IGB_82576_VF_DEV_ID 0x10CA
#define IGB_I350_VF_DEV_ID 0x1520
+/* NVM version defines */
+#define IGB_MAJOR_MASK 0xF000
+#define IGB_MINOR_MASK 0x0FF0
+#define IGB_BUILD_MASK 0x000F
+#define IGB_COMB_VER_MASK 0x00FF
+#define IGB_MAJOR_SHIFT 12
+#define IGB_MINOR_SHIFT 4
+#define IGB_COMB_VER_SHFT 8
+#define IGB_NVM_VER_INVALID 0xFFFF
+#define IGB_ETRACK_SHIFT 16
+#define NVM_ETRACK_WORD 0x0042
+#define NVM_COMB_VER_OFF 0x0083
+#define NVM_COMB_VER_PTR 0x003d
+
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
@@ -371,6 +382,7 @@ struct igb_adapter {
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
+ char fw_version[32];
};
#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -420,6 +432,7 @@ extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
extern bool igb_has_link(struct igb_adapter *adapter);
extern void igb_set_ethtool_ops(struct net_device *);
extern void igb_power_up_link(struct igb_adapter *);
+extern void igb_set_fw_version(struct igb_adapter *);
#ifdef CONFIG_IGB_PTP
extern void igb_ptp_init(struct igb_adapter *adapter);
extern void igb_ptp_remove(struct igb_adapter *adapter);
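Editor's note: the new IGB_*_MASK/SHIFT constants above describe how the 16-bit NVM version word is split into 4 bits of major, 8 bits of minor and 4 bits of build. A standalone sketch of that decoding, using an invented example word rather than a real EEPROM read:

/* User-space sketch of the version-word decode used by igb_set_fw_version();
 * the fw_version value below is illustrative, not read from real NVM.
 */
#include <stdint.h>
#include <stdio.h>

#define IGB_MAJOR_MASK	0xF000
#define IGB_MINOR_MASK	0x0FF0
#define IGB_BUILD_MASK	0x000F
#define IGB_MAJOR_SHIFT	12
#define IGB_MINOR_SHIFT	4

int main(void)
{
	uint16_t fw_version = 0x1234;	/* example word: major 1, minor 35, build 4 */

	printf("%d.%d%d\n",
	       (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
	       (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
	       (fw_version & IGB_BUILD_MASK));
	return 0;
}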
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 812d4f9..a19c84c 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -710,6 +710,7 @@ static int igb_set_eeprom(struct net_device *netdev,
if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
hw->nvm.ops.update(hw);
+ igb_set_fw_version(adapter);
kfree(eeprom_buff);
return ret_val;
}
@@ -718,20 +719,16 @@ static void igb_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- u16 eeprom_data;
strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
- /* EEPROM image version # is reported as firmware version # for
- * 82575 controllers */
- adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data);
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d-%d",
- (eeprom_data & 0xF000) >> 12,
- (eeprom_data & 0x0FF0) >> 4,
- eeprom_data & 0x000F);
-
+ /*
+ * EEPROM image version # is reported as firmware version # for
+ * 82575 controllers
+ */
+ strlcpy(drvinfo->fw_version, adapter->fw_version,
+ sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_stats = IGB_STATS_LEN;
@@ -2271,6 +2268,38 @@ static void igb_ethtool_complete(struct net_device *netdev)
pm_runtime_put(&adapter->pdev->dev);
}
+#ifdef CONFIG_IGB_PTP
+static int igb_ethtool_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL) |
+ (1 << HWTSTAMP_FILTER_SOME) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+#endif
static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
@@ -2299,6 +2328,9 @@ static const struct ethtool_ops igb_ethtool_ops = {
.set_coalesce = igb_set_coalesce,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
+#ifdef CONFIG_IGB_PTP
+ .get_ts_info = igb_ethtool_get_ts_info,
+#endif
};
void igb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index dd3bfe8..1050411 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -59,9 +59,9 @@
#endif
#include "igb.h"
-#define MAJ 3
-#define MIN 4
-#define BUILD 7
+#define MAJ 4
+#define MIN 0
+#define BUILD 1
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
@@ -1048,11 +1048,6 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter)
if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
numvecs += adapter->num_tx_queues;
- /* i210 and i211 can only have 4 MSIX vectors for rx/tx queues. */
- if ((adapter->hw.mac.type == e1000_i210)
- || (adapter->hw.mac.type == e1000_i211))
- numvecs = 4;
-
/* store the number of vectors reserved for queues */
adapter->num_q_vectors = numvecs;
@@ -1505,11 +1500,12 @@ static void igb_configure(struct igb_adapter *adapter)
**/
void igb_power_up_link(struct igb_adapter *adapter)
{
+ igb_reset_phy(&adapter->hw);
+
if (adapter->hw.phy.media_type == e1000_media_type_copper)
igb_power_up_phy_copper(&adapter->hw);
else
igb_power_up_serdes_link_82575(&adapter->hw);
- igb_reset_phy(&adapter->hw);
}
/**
@@ -1821,6 +1817,69 @@ static const struct net_device_ops igb_netdev_ops = {
};
/**
+ * igb_set_fw_version - Configure version string for ethtool
+ * @adapter: adapter struct
+ *
+ **/
+void igb_set_fw_version(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
+ u16 major, build, patch, fw_version;
+ u32 etrack_id;
+
+ hw->nvm.ops.read(hw, 5, 1, &fw_version);
+ if (adapter->hw.mac.type != e1000_i211) {
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
+ etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl;
+
+ /* combo image version needs to be found */
+ hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
+ if ((comb_offset != 0x0) &&
+ (comb_offset != IGB_NVM_VER_INVALID)) {
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+ + 1), 1, &comb_verh);
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
+ 1, &comb_verl);
+
+ /* Only display Option Rom if it exists and is valid */
+ if ((comb_verh && comb_verl) &&
+ ((comb_verh != IGB_NVM_VER_INVALID) &&
+ (comb_verl != IGB_NVM_VER_INVALID))) {
+ major = comb_verl >> IGB_COMB_VER_SHFT;
+ build = (comb_verl << IGB_COMB_VER_SHFT) |
+ (comb_verh >> IGB_COMB_VER_SHFT);
+ patch = comb_verh & IGB_COMB_VER_MASK;
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d%d, 0x%08x, %d.%d.%d",
+ (fw_version & IGB_MAJOR_MASK) >>
+ IGB_MAJOR_SHIFT,
+ (fw_version & IGB_MINOR_MASK) >>
+ IGB_MINOR_SHIFT,
+ (fw_version & IGB_BUILD_MASK),
+ etrack_id, major, build, patch);
+ goto out;
+ }
+ }
+ snprintf(adapter->fw_version, sizeof(adapter->fw_version),
+ "%d.%d%d, 0x%08x",
+ (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
+ (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
+ (fw_version & IGB_BUILD_MASK), etrack_id);
+ } else {
+ snprintf(adapter->fw_version, sizeof(adapter->fw_version),
+ "%d.%d%d",
+ (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
+ (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
+ (fw_version & IGB_BUILD_MASK));
+ }
+out:
+ return;
+}
+
+/**
* igb_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in igb_pci_tbl
@@ -2030,6 +2089,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
goto err_eeprom;
}
+ /* get firmware version for ethtool -i */
+ igb_set_fw_version(adapter);
+
setup_timer(&adapter->watchdog_timer, igb_watchdog,
(unsigned long) adapter);
setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
@@ -2338,6 +2400,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
+ u32 max_rss_queues;
pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
@@ -2370,40 +2433,69 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
} else
adapter->vfs_allocated_count = max_vfs;
break;
- case e1000_i210:
- case e1000_i211:
- adapter->vfs_allocated_count = 0;
- break;
default:
break;
}
#endif /* CONFIG_PCI_IOV */
+
+ /* Determine the maximum number of RSS queues supported. */
switch (hw->mac.type) {
+ case e1000_i211:
+ max_rss_queues = IGB_MAX_RX_QUEUES_I211;
+ break;
+ case e1000_82575:
case e1000_i210:
- adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I210,
- num_online_cpus());
+ max_rss_queues = IGB_MAX_RX_QUEUES_82575;
+ break;
+ case e1000_i350:
+ /* I350 cannot do RSS and SR-IOV at the same time */
+ if (!!adapter->vfs_allocated_count) {
+ max_rss_queues = 1;
+ break;
+ }
+ /* fall through */
+ case e1000_82576:
+ if (!!adapter->vfs_allocated_count) {
+ max_rss_queues = 2;
+ break;
+ }
+ /* fall through */
+ case e1000_82580:
+ default:
+ max_rss_queues = IGB_MAX_RX_QUEUES;
break;
+ }
+
+ adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
+
+ /* Determine if we need to pair queues. */
+ switch (hw->mac.type) {
+ case e1000_82575:
case e1000_i211:
- adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I211,
- num_online_cpus());
+ /* Device supports enough interrupts without queue pairing. */
break;
+ case e1000_82576:
+ /*
+ * If VFs are going to be allocated with RSS queues then we
+ * should pair the queues in order to conserve interrupts due
+ * to limited supply.
+ */
+ if ((adapter->rss_queues > 1) &&
+ (adapter->vfs_allocated_count > 6))
+ adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+ /* fall through */
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i210:
default:
- adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES,
- num_online_cpus());
+ /*
+ * If rss_queues > half of max_rss_queues, pair the queues in
+ * order to conserve interrupts due to limited supply.
+ */
+ if (adapter->rss_queues > (max_rss_queues / 2))
+ adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
break;
}
- /* i350 cannot do RSS and SR-IOV at the same time */
- if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
- adapter->rss_queues = 1;
-
- /*
- * if rss_queues > 4 or vfs are going to be allocated with rss_queues
- * then we should combine the queues into a queue pair in order to
- * conserve interrupts due to limited supply
- */
- if ((adapter->rss_queues > 4) ||
- ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
- adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
/* Setup and initialize a copy of the hw vlan table array */
adapter->shadow_vfta = kzalloc(sizeof(u32) *
@@ -4917,7 +5009,7 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf)
unsigned int device_id;
u16 thisvf_devfn;
- random_ether_addr(mac_addr);
+ eth_random_addr(mac_addr);
igb_set_vf_mac(adapter, vf, mac_addr);
switch (adapter->hw.mac.type) {
@@ -5326,7 +5418,7 @@ static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
/* generate a new mac address as we were hotplug removed/added */
if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
- random_ether_addr(vf_mac);
+ eth_random_addr(vf_mac);
/* process remaining reset events */
igb_vf_reset(adapter, vf);
@@ -5686,6 +5778,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
/**
* igb_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: pointer to q_vector containing needed info
+ *
* returns true if ring is completely cleaned
**/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
@@ -6997,6 +7090,11 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
}
wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
+ /*
+ * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+ * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
+ */
+ wr32(E1000_RTTBCNRM, 0x14);
wr32(E1000_RTTBCNRC, bcnrc_val);
}
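Editor's note: the reworked igb_sw_init() above first caps rss_queues at a per-MAC max_rss_queues and then decides whether to pair Tx/Rx queues onto shared interrupt vectors. A compact sketch of that policy as a pure function; the enum and the values in main() are stand-ins for the driver's types, not the real ones.

#include <stdbool.h>
#include <stdio.h>

enum mac_type { MAC_82575, MAC_82576, MAC_82580, MAC_I210, MAC_I211, MAC_I350 };

static bool igb_needs_queue_pairs(enum mac_type type, unsigned int rss_queues,
				  unsigned int max_rss_queues,
				  unsigned int vfs_allocated)
{
	switch (type) {
	case MAC_82575:
	case MAC_I211:
		/* enough interrupts available without pairing */
		return false;
	case MAC_82576:
		/* VFs eat into the vector budget, so pair early */
		if (rss_queues > 1 && vfs_allocated > 6)
			return true;
		/* otherwise fall through to the generic rule */
	default:
		/* pair once more than half of the supported queues are in use */
		return rss_queues > (max_rss_queues / 2);
	}
}

int main(void)
{
	printf("82580, 8 of 8 queues: %d\n",
	       igb_needs_queue_pairs(MAC_82580, 8, 8, 0));
	printf("i211,  2 of 2 queues: %d\n",
	       igb_needs_queue_pairs(MAC_I211, 2, 2, 0));
	return 0;
}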
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index d5ee7fa..c846ea9 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -330,7 +330,17 @@ void igb_ptp_init(struct igb_adapter *adapter)
void igb_ptp_remove(struct igb_adapter *adapter)
{
- cancel_delayed_work_sync(&adapter->overflow_work);
+ switch (adapter->hw.mac.type) {
+ case e1000_i211:
+ case e1000_i210:
+ case e1000_i350:
+ case e1000_82580:
+ case e1000_82576:
+ cancel_delayed_work_sync(&adapter->overflow_work);
+ break;
+ default:
+ return;
+ }
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 8ce6706..90eef07 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -357,21 +357,28 @@ static int igbvf_set_coalesce(struct net_device *netdev,
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
- ((ec->rx_coalesce_usecs > 3) &&
- (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
- (ec->rx_coalesce_usecs == 2))
- return -EINVAL;
-
- /* convert to rate of irq's per second */
- if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
+ if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) &&
+ (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
+ adapter->current_itr = ec->rx_coalesce_usecs << 2;
+ adapter->requested_itr = 1000000000 /
+ (adapter->current_itr * 256);
+ } else if ((ec->rx_coalesce_usecs == 3) ||
+ (ec->rx_coalesce_usecs == 2)) {
adapter->current_itr = IGBVF_START_ITR;
adapter->requested_itr = ec->rx_coalesce_usecs;
- } else {
- adapter->current_itr = ec->rx_coalesce_usecs << 2;
+ } else if (ec->rx_coalesce_usecs == 0) {
+ /*
+ * The user's desire is to turn off interrupt throttling
+ * altogether, but due to HW limitations, we can't do that.
+ * Instead we set a very small value in EITR, which would
+ * allow ~967k interrupts per second, but allow the adapter's
+ * internal clocking to still function properly.
+ */
+ adapter->current_itr = 4;
adapter->requested_itr = 1000000000 /
(adapter->current_itr * 256);
- }
+ } else
+ return -EINVAL;
writel(adapter->current_itr,
hw->hw_addr + adapter->rx_ring->itr_register);
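Editor's note: the rewritten igbvf_set_coalesce() above maps the requested microseconds into 256 ns EITR units with "<< 2" and clamps a request of 0 to the smallest usable interval of 4 units. A quick standalone check of that arithmetic with illustrative values only; the resulting floor rate is on the order of the "~967k interrupts per second" the driver comment mentions.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t usecs = 10;				/* example user request */
	uint32_t itr_units = usecs << 2;		/* interval in 256 ns units */
	uint32_t rate = 1000000000u / (itr_units * 256u);
	uint32_t floor_rate = 1000000000u / (4u * 256u);

	printf("10 us -> EITR %u -> %u ints/s\n", itr_units, rate);
	printf("minimum interval (4 units) -> %u ints/s\n", floor_rate);
	return 0;
}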
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 8ec74b0..0696abf 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -766,6 +766,7 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
/**
* igbvf_clean_tx_irq - Reclaim resources after transmit completes
* @adapter: board private structure
+ *
* returns true if ring is completely cleaned
**/
static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index 30a6cc4..eea0e10 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -283,7 +283,8 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set)
return err;
}
-/** e1000_rlpml_set_vf - Set the maximum receive packet length
+/**
+ * e1000_rlpml_set_vf - Set the maximum receive packet length
* @hw: pointer to the HW structure
* @max_size: value to assign to max frame size
**/
@@ -302,7 +303,7 @@ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
* e1000_rar_set_vf - set device MAC address
* @hw: pointer to the HW structure
* @addr: pointer to the receive address
- * @index receive address array register
+ * @index: receive address array register
**/
static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
{
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
index 99b69ad..bf9a220 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
@@ -32,6 +32,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/pci_ids.h>
#include "ixgb_hw.h"
#include "ixgb_ids.h"
@@ -96,7 +97,7 @@ static u32 ixgb_mac_reset(struct ixgb_hw *hw)
ASSERT(!(ctrl_reg & IXGB_CTRL0_RST));
#endif
- if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID) {
+ if (hw->subsystem_vendor_id == PCI_VENDOR_ID_SUN) {
ctrl_reg = /* Enable interrupt from XFP and SerDes */
IXGB_CTRL1_GPI0_EN |
IXGB_CTRL1_SDP6_DIR |
@@ -271,7 +272,7 @@ ixgb_identify_phy(struct ixgb_hw *hw)
}
/* update phy type for sun specific board */
- if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID)
+ if (hw->subsystem_vendor_id == PCI_VENDOR_ID_SUN)
phy_type = ixgb_phy_type_bcm;
return phy_type;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
index 2a58847..32c1b30 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
@@ -33,11 +33,6 @@
** The Device and Vendor IDs for 10 Gigabit MACs
**********************************************************************/
-#define INTEL_VENDOR_ID 0x8086
-#define INTEL_SUBVENDOR_ID 0x8086
-#define SUN_VENDOR_ID 0x108E
-#define SUN_SUBVENDOR_ID 0x108E
-
#define IXGB_DEVICE_ID_82597EX 0x1048
#define IXGB_DEVICE_ID_82597EX_SR 0x1A48
#define IXGB_DEVICE_ID_82597EX_LR 0x1B48
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 5fce363..d05fc95 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -54,13 +54,13 @@ MODULE_PARM_DESC(copybreak,
* Class, Class Mask, private data (not used) }
*/
static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = {
- {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
+ {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
+ {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_CX4,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
+ {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_SR,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
+ {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_LR,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
/* required last entry */
@@ -195,7 +195,7 @@ ixgb_irq_enable(struct ixgb_adapter *adapter)
{
u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
IXGB_INT_TXDW | IXGB_INT_LSC;
- if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
+ if (adapter->hw.subsystem_vendor_id == PCI_VENDOR_ID_SUN)
val |= IXGB_INT_GPI0;
IXGB_WRITE_REG(&adapter->hw, IMS, val);
IXGB_WRITE_FLUSH(&adapter->hw);
@@ -2276,9 +2276,9 @@ static void ixgb_netpoll(struct net_device *dev)
#endif
/**
- * ixgb_io_error_detected() - called when PCI error is detected
- * @pdev pointer to pci device with error
- * @state pci channel state after error
+ * ixgb_io_error_detected - called when PCI error is detected
+ * @pdev: pointer to pci device with error
+ * @state: pci channel state after error
*
* This callback is called by the PCI subsystem whenever
* a PCI bus error is detected.
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 0bdf06b..5fd5d04 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,11 +34,11 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
- ixgbe_mbx.o ixgbe_x540.o ixgbe_sysfs.o ixgbe_lib.o
+ ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o
-
+ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 3ef3c52..b9623e9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -77,17 +77,18 @@
#define IXGBE_MAX_FCPAUSE 0xFFFF
/* Supported Rx Buffer Sizes */
-#define IXGBE_RXBUFFER_512 512 /* Used for packet split */
+#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
/*
- * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN mans we
- * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
- * this adds up to 512 bytes of extra data meaning the smallest allocation
- * we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
+ * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
+ * this adds up to 448 bytes of extra data.
+ *
+ * Since netdev_alloc_skb now allocates a page fragment we can use a value
+ * of 256 and the resultant skb will have a truesize of 960 or less.
*/
-#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512
+#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
@@ -113,7 +114,7 @@
#define IXGBE_MAX_VFTA_ENTRIES 128
#define MAX_EMULATION_MAC_ADDRS 16
#define IXGBE_MAX_PF_MACVLANS 15
-#define VMDQ_P(p) ((p) + adapter->num_vfs)
+#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
#define IXGBE_82599_VF_DEVICE_ID 0x10ED
#define IXGBE_X540_VF_DEVICE_ID 0x1515
@@ -130,7 +131,6 @@ struct vf_data_storage {
u16 tx_rate;
u16 vlan_count;
u8 spoofchk_enabled;
- struct pci_dev *vfdev;
};
struct vf_macvlans {
@@ -196,7 +196,7 @@ enum ixgbe_ring_state_t {
__IXGBE_HANG_CHECK_ARMED,
__IXGBE_RX_RSC_ENABLED,
__IXGBE_RX_CSUM_UDP_ZERO_ERR,
- __IXGBE_RX_FCOE_BUFSZ,
+ __IXGBE_RX_FCOE,
};
#define check_for_tx_hang(ring) \
@@ -278,10 +278,16 @@ enum ixgbe_ring_f_enum {
#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
#endif /* IXGBE_FCOE */
struct ixgbe_ring_feature {
- int indices;
- int mask;
+ u16 limit; /* upper limit on feature indices */
+ u16 indices; /* current value of indices */
+ u16 mask; /* Mask used for feature to ring mapping */
+ u16 offset; /* offset to start of feature */
} ____cacheline_internodealigned_in_smp;
+#define IXGBE_82599_VMDQ_8Q_MASK 0x78
+#define IXGBE_82599_VMDQ_4Q_MASK 0x7C
+#define IXGBE_82599_VMDQ_2Q_MASK 0x7E
+
/*
* FCoE requires that all Rx buffers be over 2200 bytes in length. Since
* this is twice the size of a half page we need to double the page order
@@ -290,7 +296,7 @@ struct ixgbe_ring_feature {
#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
{
- return test_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state) ? 1 : 0;
+ return test_bit(__IXGBE_RX_FCOE, &ring->state) ? 1 : 0;
}
#else
#define ixgbe_rx_pg_order(_ring) 0
@@ -315,7 +321,7 @@ struct ixgbe_ring_container {
? 8 : 1)
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
-/* MAX_MSIX_Q_VECTORS of these are allocated,
+/* MAX_Q_VECTORS of these are allocated,
* but we only use one per queue-specific vector.
*/
struct ixgbe_q_vector {
@@ -401,11 +407,11 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
#define NON_Q_VECTORS (OTHER_VECTOR)
#define MAX_MSIX_VECTORS_82599 64
-#define MAX_MSIX_Q_VECTORS_82599 64
+#define MAX_Q_VECTORS_82599 64
#define MAX_MSIX_VECTORS_82598 18
-#define MAX_MSIX_Q_VECTORS_82598 16
+#define MAX_Q_VECTORS_82598 16
-#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599
+#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
#define MIN_MSIX_Q_VECTORS 1
@@ -427,35 +433,33 @@ struct ixgbe_adapter {
* thus the additional *_CAPABLE flags.
*/
u32 flags;
-#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1)
-#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2)
-#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3)
-#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4)
-#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6)
-#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7)
-#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8)
-#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9)
-#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10)
-#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11)
-#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12)
-#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13)
-#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14)
-#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16)
-#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17)
-#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18)
-#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
-#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20)
-#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
-#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 23)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 24)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 25)
-#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 26)
-#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 27)
-#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 28)
-#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 29)
+#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0)
+#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
+#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2)
+#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4)
+#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5)
+#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6)
+#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 7)
+#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8)
+#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9)
+#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10)
+#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 11)
+#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 12)
+#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 13)
+#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 14)
+#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 15)
+#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 16)
+#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 17)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 18)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 19)
+#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 20)
+#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 21)
+#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22)
+#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23)
u32 flags2;
-#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
+#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0)
#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3)
@@ -496,7 +500,7 @@ struct ixgbe_adapter {
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
- struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+ struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];
/* DCB parameters */
struct ieee_pfc *ixgbe_ieee_pfc;
@@ -507,8 +511,8 @@ struct ixgbe_adapter {
u8 dcbx_cap;
enum ixgbe_fc_mode last_lfc_mode;
- int num_msix_vectors;
- int max_msix_q_vectors; /* true count of q_vectors for device */
+ int num_q_vectors; /* current number of q_vectors for device */
+ int max_q_vectors; /* true count of q_vectors for device */
struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
struct msix_entry *msix_entries;
@@ -561,6 +565,7 @@ struct ixgbe_adapter {
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
+ int rx_hwtstamp_filter;
u32 base_incval;
u32 cycle_speed;
#endif /* CONFIG_IXGBE_PTP */
@@ -686,7 +691,6 @@ extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
u8 *hdr_len);
-extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb);
@@ -695,6 +699,8 @@ extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc);
extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
extern int ixgbe_fcoe_enable(struct net_device *netdev);
extern int ixgbe_fcoe_disable(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
@@ -704,6 +710,7 @@ extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
struct netdev_fcoe_hbainfo *info);
+extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
#endif /* IXGBE_FCOE */
static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
@@ -718,6 +725,7 @@ extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb);
extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+ union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb);
extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
struct ifreq *ifr, int cmd);
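Editor's note: with the new ring_feature layout above, VMDQ_P(p) maps a pool index relative to the PF onto an absolute pool number by adding ring_feature[RING_F_VMDQ].offset rather than num_vfs. A minimal sketch of that mapping, mirroring only the fields visible in the hunk; the offset value used below is made up for the example.

#include <stdint.h>
#include <stdio.h>

struct ring_feature {
	uint16_t limit;		/* upper limit on feature indices */
	uint16_t indices;	/* current value of indices */
	uint16_t mask;		/* mask used for feature to ring mapping */
	uint16_t offset;	/* offset to start of feature */
};

/* VMDQ_P(p) from ixgbe.h, written as a function for the example */
static unsigned int vmdq_pool(const struct ring_feature *vmdq, unsigned int p)
{
	return p + vmdq->offset;
}

int main(void)
{
	/* example: 16 pools reserved ahead of the PF, so the PF starts at 16 */
	struct ring_feature vmdq = { .limit = 8, .indices = 8, .offset = 16 };

	printf("absolute pool for VMDQ_P(0): %u\n", vmdq_pool(&vmdq, 0));
	return 0;
}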
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index dee64d2..50fc137 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -241,7 +241,9 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
/* Determine 1G link capabilities off of SFP+ type */
if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
*negotiation = true;
goto out;
@@ -1023,6 +1025,9 @@ mac_reset_top:
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
hw->mac.san_addr, 0, IXGBE_RAH_AV);
+ /* Save the SAN MAC RAR index */
+ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
/* Reserve the last RAR for the SAN MAC address */
hw->mac.num_rar_entries--;
}
@@ -2104,6 +2109,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.set_rar = &ixgbe_set_rar_generic,
.clear_rar = &ixgbe_clear_rar_generic,
.set_vmdq = &ixgbe_set_vmdq_generic,
+ .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic,
.clear_vmdq = &ixgbe_clear_vmdq_generic,
.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 77ac41f..90e41db 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -2848,6 +2848,31 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
}
/**
+ * This function should only be involved in the IOV mode.
+ * In IOV mode, Default pool is next pool after the number of
+ * VFs advertized and not 0.
+ * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
+ *
+ * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @vmdq: VMDq pool index
+ **/
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
+{
+ u32 rar = hw->mac.san_mac_rar_index;
+
+ if (vmdq < 32) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
+ }
+
+ return 0;
+}
+
+/**
* ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
* @hw: pointer to hardware structure
**/
@@ -3132,7 +3157,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
}
/**
- * ixgbe_get_wwn_prefix_generic Get alternative WWNN/WWPN prefix from
+ * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
* the EEPROM
* @hw: pointer to hardware structure
* @wwnn_prefix: the alternative WWNN prefix
@@ -3200,20 +3225,22 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
* PFVFSPOOF register array is size 8 with 8 bits assigned to
* MAC anti-spoof enables in each register array element.
*/
- for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+ for (j = 0; j < pf_target_reg; j++)
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
- /* If not enabling anti-spoofing then done */
- if (!enable)
- return;
-
/*
* The PF should be allowed to spoof so that it can support
- * emulation mode NICs. Reset the bit assigned to the PF
+ * emulation mode NICs. Do not set the bits assigned to the PF
*/
- pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
- pfvfspoof ^= (1 << pf_target_shift);
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
+ pfvfspoof &= (1 << pf_target_shift) - 1;
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
+
+ /*
+ * Remaining pools belong to the PF so they do not need to have
+ * anti-spoofing enabled.
+ */
+ for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
}
/**
@@ -3325,6 +3352,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
* ixgbe_calculate_checksum - Calculate checksum for buffer
* @buffer: pointer to EEPROM
* @length: size of EEPROM to calculate a checksum for
+ *
* Calculates the checksum for some buffer on a specified length. The
* checksum calculated is returned.
**/
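Editor's note: ixgbe_set_vmdq_san_mac_generic() above selects the default pool for the SAN MAC by setting exactly one bit across the MPSAR_LO/MPSAR_HI register pair. The same bit-split logic in a standalone sketch, with the register writes replaced by plain stores.

#include <stdint.h>
#include <stdio.h>

struct mpsar { uint32_t lo, hi; };

/* 64 possible pools are tracked as two 32-bit halves: indices below 32 set a
 * bit in LO, the rest set a bit in HI, and the other half is cleared.
 */
static void mpsar_select_pool(struct mpsar *reg, unsigned int vmdq)
{
	if (vmdq < 32) {
		reg->lo = 1u << vmdq;
		reg->hi = 0;
	} else {
		reg->lo = 0;
		reg->hi = 1u << (vmdq - 32);
	}
}

int main(void)
{
	struct mpsar reg;

	mpsar_select_pool(&reg, 33);
	printf("lo=0x%08x hi=0x%08x\n", reg.lo, reg.hi);
	return 0;
}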
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 6222fdb..d813d11 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -85,6 +85,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 8bfaaee..9bc17c0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -180,67 +180,83 @@ out:
void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
{
- int i;
+ struct tc_configuration *tc_config = &cfg->tc_config[0];
+ int tc;
- *pfc_en = 0;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
- *pfc_en |= !!(cfg->tc_config[i].dcb_pfc & 0xF) << i;
+ for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
+ if (tc_config[tc].dcb_pfc != pfc_disabled)
+ *pfc_en |= 1 << tc;
+ }
}
void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,
u16 *refill)
{
- struct tc_bw_alloc *p;
- int i;
+ struct tc_configuration *tc_config = &cfg->tc_config[0];
+ int tc;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &cfg->tc_config[i].path[direction];
- refill[i] = p->data_credits_refill;
- }
+ for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
+ refill[tc] = tc_config[tc].path[direction].data_credits_refill;
}
void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max)
{
- int i;
+ struct tc_configuration *tc_config = &cfg->tc_config[0];
+ int tc;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
- max[i] = cfg->tc_config[i].desc_credits_max;
+ for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
+ max[tc] = tc_config[tc].desc_credits_max;
}
void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction,
u8 *bwgid)
{
- struct tc_bw_alloc *p;
- int i;
+ struct tc_configuration *tc_config = &cfg->tc_config[0];
+ int tc;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &cfg->tc_config[i].path[direction];
- bwgid[i] = p->bwg_id;
- }
+ for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
+ bwgid[tc] = tc_config[tc].path[direction].bwg_id;
}
void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
u8 *ptype)
{
- struct tc_bw_alloc *p;
- int i;
+ struct tc_configuration *tc_config = &cfg->tc_config[0];
+ int tc;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &cfg->tc_config[i].path[direction];
- ptype[i] = p->prio_type;
+ for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
+ ptype[tc] = tc_config[tc].path[direction].prio_type;
+}
+
+u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
+{
+ struct tc_configuration *tc_config = &cfg->tc_config[0];
+ u8 prio_mask = 1 << up;
+ u8 tc = cfg->num_tcs.pg_tcs;
+
+ /* If tc is 0 then DCB is likely not enabled or supported */
+ if (!tc)
+ goto out;
+
+ /*
+ * Test from maximum TC to 1 and report the first match we find. If
+ * we find no match we can assume that the TC is 0 since the TC must
+ * be set for all user priorities
+ */
+ for (tc--; tc; tc--) {
+ if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
+ break;
}
+out:
+ return tc;
}
void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
{
- int i, up;
- unsigned long bitmap;
+ u8 up;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- bitmap = cfg->tc_config[i].path[direction].up_to_tc_bitmap;
- for_each_set_bit(up, &bitmap, MAX_USER_PRIORITY)
- map[up] = i;
- }
+ for (up = 0; up < MAX_USER_PRIORITY; up++)
+ map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up);
}
/**
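Editor's note: the DCB rework above builds pfc_en as a per-TC bitmask and resolves a user priority to a traffic class by scanning up_to_tc_bitmap from the highest TC downwards, falling back to TC 0. A self-contained sketch of both helpers with simplified types; the config layout here is illustrative, not the driver's struct.

#include <stdint.h>
#include <stdio.h>

#define MAX_TRAFFIC_CLASS 8

struct tc_cfg {
	int pfc_enabled;		/* stand-in for dcb_pfc != pfc_disabled */
	uint8_t up_to_tc_bitmap;	/* which user priorities map to this TC */
};

static uint8_t unpack_pfc(const struct tc_cfg *tc_config, int num_tcs)
{
	uint8_t pfc_en = 0;
	int tc;

	for (tc = 0; tc < num_tcs; tc++)
		if (tc_config[tc].pfc_enabled)
			pfc_en |= 1u << tc;
	return pfc_en;
}

static uint8_t tc_from_up(const struct tc_cfg *tc_config, uint8_t num_tcs, uint8_t up)
{
	uint8_t prio_mask = 1u << up;
	uint8_t tc = num_tcs;

	if (!tc)
		return 0;		/* DCB likely not enabled or supported */
	for (tc--; tc; tc--)		/* first match from the top wins */
		if (prio_mask & tc_config[tc].up_to_tc_bitmap)
			break;
	return tc;			/* no match means TC 0 */
}

int main(void)
{
	struct tc_cfg cfg[MAX_TRAFFIC_CLASS] = {
		[0] = { .pfc_enabled = 0, .up_to_tc_bitmap = 0x3F },
		[1] = { .pfc_enabled = 1, .up_to_tc_bitmap = 0xC0 },
	};

	printf("pfc_en = 0x%02x\n", unpack_pfc(cfg, MAX_TRAFFIC_CLASS));
	printf("UP 6 -> TC %u\n", tc_from_up(cfg, 2, 6));
	return 0;
}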
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index 24333b7..1f4108e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -146,6 +146,7 @@ void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);
void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);
void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *);
+u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8);
/* DCB credits calculation */
s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 5164a21..f1e002d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -151,34 +151,21 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{
- int err = 0;
- u8 prio_tc[MAX_USER_PRIORITY] = {0};
- int i;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
/* Fail command if not in CEE mode */
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 1;
/* verify there is something to do, if not then exit */
- if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
- goto out;
-
- if (state > 0) {
- err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs);
- ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
- } else {
- err = ixgbe_setup_tc(netdev, 0);
- }
-
- if (err)
+ if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
goto out;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
-
+ err = ixgbe_setup_tc(netdev,
+ state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0);
out:
- return err ? 1 : 0;
+ return !!err;
}
static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
@@ -584,9 +571,6 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
if (err)
goto err_out;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- netdev_set_prio_tc_map(dev, i, ets->prio_tc[i]);
-
err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
err_out:
return err;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 3178f1e..4104ea25 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -154,100 +154,60 @@ static int ixgbe_get_settings(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ ixgbe_link_speed supported_link;
u32 link_speed = 0;
+ bool autoneg;
bool link_up;
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->autoneg = AUTONEG_ENABLE;
- ecmd->transceiver = XCVR_EXTERNAL;
- if ((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->phy.multispeed_fiber)) {
- ecmd->supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg);
-
- switch (hw->mac.type) {
- case ixgbe_mac_X540:
- ecmd->supported |= SUPPORTED_100baseT_Full;
- break;
- default:
- break;
- }
-
- ecmd->advertising = ADVERTISED_Autoneg;
- if (hw->phy.autoneg_advertised) {
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_100_FULL)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->advertising |= ADVERTISED_10000baseT_Full;
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_1GB_FULL)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
- } else {
- /*
- * Default advertised modes in case
- * phy.autoneg_advertised isn't set.
- */
- ecmd->advertising |= (ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full);
- if (hw->mac.type == ixgbe_mac_X540)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- }
-
- if (hw->phy.media_type == ixgbe_media_type_copper) {
- ecmd->supported |= SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->port = PORT_TP;
- } else {
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_FIBRE;
- }
- } else if (hw->phy.media_type == ixgbe_media_type_backplane) {
- /* Set as FIBRE until SERDES defined in kernel */
- if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
- ecmd->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->autoneg = AUTONEG_DISABLE;
- } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
- (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
- ecmd->supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_Autoneg |
- ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- } else {
- ecmd->supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- }
+ hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
+
+ /* set the supported link speeds */
+ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
+ ecmd->supported |= SUPPORTED_10000baseT_Full;
+ if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
+ ecmd->supported |= SUPPORTED_1000baseT_Full;
+ if (supported_link & IXGBE_LINK_SPEED_100_FULL)
+ ecmd->supported |= SUPPORTED_100baseT_Full;
+
+ /* set the advertised speeds */
+ if (hw->phy.autoneg_advertised) {
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
} else {
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising = (ADVERTISED_10000baseT_Full |
- ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->autoneg = AUTONEG_DISABLE;
+ /* default modes in case phy.autoneg_advertised isn't set */
+ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ if (supported_link & IXGBE_LINK_SPEED_100_FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
}
- /* Get PHY type */
+ if (autoneg) {
+ ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ ecmd->autoneg = AUTONEG_ENABLE;
+ } else
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ /* Determine the remaining settings based on the PHY type. */
switch (adapter->hw.phy.type) {
case ixgbe_phy_tn:
case ixgbe_phy_aq:
case ixgbe_phy_cu_unknown:
- /* Copper 10G-BASET */
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;
break;
case ixgbe_phy_qt:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
break;
case ixgbe_phy_nl:
@@ -257,42 +217,59 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_phy_sfp_avago:
case ixgbe_phy_sfp_intel:
case ixgbe_phy_sfp_unknown:
- switch (adapter->hw.phy.sfp_type) {
/* SFP+ devices, further checking needed */
+ switch (adapter->hw.phy.sfp_type) {
case ixgbe_sfp_type_da_cu:
case ixgbe_sfp_type_da_cu_core0:
case ixgbe_sfp_type_da_cu_core1:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_DA;
break;
case ixgbe_sfp_type_sr:
case ixgbe_sfp_type_lr:
case ixgbe_sfp_type_srlr_core0:
case ixgbe_sfp_type_srlr_core1:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
break;
case ixgbe_sfp_type_not_present:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_NONE;
break;
case ixgbe_sfp_type_1g_cu_core0:
case ixgbe_sfp_type_1g_cu_core1:
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;
- ecmd->supported = SUPPORTED_TP;
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_TP);
+ break;
+ case ixgbe_sfp_type_1g_sx_core0:
+ case ixgbe_sfp_type_1g_sx_core1:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
break;
case ixgbe_sfp_type_unknown:
default:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_OTHER;
break;
}
break;
case ixgbe_phy_xaui:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_NONE;
break;
case ixgbe_phy_unknown:
case ixgbe_phy_generic:
case ixgbe_phy_sfp_unsupported:
default:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_OTHER;
break;
}
@@ -2113,7 +2090,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector;
int i;
- int num_vectors;
u16 tx_itr_param, rx_itr_param;
bool need_reset = false;
@@ -2149,12 +2125,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
/* check the old value and enable RSC if necessary */
need_reset = ixgbe_update_rsc(adapter);
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- else
- num_vectors = 1;
-
- for (i = 0; i < num_vectors; i++) {
+ for (i = 0; i < adapter->num_q_vectors; i++) {
q_vector = adapter->q_vector[i];
if (q_vector->tx.count && !q_vector->rx.count)
/* tx only */
@@ -2274,10 +2245,6 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
{
cmd->data = 0;
- /* if RSS is disabled then report no hashing */
- if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
- return 0;
-
/* Report default options for RSS on ixgbe */
switch (cmd->flow_type) {
case TCP_V4_FLOW:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index bc07933..ae73ef1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -38,7 +38,7 @@
/**
* ixgbe_fcoe_clear_ddp - clear the given ddp context
- * @ddp - ptr to the ixgbe_fcoe_ddp
+ * @ddp: ptr to the ixgbe_fcoe_ddp
*
* Returns : none
*
@@ -104,10 +104,10 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
udelay(100);
}
if (ddp->sgl)
- pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
+ dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc,
DMA_FROM_DEVICE);
if (ddp->pool) {
- pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
+ dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
ddp->pool = NULL;
}
@@ -134,6 +134,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
struct ixgbe_hw *hw;
struct ixgbe_fcoe *fcoe;
struct ixgbe_fcoe_ddp *ddp;
+ struct ixgbe_fcoe_ddp_pool *ddp_pool;
struct scatterlist *sg;
unsigned int i, j, dmacount;
unsigned int len;
@@ -144,8 +145,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
unsigned int thislen = 0;
u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
dma_addr_t addr = 0;
- struct pci_pool *pool;
- unsigned int cpu;
if (!netdev || !sgl)
return 0;
@@ -162,11 +161,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
return 0;
fcoe = &adapter->fcoe;
- if (!fcoe->pool) {
- e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
- return 0;
- }
-
ddp = &fcoe->ddp[xid];
if (ddp->sgl) {
e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
@@ -175,22 +169,32 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
}
ixgbe_fcoe_clear_ddp(ddp);
+
+ if (!fcoe->ddp_pool) {
+ e_warn(drv, "No ddp_pool resources allocated\n");
+ return 0;
+ }
+
+ ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
+ if (!ddp_pool->pool) {
+ e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
+ goto out_noddp;
+ }
+
/* setup dma from scsi command sgl */
- dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+ dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
if (dmacount == 0) {
e_err(drv, "xid 0x%x DMA map error\n", xid);
- return 0;
+ goto out_noddp;
}
/* alloc the udl from per cpu ddp pool */
- cpu = get_cpu();
- pool = *per_cpu_ptr(fcoe->pool, cpu);
- ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
+ ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
if (!ddp->udl) {
e_err(drv, "failed allocated ddp context\n");
goto out_noddp_unmap;
}
- ddp->pool = pool;
+ ddp->pool = ddp_pool->pool;
ddp->sgl = sgl;
ddp->sgc = sgc;
@@ -201,7 +205,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
while (len) {
/* max number of buffers allowed in one DDP context */
if (j >= IXGBE_BUFFCNT_MAX) {
- *per_cpu_ptr(fcoe->pcpu_noddp, cpu) += 1;
+ ddp_pool->noddp++;
goto out_noddp_free;
}
@@ -241,7 +245,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
*/
if (lastsize == bufflen) {
if (j >= IXGBE_BUFFCNT_MAX) {
- *per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) += 1;
+ ddp_pool->noddp_ext_buff++;
goto out_noddp_free;
}
@@ -293,11 +297,12 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
return 1;
out_noddp_free:
- pci_pool_free(pool, ddp->udl, ddp->udp);
+ dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
ixgbe_fcoe_clear_ddp(ddp);
out_noddp_unmap:
- pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+ dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
+out_noddp:
put_cpu();
return 0;
}
@@ -409,7 +414,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
break;
/* unmap the sg list when FCPRSP is received */
case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
- pci_unmap_sg(adapter->pdev, ddp->sgl,
+ dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
ddp->sgc, DMA_FROM_DEVICE);
ddp->err = ddp_err;
ddp->sgl = NULL;
@@ -563,44 +568,37 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
return 0;
}
-static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
+static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
{
- unsigned int cpu;
- struct pci_pool **pool;
+ struct ixgbe_fcoe_ddp_pool *ddp_pool;
- for_each_possible_cpu(cpu) {
- pool = per_cpu_ptr(fcoe->pool, cpu);
- if (*pool)
- pci_pool_destroy(*pool);
- }
- free_percpu(fcoe->pool);
- fcoe->pool = NULL;
+ ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+ if (ddp_pool->pool)
+ dma_pool_destroy(ddp_pool->pool);
+ ddp_pool->pool = NULL;
}
-static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
+static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
+ struct device *dev,
+ unsigned int cpu)
{
- struct ixgbe_fcoe *fcoe = &adapter->fcoe;
- unsigned int cpu;
- struct pci_pool **pool;
+ struct ixgbe_fcoe_ddp_pool *ddp_pool;
+ struct dma_pool *pool;
char pool_name[32];
- fcoe->pool = alloc_percpu(struct pci_pool *);
- if (!fcoe->pool)
- return;
+ snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
- /* allocate pci pool for each cpu */
- for_each_possible_cpu(cpu) {
- snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
- pool = per_cpu_ptr(fcoe->pool, cpu);
- *pool = pci_pool_create(pool_name,
- adapter->pdev, IXGBE_FCPTR_MAX,
- IXGBE_FCPTR_ALIGN, PAGE_SIZE);
- if (!*pool) {
- e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
- ixgbe_fcoe_ddp_pools_free(fcoe);
- return;
- }
- }
+ pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
+ IXGBE_FCPTR_ALIGN, PAGE_SIZE);
+ if (!pool)
+ return -ENOMEM;
+
+ ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+ ddp_pool->pool = pool;
+ ddp_pool->noddp = 0;
+ ddp_pool->noddp_ext_buff = 0;
+
+ return 0;
}
/**
@@ -613,132 +611,171 @@ static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
*/
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
- int i, fcoe_q, fcoe_i;
+ struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbe_fcoe *fcoe = &adapter->fcoe;
- struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
- unsigned int cpu;
-
- if (!fcoe->pool) {
- spin_lock_init(&fcoe->lock);
-
- ixgbe_fcoe_ddp_pools_alloc(adapter);
- if (!fcoe->pool) {
- e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
- return;
- }
-
- /* Extra buffer to be shared by all DDPs for HW work around */
- fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
- if (fcoe->extra_ddp_buffer == NULL) {
- e_err(drv, "failed to allocated extra DDP buffer\n");
- goto out_ddp_pools;
- }
+ int i, fcoe_q, fcoe_i;
+ u32 etqf;
- fcoe->extra_ddp_buffer_dma =
- dma_map_single(&adapter->pdev->dev,
- fcoe->extra_ddp_buffer,
- IXGBE_FCBUFF_MIN,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&adapter->pdev->dev,
- fcoe->extra_ddp_buffer_dma)) {
- e_err(drv, "failed to map extra DDP buffer\n");
- goto out_extra_ddp_buffer;
- }
+ /* Minimal functionality for FCoE requires at least CRC offloads */
+ if (!(adapter->netdev->features & NETIF_F_FCOE_CRC))
+ return;
- /* Alloc per cpu mem to count the ddp alloc failure number */
- fcoe->pcpu_noddp = alloc_percpu(u64);
- if (!fcoe->pcpu_noddp) {
- e_err(drv, "failed to alloc noddp counter\n");
- goto out_pcpu_noddp_alloc_fail;
- }
+ /* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */
+ etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ etqf |= IXGBE_ETQF_POOL_ENABLE;
+ etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
- fcoe->pcpu_noddp_ext_buff = alloc_percpu(u64);
- if (!fcoe->pcpu_noddp_ext_buff) {
- e_err(drv, "failed to alloc noddp extra buff cnt\n");
- goto out_pcpu_noddp_extra_buff_alloc_fail;
- }
+ /* leave registers un-configured if FCoE is disabled */
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ return;
- for_each_possible_cpu(cpu) {
- *per_cpu_ptr(fcoe->pcpu_noddp, cpu) = 0;
- *per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) = 0;
- }
+ /* Use one or more Rx queues for FCoE by redirection table */
+ for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
+ fcoe_i = fcoe->offset + (i % fcoe->indices);
+ fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
+ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
}
+ IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
- /* Enable L2 eth type filter for FCoE */
- IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
- (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
- /* Enable L2 eth type filter for FIP */
- IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
- (ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
- if (adapter->ring_feature[RING_F_FCOE].indices) {
- /* Use multiple rx queues for FCoE by redirection table */
- for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
- fcoe_i = f->mask + i % f->indices;
- fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
- fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
- IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
- }
- IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
- IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
- } else {
- /* Use single rx queue for FCoE */
- fcoe_i = f->mask;
- fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
- IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
- IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
- IXGBE_ETQS_QUEUE_EN |
- (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
+ /* Enable L2 EtherType filter for FIP */
+ etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ etqf |= IXGBE_ETQF_POOL_ENABLE;
+ etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
}
- /* send FIP frames to the first FCoE queue */
- fcoe_i = f->mask;
- fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf);
+
+ /* Send FIP frames to the first FCoE queue */
+ fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
IXGBE_ETQS_QUEUE_EN |
(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
- IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, IXGBE_FCRXCTRL_FCCRCBO |
+ /* Configure FCoE Rx control */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
+ IXGBE_FCRXCTRL_FCCRCBO |
(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
- return;
-out_pcpu_noddp_extra_buff_alloc_fail:
- free_percpu(fcoe->pcpu_noddp);
-out_pcpu_noddp_alloc_fail:
- dma_unmap_single(&adapter->pdev->dev,
- fcoe->extra_ddp_buffer_dma,
- IXGBE_FCBUFF_MIN,
- DMA_FROM_DEVICE);
-out_extra_ddp_buffer:
- kfree(fcoe->extra_ddp_buffer);
-out_ddp_pools:
- ixgbe_fcoe_ddp_pools_free(fcoe);
}
/**
- * ixgbe_cleanup_fcoe - release all fcoe ddp context resources
+ * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
* @adapter : ixgbe adapter
*
* Cleans up outstanding ddp context resources
*
* Returns : none
*/
-void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
+void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
- int i;
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ int cpu, i;
- if (!fcoe->pool)
+ /* do nothing if no DDP pools were allocated */
+ if (!fcoe->ddp_pool)
return;
for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
ixgbe_fcoe_ddp_put(adapter->netdev, i);
+
+ for_each_possible_cpu(cpu)
+ ixgbe_fcoe_dma_pool_free(fcoe, cpu);
+
dma_unmap_single(&adapter->pdev->dev,
fcoe->extra_ddp_buffer_dma,
IXGBE_FCBUFF_MIN,
DMA_FROM_DEVICE);
- free_percpu(fcoe->pcpu_noddp);
- free_percpu(fcoe->pcpu_noddp_ext_buff);
kfree(fcoe->extra_ddp_buffer);
- ixgbe_fcoe_ddp_pools_free(fcoe);
+
+ fcoe->extra_ddp_buffer = NULL;
+ fcoe->extra_ddp_buffer_dma = 0;
+}
+
+/**
+ * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
+ * @adapter: ixgbe adapter
+ *
+ * Sets up ddp context resources
+ *
+ * Returns : 0 indicates success or -ENOMEM on failure
+ */
+int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ struct device *dev = &adapter->pdev->dev;
+ void *buffer;
+ dma_addr_t dma;
+ unsigned int cpu;
+
+ /* do nothing if no DDP pools were allocated */
+ if (!fcoe->ddp_pool)
+ return 0;
+
+ /* Extra buffer to be shared by all DDPs for HW work around */
+ buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+ if (!buffer) {
+ e_err(drv, "failed to allocate extra DDP buffer\n");
+ return -ENOMEM;
+ }
+
+ dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma)) {
+ e_err(drv, "failed to map extra DDP buffer\n");
+ kfree(buffer);
+ return -ENOMEM;
+ }
+
+ fcoe->extra_ddp_buffer = buffer;
+ fcoe->extra_ddp_buffer_dma = dma;
+
+ /* allocate pci pool for each cpu */
+ for_each_possible_cpu(cpu) {
+ int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
+ if (!err)
+ continue;
+
+ e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
+ ixgbe_free_fcoe_ddp_resources(adapter);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
+ return -EINVAL;
+
+ fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);
+
+ if (!fcoe->ddp_pool) {
+ e_err(drv, "failed to allocate percpu DDP resources\n");
+ return -ENOMEM;
+ }
+
+ adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+
+ return 0;
+}
+
+static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+ adapter->netdev->fcoe_ddp_xid = 0;
+
+ if (!fcoe->ddp_pool)
+ return;
+
+ free_percpu(fcoe->ddp_pool);
+ fcoe->ddp_pool = NULL;
}
/**
@@ -751,40 +788,37 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
*/
int ixgbe_fcoe_enable(struct net_device *netdev)
{
- int rc = -EINVAL;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ atomic_inc(&fcoe->refcnt);
if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
- goto out_enable;
+ return -EINVAL;
- atomic_inc(&fcoe->refcnt);
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
- goto out_enable;
+ return -EINVAL;
e_info(drv, "Enabling FCoE offload features.\n");
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_clear_interrupt_scheme(adapter);
+ /* Allocate per CPU memory to track DDP pools */
+ ixgbe_fcoe_ddp_enable(adapter);
+ /* enable FCoE and notify stack */
adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
- adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
- netdev->features |= NETIF_F_FCOE_CRC;
- netdev->features |= NETIF_F_FSO;
netdev->features |= NETIF_F_FCOE_MTU;
- netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+ netdev_features_change(netdev);
+ /* release existing queues and reallocate them */
+ ixgbe_clear_interrupt_scheme(adapter);
ixgbe_init_interrupt_scheme(adapter);
- netdev_features_change(netdev);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);
- rc = 0;
-out_enable:
- return rc;
+ return 0;
}
/**
@@ -797,41 +831,35 @@ out_enable:
*/
int ixgbe_fcoe_disable(struct net_device *netdev)
{
- int rc = -EINVAL;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_fcoe *fcoe = &adapter->fcoe;
- if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
- goto out_disable;
+ if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
+ return -EINVAL;
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
- goto out_disable;
-
- if (!atomic_dec_and_test(&fcoe->refcnt))
- goto out_disable;
+ return -EINVAL;
e_info(drv, "Disabling FCoE offload features.\n");
- netdev->features &= ~NETIF_F_FCOE_CRC;
- netdev->features &= ~NETIF_F_FSO;
- netdev->features &= ~NETIF_F_FCOE_MTU;
- netdev->fcoe_ddp_xid = 0;
- netdev_features_change(netdev);
-
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_clear_interrupt_scheme(adapter);
+ /* Free per CPU memory to track DDP pools */
+ ixgbe_fcoe_ddp_disable(adapter);
+
+ /* disable FCoE and notify stack */
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
- adapter->ring_feature[RING_F_FCOE].indices = 0;
- ixgbe_cleanup_fcoe(adapter);
+ netdev->features &= ~NETIF_F_FCOE_MTU;
+
+ netdev_features_change(netdev);
+
+ /* release existing queues and reallocate them */
+ ixgbe_clear_interrupt_scheme(adapter);
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);
- rc = 0;
-out_disable:
- return rc;
+ return 0;
}
/**
@@ -960,3 +988,18 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
return 0;
}
+
+/**
+ * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
+ * @adapter: pointer to the device adapter structure
+ *
+ * Return : TC that FCoE is mapped to
+ */
+u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter)
+{
+#ifdef CONFIG_IXGBE_DCB
+ return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up);
+#else
+ return 0;
+#endif
+}
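
The ixgbe_fcoe.c conversion above is largely mechanical: the legacy pci_pool_*() and pci_map_sg()/pci_unmap_sg() wrappers are thin aliases for the generic DMA API, so only the device argument changes from the pci_dev to its embedded struct device. A minimal sketch of the same pattern, not part of the patch (the example_* name and the literal sizes are illustrative):

	#include <linux/pci.h>
	#include <linux/dmapool.h>
	#include <linux/dma-mapping.h>

	/* pci_pool_create(name, pdev, ...) becomes dma_pool_create(name, &pdev->dev, ...) */
	static void *example_ddp_alloc(struct pci_dev *pdev, struct dma_pool **poolp,
				       dma_addr_t *handle)
	{
		struct dma_pool *pool;

		pool = dma_pool_create("example_ddp", &pdev->dev, 128, 16, PAGE_SIZE);
		if (!pool)
			return NULL;
		*poolp = pool;

		/* pci_pool_alloc()/pci_pool_free() map 1:1 onto dma_pool_alloc()/dma_pool_free() */
		return dma_pool_alloc(pool, GFP_ATOMIC, handle);
	}
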
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index 1dbed17..bf724da 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -62,19 +62,24 @@ struct ixgbe_fcoe_ddp {
struct scatterlist *sgl;
dma_addr_t udp;
u64 *udl;
- struct pci_pool *pool;
+ struct dma_pool *pool;
+};
+
+/* per cpu variables */
+struct ixgbe_fcoe_ddp_pool {
+ struct dma_pool *pool;
+ u64 noddp;
+ u64 noddp_ext_buff;
};
struct ixgbe_fcoe {
- struct pci_pool **pool;
+ struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool;
atomic_t refcnt;
spinlock_t lock;
struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
- unsigned char *extra_ddp_buffer;
+ void *extra_ddp_buffer;
dma_addr_t extra_ddp_buffer_dma;
unsigned long mode;
- u64 __percpu *pcpu_noddp;
- u64 __percpu *pcpu_noddp_ext_buff;
#ifdef CONFIG_IXGBE_DCB
u8 up;
#endif
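
The new ixgbe_fcoe_ddp_pool above is a per-cpu structure, so each CPU owns its own DMA pool and noddp counters and the fast path needs no locking. A minimal sketch of the intended alloc_percpu()/per_cpu_ptr() lifecycle, mirroring the ixgbe_fcoe.c hunks (the example_* function is illustrative, not part of the patch):

	#include <linux/percpu.h>
	#include <linux/dmapool.h>
	#include "ixgbe.h"	/* struct ixgbe_fcoe_ddp_pool */

	static int example_ddp_pool_lifecycle(void)
	{
		struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool;
		struct ixgbe_fcoe_ddp_pool *p;
		int cpu;

		/* one zeroed instance per possible CPU */
		ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);
		if (!ddp_pool)
			return -ENOMEM;

		/* fast path: pin the current CPU and touch only its own counters */
		p = per_cpu_ptr(ddp_pool, get_cpu());
		p->noddp++;
		put_cpu();

		/* teardown walks every possible CPU before freeing the per-cpu area */
		for_each_possible_cpu(cpu) {
			p = per_cpu_ptr(ddp_pool, cpu);
			if (p->pool)
				dma_pool_destroy(p->pool);
		}
		free_percpu(ddp_pool);

		return 0;
	}
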
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index af1a531..17ecbce 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -28,28 +28,83 @@
#include "ixgbe.h"
#include "ixgbe_sriov.h"
+#ifdef CONFIG_IXGBE_DCB
/**
- * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
+ * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
* @adapter: board private structure to initialize
*
- * Cache the descriptor ring offsets for RSS to the assigned rings.
+ * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It
+ * will also try to cache the proper offsets if RSS/FCoE are enabled along
+ * with VMDq.
*
**/
-static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
+static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
+#ifdef IXGBE_FCOE
+ struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
+#endif /* IXGBE_FCOE */
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
int i;
+ u16 reg_idx;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
- if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ /* verify we have DCB queueing enabled before proceeding */
+ if (tcs <= 1)
return false;
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->reg_idx = i;
- for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i]->reg_idx = i;
+ /* verify we have VMDq enabled before proceeding */
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return false;
+
+ /* start at VMDq register offset for SR-IOV enabled setups */
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
+ /* If we are greater than indices move to next pool */
+ if ((reg_idx & ~vmdq->mask) >= tcs)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ adapter->rx_ring[i]->reg_idx = reg_idx;
+ }
+
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
+ /* If we are greater than indices move to next pool */
+ if ((reg_idx & ~vmdq->mask) >= tcs)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ adapter->tx_ring[i]->reg_idx = reg_idx;
+ }
+
+#ifdef IXGBE_FCOE
+ /* nothing to do if FCoE is disabled */
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ return true;
+
+ /* The work is already done if the FCoE ring is shared */
+ if (fcoe->offset < tcs)
+ return true;
+
+ /* The FCoE rings exist separately, so we need to move their reg_idx */
+ if (fcoe->indices) {
+ u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+ u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);
+
+ reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
+ for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
+ adapter->rx_ring[i]->reg_idx = reg_idx;
+ reg_idx++;
+ }
+
+ reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
+ for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
+ adapter->tx_ring[i]->reg_idx = reg_idx;
+ reg_idx++;
+ }
+ }
+#endif /* IXGBE_FCOE */
return true;
}
-#ifdef CONFIG_IXGBE_DCB
/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
@@ -64,42 +119,37 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
- *tx = tc << 2;
- *rx = tc << 3;
+ /* TxQs/TC: 4 RxQs/TC: 8 */
+ *tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */
+ *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
if (num_tcs > 4) {
- if (tc < 3) {
- *tx = tc << 5;
- *rx = tc << 4;
- } else if (tc < 5) {
- *tx = ((tc + 2) << 4);
- *rx = tc << 4;
- } else if (tc < num_tcs) {
- *tx = ((tc + 8) << 3);
- *rx = tc << 4;
- }
+ /*
+ * TCs : TC0/1 TC2/3 TC4-7
+ * TxQs/TC: 32 16 8
+ * RxQs/TC: 16 16 16
+ */
+ *rx = tc << 4;
+ if (tc < 3)
+ *tx = tc << 5; /* 0, 32, 64 */
+ else if (tc < 5)
+ *tx = (tc + 2) << 4; /* 80, 96 */
+ else
+ *tx = (tc + 8) << 3; /* 104, 112, 120 */
} else {
- *rx = tc << 5;
- switch (tc) {
- case 0:
- *tx = 0;
- break;
- case 1:
- *tx = 64;
- break;
- case 2:
- *tx = 96;
- break;
- case 3:
- *tx = 112;
- break;
- default:
- break;
- }
+ /*
+ * TCs : TC0 TC1 TC2/3
+ * TxQs/TC: 64 32 16
+ * RxQs/TC: 32 32 32
+ */
+ *rx = tc << 5;
+ if (tc < 2)
+ *tx = tc << 6; /* 0, 64 */
+ else
+ *tx = (tc + 4) << 4; /* 96, 112 */
}
- break;
default:
break;
}
@@ -112,106 +162,115 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
* Cache the descriptor ring offsets for DCB to the assigned rings.
*
**/
-static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
+static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
struct net_device *dev = adapter->netdev;
- int i, j, k;
+ unsigned int tx_idx, rx_idx;
+ int tc, offset, rss_i, i;
u8 num_tcs = netdev_get_num_tc(dev);
- if (!num_tcs)
+ /* verify we have DCB queueing enabled before proceeding */
+ if (num_tcs <= 1)
return false;
- for (i = 0, k = 0; i < num_tcs; i++) {
- unsigned int tx_s, rx_s;
- u16 count = dev->tc_to_txq[i].count;
+ rss_i = adapter->ring_feature[RING_F_RSS].indices;
- ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
- for (j = 0; j < count; j++, k++) {
- adapter->tx_ring[k]->reg_idx = tx_s + j;
- adapter->rx_ring[k]->reg_idx = rx_s + j;
- adapter->tx_ring[k]->dcb_tc = i;
- adapter->rx_ring[k]->dcb_tc = i;
+ for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
+ ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
+ for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
+ adapter->tx_ring[offset + i]->reg_idx = tx_idx;
+ adapter->rx_ring[offset + i]->reg_idx = rx_idx;
+ adapter->tx_ring[offset + i]->dcb_tc = tc;
+ adapter->rx_ring[offset + i]->dcb_tc = tc;
}
}
return true;
}
-#endif
+#endif
/**
- * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
+ * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
* @adapter: board private structure to initialize
*
- * Cache the descriptor ring offsets for Flow Director to the assigned rings.
+ * SR-IOV doesn't use any descriptor rings but changes the default if
+ * no other mapping is used.
*
- **/
-static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
+ */
+static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
+#ifdef IXGBE_FCOE
+ struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
+#endif /* IXGBE_FCOE */
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+ struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
int i;
- bool ret = false;
-
- if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
- (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->reg_idx = i;
- for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i]->reg_idx = i;
- ret = true;
+ u16 reg_idx;
+
+ /* only proceed if VMDq is enabled */
+ if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
+ return false;
+
+ /* start at VMDq register offset for SR-IOV enabled setups */
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
+#ifdef IXGBE_FCOE
+ /* Allow first FCoE queue to be mapped as RSS */
+ if (fcoe->offset && (i > fcoe->offset))
+ break;
+#endif
+ /* If we are greater than indices move to next pool */
+ if ((reg_idx & ~vmdq->mask) >= rss->indices)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ adapter->rx_ring[i]->reg_idx = reg_idx;
}
- return ret;
-}
+#ifdef IXGBE_FCOE
+ /* FCoE uses a linear block of queues so just assign them 1:1 */
+ for (; i < adapter->num_rx_queues; i++, reg_idx++)
+ adapter->rx_ring[i]->reg_idx = reg_idx;
+#endif
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
-/**
- * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
- * @adapter: board private structure to initialize
- *
- * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
- *
- */
-static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
- int i;
- u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
+ /* Allow first FCoE queue to be mapped as RSS */
+ if (fcoe->offset && (i > fcoe->offset))
+ break;
+#endif
+ /* If we are greater than indices move to next pool */
+ if ((reg_idx & rss->mask) >= rss->indices)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ adapter->tx_ring[i]->reg_idx = reg_idx;
+ }
- if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
- return false;
+#ifdef IXGBE_FCOE
+ /* FCoE uses a linear block of queues so just assign them 1:1 */
+ for (; i < adapter->num_tx_queues; i++, reg_idx++)
+ adapter->tx_ring[i]->reg_idx = reg_idx;
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
- ixgbe_cache_ring_fdir(adapter);
- else
- ixgbe_cache_ring_rss(adapter);
+#endif
- fcoe_rx_i = f->mask;
- fcoe_tx_i = f->mask;
- }
- for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
- adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
- adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
- }
return true;
}
-#endif /* IXGBE_FCOE */
/**
- * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
+ * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
* @adapter: board private structure to initialize
*
- * SR-IOV doesn't use any descriptor rings but changes the default if
- * no other mapping is used.
+ * Cache the descriptor ring offsets for RSS to the assigned rings.
*
- */
-static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
+ **/
+static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
- adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
- adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
- if (adapter->num_vfs)
- return true;
- else
- return false;
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = i;
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->reg_idx = i;
+
+ return true;
}
/**
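
Much of the SR-IOV/VMDq ring mapping above leans on __ALIGN_MASK(x, mask) == (((x) + (mask)) & ~(mask)) from include/linux/kernel.h: with ~vmdq->mask selecting the low in-pool queue bits, __ALIGN_MASK(1, ~vmdq->mask) yields the queue stride of a pool and __ALIGN_MASK(reg_idx, ~vmdq->mask) snaps reg_idx to the next pool boundary. A worked sketch, assuming the 82599 4-queues-per-pool VMDq mask value of 0x7C (the value and the function name are shown only as an example):

	#include <linux/kernel.h>
	#include <linux/types.h>

	static u16 example_vmdq_index_math(void)
	{
		u16 vmdq_mask = 0x7C;			/* assumed 4-queues-per-pool mask */
		u16 qpp, reg_idx;

		/* rounds 1 up to the pool stride: 4 queues per pool */
		qpp = __ALIGN_MASK(1, ~vmdq_mask);

		/* queue 2 of pool 8 sits at absolute register index 8 * 4 + 2 = 34 */
		reg_idx = 8 * qpp + 2;

		/* the low (~mask) bits give the offset inside the pool: 34 & ~0x7C == 2 */
		if ((reg_idx & ~vmdq_mask) >= 2)
			/* aligning on ~mask jumps to the start of the next pool: 36 */
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq_mask);

		return reg_idx;
	}
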
@@ -231,186 +290,384 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
adapter->rx_ring[0]->reg_idx = 0;
adapter->tx_ring[0]->reg_idx = 0;
- if (ixgbe_cache_ring_sriov(adapter))
- return;
-
#ifdef CONFIG_IXGBE_DCB
- if (ixgbe_cache_ring_dcb(adapter))
+ if (ixgbe_cache_ring_dcb_sriov(adapter))
return;
-#endif
-#ifdef IXGBE_FCOE
- if (ixgbe_cache_ring_fcoe(adapter))
+ if (ixgbe_cache_ring_dcb(adapter))
return;
-#endif /* IXGBE_FCOE */
- if (ixgbe_cache_ring_fdir(adapter))
+#endif
+ if (ixgbe_cache_ring_sriov(adapter))
return;
- if (ixgbe_cache_ring_rss(adapter))
- return;
+ ixgbe_cache_ring_rss(adapter);
}
-/**
- * ixgbe_set_sriov_queues: Allocate queues for IOV use
- * @adapter: board private structure to initialize
- *
- * IOV doesn't actually use anything, so just NAK the
- * request for now and let the other queue routines
- * figure out what to do.
- */
-static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
-{
- return false;
-}
+#define IXGBE_RSS_16Q_MASK 0xF
+#define IXGBE_RSS_8Q_MASK 0x7
+#define IXGBE_RSS_4Q_MASK 0x3
+#define IXGBE_RSS_2Q_MASK 0x1
+#define IXGBE_RSS_DISABLED_MASK 0x0
+#ifdef CONFIG_IXGBE_DCB
/**
- * ixgbe_set_rss_queues: Allocate queues for RSS
+ * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
* @adapter: board private structure to initialize
*
- * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
- * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
+ * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
+ * and VM pools where appropriate. Also assign queues based on DCB
+ * priorities and map accordingly.
*
**/
-static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
+static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
- bool ret = false;
- struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
-
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- f->mask = 0xF;
- adapter->num_rx_queues = f->indices;
- adapter->num_tx_queues = f->indices;
- ret = true;
+ int i;
+ u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
+ u16 vmdq_m = 0;
+#ifdef IXGBE_FCOE
+ u16 fcoe_i = 0;
+#endif
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+ /* verify we have DCB queueing enabled before proceeding */
+ if (tcs <= 1)
+ return false;
+
+ /* verify we have VMDq enabled before proceeding */
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return false;
+
+ /* Add starting offset to total pool count */
+ vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
+
+ /* 16 pools w/ 8 TC per pool */
+ if (tcs > 4) {
+ vmdq_i = min_t(u16, vmdq_i, 16);
+ vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
+ /* 32 pools w/ 4 TC per pool */
+ } else {
+ vmdq_i = min_t(u16, vmdq_i, 32);
+ vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
}
- return ret;
-}
+#ifdef IXGBE_FCOE
+ /* queues in the remaining pools are available for FCoE */
+ fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;
-/**
- * ixgbe_set_fdir_queues: Allocate queues for Flow Director
- * @adapter: board private structure to initialize
- *
- * Flow Director is an advanced Rx filter, attempting to get Rx flows back
- * to the original CPU that initiated the Tx session. This runs in addition
- * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
- * Rx load across CPUs using RSS.
- *
- **/
-static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
-{
- bool ret = false;
- struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
+#endif
+ /* remove the starting offset from the pool count */
+ vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
- f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->indices);
- f_fdir->mask = 0;
+ /* save features for later use */
+ adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
+ adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
/*
- * Use RSS in addition to Flow Director to ensure the best
- * distribution of flows across cores, even when an FDIR flow
- * isn't matched.
+ * We do not support DCB, VMDq, and RSS all simultaneously
+ * so we will disable RSS since it is the lowest priority
*/
- if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
- (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
- adapter->num_tx_queues = f_fdir->indices;
- adapter->num_rx_queues = f_fdir->indices;
- ret = true;
- } else {
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->ring_feature[RING_F_RSS].indices = 1;
+ adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;
+
+ /* disable ATR as it is not supported when VMDq is enabled */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+ adapter->num_rx_pools = vmdq_i;
+ adapter->num_rx_queues_per_pool = tcs;
+
+ adapter->num_tx_queues = vmdq_i * tcs;
+ adapter->num_rx_queues = vmdq_i * tcs;
+
+#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ struct ixgbe_ring_feature *fcoe;
+
+ fcoe = &adapter->ring_feature[RING_F_FCOE];
+
+ /* limit ourselves based on feature limits */
+ fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
+ fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
+
+ if (fcoe_i) {
+ /* alloc queues for FCoE separately */
+ fcoe->indices = fcoe_i;
+ fcoe->offset = vmdq_i * tcs;
+
+ /* add queues to adapter */
+ adapter->num_tx_queues += fcoe_i;
+ adapter->num_rx_queues += fcoe_i;
+ } else if (tcs > 1) {
+ /* use queue belonging to FcoE TC */
+ fcoe->indices = 1;
+ fcoe->offset = ixgbe_fcoe_get_tc(adapter);
+ } else {
+ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+
+ fcoe->indices = 0;
+ fcoe->offset = 0;
+ }
}
- return ret;
+
+#endif /* IXGBE_FCOE */
+ /* configure TC to queue mapping */
+ for (i = 0; i < tcs; i++)
+ netdev_set_tc_queue(adapter->netdev, i, 1, i);
+
+ return true;
}
+static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
+{
+ struct net_device *dev = adapter->netdev;
+ struct ixgbe_ring_feature *f;
+ int rss_i, rss_m, i;
+ int tcs;
+
+ /* Map queue offset and counts onto allocated tx queues */
+ tcs = netdev_get_num_tc(dev);
+
+ /* verify we have DCB queueing enabled before proceeding */
+ if (tcs <= 1)
+ return false;
+
+ /* determine the upper limit for our current DCB mode */
+ rss_i = dev->num_tx_queues / tcs;
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ /* 8 TC w/ 4 queues per TC */
+ rss_i = min_t(u16, rss_i, 4);
+ rss_m = IXGBE_RSS_4Q_MASK;
+ } else if (tcs > 4) {
+ /* 8 TC w/ 8 queues per TC */
+ rss_i = min_t(u16, rss_i, 8);
+ rss_m = IXGBE_RSS_8Q_MASK;
+ } else {
+ /* 4 TC w/ 16 queues per TC */
+ rss_i = min_t(u16, rss_i, 16);
+ rss_m = IXGBE_RSS_16Q_MASK;
+ }
+
+ /* set RSS mask and indices */
+ f = &adapter->ring_feature[RING_F_RSS];
+ rss_i = min_t(int, rss_i, f->limit);
+ f->indices = rss_i;
+ f->mask = rss_m;
+
+ /* disable ATR as it is not supported when multiple TCs are enabled */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
#ifdef IXGBE_FCOE
+ /* FCoE enabled queues require special configuration indexed
+ * by feature specific indices and offset. Here we map FCoE
+ * indices onto the DCB queue pairs allowing FCoE to own
+ * configuration later.
+ */
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ u8 tc = ixgbe_fcoe_get_tc(adapter);
+
+ f = &adapter->ring_feature[RING_F_FCOE];
+ f->indices = min_t(u16, rss_i, f->limit);
+ f->offset = rss_i * tc;
+ }
+
+#endif /* IXGBE_FCOE */
+ for (i = 0; i < tcs; i++)
+ netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
+
+ adapter->num_tx_queues = rss_i * tcs;
+ adapter->num_rx_queues = rss_i * tcs;
+
+ return true;
+}
+
+#endif
/**
- * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
+ * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
* @adapter: board private structure to initialize
*
- * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
- * The ring feature mask is not used as a mask for FCoE, as it can take any 8
- * rx queues out of the max number of rx queues, instead, it is used as the
- * index of the first rx queue used by FCoE.
+ * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
+ * and VM pools where appropriate. If RSS is available, then also try and
+ * enable RSS and map accordingly.
*
**/
-static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
+static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
- struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+ u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
+ u16 vmdq_m = 0;
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
+ u16 rss_m = IXGBE_RSS_DISABLED_MASK;
+#ifdef IXGBE_FCOE
+ u16 fcoe_i = 0;
+#endif
- if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ /* only proceed if SR-IOV is enabled */
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return false;
- f->indices = min_t(int, num_online_cpus(), f->indices);
+ /* Add starting offset to total pool count */
+ vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
- adapter->num_rx_queues = 1;
- adapter->num_tx_queues = 1;
+ /* double check we are limited to maximum pools */
+ vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- e_info(probe, "FCoE enabled with RSS\n");
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
- ixgbe_set_fdir_queues(adapter);
- else
- ixgbe_set_rss_queues(adapter);
+ /* 64 pool mode with 2 queues per pool */
+ if ((vmdq_i > 32) || (rss_i < 4)) {
+ vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
+ rss_m = IXGBE_RSS_2Q_MASK;
+ rss_i = min_t(u16, rss_i, 2);
+ /* 32 pool mode with 4 queues per pool */
+ } else {
+ vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
+ rss_m = IXGBE_RSS_4Q_MASK;
+ rss_i = 4;
}
- /* adding FCoE rx rings to the end */
- f->mask = adapter->num_rx_queues;
- adapter->num_rx_queues += f->indices;
- adapter->num_tx_queues += f->indices;
+#ifdef IXGBE_FCOE
+ /* queues in the remaining pools are available for FCoE */
+ fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));
+
+#endif
+ /* remove the starting offset from the pool count */
+ vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
+
+ /* save features for later use */
+ adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
+ adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
+
+ /* limit RSS based on user input and save for later use */
+ adapter->ring_feature[RING_F_RSS].indices = rss_i;
+ adapter->ring_feature[RING_F_RSS].mask = rss_m;
+ adapter->num_rx_pools = vmdq_i;
+ adapter->num_rx_queues_per_pool = rss_i;
+
+ adapter->num_rx_queues = vmdq_i * rss_i;
+ adapter->num_tx_queues = vmdq_i * rss_i;
+
+ /* disable ATR as it is not supported when VMDq is enabled */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+#ifdef IXGBE_FCOE
+ /*
+ * FCoE can use rings from adjacent buffers to allow RSS
+ * like behavior. To account for this we need to add the
+ * FCoE indices to the total ring count.
+ */
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ struct ixgbe_ring_feature *fcoe;
+
+ fcoe = &adapter->ring_feature[RING_F_FCOE];
+
+ /* limit ourselves based on feature limits */
+ fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
+
+ if (vmdq_i > 1 && fcoe_i) {
+ /* reserve no more than number of CPUs */
+ fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
+
+ /* alloc queues for FCoE separately */
+ fcoe->indices = fcoe_i;
+ fcoe->offset = vmdq_i * rss_i;
+ } else {
+ /* merge FCoE queues with RSS queues */
+ fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());
+
+ /* limit indices to rss_i if MSI-X is disabled */
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+ fcoe_i = rss_i;
+
+ /* attempt to reserve some queues for just FCoE */
+ fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
+ fcoe->offset = fcoe_i - fcoe->indices;
+
+ fcoe_i -= rss_i;
+ }
+
+ /* add queues to adapter */
+ adapter->num_tx_queues += fcoe_i;
+ adapter->num_rx_queues += fcoe_i;
+ }
+
+#endif
return true;
}
-#endif /* IXGBE_FCOE */
-/* Artificial max queue cap per traffic class in DCB mode */
-#define DCB_QUEUE_CAP 8
-
-#ifdef CONFIG_IXGBE_DCB
-static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
+/**
+ * ixgbe_set_rss_queues - Allocate queues for RSS
+ * @adapter: board private structure to initialize
+ *
+ * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
+ * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
+ *
+ **/
+static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
- int per_tc_q, q, i, offset = 0;
- struct net_device *dev = adapter->netdev;
- int tcs = netdev_get_num_tc(dev);
+ struct ixgbe_ring_feature *f;
+ u16 rss_i;
- if (!tcs)
- return false;
+ /* set mask for 16 queue limit of RSS */
+ f = &adapter->ring_feature[RING_F_RSS];
+ rss_i = f->limit;
- /* Map queue offset and counts onto allocated tx queues */
- per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP);
- q = min_t(int, num_online_cpus(), per_tc_q);
+ f->indices = rss_i;
+ f->mask = IXGBE_RSS_16Q_MASK;
- for (i = 0; i < tcs; i++) {
- netdev_set_tc_queue(dev, i, q, offset);
- offset += q;
- }
+ /* disable ATR by default, it will be configured below */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+ /*
+ * Use Flow Director in addition to RSS to ensure the best
+ * distribution of flows across cores, even when an FDIR flow
+ * isn't matched.
+ */
+ if (rss_i > 1 && adapter->atr_sample_rate) {
+ f = &adapter->ring_feature[RING_F_FDIR];
- adapter->num_tx_queues = q * tcs;
- adapter->num_rx_queues = q * tcs;
+ f->indices = min_t(u16, num_online_cpus(), f->limit);
+ rss_i = max_t(u16, rss_i, f->indices);
+
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ }
#ifdef IXGBE_FCOE
- /* FCoE enabled queues require special configuration indexed
- * by feature specific indices and mask. Here we map FCoE
- * indices onto the DCB queue pairs allowing FCoE to own
- * configuration later.
+ /*
+ * FCoE can exist on the same rings as standard network traffic
+ * however it is preferred to avoid that if possible. In order
+ * to get the best performance we allocate as many FCoE queues
+ * as we can and we place them at the end of the ring array to
+ * avoid sharing queues with standard RSS on systems with 24 or
+ * more CPUs.
*/
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
- u8 prio_tc[MAX_USER_PRIORITY] = {0};
- int tc;
- struct ixgbe_ring_feature *f =
- &adapter->ring_feature[RING_F_FCOE];
-
- ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
- tc = prio_tc[adapter->fcoe.up];
- f->indices = dev->tc_to_txq[tc].count;
- f->mask = dev->tc_to_txq[tc].offset;
+ struct net_device *dev = adapter->netdev;
+ u16 fcoe_i;
+
+ f = &adapter->ring_feature[RING_F_FCOE];
+
+ /* merge FCoE queues with RSS queues */
+ fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
+ fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);
+
+ /* limit indices to rss_i if MSI-X is disabled */
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+ fcoe_i = rss_i;
+
+ /* attempt to reserve some queues for just FCoE */
+ f->indices = min_t(u16, fcoe_i, f->limit);
+ f->offset = fcoe_i - f->indices;
+ rss_i = max_t(u16, fcoe_i, rss_i);
}
-#endif
+
+#endif /* IXGBE_FCOE */
+ adapter->num_rx_queues = rss_i;
+ adapter->num_tx_queues = rss_i;
return true;
}
-#endif
/**
- * ixgbe_set_num_queues: Allocate queues for device, feature dependent
+ * ixgbe_set_num_queues - Allocate queues for device, feature dependent
* @adapter: board private structure to initialize
*
* This is the top level queue allocation routine. The order here is very
@@ -420,7 +677,7 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
* fallthrough conditions.
*
**/
-static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
+static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
/* Start with base case */
adapter->num_rx_queues = 1;
@@ -428,38 +685,18 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
adapter->num_rx_pools = adapter->num_rx_queues;
adapter->num_rx_queues_per_pool = 1;
- if (ixgbe_set_sriov_queues(adapter))
- goto done;
-
#ifdef CONFIG_IXGBE_DCB
+ if (ixgbe_set_dcb_sriov_queues(adapter))
+ return;
+
if (ixgbe_set_dcb_queues(adapter))
- goto done;
+ return;
#endif
-#ifdef IXGBE_FCOE
- if (ixgbe_set_fcoe_queues(adapter))
- goto done;
-
-#endif /* IXGBE_FCOE */
- if (ixgbe_set_fdir_queues(adapter))
- goto done;
-
- if (ixgbe_set_rss_queues(adapter))
- goto done;
-
- /* fallback to base case */
- adapter->num_rx_queues = 1;
- adapter->num_tx_queues = 1;
-
-done:
- if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
- (adapter->netdev->reg_state == NETREG_UNREGISTERING))
- return 0;
+ if (ixgbe_set_sriov_queues(adapter))
+ return;
- /* Notify the stack of the (possibly) reduced queue counts. */
- netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
- return netif_set_real_num_rx_queues(adapter->netdev,
- adapter->num_rx_queues);
+ ixgbe_set_rss_queues(adapter);
}
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
@@ -507,8 +744,8 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
* of max_msix_q_vectors + NON_Q_VECTORS, or the number of
* vectors we were allocated.
*/
- adapter->num_msix_vectors = min(vectors,
- adapter->max_msix_q_vectors + NON_Q_VECTORS);
+ vectors -= NON_Q_VECTORS;
+ adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
}
}
@@ -632,9 +869,9 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
struct ixgbe_ring_feature *f;
f = &adapter->ring_feature[RING_F_FCOE];
- if ((rxr_idx >= f->mask) &&
- (rxr_idx < f->mask + f->indices))
- set_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state);
+ if ((rxr_idx >= f->offset) &&
+ (rxr_idx < f->offset + f->indices))
+ set_bit(__IXGBE_RX_FCOE, &ring->state);
}
#endif /* IXGBE_FCOE */
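
The hunk above reflects the wider ring_feature rework in this series: a ring belongs to FCoE when its index falls inside the feature's [offset, offset + indices) window, rather than the old convention of overloading ->mask as the index of the first FCoE queue. A minimal sketch of that range test (the helper name is illustrative, not part of the patch):

	#include "ixgbe.h"

	static bool example_ring_is_fcoe(struct ixgbe_adapter *adapter, u16 ring_idx)
	{
		struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];

		/* half-open window: offset .. offset + indices - 1 */
		return (ring_idx >= f->offset) && (ring_idx < f->offset + f->indices);
	}
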
@@ -695,7 +932,7 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
**/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int q_vectors = adapter->num_q_vectors;
int rxr_remaining = adapter->num_rx_queues;
int txr_remaining = adapter->num_tx_queues;
int rxr_idx = 0, txr_idx = 0, v_idx = 0;
@@ -739,10 +976,12 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
return 0;
err_out:
- while (v_idx) {
- v_idx--;
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--)
ixgbe_free_q_vector(adapter, v_idx);
- }
return -ENOMEM;
}
@@ -757,14 +996,13 @@ err_out:
**/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
- int v_idx, q_vectors;
+ int v_idx = adapter->num_q_vectors;
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- else
- q_vectors = 1;
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
- for (v_idx = 0; v_idx < q_vectors; v_idx++)
+ while (v_idx--)
ixgbe_free_q_vector(adapter, v_idx);
}
@@ -788,11 +1026,10 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
* Attempt to configure the interrupts using the best available
* capabilities of the hardware and the kernel.
**/
-static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
+static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- int err = 0;
- int vector, v_budget;
+ int vector, v_budget, err;
/*
* It's easy to be greedy for MSI-X vectors, but it really
@@ -825,38 +1062,41 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
ixgbe_acquire_msix_vectors(adapter, v_budget);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- goto out;
+ return;
}
- adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
- e_err(probe,
- "ATR is not supported while multiple "
- "queues are disabled. Disabling Flow Director\n");
+ /* disable DCB if number of TCs exceeds 1 */
+ if (netdev_get_num_tc(adapter->netdev) > 1) {
+ e_err(probe, "num TCs exceeds number of queues - disabling DCB\n");
+ netdev_reset_tc(adapter->netdev);
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ adapter->temp_dcb_cfg.pfc_mode_enable = false;
+ adapter->dcb_cfg.pfc_mode_enable = false;
}
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
- adapter->atr_sample_rate = 0;
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- ixgbe_disable_sriov(adapter);
+ adapter->dcb_cfg.num_tcs.pg_tcs = 1;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
+
+ /* disable SR-IOV */
+ ixgbe_disable_sriov(adapter);
- err = ixgbe_set_num_queues(adapter);
- if (err)
- return err;
+ /* disable RSS */
+ adapter->ring_feature[RING_F_RSS].limit = 1;
+
+ ixgbe_set_num_queues(adapter);
+ adapter->num_q_vectors = 1;
err = pci_enable_msi(adapter->pdev);
- if (!err) {
- adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
- } else {
+ if (err) {
netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
"Unable to allocate MSI interrupt, "
"falling back to legacy. Error: %d\n", err);
- /* reset err */
- err = 0;
+ return;
}
-
-out:
- return err;
+ adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}
/**
@@ -874,15 +1114,10 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
int err;
/* Number of supported queues */
- err = ixgbe_set_num_queues(adapter);
- if (err)
- return err;
+ ixgbe_set_num_queues(adapter);
- err = ixgbe_set_interrupt_capability(adapter);
- if (err) {
- e_dev_err("Unable to setup interrupt capabilities\n");
- goto err_set_interrupt;
- }
+ /* Set interrupt mode */
+ ixgbe_set_interrupt_capability(adapter);
err = ixgbe_alloc_q_vectors(adapter);
if (err) {
@@ -902,7 +1137,6 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
err_alloc_q_vectors:
ixgbe_reset_interrupt_capability(adapter);
-err_set_interrupt:
return err;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bf20457..3b6784c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -516,7 +516,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}
-/*
+/**
* ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
* @adapter: pointer to adapter struct
* @direction: 0 for Rx, 1 for Tx, -1 for other causes
@@ -790,12 +790,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
total_packets += tx_buffer->gso_segs;
#ifdef CONFIG_IXGBE_PTP
- if (unlikely(tx_buffer->tx_flags &
- IXGBE_TX_FLAGS_TSTAMP))
- ixgbe_ptp_tx_hwtstamp(q_vector,
- tx_buffer->skb);
-
+ if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
+ ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);
#endif
+
/* free the skb */
dev_kfree_skb_any(tx_buffer->skb);
@@ -995,7 +993,6 @@ out_no_update:
static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
- int num_q_vectors;
int i;
if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
@@ -1004,12 +1001,7 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
/* always use CB2 mode, difference is masked in the CB driver */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- else
- num_q_vectors = 1;
-
- for (i = 0; i < num_q_vectors; i++) {
+ for (i = 0; i < adapter->num_q_vectors; i++) {
adapter->q_vector[i]->cpu = -1;
ixgbe_update_dca(adapter->q_vector[i]);
}
@@ -1058,17 +1050,17 @@ static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
#ifdef IXGBE_FCOE
/**
* ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
- * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
* @rx_desc: advanced rx descriptor
*
* Returns : true if it is FCoE pkt
*/
-static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
+static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring,
union ixgbe_adv_rx_desc *rx_desc)
{
__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
- return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+ return test_bit(__IXGBE_RX_FCOE, &ring->state) &&
((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
(cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
@@ -1148,7 +1140,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
/* alloc new page for storage */
if (likely(!page)) {
- page = alloc_pages(GFP_ATOMIC | __GFP_COLD,
+ page = alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP,
ixgbe_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_rx_page_failed++;
@@ -1390,6 +1382,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ struct net_device *dev = rx_ring->netdev;
+
ixgbe_update_rsc_stats(rx_ring, skb);
ixgbe_rx_hash(rx_ring, rx_desc, skb);
@@ -1397,18 +1391,18 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
ixgbe_rx_checksum(rx_ring, rx_desc, skb);
#ifdef CONFIG_IXGBE_PTP
- if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))
- ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
+ ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
#endif
- if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
+ if ((dev->features & NETIF_F_HW_VLAN_RX) &&
+ ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
__vlan_hwaccel_put_tag(skb, vid);
}
skb_record_rx_queue(skb, rx_ring->queue_index);
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ skb->protocol = eth_type_trans(skb, dev);
}
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -1523,8 +1517,8 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
* 60 bytes if the skb->len is less than 60 for skb_pad.
*/
pull_len = skb_frag_size(frag);
- if (pull_len > 256)
- pull_len = ixgbe_get_headlen(va, pull_len);
+ if (pull_len > IXGBE_RX_HDR_SIZE)
+ pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
@@ -1546,6 +1540,12 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
skb->truesize -= ixgbe_rx_bufsz(rx_ring);
}
+#ifdef IXGBE_FCOE
+ /* do not attempt to pad FCoE Frames as this will disrupt DDP */
+ if (ixgbe_rx_is_fcoe(rx_ring, rx_desc))
+ return false;
+
+#endif
/* if skb_pad returns an error the skb was freed */
if (unlikely(skb->len < 60)) {
int pad_len = 60 - skb->len;
@@ -1772,7 +1772,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
#ifdef IXGBE_FCOE
/* if ddp, not passing to ULD unless for FCP_RSP or error */
- if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
+ if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
if (!ddp_bytes) {
dev_kfree_skb_any(skb);
@@ -1825,11 +1825,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
struct ixgbe_q_vector *q_vector;
- int q_vectors, v_idx;
+ int v_idx;
u32 mask;
- q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
/* Populate MSIX to EITR Select */
if (adapter->num_vfs > 32) {
u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
@@ -1840,7 +1838,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
* Populate the IVAR table and set the ITR values to the
* corresponding register.
*/
- for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
struct ixgbe_ring *ring;
q_vector = adapter->q_vector[v_idx];
@@ -2404,11 +2402,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
int vector, err;
int ri = 0, ti = 0;
- for (vector = 0; vector < q_vectors; vector++) {
+ for (vector = 0; vector < adapter->num_q_vectors; vector++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
struct msix_entry *entry = &adapter->msix_entries[vector];
@@ -2563,30 +2560,28 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- int i, q_vectors;
+ int vector;
- q_vectors = adapter->num_msix_vectors;
- i = q_vectors - 1;
- free_irq(adapter->msix_entries[i].vector, adapter);
- i--;
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+ free_irq(adapter->pdev->irq, adapter);
+ return;
+ }
- for (; i >= 0; i--) {
- /* free only the irqs that were actually requested */
- if (!adapter->q_vector[i]->rx.ring &&
- !adapter->q_vector[i]->tx.ring)
- continue;
+ for (vector = 0; vector < adapter->num_q_vectors; vector++) {
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
+ struct msix_entry *entry = &adapter->msix_entries[vector];
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(adapter->msix_entries[i].vector,
- NULL);
+ /* free only the irqs that were actually requested */
+ if (!q_vector->rx.ring && !q_vector->tx.ring)
+ continue;
- free_irq(adapter->msix_entries[i].vector,
- adapter->q_vector[i]);
- }
- } else {
- free_irq(adapter->pdev->irq, adapter);
+ /* clear the affinity_mask in the IRQ descriptor */
+ irq_set_affinity_hint(entry->vector, NULL);
+
+ free_irq(entry->vector, q_vector);
}
+
+ free_irq(adapter->msix_entries[vector++].vector, adapter);
}
/**
@@ -2610,9 +2605,12 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
}
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- int i;
- for (i = 0; i < adapter->num_msix_vectors; i++)
- synchronize_irq(adapter->msix_entries[i].vector);
+ int vector;
+
+ for (vector = 0; vector < adapter->num_q_vectors; vector++)
+ synchronize_irq(adapter->msix_entries[vector].vector);
+
+ synchronize_irq(adapter->msix_entries[vector++].vector);
} else {
synchronize_irq(adapter->pdev->irq);
}
@@ -2690,8 +2688,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
32; /* PTHRESH = 32 */
/* reinitialize flowdirector state */
- if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
- adapter->atr_sample_rate) {
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
ring->atr_sample_rate = adapter->atr_sample_rate;
ring->atr_count = 0;
set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
@@ -2721,8 +2718,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 rttdcs;
- u32 reg;
+ u32 rttdcs, mtqc;
u8 tcs = netdev_get_num_tc(adapter->netdev);
if (hw->mac.type == ixgbe_mac_82598EB)
@@ -2734,28 +2730,32 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
/* set transmit pool layout */
- switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- case (IXGBE_FLAG_SRIOV_ENABLED):
- IXGBE_WRITE_REG(hw, IXGBE_MTQC,
- (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
- break;
- default:
- if (!tcs)
- reg = IXGBE_MTQC_64Q_1PB;
- else if (tcs <= 4)
- reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ mtqc = IXGBE_MTQC_VT_ENA;
+ if (tcs > 4)
+ mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+ else if (tcs > 1)
+ mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ else if (adapter->ring_feature[RING_F_RSS].indices == 4)
+ mtqc |= IXGBE_MTQC_32VF;
else
- reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+ mtqc |= IXGBE_MTQC_64VF;
+ } else {
+ if (tcs > 4)
+ mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+ else if (tcs > 1)
+ mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ else
+ mtqc = IXGBE_MTQC_64Q_1PB;
+ }
- IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
- /* Enable Security TX Buffer IFG for multiple pb */
- if (tcs) {
- reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
- reg |= IXGBE_SECTX_DCB;
- IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
- }
- break;
+ /* Enable Security TX Buffer IFG for multiple pb */
+ if (tcs) {
+ u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ sectx |= IXGBE_SECTX_DCB;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
}
/* re-enable the arbiter */
@@ -2849,40 +2849,34 @@ static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring)
{
+ struct ixgbe_hw *hw = &adapter->hw;
u32 srrctl;
u8 reg_idx = rx_ring->reg_idx;
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82598EB: {
- struct ixgbe_ring_feature *feature = adapter->ring_feature;
- const int mask = feature[RING_F_RSS].mask;
- reg_idx = reg_idx & mask;
- }
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- default:
- break;
- }
-
- srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ u16 mask = adapter->ring_feature[RING_F_RSS].mask;
- srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
- srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
- if (adapter->num_vfs)
- srrctl |= IXGBE_SRRCTL_DROP_EN;
+ /*
+ * if VMDq is not active we must program one srrctl register
+ * per RSS queue since we have enabled RDRXCTL.MVMEN
+ */
+ reg_idx &= mask;
+ }
- srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK;
+ /* configure header buffer length, needed for RSC */
+ srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ /* configure the packet buffer length */
#if PAGE_SIZE > IXGBE_MAX_RXBUFFER
srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#else
srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#endif
+
+ /* configure descriptor type */
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
+ IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}
static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
@@ -2894,11 +2888,15 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
u32 mrqc = 0, reta = 0;
u32 rxcsum;
int i, j;
- u8 tcs = netdev_get_num_tc(adapter->netdev);
- int maxq = adapter->ring_feature[RING_F_RSS].indices;
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
- if (tcs)
- maxq = min(maxq, adapter->num_tx_queues / tcs);
+ /*
+ * Program table for at least 2 queues w/ SR-IOV so that VFs can
+ * make full use of any rings they may have. We will use the
+ * PSRTYPE register to control how many rings we use within the PF.
+ */
+ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
+ rss_i = 2;
/* Fill out hash function seeds */
for (i = 0; i < 10; i++)
@@ -2906,7 +2904,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
/* Fill out redirection table */
for (i = 0, j = 0; i < 128; i++, j++) {
- if (j == maxq)
+ if (j == rss_i)
j = 0;
/* reta = 4-byte sliding window of
* 0x00..(indices-1)(indices-1)00..etc. */
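For illustration of the sliding-window comment above, here is a standalone sketch (plain C, not driver code) of the byte pattern that comment describes; the queue count rss_i = 4 is an assumption chosen for the example.

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch of the 4-byte RETA sliding window: each byte is
 * j * 0x11 with j cycling through 0..rss_i-1, and one 32-bit word is
 * emitted every four entries. Not driver code; rss_i is assumed. */
int main(void)
{
        uint32_t reta = 0;
        unsigned int i, j, rss_i = 4;

        for (i = 0, j = 0; i < 8; i++, j++) {
                if (j == rss_i)
                        j = 0;
                reta = (reta << 8) | (j * 0x11);
                if ((i & 3) == 3)
                        /* with rss_i = 4 this prints 0x00112233 both times */
                        printf("RETA[%u] = 0x%08x\n", i >> 2, reta);
        }
        return 0;
}

With four RSS queues every 4-byte window holds the pattern 0x00112233, so hash results are spread evenly across queues 0 through 3.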
@@ -2920,35 +2918,36 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
rxcsum |= IXGBE_RXCSUM_PCSD;
IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
- if (adapter->hw.mac.type == ixgbe_mac_82598EB &&
- (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
- mrqc = IXGBE_MRQC_RSSEN;
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ if (adapter->ring_feature[RING_F_RSS].mask)
+ mrqc = IXGBE_MRQC_RSSEN;
} else {
- int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
- | IXGBE_FLAG_SRIOV_ENABLED);
-
- switch (mask) {
- case (IXGBE_FLAG_RSS_ENABLED):
- if (!tcs)
- mrqc = IXGBE_MRQC_RSSEN;
- else if (tcs <= 4)
- mrqc = IXGBE_MRQC_RTRSS4TCEN;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ if (tcs > 4)
+ mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */
+ else if (tcs > 1)
+ mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */
+ else if (adapter->ring_feature[RING_F_RSS].indices == 4)
+ mrqc = IXGBE_MRQC_VMDQRSS32EN;
else
+ mrqc = IXGBE_MRQC_VMDQRSS64EN;
+ } else {
+ if (tcs > 4)
mrqc = IXGBE_MRQC_RTRSS8TCEN;
- break;
- case (IXGBE_FLAG_SRIOV_ENABLED):
- mrqc = IXGBE_MRQC_VMDQEN;
- break;
- default:
- break;
+ else if (tcs > 1)
+ mrqc = IXGBE_MRQC_RTRSS4TCEN;
+ else
+ mrqc = IXGBE_MRQC_RSSEN;
}
}
/* Perform hash on these packet types */
- mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
- | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
- | IXGBE_MRQC_RSS_FIELD_IPV6
- | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 |
+ IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
+ IXGBE_MRQC_RSS_FIELD_IPV6 |
+ IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
@@ -3099,6 +3098,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ int rss_i = adapter->ring_feature[RING_F_RSS].indices;
int p;
/* PSRTYPE must be initialized in non 82598 adapters */
@@ -3111,58 +3111,69 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
if (hw->mac.type == ixgbe_mac_82598EB)
return;
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
- psrtype |= (adapter->num_rx_queues_per_pool << 29);
+ if (rss_i > 3)
+ psrtype |= 2 << 29;
+ else if (rss_i > 1)
+ psrtype |= 1 << 29;
for (p = 0; p < adapter->num_rx_pools; p++)
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
+ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)),
psrtype);
}
static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 gcr_ext;
- u32 vt_reg_bits;
u32 reg_offset, vf_shift;
- u32 vmdctl;
+ u32 gcr_ext, vmdctl;
int i;
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return;
vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
- vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
- vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
- IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
+ vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
+ vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
+ vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
+ vmdctl |= IXGBE_VT_CTL_REPLEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
- vf_shift = adapter->num_vfs % 32;
- reg_offset = (adapter->num_vfs >= 32) ? 1 : 0;
+ vf_shift = VMDQ_P(0) % 32;
+ reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
/* Enable only the PF's pool for Tx/Rx */
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
- hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+ hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
/*
* Set up VF register offsets for selected VT Mode,
* i.e. 32 or 64 VFs for SR-IOV
*/
- gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
- gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
- gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
+ switch (adapter->ring_feature[RING_F_VMDQ].mask) {
+ case IXGBE_82599_VMDQ_8Q_MASK:
+ gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
+ break;
+ case IXGBE_82599_VMDQ_4Q_MASK:
+ gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
+ break;
+ default:
+ gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
+ break;
+ }
+
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
/* enable Tx loopback for VF/PF communication */
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+
/* Enable MAC Anti-Spoofing */
- hw->mac.ops.set_mac_anti_spoofing(hw,
- (adapter->num_vfs != 0),
+ hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
adapter->num_vfs);
/* For VFs that have spoof checking turned off */
for (i = 0; i < adapter->num_vfs; i++) {
@@ -3298,10 +3309,9 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- int pool_ndx = adapter->num_vfs;
/* add VID to filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
+ hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true);
set_bit(vid, adapter->active_vlans);
return 0;
@@ -3311,10 +3321,9 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- int pool_ndx = adapter->num_vfs;
/* remove VID from filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
+ hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false);
clear_bit(vid, adapter->active_vlans);
return 0;
@@ -3432,15 +3441,18 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- unsigned int vfn = adapter->num_vfs;
- unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS;
+ unsigned int rar_entries = hw->mac.num_rar_entries - 1;
int count = 0;
+ /* In SR-IOV mode significantly fewer RAR entries are available */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
+
/* return ENOMEM indicating insufficient memory for addresses */
if (netdev_uc_count(netdev) > rar_entries)
return -ENOMEM;
- if (!netdev_uc_empty(netdev) && rar_entries) {
+ if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;
/* return error if we do not support writing to RAR table */
if (!hw->mac.ops.set_rar)
@@ -3450,7 +3462,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev)
if (!rar_entries)
break;
hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
- vfn, IXGBE_RAH_AV);
+ VMDQ_P(0), IXGBE_RAH_AV);
count++;
}
}
@@ -3524,12 +3536,14 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
vmolr |= IXGBE_VMOLR_ROPE;
}
- if (adapter->num_vfs) {
+ if (adapter->num_vfs)
ixgbe_restore_vf_multicasts(adapter);
- vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_ROPE);
- IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
}
/* This is useful for sniffing bad packets. */
@@ -3555,37 +3569,21 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
int q_idx;
- struct ixgbe_q_vector *q_vector;
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- /* legacy and MSI only use one vector */
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
- q_vectors = 1;
-
- for (q_idx = 0; q_idx < q_vectors; q_idx++) {
- q_vector = adapter->q_vector[q_idx];
- napi_enable(&q_vector->napi);
- }
+ for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+ napi_enable(&adapter->q_vector[q_idx]->napi);
}
static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
int q_idx;
- struct ixgbe_q_vector *q_vector;
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
- /* legacy and MSI only use one vector */
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
- q_vectors = 1;
- for (q_idx = 0; q_idx < q_vectors; q_idx++) {
- q_vector = adapter->q_vector[q_idx];
- napi_disable(&q_vector->napi);
- }
+ for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+ napi_disable(&adapter->q_vector[q_idx]->napi);
}
#ifdef CONFIG_IXGBE_DCB
-/*
+/**
* ixgbe_configure_dcb - Configure DCB hardware
* @adapter: ixgbe adapter struct
*
@@ -3607,10 +3605,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
if (hw->mac.type == ixgbe_mac_82598EB)
netif_set_gso_max_size(adapter->netdev, 32768);
-
- /* Enable VLAN tag insert/strip */
- adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
-
hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
#ifdef IXGBE_FCOE
@@ -3636,19 +3630,16 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
/* Enable RSS Hash per TC */
if (hw->mac.type != ixgbe_mac_82598EB) {
- int i;
- u32 reg = 0;
-
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- u8 msb = 0;
- u8 cnt = adapter->netdev->tc_to_txq[i].count;
-
- while (cnt >>= 1)
- msb++;
+ u32 msb = 0;
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
- reg |= msb << IXGBE_RQTC_SHIFT_TC(i);
+ while (rss_i) {
+ msb++;
+ rss_i >>= 1;
}
- IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg);
+
+ /* write msb to all 8 TCs in one write */
+ IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
}
}
#endif
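As a worked example of the msb calculation and the 0x11111111 replication above, the following standalone sketch (not driver code) shows the arithmetic; rss_i = 16 is an assumed value.

#include <stdio.h>

/* Standalone sketch: derive the queue-count exponent from rss_i - 1
 * and replicate it into all eight 4-bit RQTC fields in one multiply.
 * Not driver code; rss_i = 16 is an assumption for the example. */
int main(void)
{
        unsigned int rss_i = 16;
        unsigned int tmp = rss_i - 1;   /* 15 -> four shifts -> msb = 4 */
        unsigned int msb = 0;

        while (tmp) {
                msb++;
                tmp >>= 1;
        }

        /* prints RQTC = 0x44444444 (msb = 4 encodes the 16 RSS indices) */
        printf("RQTC = 0x%08x\n", msb * 0x11111111);
        return 0;
}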
@@ -3656,11 +3647,11 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
/* Additional bittime to account for IXGBE framing */
#define IXGBE_ETH_FRAMING 20
-/*
+/**
* ixgbe_hpbthresh - calculate high water mark for flow control
*
* @adapter: board private structure to calculate for
- * @pb - packet buffer to calculate
+ * @pb: packet buffer to calculate
*/
static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
{
@@ -3674,18 +3665,12 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
#ifdef IXGBE_FCOE
/* FCoE traffic class uses FCOE jumbo frames */
- if (dev->features & NETIF_F_FCOE_MTU) {
- int fcoe_pb = 0;
-
-#ifdef CONFIG_IXGBE_DCB
- fcoe_pb = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
+ if ((dev->features & NETIF_F_FCOE_MTU) &&
+ (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+ (pb == ixgbe_fcoe_get_tc(adapter)))
+ tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif
- if (fcoe_pb == pb && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE)
- tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
- }
-#endif
-
/* Calculate delay value for device */
switch (hw->mac.type) {
case ixgbe_mac_X540:
@@ -3720,11 +3705,11 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
return marker;
}
-/*
+/**
* ixgbe_lpbthresh - calculate low water mark for flow control
*
* @adapter: board private structure to calculate for
- * @pb - packet buffer to calculate
+ * @pb: packet buffer to calculate
*/
static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
{
@@ -3825,12 +3810,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_set_rx_mode(adapter->netdev);
ixgbe_restore_vlan(adapter);
-#ifdef IXGBE_FCOE
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
- ixgbe_configure_fcoe(adapter);
-
-#endif /* IXGBE_FCOE */
-
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
@@ -3860,6 +3839,11 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_configure_virtualization(adapter);
+#ifdef IXGBE_FCOE
+ /* configure FCoE L2 filters, redirection table, and Rx control */
+ ixgbe_configure_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
ixgbe_configure_tx(adapter);
ixgbe_configure_rx(adapter);
}
@@ -3968,7 +3952,18 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
gpie &= ~IXGBE_GPIE_VTMODE_MASK;
- gpie |= IXGBE_GPIE_VTMODE_64;
+
+ switch (adapter->ring_feature[RING_F_VMDQ].mask) {
+ case IXGBE_82599_VMDQ_8Q_MASK:
+ gpie |= IXGBE_GPIE_VTMODE_16;
+ break;
+ case IXGBE_82599_VMDQ_4Q_MASK:
+ gpie |= IXGBE_GPIE_VTMODE_32;
+ break;
+ default:
+ gpie |= IXGBE_GPIE_VTMODE_64;
+ break;
+ }
}
/* Enable Thermal over heat sensor interrupt */
@@ -4126,8 +4121,11 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
/* reprogram the RAR[0] in case user changed it. */
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
- IXGBE_RAH_AV);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
+
+ /* update SAN MAC vmdq pool selection */
+ if (hw->mac.san_mac_rar_index)
+ hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
}
/**
@@ -4408,32 +4406,29 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
/* Set capability flags */
rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
- adapter->ring_feature[RING_F_RSS].indices = rss;
- adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
+ adapter->ring_feature[RING_F_RSS].limit = rss;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
if (hw->device_id == IXGBE_DEV_ID_82598AT)
adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
- adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
+ adapter->max_q_vectors = MAX_Q_VECTORS_82598;
break;
case ixgbe_mac_X540:
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
case ixgbe_mac_82599EB:
- adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
+ adapter->max_q_vectors = MAX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
/* Flow Director hash filters enabled */
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->atr_sample_rate = 20;
- adapter->ring_feature[RING_F_FDIR].indices =
+ adapter->ring_feature[RING_F_FDIR].limit =
IXGBE_MAX_FDIR_INDICES;
adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
- adapter->ring_feature[RING_F_FCOE].indices = 0;
#ifdef CONFIG_IXGBE_DCB
/* Default traffic class to use for FCoE */
adapter->fcoe.up = IXGBE_FCOE_DEFTC;
@@ -4444,6 +4439,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
break;
}
+#ifdef IXGBE_FCOE
+ /* FCoE support exists, always init the FCoE lock */
+ spin_lock_init(&adapter->fcoe.lock);
+
+#endif
/* n-tuple support exists, always init our spinlock */
spin_lock_init(&adapter->fdir_perfect_lock);
@@ -4492,6 +4492,12 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->fc.send_xon = true;
hw->fc.disable_fc_autoneg = false;
+#ifdef CONFIG_PCI_IOV
+ /* assign number of SR-IOV VFs */
+ if (hw->mac.type != ixgbe_mac_82598EB)
+ adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
+
+#endif
/* enable itr by default in dynamic mode */
adapter->rx_itr_setting = 1;
adapter->tx_itr_setting = 1;
@@ -4583,10 +4589,16 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
if (!err)
continue;
+
e_err(probe, "Allocation for Tx Queue %u failed\n", i);
- break;
+ goto err_setup_tx;
}
+ return 0;
+err_setup_tx:
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ ixgbe_free_tx_resources(adapter->tx_ring[i]);
return err;
}
@@ -4661,10 +4673,20 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
if (!err)
continue;
+
e_err(probe, "Allocation for Rx Queue %u failed\n", i);
- break;
+ goto err_setup_rx;
}
+#ifdef IXGBE_FCOE
+ err = ixgbe_setup_fcoe_ddp_resources(adapter);
+ if (!err)
+#endif
+ return 0;
+err_setup_rx:
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ ixgbe_free_rx_resources(adapter->rx_ring[i]);
return err;
}
@@ -4739,6 +4761,10 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
int i;
+#ifdef IXGBE_FCOE
+ ixgbe_free_fcoe_ddp_resources(adapter);
+
+#endif
for (i = 0; i < adapter->num_rx_queues; i++)
if (adapter->rx_ring[i]->desc)
ixgbe_free_rx_resources(adapter->rx_ring[i]);
@@ -4820,15 +4846,31 @@ static int ixgbe_open(struct net_device *netdev)
if (err)
goto err_req_irq;
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(netdev,
+ adapter->num_rx_pools > 1 ? 1 :
+ adapter->num_tx_queues);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(netdev,
+ adapter->num_rx_pools > 1 ? 1 :
+ adapter->num_rx_queues);
+ if (err)
+ goto err_set_queues;
+
ixgbe_up_complete(adapter);
return 0;
+err_set_queues:
+ ixgbe_free_irq(adapter);
err_req_irq:
-err_setup_rx:
ixgbe_free_all_rx_resources(adapter);
-err_setup_tx:
+err_setup_rx:
ixgbe_free_all_tx_resources(adapter);
+err_setup_tx:
ixgbe_reset(adapter);
return err;
@@ -4886,23 +4928,19 @@ static int ixgbe_resume(struct pci_dev *pdev)
pci_wake_from_d3(pdev, false);
- rtnl_lock();
- err = ixgbe_init_interrupt_scheme(adapter);
- rtnl_unlock();
- if (err) {
- e_dev_err("Cannot initialize interrupts for device\n");
- return err;
- }
-
ixgbe_reset(adapter);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
- if (netif_running(netdev)) {
+ rtnl_lock();
+ err = ixgbe_init_interrupt_scheme(adapter);
+ if (!err && netif_running(netdev))
err = ixgbe_open(netdev);
- if (err)
- return err;
- }
+
+ rtnl_unlock();
+
+ if (err)
+ return err;
netif_device_attach(netdev);
@@ -5038,11 +5076,6 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
-#ifdef IXGBE_FCOE
- struct ixgbe_fcoe *fcoe = &adapter->fcoe;
- unsigned int cpu;
- u64 fcoe_noddp_counts_sum = 0, fcoe_noddp_ext_buff_counts_sum = 0;
-#endif /* IXGBE_FCOE */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5173,17 +5206,19 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
/* Add up per cpu counters for total ddp alloc fail */
- if (fcoe->pcpu_noddp && fcoe->pcpu_noddp_ext_buff) {
+ if (adapter->fcoe.ddp_pool) {
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ struct ixgbe_fcoe_ddp_pool *ddp_pool;
+ unsigned int cpu;
+ u64 noddp = 0, noddp_ext_buff = 0;
for_each_possible_cpu(cpu) {
- fcoe_noddp_counts_sum +=
- *per_cpu_ptr(fcoe->pcpu_noddp, cpu);
- fcoe_noddp_ext_buff_counts_sum +=
- *per_cpu_ptr(fcoe->
- pcpu_noddp_ext_buff, cpu);
+ ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+ noddp += ddp_pool->noddp;
+ noddp_ext_buff += ddp_pool->noddp_ext_buff;
}
+ hwstats->fcoe_noddp = noddp;
+ hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
}
- hwstats->fcoe_noddp = fcoe_noddp_counts_sum;
- hwstats->fcoe_noddp_ext_buff = fcoe_noddp_ext_buff_counts_sum;
#endif /* IXGBE_FCOE */
break;
default:
@@ -5241,7 +5276,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
/**
* ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
**/
static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
{
@@ -5277,7 +5312,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
/**
* ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
*
* This function serves two purposes. First it strobes the interrupt lines
* in order to make certain interrupts are occurring. Secondly it sets the
@@ -5311,7 +5346,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
} else {
/* get one bit for every active tx/rx interrupt vector */
- for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+ for (i = 0; i < adapter->num_q_vectors; i++) {
struct ixgbe_q_vector *qv = adapter->q_vector[i];
if (qv->rx.ring || qv->tx.ring)
eics |= ((u64)1 << i);
@@ -5325,8 +5360,8 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
/**
* ixgbe_watchdog_update_link - update the link status
- * @adapter - pointer to the device adapter structure
- * @link_speed - pointer to a u32 to store the link_speed
+ * @adapter: pointer to the device adapter structure
+ * @link_speed: pointer to a u32 to store the link_speed
**/
static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
{
@@ -5369,7 +5404,7 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
/**
* ixgbe_watchdog_link_is_up - update netif_carrier status and
* print link up message
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
**/
static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
{
@@ -5424,12 +5459,15 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
netif_carrier_on(netdev);
ixgbe_check_vf_rate_limit(adapter);
+
+ /* ping all the active vfs to let them know link has changed */
+ ixgbe_ping_all_vfs(adapter);
}
/**
* ixgbe_watchdog_link_is_down - update netif_carrier status and
* print link down message
- * @adapter - pointer to the adapter structure
+ * @adapter: pointer to the adapter structure
**/
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
{
@@ -5453,11 +5491,14 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
e_info(drv, "NIC Link is Down\n");
netif_carrier_off(netdev);
+
+ /* ping all the active vfs to let them know link has changed */
+ ixgbe_ping_all_vfs(adapter);
}
/**
* ixgbe_watchdog_flush_tx - flush queues on link down
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
**/
static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
{
@@ -5506,7 +5547,7 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
/**
* ixgbe_watchdog_subtask - check and bring link up
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
**/
static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
{
@@ -5530,7 +5571,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
/**
* ixgbe_sfp_detection_subtask - poll for SFP+ cable
- * @adapter - the ixgbe adapter structure
+ * @adapter: the ixgbe adapter structure
**/
static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
{
@@ -5597,7 +5638,7 @@ sfp_out:
/**
* ixgbe_sfp_link_config_subtask - set up link SFP after module install
- * @adapter - the ixgbe adapter structure
+ * @adapter: the ixgbe adapter structure
**/
static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
{
@@ -6228,8 +6269,14 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
if (((protocol == htons(ETH_P_FCOE)) ||
(protocol == htons(ETH_P_FIP))) &&
(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
- txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
- txq += adapter->ring_feature[RING_F_FCOE].mask;
+ struct ixgbe_ring_feature *f;
+
+ f = &adapter->ring_feature[RING_F_FCOE];
+
+ while (txq >= f->indices)
+ txq -= f->indices;
+ txq += adapter->ring_feature[RING_F_FCOE].offset;
+
return txq;
}
#endif
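The loop above folds whatever queue the stack picked into the FCoE ring range [offset, offset + indices); a standalone sketch with assumed values makes the arithmetic concrete (not driver code).

#include <stdio.h>

/* Standalone sketch of folding a stack-selected queue index into the
 * FCoE ring range. indices = 4 and offset = 8 are assumptions. */
int main(void)
{
        unsigned int txq = 13;          /* queue picked by the stack */
        unsigned int indices = 4;       /* assumed FCoE ring count */
        unsigned int offset = 8;        /* assumed first FCoE ring index */

        while (txq >= indices)
                txq -= indices;         /* 13 -> 9 -> 5 -> 1 */
        txq += offset;

        printf("txq = %u\n", txq);      /* prints txq = 9 */
        return 0;
}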
@@ -6343,7 +6390,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
#ifdef IXGBE_FCOE
/* setup tx offload for FCoE */
if ((protocol == __constant_htons(ETH_P_FCOE)) &&
- (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
+ (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
tso = ixgbe_fso(tx_ring, first, &hdr_len);
if (tso < 0)
goto out_drop;
@@ -6384,17 +6431,12 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *tx_ring;
- if (skb->len <= 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
/*
* The minimum packet size for olinfo paylen is 17 so pad the skb
* in order to meet this minimum size requirement.
*/
- if (skb->len < 17) {
- if (skb_padto(skb, 17))
+ if (unlikely(skb->len < 17)) {
+ if (skb_pad(skb, 17 - skb->len))
return NETDEV_TX_OK;
skb->len = 17;
}
@@ -6422,8 +6464,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
- IXGBE_RAH_AV);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
return 0;
}
@@ -6480,12 +6521,15 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
int err = 0;
struct ixgbe_adapter *adapter = netdev_priv(dev);
- struct ixgbe_mac_info *mac = &adapter->hw.mac;
+ struct ixgbe_hw *hw = &adapter->hw;
- if (is_valid_ether_addr(mac->san_addr)) {
+ if (is_valid_ether_addr(hw->mac.san_addr)) {
rtnl_lock();
- err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
+ err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
rtnl_unlock();
+
+ /* update SAN MAC vmdq pool selection */
+ hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
}
return err;
}
@@ -6528,11 +6572,8 @@ static void ixgbe_netpoll(struct net_device *netdev)
adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- for (i = 0; i < num_q_vectors; i++) {
- struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
- ixgbe_msix_clean_rings(0, q_vector);
- }
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
} else {
ixgbe_intr(adapter->pdev->irq, netdev);
}
@@ -6589,8 +6630,9 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
}
#ifdef CONFIG_IXGBE_DCB
-/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
- * #adapter: pointer to ixgbe_adapter
+/**
+ * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
+ * @adapter: pointer to ixgbe_adapter
* @tc: number of traffic classes currently enabled
*
* Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm
@@ -6625,8 +6667,33 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
return;
}
-/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
- * classes.
+/**
+ * ixgbe_set_prio_tc_map - Configure netdev prio tc map
+ * @adapter: Pointer to adapter struct
+ *
+ * Populate the netdev user priority to tc map
+ */
+static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
+{
+ struct net_device *dev = adapter->netdev;
+ struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
+ struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
+ u8 prio;
+
+ for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
+ u8 tc = 0;
+
+ if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
+ tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
+ else if (ets)
+ tc = ets->prio_tc[prio];
+
+ netdev_set_prio_tc_map(dev, prio, tc);
+ }
+}
+
+/**
+ * ixgbe_setup_tc - configure net_device for multiple traffic classes
*
* @netdev: net device to configure
* @tc: number of traffic classes to enable
@@ -6636,12 +6703,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- /* Multiple traffic classes requires multiple queues */
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
- e_err(drv, "Enable failed, needs MSI-X\n");
- return -EINVAL;
- }
-
/* Hardware supports up to 8 traffic classes */
if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
(hw->mac.type == ixgbe_mac_82598EB &&
@@ -6658,8 +6719,9 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
if (tc) {
netdev_set_num_tc(dev, tc);
+ ixgbe_set_prio_tc_map(adapter);
+
adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
@@ -6667,11 +6729,11 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
}
} else {
netdev_reset_tc(dev);
+
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->temp_dcb_cfg.pfc_mode_enable = false;
adapter->dcb_cfg.pfc_mode_enable = false;
@@ -6701,15 +6763,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_DCB
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
- features &= ~NETIF_F_HW_VLAN_RX;
-#endif
-
- /* return error if RXHASH is being enabled when RSS is not supported */
- if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
- features &= ~NETIF_F_RXHASH;
-
/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
if (!(features & NETIF_F_RXCSUM))
features &= ~NETIF_F_LRO;
@@ -6718,7 +6771,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
features &= ~NETIF_F_LRO;
-
return features;
}
@@ -6750,22 +6802,47 @@ static int ixgbe_set_features(struct net_device *netdev,
* Check if Flow Director n-tuple support was enabled or disabled. If
* the state changed, we need to reset.
*/
- if (!(features & NETIF_F_NTUPLE)) {
- if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
- /* turn off Flow Director, set ATR and reset */
- if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
- !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
- need_reset = true;
- }
- adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
- } else if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+ switch (features & NETIF_F_NTUPLE) {
+ case NETIF_F_NTUPLE:
/* turn off ATR, enable perfect filters and reset */
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ need_reset = true;
+
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
- need_reset = true;
+ break;
+ default:
+ /* turn off perfect filters, enable ATR and reset */
+ if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+ need_reset = true;
+
+ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+
+ /* We cannot enable ATR if SR-IOV is enabled */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ break;
+
+ /* We cannot enable ATR if we have 2 or more traffic classes */
+ if (netdev_get_num_tc(netdev) > 1)
+ break;
+
+ /* We cannot enable ATR if RSS is disabled */
+ if (adapter->ring_feature[RING_F_RSS].limit <= 1)
+ break;
+
+ /* A sample rate of 0 indicates ATR disabled */
+ if (!adapter->atr_sample_rate)
+ break;
+
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ break;
}
+ if (features & NETIF_F_HW_VLAN_RX)
+ ixgbe_vlan_strip_enable(adapter);
+ else
+ ixgbe_vlan_strip_disable(adapter);
+
if (changed & NETIF_F_RXALL)
need_reset = true;
@@ -6782,7 +6859,10 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
u16 flags)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- int err = -EOPNOTSUPP;
+ int err;
+
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return -EOPNOTSUPP;
if (ndm->ndm_state & NUD_PERMANENT) {
pr_info("%s: FDB only supports static addresses\n",
@@ -6790,13 +6870,17 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
return -EINVAL;
}
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- if (is_unicast_ether_addr(addr))
+ if (is_unicast_ether_addr(addr)) {
+ u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS;
+
+ if (netdev_uc_count(dev) < rar_uc_entries)
err = dev_uc_add_excl(dev, addr);
- else if (is_multicast_ether_addr(addr))
- err = dev_mc_add_excl(dev, addr);
else
- err = -EINVAL;
+ err = -ENOMEM;
+ } else if (is_multicast_ether_addr(addr)) {
+ err = dev_mc_add_excl(dev, addr);
+ } else {
+ err = -EINVAL;
}
/* Only return duplicate errors if NLM_F_EXCL is set */
@@ -6885,26 +6969,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_fdb_dump = ixgbe_ndo_fdb_dump,
};
-static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
- const struct ixgbe_info *ii)
-{
-#ifdef CONFIG_PCI_IOV
- struct ixgbe_hw *hw = &adapter->hw;
-
- if (hw->mac.type == ixgbe_mac_82598EB)
- return;
-
- /* The 82599 supports up to 64 VFs per physical function
- * but this implementation limits allocation to 63 so that
- * basic networking resources are still available to the
- * physical function. If the user requests greater thn
- * 63 VFs then it is an error - reset to default of zero.
- */
- adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
- ixgbe_enable_sriov(adapter, ii);
-#endif /* CONFIG_PCI_IOV */
-}
-
/**
* ixgbe_wol_supported - Check whether device supports WoL
* @hw: hw specific details
@@ -6931,6 +6995,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
if (hw->bus.func != 0)
break;
case IXGBE_SUBDEV_ID_82599_SFP:
+ case IXGBE_SUBDEV_ID_82599_RNDC:
is_wol_supported = 1;
break;
}
@@ -6978,6 +7043,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
int i, err, pci_using_dac;
u8 part_str[IXGBE_PBANUM_LENGTH];
unsigned int indices = num_possible_cpus();
+ unsigned int dcb_max = 0;
#ifdef IXGBE_FCOE
u16 device_caps;
#endif
@@ -7027,7 +7093,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
pci_save_state(pdev);
#ifdef CONFIG_IXGBE_DCB
- indices *= MAX_TRAFFIC_CLASS;
+ if (ii->mac == ixgbe_mac_82598EB)
+ dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
+ IXGBE_MAX_RSS_INDICES);
+ else
+ dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
+ IXGBE_MAX_FDIR_INDICES);
#endif
if (ii->mac == ixgbe_mac_82598EB)
@@ -7039,6 +7110,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
indices += min_t(unsigned int, num_possible_cpus(),
IXGBE_MAX_FCOE_INDICES);
#endif
+ indices = max_t(unsigned int, dcb_max, indices);
netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
if (!netdev) {
err = -ENOMEM;
@@ -7145,8 +7217,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_sw_init;
}
- ixgbe_probe_vf(adapter, ii);
+#ifdef CONFIG_PCI_IOV
+ ixgbe_enable_sriov(adapter, ii);
+#endif
netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
@@ -7182,10 +7256,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
- IXGBE_FLAG_DCB_ENABLED);
-
#ifdef CONFIG_IXGBE_DCB
netdev->dcbnl_ops = &dcbnl_ops;
#endif
@@ -7197,11 +7267,15 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
}
- }
- if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
- netdev->vlan_features |= NETIF_F_FCOE_CRC;
- netdev->vlan_features |= NETIF_F_FSO;
- netdev->vlan_features |= NETIF_F_FCOE_MTU;
+
+ adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
+
+ netdev->features |= NETIF_F_FSO |
+ NETIF_F_FCOE_CRC;
+
+ netdev->vlan_features |= NETIF_F_FSO |
+ NETIF_F_FCOE_CRC |
+ NETIF_F_FCOE_MTU;
}
#endif /* IXGBE_FCOE */
if (pci_using_dac) {
@@ -7240,11 +7314,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err)
goto err_sw_init;
- if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
- netdev->hw_features &= ~NETIF_F_RXHASH;
- netdev->features &= ~NETIF_F_RXHASH;
- }
-
/* WOL not supported for all devices */
adapter->wol = 0;
hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
@@ -7355,8 +7424,7 @@ err_register:
ixgbe_release_hw_control(adapter);
ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- ixgbe_disable_sriov(adapter);
+ ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
iounmap(hw->hw_addr);
err_ioremap:
@@ -7403,25 +7471,13 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
ixgbe_sysfs_exit(adapter);
#endif /* CONFIG_IXGBE_HWMON */
-#ifdef IXGBE_FCOE
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
- ixgbe_cleanup_fcoe(adapter);
-
-#endif /* IXGBE_FCOE */
-
/* remove the added san mac */
ixgbe_del_sanmac_netdev(netdev);
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- if (!(ixgbe_check_vf_assignment(adapter)))
- ixgbe_disable_sriov(adapter);
- else
- e_dev_warn("Unloading driver while VFs are assigned "
- "- VFs will not be deallocated\n");
- }
+ ixgbe_disable_sriov(adapter);
ixgbe_clear_interrupt_scheme(adapter);
@@ -7512,11 +7568,11 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
}
/* Find the pci device of the offending VF */
- vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
while (vfdev) {
if (vfdev->devfn == (req_id & 0xFF))
break;
- vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
device_id, vfdev);
}
/*
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 2411770..71659ed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -907,6 +907,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
* 8 SFP_act_lmt_DA_CORE1 - 82599-specific
* 9 SFP_1g_cu_CORE0 - 82599-specific
* 10 SFP_1g_cu_CORE1 - 82599-specific
+ * 11 SFP_1g_sx_CORE0 - 82599-specific
+ * 12 SFP_1g_sx_CORE1 - 82599-specific
*/
if (hw->mac.type == ixgbe_mac_82598EB) {
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
@@ -957,6 +959,13 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type =
ixgbe_sfp_type_1g_cu_core1;
+ } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_sx_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_sx_core1;
} else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
@@ -1049,7 +1058,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
/* Verify supported 1G SFP modules */
if (comp_codes_10g == 0 &&
!(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
@@ -1064,7 +1075,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->mac.ops.get_device_caps(hw, &enforce_sfp);
if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
!((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
- (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) {
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) ||
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0) ||
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1))) {
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel) {
status = 0;
@@ -1128,10 +1141,12 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
* SR modules
*/
if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
- sfp_type == ixgbe_sfp_type_1g_cu_core0)
+ sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_sx_core0)
sfp_type = ixgbe_sfp_type_srlr_core0;
else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
- sfp_type == ixgbe_sfp_type_1g_cu_core1)
+ sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_sx_core1)
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index ddc6a4d..3456d56 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -26,6 +26,7 @@
*******************************************************************************/
#include "ixgbe.h"
#include <linux/export.h>
+#include <linux/ptp_classify.h>
/*
* The 82599 and the X540 do not have true 64bit nanosecond scale
@@ -100,9 +101,13 @@
#define NSECS_PER_SEC 1000000000ULL
#endif
+static struct sock_filter ptp_filter[] = {
+ PTP_FILTER
+};
+
/**
* ixgbe_ptp_read - read raw cycle counter (to be used by time counter)
- * @cc - the cyclecounter structure
+ * @cc: the cyclecounter structure
*
* this function reads the cyclecounter registers and is called by the
* cyclecounter structure used to construct a ns counter from the
@@ -123,8 +128,8 @@ static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc)
/**
* ixgbe_ptp_adjfreq
- * @ptp - the ptp clock structure
- * @ppb - parts per billion adjustment from base
+ * @ptp: the ptp clock structure
+ * @ppb: parts per billion adjustment from base
*
* adjust the frequency of the ptp cycle counter by the
* indicated ppb from the base frequency.
@@ -170,8 +175,8 @@ static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
/**
* ixgbe_ptp_adjtime
- * @ptp - the ptp clock structure
- * @delta - offset to adjust the cycle counter by
+ * @ptp: the ptp clock structure
+ * @delta: offset to adjust the cycle counter by
*
* adjust the timer by resetting the timecounter structure.
*/
@@ -198,8 +203,8 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
/**
* ixgbe_ptp_gettime
- * @ptp - the ptp clock structure
- * @ts - timespec structure to hold the current time value
+ * @ptp: the ptp clock structure
+ * @ts: timespec structure to hold the current time value
*
* read the timecounter and return the correct value on ns,
* after converting it into a struct timespec.
@@ -224,8 +229,8 @@ static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
/**
* ixgbe_ptp_settime
- * @ptp - the ptp clock structure
- * @ts - the timespec containing the new time for the cycle counter
+ * @ptp: the ptp clock structure
+ * @ts: the timespec containing the new time for the cycle counter
*
* reset the timecounter to use a new base value instead of the kernel
* wall timer value.
@@ -251,9 +256,9 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
/**
* ixgbe_ptp_enable
- * @ptp - the ptp clock structure
- * @rq - the requested feature to change
- * @on - whether to enable or disable the feature
+ * @ptp: the ptp clock structure
+ * @rq: the requested feature to change
+ * @on: whether to enable or disable the feature
*
* enable (or disable) ancillary features of the phc subsystem.
* our driver only supports the PPS feature on the X540
@@ -289,8 +294,8 @@ static int ixgbe_ptp_enable(struct ptp_clock_info *ptp,
/**
* ixgbe_ptp_check_pps_event
- * @adapter - the private adapter structure
- * @eicr - the interrupt cause register value
+ * @adapter: the private adapter structure
+ * @eicr: the interrupt cause register value
*
* This function is called by the interrupt routine when checking for
* interrupts. It will check and handle a pps event.
@@ -307,20 +312,21 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
!(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED))
return;
- switch (hw->mac.type) {
- case ixgbe_mac_X540:
- if (eicr & IXGBE_EICR_TIMESYNC)
+ if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
ptp_clock_event(adapter->ptp_clock, &event);
- break;
- default:
- break;
+ break;
+ default:
+ break;
+ }
}
}
/**
* ixgbe_ptp_enable_sdp
- * @hw - the hardware private structure
- * @shift - the clock shift for calculating nanoseconds
+ * @hw: the hardware private structure
+ * @shift: the clock shift for calculating nanoseconds
*
* this function enables the clock out feature on the sdp0 for the
* X540 device. It will create a 1 second periodic output that can be
@@ -393,7 +399,7 @@ static void ixgbe_ptp_enable_sdp(struct ixgbe_hw *hw, int shift)
/**
* ixgbe_ptp_disable_sdp
- * @hw - the private hardware structure
+ * @hw: the private hardware structure
*
* this function disables the auxiliary SDP clock out feature
*/
@@ -425,6 +431,68 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_ptp_match - determine if this skb matches a ptp packet
+ * @skb: pointer to the skb
+ * @hwtstamp: pointer to the hwtstamp_config to check
+ *
+ * Determine whether the skb should have been timestamped, assuming the
+ * hwtstamp was set via the hwtstamp ioctl. Returns non-zero when the packet
+ * should have a timestamp waiting in the registers, and 0 otherwise.
+ *
+ * V1 packets have to check the version type to determine whether they are
+ * correct. However, we can't directly access the data because it might be
+ * fragmented in the SKB, in paged memory. In order to work around this, we
+ * use skb_copy_bits which will properly copy the data whether it is in the
+ * paged memory fragments or not. We have to copy the IP header as well as the
+ * message type.
+ */
+static int ixgbe_ptp_match(struct sk_buff *skb, int rx_filter)
+{
+ struct iphdr iph;
+ u8 msgtype;
+ unsigned int type, offset;
+
+ if (rx_filter == HWTSTAMP_FILTER_NONE)
+ return 0;
+
+ type = sk_run_filter(skb, ptp_filter);
+
+ if (likely(rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT))
+ return type & PTP_CLASS_V2;
+
+ /* For the remaining cases actually check message type */
+ switch (type) {
+ case PTP_CLASS_V1_IPV4:
+ skb_copy_bits(skb, OFF_IHL, &iph, sizeof(iph));
+ offset = ETH_HLEN + (iph.ihl << 2) + UDP_HLEN + OFF_PTP_CONTROL;
+ break;
+ case PTP_CLASS_V1_IPV6:
+ offset = OFF_PTP6 + OFF_PTP_CONTROL;
+ break;
+ default:
+ /* other cases invalid or handled above */
+ return 0;
+ }
+
+ /* Make sure our buffer is long enough */
+ if (skb->len < offset)
+ return 0;
+
+ skb_copy_bits(skb, offset, &msgtype, sizeof(msgtype));
+
+ switch (rx_filter) {
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ return (msgtype == IXGBE_RXMTRL_V1_SYNC_MSG);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ return (msgtype == IXGBE_RXMTRL_V1_DELAY_REQ_MSG);
+ break;
+ default:
+ return 0;
+ }
+}
+
+/**
* ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
* @q_vector: structure containing interrupt and ring information
* @skb: particular skb to send timestamp with
@@ -473,6 +541,7 @@ void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
/**
* ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
* @q_vector: structure containing interrupt and ring information
+ * @rx_desc: the rx descriptor
* @skb: particular skb to send timestamp with
*
* if the timestamp is valid, we convert it into the timecounter ns
@@ -480,6 +549,7 @@ void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
* is passed up the network stack
*/
void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+ union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct ixgbe_adapter *adapter;
@@ -497,21 +567,33 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
hw = &adapter->hw;
tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+
+ /* Check if we have a valid timestamp and make sure the skb should
+ * have been timestamped */
+ if (likely(!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID) ||
+ !ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
+ return;
+
+ /*
+ * Always read the registers, in order to clear a possible fault
+ * because of stagnant RX timestamp values for a packet that never
+ * reached the queue.
+ */
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;
/*
- * If this bit is set, then the RX registers contain the time stamp. No
- * other packet will be time stamped until we read these registers, so
- * read the registers to make them available again. Because only one
- * packet can be time stamped at a time, we know that the register
- * values must belong to this one here and therefore we don't need to
- * compare any of the additional attributes stored for it.
+ * If the timestamp bit is set in the packet's descriptor, we know the
+ * timestamp belongs to this packet. No other packet can be
+ * timestamped until the registers for timestamping have been read.
+ * Therefore only one packet with this bit can be in the queue at a
+ * time, and the rx timestamp values that were in the registers belong
+ * to this packet.
*
* If nothing went wrong, then it should have a skb_shared_tx that we
* can turn into a skb_shared_hwtstamps.
*/
- if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
+ if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
return;
spin_lock_irqsave(&adapter->tmreg_lock, flags);
@@ -539,6 +621,11 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
* type has to be specified. Matching the kind of event packet is
* not supported, with the exception of "all V2 events regardless of
* level 2 or 4".
+ *
+ * Since hardware always timestamps Path delay packets when timestamping V2
+ * packets, regardless of the type specified in the register, only use V2
+ * Event mode. This more accurately tells the user what the hardware is going
+ * to do anyway.
*/
int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
struct ifreq *ifr, int cmd)
@@ -582,41 +669,30 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
is_l4 = true;
break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_mtrl = IXGBE_RXMTRL_V2_SYNC_MSG;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_mtrl = IXGBE_RXMTRL_V2_DELAY_REQ_MSG;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
is_l2 = true;
is_l4 = true;
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_ALL:
default:
/*
- * register RXMTRL must be set, therefore it is not
- * possible to time stamp both V1 Sync and Delay_Req messages
- * and hardware does not support timestamping all packets
- * => return error
+ * register RXMTRL must be set in order to do V1 packets,
+ * therefore it is not possible to time stamp both V1 Sync and
+ * Delay_Req messages and hardware does not support
+ * timestamping all packets => return error
*/
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
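For context on how these filter modes are exercised: user space negotiates them through the SIOCSHWTSTAMP ioctl, and with the change above a request for any of the V2 filters should be reported back as HWTSTAMP_FILTER_PTP_V2_EVENT. A minimal user-space sketch of such a request follows; it is not part of the patch, and the interface name eth0 is an assumption.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Minimal user-space sketch (not part of the patch): ask the driver
 * for PTP v2 event RX timestamping via SIOCSHWTSTAMP. "eth0" is an
 * assumed interface name. */
int main(void)
{
        struct hwtstamp_config cfg = {
                .tx_type   = HWTSTAMP_TX_ON,
                .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
        };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&cfg;

        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                perror("SIOCSHWTSTAMP");
        else
                /* the driver reports back the filter actually applied */
                printf("rx_filter in effect: %d\n", cfg.rx_filter);

        close(fd);
        return 0;
}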
@@ -626,6 +702,9 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
return 0;
}
+ /* Store filter value for later use */
+ adapter->rx_hwtstamp_filter = config.rx_filter;
+
/* define ethertype filter for timestamped packets */
if (is_l2)
IXGBE_WRITE_REG(hw, IXGBE_ETQF(3),
@@ -690,7 +769,7 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
/**
* ixgbe_ptp_start_cyclecounter - create the cycle counter from hw
- * @adapter - pointer to the adapter structure
+ * @adapter: pointer to the adapter structure
*
* this function initializes the timecounter and cyclecounter
* structures for use in generating a ns counter from the arbitrary
@@ -708,6 +787,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 incval = 0;
+ u32 timinca = 0;
u32 shift = 0;
u32 cycle_speed;
unsigned long flags;
@@ -730,8 +810,16 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
break;
}
- /* Bail if the cycle speed didn't change */
- if (adapter->cycle_speed == cycle_speed)
+ /*
+ * grab the current TIMINCA value from the register so that it can be
+ * double checked. If the register value has been cleared, it must be
+ * reset to the correct value for generating a cyclecounter. If
+ * TIMINCA is zero, the SYSTIME registers do not increment at all.
+ */
+ timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA);
+
+ /* Bail if the cycle speed didn't change and TIMINCA is non-zero */
+ if (adapter->cycle_speed == cycle_speed && timinca)
return;
/* disable the SDP clock out */
@@ -817,7 +905,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
/**
* ixgbe_ptp_init
- * @adapter - the ixgbe private adapter structure
+ * @adapter: the ixgbe private adapter structure
*
* This function performs the required steps for enabling ptp
* support. If ptp support has already been loaded it simply calls the
@@ -861,6 +949,10 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
return;
}
+ /* initialize the ptp filter */
+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter)))
+ e_dev_warn("ptp_filter_init failed\n");
+
spin_lock_init(&adapter->tmreg_lock);
ixgbe_ptp_start_cyclecounter(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 2d971d1..4fea871 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -44,50 +44,15 @@
#include "ixgbe_sriov.h"
#ifdef CONFIG_PCI_IOV
-static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- struct pci_dev *pvfdev;
- u16 vf_devfn = 0;
- int device_id;
- int vfs_found = 0;
-
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- device_id = IXGBE_DEV_ID_82599_VF;
- break;
- case ixgbe_mac_X540:
- device_id = IXGBE_DEV_ID_X540_VF;
- break;
- default:
- device_id = 0;
- break;
- }
-
- vf_devfn = pdev->devfn + 0x80;
- pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
- while (pvfdev) {
- if (pvfdev->devfn == vf_devfn &&
- (pvfdev->bus->number >= pdev->bus->number))
- vfs_found++;
- vf_devfn += 2;
- pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
- device_id, pvfdev);
- }
-
- return vfs_found;
-}
-
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
const struct ixgbe_info *ii)
{
struct ixgbe_hw *hw = &adapter->hw;
- int err = 0;
int num_vf_macvlans, i;
struct vf_macvlans *mv_list;
int pre_existing_vfs = 0;
- pre_existing_vfs = ixgbe_find_enabled_vfs(adapter);
+ pre_existing_vfs = pci_num_vf(adapter->pdev);
if (!pre_existing_vfs && !adapter->num_vfs)
return;
@@ -106,16 +71,33 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
"enabled for this device - Please reload all "
"VF drivers to avoid spoofed packet errors\n");
} else {
+ int err;
+ /*
+ * The 82599 supports up to 64 VFs per physical function
+ * but this implementation limits allocation to 63 so that
+ * basic networking resources are still available to the
+ * physical function. If the user requests greater than
+ * 63 VFs then it is an error - reset to default of zero.
+ */
+ adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63);
+
err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ if (err) {
+ e_err(probe, "Failed to enable PCI sriov: %d\n", err);
+ adapter->num_vfs = 0;
+ return;
+ }
}
- if (err) {
- e_err(probe, "Failed to enable PCI sriov: %d\n", err);
- goto err_novfs;
- }
- adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
+ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
+ /* Enable VMDq flag so device will be set in VM mode */
+ adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
+ if (!adapter->ring_feature[RING_F_VMDQ].limit)
+ adapter->ring_feature[RING_F_VMDQ].limit = 1;
+ adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs;
+
num_vf_macvlans = hw->mac.num_rar_entries -
(IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
@@ -146,12 +128,39 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
* and memory allocated set up the mailbox parameters
*/
ixgbe_init_mbx_params_pf(hw);
- memcpy(&hw->mbx.ops, ii->mbx_ops,
- sizeof(hw->mbx.ops));
+ memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
+
+ /* limit traffic classes based on VFs enabled */
+ if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
+ (adapter->num_vfs < 16)) {
+ adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
+ } else if (adapter->num_vfs < 32) {
+ adapter->dcb_cfg.num_tcs.pg_tcs = 4;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
+ } else {
+ adapter->dcb_cfg.num_tcs.pg_tcs = 1;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
+ }
+
+ /* We do not support RSS w/ SR-IOV */
+ adapter->ring_feature[RING_F_RSS].limit = 1;
/* Disable RSC when in SR-IOV mode */
adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
IXGBE_FLAG2_RSC_ENABLED);
+
+#ifdef IXGBE_FCOE
+ /*
+ * When SR-IOV is enabled 82599 cannot support jumbo frames
+ * so we must disable FCoE because we cannot support FCoE MTU.
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED |
+ IXGBE_FLAG_FCOE_CAPABLE);
+#endif
+
+ /* enable spoof checking for all VFs */
for (i = 0; i < adapter->num_vfs; i++)
adapter->vfinfo[i].spoofchk_enabled = true;
return;
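
The SR-IOV hunk above caps the VF count at 63 and then scales the number of DCB traffic classes down as more VFs are enabled. A standalone sketch of that mapping; MAX_TRAFFIC_CLASS is a local stand-in and the 82599 MAC-type check is dropped for brevity:

#include <stdio.h>

#define MAX_TRAFFIC_CLASS 8	/* stand-in for the driver's constant */

/* mirror of the policy added above: fewer VFs leave room for more TCs */
static int tcs_for_vf_count(unsigned int num_vfs)
{
	if (num_vfs < 16)
		return MAX_TRAFFIC_CLASS;
	else if (num_vfs < 32)
		return 4;
	else
		return 1;
}

int main(void)
{
	unsigned int counts[] = { 0, 15, 16, 31, 32, 63 };

	for (unsigned int i = 0; i < sizeof(counts) / sizeof(counts[0]); i++)
		printf("%2u VFs -> %d traffic classes\n",
		       counts[i], tcs_for_vf_count(counts[i]));
	return 0;
}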
@@ -160,31 +169,80 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
/* Oh oh */
e_err(probe, "Unable to allocate memory for VF Data Storage - "
"SRIOV disabled\n");
- pci_disable_sriov(adapter->pdev);
+ ixgbe_disable_sriov(adapter);
+}
-err_novfs:
- adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
- adapter->num_vfs = 0;
+static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pci_dev *vfdev;
+ int dev_id;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ dev_id = IXGBE_DEV_ID_82599_VF;
+ break;
+ case ixgbe_mac_X540:
+ dev_id = IXGBE_DEV_ID_X540_VF;
+ break;
+ default:
+ return false;
+ }
+
+ /* loop through all the VFs to see if we own any that are assigned */
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
+ while (vfdev) {
+ /* if we don't own it we don't care */
+ if (vfdev->is_virtfn && vfdev->physfn == pdev) {
+ /* if it is assigned we cannot release it */
+ if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+ return true;
+ }
+
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
+ }
+
+ return false;
}
-#endif /* #ifdef CONFIG_PCI_IOV */
+#endif /* #ifdef CONFIG_PCI_IOV */
void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 gcr;
u32 gpie;
u32 vmdctl;
- int i;
+
+ /* set num VFs to 0 to prevent access to vfinfo */
+ adapter->num_vfs = 0;
+
+ /* free VF control structures */
+ kfree(adapter->vfinfo);
+ adapter->vfinfo = NULL;
+
+ /* free macvlan list */
+ kfree(adapter->mv_list);
+ adapter->mv_list = NULL;
+
+ /* if SR-IOV is already disabled then there is nothing to do */
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return;
#ifdef CONFIG_PCI_IOV
+ /*
+ * If our VFs are assigned we cannot shut down SR-IOV
+ * without causing issues, so just leave the hardware
+ * available but disabled
+ */
+ if (ixgbe_vfs_are_assigned(adapter)) {
+ e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+ return;
+ }
/* disable iov and allow time for transactions to clear */
pci_disable_sriov(adapter->pdev);
#endif
/* turn off device IOV mode */
- gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
- gcr &= ~(IXGBE_GCR_EXT_SRIOV);
- IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);
gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
gpie &= ~IXGBE_GPIE_VTMODE_MASK;
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -195,19 +253,14 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
IXGBE_WRITE_FLUSH(hw);
+ /* Disable the VMDq flag so the device falls back to default mode */
+ if (adapter->ring_feature[RING_F_VMDQ].limit == 1)
+ adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+ adapter->ring_feature[RING_F_VMDQ].offset = 0;
+
/* take a breather then clean up driver data */
msleep(100);
- /* Release reference to VF devices */
- for (i = 0; i < adapter->num_vfs; i++) {
- if (adapter->vfinfo[i].vfdev)
- pci_dev_put(adapter->vfinfo[i].vfdev);
- }
- kfree(adapter->vfinfo);
- kfree(adapter->mv_list);
- adapter->vfinfo = NULL;
-
- adapter->num_vfs = 0;
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}
@@ -441,33 +494,16 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
return 0;
}
-int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter)
-{
-#ifdef CONFIG_PCI_IOV
- int i;
- for (i = 0; i < adapter->num_vfs; i++) {
- if (adapter->vfinfo[i].vfdev->dev_flags &
- PCI_DEV_FLAGS_ASSIGNED)
- return true;
- }
-#endif
- return false;
-}
-
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
unsigned char vf_mac_addr[6];
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
unsigned int vfn = (event_mask & 0x3f);
- struct pci_dev *pvfdev;
- unsigned int device_id;
- u16 thisvf_devfn = (pdev->devfn + 0x80 + (vfn << 1)) |
- (pdev->devfn & 1);
bool enable = ((event_mask & 0x10000000U) != 0);
if (enable) {
- random_ether_addr(vf_mac_addr);
+ eth_random_addr(vf_mac_addr);
e_info(probe, "IOV: VF %d is enabled MAC %pM\n",
vfn, vf_mac_addr);
/*
@@ -475,31 +511,6 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
* for it later.
*/
memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
-
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- device_id = IXGBE_DEV_ID_82599_VF;
- break;
- case ixgbe_mac_X540:
- device_id = IXGBE_DEV_ID_X540_VF;
- break;
- default:
- device_id = 0;
- break;
- }
-
- pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
- while (pvfdev) {
- if (pvfdev->devfn == thisvf_devfn)
- break;
- pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
- device_id, pvfdev);
- }
- if (pvfdev)
- adapter->vfinfo[vfn].vfdev = pvfdev;
- else
- e_err(drv, "Couldn't find pci dev ptr for VF %4.4x\n",
- thisvf_devfn);
}
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 2ab38d5..1be1d30 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -42,7 +42,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
void ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
-int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
const struct ixgbe_info *ii);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index 1d80b1c..16ddf14 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -37,7 +37,6 @@
#include <linux/netdevice.h>
#include <linux/hwmon.h>
-#ifdef CONFIG_IXGBE_HWMON
/* hwmon callback functions */
static ssize_t ixgbe_hwmon_show_location(struct device *dev,
struct device_attribute *attr,
@@ -96,11 +95,11 @@ static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev,
return sprintf(buf, "%u\n", value);
}
-/*
+/**
* ixgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
- * @ adapter: pointer to the adapter structure
- * @ offset: offset in the eeprom sensor data table
- * @ type: type of sensor data to display
+ * @adapter: pointer to the adapter structure
+ * @offset: offset in the eeprom sensor data table
+ * @type: type of sensor data to display
*
* For each file we want in hwmon's sysfs interface we need a device_attribute
* This is included in our hwmon_attr struct that contains the references to
@@ -241,5 +240,4 @@ err:
exit:
return rc;
}
-#endif /* CONFIG_IXGBE_HWMON */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 204848d..400f86a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -32,9 +32,6 @@
#include <linux/mdio.h>
#include <linux/netdevice.h>
-/* Vendor ID */
-#define IXGBE_INTEL_VENDOR_ID 0x8086
-
/* Device IDs */
#define IXGBE_DEV_ID_82598 0x10B6
#define IXGBE_DEV_ID_82598_BX 0x1508
@@ -57,6 +54,7 @@
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
@@ -1452,6 +1450,7 @@ enum {
#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
+#define IXGBE_ETQF_POOL_SHIFT 20
#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */
#define IXGBE_ETQS_RX_QUEUE_SHIFT 16
@@ -2419,7 +2418,7 @@ typedef u32 ixgbe_physical_layer;
*/
/* BitTimes (BT) conversion */
-#define IXGBE_BT2KB(BT) ((BT + 1023) / (8 * 1024))
+#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024))
#define IXGBE_B2BT(BT) (BT * 8)
/* Calculate Delay to respond to PFC */
@@ -2450,24 +2449,31 @@ typedef u32 ixgbe_physical_layer;
#define IXGBE_PCI_DELAY 10000
/* Calculate X540 delay value in bit times */
-#define IXGBE_FILL_RATE (36 / 25)
-
-#define IXGBE_DV_X540(LINK, TC) (IXGBE_FILL_RATE * \
- (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
- (2 * IXGBE_CABLE_DC) + \
- (2 * IXGBE_ID_X540) + \
- IXGBE_HD + IXGBE_B2BT(TC)))
+#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \
+ ((36 * \
+ (IXGBE_B2BT(_max_frame_link) + \
+ IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + \
+ (2 * IXGBE_ID_X540) + \
+ IXGBE_HD) / 25 + 1) + \
+ 2 * IXGBE_B2BT(_max_frame_tc))
/* Calculate 82599, 82598 delay value in bit times */
-#define IXGBE_DV(LINK, TC) (IXGBE_FILL_RATE * \
- (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
- (2 * IXGBE_CABLE_DC) + (2 * IXGBE_ID) + \
- IXGBE_HD + IXGBE_B2BT(TC)))
+#define IXGBE_DV(_max_frame_link, _max_frame_tc) \
+ ((36 * \
+ (IXGBE_B2BT(_max_frame_link) + \
+ IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + \
+ (2 * IXGBE_ID) + \
+ IXGBE_HD) / 25 + 1) + \
+ 2 * IXGBE_B2BT(_max_frame_tc))
/* Calculate low threshold delay values */
-#define IXGBE_LOW_DV_X540(TC) (2 * IXGBE_B2BT(TC) + \
- (IXGBE_FILL_RATE * IXGBE_PCI_DELAY))
-#define IXGBE_LOW_DV(TC) (2 * IXGBE_LOW_DV_X540(TC))
+#define IXGBE_LOW_DV_X540(_max_frame_tc) \
+ (2 * IXGBE_B2BT(_max_frame_tc) + \
+ (36 * IXGBE_PCI_DELAY / 25) + 1)
+#define IXGBE_LOW_DV(_max_frame_tc) \
+ (2 * IXGBE_LOW_DV_X540(_max_frame_tc))
/* Software ATR hash keys */
#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
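
The rewritten delay macros above fold the 36/25 fill-rate factor directly into the arithmetic; the old IXGBE_FILL_RATE of (36 / 25) truncated to 1 in integer math, so the scaling was effectively lost. The bit-times-to-KB conversion is also fixed to round up to whole kilobytes (8 * 1024 bits each) instead of to the next 1024 bits. A small standalone check of that rounding; the sample values are illustrative:

#include <stdio.h>

/* old macro rounded with the wrong constant; new one is a true ceiling */
#define BT2KB_OLD(bt)	(((bt) + 1023) / (8 * 1024))
#define BT2KB_NEW(bt)	(((bt) + (8 * 1024 - 1)) / (8 * 1024))

int main(void)
{
	unsigned int samples[] = { 8192, 8193, 12000, 16384 };

	printf("integer 36 / 25 = %d (why the old fill rate was a no-op)\n", 36 / 25);

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%5u bit times -> old %u KB, new %u KB\n",
		       samples[i], BT2KB_OLD(samples[i]), BT2KB_NEW(samples[i]));
	return 0;
}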
@@ -2597,6 +2603,8 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_da_act_lmt_core1 = 8,
ixgbe_sfp_type_1g_cu_core0 = 9,
ixgbe_sfp_type_1g_cu_core1 = 10,
+ ixgbe_sfp_type_1g_sx_core0 = 11,
+ ixgbe_sfp_type_1g_sx_core1 = 12,
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
@@ -2837,6 +2845,7 @@ struct ixgbe_mac_operations {
s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
s32 (*clear_rar)(struct ixgbe_hw *, u32);
s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+ s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
@@ -2912,6 +2921,7 @@ struct ixgbe_mac_info {
bool orig_link_settings_stored;
bool autotry_restart;
u8 flags;
+ u8 san_mac_rar_index;
struct ixgbe_thermal_sensor_data thermal_sensor_data;
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index f90ec07..de4da52 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -156,6 +156,9 @@ mac_reset_top:
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
hw->mac.san_addr, 0, IXGBE_RAH_AV);
+ /* Save the SAN MAC RAR index */
+ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
/* Reserve the last RAR for the SAN MAC address */
hw->mac.num_rar_entries--;
}
@@ -832,6 +835,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.set_rar = &ixgbe_set_rar_generic,
.clear_rar = &ixgbe_clear_rar_generic,
.set_vmdq = &ixgbe_set_vmdq_generic,
+ .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic,
.clear_vmdq = &ixgbe_clear_vmdq_generic,
.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index e09a6cc..418af82 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -251,6 +251,7 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
IXGBE_ADVTXD_POPTS_SHIFT)
@@ -264,32 +265,9 @@ struct ixgbe_adv_tx_context_desc {
/* Interrupt register bitmasks */
-/* Extended Interrupt Cause Read */
-#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
-#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
-#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
-
-/* Extended Interrupt Cause Set */
-#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
-#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
-
-/* Extended Interrupt Mask Set */
-#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
-#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
-
-/* Extended Interrupt Mask Clear */
-#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
-#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
-
-#define IXGBE_EIMS_ENABLE_MASK ( \
- IXGBE_EIMS_RTX_QUEUE | \
- IXGBE_EIMS_MAILBOX | \
- IXGBE_EIMS_OTHER)
-
#define IXGBE_EITR_CNT_WDIS 0x80000000
+#define IXGBE_MAX_EITR 0x00000FF8
+#define IXGBE_MIN_EITR 8
/* Error Codes */
#define IXGBE_ERR_INVALID_MAC_ADDR -1
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index e8dddf5..8f20704 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -43,7 +43,6 @@
#define IXGBE_ALL_RAR_ENTRIES 16
-#ifdef ETHTOOL_GSTATS
struct ixgbe_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
@@ -75,21 +74,17 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
zero_base)},
{"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
zero_base)},
- {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base, zero_base)},
};
#define IXGBE_QUEUE_STATS_LEN 0
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
-#endif /* ETHTOOL_GSTATS */
-#ifdef ETHTOOL_TEST
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)",
"Link test (on/offline)"
};
#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
-#endif /* ETHTOOL_TEST */
static int ixgbevf_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
@@ -289,13 +284,11 @@ static void ixgbevf_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- struct ixgbevf_ring *tx_ring = adapter->tx_ring;
- struct ixgbevf_ring *rx_ring = adapter->rx_ring;
ring->rx_max_pending = IXGBEVF_MAX_RXD;
ring->tx_max_pending = IXGBEVF_MAX_TXD;
- ring->rx_pending = rx_ring->count;
- ring->tx_pending = tx_ring->count;
+ ring->rx_pending = adapter->rx_ring_count;
+ ring->tx_pending = adapter->tx_ring_count;
}
static int ixgbevf_set_ringparam(struct net_device *netdev,
@@ -303,33 +296,28 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
- int i, err = 0;
u32 new_rx_count, new_tx_count;
+ int i, err = 0;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
- new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
- new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
-
- new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
- new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
+ new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD);
+ new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
- if ((new_tx_count == adapter->tx_ring->count) &&
- (new_rx_count == adapter->rx_ring->count)) {
- /* nothing to do */
+ new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD);
+ new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ /* if nothing to do return success */
+ if ((new_tx_count == adapter->tx_ring_count) &&
+ (new_rx_count == adapter->rx_ring_count))
return 0;
- }
while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
- /*
- * If the adapter isn't up and running then just set the
- * new parameters and scurry for the exits.
- */
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i].count = new_tx_count;
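
The set_ringparam rework above first clamps the requested descriptor counts to the supported range and rounds them to the hardware's descriptor multiple before touching any ring. A standalone sketch of that clamping; the limits and multiple below are illustrative stand-ins rather than the driver's exact values:

#include <stdio.h>

/* illustrative stand-ins; the real limits live in ixgbevf.h */
#define MIN_TXD		64
#define MAX_TXD		4096
#define DESC_MULTIPLE	8

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

static unsigned int clamp_ring_size(unsigned int requested)
{
	unsigned int count = requested;

	if (count < MIN_TXD)
		count = MIN_TXD;
	if (count > MAX_TXD)
		count = MAX_TXD;

	/* hardware wants the count in multiples of DESC_MULTIPLE */
	return ALIGN_UP(count, DESC_MULTIPLE);
}

int main(void)
{
	unsigned int reqs[] = { 1, 100, 1000, 100000 };

	for (unsigned int i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++)
		printf("requested %6u -> used %u\n", reqs[i], clamp_ring_size(reqs[i]));
	return 0;
}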
@@ -340,82 +328,98 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
goto clear_reset;
}
- tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!tx_ring) {
- err = -ENOMEM;
- goto clear_reset;
- }
-
- rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!rx_ring) {
- err = -ENOMEM;
- goto err_rx_setup;
- }
-
- ixgbevf_down(adapter);
+ if (new_tx_count != adapter->tx_ring_count) {
+ tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));
+ if (!tx_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
- memcpy(tx_ring, adapter->tx_ring,
- adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
- for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_ring[i].count = new_tx_count;
- err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
- if (err) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ /* clone ring and setup updated count */
+ tx_ring[i] = adapter->tx_ring[i];
+ tx_ring[i].count = new_tx_count;
+ err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
+ if (!err)
+ continue;
while (i) {
i--;
- ixgbevf_free_tx_resources(adapter,
- &tx_ring[i]);
+ ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
}
- goto err_tx_ring_setup;
+
+ vfree(tx_ring);
+ tx_ring = NULL;
+
+ goto clear_reset;
}
- tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
}
- memcpy(rx_ring, adapter->rx_ring,
- adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
- for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_ring[i].count = new_rx_count;
- err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
- if (err) {
+ if (new_rx_count != adapter->rx_ring_count) {
+ rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring));
+ if (!rx_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ /* clone ring and setup updated count */
+ rx_ring[i] = adapter->rx_ring[i];
+ rx_ring[i].count = new_rx_count;
+ err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
+ if (!err)
+ continue;
while (i) {
i--;
- ixgbevf_free_rx_resources(adapter,
- &rx_ring[i]);
+ ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
}
- goto err_rx_ring_setup;
+
+ vfree(rx_ring);
+ rx_ring = NULL;
+
+ goto clear_reset;
}
- rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
}
- /*
- * Only switch to new rings if all the prior allocations
- * and ring setups have succeeded.
- */
- kfree(adapter->tx_ring);
- adapter->tx_ring = tx_ring;
- adapter->tx_ring_count = new_tx_count;
-
- kfree(adapter->rx_ring);
- adapter->rx_ring = rx_ring;
- adapter->rx_ring_count = new_rx_count;
+ /* bring interface down to prepare for update */
+ ixgbevf_down(adapter);
- /* success! */
- ixgbevf_up(adapter);
+ /* Tx */
+ if (tx_ring) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ixgbevf_free_tx_resources(adapter,
+ &adapter->tx_ring[i]);
+ adapter->tx_ring[i] = tx_ring[i];
+ }
+ adapter->tx_ring_count = new_tx_count;
- goto clear_reset;
+ vfree(tx_ring);
+ tx_ring = NULL;
+ }
-err_rx_ring_setup:
- for(i = 0; i < adapter->num_tx_queues; i++)
- ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
+ /* Rx */
+ if (rx_ring) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ ixgbevf_free_rx_resources(adapter,
+ &adapter->rx_ring[i]);
+ adapter->rx_ring[i] = rx_ring[i];
+ }
+ adapter->rx_ring_count = new_rx_count;
-err_tx_ring_setup:
- kfree(rx_ring);
+ vfree(rx_ring);
+ rx_ring = NULL;
+ }
-err_rx_setup:
- kfree(tx_ring);
+ /* restore interface using new values */
+ ixgbevf_up(adapter);
clear_reset:
+ /* free Tx resources if Rx error is encountered */
+ if (tx_ring) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
+ vfree(tx_ring);
+ }
+
clear_bit(__IXGBEVF_RESETTING, &adapter->state);
return err;
}
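
The rest of the hunk only allocates the temporary ring arrays it actually needs, sets up every new ring first, and brings the interface down only once all allocations have succeeded, so a failure leaves the running configuration untouched. A generic sketch of that prepare-then-swap pattern, with purely illustrative names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* illustrative resize: build the replacement first, swap only on success */
static int resize_buffer(int **live, size_t old_n, size_t new_n)
{
	int *staged = malloc(new_n * sizeof(*staged));
	if (!staged)
		return -1;			/* old buffer still intact */

	memcpy(staged, *live, (old_n < new_n ? old_n : new_n) * sizeof(*staged));
	/* ... any further setup that can fail goes here, before the swap ... */

	free(*live);				/* commit: tear down old state last */
	*live = staged;
	return 0;
}

int main(void)
{
	int *ring = calloc(4, sizeof(*ring));

	if (!ring)
		return 1;
	if (resize_buffer(&ring, 4, 8) == 0)
		puts("resize committed without disturbing the old ring");
	free(ring);
	return 0;
}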
@@ -674,10 +678,8 @@ static int ixgbevf_nway_reset(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- if (netif_running(netdev)) {
- if (!adapter->dev_closed)
- ixgbevf_reinit_locked(adapter);
- }
+ if (netif_running(netdev))
+ ixgbevf_reinit_locked(adapter);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 0a1b992..98cadb0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -52,12 +52,12 @@ struct ixgbevf_tx_buffer {
struct ixgbevf_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
- struct page *page;
- dma_addr_t page_dma;
- unsigned int page_offset;
};
struct ixgbevf_ring {
+ struct ixgbevf_ring *next;
+ struct net_device *netdev;
+ struct device *dev;
struct ixgbevf_adapter *adapter; /* backlink */
void *desc; /* descriptor ring memory */
dma_addr_t dma; /* phys. address of descriptor ring */
@@ -83,29 +83,9 @@ struct ixgbevf_ring {
* offset associated with this ring, which is different
* for DCB and RSS modes */
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
- /* cpu for tx queue */
- int cpu;
-#endif
-
- u64 v_idx; /* maps directly to the index for this ring in the hardware
- * vector array, can also be used for finding the bit in EICR
- * and friends that represents the vector for this ring */
-
- u16 work_limit; /* max work per interrupt */
u16 rx_buf_len;
};
-enum ixgbevf_ring_f_enum {
- RING_F_NONE = 0,
- RING_F_ARRAY_SIZE /* must be last in enum set */
-};
-
-struct ixgbevf_ring_feature {
- int indices;
- int mask;
-};
-
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
@@ -120,8 +100,6 @@ struct ixgbevf_ring_feature {
#define IXGBEVF_MIN_RXD 64
/* Supported Rx Buffer Sizes */
-#define IXGBEVF_RXBUFFER_64 64 /* Used for packet split */
-#define IXGBEVF_RXBUFFER_128 128 /* Used for packet split */
#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
#define IXGBEVF_RXBUFFER_2048 2048
#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
@@ -140,22 +118,42 @@ struct ixgbevf_ring_feature {
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
+struct ixgbevf_ring_container {
+ struct ixgbevf_ring *ring; /* pointer to linked list of rings */
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u8 count; /* total number of rings in vector */
+ u8 itr; /* current ITR setting for ring */
+};
+
+/* iterator for handling rings in ring container */
+#define ixgbevf_for_each_ring(pos, head) \
+ for (pos = (head).ring; pos != NULL; pos = pos->next)
+
/* MAX_MSIX_Q_VECTORS of these are allocated,
* but we only use one per queue-specific vector.
*/
struct ixgbevf_q_vector {
struct ixgbevf_adapter *adapter;
+ u16 v_idx; /* index of q_vector within array, also used for
+ * finding the bit in EICR and friends that
+ * represents the vector for this ring */
+ u16 itr; /* Interrupt throttle rate written to EITR */
struct napi_struct napi;
- DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
- DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
- u8 rxr_count; /* Rx ring count assigned to this vector */
- u8 txr_count; /* Tx ring count assigned to this vector */
- u8 tx_itr;
- u8 rx_itr;
- u32 eitr;
- int v_idx; /* vector index in list */
+ struct ixgbevf_ring_container rx, tx;
+ char name[IFNAMSIZ + 9];
};
+/*
+ * microsecond values for various ITR rates shifted by 2 to fit itr register
+ * with the first 3 bits reserved 0
+ */
+#define IXGBE_MIN_RSC_ITR 24
+#define IXGBE_100K_ITR 40
+#define IXGBE_20K_ITR 200
+#define IXGBE_10K_ITR 400
+#define IXGBE_8K_ITR 500
+
/* Helper macros to switch between ints/sec and what the register uses.
* And yes, it's the same math going both ways. The lowest value
* supported by all of the ixgbe hardware is 8.
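
The ITR values above are in register format: as the comment notes, they are microsecond intervals shifted left by 2 because the low bits of VTEITR are reserved, which is why the update path later recovers the interval with itr >> 2. A standalone conversion using only the defines shown above; the interrupt rates are approximate:

#include <stdio.h>

/* values copied from the defines above */
#define IXGBE_100K_ITR	40
#define IXGBE_20K_ITR	200
#define IXGBE_10K_ITR	400
#define IXGBE_8K_ITR	500

int main(void)
{
	unsigned int regs[] = { IXGBE_100K_ITR, IXGBE_20K_ITR,
				IXGBE_10K_ITR, IXGBE_8K_ITR };

	for (unsigned int i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
		unsigned int usecs = regs[i] >> 2;	/* low bits reserved, so >> 2 */

		printf("EITR 0x%03x -> %3u us/interrupt -> ~%u ints/s\n",
		       regs[i], usecs, 1000000 / usecs);
	}
	return 0;
}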
@@ -168,12 +166,12 @@ struct ixgbevf_q_vector {
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
-#define IXGBE_RX_DESC_ADV(R, i) \
- (&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
-#define IXGBE_TX_DESC_ADV(R, i) \
- (&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
-#define IXGBE_TX_CTXTDESC_ADV(R, i) \
- (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+#define IXGBEVF_RX_DESC(R, i) \
+ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
+#define IXGBEVF_TX_DESC(R, i) \
+ (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
+#define IXGBEVF_TX_CTXTDESC(R, i) \
+ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
@@ -181,9 +179,8 @@ struct ixgbevf_q_vector {
#define NON_Q_VECTORS (OTHER_VECTOR)
#define MAX_MSIX_Q_VECTORS 2
-#define MAX_MSIX_COUNT 2
-#define MIN_MSIX_Q_VECTORS 2
+#define MIN_MSIX_Q_VECTORS 1
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
/* board specific private data structure */
@@ -193,12 +190,14 @@ struct ixgbevf_adapter {
u16 bd_number;
struct work_struct reset_task;
struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
- char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
/* Interrupt Throttle Rate */
- u32 itr_setting;
- u16 eitr_low;
- u16 eitr_high;
+ u16 rx_itr_setting;
+ u16 tx_itr_setting;
+
+ /* interrupt masks */
+ u32 eims_enable_mask;
+ u32 eims_other;
/* TX */
struct ixgbevf_ring *tx_ring; /* One per active queue */
@@ -213,18 +212,13 @@ struct ixgbevf_adapter {
/* RX */
struct ixgbevf_ring *rx_ring; /* One per active queue */
int num_rx_queues;
- int num_rx_pools; /* == num_rx_queues in 82598 */
- int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
u64 hw_csum_rx_good;
u64 non_eop_descs;
int num_msix_vectors;
- int max_msix_q_vectors; /* true count of q_vectors for device */
- struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE];
struct msix_entry *msix_entries;
- u64 rx_hdr_split;
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
@@ -232,15 +226,8 @@ struct ixgbevf_adapter {
* thus the additional *_CAPABLE flags.
*/
u32 flags;
-#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
-#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
-#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
-#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
-#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
-#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5)
-#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 6)
-#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
-#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 8)
+#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1)
+
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
@@ -254,18 +241,16 @@ struct ixgbevf_adapter {
u32 eitr_param;
unsigned long state;
- u32 *config_space;
u64 tx_busy;
unsigned int tx_ring_count;
unsigned int rx_ring_count;
u32 link_speed;
bool link_up;
- unsigned long link_check_timeout;
struct work_struct watchdog_task;
- bool netdev_registered;
- bool dev_closed;
+
+ spinlock_t mbx_lock;
};
enum ixbgevf_state_t {
@@ -301,11 +286,8 @@ extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
struct ixgbevf_ring *);
extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
-
-#ifdef ETHTOOL_OPS_COMPAT
extern int ethtool_ioctl(struct ifreq *ifr);
-#endif
extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index f69ec42..3f9841d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -42,6 +42,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
@@ -97,9 +98,7 @@ module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* forward decls */
-static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
-static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
- u32 itr_reg);
+static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
struct ixgbevf_ring *rx_ring,
@@ -115,7 +114,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}
-/*
+/**
* ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
* @adapter: pointer to adapter struct
* @direction: 0 for Rx, 1 for Tx, -1 for other causes
@@ -146,18 +145,18 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
}
}
-static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
+static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer
*tx_buffer_info)
{
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
- dma_unmap_page(&adapter->pdev->dev,
+ dma_unmap_page(tx_ring->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
DMA_TO_DEVICE);
else
- dma_unmap_single(&adapter->pdev->dev,
+ dma_unmap_single(tx_ring->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
DMA_TO_DEVICE);
@@ -175,38 +174,34 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
- (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#ifdef MAX_SKB_FRAGS
-#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
- MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
-#else
-#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
-#endif
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
static void ixgbevf_tx_timeout(struct net_device *netdev);
/**
* ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
+ * @q_vector: board private structure
* @tx_ring: tx ring to clean
**/
-static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
+static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *tx_ring)
{
- struct net_device *netdev = adapter->netdev;
- struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
struct ixgbevf_tx_buffer *tx_buffer_info;
unsigned int i, eop, count = 0;
unsigned int total_bytes = 0, total_packets = 0;
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+ return true;
+
i = tx_ring->next_to_clean;
eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
- (count < tx_ring->work_limit)) {
+ (count < tx_ring->count)) {
bool cleaned = false;
rmb(); /* read buffer_info after eop_desc */
/* eop could change between read and DD-check */
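
The descriptor-count rework above replaces the hand-rolled rounding with DIV_ROUND_UP over the per-descriptor data limit and shrinks DESC_NEEDED to MAX_SKB_FRAGS plus a small fixed overhead. A standalone check of the rounding, assuming the driver's usual 2^14-byte (16 KB) limit for IXGBE_MAX_TXD_PWR:

#include <stdio.h>

#define MAX_TXD_PWR		14			/* illustrative, per the driver's usual value */
#define MAX_DATA_PER_TXD	(1 << MAX_TXD_PWR)	/* 16384 bytes per descriptor */

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), MAX_DATA_PER_TXD)

int main(void)
{
	unsigned int sizes[] = { 1, 16384, 16385, 65536 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%6u byte buffer -> %u tx descriptor(s)\n",
		       sizes[i], TXD_USE_COUNT(sizes[i]));
	return 0;
}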
@@ -214,7 +209,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
goto cont_loop;
for ( ; !cleaned; count++) {
struct sk_buff *skb;
- tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
tx_buffer_info = &tx_ring->tx_buffer_info[i];
cleaned = (i == eop);
skb = tx_buffer_info->skb;
@@ -231,7 +226,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
total_bytes += bytecount;
}
- ixgbevf_unmap_and_free_tx_resource(adapter,
+ ixgbevf_unmap_and_free_tx_resource(tx_ring,
tx_buffer_info);
tx_desc->wb.status = 0;
@@ -243,37 +238,25 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
cont_loop:
eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
}
tx_ring->next_to_clean = i;
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
- if (unlikely(count && netif_carrier_ok(netdev) &&
+ if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
(IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
smp_mb();
-#ifdef HAVE_TX_MQ
- if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
- !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
- netif_wake_subqueue(netdev, tx_ring->queue_index);
- ++adapter->restart_queue;
- }
-#else
- if (netif_queue_stopped(netdev) &&
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index) &&
!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
- netif_wake_queue(netdev);
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
++adapter->restart_queue;
}
-#endif
- }
-
- /* re-arm the interrupt */
- if ((count >= tx_ring->work_limit) &&
- (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
- IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
}
u64_stats_update_begin(&tx_ring->syncp);
@@ -281,7 +264,7 @@ cont_loop:
tx_ring->total_packets += total_packets;
u64_stats_update_end(&tx_ring->syncp);
- return count < tx_ring->work_limit;
+ return count < tx_ring->count;
}
/**
@@ -301,13 +284,10 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
bool is_vlan = (status & IXGBE_RXD_STAT_VP);
u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
- if (is_vlan && test_bit(tag, adapter->active_vlans))
+ if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
__vlan_hwaccel_put_tag(skb, tag);
- if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
- napi_gro_receive(&q_vector->napi, skb);
- else
- netif_rx(skb);
+ napi_gro_receive(&q_vector->napi, skb);
}
/**
@@ -317,12 +297,13 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
* @skb: skb currently being received and modified
**/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring,
u32 status_err, struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
/* Rx csum disabled */
- if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+ if (!(ring->netdev->features & NETIF_F_RXCSUM))
return;
/* if IP and error */
@@ -357,52 +338,21 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbevf_rx_buffer *bi;
struct sk_buff *skb;
- unsigned int i;
- unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
+ unsigned int i = rx_ring->next_to_use;
- i = rx_ring->next_to_use;
bi = &rx_ring->rx_buffer_info[i];
while (cleaned_count--) {
- rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
-
- if (!bi->page_dma &&
- (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
- if (!bi->page) {
- bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
- if (!bi->page) {
- adapter->alloc_rx_page_failed++;
- goto no_buffers;
- }
- bi->page_offset = 0;
- } else {
- /* use a half page if we're re-using */
- bi->page_offset ^= (PAGE_SIZE / 2);
- }
-
- bi->page_dma = dma_map_page(&pdev->dev, bi->page,
- bi->page_offset,
- (PAGE_SIZE / 2),
- DMA_FROM_DEVICE);
- }
-
+ rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
skb = bi->skb;
if (!skb) {
- skb = netdev_alloc_skb(adapter->netdev,
- bufsz);
-
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_buf_len);
if (!skb) {
adapter->alloc_rx_buff_failed++;
goto no_buffers;
}
- /*
- * Make buffer alignment 2 beyond a 16 byte boundary
- * this will result in a 16 byte aligned IP header after
- * the 14 byte MAC header is removed
- */
- skb_reserve(skb, NET_IP_ALIGN);
-
bi->skb = skb;
}
if (!bi->dma) {
@@ -410,14 +360,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
}
- /* Refresh the desc even if buffer_addrs didn't change because
- * each write-back erases this info. */
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
- } else {
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
- }
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
i++;
if (i == rx_ring->count)
@@ -428,36 +371,22 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
no_buffers:
if (rx_ring->next_to_use != i) {
rx_ring->next_to_use = i;
- if (i-- == 0)
- i = (rx_ring->count - 1);
ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
}
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
- u64 qmask)
+ u32 qmask)
{
- u32 mask;
struct ixgbe_hw *hw = &adapter->hw;
- mask = (qmask & 0xFFFFFFFF);
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
-}
-
-static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
-{
- return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
-}
-
-static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
-{
- return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}
static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *rx_ring,
- int *work_done, int work_to_do)
+ int budget)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
struct pci_dev *pdev = adapter->pdev;
@@ -466,36 +395,21 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct sk_buff *skb;
unsigned int i;
u32 len, staterr;
- u16 hdr_info;
- bool cleaned = false;
int cleaned_count = 0;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
i = rx_ring->next_to_clean;
- rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
rx_buffer_info = &rx_ring->rx_buffer_info[i];
while (staterr & IXGBE_RXD_STAT_DD) {
- u32 upper_len = 0;
- if (*work_done >= work_to_do)
+ if (!budget)
break;
- (*work_done)++;
+ budget--;
rmb(); /* read descriptor and rx_buffer_info after status DD */
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
- len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
- IXGBE_RXDADV_HDRBUFLEN_SHIFT;
- if (hdr_info & IXGBE_RXDADV_SPH)
- adapter->rx_hdr_split++;
- if (len > IXGBEVF_RX_HDR_SIZE)
- len = IXGBEVF_RX_HDR_SIZE;
- upper_len = le16_to_cpu(rx_desc->wb.upper.length);
- } else {
- len = le16_to_cpu(rx_desc->wb.upper.length);
- }
- cleaned = true;
+ len = le16_to_cpu(rx_desc->wb.upper.length);
skb = rx_buffer_info->skb;
prefetch(skb->data - NET_IP_ALIGN);
rx_buffer_info->skb = NULL;
@@ -508,46 +422,19 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
skb_put(skb, len);
}
- if (upper_len) {
- dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, DMA_FROM_DEVICE);
- rx_buffer_info->page_dma = 0;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- rx_buffer_info->page,
- rx_buffer_info->page_offset,
- upper_len);
-
- if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
- (page_count(rx_buffer_info->page) != 1))
- rx_buffer_info->page = NULL;
- else
- get_page(rx_buffer_info->page);
-
- skb->len += upper_len;
- skb->data_len += upper_len;
- skb->truesize += upper_len;
- }
-
i++;
if (i == rx_ring->count)
i = 0;
- next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
prefetch(next_rxd);
cleaned_count++;
next_buffer = &rx_ring->rx_buffer_info[i];
if (!(staterr & IXGBE_RXD_STAT_EOP)) {
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- rx_buffer_info->skb = next_buffer->skb;
- rx_buffer_info->dma = next_buffer->dma;
- next_buffer->skb = skb;
- next_buffer->dma = 0;
- } else {
- skb->next = next_buffer->skb;
- skb->next->prev = skb;
- }
+ skb->next = next_buffer->skb;
+ skb->next->prev = skb;
adapter->non_eop_descs++;
goto next_desc;
}
@@ -558,7 +445,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
goto next_desc;
}
- ixgbevf_rx_checksum(adapter, staterr, skb);
+ ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -573,7 +460,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
if (header_fixup_len < 14)
skb_push(skb, header_fixup_len);
}
- skb->protocol = eth_type_trans(skb, adapter->netdev);
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
@@ -605,95 +492,74 @@ next_desc:
rx_ring->total_bytes += total_rx_bytes;
u64_stats_update_end(&rx_ring->syncp);
- return cleaned;
+ return !!budget;
}
/**
- * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
+ * ixgbevf_poll - NAPI polling callback
* @napi: napi struct with our devices info in it
* @budget: amount of work driver is allowed to do this pass, in packets
*
- * This function is optimized for cleaning one queue only on a single
- * q_vector!!!
+ * This function will clean one or more rings associated with a
+ * q_vector.
**/
-static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
+static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
struct ixgbevf_q_vector *q_vector =
container_of(napi, struct ixgbevf_q_vector, napi);
struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct ixgbevf_ring *rx_ring = NULL;
- int work_done = 0;
- long r_idx;
+ struct ixgbevf_ring *ring;
+ int per_ring_budget;
+ bool clean_complete = true;
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
+ ixgbevf_for_each_ring(ring, q_vector->tx)
+ clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
- ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
-
- /* If all Rx work done, exit the polling mode */
- if (work_done < budget) {
- napi_complete(napi);
- if (adapter->itr_setting & 1)
- ixgbevf_set_itr_msix(q_vector);
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
- ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
- }
+ /* attempt to distribute budget to each queue fairly, but don't allow
+ * the budget to go below 1 because we'll exit polling */
+ if (q_vector->rx.count > 1)
+ per_ring_budget = max(budget/q_vector->rx.count, 1);
+ else
+ per_ring_budget = budget;
+
+ ixgbevf_for_each_ring(ring, q_vector->rx)
+ clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
+ per_ring_budget);
+
+ /* If all work not completed, return budget and keep polling */
+ if (!clean_complete)
+ return budget;
+ /* all work done, exit the polling mode */
+ napi_complete(napi);
+ if (adapter->rx_itr_setting & 1)
+ ixgbevf_set_itr(q_vector);
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable_queues(adapter,
+ 1 << q_vector->v_idx);
- return work_done;
+ return 0;
}
/**
- * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
- * @napi: napi struct with our devices info in it
- * @budget: amount of work driver is allowed to do this pass, in packets
- *
- * This function will clean more than one rx queue associated with a
- * q_vector.
- **/
-static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
+ * ixgbevf_write_eitr - write VTEITR register in hardware specific way
+ * @q_vector: structure containing interrupt and ring information
+ */
+static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
- struct ixgbevf_q_vector *q_vector =
- container_of(napi, struct ixgbevf_q_vector, napi);
struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct ixgbevf_ring *rx_ring = NULL;
- int work_done = 0, i;
- long r_idx;
- u64 enable_mask = 0;
-
- /* attempt to distribute budget to each queue fairly, but don't allow
- * the budget to go below 1 because we'll exit polling */
- budget /= (q_vector->rxr_count ?: 1);
- budget = max(budget, 1);
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
- ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
- enable_mask |= rx_ring->v_idx;
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
- }
-
-#ifndef HAVE_NETDEV_NAPI_LIST
- if (!netif_running(adapter->netdev))
- work_done = 0;
-
-#endif
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int v_idx = q_vector->v_idx;
+ u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
- /* If all Rx work done, exit the polling mode */
- if (work_done < budget) {
- napi_complete(napi);
- if (adapter->itr_setting & 1)
- ixgbevf_set_itr_msix(q_vector);
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
- ixgbevf_irq_enable_queues(adapter, enable_mask);
- }
+ /*
+ * set the WDIS bit to not clear the timer bits and cause an
+ * immediate assertion of the interrupt
+ */
+ itr_reg |= IXGBE_EITR_CNT_WDIS;
- return work_done;
+ IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
-
/**
* ixgbevf_configure_msix - Configure MSI-X hardware
* @adapter: board private structure
@@ -704,56 +570,49 @@ static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
struct ixgbevf_q_vector *q_vector;
- struct ixgbe_hw *hw = &adapter->hw;
- int i, j, q_vectors, v_idx, r_idx;
- u32 mask;
+ int q_vectors, v_idx;
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ adapter->eims_enable_mask = 0;
/*
* Populate the IVAR table and set the ITR values to the
* corresponding register.
*/
for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ struct ixgbevf_ring *ring;
q_vector = adapter->q_vector[v_idx];
- /* XXX for_each_set_bit(...) */
- r_idx = find_first_bit(q_vector->rxr_idx,
- adapter->num_rx_queues);
-
- for (i = 0; i < q_vector->rxr_count; i++) {
- j = adapter->rx_ring[r_idx].reg_idx;
- ixgbevf_set_ivar(adapter, 0, j, v_idx);
- r_idx = find_next_bit(q_vector->rxr_idx,
- adapter->num_rx_queues,
- r_idx + 1);
- }
- r_idx = find_first_bit(q_vector->txr_idx,
- adapter->num_tx_queues);
-
- for (i = 0; i < q_vector->txr_count; i++) {
- j = adapter->tx_ring[r_idx].reg_idx;
- ixgbevf_set_ivar(adapter, 1, j, v_idx);
- r_idx = find_next_bit(q_vector->txr_idx,
- adapter->num_tx_queues,
- r_idx + 1);
+
+ ixgbevf_for_each_ring(ring, q_vector->rx)
+ ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
+
+ ixgbevf_for_each_ring(ring, q_vector->tx)
+ ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
+
+ if (q_vector->tx.ring && !q_vector->rx.ring) {
+ /* tx only vector */
+ if (adapter->tx_itr_setting == 1)
+ q_vector->itr = IXGBE_10K_ITR;
+ else
+ q_vector->itr = adapter->tx_itr_setting;
+ } else {
+ /* rx or rx/tx vector */
+ if (adapter->rx_itr_setting == 1)
+ q_vector->itr = IXGBE_20K_ITR;
+ else
+ q_vector->itr = adapter->rx_itr_setting;
}
- /* if this is a tx only vector halve the interrupt rate */
- if (q_vector->txr_count && !q_vector->rxr_count)
- q_vector->eitr = (adapter->eitr_param >> 1);
- else if (q_vector->rxr_count)
- /* rx only */
- q_vector->eitr = adapter->eitr_param;
+ /* add q_vector eims value to global eims_enable_mask */
+ adapter->eims_enable_mask |= 1 << v_idx;
- ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
+ ixgbevf_write_eitr(q_vector);
}
ixgbevf_set_ivar(adapter, -1, 1, v_idx);
-
- /* set up to autoclear timer, and the vectors */
- mask = IXGBE_EIMS_ENABLE_MASK;
- mask &= ~IXGBE_EIMS_OTHER;
- IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
+ /* setup eims_other and add value to global eims_enable_mask */
+ adapter->eims_other = 1 << v_idx;
+ adapter->eims_enable_mask |= adapter->eims_other;
}
enum latency_range {
@@ -765,11 +624,8 @@ enum latency_range {
/**
* ixgbevf_update_itr - update the dynamic ITR value based on statistics
- * @adapter: pointer to adapter
- * @eitr: eitr setting (ints per sec) to give last timeslice
- * @itr_setting: current throttle rate in ints/second
- * @packets: the number of packets during this measurement interval
- * @bytes: the number of bytes during this measurement interval
+ * @q_vector: structure containing interrupt and ring information
+ * @ring_container: structure containing ring performance data
*
* Stores a new ITR value based on packets and byte
* counts during the last interrupt. The advantage of per interrupt
@@ -779,17 +635,17 @@ enum latency_range {
* on testing data as well as attempting to minimize response time
* while increasing bulk throughput.
**/
-static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
- u32 eitr, u8 itr_setting,
- int packets, int bytes)
+static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
+ struct ixgbevf_ring_container *ring_container)
{
- unsigned int retval = itr_setting;
+ int bytes = ring_container->total_bytes;
+ int packets = ring_container->total_packets;
u32 timepassed_us;
u64 bytes_perint;
+ u8 itr_setting = ring_container->itr;
if (packets == 0)
- goto update_itr_done;
-
+ return;
/* simple throttlerate management
* 0-20MB/s lowest (100000 ints/s)
@@ -797,134 +653,77 @@ static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
* 100-1249MB/s bulk (8000 ints/s)
*/
/* what was last interrupt timeslice? */
- timepassed_us = 1000000/eitr;
+ timepassed_us = q_vector->itr >> 2;
bytes_perint = bytes / timepassed_us; /* bytes/usec */
switch (itr_setting) {
case lowest_latency:
- if (bytes_perint > adapter->eitr_low)
- retval = low_latency;
+ if (bytes_perint > 10)
+ itr_setting = low_latency;
break;
case low_latency:
- if (bytes_perint > adapter->eitr_high)
- retval = bulk_latency;
- else if (bytes_perint <= adapter->eitr_low)
- retval = lowest_latency;
+ if (bytes_perint > 20)
+ itr_setting = bulk_latency;
+ else if (bytes_perint <= 10)
+ itr_setting = lowest_latency;
break;
case bulk_latency:
- if (bytes_perint <= adapter->eitr_high)
- retval = low_latency;
+ if (bytes_perint <= 20)
+ itr_setting = low_latency;
break;
}
-update_itr_done:
- return retval;
+ /* clear work counters since we have the values we need */
+ ring_container->total_bytes = 0;
+ ring_container->total_packets = 0;
+
+ /* write updated itr to ring container */
+ ring_container->itr = itr_setting;
}
-/**
- * ixgbevf_write_eitr - write VTEITR register in hardware specific way
- * @adapter: pointer to adapter struct
- * @v_idx: vector index into q_vector array
- * @itr_reg: new value to be written in *register* format, not ints/s
- *
- * This function is made to be called by ethtool and by the driver
- * when it needs to update VTEITR registers at runtime. Hardware
- * specific quirks/differences are taken care of here.
- */
-static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
- u32 itr_reg)
+static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
- struct ixgbe_hw *hw = &adapter->hw;
-
- itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);
-
- /*
- * set the WDIS bit to not clear the timer bits and cause an
- * immediate assertion of the interrupt
- */
- itr_reg |= IXGBE_EITR_CNT_WDIS;
+ u32 new_itr = q_vector->itr;
+ u8 current_itr;
- IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
-}
+ ixgbevf_update_itr(q_vector, &q_vector->tx);
+ ixgbevf_update_itr(q_vector, &q_vector->rx);
-static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
-{
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- u32 new_itr;
- u8 current_itr, ret_itr;
- int i, r_idx, v_idx = q_vector->v_idx;
- struct ixgbevf_ring *rx_ring, *tx_ring;
-
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
- ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
- q_vector->tx_itr,
- tx_ring->total_packets,
- tx_ring->total_bytes);
- /* if the result for this queue would decrease interrupt
- * rate for this vector then use that result */
- q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
- q_vector->tx_itr - 1 : ret_itr);
- r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
- }
-
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
- ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
- q_vector->rx_itr,
- rx_ring->total_packets,
- rx_ring->total_bytes);
- /* if the result for this queue would decrease interrupt
- * rate for this vector then use that result */
- q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
- q_vector->rx_itr - 1 : ret_itr);
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
- }
-
- current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+ current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
switch (current_itr) {
/* counts and packets in update_itr are dependent on these numbers */
case lowest_latency:
- new_itr = 100000;
+ new_itr = IXGBE_100K_ITR;
break;
case low_latency:
- new_itr = 20000; /* aka hwitr = ~200 */
+ new_itr = IXGBE_20K_ITR;
break;
case bulk_latency:
default:
- new_itr = 8000;
+ new_itr = IXGBE_8K_ITR;
break;
}
- if (new_itr != q_vector->eitr) {
- u32 itr_reg;
-
- /* save the algorithm value here, not the smoothed one */
- q_vector->eitr = new_itr;
+ if (new_itr != q_vector->itr) {
/* do an exponential smoothing */
- new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
- itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
- ixgbevf_write_eitr(adapter, v_idx, itr_reg);
+ new_itr = (10 * new_itr * q_vector->itr) /
+ ((9 * new_itr) + q_vector->itr);
+
+ /* save the algorithm value here */
+ q_vector->itr = new_itr;
+
+ ixgbevf_write_eitr(q_vector);
}
}
static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
{
- struct net_device *netdev = data;
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_adapter *adapter = data;
struct ixgbe_hw *hw = &adapter->hw;
- u32 eicr;
u32 msg;
bool got_ack = false;
- eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
- IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
-
if (!hw->mbx.ops.check_for_ack(hw))
got_ack = true;
@@ -953,75 +752,24 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
if (got_ack)
hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
-{
- struct ixgbevf_q_vector *q_vector = data;
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct ixgbevf_ring *tx_ring;
- int i, r_idx;
-
- if (!q_vector->txr_count)
- return IRQ_HANDLED;
-
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
- tx_ring->total_bytes = 0;
- tx_ring->total_packets = 0;
- ixgbevf_clean_tx_irq(adapter, tx_ring);
- r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
- }
-
- if (adapter->itr_setting & 1)
- ixgbevf_set_itr_msix(q_vector);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
return IRQ_HANDLED;
}
+
/**
- * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
+ * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
* @irq: unused
* @data: pointer to our q_vector struct for this interrupt vector
**/
-static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
+static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
struct ixgbevf_q_vector *q_vector = data;
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbevf_ring *rx_ring;
- int r_idx;
- int i;
-
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
- rx_ring->total_bytes = 0;
- rx_ring->total_packets = 0;
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
- }
-
- if (!q_vector->rxr_count)
- return IRQ_HANDLED;
-
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
- /* disable interrupts on this vector only */
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
- napi_schedule(&q_vector->napi);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
-{
- ixgbevf_msix_clean_rx(irq, data);
- ixgbevf_msix_clean_tx(irq, data);
+ /* EIAM disabled interrupts (on this vector) for us */
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
}
@@ -1031,9 +779,9 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
{
struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
- set_bit(r_idx, q_vector->rxr_idx);
- q_vector->rxr_count++;
- a->rx_ring[r_idx].v_idx = 1 << v_idx;
+ a->rx_ring[r_idx].next = q_vector->rx.ring;
+ q_vector->rx.ring = &a->rx_ring[r_idx];
+ q_vector->rx.count++;
}
static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
@@ -1041,9 +789,9 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
{
struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
- set_bit(t_idx, q_vector->txr_idx);
- q_vector->txr_count++;
- a->tx_ring[t_idx].v_idx = 1 << v_idx;
+ a->tx_ring[t_idx].next = q_vector->tx.ring;
+ q_vector->tx.ring = &a->tx_ring[t_idx];
+ q_vector->tx.count++;
}
/**
@@ -1119,37 +867,30 @@ out:
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- irqreturn_t (*handler)(int, void *);
- int i, vector, q_vectors, err;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int vector, err;
int ri = 0, ti = 0;
- /* Decrement for Other and TCP Timer vectors */
- q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
-#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
- ? &ixgbevf_msix_clean_many : \
- (_v)->rxr_count ? &ixgbevf_msix_clean_rx : \
- (_v)->txr_count ? &ixgbevf_msix_clean_tx : \
- NULL)
for (vector = 0; vector < q_vectors; vector++) {
- handler = SET_HANDLER(adapter->q_vector[vector]);
-
- if (handler == &ixgbevf_msix_clean_rx) {
- sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "rx", ri++);
- } else if (handler == &ixgbevf_msix_clean_tx) {
- sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "tx", ti++);
- } else if (handler == &ixgbevf_msix_clean_many) {
- sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "TxRx", vector);
+ struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
+ struct msix_entry *entry = &adapter->msix_entries[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "TxRx", ri++);
+ ti++;
+ } else if (q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "rx", ri++);
+ } else if (q_vector->tx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "tx", ti++);
} else {
/* skip this unused q_vector */
continue;
}
- err = request_irq(adapter->msix_entries[vector].vector,
- handler, 0, adapter->name[vector],
- adapter->q_vector[vector]);
+ err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
+ q_vector->name, q_vector);
if (err) {
hw_dbg(&adapter->hw,
"request_irq failed for MSIX interrupt "
@@ -1158,9 +899,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
}
}
- sprintf(adapter->name[vector], "%s:mbx", netdev->name);
err = request_irq(adapter->msix_entries[vector].vector,
- &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
+ &ixgbevf_msix_mbx, 0, netdev->name, adapter);
if (err) {
hw_dbg(&adapter->hw,
"request_irq for msix_mbx failed: %d\n", err);
@@ -1170,9 +910,11 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
return 0;
free_queue_irqs:
- for (i = vector - 1; i >= 0; i--)
- free_irq(adapter->msix_entries[--vector].vector,
- &(adapter->q_vector[i]));
+ while (vector) {
+ vector--;
+ free_irq(adapter->msix_entries[vector].vector,
+ adapter->q_vector[vector]);
+ }
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
@@ -1185,11 +927,10 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
for (i = 0; i < q_vectors; i++) {
struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
- bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
- bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
- q_vector->rxr_count = 0;
- q_vector->txr_count = 0;
- q_vector->eitr = adapter->eitr_param;
+ q_vector->rx.ring = NULL;
+ q_vector->tx.ring = NULL;
+ q_vector->rx.count = 0;
+ q_vector->tx.count = 0;
}
}
@@ -1215,17 +956,20 @@ static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
int i, q_vectors;
q_vectors = adapter->num_msix_vectors;
-
i = q_vectors - 1;
- free_irq(adapter->msix_entries[i].vector, netdev);
+ free_irq(adapter->msix_entries[i].vector, adapter);
i--;
for (; i >= 0; i--) {
+ /* free only the irqs that were actually requested */
+ if (!adapter->q_vector[i]->rx.ring &&
+ !adapter->q_vector[i]->tx.ring)
+ continue;
+
free_irq(adapter->msix_entries[i].vector,
adapter->q_vector[i]);
}
@@ -1239,10 +983,12 @@ static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
**/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
- int i;
struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
IXGBE_WRITE_FLUSH(hw);
@@ -1254,23 +1000,13 @@ static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
* ixgbevf_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
-static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
- bool queues, bool flush)
+static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 mask;
- u64 qmask;
- mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
- qmask = ~0;
-
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
-
- if (queues)
- ixgbevf_irq_enable_queues(adapter, qmask);
-
- if (flush)
- IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}
/**
@@ -1320,29 +1056,14 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
srrctl = IXGBE_SRRCTL_DROP_EN;
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- u16 bufsz = IXGBEVF_RXBUFFER_2048;
- /* grow the amount we can receive on large page machines */
- if (bufsz < (PAGE_SIZE / 2))
- bufsz = (PAGE_SIZE / 2);
- /* cap the bufsz at our largest descriptor size */
- bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);
-
- srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK);
- } else {
- srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
- if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
- srrctl |= IXGBEVF_RXBUFFER_2048 >>
- IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- else
- srrctl |= rx_ring->rx_buf_len >>
- IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- }
+ if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
+ srrctl |= IXGBEVF_RXBUFFER_2048 >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= rx_ring->rx_buf_len >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
@@ -1362,36 +1083,12 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
u32 rdlen;
int rx_buf_len;
- /* Decide whether to use packet split mode or not */
- if (netdev->mtu > ETH_DATA_LEN) {
- if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
- adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
- else
- adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
- } else {
- if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
- adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
- else
- adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
- }
-
- /* Set the RX buffer length according to the mode */
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- /* PSRTYPE must be initialized in 82599 */
- u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_IPV6HDR |
- IXGBE_PSRTYPE_L2HDR;
- IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
- rx_buf_len = IXGBEVF_RX_HDR_SIZE;
- } else {
- IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
- if (netdev->mtu <= ETH_DATA_LEN)
- rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- else
- rx_buf_len = ALIGN(max_frame, 1024);
- }
+ /* PSRTYPE must be initialized in 82599 */
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+ if (netdev->mtu <= ETH_DATA_LEN)
+ rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ else
+ rx_buf_len = ALIGN(max_frame, 1024);
rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
@@ -1418,9 +1115,14 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ spin_lock(&adapter->mbx_lock);
+
/* add VID to filter table */
if (hw->mac.ops.set_vfta)
hw->mac.ops.set_vfta(hw, vid, 0, true);
+
+ spin_unlock(&adapter->mbx_lock);
+
set_bit(vid, adapter->active_vlans);
return 0;
@@ -1431,9 +1133,14 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ spin_lock(&adapter->mbx_lock);
+
/* remove VID from filter table */
if (hw->mac.ops.set_vfta)
hw->mac.ops.set_vfta(hw, vid, 0, false);
+
+ spin_unlock(&adapter->mbx_lock);
+
clear_bit(vid, adapter->active_vlans);
return 0;
@@ -1488,11 +1195,15 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ spin_lock(&adapter->mbx_lock);
+
/* reprogram multicast list */
if (hw->mac.ops.update_mc_addr_list)
hw->mac.ops.update_mc_addr_list(hw, netdev);
ixgbevf_write_uc_addr_list(netdev);
+
+ spin_unlock(&adapter->mbx_lock);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1502,15 +1213,8 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
- struct napi_struct *napi;
q_vector = adapter->q_vector[q_idx];
- if (!q_vector->rxr_count)
- continue;
- napi = &q_vector->napi;
- if (q_vector->rxr_count > 1)
- napi->poll = &ixgbevf_clean_rxonly_many;
-
- napi_enable(napi);
+ napi_enable(&q_vector->napi);
}
}
@@ -1522,8 +1226,6 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = adapter->q_vector[q_idx];
- if (!q_vector->rxr_count)
- continue;
napi_disable(&q_vector->napi);
}
}
@@ -1541,9 +1243,8 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
ixgbevf_configure_rx(adapter);
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbevf_ring *ring = &adapter->rx_ring[i];
- ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
- ring->next_to_use = ring->count - 1;
- writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
+ ixgbevf_alloc_rx_buffers(adapter, ring,
+ IXGBE_DESC_UNUSED(ring));
}
}
@@ -1647,6 +1348,8 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
ixgbevf_configure_msix(adapter);
+ spin_lock(&adapter->mbx_lock);
+
if (hw->mac.ops.set_rar) {
if (is_valid_ether_addr(hw->mac.addr))
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
@@ -1658,6 +1361,8 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
hw->mbx.ops.write_posted(hw, msg, 2);
+ spin_unlock(&adapter->mbx_lock);
+
clear_bit(__IXGBEVF_DOWN, &adapter->state);
ixgbevf_napi_enable_all(adapter);
@@ -1667,10 +1372,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
ixgbevf_save_reset_stats(adapter);
ixgbevf_init_last_counter_stats(adapter);
- /* bring the link up in the watchdog, this could race with our first
- * link up interrupt but shouldn't be a problem */
- adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
- adapter->link_check_timeout = jiffies;
mod_timer(&adapter->watchdog_timer, jiffies);
}
@@ -1685,7 +1386,7 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
/* clear any pending interrupts, may auto mask */
IXGBE_READ_REG(hw, IXGBE_VTEICR);
- ixgbevf_irq_enable(adapter, true, true);
+ ixgbevf_irq_enable(adapter);
}
/**
@@ -1723,14 +1424,6 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
dev_kfree_skb(this);
} while (skb);
}
- if (!rx_buffer_info->page)
- continue;
- dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, DMA_FROM_DEVICE);
- rx_buffer_info->page_dma = 0;
- put_page(rx_buffer_info->page);
- rx_buffer_info->page = NULL;
- rx_buffer_info->page_offset = 0;
}
size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
@@ -1767,7 +1460,7 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
for (i = 0; i < tx_ring->count; i++) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
}
size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
@@ -1882,11 +1575,15 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
+ spin_lock(&adapter->mbx_lock);
+
if (hw->mac.ops.reset_hw(hw))
hw_dbg(hw, "PF still resetting\n");
else
hw->mac.ops.init_hw(hw);
+ spin_unlock(&adapter->mbx_lock);
+
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
memcpy(netdev->dev_addr, adapter->hw.mac.addr,
netdev->addr_len);
@@ -1900,10 +1597,9 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
{
int err, vector_threshold;
- /* We'll want at least 3 (vector_threshold):
- * 1) TxQ[0] Cleanup
- * 2) RxQ[0] Cleanup
- * 3) Other (Link Status Change, etc.)
+ /* We'll want at least 2 (vector_threshold):
+ * 1) TxQ[0] + RxQ[0] handler
+ * 2) Other (Link Status Change, etc.)
*/
vector_threshold = MIN_MSIX_COUNT;
@@ -1942,8 +1638,8 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
}
}
-/*
- * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
+/**
+ * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
* @adapter: board private structure to initialize
*
* This is the top level queue allocation routine. The order here is very
@@ -1958,8 +1654,6 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
/* Start with base case */
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
- adapter->num_rx_pools = adapter->num_rx_queues;
- adapter->num_rx_queues_per_pool = 1;
}
/**
@@ -1988,12 +1682,16 @@ static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
adapter->tx_ring[i].count = adapter->tx_ring_count;
adapter->tx_ring[i].queue_index = i;
adapter->tx_ring[i].reg_idx = i;
+ adapter->tx_ring[i].dev = &adapter->pdev->dev;
+ adapter->tx_ring[i].netdev = adapter->netdev;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
adapter->rx_ring[i].count = adapter->rx_ring_count;
adapter->rx_ring[i].queue_index = i;
adapter->rx_ring[i].reg_idx = i;
+ adapter->rx_ring[i].dev = &adapter->pdev->dev;
+ adapter->rx_ring[i].netdev = adapter->netdev;
}
return 0;
@@ -2020,10 +1718,12 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
* It's easy to be greedy for MSI-X vectors, but it really
* doesn't do us much good if we have a lot more vectors
* than CPU's. So let's be conservative and only ask for
- * (roughly) twice the number of vectors as there are CPU's.
+ * (roughly) the same number of vectors as there are CPU's.
+ * The default is to use pairs of vectors.
*/
- v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
- (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+ v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
+ v_budget = min_t(int, v_budget, num_online_cpus());
+ v_budget += NON_Q_VECTORS;
/* A failure in MSI-X entry allocation isn't fatal, but it does
* mean we disable MSI-X capabilities of the adapter. */
@@ -2054,12 +1754,8 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
int q_idx, num_q_vectors;
struct ixgbevf_q_vector *q_vector;
- int napi_vectors;
- int (*poll)(struct napi_struct *, int);
num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- napi_vectors = adapter->num_rx_queues;
- poll = &ixgbevf_clean_rxonly;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
@@ -2067,10 +1763,8 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
goto err_out;
q_vector->adapter = adapter;
q_vector->v_idx = q_idx;
- q_vector->eitr = adapter->eitr_param;
- if (q_idx < napi_vectors)
- netif_napi_add(adapter->netdev, &q_vector->napi,
- (*poll), 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ ixgbevf_poll, 64);
adapter->q_vector[q_idx] = q_vector;
}
@@ -2216,21 +1910,17 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
adapter->netdev->addr_len);
}
- /* Enable dynamic interrupt throttling rates */
- adapter->eitr_param = 20000;
- adapter->itr_setting = 1;
+ /* lock to protect mailbox accesses */
+ spin_lock_init(&adapter->mbx_lock);
- /* set defaults for eitr in MegaBytes */
- adapter->eitr_low = 10;
- adapter->eitr_high = 20;
+ /* Enable dynamic interrupt throttling rates */
+ adapter->rx_itr_setting = 1;
+ adapter->tx_itr_setting = 1;
/* set default ring sizes */
adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
- /* enable rx csum by default */
- adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
-
set_bit(__IXGBEVF_DOWN, &adapter->state);
return 0;
@@ -2290,7 +1980,7 @@ static void ixgbevf_watchdog(unsigned long data)
{
struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
struct ixgbe_hw *hw = &adapter->hw;
- u64 eics = 0;
+ u32 eics = 0;
int i;
/*
@@ -2304,11 +1994,11 @@ static void ixgbevf_watchdog(unsigned long data)
/* get one bit for every active tx/rx interrupt vector */
for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
struct ixgbevf_q_vector *qv = adapter->q_vector[i];
- if (qv->rxr_count || qv->txr_count)
- eics |= (1 << i);
+ if (qv->rx.ring || qv->tx.ring)
+ eics |= 1 << i;
}
- IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
watchdog_short_circuit:
schedule_work(&adapter->watchdog_task);
@@ -2362,8 +2052,16 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
* no LSC interrupt
*/
if (hw->mac.ops.check_link) {
- if ((hw->mac.ops.check_link(hw, &link_speed,
- &link_up, false)) != 0) {
+ s32 need_reset;
+
+ spin_lock(&adapter->mbx_lock);
+
+ need_reset = hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false);
+
+ spin_unlock(&adapter->mbx_lock);
+
+ if (need_reset) {
adapter->link_up = link_up;
adapter->link_speed = link_speed;
netif_carrier_off(netdev);
@@ -2478,7 +2176,6 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->work_limit = tx_ring->count;
return 0;
err:
@@ -2682,7 +2379,7 @@ static int ixgbevf_open(struct net_device *netdev)
if (err)
goto err_req_irq;
- ixgbevf_irq_enable(adapter, true, true);
+ ixgbevf_irq_enable(adapter);
return 0;
@@ -2724,172 +2421,153 @@ static int ixgbevf_close(struct net_device *netdev)
return 0;
}
-static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
+ u32 vlan_macip_lens, u32 type_tucmd,
+ u32 mss_l4len_idx)
{
struct ixgbe_adv_tx_context_desc *context_desc;
- unsigned int i;
- int err;
- struct ixgbevf_tx_buffer *tx_buffer_info;
- u32 vlan_macip_lens = 0, type_tucmd_mlhl;
- u32 mss_l4len_idx, l4len;
+ u16 i = tx_ring->next_to_use;
- if (skb_is_gso(skb)) {
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
- l4len = tcp_hdrlen(skb);
- *hdr_len += l4len;
-
- if (skb->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
- adapter->hw_tso_ctxt++;
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
- adapter->hw_tso6_ctxt++;
- }
+ context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
- i = tx_ring->next_to_use;
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
-
- /* VLAN MACLEN IPLEN */
- if (tx_flags & IXGBE_TX_FLAGS_VLAN)
- vlan_macip_lens |=
- (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
- vlan_macip_lens |= ((skb_network_offset(skb)) <<
- IXGBE_ADVTXD_MACLEN_SHIFT);
- *hdr_len += skb_network_offset(skb);
- vlan_macip_lens |=
- (skb_transport_header(skb) - skb_network_header(skb));
- *hdr_len +=
- (skb_transport_header(skb) - skb_network_header(skb));
- context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = 0;
-
- /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
- type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
- IXGBE_ADVTXD_DTYP_CTXT);
-
- if (skb->protocol == htons(ETH_P_IP))
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
-
- /* MSS L4LEN IDX */
- mss_l4len_idx =
- (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
- mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
- /* use index 1 for TSO */
- mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
- context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-
- tx_buffer_info->time_stamp = jiffies;
- tx_buffer_info->next_to_watch = i;
+ /* set bits to identify this as an advanced context descriptor */
+ type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
- i++;
- if (i == tx_ring->count)
- i = 0;
- tx_ring->next_to_use = i;
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+}
- return true;
+static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+ u32 vlan_macip_lens, type_tucmd;
+ u32 mss_l4len_idx, l4len;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ if (skb_header_cloned(skb)) {
+ int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
}
- return false;
+ /* compute header lengths */
+ l4len = tcp_hdrlen(skb);
+ *hdr_len += l4len;
+ *hdr_len = skb_transport_offset(skb) + l4len;
+
+ /* mss_l4len_id: use 1 as index for TSO */
+ mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+ mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+ /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+ vlan_macip_lens = skb_network_header_len(skb);
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+ ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
+ type_tucmd, mss_l4len_idx);
+
+ return 1;
}
-static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring,
+static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags)
{
- struct ixgbe_adv_tx_context_desc *context_desc;
- unsigned int i;
- struct ixgbevf_tx_buffer *tx_buffer_info;
- u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
- if (skb->ip_summed == CHECKSUM_PARTIAL ||
- (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
- i = tx_ring->next_to_use;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
-
- if (tx_flags & IXGBE_TX_FLAGS_VLAN)
- vlan_macip_lens |= (tx_flags &
- IXGBE_TX_FLAGS_VLAN_MASK);
- vlan_macip_lens |= (skb_network_offset(skb) <<
- IXGBE_ADVTXD_MACLEN_SHIFT);
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- vlan_macip_lens |= (skb_transport_header(skb) -
- skb_network_header(skb));
-
- context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = 0;
-
- type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
- IXGBE_ADVTXD_DTYP_CTXT);
-
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
- type_tucmd_mlhl |=
- IXGBE_ADVTXD_TUCMD_L4T_TCP;
- break;
- case __constant_htons(ETH_P_IPV6):
- /* XXX what about other V6 headers?? */
- if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
- type_tucmd_mlhl |=
- IXGBE_ADVTXD_TUCMD_L4T_TCP;
- break;
- default:
- if (unlikely(net_ratelimit())) {
- pr_warn("partial checksum but "
- "proto=%x!\n", skb->protocol);
- }
- break;
- }
- }
- context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
- /* use index zero for tx checksum offload */
- context_desc->mss_l4len_idx = 0;
- tx_buffer_info->time_stamp = jiffies;
- tx_buffer_info->next_to_watch = i;
+ u32 vlan_macip_lens = 0;
+ u32 mss_l4len_idx = 0;
+ u32 type_tucmd = 0;
- adapter->hw_csum_tx_good++;
- i++;
- if (i == tx_ring->count)
- i = 0;
- tx_ring->next_to_use = i;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 l4_hdr = 0;
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but proto=%x!\n",
+ skb->protocol);
+ }
+ break;
+ }
- return true;
+ switch (l4_hdr) {
+ case IPPROTO_TCP:
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ mss_l4len_idx = tcp_hdrlen(skb) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ case IPPROTO_SCTP:
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ mss_l4len_idx = sizeof(struct sctphdr) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ case IPPROTO_UDP:
+ mss_l4len_idx = sizeof(struct udphdr) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but l4 proto=%x!\n",
+ l4_hdr);
+ }
+ break;
+ }
}
- return false;
+ /* vlan_macip_lens: MACLEN, VLAN tag */
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+ ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
+ type_tucmd, mss_l4len_idx);
+
+ return (skb->ip_summed == CHECKSUM_PARTIAL);
}
-static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring,
+static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags,
unsigned int first)
{
- struct pci_dev *pdev = adapter->pdev;
struct ixgbevf_tx_buffer *tx_buffer_info;
unsigned int len;
unsigned int total = skb->len;
@@ -2908,12 +2586,11 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
+ tx_buffer_info->dma = dma_map_single(tx_ring->dev,
skb->data + offset,
size, DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+ if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
goto dma_error;
- tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
len -= size;
@@ -2938,12 +2615,12 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->dma =
- skb_frag_dma_map(&adapter->pdev->dev, frag,
+ skb_frag_dma_map(tx_ring->dev, frag,
offset, size, DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
- if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+ if (dma_mapping_error(tx_ring->dev,
+ tx_buffer_info->dma))
goto dma_error;
- tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
len -= size;
@@ -2964,15 +2641,15 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
i = i - 1;
tx_ring->tx_buffer_info[i].skb = skb;
tx_ring->tx_buffer_info[first].next_to_watch = i;
+ tx_ring->tx_buffer_info[first].time_stamp = jiffies;
return count;
dma_error:
- dev_err(&pdev->dev, "TX DMA map failed\n");
+ dev_err(tx_ring->dev, "TX DMA map failed\n");
/* clear timestamp and dma mappings for failed tx_buffer_info map */
tx_buffer_info->dma = 0;
- tx_buffer_info->time_stamp = 0;
tx_buffer_info->next_to_watch = 0;
count--;
@@ -2983,14 +2660,13 @@ dma_error:
if (i < 0)
i += tx_ring->count;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
}
return count;
}
-static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring, int tx_flags,
+static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
int count, u32 paylen, u8 hdr_len)
{
union ixgbe_adv_tx_desc *tx_desc = NULL;
@@ -3007,28 +2683,31 @@ static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
if (tx_flags & IXGBE_TX_FLAGS_VLAN)
cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+ if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+ olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
+
if (tx_flags & IXGBE_TX_FLAGS_TSO) {
cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
- olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
-
/* use index 1 context for tso */
olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
if (tx_flags & IXGBE_TX_FLAGS_IPV4)
- olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
+ olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
+
+ }
- } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
- olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
+ /*
+ * Check Context must be set if Tx switch is enabled, which it
+ * always is in the case where virtual functions are running
+ */
+ olinfo_status |= IXGBE_ADVTXD_CC;
olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
i = tx_ring->next_to_use;
while (count--) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
tx_desc->read.cmd_type_len =
cpu_to_le32(cmd_type_len | tx_buffer_info->length);
@@ -3040,24 +2719,14 @@ static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
-
tx_ring->next_to_use = i;
- writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
-static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
- struct ixgbevf_ring *tx_ring, int size)
+static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
- netif_stop_subqueue(netdev, tx_ring->queue_index);
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
* but since that doesn't exist yet, just open code it. */
@@ -3069,17 +2738,16 @@ static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
return -EBUSY;
/* A reprieve! - use start_queue because it doesn't call schedule */
- netif_start_subqueue(netdev, tx_ring->queue_index);
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
++adapter->restart_queue;
return 0;
}
-static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
- struct ixgbevf_ring *tx_ring, int size)
+static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
return 0;
- return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
+ return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
@@ -3090,54 +2758,66 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned int tx_flags = 0;
u8 hdr_len = 0;
int r_idx = 0, tso;
- int count = 0;
-
- unsigned int f;
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+ unsigned short f;
+#endif
tx_ring = &adapter->tx_ring[r_idx];
+ /*
+ * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+ count += skb_shinfo(skb)->nr_frags;
+#endif
+ if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
+ adapter->tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
if (vlan_tx_tag_present(skb)) {
tx_flags |= vlan_tx_tag_get(skb);
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
}
- /* four things can cause us to need a context descriptor */
- if (skb_is_gso(skb) ||
- (skb->ip_summed == CHECKSUM_PARTIAL) ||
- (tx_flags & IXGBE_TX_FLAGS_VLAN))
- count++;
-
- count += TXD_USE_COUNT(skb_headlen(skb));
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));
-
- if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
- adapter->tx_busy++;
- return NETDEV_TX_BUSY;
- }
-
first = tx_ring->next_to_use;
if (skb->protocol == htons(ETH_P_IP))
tx_flags |= IXGBE_TX_FLAGS_IPV4;
- tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+ tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
if (tso)
- tx_flags |= IXGBE_TX_FLAGS_TSO;
- else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
- (skb->ip_summed == CHECKSUM_PARTIAL))
+ tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
+ else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
tx_flags |= IXGBE_TX_FLAGS_CSUM;
- ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
- ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
+ ixgbevf_tx_queue(tx_ring, tx_flags,
+ ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
skb->len, hdr_len);
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
- ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+ ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
return NETDEV_TX_OK;
}
@@ -3161,9 +2841,13 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+ spin_lock(&adapter->mbx_lock);
+
if (hw->mac.ops.set_rar)
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+ spin_unlock(&adapter->mbx_lock);
+
return 0;
}
@@ -3220,9 +2904,7 @@ static void ixgbevf_shutdown(struct pci_dev *pdev)
ixgbevf_free_all_rx_resources(adapter);
}
-#ifdef CONFIG_PM
pci_save_state(pdev);
-#endif
pci_disable_device(pdev);
}
@@ -3265,19 +2947,6 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
return stats;
}
-static int ixgbevf_set_features(struct net_device *netdev,
- netdev_features_t features)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-
- if (features & NETIF_F_RXCSUM)
- adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
- else
- adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
-
- return 0;
-}
-
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbevf_open,
.ndo_stop = ixgbevf_close,
@@ -3290,7 +2959,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_tx_timeout = ixgbevf_tx_timeout,
.ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
- .ndo_set_features = ixgbevf_set_features,
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -3350,12 +3018,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
pci_set_master(pdev);
-#ifdef HAVE_TX_MQ
netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
MAX_TX_QUEUES);
-#else
- netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
-#endif
if (!netdev) {
err = -ENOMEM;
goto err_alloc_etherdev;
@@ -3396,10 +3060,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
sizeof(struct ixgbe_mbx_operations));
- adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
- adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
- adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
-
/* setup the private structure */
err = ixgbevf_sw_init(adapter);
if (err)
@@ -3458,8 +3118,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
if (err)
goto err_register;
- adapter->netdev_registered = true;
-
netif_carrier_off(netdev);
ixgbevf_init_last_counter_stats(adapter);
@@ -3469,8 +3127,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
hw_dbg(hw, "MAC: %d\n", hw->mac.type);
- hw_dbg(hw, "LRO is disabled\n");
-
hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
cards_found++;
return 0;
@@ -3510,10 +3166,8 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->watchdog_task);
- if (adapter->netdev_registered) {
+ if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- adapter->netdev_registered = false;
- }
ixgbevf_reset_interrupt_capability(adapter);
@@ -3530,12 +3184,92 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+/**
+ * ixgbevf_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ ixgbevf_down(adapter);
+
+ pci_disable_device(pdev);
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * ixgbevf_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the ixgbevf_resume routine.
+ */
+static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+
+ ixgbevf_reset(adapter);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * ixgbevf_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the ixgbevf_resume routine.
+ */
+static void ixgbevf_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ ixgbevf_up(adapter);
+
+ netif_device_attach(netdev);
+}
+
+/* PCI Error Recovery (ERS) */
+static struct pci_error_handlers ixgbevf_err_handler = {
+ .error_detected = ixgbevf_io_error_detected,
+ .slot_reset = ixgbevf_io_slot_reset,
+ .resume = ixgbevf_io_resume,
+};
+
static struct pci_driver ixgbevf_driver = {
.name = ixgbevf_driver_name,
.id_table = ixgbevf_pci_tbl,
.probe = ixgbevf_probe,
.remove = __devexit_p(ixgbevf_remove),
.shutdown = ixgbevf_shutdown,
+ .err_handler = &ixgbevf_err_handler
};
/**
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 4ea6580..c911d88 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2743,6 +2743,17 @@ jme_set_features(struct net_device *netdev, netdev_features_t features)
return 0;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void jme_netpoll(struct net_device *dev)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ jme_intr(dev->irq, dev);
+ local_irq_restore(flags);
+}
+#endif
+
static int
jme_nway_reset(struct net_device *netdev)
{
@@ -2944,6 +2955,9 @@ static const struct net_device_ops jme_netdev_ops = {
.ndo_tx_timeout = jme_tx_timeout,
.ndo_fix_features = jme_fix_features,
.ndo_set_features = jme_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = jme_netpoll,
+#endif
};
static int __devinit
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 5dc9cbd..003c5bc 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -149,7 +149,6 @@ ltq_etop_hw_receive(struct ltq_etop_chan *ch)
spin_unlock_irqrestore(&priv->lock, flags);
skb_put(skb, len);
- skb->dev = ch->netdev;
skb->protocol = eth_type_trans(skb, ch->netdev);
netif_receive_skb(skb);
}
@@ -646,7 +645,7 @@ ltq_etop_init(struct net_device *dev)
memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
if (!is_valid_ether_addr(mac.sa_data)) {
pr_warn("etop: invalid MAC, using random\n");
- random_ether_addr(mac.sa_data);
+ eth_random_addr(mac.sa_data);
random_mac = true;
}
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 04d901d..770ee55 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -436,7 +436,9 @@ struct mv643xx_eth_private {
/*
* Hardware-specific parameters.
*/
+#if defined(CONFIG_HAVE_CLK)
struct clk *clk;
+#endif
unsigned int t_clk;
};
@@ -1894,7 +1896,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
goto out_free;
}
- rx_desc = (struct rx_desc *)rxq->rx_desc_area;
+ rx_desc = rxq->rx_desc_area;
for (i = 0; i < rxq->rx_ring_size; i++) {
int nexti;
@@ -1999,7 +2001,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
txq->tx_desc_area_size = size;
- tx_desc = (struct tx_desc *)txq->tx_desc_area;
+ tx_desc = txq->tx_desc_area;
for (i = 0; i < txq->tx_ring_size; i++) {
struct tx_desc *txd = tx_desc + i;
int nexti;
@@ -2895,17 +2897,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mp->dev = dev;
/*
- * Get the clk rate, if there is one, otherwise use the default.
+ * Start with a default rate, and if there is a clock, allow
+ * it to override the default.
*/
+ mp->t_clk = 133000000;
+#if defined(CONFIG_HAVE_CLK)
mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0"));
if (!IS_ERR(mp->clk)) {
clk_prepare_enable(mp->clk);
mp->t_clk = clk_get_rate(mp->clk);
- } else {
- mp->t_clk = 133000000;
- printk(KERN_WARNING "Unable to get clock");
}
-
+#endif
set_params(mp, pd);
netif_set_real_num_tx_queues(dev, mp->txq_count);
netif_set_real_num_rx_queues(dev, mp->rxq_count);
@@ -2995,10 +2997,13 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
phy_detach(mp->phy);
cancel_work_sync(&mp->tx_timeout_task);
+#if defined(CONFIG_HAVE_CLK)
if (!IS_ERR(mp->clk)) {
clk_disable_unprepare(mp->clk);
clk_put(mp->clk);
}
+#endif
+
free_netdev(mp->dev);
platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 1db023b..5948972 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1032,7 +1032,7 @@ static int rxq_init(struct net_device *dev)
}
memset((void *)pep->p_rx_desc_area, 0, size);
/* initialize the next_desc_ptr links in the Rx descriptors ring */
- p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
+ p_rx_desc = pep->p_rx_desc_area;
for (i = 0; i < rx_desc_num; i++) {
p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
@@ -1095,7 +1095,7 @@ static int txq_init(struct net_device *dev)
}
memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
/* Initialize the next_desc_ptr links in the Tx descriptors ring */
- p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;
+ p_tx_desc = pep->p_tx_desc_area;
for (i = 0; i < tx_desc_num; i++) {
p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index cace36f..2b0748d 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -141,6 +141,7 @@ static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
{ 0 }
};
@@ -3079,8 +3080,10 @@ static irqreturn_t sky2_intr(int irq, void *dev_id)
/* Reading this mask interrupts as side effect */
status = sky2_read32(hw, B0_Y2_SP_ISRC2);
- if (status == 0 || status == ~0)
+ if (status == 0 || status == ~0) {
+ sky2_write32(hw, B0_Y2_SP_ICR, 2);
return IRQ_NONE;
+ }
prefetch(&hw->st_le[hw->st_idx]);
@@ -3349,6 +3352,17 @@ static void sky2_reset(struct sky2_hw *hw)
sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL,
reg);
+ if (hw->chip_id == CHIP_ID_YUKON_PRM &&
+ hw->chip_rev == CHIP_REV_YU_PRM_A0) {
+ /* change PHY Interrupt polarity to low active */
+ reg = sky2_read16(hw, GPHY_CTRL);
+ sky2_write16(hw, GPHY_CTRL, reg | GPC_INTPOL);
+
+ /* adapt HW for low active PHY Interrupt */
+ reg = sky2_read16(hw, Y2_CFG_SPC + PCI_LDO_CTRL);
+ sky2_write16(hw, Y2_CFG_SPC + PCI_LDO_CTRL, reg | PHY_M_UNDOC1);
+ }
+
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
/* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
@@ -4381,10 +4395,12 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
struct sky2_port *sky2 = netdev_priv(dev);
netdev_features_t changed = dev->features ^ features;
- if (changed & NETIF_F_RXCSUM) {
- bool on = features & NETIF_F_RXCSUM;
- sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
- on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+ if ((changed & NETIF_F_RXCSUM) &&
+ !(sky2->hw->flags & SKY2_HW_NEW_LE)) {
+ sky2_write32(sky2->hw,
+ Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ (features & NETIF_F_RXCSUM)
+ ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
}
if (changed & NETIF_F_RXHASH)
@@ -4869,7 +4885,7 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
"UL 2", /* 0xba */
"Unknown", /* 0xbb */
"Optima", /* 0xbc */
- "Optima Prime", /* 0xbd */
+ "OptimaEEE", /* 0xbd */
"Optima 2", /* 0xbe */
};
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index 3c896ce..615ac63 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -23,6 +23,7 @@ enum {
PSM_CONFIG_REG3 = 0x164,
PSM_CONFIG_REG4 = 0x168,
+ PCI_LDO_CTRL = 0xbc,
};
/* Yukon-2 */
@@ -586,6 +587,10 @@ enum yukon_supr_rev {
CHIP_REV_YU_SU_B1 = 3,
};
+enum yukon_prm_rev {
+ CHIP_REV_YU_PRM_Z1 = 1,
+ CHIP_REV_YU_PRM_A0 = 2,
+};
/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 1bcead1..7e94987d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -617,7 +617,7 @@ static struct mlx4_cmd_info cmd_info[] = {
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = NULL
+ .wrapper = mlx4_QUERY_FW_wrapper
},
{
.opcode = MLX4_CMD_QUERY_HCA,
@@ -635,7 +635,7 @@ static struct mlx4_cmd_info cmd_info[] = {
.out_is_imm = false,
.encode_slave_id = false,
.verify = NULL,
- .wrapper = NULL
+ .wrapper = mlx4_QUERY_DEV_CAP_wrapper
},
{
.opcode = MLX4_CMD_QUERY_FUNC_CAP,
@@ -1080,6 +1080,25 @@ static struct mlx4_cmd_info cmd_info[] = {
.verify = NULL,
.wrapper = NULL
},
+ /* flow steering commands */
+ {
+ .opcode = MLX4_QP_FLOW_STEERING_ATTACH,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = true,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
+ },
+ {
+ .opcode = MLX4_QP_FLOW_STEERING_DETACH,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
+ },
};
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 908a460..aa9c2f6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -77,6 +77,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
char name[25];
+ struct cpu_rmap *rmap =
+#ifdef CONFIG_RFS_ACCEL
+ priv->dev->rx_cpu_rmap;
+#else
+ NULL;
+#endif
cq->dev = mdev->pndev[priv->port];
cq->mcq.set_ci_db = cq->wqres.db.db;
@@ -91,7 +97,8 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
sprintf(name, "%s-%d", priv->dev->name,
cq->ring);
/* Set IRQ for specific name (per ring) */
- if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) {
+ if (mlx4_assign_eq(mdev->dev, name, rmap,
+ &cq->vector)) {
cq->vector = (cq->ring + 1 + priv->port)
% mdev->dev->caps.num_comp_vectors;
mlx4_warn(mdev, "Failed Assigning an EQ to "
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 72901ce..9d0b88e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -34,10 +34,14 @@
#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
+#include <linux/mlx4/driver.h>
#include "mlx4_en.h"
#include "en_port.h"
+#define EN_ETHTOOL_QP_ATTACH (1ull << 63)
+#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
+#define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff)
static void
mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
@@ -599,16 +603,369 @@ static int mlx4_en_set_rxfh_indir(struct net_device *dev,
return err;
}
+#define all_zeros_or_all_ones(field) \
+ ((field) == 0 || (field) == (__force typeof(field))-1)
+
+static int mlx4_en_validate_flow(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_usrip4_spec *l3_mask;
+ struct ethtool_tcpip4_spec *l4_mask;
+ struct ethhdr *eth_mask;
+ u64 full_mac = ~0ull;
+ u64 zero_mac = 0;
+
+ if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
+ return -EINVAL;
+
+ switch (cmd->fs.flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ if (cmd->fs.m_u.tcp_ip4_spec.tos)
+ return -EINVAL;
+ l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
+ /* don't allow mask which isn't all 0 or 1 */
+ if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
+ !all_zeros_or_all_ones(l4_mask->ip4dst) ||
+ !all_zeros_or_all_ones(l4_mask->psrc) ||
+ !all_zeros_or_all_ones(l4_mask->pdst))
+ return -EINVAL;
+ break;
+ case IP_USER_FLOW:
+ l3_mask = &cmd->fs.m_u.usr_ip4_spec;
+ if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
+ cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
+ (!l3_mask->ip4src && !l3_mask->ip4dst) ||
+ !all_zeros_or_all_ones(l3_mask->ip4src) ||
+ !all_zeros_or_all_ones(l3_mask->ip4dst))
+ return -EINVAL;
+ break;
+ case ETHER_FLOW:
+ eth_mask = &cmd->fs.m_u.ether_spec;
+ /* source mac mask must not be set */
+ if (memcmp(eth_mask->h_source, &zero_mac, ETH_ALEN))
+ return -EINVAL;
+
+ /* dest mac mask must be ff:ff:ff:ff:ff:ff */
+ if (memcmp(eth_mask->h_dest, &full_mac, ETH_ALEN))
+ return -EINVAL;
+
+ if (!all_zeros_or_all_ones(eth_mask->h_proto))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((cmd->fs.flow_type & FLOW_EXT)) {
+ if (cmd->fs.m_ext.vlan_etype ||
+ !(cmd->fs.m_ext.vlan_tci == 0 ||
+ cmd->fs.m_ext.vlan_tci == cpu_to_be16(0xfff)))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int add_ip_rule(struct mlx4_en_priv *priv,
+ struct ethtool_rxnfc *cmd,
+ struct list_head *list_h)
+{
+ struct mlx4_spec_list *spec_l3;
+ struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;
+
+ spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
+ if (!spec_l3) {
+ en_err(priv, "Fail to alloc ethtool rule.\n");
+ return -ENOMEM;
+ }
+
+ spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
+ spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
+ if (l3_mask->ip4src)
+ spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
+ spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
+ if (l3_mask->ip4dst)
+ spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
+ list_add_tail(&spec_l3->list, list_h);
+
+ return 0;
+}
+
+static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
+ struct ethtool_rxnfc *cmd,
+ struct list_head *list_h, int proto)
+{
+ struct mlx4_spec_list *spec_l3;
+ struct mlx4_spec_list *spec_l4;
+ struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
+
+ spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
+ spec_l4 = kzalloc(sizeof *spec_l4, GFP_KERNEL);
+ if (!spec_l4 || !spec_l3) {
+ en_err(priv, "Fail to alloc ethtool rule.\n");
+ kfree(spec_l3);
+ kfree(spec_l4);
+ return -ENOMEM;
+ }
+
+ spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
+
+ if (proto == TCP_V4_FLOW) {
+ spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
+ spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
+ spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
+ spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
+ spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
+ } else {
+ spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
+ spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
+ spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
+ spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
+ spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
+ }
+
+ if (l4_mask->ip4src)
+ spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
+ if (l4_mask->ip4dst)
+ spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
+
+ if (l4_mask->psrc)
+ spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK;
+ if (l4_mask->pdst)
+ spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK;
+
+ list_add_tail(&spec_l3->list, list_h);
+ list_add_tail(&spec_l4->list, list_h);
+
+ return 0;
+}
+
+static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
+ struct ethtool_rxnfc *cmd,
+ struct list_head *rule_list_h)
+{
+ int err;
+ u64 mac;
+ __be64 be_mac;
+ struct ethhdr *eth_spec;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_spec_list *spec_l2;
+ __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ err = mlx4_en_validate_flow(dev, cmd);
+ if (err)
+ return err;
+
+ spec_l2 = kzalloc(sizeof *spec_l2, GFP_KERNEL);
+ if (!spec_l2)
+ return -ENOMEM;
+
+ mac = priv->mac & MLX4_MAC_MASK;
+ be_mac = cpu_to_be64(mac << 16);
+
+ spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
+ if ((cmd->fs.flow_type & ~FLOW_EXT) != ETHER_FLOW)
+ memcpy(spec_l2->eth.dst_mac, &be_mac, ETH_ALEN);
+
+ if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) {
+ spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
+ spec_l2->eth.vlan_id_msk = cpu_to_be16(0xfff);
+ }
+
+ list_add_tail(&spec_l2->list, rule_list_h);
+
+ switch (cmd->fs.flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ eth_spec = &cmd->fs.h_u.ether_spec;
+ memcpy(&spec_l2->eth.dst_mac, eth_spec->h_dest, ETH_ALEN);
+ spec_l2->eth.ether_type = eth_spec->h_proto;
+ if (eth_spec->h_proto)
+ spec_l2->eth.ether_type_enable = 1;
+ break;
+ case IP_USER_FLOW:
+ err = add_ip_rule(priv, cmd, rule_list_h);
+ break;
+ case TCP_V4_FLOW:
+ err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
+ break;
+ case UDP_V4_FLOW:
+ err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW);
+ break;
+ }
+
+ return err;
+}
+
+static int mlx4_en_flow_replace(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ int err;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct ethtool_flow_id *loc_rule;
+ struct mlx4_spec_list *spec, *tmp_spec;
+ u32 qpn;
+ u64 reg_id;
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_PROMISC_NONE,
+ };
+
+ rule.port = priv->port;
+ rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
+ INIT_LIST_HEAD(&rule.list);
+
+ /* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
+ if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
+ qpn = priv->drop_qp.qpn;
+ else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
+ qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
+ } else {
+ if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
+ en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n",
+ cmd->fs.ring_cookie);
+ return -EINVAL;
+ }
+ qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
+ if (!qpn) {
+ en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n",
+ cmd->fs.ring_cookie);
+ return -EINVAL;
+ }
+ }
+ rule.qpn = qpn;
+ err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
+ if (err)
+ goto out_free_list;
+
+ loc_rule = &priv->ethtool_rules[cmd->fs.location];
+ if (loc_rule->id) {
+ err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
+ if (err) {
+ en_err(priv, "Fail to detach network rule at location %d. registration id = %llx\n",
+ cmd->fs.location, loc_rule->id);
+ goto out_free_list;
+ }
+ loc_rule->id = 0;
+ memset(&loc_rule->flow_spec, 0,
+ sizeof(struct ethtool_rx_flow_spec));
+ }
+ err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
+ if (err) {
+ en_err(priv, "Fail to attach network rule at location %d.\n",
+ cmd->fs.location);
+ goto out_free_list;
+ }
+ loc_rule->id = reg_id;
+ memcpy(&loc_rule->flow_spec, &cmd->fs,
+ sizeof(struct ethtool_rx_flow_spec));
+
+out_free_list:
+ list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
+ list_del(&spec->list);
+ kfree(spec);
+ }
+ return err;
+}
+
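For reference, the rules handled by mlx4_en_flow_replace() arrive from user space through the ETHTOOL_SRXCLSRLINS ioctl; fs.ring_cookie names a normal RX ring, RX_CLS_FLOW_DISC for the drop QP, or — with bit 63 (EN_ETHTOOL_QP_ATTACH) set — a raw QP number. A minimal user-space sketch, assuming an interface named eth0 and RX ring 3, with error handling trimmed; this is an illustration, not part of the patch:

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V4_FLOW;
	/* Match TCP destination port 80; masks must be all-zeros or
	 * all-ones, which is what mlx4_en_validate_flow() enforces. */
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
	nfc.fs.m_u.tcp_ip4_spec.pdst = 0xffff;
	nfc.fs.location = 0;		/* slot in priv->ethtool_rules[] */
	nfc.fs.ring_cookie = 3;		/* deliver to RX ring 3 */
	/* ring_cookie = RX_CLS_FLOW_DISC would steer matches to the drop QP */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;
	return ioctl(fd, SIOCETHTOOL, &ifr) ? 1 : 0;
}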
+static int mlx4_en_flow_detach(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ int err = 0;
+ struct ethtool_flow_id *rule;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
+ return -EINVAL;
+
+ rule = &priv->ethtool_rules[cmd->fs.location];
+ if (!rule->id) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ err = mlx4_flow_detach(priv->mdev->dev, rule->id);
+ if (err) {
+ en_err(priv, "Fail to detach network rule at location %d. registration id = 0x%llx\n",
+ cmd->fs.location, rule->id);
+ goto out;
+ }
+ rule->id = 0;
+ memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
+out:
+ return err;
+}
+
+static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ int loc)
+{
+ int err = 0;
+ struct ethtool_flow_id *rule;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
+ return -EINVAL;
+
+ rule = &priv->ethtool_rules[loc];
+ if (rule->id)
+ memcpy(&cmd->fs, &rule->flow_spec,
+ sizeof(struct ethtool_rx_flow_spec));
+ else
+ err = -ENOENT;
+
+ return err;
+}
+
+static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
+{
+	int i, res = 0;
+
+	for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
+		if (priv->ethtool_rules[i].id)
+			res++;
+	}
+	return res;
+}
+
static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
+ int i = 0, priority = 0;
+
+ if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
+ cmd->cmd == ETHTOOL_GRXCLSRULE ||
+ cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
+ mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return -EINVAL;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = priv->rx_ring_num;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = mlx4_en_get_num_flows(priv);
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
+ err = mlx4_en_get_flow(dev, cmd, i);
+ if (!err)
+ rule_locs[priority++] = i;
+ i++;
+ }
+ err = 0;
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -617,6 +974,30 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return err;
}
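The ETHTOOL_GRXCLSRLALL branch above reports the occupied slots of priv->ethtool_rules[] in rule_locs[]; user space normally asks for the rule count first and then for the locations. A user-space sketch of that sequence, again assuming eth0 and trimming error handling (including the calloc() check); an illustration only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc cnt = { .cmd = ETHTOOL_GRXCLSRLCNT };
	struct ethtool_rxnfc *all;
	struct ifreq ifr;
	unsigned int i;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	ifr.ifr_data = (void *)&cnt;		/* -> mlx4_en_get_num_flows() */
	if (ioctl(fd, SIOCETHTOOL, &ifr))
		return 1;

	all = calloc(1, sizeof(*all) + cnt.rule_cnt * sizeof(__u32));
	all->cmd = ETHTOOL_GRXCLSRLALL;
	all->rule_cnt = cnt.rule_cnt;		/* capacity of rule_locs[] */
	ifr.ifr_data = (void *)all;
	if (ioctl(fd, SIOCETHTOOL, &ifr))	/* -> ETHTOOL_GRXCLSRLALL loop */
		return 1;

	for (i = 0; i < all->rule_cnt; i++)
		printf("rule at location %u\n", all->rule_locs[i]);
	return 0;
}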
+static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ int err = 0;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return -EINVAL;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = mlx4_en_flow_replace(dev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = mlx4_en_flow_detach(dev, cmd);
+ break;
+ default:
+ en_warn(priv, "Unsupported ethtool command. (%d)\n", cmd->cmd);
+ return -EINVAL;
+ }
+
+ return err;
+}
+
const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_drvinfo = mlx4_en_get_drvinfo,
.get_settings = mlx4_en_get_settings,
@@ -637,6 +1018,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_ringparam = mlx4_en_get_ringparam,
.set_ringparam = mlx4_en_set_ringparam,
.get_rxnfc = mlx4_en_get_rxnfc,
+ .set_rxnfc = mlx4_en_set_rxnfc,
.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
.get_rxfh_indir = mlx4_en_get_rxfh_indir,
.set_rxfh_indir = mlx4_en_set_rxfh_indir,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 988b242..69ba572 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -136,13 +136,12 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
struct mlx4_en_priv *priv;
- if (!mdev->pndev[port])
- return;
-
- priv = netdev_priv(mdev->pndev[port]);
switch (event) {
case MLX4_DEV_EVENT_PORT_UP:
case MLX4_DEV_EVENT_PORT_DOWN:
+ if (!mdev->pndev[port])
+ return;
+ priv = netdev_priv(mdev->pndev[port]);
/* To prevent races, we poll the link state in a separate
task rather than changing it here */
priv->link_state = event;
@@ -154,7 +153,10 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
break;
default:
- mlx4_warn(mdev, "Unhandled event: %d\n", event);
+ if (port < 1 || port > dev->caps.num_ports ||
+ !mdev->pndev[port])
+ return;
+ mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, port);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 926d8aa..8864d8b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -36,6 +36,8 @@
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/hash.h>
+#include <net/ip.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
@@ -66,6 +68,299 @@ static int mlx4_en_setup_tc(struct net_device *dev, u8 up)
return 0;
}
+#ifdef CONFIG_RFS_ACCEL
+
+struct mlx4_en_filter {
+ struct list_head next;
+ struct work_struct work;
+
+ __be32 src_ip;
+ __be32 dst_ip;
+ __be16 src_port;
+ __be16 dst_port;
+
+ int rxq_index;
+ struct mlx4_en_priv *priv;
+ u32 flow_id; /* RFS infrastructure id */
+ int id; /* mlx4_en driver id */
+ u64 reg_id; /* Flow steering API id */
+ u8 activated; /* Used to prevent expiry before filter
+ * is attached
+ */
+ struct hlist_node filter_chain;
+};
+
+static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
+
+static void mlx4_en_filter_work(struct work_struct *work)
+{
+ struct mlx4_en_filter *filter = container_of(work,
+ struct mlx4_en_filter,
+ work);
+ struct mlx4_en_priv *priv = filter->priv;
+ struct mlx4_spec_list spec_tcp = {
+ .id = MLX4_NET_TRANS_RULE_ID_TCP,
+ {
+ .tcp_udp = {
+ .dst_port = filter->dst_port,
+ .dst_port_msk = (__force __be16)-1,
+ .src_port = filter->src_port,
+ .src_port_msk = (__force __be16)-1,
+ },
+ },
+ };
+ struct mlx4_spec_list spec_ip = {
+ .id = MLX4_NET_TRANS_RULE_ID_IPV4,
+ {
+ .ipv4 = {
+ .dst_ip = filter->dst_ip,
+ .dst_ip_msk = (__force __be32)-1,
+ .src_ip = filter->src_ip,
+ .src_ip_msk = (__force __be32)-1,
+ },
+ },
+ };
+ struct mlx4_spec_list spec_eth = {
+ .id = MLX4_NET_TRANS_RULE_ID_ETH,
+ };
+ struct mlx4_net_trans_rule rule = {
+ .list = LIST_HEAD_INIT(rule.list),
+ .queue_mode = MLX4_NET_TRANS_Q_LIFO,
+ .exclusive = 1,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_PROMISC_NONE,
+ .port = priv->port,
+ .priority = MLX4_DOMAIN_RFS,
+ };
+ int rc;
+ __be64 mac;
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ list_add_tail(&spec_eth.list, &rule.list);
+ list_add_tail(&spec_ip.list, &rule.list);
+ list_add_tail(&spec_tcp.list, &rule.list);
+
+ mac = cpu_to_be64((priv->mac & MLX4_MAC_MASK) << 16);
+
+ rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
+ memcpy(spec_eth.eth.dst_mac, &mac, ETH_ALEN);
+ memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+
+ filter->activated = 0;
+
+ if (filter->reg_id) {
+ rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
+ if (rc && rc != -ENOENT)
+ en_err(priv, "Error detaching flow. rc = %d\n", rc);
+ }
+
+ rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
+ if (rc)
+ en_err(priv, "Error attaching flow. err = %d\n", rc);
+
+ mlx4_en_filter_rfs_expire(priv);
+
+ filter->activated = 1;
+}
+
+static inline struct hlist_head *
+filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
+ __be16 src_port, __be16 dst_port)
+{
+ unsigned long l;
+ int bucket_idx;
+
+ l = (__force unsigned long)src_port |
+ ((__force unsigned long)dst_port << 2);
+ l ^= (__force unsigned long)(src_ip ^ dst_ip);
+
+ bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);
+
+ return &priv->filter_hash[bucket_idx];
+}
+
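filter_hash_bucket() folds the TCP/IPv4 4-tuple into a single word and lets hash_long() pick one of 2^MLX4_EN_FILTER_HASH_SHIFT hlist buckets. The fold-and-bucket step in isolation, as a user-space sketch — the multiplier is a 64-bit golden-ratio constant standing in for the kernel's hash_long(), and the tuple values are made up for illustration:

#include <stdio.h>

/* Fold a key into one of 2^shift buckets, multiplicative-hash style. */
static unsigned int bucket_of(unsigned long long key, unsigned int shift)
{
	return (unsigned int)((key * 0x9E3779B97F4A7C15ULL) >> (64 - shift));
}

int main(void)
{
	/* 4-tuple fold in the spirit of filter_hash_bucket():
	 * ports in the low bits, the two IPs XORed on top. */
	unsigned long long l = 0x1f90ULL | (0xd431ULL << 2);

	l ^= 0xc0a80001ULL ^ 0xc0a80002ULL;
	printf("key %#llx -> bucket %u of %u\n", l, bucket_of(l, 10), 1u << 10);
	return 0;
}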
+static struct mlx4_en_filter *
+mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
+ __be32 dst_ip, __be16 src_port, __be16 dst_port,
+ u32 flow_id)
+{
+ struct mlx4_en_filter *filter = NULL;
+
+ filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
+ if (!filter)
+ return NULL;
+
+ filter->priv = priv;
+ filter->rxq_index = rxq_index;
+ INIT_WORK(&filter->work, mlx4_en_filter_work);
+
+ filter->src_ip = src_ip;
+ filter->dst_ip = dst_ip;
+ filter->src_port = src_port;
+ filter->dst_port = dst_port;
+
+ filter->flow_id = flow_id;
+
+ filter->id = priv->last_filter_id++;
+
+ list_add_tail(&filter->next, &priv->filters);
+ hlist_add_head(&filter->filter_chain,
+ filter_hash_bucket(priv, src_ip, dst_ip, src_port,
+ dst_port));
+
+ return filter;
+}
+
+static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
+{
+ struct mlx4_en_priv *priv = filter->priv;
+ int rc;
+
+ list_del(&filter->next);
+
+ rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
+ if (rc && rc != -ENOENT)
+ en_err(priv, "Error detaching flow. rc = %d\n", rc);
+
+ kfree(filter);
+}
+
+static inline struct mlx4_en_filter *
+mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
+ __be16 src_port, __be16 dst_port)
+{
+ struct hlist_node *elem;
+ struct mlx4_en_filter *filter;
+ struct mlx4_en_filter *ret = NULL;
+
+ hlist_for_each_entry(filter, elem,
+ filter_hash_bucket(priv, src_ip, dst_ip,
+ src_port, dst_port),
+ filter_chain) {
+ if (filter->src_ip == src_ip &&
+ filter->dst_ip == dst_ip &&
+ filter->src_port == src_port &&
+ filter->dst_port == dst_port) {
+ ret = filter;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int
+mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct mlx4_en_priv *priv = netdev_priv(net_dev);
+ struct mlx4_en_filter *filter;
+ const struct iphdr *ip;
+ const __be16 *ports;
+ __be32 src_ip;
+ __be32 dst_ip;
+ __be16 src_port;
+ __be16 dst_port;
+ int nhoff = skb_network_offset(skb);
+ int ret = 0;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return -EPROTONOSUPPORT;
+
+ ip = (const struct iphdr *)(skb->data + nhoff);
+ if (ip_is_fragment(ip))
+ return -EPROTONOSUPPORT;
+
+ ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+
+ src_ip = ip->saddr;
+ dst_ip = ip->daddr;
+ src_port = ports[0];
+ dst_port = ports[1];
+
+ if (ip->protocol != IPPROTO_TCP)
+ return -EPROTONOSUPPORT;
+
+ spin_lock_bh(&priv->filters_lock);
+ filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
+ if (filter) {
+ if (filter->rxq_index == rxq_index)
+ goto out;
+
+ filter->rxq_index = rxq_index;
+ } else {
+ filter = mlx4_en_filter_alloc(priv, rxq_index,
+ src_ip, dst_ip,
+ src_port, dst_port, flow_id);
+ if (!filter) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+
+ queue_work(priv->mdev->workqueue, &filter->work);
+
+out:
+ ret = filter->id;
+err:
+ spin_unlock_bh(&priv->filters_lock);
+
+ return ret;
+}
+
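mlx4_en_filter_rfs() is only reached once accelerated RFS is active: the NTUPLE feature is enabled on the netdev (e.g. ethtool -K eth0 ntuple on; the hw_features change further down advertises it for device-managed steering) and the generic RFS tables are sized. A minimal sketch of the sizing step, assuming eth0, queue rx-0, root privileges and the procfs/sysfs paths documented in Documentation/networking/scaling.txt:

#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	/* Global socket-flow table first, then the per-queue flow count;
	 * both must be non-zero before the stack starts calling
	 * ndo_rx_flow_steer() (mlx4_en_filter_rfs above). */
	if (write_str("/proc/sys/net/core/rps_sock_flow_entries", "32768"))
		return 1;
	if (write_str("/sys/class/net/eth0/queues/rx-0/rps_flow_cnt", "4096"))
		return 1;
	return 0;
}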
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *rx_ring)
+{
+ struct mlx4_en_filter *filter, *tmp;
+ LIST_HEAD(del_list);
+
+ spin_lock_bh(&priv->filters_lock);
+ list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
+ list_move(&filter->next, &del_list);
+ hlist_del(&filter->filter_chain);
+ }
+ spin_unlock_bh(&priv->filters_lock);
+
+ list_for_each_entry_safe(filter, tmp, &del_list, next) {
+ cancel_work_sync(&filter->work);
+ mlx4_en_filter_free(filter);
+ }
+}
+
+static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
+ LIST_HEAD(del_list);
+ int i = 0;
+
+ spin_lock_bh(&priv->filters_lock);
+ list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
+ if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
+ break;
+
+ if (filter->activated &&
+ !work_pending(&filter->work) &&
+ rps_may_expire_flow(priv->dev,
+ filter->rxq_index, filter->flow_id,
+ filter->id)) {
+ list_move(&filter->next, &del_list);
+ hlist_del(&filter->filter_chain);
+ } else
+ last_filter = filter;
+
+ i++;
+ }
+
+ if (last_filter && (&last_filter->next != priv->filters.next))
+ list_move(&priv->filters, &last_filter->next);
+
+ spin_unlock_bh(&priv->filters_lock);
+
+ list_for_each_entry_safe(filter, tmp, &del_list, next)
+ mlx4_en_filter_free(filter);
+}
+#endif
+
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -170,33 +465,81 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
static void mlx4_en_clear_list(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_mc_list *tmp, *mc_to_del;
- kfree(priv->mc_addrs);
- priv->mc_addrs = NULL;
- priv->mc_addrs_cnt = 0;
+ list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
+ list_del(&mc_to_del->list);
+ kfree(mc_to_del);
+ }
}
static void mlx4_en_cache_mclist(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct netdev_hw_addr *ha;
- char *mc_addrs;
- int mc_addrs_cnt = netdev_mc_count(dev);
- int i;
+ struct mlx4_en_mc_list *tmp;
- mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
- if (!mc_addrs) {
- en_err(priv, "failed to allocate multicast list\n");
- return;
- }
- i = 0;
- netdev_for_each_mc_addr(ha, dev)
- memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
mlx4_en_clear_list(dev);
- priv->mc_addrs = mc_addrs;
- priv->mc_addrs_cnt = mc_addrs_cnt;
+ netdev_for_each_mc_addr(ha, dev) {
+ tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
+ if (!tmp) {
+ en_err(priv, "failed to allocate multicast list\n");
+ mlx4_en_clear_list(dev);
+ return;
+ }
+ memcpy(tmp->addr, ha->addr, ETH_ALEN);
+ list_add_tail(&tmp->list, &priv->mc_list);
+ }
}
+static void update_mclist_flags(struct mlx4_en_priv *priv,
+ struct list_head *dst,
+ struct list_head *src)
+{
+ struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
+ bool found;
+
+	/* Find all the entries that should be removed from dst;
+	 * these are the entries that are not found in src.
+	 */
+ list_for_each_entry(dst_tmp, dst, list) {
+ found = false;
+ list_for_each_entry(src_tmp, src, list) {
+ if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ dst_tmp->action = MCLIST_REM;
+ }
+
+	/* Add entries that exist in src but not in dst,
+	 * marking them as needing to be added.
+	 */
+ list_for_each_entry(src_tmp, src, list) {
+ found = false;
+ list_for_each_entry(dst_tmp, dst, list) {
+ if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
+ dst_tmp->action = MCLIST_NONE;
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
+ GFP_KERNEL);
+ if (!new_mc) {
+ en_err(priv, "Failed to allocate current multicast list\n");
+ return;
+ }
+ memcpy(new_mc, src_tmp,
+ sizeof(struct mlx4_en_mc_list));
+ new_mc->action = MCLIST_ADD;
+ list_add_tail(&new_mc->list, dst);
+ }
+ }
+}
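update_mclist_flags() is a two-pass set difference over MAC addresses: pass one tags entries present only in dst with MCLIST_REM, pass two copies entries present only in src into dst tagged MCLIST_ADD. The same idea on plain arrays, as a self-contained sketch (addresses and names here are made up for illustration):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

enum mc_action { MC_NONE, MC_ADD, MC_REM };

struct mc_entry {
	unsigned char addr[ETH_ALEN];
	enum mc_action action;
};

int main(void)
{
	struct mc_entry dst[] = { { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
				  { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x02 } } };
	struct mc_entry src[] = { { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x02 } },
				  { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x03 } } };
	int ndst = 2, nsrc = 2, i, j;

	/* Pass 1: anything in dst but not in src is to be removed. */
	for (i = 0; i < ndst; i++) {
		dst[i].action = MC_REM;
		for (j = 0; j < nsrc; j++)
			if (!memcmp(dst[i].addr, src[j].addr, ETH_ALEN)) {
				dst[i].action = MC_NONE;
				break;
			}
	}
	/* Pass 2: anything in src but not in dst is to be added. */
	for (j = 0; j < nsrc; j++) {
		for (i = 0; i < ndst; i++)
			if (!memcmp(dst[i].addr, src[j].addr, ETH_ALEN))
				break;
		if (i == ndst)
			printf("ADD %02x:%02x:%02x:%02x:%02x:%02x\n",
			       src[j].addr[0], src[j].addr[1], src[j].addr[2],
			       src[j].addr[3], src[j].addr[4], src[j].addr[5]);
	}
	for (i = 0; i < ndst; i++)
		if (dst[i].action == MC_REM)
			printf("REM %02x:%02x:%02x:%02x:%02x:%02x\n",
			       dst[i].addr[0], dst[i].addr[1], dst[i].addr[2],
			       dst[i].addr[3], dst[i].addr[4], dst[i].addr[5]);
	return 0;
}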
static void mlx4_en_set_multicast(struct net_device *dev)
{
@@ -214,9 +557,10 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
mcast_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
+ struct mlx4_en_mc_list *mclist, *tmp;
u64 mcast_addr = 0;
u8 mc_list[16] = {0};
- int err;
+ int err = 0;
mutex_lock(&mdev->state_lock);
if (!mdev->device_up) {
@@ -251,16 +595,46 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
priv->flags |= MLX4_EN_FLAG_PROMISC;
	/* Enable promiscuous mode */
- if (!(mdev->dev->caps.flags &
- MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
- priv->base_qpn, 1);
- else
- err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
+ switch (mdev->dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ err = mlx4_flow_steer_promisc_add(mdev->dev,
+ priv->port,
+ priv->base_qpn,
+ MLX4_FS_PROMISC_UPLINK);
+ if (err)
+ en_err(priv, "Failed enabling promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+ break;
+
+ case MLX4_STEERING_MODE_B0:
+ err = mlx4_unicast_promisc_add(mdev->dev,
+ priv->base_qpn,
priv->port);
- if (err)
- en_err(priv, "Failed enabling "
- "promiscuous mode\n");
+ if (err)
+ en_err(priv, "Failed enabling unicast promiscuous mode\n");
+
+ /* Add the default qp number as multicast
+ * promisc
+ */
+ if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+ err = mlx4_multicast_promisc_add(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed enabling multicast promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+ }
+ break;
+
+ case MLX4_STEERING_MODE_A0:
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev,
+ priv->port,
+ priv->base_qpn,
+ 1);
+ if (err)
+ en_err(priv, "Failed enabling promiscuous mode\n");
+ break;
+ }
/* Disable port multicast filter (unconditionally) */
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
@@ -269,15 +643,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
en_err(priv, "Failed disabling "
"multicast filter\n");
- /* Add the default qp number as multicast promisc */
- if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
- err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
- priv->port);
- if (err)
- en_err(priv, "Failed entering multicast promisc mode\n");
- priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
- }
-
/* Disable port VLAN filter */
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
if (err)
@@ -296,22 +661,40 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
priv->flags &= ~MLX4_EN_FLAG_PROMISC;
	/* Disable promiscuous mode */
- if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
- priv->base_qpn, 0);
- else
- err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
+ switch (mdev->dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ err = mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_UPLINK);
+ if (err)
+ en_err(priv, "Failed disabling promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ break;
+
+ case MLX4_STEERING_MODE_B0:
+ err = mlx4_unicast_promisc_remove(mdev->dev,
+ priv->base_qpn,
priv->port);
- if (err)
- en_err(priv, "Failed disabling promiscuous mode\n");
+ if (err)
+ en_err(priv, "Failed disabling unicast promiscuous mode\n");
+ /* Disable Multicast promisc */
+ if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+ err = mlx4_multicast_promisc_remove(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed disabling multicast promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ }
+ break;
- /* Disable Multicast promisc */
- if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
- err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
- priv->port);
+ case MLX4_STEERING_MODE_A0:
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev,
+ priv->port,
+ priv->base_qpn, 0);
if (err)
- en_err(priv, "Failed disabling multicast promiscuous mode\n");
- priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ en_err(priv, "Failed disabling promiscuous mode\n");
+ break;
}
/* Enable port VLAN filter */
@@ -329,18 +712,46 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
/* Add the default qp number as multicast promisc */
if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
- err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
- priv->port);
+ switch (mdev->dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ err = mlx4_flow_steer_promisc_add(mdev->dev,
+ priv->port,
+ priv->base_qpn,
+ MLX4_FS_PROMISC_ALL_MULTI);
+ break;
+
+ case MLX4_STEERING_MODE_B0:
+ err = mlx4_multicast_promisc_add(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ break;
+
+ case MLX4_STEERING_MODE_A0:
+ break;
+ }
if (err)
en_err(priv, "Failed entering multicast promisc mode\n");
priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
}
} else {
- int i;
/* Disable Multicast promisc */
if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
- err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
- priv->port);
+ switch (mdev->dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ err = mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_ALL_MULTI);
+ break;
+
+ case MLX4_STEERING_MODE_B0:
+ err = mlx4_multicast_promisc_remove(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ break;
+
+ case MLX4_STEERING_MODE_A0:
+ break;
+ }
if (err)
en_err(priv, "Failed disabling multicast promiscuous mode\n");
priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
@@ -351,13 +762,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
if (err)
en_err(priv, "Failed disabling multicast filter\n");
- /* Detach our qp from all the multicast addresses */
- for (i = 0; i < priv->mc_addrs_cnt; i++) {
- memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
- mc_list[5] = priv->port;
- mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
- mc_list, MLX4_PROT_ETH);
- }
/* Flush mcast filter and init it with broadcast address */
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
1, MLX4_MCAST_CONFIG);
@@ -367,13 +771,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
netif_tx_lock_bh(dev);
mlx4_en_cache_mclist(dev);
netif_tx_unlock_bh(dev);
- for (i = 0; i < priv->mc_addrs_cnt; i++) {
- mcast_addr =
- mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
- memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
- mc_list[5] = priv->port;
- mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
- mc_list, 0, MLX4_PROT_ETH);
+ list_for_each_entry(mclist, &priv->mc_list, list) {
+ mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
mcast_addr, 0, MLX4_MCAST_CONFIG);
}
@@ -381,6 +780,40 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
0, MLX4_MCAST_ENABLE);
if (err)
en_err(priv, "Failed enabling multicast filter\n");
+
+ update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
+ list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
+ if (mclist->action == MCLIST_REM) {
+ /* detach this address and delete from list */
+ memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
+ mc_list[5] = priv->port;
+ err = mlx4_multicast_detach(mdev->dev,
+ &priv->rss_map.indir_qp,
+ mc_list,
+ MLX4_PROT_ETH,
+ mclist->reg_id);
+ if (err)
+ en_err(priv, "Fail to detach multicast address\n");
+
+ /* remove from list */
+ list_del(&mclist->list);
+ kfree(mclist);
+ } else if (mclist->action == MCLIST_ADD) {
+ /* attach the address */
+ memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
+ /* needed for B0 steering support */
+ mc_list[5] = priv->port;
+ err = mlx4_multicast_attach(mdev->dev,
+ &priv->rss_map.indir_qp,
+ mc_list,
+ priv->port, 0,
+ MLX4_PROT_ETH,
+ &mclist->reg_id);
+ if (err)
+ en_err(priv, "Fail to attach multicast address\n");
+
+ }
+ }
}
out:
mutex_unlock(&mdev->state_lock);
@@ -605,6 +1038,9 @@ int mlx4_en_start_port(struct net_device *dev)
return 0;
}
+ INIT_LIST_HEAD(&priv->mc_list);
+ INIT_LIST_HEAD(&priv->curr_list);
+
/* Calculate Rx buf size */
dev->mtu = min(dev->mtu, priv->max_mtu);
mlx4_en_calc_rx_buf(dev);
@@ -653,6 +1089,10 @@ int mlx4_en_start_port(struct net_device *dev)
goto mac_err;
}
+ err = mlx4_en_create_drop_qp(priv);
+ if (err)
+ goto rss_err;
+
/* Configure tx cq's and rings */
for (i = 0; i < priv->tx_ring_num; i++) {
/* Configure cq */
@@ -720,13 +1160,23 @@ int mlx4_en_start_port(struct net_device *dev)
	/* Attach rx QP to broadcast address */
memset(&mc_list[10], 0xff, ETH_ALEN);
- mc_list[5] = priv->port;
+ mc_list[5] = priv->port; /* needed for B0 steering support */
if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
- 0, MLX4_PROT_ETH))
+ priv->port, 0, MLX4_PROT_ETH,
+ &priv->broadcast_id))
mlx4_warn(mdev, "Failed Attaching Broadcast\n");
/* Must redo promiscuous mode setup. */
priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_UPLINK);
+ mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_ALL_MULTI);
+ }
/* Schedule multicast task to populate multicast list */
queue_work(mdev->workqueue, &priv->mcast_task);
@@ -742,7 +1192,8 @@ tx_err:
mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
}
-
+ mlx4_en_destroy_drop_qp(priv);
+rss_err:
mlx4_en_release_rss_steer(priv);
mac_err:
mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
@@ -760,6 +1211,7 @@ void mlx4_en_stop_port(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_mc_list *mclist, *tmp;
int i;
u8 mc_list[16] = {0};
@@ -778,19 +1230,26 @@ void mlx4_en_stop_port(struct net_device *dev)
/* Detach All multicasts */
memset(&mc_list[10], 0xff, ETH_ALEN);
- mc_list[5] = priv->port;
+ mc_list[5] = priv->port; /* needed for B0 steering support */
mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
- MLX4_PROT_ETH);
- for (i = 0; i < priv->mc_addrs_cnt; i++) {
- memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+ MLX4_PROT_ETH, priv->broadcast_id);
+ list_for_each_entry(mclist, &priv->curr_list, list) {
+ memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
mc_list[5] = priv->port;
mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
- mc_list, MLX4_PROT_ETH);
+ mc_list, MLX4_PROT_ETH, mclist->reg_id);
}
mlx4_en_clear_list(dev);
+ list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
+ list_del(&mclist->list);
+ kfree(mclist);
+ }
+
/* Flush multicast filter */
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
+ mlx4_en_destroy_drop_qp(priv);
+
/* Free TX Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
@@ -915,6 +1374,11 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
int i;
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
+ priv->dev->rx_cpu_rmap = NULL;
+#endif
+
for (i = 0; i < priv->tx_ring_num; i++) {
if (priv->tx_ring[i].tx_info)
mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
@@ -929,15 +1393,20 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
if (priv->rx_cq[i].buf)
mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
}
+
+ if (priv->base_tx_qpn) {
+ mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
+ priv->base_tx_qpn = 0;
+ }
}
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
struct mlx4_en_port_profile *prof = priv->prof;
int i;
- int base_tx_qpn, err;
+ int err;
- err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
+ err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &priv->base_tx_qpn);
if (err) {
en_err(priv, "failed reserving range for TX rings\n");
return err;
@@ -949,7 +1418,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
prof->tx_ring_size, i, TX))
goto err;
- if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
+ if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], priv->base_tx_qpn + i,
prof->tx_ring_size, TXBB_SIZE))
goto err;
}
@@ -965,11 +1434,19 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
goto err;
}
+#ifdef CONFIG_RFS_ACCEL
+ priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
+ if (!priv->dev->rx_cpu_rmap)
+ goto err;
+
+ INIT_LIST_HEAD(&priv->filters);
+ spin_lock_init(&priv->filters_lock);
+#endif
+
return 0;
err:
en_err(priv, "Failed to allocate NIC resources\n");
- mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
return -ENOMEM;
}
@@ -1073,6 +1550,9 @@ static const struct net_device_ops mlx4_netdev_ops = {
#endif
.ndo_set_features = mlx4_en_set_features,
.ndo_setup_tc = mlx4_en_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = mlx4_en_filter_rfs,
+#endif
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1190,6 +1670,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
NETIF_F_HW_VLAN_FILTER;
dev->hw_features |= NETIF_F_LOOPBACK;
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ dev->hw_features |= NETIF_F_NTUPLE;
+
mdev->pndev[port] = dev;
netif_carrier_off(dev);
@@ -1204,9 +1688,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
/* Configure port */
+ mlx4_en_calc_rx_buf(dev);
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
- MLX4_EN_MIN_MTU,
- 0, 0, 0, 0);
+ priv->rx_skb_size + ETH_FCS_LEN,
+ prof->tx_pause, prof->tx_ppp,
+ prof->rx_pause, prof->rx_ppp);
if (err) {
en_err(priv, "Failed setting port general configurations "
"for port %d, with error %d\n", priv->port, err);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index d49a7ac..f32e703 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -41,41 +41,75 @@
#include "mlx4_en.h"
-
-static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_desc *rx_desc,
- struct page_frag *skb_frags,
- struct mlx4_en_rx_alloc *ring_alloc,
- int i)
+static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+ struct mlx4_en_rx_alloc *frags,
+ struct mlx4_en_rx_alloc *ring_alloc)
{
- struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
- struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i];
+ struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
+ struct mlx4_en_frag_info *frag_info;
struct page *page;
dma_addr_t dma;
+ int i;
- if (page_alloc->offset == frag_info->last_offset) {
- /* Allocate new page */
- page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER);
- if (!page)
- return -ENOMEM;
-
- skb_frags[i].page = page_alloc->page;
- skb_frags[i].offset = page_alloc->offset;
- page_alloc->page = page;
- page_alloc->offset = frag_info->frag_align;
- } else {
- page = page_alloc->page;
- get_page(page);
+ for (i = 0; i < priv->num_frags; i++) {
+ frag_info = &priv->frag_info[i];
+ if (ring_alloc[i].offset == frag_info->last_offset) {
+ page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
+ MLX4_EN_ALLOC_ORDER);
+ if (!page)
+ goto out;
+ dma = dma_map_page(priv->ddev, page, 0,
+ MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(priv->ddev, dma)) {
+ put_page(page);
+ goto out;
+ }
+ page_alloc[i].page = page;
+ page_alloc[i].dma = dma;
+ page_alloc[i].offset = frag_info->frag_align;
+ } else {
+ page_alloc[i].page = ring_alloc[i].page;
+ get_page(ring_alloc[i].page);
+ page_alloc[i].dma = ring_alloc[i].dma;
+ page_alloc[i].offset = ring_alloc[i].offset +
+ frag_info->frag_stride;
+ }
+ }
- skb_frags[i].page = page;
- skb_frags[i].offset = page_alloc->offset;
- page_alloc->offset += frag_info->frag_stride;
+ for (i = 0; i < priv->num_frags; i++) {
+ frags[i] = ring_alloc[i];
+ dma = ring_alloc[i].dma + ring_alloc[i].offset;
+ ring_alloc[i] = page_alloc[i];
+ rx_desc->data[i].addr = cpu_to_be64(dma);
}
- dma = dma_map_single(priv->ddev, page_address(skb_frags[i].page) +
- skb_frags[i].offset, frag_info->frag_size,
- PCI_DMA_FROMDEVICE);
- rx_desc->data[i].addr = cpu_to_be64(dma);
+
return 0;
+
+out:
+ while (i--) {
+ frag_info = &priv->frag_info[i];
+ if (ring_alloc[i].offset == frag_info->last_offset)
+ dma_unmap_page(priv->ddev, page_alloc[i].dma,
+ MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ put_page(page_alloc[i].page);
+ }
+ return -ENOMEM;
+}
+
+static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_alloc *frags,
+ int i)
+{
+ struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+
+ if (frags[i].offset == frag_info->last_offset) {
+ dma_unmap_page(priv->ddev, frags[i].dma, MLX4_EN_ALLOC_SIZE,
+ PCI_DMA_FROMDEVICE);
+ }
+ if (frags[i].page)
+ put_page(frags[i].page);
}
static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
@@ -91,6 +125,13 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
if (!page_alloc->page)
goto out;
+ page_alloc->dma = dma_map_page(priv->ddev, page_alloc->page, 0,
+ MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(priv->ddev, page_alloc->dma)) {
+ put_page(page_alloc->page);
+ page_alloc->page = NULL;
+ goto out;
+ }
page_alloc->offset = priv->frag_info[i].frag_align;
en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
i, page_alloc->page);
@@ -100,6 +141,8 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
out:
while (i--) {
page_alloc = &ring->page_alloc[i];
+ dma_unmap_page(priv->ddev, page_alloc->dma,
+ MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
put_page(page_alloc->page);
page_alloc->page = NULL;
}
@@ -117,24 +160,22 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
i, page_count(page_alloc->page));
+ dma_unmap_page(priv->ddev, page_alloc->dma,
+ MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
put_page(page_alloc->page);
page_alloc->page = NULL;
}
}
-
static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, int index)
{
struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
- struct skb_frag_struct *skb_frags = ring->rx_info +
- (index << priv->log_rx_info);
int possible_frags;
int i;
/* Set size and memtype fields */
for (i = 0; i < priv->num_frags; i++) {
- skb_frag_size_set(&skb_frags[i], priv->frag_info[i].frag_size);
rx_desc->data[i].byte_count =
cpu_to_be32(priv->frag_info[i].frag_size);
rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
@@ -151,29 +192,14 @@ static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
}
}
-
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, int index)
{
struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
- struct page_frag *skb_frags = ring->rx_info +
- (index << priv->log_rx_info);
- int i;
+ struct mlx4_en_rx_alloc *frags = ring->rx_info +
+ (index << priv->log_rx_info);
- for (i = 0; i < priv->num_frags; i++)
- if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i))
- goto err;
-
- return 0;
-
-err:
- while (i--) {
- dma_addr_t dma = be64_to_cpu(rx_desc->data[i].addr);
- pci_unmap_single(priv->mdev->pdev, dma, skb_frags[i].size,
- PCI_DMA_FROMDEVICE);
- put_page(skb_frags[i].page);
- }
- return -ENOMEM;
+ return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc);
}
static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
@@ -185,20 +211,13 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
int index)
{
- struct page_frag *skb_frags;
- struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
- dma_addr_t dma;
+ struct mlx4_en_rx_alloc *frags;
int nr;
- skb_frags = ring->rx_info + (index << priv->log_rx_info);
+ frags = ring->rx_info + (index << priv->log_rx_info);
for (nr = 0; nr < priv->num_frags; nr++) {
en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
- dma = be64_to_cpu(rx_desc->data[nr].addr);
-
- en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
- dma_unmap_single(priv->ddev, dma, skb_frags[nr].size,
- PCI_DMA_FROMDEVICE);
- put_page(skb_frags[nr].page);
+ mlx4_en_free_frag(priv, frags, nr);
}
}
@@ -268,10 +287,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
{
struct mlx4_en_dev *mdev = priv->mdev;
- int err;
+ int err = -ENOMEM;
int tmp;
-
ring->prod = 0;
ring->cons = 0;
ring->size = size;
@@ -281,7 +299,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
- sizeof(struct skb_frag_struct));
+ sizeof(struct mlx4_en_rx_alloc));
ring->rx_info = vmalloc(tmp);
if (!ring->rx_info)
return -ENOMEM;
@@ -338,7 +356,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
memset(ring->buf, 0, ring->buf_size);
mlx4_en_update_rx_prod_db(ring);
- /* Initailize all descriptors */
+ /* Initialize all descriptors */
for (i = 0; i < ring->size; i++)
mlx4_en_init_rx_desc(priv, ring, i);
@@ -389,6 +407,9 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
+#ifdef CONFIG_RFS_ACCEL
+ mlx4_en_cleanup_filters(priv, ring);
+#endif
}
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
@@ -401,12 +422,10 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
}
-/* Unmap a completed descriptor and free unused pages */
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
- struct page_frag *skb_frags,
+ struct mlx4_en_rx_alloc *frags,
struct sk_buff *skb,
- struct mlx4_en_rx_alloc *page_alloc,
int length)
{
struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
@@ -414,26 +433,24 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
int nr;
dma_addr_t dma;
- /* Collect used fragments while replacing them in the HW descirptors */
+ /* Collect used fragments while replacing them in the HW descriptors */
for (nr = 0; nr < priv->num_frags; nr++) {
frag_info = &priv->frag_info[nr];
if (length <= frag_info->frag_prefix_size)
break;
+ if (!frags[nr].page)
+ goto fail;
- /* Save page reference in skb */
- __skb_frag_set_page(&skb_frags_rx[nr], skb_frags[nr].page);
- skb_frag_size_set(&skb_frags_rx[nr], skb_frags[nr].size);
- skb_frags_rx[nr].page_offset = skb_frags[nr].offset;
- skb->truesize += frag_info->frag_stride;
dma = be64_to_cpu(rx_desc->data[nr].addr);
+ dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
+ DMA_FROM_DEVICE);
- /* Allocate a replacement page */
- if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr))
- goto fail;
-
- /* Unmap buffer */
- dma_unmap_single(priv->ddev, dma, skb_frag_size(&skb_frags_rx[nr]),
- PCI_DMA_FROMDEVICE);
+ /* Save page reference in skb */
+ get_page(frags[nr].page);
+ __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
+ skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
+ skb_frags_rx[nr].page_offset = frags[nr].offset;
+ skb->truesize += frag_info->frag_stride;
}
/* Adjust size of last fragment to match actual length */
if (nr > 0)
@@ -442,8 +459,6 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
return nr;
fail:
- /* Drop all accumulated fragments (which have already been replaced in
- * the descriptor) of this packet; remaining fragments are reused... */
while (nr > 0) {
nr--;
__skb_frag_unref(&skb_frags_rx[nr]);
@@ -454,8 +469,7 @@ fail:
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
- struct page_frag *skb_frags,
- struct mlx4_en_rx_alloc *page_alloc,
+ struct mlx4_en_rx_alloc *frags,
unsigned int length)
{
struct sk_buff *skb;
@@ -473,23 +487,20 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
/* Get pointer to first fragment so we could copy the headers into the
* (linear part of the) skb */
- va = page_address(skb_frags[0].page) + skb_frags[0].offset;
+ va = page_address(frags[0].page) + frags[0].offset;
if (length <= SMALL_PACKET_SIZE) {
/* We are copying all relevant data to the skb - temporarily
- * synch buffers for the copy */
+ * sync buffers for the copy */
dma = be64_to_cpu(rx_desc->data[0].addr);
dma_sync_single_for_cpu(priv->ddev, dma, length,
DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, va, length);
- dma_sync_single_for_device(priv->ddev, dma, length,
- DMA_FROM_DEVICE);
skb->tail += length;
} else {
-
/* Move relevant fragments to skb */
- used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
- skb, page_alloc, length);
+ used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
+ skb, length);
if (unlikely(!used_frags)) {
kfree_skb(skb);
return NULL;
@@ -526,12 +537,25 @@ out_loopback:
dev_kfree_skb_any(skb);
}
+static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring)
+{
+ int index = ring->prod & ring->size_mask;
+
+ while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
+ if (mlx4_en_prepare_rx_desc(priv, ring, index))
+ break;
+ ring->prod++;
+ index = ring->prod & ring->size_mask;
+ }
+}
+
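mlx4_en_refill_rx_buffers() replaces the old reallocate-in-place scheme: prod and cons are free-running counters, the slot is prod & size_mask (size is a power of two), and the ring is topped up until it is full again or an allocation fails. The index arithmetic on its own, as a small sketch with made-up counter values:

#include <stdio.h>

#define RING_SIZE 8			/* power of two, like ring->size */
#define SIZE_MASK (RING_SIZE - 1)	/* like ring->size_mask */

int main(void)
{
	unsigned int prod = 13, cons = 9;	/* free-running counters */

	/* Top up empty slots until the ring is full again (here: 4 fills).
	 * Unsigned subtraction keeps working across counter wrap-around. */
	while (prod - cons < RING_SIZE) {
		unsigned int slot = prod & SIZE_MASK;

		printf("fill slot %u (prod=%u, in-flight=%u)\n",
		       slot, prod, prod - cons);
		prod++;
	}
	return 0;
}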
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cqe *cqe;
struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- struct page_frag *skb_frags;
+ struct mlx4_en_rx_alloc *frags;
struct mlx4_en_rx_desc *rx_desc;
struct sk_buff *skb;
int index;
@@ -540,6 +564,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
int polled = 0;
int ip_summed;
struct ethhdr *ethh;
+ dma_addr_t dma;
u64 s_mac;
if (!priv->port_up)
@@ -555,7 +580,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
cq->mcq.cons_index & cq->size)) {
- skb_frags = ring->rx_info + (index << priv->log_rx_info);
+ frags = ring->rx_info + (index << priv->log_rx_info);
rx_desc = ring->buf + (index << ring->log_stride);
/*
@@ -579,8 +604,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Get pointer to first fragment since we haven't skb yet and
* cast it to ethhdr struct */
- ethh = (struct ethhdr *)(page_address(skb_frags[0].page) +
- skb_frags[0].offset);
+ dma = be64_to_cpu(rx_desc->data[0].addr);
+ dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
+ DMA_FROM_DEVICE);
+ ethh = (struct ethhdr *)(page_address(frags[0].page) +
+ frags[0].offset);
s_mac = mlx4_en_mac_to_u64(ethh->h_source);
/* If source MAC is equal to our own MAC and not performing
@@ -612,10 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (!gro_skb)
goto next;
- nr = mlx4_en_complete_rx_desc(
- priv, rx_desc,
- skb_frags, gro_skb,
- ring->page_alloc, length);
+ nr = mlx4_en_complete_rx_desc(priv,
+ rx_desc, frags, gro_skb,
+ length);
if (!nr)
goto next;
@@ -651,8 +678,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
ring->csum_none++;
}
- skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
- ring->page_alloc, length);
+ skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
if (!skb) {
priv->stats.rx_dropped++;
goto next;
@@ -678,6 +704,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
netif_receive_skb(skb);
next:
+ for (nr = 0; nr < priv->num_frags; nr++)
+ mlx4_en_free_frag(priv, frags, nr);
+
++cq->mcq.cons_index;
index = (cq->mcq.cons_index) & ring->size_mask;
cqe = &cq->buf[index];
@@ -693,7 +722,7 @@ out:
mlx4_cq_set_ci(&cq->mcq);
wmb(); /* ensure HW sees CQ consumer before we post new buffers */
ring->cons = cq->mcq.cons_index;
- ring->prod += polled; /* Polled descriptors were realocated in place */
+ mlx4_en_refill_rx_buffers(priv, ring);
mlx4_en_update_rx_prod_db(ring);
return polled;
}
@@ -782,7 +811,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
priv->num_frags = i;
priv->rx_skb_size = eff_mtu;
- priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
+ priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));
en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
"num_frags:%d):\n", eff_mtu, priv->num_frags);
@@ -844,6 +873,36 @@ out:
return err;
}
+int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
+{
+ int err;
+ u32 qpn;
+
+ err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn);
+ if (err) {
+ en_err(priv, "Failed reserving drop qpn\n");
+ return err;
+ }
+ err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
+ if (err) {
+ en_err(priv, "Failed allocating drop qp\n");
+ mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
+ return err;
+ }
+
+ return 0;
+}
+
+void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
+{
+ u32 qpn;
+
+ qpn = priv->drop_qp.qpn;
+ mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
+ mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
+ mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
+}
+
/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
@@ -954,8 +1013,3 @@ void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
}
mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}
-
-
-
-
-
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 3b6f8ef..cd48337 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -39,6 +39,7 @@
#include <linux/dma-mapping.h>
#include <linux/mlx4/cmd.h>
+#include <linux/cpu_rmap.h>
#include "mlx4.h"
#include "fw.h"
@@ -426,7 +427,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
- if (flr_slave > dev->num_slaves) {
+ if (flr_slave >= dev->num_slaves) {
mlx4_warn(dev,
"Got FLR for unknown function: %d\n",
flr_slave);
@@ -1060,7 +1061,8 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
}
EXPORT_SYMBOL(mlx4_test_interrupts);
-int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector)
+int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
+ int *vector)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1074,6 +1076,14 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector)
snprintf(priv->eq_table.irq_names +
vec * MLX4_IRQNAME_SIZE,
MLX4_IRQNAME_SIZE, "%s", name);
+#ifdef CONFIG_RFS_ACCEL
+ if (rmap) {
+ err = irq_cpu_rmap_add(rmap,
+ priv->eq_table.eq[vec].irq);
+ if (err)
+ mlx4_warn(dev, "Failed adding irq rmap\n");
+ }
+#endif
err = request_irq(priv->eq_table.eq[vec].irq,
mlx4_msi_x_interrupt, 0,
&priv->eq_table.irq_names[vec<<5],
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 68f5cd6..1d70657 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -123,7 +123,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
static const char * const fname[] = {
[0] = "RSS support",
[1] = "RSS Toeplitz Hash Function support",
- [2] = "RSS XOR Hash Function support"
+ [2] = "RSS XOR Hash Function support",
+		[3] = "Device managed flow steering support"
};
int i;
@@ -391,6 +392,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
+#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
+#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
@@ -412,7 +415,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
- MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev));
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -474,6 +477,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->num_ports = field & 0xf;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
dev_cap->max_msg_sz = 1 << (field & 0x1f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
+ if (field & 0x80)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
+ dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
+ dev_cap->fs_max_num_qp_per_entry = field;
MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
dev_cap->stat_rate_support = stat_rate;
MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
@@ -590,8 +599,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
for (i = 1; i <= dev_cap->num_ports; ++i) {
err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
- MLX4_CMD_TIME_CLASS_B,
- !mlx4_is_slave(dev));
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -669,6 +677,28 @@ out:
return err;
}
+int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err = 0;
+ u8 field;
+
+ err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+
+ /* For guests, report Blueflame disabled */
+ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
+ field &= 0x7f;
+ MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
+
+ return 0;
+}
+
int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -860,6 +890,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
((fw_ver & 0xffff0000ull) >> 16) |
((fw_ver & 0x0000ffffull) << 16);
+ if (mlx4_is_slave(dev))
+ goto out;
+
MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
dev->caps.function = lg;
@@ -927,6 +960,27 @@ out:
return err;
}
+int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u8 *outbuf;
+ int err;
+
+ outbuf = outbox->buf;
+ err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+
+ /* for slaves, zero out everything except FW version */
+ outbuf[0] = outbuf[1] = 0;
+ memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
+ return 0;
+}
+
static void get_board_id(void *vsd, char *board_id)
{
int i;
@@ -1016,6 +1070,15 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
+#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
+#define INIT_HCA_FS_PARAM_OFFSET 0x1d0
+#define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
+#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
+#define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
+#define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21)
+#define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
+#define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25)
+#define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26)
#define INIT_HCA_TPT_OFFSET 0x0f0
#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
@@ -1074,14 +1137,44 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
- /* multicast attributes */
-
- MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
- MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
- MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
- MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET);
- MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ /* steering attributes */
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
+ cpu_to_be32(1 <<
+ INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);
+
+ MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_entry_sz,
+ INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_table_sz,
+ INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+ /* Enable Ethernet flow steering
+ * with udp unicast and tcp unicast
+ */
+ MLX4_PUT(inbox, param->fs_hash_enable_bits,
+ INIT_HCA_FS_ETH_BITS_OFFSET);
+ MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
+ INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
+ /* Enable IPoIB flow steering
+ * with udp unicast and tcp unicast
+ */
+ MLX4_PUT(inbox, param->fs_hash_enable_bits,
+ INIT_HCA_FS_IB_BITS_OFFSET);
+ MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
+ INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
+ } else {
+ MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_entry_sz,
+ INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_hash_sz,
+ INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_table_sz,
+ INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
+ MLX4_PUT(inbox, (u8) (1 << 3),
+ INIT_HCA_UC_STEERING_OFFSET);
+ }
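A note on the flag manipulation at the top of this block: INIT_HCA_FLAGS_OFFSET is a byte offset while inbox is addressed in dwords, hence the divide by four, and the enable bit has to be OR-ed into a big-endian word. A standalone sketch of that pattern (the offset value here is illustrative; the driver's own define is authoritative):

#include <stdint.h>
#include <arpa/inet.h>	/* htonl() stands in for cpu_to_be32() */

#define DEMO_FLAGS_OFFSET	0x014	/* assumed byte offset of the INIT_HCA flags word */
#define DEMO_DMFS_EN_BIT	6	/* INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN */

static void demo_enable_dmfs(uint32_t *inbox)
{
	/* byte offset -> dword index, then set the bit in big-endian form */
	inbox[DEMO_FLAGS_OFFSET / 4] |= htonl(1u << DEMO_DMFS_EN_BIT);
}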
/* TPT attributes */
@@ -1143,15 +1236,24 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
- /* multicast attributes */
+ /* steering attributes */
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
- MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
- MLX4_GET(param->log_mc_entry_sz, outbox,
- INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
- MLX4_GET(param->log_mc_hash_sz, outbox,
- INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
- MLX4_GET(param->log_mc_table_sz, outbox,
- INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
+ MLX4_GET(param->log_mc_entry_sz, outbox,
+ INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+ MLX4_GET(param->log_mc_table_sz, outbox,
+ INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+ } else {
+ MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
+ MLX4_GET(param->log_mc_entry_sz, outbox,
+ INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+ MLX4_GET(param->log_mc_hash_sz, outbox,
+ INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+ MLX4_GET(param->log_mc_table_sz, outbox,
+ INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ }
/* TPT attributes */
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 64c0399..83fcbbf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -78,6 +78,8 @@ struct mlx4_dev_cap {
u16 wavelength[MLX4_MAX_PORTS + 1];
u64 trans_code[MLX4_MAX_PORTS + 1];
u16 stat_rate_support;
+ int fs_log_max_ucast_qp_range_size;
+ int fs_max_num_qp_per_entry;
u64 flags;
u64 flags2;
int reserved_uars;
@@ -165,6 +167,7 @@ struct mlx4_init_hca_param {
u8 log_mpt_sz;
u8 log_uar_sz;
u8 uar_page_sz; /* log pg sz in 4k chunks */
+ u8 fs_hash_enable_bits;
};
struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 2e024a6..4264516 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -41,6 +41,7 @@
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
+#include <linux/netdevice.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
@@ -90,7 +91,9 @@ module_param_named(log_num_mgm_entry_size,
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
" of qp per mcg, for example:"
" 10 gives 248.range: 9<="
- " log_num_mgm_entry_size <= 12");
+ " log_num_mgm_entry_size <= 12."
+				 " Not used when device managed"
+				 " flow steering is enabled");
#define MLX4_VF (1 << 0)
@@ -142,12 +145,6 @@ struct mlx4_port_config {
struct pci_dev *pdev;
};
-static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
-{
- return dev->caps.reserved_eqs +
- MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
-}
-
int mlx4_check_port_params(struct mlx4_dev *dev,
enum mlx4_port_type *port_type)
{
@@ -217,6 +214,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
dev->caps.num_ports = dev_cap->num_ports;
+ dev->phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM;
for (i = 1; i <= dev->caps.num_ports; ++i) {
dev->caps.vl_cap[i] = dev_cap->max_vl[i];
dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
@@ -248,7 +246,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.reserved_srqs = dev_cap->reserved_srqs;
dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
- dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
/*
* Subtract 1 from the limit because we need to allocate a
* spare CQE so the HCA HW can tell the difference between an
@@ -279,6 +276,28 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
+ if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
+ dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
+ dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
+ dev->caps.fs_log_max_ucast_qp_range_size =
+ dev_cap->fs_log_max_ucast_qp_range_size;
+ } else {
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) {
+ dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
+ } else {
+ dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
+ mlx4_warn(dev, "Must have UC_STEER and MC_STEER flags "
+ "set to use B0 steering. Falling back to A0 steering mode.\n");
+ }
+ dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
+ }
+ mlx4_dbg(dev, "Steering mode is: %s\n",
+ mlx4_steering_mode_str(dev->caps.steering_mode));
+
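The capability-to-mode mapping above reduces to a small pure function; this restates the branch only for readability, with the device capability flags collapsed to booleans.

enum demo_steering_mode { DEMO_A0, DEMO_B0, DEMO_DEVICE_MANAGED };

/* DMFS wins when the firmware reports FS_EN; B0 needs both VEP UC and
 * MC steering capabilities; anything else falls back to A0.
 */
static enum demo_steering_mode pick_steering_mode(int fs_en,
						  int vep_uc_steer,
						  int vep_mc_steer)
{
	if (fs_en)
		return DEMO_DEVICE_MANAGED;
	if (vep_uc_steer && vep_mc_steer)
		return DEMO_B0;
	return DEMO_A0;
}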
/* Sense port always allowed on supported devices for ConnectX1 and 2 */
if (dev->pdev->device != 0x1003)
dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -435,12 +454,17 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
memset(&dev_cap, 0, sizeof(dev_cap));
+ dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
err = mlx4_dev_cap(dev, &dev_cap);
if (err) {
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
return err;
}
+ err = mlx4_QUERY_FW(dev);
+ if (err)
+ mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");
+
page_size = ~dev->caps.page_size_cap + 1;
mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
if (page_size > PAGE_SIZE) {
@@ -485,15 +509,15 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.num_mgms = 0;
dev->caps.num_amgms = 0;
- for (i = 1; i <= dev->caps.num_ports; ++i)
- dev->caps.port_mask[i] = dev->caps.port_type[i];
-
if (dev->caps.num_ports > MLX4_MAX_PORTS) {
mlx4_err(dev, "HCA has %d ports, but we only support %d, "
"aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
return -ENODEV;
}
+ for (i = 1; i <= dev->caps.num_ports; ++i)
+ dev->caps.port_mask[i] = dev->caps.port_type[i];
+
if (dev->caps.uar_page_size * (dev->caps.num_uars -
dev->caps.reserved_uars) >
pci_resource_len(dev->pdev, 2)) {
@@ -504,18 +528,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
return -ENODEV;
}
-#if 0
- mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
- mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
- dev->caps.num_uars, dev->caps.reserved_uars,
- dev->caps.uar_page_size * dev->caps.num_uars,
- pci_resource_len(dev->pdev, 2));
- mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
- dev->caps.reserved_eqs);
- mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
- dev->caps.num_pds, dev->caps.reserved_pds,
- dev->caps.slave_pd_shift, dev->caps.pd_base);
-#endif
return 0;
}
@@ -810,9 +822,8 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
if (err)
goto err_srq;
- num_eqs = (mlx4_is_master(dev)) ?
- roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
- dev->caps.num_eqs;
+ num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
+ dev->caps.num_eqs;
err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
cmpt_base +
((u64) (MLX4_CMPT_TYPE_EQ *
@@ -874,9 +885,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
}
- num_eqs = (mlx4_is_master(dev)) ?
- roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
- dev->caps.num_eqs;
+ num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
+ dev->caps.num_eqs;
err = mlx4_init_icm_table(dev, &priv->eq_table.table,
init_hca->eqc_base, dev_cap->eqc_entry_sz,
num_eqs, num_eqs, 0, 0);
@@ -981,9 +991,11 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
}
/*
- * It's not strictly required, but for simplicity just map the
- * whole multicast group table now. The table isn't very big
- * and it's a lot easier than trying to track ref counts.
+ * For flow steering device managed mode it is required to use
+ * mlx4_init_icm_table. For B0 steering mode it's not strictly
+ * required, but for simplicity just map the whole multicast
+ * group table now. The table isn't very big and it's a lot
+ * easier than trying to track ref counts.
*/
err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
init_hca->mc_base,
@@ -1219,7 +1231,26 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
goto err_stop_fw;
}
+ priv->fs_hash_mode = MLX4_FS_L2_HASH;
+
+ switch (priv->fs_hash_mode) {
+ case MLX4_FS_L2_HASH:
+ init_hca.fs_hash_enable_bits = 0;
+ break;
+
+ case MLX4_FS_L2_L3_L4_HASH:
+ /* Enable flow steering with
+		 * UDP unicast and TCP unicast
+ */
+ init_hca.fs_hash_enable_bits =
+ MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN;
+ break;
+ }
+
profile = default_profile;
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ profile.num_mcg = MLX4_FS_NUM_MCG;
icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
&init_hca);
@@ -1553,8 +1584,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
struct msix_entry *entries;
int nreq = min_t(int, dev->caps.num_ports *
- min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
- + MSIX_LEGACY_SZ, MAX_MSIX);
+ min_t(int, netif_get_num_default_rss_queues() + 1,
+ MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
int err;
int i;
@@ -1989,6 +2020,8 @@ slave_start:
if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
!mlx4_is_mfunc(dev)) {
dev->flags &= ~MLX4_FLAG_MSI_X;
+ dev->caps.num_comp_vectors = 1;
+ dev->caps.comp_pool = 0;
pci_disable_msix(pdev);
err = mlx4_setup_hca(dev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index f4a8f98..4ec3835 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -54,7 +54,12 @@ struct mlx4_mgm {
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
- return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE);
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE;
+ else
+ return min((1 << mlx4_log_num_mgm_entry_size),
+ MLX4_MAX_MGM_ENTRY_SIZE);
}
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
@@ -62,6 +67,35 @@ int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
}
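A worked example of the arithmetic above: the formula treats an MGM entry as 16-byte lines, reserves two lines for the entry header and packs four 4-byte QPNs into each remaining line. The 128-byte device-managed entry (1 << MLX4_FS_MGM_LOG_ENTRY_SIZE) therefore holds 24 QPs, and a 1024-byte entry (log size 10) holds the 248 quoted in the log_num_mgm_entry_size parameter description.

#include <stdio.h>

/* 4 QPNs per 16-byte line, minus two lines for the MGM header */
static int qp_per_mgm(int entry_size_bytes)
{
	return 4 * (entry_size_bytes / 16 - 2);
}

int main(void)
{
	printf("128-byte entry:  %d QPs\n", qp_per_mgm(1 << 7));	/* 24  */
	printf("1024-byte entry: %d QPs\n", qp_per_mgm(1 << 10));	/* 248 */
	return 0;
}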
+static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev,
+ struct mlx4_cmd_mailbox *mailbox,
+ u32 size,
+ u64 *reg_id)
+{
+ u64 imm;
+ int err = 0;
+
+ err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
+ MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+ *reg_id = imm;
+
+ return err;
+}
+
+static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid)
+{
+ int err = 0;
+
+ err = mlx4_cmd(dev, regid, 0, 0,
+ MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+
+ return err;
+}
+
static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
struct mlx4_cmd_mailbox *mailbox)
{
@@ -614,6 +648,311 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
return err;
}
+struct mlx4_net_trans_rule_hw_ctrl {
+ __be32 ctrl;
+ __be32 vf_vep_port;
+ __be32 qpn;
+ __be32 reserved;
+};
+
+static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
+ struct mlx4_net_trans_rule_hw_ctrl *hw)
+{
+ static const u8 __promisc_mode[] = {
+ [MLX4_FS_PROMISC_NONE] = 0x0,
+ [MLX4_FS_PROMISC_UPLINK] = 0x1,
+ [MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2,
+ [MLX4_FS_PROMISC_ALL_MULTI] = 0x3,
+ };
+
+ u32 dw = 0;
+
+ dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
+ dw |= ctrl->exclusive ? (1 << 2) : 0;
+ dw |= ctrl->allow_loopback ? (1 << 3) : 0;
+ dw |= __promisc_mode[ctrl->promisc_mode] << 8;
+ dw |= ctrl->priority << 16;
+
+ hw->ctrl = cpu_to_be32(dw);
+ hw->vf_vep_port = cpu_to_be32(ctrl->port);
+ hw->qpn = cpu_to_be32(ctrl->qpn);
+}
+
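The control dword built above packs the rule attributes as follows (field positions taken from the shifts in trans_rule_ctrl_to_hw(); a host-endian sketch, conversion to big endian happens at the caller):

#include <stdint.h>

/* bit 0: LIFO queue mode, bit 2: exclusive, bit 3: allow loopback,
 * bits 8..15: promiscuous mode, bits 16 and up: priority
 */
static uint32_t pack_rule_ctrl(int lifo, int exclusive, int allow_loopback,
			       uint8_t promisc_mode, uint16_t priority)
{
	uint32_t dw = 0;

	dw |= lifo ? 1u : 0u;
	dw |= exclusive ? (1u << 2) : 0u;
	dw |= allow_loopback ? (1u << 3) : 0u;
	dw |= (uint32_t)promisc_mode << 8;
	dw |= (uint32_t)priority << 16;

	return dw;
}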
+struct mlx4_net_trans_rule_hw_ib {
+ u8 size;
+ u8 rsvd1;
+ __be16 id;
+ u32 rsvd2;
+ __be32 qpn;
+ __be32 qpn_mask;
+ u8 dst_gid[16];
+ u8 dst_gid_msk[16];
+} __packed;
+
+struct mlx4_net_trans_rule_hw_eth {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ u8 rsvd1[6];
+ u8 dst_mac[6];
+ u16 rsvd2;
+ u8 dst_mac_msk[6];
+ u16 rsvd3;
+ u8 src_mac[6];
+ u16 rsvd4;
+ u8 src_mac_msk[6];
+ u8 rsvd5;
+ u8 ether_type_enable;
+ __be16 ether_type;
+ __be16 vlan_id_msk;
+ __be16 vlan_id;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_tcp_udp {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ __be16 rsvd1[3];
+ __be16 dst_port;
+ __be16 rsvd2;
+ __be16 dst_port_msk;
+ __be16 rsvd3;
+ __be16 src_port;
+ __be16 rsvd4;
+ __be16 src_port_msk;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_ipv4 {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ __be32 rsvd1;
+ __be32 dst_ip;
+ __be32 dst_ip_msk;
+ __be32 src_ip;
+ __be32 src_ip_msk;
+} __packed;
+
+struct _rule_hw {
+ union {
+ struct {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ };
+ struct mlx4_net_trans_rule_hw_eth eth;
+ struct mlx4_net_trans_rule_hw_ib ib;
+ struct mlx4_net_trans_rule_hw_ipv4 ipv4;
+ struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
+ };
+};
+
+static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
+ struct _rule_hw *rule_hw)
+{
+ static const u16 __sw_id_hw[] = {
+ [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001,
+ [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005,
+ [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003,
+ [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002,
+ [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004,
+ [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006
+ };
+
+ static const size_t __rule_hw_sz[] = {
+ [MLX4_NET_TRANS_RULE_ID_ETH] =
+ sizeof(struct mlx4_net_trans_rule_hw_eth),
+ [MLX4_NET_TRANS_RULE_ID_IB] =
+ sizeof(struct mlx4_net_trans_rule_hw_ib),
+ [MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
+ [MLX4_NET_TRANS_RULE_ID_IPV4] =
+ sizeof(struct mlx4_net_trans_rule_hw_ipv4),
+ [MLX4_NET_TRANS_RULE_ID_TCP] =
+ sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
+ [MLX4_NET_TRANS_RULE_ID_UDP] =
+ sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
+ };
+ if (spec->id >= MLX4_NET_TRANS_RULE_NUM) {
+ mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id);
+ return -EINVAL;
+ }
+ memset(rule_hw, 0, __rule_hw_sz[spec->id]);
+ rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
+ rule_hw->size = __rule_hw_sz[spec->id] >> 2;
+
+ switch (spec->id) {
+ case MLX4_NET_TRANS_RULE_ID_ETH:
+ memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN);
+ memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk,
+ ETH_ALEN);
+ memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN);
+ memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk,
+ ETH_ALEN);
+ if (spec->eth.ether_type_enable) {
+ rule_hw->eth.ether_type_enable = 1;
+ rule_hw->eth.ether_type = spec->eth.ether_type;
+ }
+ rule_hw->eth.vlan_id = spec->eth.vlan_id;
+ rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk;
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_IB:
+ rule_hw->ib.qpn = spec->ib.r_qpn;
+ rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
+ memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
+ memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_IPV6:
+ return -EOPNOTSUPP;
+
+ case MLX4_NET_TRANS_RULE_ID_IPV4:
+ rule_hw->ipv4.src_ip = spec->ipv4.src_ip;
+ rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk;
+ rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip;
+ rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk;
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_TCP:
+ case MLX4_NET_TRANS_RULE_ID_UDP:
+ rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port;
+ rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk;
+ rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port;
+ rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return __rule_hw_sz[spec->id];
+}
+
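To make the size accounting concrete: the mailbox holds the 16-byte control header followed by one __packed hardware struct per parsed spec, and the running total is handed to the ATTACH command in dwords (size >> 2). Assuming the packed layouts declared above, an Ethernet + IPv4 + TCP rule comes out to 112 bytes, i.e. 28 dwords.

#include <stdio.h>

int main(void)
{
	const unsigned ctrl	= 16;	/* mlx4_net_trans_rule_hw_ctrl    */
	const unsigned eth	= 48;	/* mlx4_net_trans_rule_hw_eth     */
	const unsigned ipv4	= 24;	/* mlx4_net_trans_rule_hw_ipv4    */
	const unsigned tcp_udp	= 24;	/* mlx4_net_trans_rule_hw_tcp_udp */
	unsigned bytes = ctrl + eth + ipv4 + tcp_udp;

	printf("payload: %u bytes = %u dwords\n", bytes, bytes >> 2);
	return 0;
}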
+static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
+ struct mlx4_net_trans_rule *rule)
+{
+#define BUF_SIZE 256
+ struct mlx4_spec_list *cur;
+ char buf[BUF_SIZE];
+ int len = 0;
+
+ mlx4_err(dev, "%s", str);
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "port = %d prio = 0x%x qp = 0x%x ",
+ rule->port, rule->priority, rule->qpn);
+
+ list_for_each_entry(cur, &rule->list, list) {
+ switch (cur->id) {
+ case MLX4_NET_TRANS_RULE_ID_ETH:
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "dmac = %pM ", &cur->eth.dst_mac);
+ if (cur->eth.ether_type)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "ethertype = 0x%x ",
+ be16_to_cpu(cur->eth.ether_type));
+ if (cur->eth.vlan_id)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "vlan-id = %d ",
+ be16_to_cpu(cur->eth.vlan_id));
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_IPV4:
+ if (cur->ipv4.src_ip)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "src-ip = %pI4 ",
+ &cur->ipv4.src_ip);
+ if (cur->ipv4.dst_ip)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "dst-ip = %pI4 ",
+ &cur->ipv4.dst_ip);
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_TCP:
+ case MLX4_NET_TRANS_RULE_ID_UDP:
+ if (cur->tcp_udp.src_port)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "src-port = %d ",
+ be16_to_cpu(cur->tcp_udp.src_port));
+ if (cur->tcp_udp.dst_port)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "dst-port = %d ",
+ be16_to_cpu(cur->tcp_udp.dst_port));
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_IB:
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "dst-gid = %pI6\n", cur->ib.dst_gid);
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "dst-gid-mask = %pI6\n",
+ cur->ib.dst_gid_msk);
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_IPV6:
+ break;
+
+ default:
+ break;
+ }
+ }
+ len += snprintf(buf + len, BUF_SIZE - len, "\n");
+ mlx4_err(dev, "%s", buf);
+
+ if (len >= BUF_SIZE)
+ mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
+}
+
+int mlx4_flow_attach(struct mlx4_dev *dev,
+ struct mlx4_net_trans_rule *rule, u64 *reg_id)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_spec_list *cur;
+ u32 size = 0;
+ int ret;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
+ trans_rule_ctrl_to_hw(rule, mailbox->buf);
+
+ size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
+
+ list_for_each_entry(cur, &rule->list, list) {
+ ret = parse_trans_rule(dev, cur, mailbox->buf + size);
+ if (ret < 0) {
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return -EINVAL;
+ }
+ size += ret;
+ }
+
+ ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
+ if (ret == -ENOMEM)
+ mlx4_err_rule(dev,
+			      "mcg table is full. Failed to register network rule.\n",
+ rule);
+ else if (ret)
+		mlx4_err_rule(dev, "Failed to register network rule.\n", rule);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_flow_attach);
+
+int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
+{
+ int err;
+
+ err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
+ if (err)
+		mlx4_err(dev, "Failed to detach network rule, registration id = 0x%llx\n",
+ reg_id);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_flow_detach);
+
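The calling pattern intended for the two exports above, pieced together from the device-managed branch of mlx4_multicast_attach() later in this patch (a kernel-style sketch, not an additional export; it assumes the mlx4 driver headers):

/* Steer frames with a given destination MAC on 'port' to 'qpn' and
 * remember the registration id so the rule can be torn down later
 * with mlx4_flow_detach(dev, *reg_id).
 */
static int demo_steer_mac_to_qp(struct mlx4_dev *dev, u8 port, int qpn,
				const u8 *dmac, u64 *reg_id)
{
	struct mlx4_spec_list spec = { {NULL} };
	struct mlx4_net_trans_rule rule = {
		.queue_mode	= MLX4_NET_TRANS_Q_FIFO,
		.exclusive	= 0,
		.allow_loopback	= 1,
		.promisc_mode	= MLX4_FS_PROMISC_NONE,
		.priority	= MLX4_DOMAIN_NIC,
	};

	rule.port = port;
	rule.qpn = qpn;
	INIT_LIST_HEAD(&rule.list);

	spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
	memcpy(spec.eth.dst_mac, dmac, ETH_ALEN);
	memset(spec.eth.dst_mac_msk, 0xff, ETH_ALEN);
	list_add_tail(&spec.list, &rule.list);

	return mlx4_flow_attach(dev, &rule, reg_id);
}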
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot,
enum mlx4_steer_type steer)
@@ -866,49 +1205,159 @@ static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
}
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- int block_mcast_loopback, enum mlx4_protocol prot)
+ u8 port, int block_mcast_loopback,
+ enum mlx4_protocol prot, u64 *reg_id)
{
- if (prot == MLX4_PROT_ETH &&
- !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
- return 0;
- if (prot == MLX4_PROT_ETH)
- gid[7] |= (MLX4_MC_STEER << 1);
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_A0:
+ if (prot == MLX4_PROT_ETH)
+ return 0;
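+		/* fall through: non-Ethernet protocols use the B0/IB path */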
+
+ case MLX4_STEERING_MODE_B0:
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (MLX4_MC_STEER << 1);
+
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 1,
+ block_mcast_loopback, prot);
+ return mlx4_qp_attach_common(dev, qp, gid,
+ block_mcast_loopback, prot,
+ MLX4_MC_STEER);
+
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ struct mlx4_spec_list spec = { {NULL} };
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .promisc_mode = MLX4_FS_PROMISC_NONE,
+ .priority = MLX4_DOMAIN_NIC,
+ };
+
+		rule.allow_loopback = !block_mcast_loopback;
+ rule.port = port;
+ rule.qpn = qp->qpn;
+ INIT_LIST_HEAD(&rule.list);
+
+ switch (prot) {
+ case MLX4_PROT_ETH:
+ spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
+ memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+ break;
- if (mlx4_is_mfunc(dev))
- return mlx4_QP_ATTACH(dev, qp, gid, 1,
- block_mcast_loopback, prot);
+ case MLX4_PROT_IB_IPV6:
+ spec.id = MLX4_NET_TRANS_RULE_ID_IB;
+ memcpy(spec.ib.dst_gid, gid, 16);
+ memset(&spec.ib.dst_gid_msk, 0xff, 16);
+ break;
+ default:
+ return -EINVAL;
+ }
+ list_add_tail(&spec.list, &rule.list);
- return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
- prot, MLX4_MC_STEER);
+ return mlx4_flow_attach(dev, &rule, reg_id);
+ }
+
+ default:
+ return -EINVAL;
+ }
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- enum mlx4_protocol prot)
+ enum mlx4_protocol prot, u64 reg_id)
{
- if (prot == MLX4_PROT_ETH &&
- !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
- return 0;
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_A0:
+ if (prot == MLX4_PROT_ETH)
+ return 0;
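+		/* fall through: non-Ethernet protocols use the B0/IB path */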
- if (prot == MLX4_PROT_ETH)
- gid[7] |= (MLX4_MC_STEER << 1);
+ case MLX4_STEERING_MODE_B0:
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (MLX4_MC_STEER << 1);
- if (mlx4_is_mfunc(dev))
- return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
+
+ return mlx4_qp_detach_common(dev, qp, gid, prot,
+ MLX4_MC_STEER);
+
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ return mlx4_flow_detach(dev, reg_id);
- return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_MC_STEER);
+ default:
+ return -EINVAL;
+ }
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
+int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
+ u32 qpn, enum mlx4_net_trans_promisc_mode mode)
+{
+ struct mlx4_net_trans_rule rule;
+ u64 *regid_p;
+
+ switch (mode) {
+ case MLX4_FS_PROMISC_UPLINK:
+ case MLX4_FS_PROMISC_FUNCTION_PORT:
+ regid_p = &dev->regid_promisc_array[port];
+ break;
+ case MLX4_FS_PROMISC_ALL_MULTI:
+ regid_p = &dev->regid_allmulti_array[port];
+ break;
+ default:
+ return -1;
+ }
+
+ if (*regid_p != 0)
+ return -1;
+
+ rule.promisc_mode = mode;
+ rule.port = port;
+ rule.qpn = qpn;
+ INIT_LIST_HEAD(&rule.list);
+	mlx4_info(dev, "going promisc on port %d\n", port);
+
+ return mlx4_flow_attach(dev, &rule, regid_p);
+}
+EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add);
+
+int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
+ enum mlx4_net_trans_promisc_mode mode)
+{
+ int ret;
+ u64 *regid_p;
+
+ switch (mode) {
+ case MLX4_FS_PROMISC_UPLINK:
+ case MLX4_FS_PROMISC_FUNCTION_PORT:
+ regid_p = &dev->regid_promisc_array[port];
+ break;
+ case MLX4_FS_PROMISC_ALL_MULTI:
+ regid_p = &dev->regid_allmulti_array[port];
+ break;
+ default:
+ return -1;
+ }
+
+ if (*regid_p == 0)
+ return -1;
+
+ ret = mlx4_flow_detach(dev, *regid_p);
+ if (ret == 0)
+ *regid_p = 0;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove);
+
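Typical pairing of the two promiscuous-mode helpers above (a sketch; port and qpn are placeholders supplied by the caller):

/* Enter uplink promiscuous mode on a port and leave it again later. */
static int demo_toggle_promisc(struct mlx4_dev *dev, u8 port, u32 qpn)
{
	int err;

	err = mlx4_flow_steer_promisc_add(dev, port, qpn,
					  MLX4_FS_PROMISC_UPLINK);
	if (err)
		return err;

	/* ... all port traffic is now steered to qpn ... */

	return mlx4_flow_steer_promisc_remove(dev, port,
					      MLX4_FS_PROMISC_UPLINK);
}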
int mlx4_unicast_attach(struct mlx4_dev *dev,
struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot)
{
- if (prot == MLX4_PROT_ETH &&
- !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- return 0;
-
if (prot == MLX4_PROT_ETH)
gid[7] |= (MLX4_UC_STEER << 1);
@@ -924,10 +1373,6 @@ EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
u8 gid[16], enum mlx4_protocol prot)
{
- if (prot == MLX4_PROT_ETH &&
- !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- return 0;
-
if (prot == MLX4_PROT_ETH)
gid[7] |= (MLX4_UC_STEER << 1);
@@ -968,9 +1413,6 @@ static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
- return 0;
-
if (mlx4_is_mfunc(dev))
return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
@@ -980,9 +1422,6 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
- return 0;
-
if (mlx4_is_mfunc(dev))
return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
@@ -992,9 +1431,6 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- return 0;
-
if (mlx4_is_mfunc(dev))
return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
@@ -1004,9 +1440,6 @@ EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- return 0;
-
if (mlx4_is_mfunc(dev))
return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
@@ -1019,6 +1452,10 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
+	/* No need for mcg_table when fw manages the mcg table */
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return 0;
err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
dev->caps.num_amgms - 1, 0, 0);
if (err)
@@ -1031,5 +1468,7 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
{
- mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
+ if (dev->caps.steering_mode !=
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 86b6e5a..d2c436b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -39,6 +39,7 @@
#include <linux/mutex.h>
#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
@@ -53,6 +54,17 @@
#define DRV_VERSION "1.1"
#define DRV_RELDATE "Dec, 2011"
+#define MLX4_FS_UDP_UC_EN (1 << 1)
+#define MLX4_FS_TCP_UC_EN (1 << 2)
+#define MLX4_FS_NUM_OF_L2_ADDR 8
+#define MLX4_FS_MGM_LOG_ENTRY_SIZE 7
+#define MLX4_FS_NUM_MCG (1 << 17)
+
+enum {
+ MLX4_FS_L2_HASH = 0,
+ MLX4_FS_L2_L3_L4_HASH,
+};
+
#define MLX4_NUM_UP 8
#define MLX4_NUM_TC 8
#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */
@@ -137,6 +149,7 @@ enum mlx4_resource {
RES_VLAN,
RES_EQ,
RES_COUNTER,
+ RES_FS_RULE,
MLX4_NUM_OF_RESOURCE_TYPE
};
@@ -509,7 +522,7 @@ struct slave_list {
struct mlx4_resource_tracker {
spinlock_t lock;
/* tree for each resources */
- struct radix_tree_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
+ struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
/* num_of_slave's lists, one per slave */
struct slave_list *slave_list;
};
@@ -703,6 +716,7 @@ struct mlx4_set_port_rqp_calc_context {
struct mlx4_mac_entry {
u64 mac;
+ u64 reg_id;
};
struct mlx4_port_info {
@@ -776,6 +790,7 @@ struct mlx4_priv {
struct mutex bf_mutex;
struct io_mapping *bf_mapping;
int reserved_mtts;
+ int fs_hash_mode;
};
static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -1032,13 +1047,18 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
/* resource tracker functions*/
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
enum mlx4_resource resource_type,
- int resource_id, int *slave);
+ u64 resource_id, int *slave);
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
int mlx4_init_resource_tracker(struct mlx4_dev *dev);
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
enum mlx4_res_tracker_free_type type);
+int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -1054,6 +1074,11 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
@@ -1107,6 +1132,16 @@ int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 6ae3509..5f1ab10 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -43,6 +43,7 @@
#ifdef CONFIG_MLX4_EN_DCB
#include <linux/dcbnl.h>
#endif
+#include <linux/cpu_rmap.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
@@ -75,6 +76,10 @@
#define STAMP_SHIFT 31
#define STAMP_VAL 0x7fffffff
#define STATS_DELAY (HZ / 4)
+#define MAX_NUM_OF_FS_RULES 256
+
+#define MLX4_EN_FILTER_HASH_SHIFT 4
+#define MLX4_EN_FILTER_EXPIRY_QUOTA 60
/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
#define MAX_DESC_SIZE 512
@@ -106,7 +111,7 @@ enum {
#define MLX4_EN_MAX_TX_SIZE 8192
#define MLX4_EN_MAX_RX_SIZE 8192
-/* Minimum ring size for our page-allocation sceme to work */
+/* Minimum ring size for our page-allocation scheme to work */
#define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE)
@@ -227,6 +232,7 @@ struct mlx4_en_tx_desc {
struct mlx4_en_rx_alloc {
struct page *page;
+ dma_addr_t dma;
u16 offset;
};
@@ -404,6 +410,19 @@ struct mlx4_en_perf_stats {
#define NUM_PERF_COUNTERS 6
};
+enum mlx4_en_mclist_act {
+ MCLIST_NONE,
+ MCLIST_REM,
+ MCLIST_ADD,
+};
+
+struct mlx4_en_mc_list {
+ struct list_head list;
+ enum mlx4_en_mclist_act action;
+ u8 addr[ETH_ALEN];
+ u64 reg_id;
+};
+
struct mlx4_en_frag_info {
u16 frag_size;
u16 frag_prefix_size;
@@ -422,6 +441,11 @@ struct mlx4_en_frag_info {
#endif
+struct ethtool_flow_id {
+ struct ethtool_rx_flow_spec flow_spec;
+ u64 id;
+};
+
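The ethtool_flow_id entries pair a user-visible ethtool flow spec with the firmware registration id returned by mlx4_flow_attach(); a plausible bookkeeping sketch (assumed usage, the actual ethtool plumbing is not part of this hunk):

/* Assumed: store the spec and its reg_id under the ethtool rule location
 * so the rule can be looked up and detached later.
 */
static void demo_store_ethtool_rule(struct mlx4_en_priv *priv,
				    const struct ethtool_rx_flow_spec *fs,
				    u64 reg_id)
{
	struct ethtool_flow_id *slot = &priv->ethtool_rules[fs->location];

	slot->flow_spec = *fs;
	slot->id = reg_id;
}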
struct mlx4_en_priv {
struct mlx4_en_dev *mdev;
struct mlx4_en_port_profile *prof;
@@ -431,6 +455,7 @@ struct mlx4_en_priv {
struct net_device_stats ret_stats;
struct mlx4_en_port_state port_state;
spinlock_t stats_lock;
+ struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
unsigned long last_moder_packets[MAX_RX_RINGS];
unsigned long last_moder_tx_packets;
@@ -480,6 +505,7 @@ struct mlx4_en_priv {
struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
struct mlx4_en_cq *tx_cq;
struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
+ struct mlx4_qp drop_qp;
struct work_struct mcast_task;
struct work_struct mac_task;
struct work_struct watchdog_task;
@@ -489,17 +515,26 @@ struct mlx4_en_priv {
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_port_stats port_stats;
u64 stats_bitmap;
- char *mc_addrs;
- int mc_addrs_cnt;
+ struct list_head mc_list;
+ struct list_head curr_list;
+ u64 broadcast_id;
struct mlx4_en_stat_out_mbox hw_stats;
int vids[128];
bool wol;
struct device *ddev;
+ int base_tx_qpn;
#ifdef CONFIG_MLX4_EN_DCB
struct ieee_ets ets;
u16 maxrate[IEEE_8021QAZ_MAX_TCS];
#endif
+#ifdef CONFIG_RFS_ACCEL
+ spinlock_t filters_lock;
+ int last_filter_id;
+ struct list_head filters;
+ struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
+#endif
+
};
enum mlx4_en_wol {
@@ -564,6 +599,8 @@ void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
void mlx4_en_calc_rx_buf(struct net_device *dev);
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
+int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv);
+void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv);
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
void mlx4_en_rx_irq(struct mlx4_cq *mcq);
@@ -577,6 +614,11 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
#endif
+#ifdef CONFIG_RFS_ACCEL
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *rx_ring);
+#endif
+
#define MLX4_EN_NUM_SELF_TEST 5
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
u64 mlx4_en_mac_to_u64(u8 *addr);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 1fe2c7a..028833f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -39,7 +39,6 @@
#include "mlx4.h"
#define MLX4_MAC_VALID (1ull << 63)
-#define MLX4_MAC_MASK 0xffffffffffffULL
#define MLX4_VLAN_VALID (1u << 31)
#define MLX4_VLAN_MASK 0xfff
@@ -75,21 +74,54 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
table->total = 0;
}
-static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
+static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
+ u64 mac, int *qpn, u64 *reg_id)
{
- struct mlx4_qp qp;
- u8 gid[16] = {0};
__be64 be_mac;
int err;
- qp.qpn = *qpn;
-
- mac &= 0xffffffffffffULL;
+ mac &= MLX4_MAC_MASK;
be_mac = cpu_to_be64(mac << 16);
- memcpy(&gid[10], &be_mac, ETH_ALEN);
- gid[5] = port;
- err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_B0: {
+ struct mlx4_qp qp;
+ u8 gid[16] = {0};
+
+ qp.qpn = *qpn;
+ memcpy(&gid[10], &be_mac, ETH_ALEN);
+ gid[5] = port;
+
+ err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+ break;
+ }
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ struct mlx4_spec_list spec_eth = { {NULL} };
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_PROMISC_NONE,
+ .priority = MLX4_DOMAIN_NIC,
+ };
+
+ rule.port = port;
+ rule.qpn = *qpn;
+ INIT_LIST_HEAD(&rule.list);
+
+ spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_eth.eth.dst_mac, &be_mac, ETH_ALEN);
+ memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+ list_add_tail(&spec_eth.list, &rule.list);
+
+ err = mlx4_flow_attach(dev, &rule, reg_id);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
if (err)
mlx4_warn(dev, "Failed Attaching Unicast\n");
@@ -97,19 +129,30 @@ static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
}
static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
- u64 mac, int qpn)
+ u64 mac, int qpn, u64 reg_id)
{
- struct mlx4_qp qp;
- u8 gid[16] = {0};
- __be64 be_mac;
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_B0: {
+ struct mlx4_qp qp;
+ u8 gid[16] = {0};
+ __be64 be_mac;
- qp.qpn = qpn;
- mac &= 0xffffffffffffULL;
- be_mac = cpu_to_be64(mac << 16);
- memcpy(&gid[10], &be_mac, ETH_ALEN);
- gid[5] = port;
+ qp.qpn = qpn;
+ mac &= MLX4_MAC_MASK;
+ be_mac = cpu_to_be64(mac << 16);
+ memcpy(&gid[10], &be_mac, ETH_ALEN);
+ gid[5] = port;
- mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+ mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+ break;
+ }
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ mlx4_flow_detach(dev, reg_id);
+ break;
+ }
+ default:
+ mlx4_err(dev, "Invalid steering mode.\n");
+ }
}
static int validate_index(struct mlx4_dev *dev,
@@ -144,6 +187,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
struct mlx4_mac_entry *entry;
int index = 0;
int err = 0;
+ u64 reg_id;
mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
(unsigned long long) mac);
@@ -155,7 +199,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
return err;
}
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) {
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
*qpn = info->base_qpn + index;
return 0;
}
@@ -167,7 +211,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
goto qp_err;
}
- err = mlx4_uc_steer_add(dev, port, mac, qpn);
+ err = mlx4_uc_steer_add(dev, port, mac, qpn, &reg_id);
if (err)
goto steer_err;
@@ -177,6 +221,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
goto alloc_err;
}
entry->mac = mac;
+ entry->reg_id = reg_id;
err = radix_tree_insert(&info->mac_tree, *qpn, entry);
if (err)
goto insert_err;
@@ -186,7 +231,7 @@ insert_err:
kfree(entry);
alloc_err:
- mlx4_uc_steer_release(dev, port, mac, *qpn);
+ mlx4_uc_steer_release(dev, port, mac, *qpn, reg_id);
steer_err:
mlx4_qp_release_range(dev, *qpn, 1);
@@ -206,13 +251,14 @@ void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
(unsigned long long) mac);
mlx4_unregister_mac(dev, port, mac);
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (entry) {
mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
" qpn %d\n", port,
(unsigned long long) mac, qpn);
- mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+ mlx4_uc_steer_release(dev, port, entry->mac,
+ qpn, entry->reg_id);
mlx4_qp_release_range(dev, qpn, 1);
radix_tree_delete(&info->mac_tree, qpn);
kfree(entry);
@@ -359,15 +405,18 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
int index = qpn - info->base_qpn;
int err = 0;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (!entry)
return -EINVAL;
- mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+ mlx4_uc_steer_release(dev, port, entry->mac,
+ qpn, entry->reg_id);
mlx4_unregister_mac(dev, port, entry->mac);
entry->mac = new_mac;
+ entry->reg_id = 0;
mlx4_register_mac(dev, port, new_mac);
- err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn);
+ err = mlx4_uc_steer_add(dev, port, entry->mac,
+ &qpn, &entry->reg_id);
return err;
}
@@ -697,10 +746,10 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
if (slave != dev->caps.function)
memset(inbox->buf, 0, 256);
if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
- *(u8 *) inbox->buf = !!reset_qkey_viols << 6;
+ *(u8 *) inbox->buf |= !!reset_qkey_viols << 6;
((__be32 *) inbox->buf)[2] = agg_cap_mask;
} else {
- ((u8 *) inbox->buf)[3] = !!reset_qkey_viols;
+ ((u8 *) inbox->buf)[3] |= !!reset_qkey_viols;
((__be32 *) inbox->buf)[1] = agg_cap_mask;
}
@@ -803,8 +852,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
MCAST_DIRECT : MCAST_DEFAULT;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
- dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
return 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 06e5ade..9ee4725 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -126,7 +126,9 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
profile[MLX4_RES_AUXC].num = request->num_qp;
profile[MLX4_RES_SRQ].num = request->num_srq;
profile[MLX4_RES_CQ].num = request->num_cq;
- profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
+ profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ?
+ dev->phys_caps.num_phys_eqs :
+ min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
profile[MLX4_RES_DMPT].num = request->num_mpt;
profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg);
@@ -215,9 +217,10 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
init_hca->log_num_cqs = profile[i].log_num;
break;
case MLX4_RES_EQ:
- dev->caps.num_eqs = profile[i].num;
+ dev->caps.num_eqs = roundup_pow_of_two(min_t(unsigned, dev_cap->max_eqs,
+ MAX_MSIX));
init_hca->eqc_base = profile[i].start;
- init_hca->log_num_eqs = profile[i].log_num;
+ init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
break;
case MLX4_RES_DMPT:
dev->caps.num_mpts = profile[i].num;
@@ -234,13 +237,19 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
init_hca->mtt_base = profile[i].start;
break;
case MLX4_RES_MCG:
- dev->caps.num_mgms = profile[i].num >> 1;
- dev->caps.num_amgms = profile[i].num >> 1;
init_hca->mc_base = profile[i].start;
init_hca->log_mc_entry_sz =
ilog2(mlx4_get_mgm_entry_size(dev));
init_hca->log_mc_table_sz = profile[i].log_num;
- init_hca->log_mc_hash_sz = profile[i].log_num - 1;
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ dev->caps.num_mgms = profile[i].num;
+ } else {
+ init_hca->log_mc_hash_sz =
+ profile[i].log_num - 1;
+ dev->caps.num_mgms = profile[i].num >> 1;
+ dev->caps.num_amgms = profile[i].num >> 1;
+ }
break;
default:
break;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index b45d0e7..94ceddd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -41,13 +41,12 @@
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
+#include <linux/if_ether.h>
#include "mlx4.h"
#include "fw.h"
#define MLX4_MAC_VALID (1ull << 63)
-#define MLX4_MAC_MASK 0x7fffffffffffffffULL
-#define ETH_ALEN 6
struct mac_res {
struct list_head list;
@@ -57,7 +56,8 @@ struct mac_res {
struct res_common {
struct list_head list;
- u32 res_id;
+ struct rb_node node;
+ u64 res_id;
int owner;
int state;
int from_state;
@@ -189,6 +189,58 @@ struct res_xrcdn {
int port;
};
+enum res_fs_rule_states {
+ RES_FS_RULE_BUSY = RES_ANY_BUSY,
+ RES_FS_RULE_ALLOCATED,
+};
+
+struct res_fs_rule {
+ struct res_common com;
+};
+
+static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct res_common *res = container_of(node, struct res_common,
+ node);
+
+ if (res_id < res->res_id)
+ node = node->rb_left;
+ else if (res_id > res->res_id)
+ node = node->rb_right;
+ else
+ return res;
+ }
+ return NULL;
+}
+
+static int res_tracker_insert(struct rb_root *root, struct res_common *res)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ struct res_common *this = container_of(*new, struct res_common,
+ node);
+
+ parent = *new;
+ if (res->res_id < this->res_id)
+ new = &((*new)->rb_left);
+ else if (res->res_id > this->res_id)
+ new = &((*new)->rb_right);
+ else
+ return -EEXIST;
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&res->node, parent, new);
+ rb_insert_color(&res->node, root);
+
+ return 0;
+}
+
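The two helpers above follow the stock kernel rbtree pattern; a minimal usage sketch, simplified from add_res_range()/rem_res_range() below:

/* Track a resource keyed by its 64-bit id, look it up, then drop it. */
static int demo_track_one(struct rb_root *root, struct res_common *res)
{
	int err;

	err = res_tracker_insert(root, res);	/* -EEXIST if the id is taken */
	if (err)
		return err;

	if (res_tracker_lookup(root, res->res_id) != res)
		return -ENOENT;			/* should not happen */

	rb_erase(&res->node, root);		/* e.g. when the slave goes away */
	return 0;
}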
/* For Debug uses */
static const char *ResourceType(enum mlx4_resource rt)
{
@@ -201,6 +253,7 @@ static const char *ResourceType(enum mlx4_resource rt)
case RES_MAC: return "RES_MAC";
case RES_EQ: return "RES_EQ";
case RES_COUNTER: return "RES_COUNTER";
+ case RES_FS_RULE: return "RES_FS_RULE";
case RES_XRCD: return "RES_XRCD";
default: return "Unknown resource type !!!";
};
@@ -228,8 +281,7 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
dev->num_slaves);
for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
- INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
- GFP_ATOMIC|__GFP_NOWARN);
+ priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
spin_lock_init(&priv->mfunc.master.res_tracker.lock);
return 0 ;
@@ -277,11 +329,11 @@ static void *find_res(struct mlx4_dev *dev, int res_id,
{
struct mlx4_priv *priv = mlx4_priv(dev);
- return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
- res_id);
+ return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
+ res_id);
}
-static int get_res(struct mlx4_dev *dev, int slave, int res_id,
+static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
enum mlx4_resource type,
void *res)
{
@@ -307,7 +359,7 @@ static int get_res(struct mlx4_dev *dev, int slave, int res_id,
r->from_state = r->state;
r->state = RES_ANY_BUSY;
- mlx4_dbg(dev, "res %s id 0x%x to busy\n",
+ mlx4_dbg(dev, "res %s id 0x%llx to busy\n",
ResourceType(type), r->res_id);
if (res)
@@ -320,7 +372,7 @@ exit:
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
enum mlx4_resource type,
- int res_id, int *slave)
+ u64 res_id, int *slave)
{
struct res_common *r;
@@ -341,7 +393,7 @@ int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
return err;
}
-static void put_res(struct mlx4_dev *dev, int slave, int res_id,
+static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
enum mlx4_resource type)
{
struct res_common *r;
@@ -473,7 +525,21 @@ static struct res_common *alloc_xrcdn_tr(int id)
return &ret->com;
}
-static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
+static struct res_common *alloc_fs_rule_tr(u64 id)
+{
+ struct res_fs_rule *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_FS_RULE_ALLOCATED;
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
int extra)
{
struct res_common *ret;
@@ -506,6 +572,9 @@ static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
case RES_XRCD:
ret = alloc_xrcdn_tr(id);
break;
+ case RES_FS_RULE:
+ ret = alloc_fs_rule_tr(id);
+ break;
default:
return NULL;
}
@@ -515,7 +584,7 @@ static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
return ret;
}
-static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
enum mlx4_resource type, int extra)
{
int i;
@@ -523,7 +592,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
struct mlx4_priv *priv = mlx4_priv(dev);
struct res_common **res_arr;
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
- struct radix_tree_root *root = &tracker->res_tree[type];
+ struct rb_root *root = &tracker->res_tree[type];
res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
if (!res_arr)
@@ -546,7 +615,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
err = -EEXIST;
goto undo;
}
- err = radix_tree_insert(root, base + i, res_arr[i]);
+ err = res_tracker_insert(root, res_arr[i]);
if (err)
goto undo;
list_add_tail(&res_arr[i]->list,
@@ -559,7 +628,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
undo:
for (--i; i >= base; --i)
- radix_tree_delete(&tracker->res_tree[type], i);
+ rb_erase(&res_arr[i]->node, root);
spin_unlock_irq(mlx4_tlock(dev));
@@ -638,6 +707,16 @@ static int remove_xrcdn_ok(struct res_xrcdn *res)
return 0;
}
+static int remove_fs_rule_ok(struct res_fs_rule *res)
+{
+ if (res->com.state == RES_FS_RULE_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_FS_RULE_ALLOCATED)
+ return -EPERM;
+
+ return 0;
+}
+
static int remove_cq_ok(struct res_cq *res)
{
if (res->com.state == RES_CQ_BUSY)
@@ -679,15 +758,17 @@ static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
return remove_counter_ok((struct res_counter *)res);
case RES_XRCD:
return remove_xrcdn_ok((struct res_xrcdn *)res);
+ case RES_FS_RULE:
+ return remove_fs_rule_ok((struct res_fs_rule *)res);
default:
return -EINVAL;
}
}
-static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
enum mlx4_resource type, int extra)
{
- int i;
+ u64 i;
int err;
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
@@ -695,7 +776,7 @@ static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
spin_lock_irq(mlx4_tlock(dev));
for (i = base; i < base + count; ++i) {
- r = radix_tree_lookup(&tracker->res_tree[type], i);
+ r = res_tracker_lookup(&tracker->res_tree[type], i);
if (!r) {
err = -ENOENT;
goto out;
@@ -710,8 +791,8 @@ static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
}
for (i = base; i < base + count; ++i) {
- r = radix_tree_lookup(&tracker->res_tree[type], i);
- radix_tree_delete(&tracker->res_tree[type], i);
+ r = res_tracker_lookup(&tracker->res_tree[type], i);
+ rb_erase(&r->node, &tracker->res_tree[type]);
list_del(&r->list);
kfree(r);
}
@@ -733,7 +814,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
+ r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
@@ -741,7 +822,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
else {
switch (state) {
case RES_QP_BUSY:
- mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
+ mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
__func__, r->com.res_id);
err = -EBUSY;
break;
@@ -750,7 +831,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
if (r->com.state == RES_QP_MAPPED && !alloc)
break;
- mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
+ mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
err = -EINVAL;
break;
@@ -759,7 +840,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
r->com.state == RES_QP_HW)
break;
else {
- mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
+ mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
r->com.res_id);
err = -EINVAL;
}
@@ -779,7 +860,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
r->com.to_state = state;
r->com.state = RES_QP_BUSY;
if (qp)
- *qp = (struct res_qp *)r;
+ *qp = r;
}
}
@@ -797,7 +878,7 @@ static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
+ r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
@@ -832,7 +913,7 @@ static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
r->com.to_state = state;
r->com.state = RES_MPT_BUSY;
if (mpt)
- *mpt = (struct res_mpt *)r;
+ *mpt = r;
}
}
@@ -850,7 +931,7 @@ static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
+ r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
@@ -898,7 +979,7 @@ static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
int err;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
+ r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
@@ -952,7 +1033,7 @@ static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
+ r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
@@ -1001,7 +1082,7 @@ static void res_abort_move(struct mlx4_dev *dev, int slave,
struct res_common *r;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[type], id);
+ r = res_tracker_lookup(&tracker->res_tree[type], id);
if (r && (r->owner == slave))
r->state = r->from_state;
spin_unlock_irq(mlx4_tlock(dev));
@@ -1015,7 +1096,7 @@ static void res_end_move(struct mlx4_dev *dev, int slave,
struct res_common *r;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[type], id);
+ r = res_tracker_lookup(&tracker->res_tree[type], id);
if (r && (r->owner == slave))
r->state = r->to_state;
spin_unlock_irq(mlx4_tlock(dev));
@@ -2695,6 +2776,60 @@ ex_put:
return err;
}
+int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+
+ if (dev->caps.steering_mode !=
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return -EOPNOTSUPP;
+
+ err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
+ vhcr->in_modifier, 0,
+ MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+
+ err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
+ if (err) {
+		mlx4_err(dev, "Failed to add flow steering resources.\n");
+		/* detach the rule that was just attached */
+		mlx4_cmd(dev, vhcr->out_param, 0, 0,
+			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ }
+ return err;
+}
+
+int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+
+ if (dev->caps.steering_mode !=
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return -EOPNOTSUPP;
+
+ err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
+ if (err) {
+		mlx4_err(dev, "Failed to remove flow steering resources.\n");
+ return err;
+ }
+
+ err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
+ MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ return err;
+}
+
enum {
BUSY_MAX_RETRIES = 10
};
@@ -2751,7 +2886,7 @@ static int _move_all_busy(struct mlx4_dev *dev, int slave,
if (r->state == RES_ANY_BUSY) {
if (print)
mlx4_dbg(dev,
- "%s id 0x%x is busy\n",
+ "%s id 0x%llx is busy\n",
ResourceType(type),
r->res_id);
++busy;
@@ -2817,8 +2952,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
switch (state) {
case RES_QP_RESERVED:
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_QP],
- qp->com.res_id);
+ rb_erase(&qp->com.node,
+ &tracker->res_tree[RES_QP]);
list_del(&qp->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(qp);
@@ -2888,8 +3023,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
case RES_SRQ_ALLOCATED:
__mlx4_srq_free_icm(dev, srqn);
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_SRQ],
- srqn);
+ rb_erase(&srq->com.node,
+ &tracker->res_tree[RES_SRQ]);
list_del(&srq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(srq);
@@ -2954,8 +3089,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
case RES_CQ_ALLOCATED:
__mlx4_cq_free_icm(dev, cqn);
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_CQ],
- cqn);
+ rb_erase(&cq->com.node,
+ &tracker->res_tree[RES_CQ]);
list_del(&cq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(cq);
@@ -3017,8 +3152,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
case RES_MPT_RESERVED:
__mlx4_mr_release(dev, mpt->key);
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_MPT],
- mptn);
+ rb_erase(&mpt->com.node,
+ &tracker->res_tree[RES_MPT]);
list_del(&mpt->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(mpt);
@@ -3086,8 +3221,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
__mlx4_free_mtt_range(dev, base,
mtt->order);
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_MTT],
- base);
+ rb_erase(&mtt->com.node,
+ &tracker->res_tree[RES_MTT]);
list_del(&mtt->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(mtt);
@@ -3104,6 +3239,58 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
spin_unlock_irq(mlx4_tlock(dev));
}
+static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker =
+ &priv->mfunc.master.res_tracker;
+ struct list_head *fs_rule_list =
+ &tracker->slave_list[slave].res_list[RES_FS_RULE];
+ struct res_fs_rule *fs_rule;
+ struct res_fs_rule *tmp;
+ int state;
+ u64 base;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_FS_RULE);
+ if (err)
+		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all flow steering rules to busy for slave %d\n",
+ slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (fs_rule->com.owner == slave) {
+ base = fs_rule->com.res_id;
+ state = fs_rule->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_FS_RULE_ALLOCATED:
+ /* detach rule */
+ err = mlx4_cmd(dev, base, 0, 0,
+ MLX4_QP_FLOW_STEERING_DETACH,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ rb_erase(&fs_rule->com.node,
+ &tracker->res_tree[RES_FS_RULE]);
+ list_del(&fs_rule->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(fs_rule);
+ state = 0;
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -3133,8 +3320,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
switch (state) {
case RES_EQ_RESERVED:
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_EQ],
- eqn);
+ rb_erase(&eq->com.node,
+ &tracker->res_tree[RES_EQ]);
list_del(&eq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(eq);
@@ -3191,7 +3378,8 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
if (counter->com.owner == slave) {
index = counter->com.res_id;
- radix_tree_delete(&tracker->res_tree[RES_COUNTER], index);
+ rb_erase(&counter->com.node,
+ &tracker->res_tree[RES_COUNTER]);
list_del(&counter->com.list);
kfree(counter);
__mlx4_counter_free(dev, index);
@@ -3220,7 +3408,7 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
if (xrcd->com.owner == slave) {
xrcdn = xrcd->com.res_id;
- radix_tree_delete(&tracker->res_tree[RES_XRCD], xrcdn);
+ rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
list_del(&xrcd->com.list);
kfree(xrcd);
__mlx4_xrcd_free(dev, xrcdn);
@@ -3244,5 +3432,6 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
rem_slave_mtts(dev, slave);
rem_slave_counters(dev, slave);
rem_slave_xrcdns(dev, slave);
+ rem_slave_fs_rule(dev, slave);
mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
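The mlx4 hunks above convert the resource tracker's per-type lookup structures from radix trees to red-black trees: each radix_tree_delete() becomes an rb_erase() on the resource's embedded rb_node, still done under the tracker spinlock together with the list_del() and the final kfree(). A minimal sketch of that erase-under-lock pattern, using hypothetical names (res_common, tracker_lock) rather than the driver's real types:

#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

struct res_common {
	struct rb_node node;	/* linked into the per-type rb-tree */
	struct list_head list;	/* linked into the per-slave list */
	u64 res_id;
};

static DEFINE_SPINLOCK(tracker_lock);

/* Unlink one tracked resource from both structures, then free it. */
static void res_tracker_remove(struct res_common *res, struct rb_root *root)
{
	spin_lock_irq(&tracker_lock);
	rb_erase(&res->node, root);
	list_del(&res->list);
	spin_unlock_irq(&tracker_lock);
	kfree(res);
}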
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 5e313e9..1540ebe 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -422,7 +422,7 @@ static void ks8851_read_mac_addr(struct net_device *dev)
*
* Get or create the initial mac address for the device and then set that
* into the station address register. If there is an EEPROM present, then
- * we try that. If no valid mac address is found we use random_ether_addr()
+ * we try that. If no valid mac address is found we use eth_random_addr()
* to create a new one.
*/
static void ks8851_init_mac(struct ks8851_net *ks)
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 5ffde23..38529ed 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -16,8 +16,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/**
- * Supports:
+/* Supports:
* KS8851 16bit MLL chip from Micrel Inc.
*/
@@ -35,7 +34,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/ks8851_mll.h>
#define DRV_NAME "ks8851_mll"
@@ -465,8 +464,7 @@ static int msg_enable;
#define BE1 0x2000 /* Byte Enable 1 */
#define BE0 0x1000 /* Byte Enable 0 */
-/**
- * register read/write calls.
+/* register read/write calls.
*
* All these calls issue transactions to access the chip's registers. They
* all require that the necessary lock is held to prevent accesses when the
@@ -1103,7 +1101,7 @@ static void ks_set_grpaddr(struct ks_net *ks)
}
} /* ks_set_grpaddr */
-/*
+/**
* ks_clear_mcast - clear multicast information
*
* @ks : The chip information
@@ -1515,6 +1513,7 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
struct net_device *netdev;
struct ks_net *ks;
u16 id, data;
+ struct ks8851_mll_platform_data *pdata;
io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -1596,17 +1595,27 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
ks_disable_qmu(ks);
ks_setup(ks);
ks_setup_int(ks);
- memcpy(netdev->dev_addr, ks->mac_addr, 6);
data = ks_rdreg16(ks, KS_OBCR);
ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
- /**
- * If you want to use the default MAC addr,
- * comment out the 2 functions below.
- */
+ /* overwriting the default MAC address */
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ netdev_err(netdev, "No platform data\n");
+ err = -ENODEV;
+ goto err_pdata;
+ }
+ memcpy(ks->mac_addr, pdata->mac_addr, 6);
+ if (!is_valid_ether_addr(ks->mac_addr)) {
+ /* Use random MAC address if none passed */
+ eth_random_addr(ks->mac_addr);
+ netdev_info(netdev, "Using random mac address\n");
+ }
+ netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
+
+ memcpy(netdev->dev_addr, ks->mac_addr, 6);
- random_ether_addr(netdev->dev_addr);
ks_set_mac(ks, netdev->dev_addr);
id = ks_rdreg16(ks, KS_CIDER);
@@ -1615,6 +1624,8 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
(id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
return 0;
+err_pdata:
+ unregister_netdev(netdev);
err_register:
err_get_irq:
iounmap(ks->hw_addr_cmd);
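The ks8851_mll probe now takes the station address from platform data and only falls back to a random one when that address is invalid. A small sketch of the same fallback, assuming the caller already owns a 6-byte destination buffer (the helper name is made up):

#include <linux/etherdevice.h>
#include <linux/string.h>

/* Prefer the address supplied by platform data; otherwise randomize. */
static void pick_mac_addr(u8 *dst, const u8 *from_pdata)
{
	if (from_pdata && is_valid_ether_addr(from_pdata))
		memcpy(dst, from_pdata, ETH_ALEN);
	else
		eth_random_addr(dst);	/* locally administered unicast address */
}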
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index eaf9ff0..318fee9 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -3913,7 +3913,7 @@ static void hw_start_rx(struct ksz_hw *hw)
hw->rx_stop = 2;
}
-/*
+/**
* hw_stop_rx - stop receiving
* @hw: The hardware instance.
*
@@ -4480,14 +4480,12 @@ static void ksz_init_rx_buffers(struct dev_info *adapter)
dma_buf->len = adapter->mtu;
if (!dma_buf->skb)
dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
- if (dma_buf->skb && !dma_buf->dma) {
- dma_buf->skb->dev = adapter->dev;
+ if (dma_buf->skb && !dma_buf->dma)
dma_buf->dma = pci_map_single(
adapter->pdev,
skb_tail_pointer(dma_buf->skb),
dma_buf->len,
PCI_DMA_FROMDEVICE);
- }
/* Set descriptor. */
set_rx_buf(desc, dma_buf->dma);
@@ -4881,8 +4879,8 @@ static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
left = hw_alloc_pkt(hw, skb->len, num);
if (left) {
if (left < num ||
- ((CHECKSUM_PARTIAL == skb->ip_summed) &&
- (ETH_P_IPV6 == htons(skb->protocol)))) {
+ (CHECKSUM_PARTIAL == skb->ip_summed &&
+ skb->protocol == htons(ETH_P_IPV6))) {
struct sk_buff *org_skb = skb;
skb = netdev_alloc_skb(dev, org_skb->len);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 90153fc..fa85cf1 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3775,7 +3775,7 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
mgp->num_slices = 1;
msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- ncpus = num_online_cpus();
+ ncpus = netif_get_num_default_rss_queues();
if (myri10ge_max_slices == 1 || msix_cap == 0 ||
(myri10ge_max_slices == -1 && ncpus < 2))
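myri10ge (and vxge further down) stop sizing their slice count from num_online_cpus() and instead ask netif_get_num_default_rss_queues(), which caps the default on machines with many CPUs. A hedged sketch of clamping a driver's ring count with it; max_rings stands in for a module parameter:

#include <linux/kernel.h>
#include <linux/netdevice.h>

static int pick_num_rx_rings(int max_rings)
{
	/* Never request more rings than the stack's default RSS queue count. */
	return min(max_rings, netif_get_num_default_rss_queues());
}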
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index bb36758..d958c22 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -3377,7 +3377,7 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
} while (cnt < 20);
return ret;
}
-/*
+/**
* check_pci_device_id - Checks if the device id is supported
* @id : device id
* Description: Function to check if the pci device id is supported by driver.
@@ -5238,7 +5238,7 @@ static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
}
/**
- * s2io_set_mac_addr driver entry point
+ * s2io_set_mac_addr - driver entry point
*/
static int s2io_set_mac_addr(struct net_device *dev, void *p)
@@ -6088,7 +6088,7 @@ static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
}
/**
- * s2io-link_test - verifies the link state of the nic
+ * s2io_link_test - verifies the link state of the nic
* @sp: private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @data: variable that returns the result of each of the test conducted by
@@ -6116,9 +6116,9 @@ static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
/**
* s2io_rldram_test - offline test for access to the RldRam chip on the NIC
- * @sp - private member of the device structure, which is a pointer to the
+ * @sp: private member of the device structure, which is a pointer to the
* s2io_nic structure.
- * @data - variable that returns the result of each of the test
+ * @data: variable that returns the result of each of the tests
* conducted by the driver.
* Description:
* This is one of the offline test that tests the read and write
@@ -6946,9 +6946,9 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp)
if (sp->rxd_mode == RXD_MODE_3B)
ba = &ring->ba[j][k];
if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
- (u64 *)&temp0_64,
- (u64 *)&temp1_64,
- (u64 *)&temp2_64,
+ &temp0_64,
+ &temp1_64,
+ &temp2_64,
size) == -ENOMEM) {
return 0;
}
@@ -7149,7 +7149,7 @@ static int s2io_card_up(struct s2io_nic *sp)
int i, ret = 0;
struct config_param *config;
struct mac_info *mac_control;
- struct net_device *dev = (struct net_device *)sp->dev;
+ struct net_device *dev = sp->dev;
u16 interruptible;
/* Initialize the H/W I/O registers */
@@ -7325,7 +7325,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
struct s2io_nic *sp = ring_data->nic;
- struct net_device *dev = (struct net_device *)ring_data->dev;
+ struct net_device *dev = ring_data->dev;
struct sk_buff *skb = (struct sk_buff *)
((unsigned long)rxdp->Host_Control);
int ring_no = ring_data->ring_no;
@@ -7508,7 +7508,7 @@ aggregate:
static void s2io_link(struct s2io_nic *sp, int link)
{
- struct net_device *dev = (struct net_device *)sp->dev;
+ struct net_device *dev = sp->dev;
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
if (link != sp->last_link_state) {
@@ -8280,7 +8280,7 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
return -1;
}
- *ip = (struct iphdr *)((u8 *)buffer + ip_off);
+ *ip = (struct iphdr *)(buffer + ip_off);
ip_len = (u8)((*ip)->ihl);
ip_len <<= 2;
*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 98e2c10..32d0682 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -2346,7 +2346,7 @@ void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
for (i = 0; i < nreq; i++)
vxge_os_dma_malloc_async(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ (blockpool->hldev)->pdev,
blockpool->hldev, VXGE_HW_BLOCK_SIZE);
}
@@ -2428,13 +2428,13 @@ __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
break;
pci_unmap_single(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ (blockpool->hldev)->pdev,
((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
((struct __vxge_hw_blockpool_entry *)p)->length,
PCI_DMA_BIDIRECTIONAL);
vxge_os_dma_free(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ (blockpool->hldev)->pdev,
((struct __vxge_hw_blockpool_entry *)p)->memblock,
&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
@@ -4059,7 +4059,7 @@ __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_virtualpath *vpath;
- vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id];
+ vpath = &hldev->virtual_paths[vp_id];
if (vpath->ringh) {
status = __vxge_hw_ring_reset(vpath->ringh);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
index 5046a64..9e0c1ee 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
@@ -1922,7 +1922,7 @@ realloc:
/* misaligned, free current one and try allocating
* size + VXGE_CACHE_LINE_SIZE memory
*/
- kfree((void *) vaddr);
+ kfree(vaddr);
size += VXGE_CACHE_LINE_SIZE;
realloc_flag = 1;
goto realloc;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 51387c3..de21904 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1134,7 +1134,7 @@ static void vxge_set_multicast(struct net_device *dev)
"%s:%d", __func__, __LINE__);
vdev = netdev_priv(dev);
- hldev = (struct __vxge_hw_device *)vdev->devh;
+ hldev = vdev->devh;
if (unlikely(!is_vxge_card_up(vdev)))
return;
@@ -3131,12 +3131,12 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
u64 packets, bytes, multicast;
do {
- start = u64_stats_fetch_begin(&rxstats->syncp);
+ start = u64_stats_fetch_begin_bh(&rxstats->syncp);
packets = rxstats->rx_frms;
multicast = rxstats->rx_mcast;
bytes = rxstats->rx_bytes;
- } while (u64_stats_fetch_retry(&rxstats->syncp, start));
+ } while (u64_stats_fetch_retry_bh(&rxstats->syncp, start));
net_stats->rx_packets += packets;
net_stats->rx_bytes += bytes;
@@ -3146,11 +3146,11 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
net_stats->rx_dropped += rxstats->rx_dropped;
do {
- start = u64_stats_fetch_begin(&txstats->syncp);
+ start = u64_stats_fetch_begin_bh(&txstats->syncp);
packets = txstats->tx_frms;
bytes = txstats->tx_bytes;
- } while (u64_stats_fetch_retry(&txstats->syncp, start));
+ } while (u64_stats_fetch_retry_bh(&txstats->syncp, start));
net_stats->tx_packets += packets;
net_stats->tx_bytes += bytes;
@@ -3687,7 +3687,8 @@ static int __devinit vxge_config_vpaths(
return 0;
if (!driver_config->g_no_cpus)
- driver_config->g_no_cpus = num_online_cpus();
+ driver_config->g_no_cpus =
+ netif_get_num_default_rss_queues();
driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
if (!driver_config->vpath_per_dev)
@@ -3989,16 +3990,16 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
continue;
vxge_debug_ll_config(VXGE_TRACE,
"%s: MTU size - %d", vdev->ndev->name,
- ((struct __vxge_hw_device *)(vdev->devh))->
+ ((vdev->devh))->
config.vp_config[i].mtu);
vxge_debug_init(VXGE_TRACE,
"%s: VLAN tag stripping %s", vdev->ndev->name,
- ((struct __vxge_hw_device *)(vdev->devh))->
+ ((vdev->devh))->
config.vp_config[i].rpa_strip_vlan_tag
? "Enabled" : "Disabled");
vxge_debug_ll_config(VXGE_TRACE,
"%s: Max frags : %d", vdev->ndev->name,
- ((struct __vxge_hw_device *)(vdev->devh))->
+ ((vdev->devh))->
config.vp_config[i].fifo.max_frags);
break;
}
@@ -4260,9 +4261,7 @@ static int vxge_probe_fw_update(struct vxgedev *vdev)
if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
VXGE_FW_VER(maj, min, 0)) {
vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
- " be used with this driver.\n"
- "Please get the latest version from "
- "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
+ " be used with this driver.",
VXGE_DRIVER_NAME, maj, min, bld);
return -EINVAL;
}
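The vxge-main.c stats hunks switch to the _bh variants of the u64 stats helpers so the seqcount is sampled with bottom halves disabled, matching the context in which the Tx/Rx paths update the counters. A minimal reader-side sketch of that retry loop, with a hypothetical per-ring stats struct:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct ring_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void read_ring_stats(struct ring_stats *rs, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&rs->syncp);
		*packets = rs->packets;
		*bytes = rs->bytes;
	} while (u64_stats_fetch_retry_bh(&rs->syncp, start));
}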
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index 35f3e75..36ca40f 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -430,8 +430,7 @@ void vxge_initialize_ethtool_ops(struct net_device *ndev);
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
-/**
- * #define VXGE_DEBUG_INIT: debug for initialization functions
+/* #define VXGE_DEBUG_INIT: debug for initialization functions
* #define VXGE_DEBUG_TX : debug transmit related functions
* #define VXGE_DEBUG_RX : debug receive related functions
* #define VXGE_DEBUG_MEM : debug memory module
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 5954fa2..99749bd 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -533,8 +533,7 @@ __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
/* notify driver */
if (hldev->uld_callbacks->crit_err)
- hldev->uld_callbacks->crit_err(
- (struct __vxge_hw_device *)hldev,
+ hldev->uld_callbacks->crit_err(hldev,
type, vp_id);
out:
@@ -1322,7 +1321,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
/* check whether it is not the end */
if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
- vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
+ vxge_assert((rxdp)->host_control !=
0);
++ring->cmpl_cnt;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 928913c..f45def0 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3218,7 +3218,7 @@ static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
}
/**
- * nv_update_linkspeed: Setup the MAC according to the link partner
+ * nv_update_linkspeed - Setup the MAC according to the link partner
* @dev: Network device to be configured
*
* The function queries the PHY and checks if there is a link partner.
@@ -3552,8 +3552,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
return IRQ_HANDLED;
}
-/**
- * All _optimized functions are used to help increase performance
+/* All _optimized functions are used to help increase performance
* (reduce CPU and increase throughput). They use descripter version 3,
* compiler directives, and reduce memory accesses.
*/
@@ -3776,7 +3775,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
np->link_timeout = jiffies + LINK_TIMEOUT;
}
if (events & NVREG_IRQ_RECOVER_ERROR) {
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
/* disable interrupts on the nic */
writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
pci_push(base);
@@ -3786,7 +3785,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
np->recover_error = 1;
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
break;
}
if (unlikely(i > max_interrupt_work)) {
@@ -5183,6 +5182,7 @@ static const struct ethtool_ops ops = {
.get_ethtool_stats = nv_get_ethtool_stats,
.get_sset_count = nv_get_sset_count,
.self_test = nv_self_test,
+ .get_ts_info = ethtool_op_get_ts_info,
};
/* The mgmt unit and driver use a semaphore to access the phy during init */
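The forcedeth change replaces spin_lock_irq() with spin_lock_irqsave() inside nv_nic_irq_other(), so the unlock restores whatever interrupt state the caller had instead of unconditionally re-enabling interrupts. A generic sketch of the save/restore form, assuming a driver-private lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(priv_lock);

static void touch_shared_state(void)
{
	unsigned long flags;

	/* Correct whether or not the caller already disabled interrupts. */
	spin_lock_irqsave(&priv_lock, flags);
	/* ... modify state shared with the interrupt handler ... */
	spin_unlock_irqrestore(&priv_lock, flags);
}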
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 8d2666f..4069eda 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -44,7 +44,6 @@
#include <linux/of_net.h>
#include <linux/types.h>
-#include <linux/delay.h>
#include <linux/io.h>
#include <mach/board.h>
#include <mach/platform.h>
@@ -52,7 +51,6 @@
#define MODNAME "lpc-eth"
#define DRV_VERSION "1.00"
-#define PHYDEF_ADDR 0x00
#define ENET_MAXF_SIZE 1536
#define ENET_RX_DESC 48
@@ -416,9 +414,6 @@ static bool use_iram_for_net(struct device *dev)
#define TXDESC_CONTROL_LAST (1 << 30)
#define TXDESC_CONTROL_INT (1 << 31)
-static int lpc_eth_hard_start_xmit(struct sk_buff *skb,
- struct net_device *ndev);
-
/*
* Structure of a TX/RX descriptors and RX status
*/
@@ -440,7 +435,7 @@ struct netdata_local {
spinlock_t lock;
void __iomem *net_base;
u32 msg_enable;
- struct sk_buff *skb[ENET_TX_DESC];
+ unsigned int skblen[ENET_TX_DESC];
unsigned int last_tx_idx;
unsigned int num_used_tx_buffs;
struct mii_bus *mii_bus;
@@ -903,12 +898,11 @@ err_out:
static void __lpc_handle_xmit(struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
- struct sk_buff *skb;
u32 txcidx, *ptxstat, txstat;
txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
while (pldat->last_tx_idx != txcidx) {
- skb = pldat->skb[pldat->last_tx_idx];
+ unsigned int skblen = pldat->skblen[pldat->last_tx_idx];
/* A buffer is available, get buffer status */
ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
@@ -945,17 +939,16 @@ static void __lpc_handle_xmit(struct net_device *ndev)
} else {
/* Update stats */
ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
-
- /* Free buffer */
- dev_kfree_skb_irq(skb);
+ ndev->stats.tx_bytes += skblen;
}
txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
}
- if (netif_queue_stopped(ndev))
- netif_wake_queue(ndev);
+ if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+ }
}
static int __lpc_handle_recv(struct net_device *ndev, int budget)
@@ -1132,7 +1125,7 @@ static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);
/* Save the buffer and increment the buffer counter */
- pldat->skb[txidx] = skb;
+ pldat->skblen[txidx] = len;
pldat->num_used_tx_buffs++;
/* Start transmit */
@@ -1147,6 +1140,7 @@ static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_unlock_irq(&pldat->lock);
+ dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1320,6 +1314,7 @@ static const struct net_device_ops lpc_netdev_ops = {
.ndo_set_rx_mode = lpc_eth_set_multicast_list,
.ndo_do_ioctl = lpc_eth_ioctl,
.ndo_set_mac_address = lpc_set_mac_address,
+ .ndo_change_mtu = eth_change_mtu,
};
static int lpc_eth_drv_probe(struct platform_device *pdev)
@@ -1441,7 +1436,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
res->start);
netdev_dbg(ndev, "IO address size :%d\n",
res->end - res->start + 1);
- netdev_err(ndev, "IO address (mapped) :0x%p\n",
+ netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
pldat->net_base);
netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
index e48f084..5ae03e8 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
@@ -60,7 +60,7 @@ static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw)
/**
* pch_gbe_plat_init_hw - Initialize hardware
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed-EBUSY
*/
@@ -108,7 +108,7 @@ static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw)
/**
* pch_gbe_hal_setup_init_funcs - Initializes function pointers
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successfully
* ENOSYS: Function is not registered
*/
@@ -137,7 +137,7 @@ inline void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw)
/**
* pch_gbe_hal_init_hw - Initialize hardware
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successfully
* ENOSYS: Function is not registered
*/
@@ -155,7 +155,7 @@ inline s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw)
* @hw: Pointer to the HW structure
* @offset: The register to read
* @data: The buffer to store the 16-bit read.
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -172,7 +172,7 @@ inline s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset,
* @hw: Pointer to the HW structure
* @offset: The register to read
* @data: The value to write.
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -211,7 +211,7 @@ inline void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw)
/**
* pch_gbe_hal_read_mac_addr - Reads MAC address
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successfully
* ENOSYS: Function is not registered
*/
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index ac4e72d..9dbf38c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -77,7 +77,7 @@ static const struct pch_gbe_stats pch_gbe_gstrings_stats[] = {
* pch_gbe_get_settings - Get device-specific settings
* @netdev: Network interface device structure
* @ecmd: Ethtool command
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -100,7 +100,7 @@ static int pch_gbe_get_settings(struct net_device *netdev,
* pch_gbe_set_settings - Set device-specific settings
* @netdev: Network interface device structure
* @ecmd: Ethtool command
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -220,7 +220,7 @@ static void pch_gbe_get_wol(struct net_device *netdev,
* pch_gbe_set_wol - Turn Wake-on-Lan on or off
* @netdev: Network interface device structure
* @wol: Pointer to wake-on-LAN information structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -248,7 +248,7 @@ static int pch_gbe_set_wol(struct net_device *netdev,
/**
* pch_gbe_nway_reset - Restart autonegotiation
* @netdev: Network interface device structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -398,7 +398,7 @@ static void pch_gbe_get_pauseparam(struct net_device *netdev,
* pch_gbe_set_pauseparam - Set pause parameters
* @netdev: Network interface device structure
* @pause: Pause parameters structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 3787c64..b100656 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -301,7 +301,7 @@ inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
/**
* pch_gbe_mac_read_mac_addr - Read MAC address
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successful.
*/
s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
@@ -483,7 +483,7 @@ static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
/**
* pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -639,7 +639,7 @@ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
/**
* pch_gbe_alloc_queues - Allocate memory for all rings
* @adapter: Board private structure to initialize
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -670,7 +670,7 @@ static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
/**
* pch_gbe_init_phy - Initialize PHY
* @adapter: Board private structure to initialize
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -720,7 +720,7 @@ static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
* @netdev: Network interface device structure
* @addr: Phy ID
* @reg: Access location
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -1364,7 +1364,7 @@ static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
* pch_gbe_intr - Interrupt Handler
* @irq: Interrupt number
* @data: Pointer to a network interface device structure
- * Returns
+ * Returns:
* - IRQ_HANDLED: Our interrupt
* - IRQ_NONE: Not our interrupt
*/
@@ -1566,7 +1566,7 @@ static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
* pch_gbe_clean_tx - Reclaim resources after transmit completes
* @adapter: Board private structure
* @tx_ring: Tx descriptor ring
- * Returns
+ * Returns:
* true: Cleaned the descriptor
* false: Not cleaned the descriptor
*/
@@ -1660,7 +1660,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
* @rx_ring: Rx descriptor ring
* @work_done: Completed count
* @work_to_do: Request count
- * Returns
+ * Returns:
* true: Cleaned the descriptor
* false: Not cleaned the descriptor
*/
@@ -1775,7 +1775,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
* pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
* @adapter: Board private structure
* @tx_ring: Tx descriptor ring (for a specific queue) to setup
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -1822,7 +1822,7 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
* pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
* @adapter: Board private structure
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -1899,7 +1899,7 @@ void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
/**
* pch_gbe_request_irq - Allocate an interrupt line
* @adapter: Board private structure
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -1932,7 +1932,7 @@ static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
/**
* pch_gbe_up - Up GbE network device
* @adapter: Board private structure
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -2018,7 +2018,7 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
/**
* pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
* @adapter: Board private structure to initialize
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -2057,7 +2057,7 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
/**
* pch_gbe_open - Called when a network interface is made active
* @netdev: Network interface device structure
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -2097,7 +2097,7 @@ err_setup_tx:
/**
* pch_gbe_stop - Disables a network interface
* @netdev: Network interface device structure
- * Returns
+ * Returns:
* 0: Successfully
*/
static int pch_gbe_stop(struct net_device *netdev)
@@ -2117,7 +2117,7 @@ static int pch_gbe_stop(struct net_device *netdev)
* pch_gbe_xmit_frame - Packet transmitting start
* @skb: Socket buffer structure
* @netdev: Network interface device structure
- * Returns
+ * Returns:
* - NETDEV_TX_OK: Normal end
* - NETDEV_TX_BUSY: Error end
*/
@@ -2225,7 +2225,7 @@ static void pch_gbe_set_multi(struct net_device *netdev)
* pch_gbe_set_mac - Change the Ethernet Address of the NIC
* @netdev: Network interface device structure
* @addr: Pointer to an address structure
- * Returns
+ * Returns:
* 0: Successfully
* -EADDRNOTAVAIL: Failed
*/
@@ -2256,7 +2256,7 @@ static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
* pch_gbe_change_mtu - Change the Maximum Transfer Unit
* @netdev: Network interface device structure
* @new_mtu: New value for maximum frame size
- * Returns
+ * Returns:
* 0: Successfully
* -EINVAL: Failed
*/
@@ -2309,7 +2309,7 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
* pch_gbe_set_features - Reset device after features changed
* @netdev: Network interface device structure
* @features: New features
- * Returns
+ * Returns:
* 0: HW state updated successfully
*/
static int pch_gbe_set_features(struct net_device *netdev,
@@ -2334,7 +2334,7 @@ static int pch_gbe_set_features(struct net_device *netdev,
* @netdev: Network interface device structure
* @ifr: Pointer to ifr structure
* @cmd: Control command
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -2369,7 +2369,7 @@ static void pch_gbe_tx_timeout(struct net_device *netdev)
* pch_gbe_napi_poll - NAPI receive and transfer polling callback
* @napi: Pointer of polling device struct
* @budget: The maximum number of a packet
- * Returns
+ * Returns:
* false: Exit the polling mode
* true: Continue the polling mode
*/
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index 29e23be..8653c3b 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -139,7 +139,7 @@ MODULE_PARM_DESC(XsumTX, "Disable or enable Transmit Checksum offload");
/**
* pch_gbe_option - Force the MAC's flow control settings
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -220,7 +220,7 @@ static const struct pch_gbe_opt_list fc_list[] = {
* @value: value
* @opt: option
* @adapter: Board private structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 37ccbe5..eb3dfdb 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 79
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.79"
+#define _NETXEN_NIC_LINUX_SUBVERSION 80
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.80"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 3973040..10468e7 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -489,7 +489,7 @@ netxen_nic_get_pauseparam(struct net_device *dev,
int port = adapter->physical_port;
if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
- if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
return;
/* get flow control settings */
val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
@@ -511,7 +511,7 @@ netxen_nic_get_pauseparam(struct net_device *dev,
break;
}
} else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
- if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS))
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
return;
pause->rx_pause = 1;
val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
@@ -534,7 +534,7 @@ netxen_nic_set_pauseparam(struct net_device *dev,
int port = adapter->physical_port;
/* read mode */
if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
- if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
return -EIO;
/* set flow control */
val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
@@ -577,7 +577,7 @@ netxen_nic_set_pauseparam(struct net_device *dev,
}
NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val);
} else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
- if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS))
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
return -EIO;
val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
if (port == 0) {
@@ -826,7 +826,12 @@ netxen_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
dump->len = mdump->md_dump_size;
else
dump->len = 0;
- dump->flag = mdump->md_capture_mask;
+
+ if (!mdump->md_enabled)
+ dump->flag = ETH_FW_DUMP_DISABLE;
+ else
+ dump->flag = mdump->md_capture_mask;
+
dump->version = adapter->fw_version;
return 0;
}
@@ -840,8 +845,10 @@ netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val)
switch (val->flag) {
case NX_FORCE_FW_DUMP_KEY:
- if (!mdump->md_enabled)
- mdump->md_enabled = 1;
+ if (!mdump->md_enabled) {
+ netdev_info(netdev, "FW dump not enabled\n");
+ return 0;
+ }
if (adapter->fw_mdump_rdy) {
netdev_info(netdev, "Previous dump not cleared, not forcing dump\n");
return 0;
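The netxen ethtool and hw hunks fix off-by-one port checks: an index equal to NETXEN_NIU_MAX_*_PORTS is already out of range, so the comparison has to be >=, not >. A two-line illustration of the corrected bound against a made-up register table:

#include <linux/errno.h>

#define MAX_PORTS 4
static int port_regs[MAX_PORTS];

static int read_port_reg(int port, int *val)
{
	if (port < 0 || port >= MAX_PORTS)	/* '>' would let port == MAX_PORTS through */
		return -EINVAL;
	*val = port_regs[port];
	return 0;
}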
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index de96a94..946160f 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -365,7 +365,7 @@ static int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
return 0;
- if (port > NETXEN_NIU_MAX_XG_PORTS)
+ if (port >= NETXEN_NIU_MAX_XG_PORTS)
return -EINVAL;
mac_cfg = 0;
@@ -392,7 +392,7 @@ static int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
u32 port = adapter->physical_port;
u16 board_type = adapter->ahw.board_type;
- if (port > NETXEN_NIU_MAX_XG_PORTS)
+ if (port >= NETXEN_NIU_MAX_XG_PORTS)
return -EINVAL;
mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port));
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 8694124..bc165f4 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1437,8 +1437,6 @@ netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
netdev->name, cable_len);
}
- netxen_advert_link_change(adapter, link_status);
-
/* update link parameters */
if (duplex == LINKEVENT_FULL_DUPLEX)
adapter->link_duplex = DUPLEX_FULL;
@@ -1447,6 +1445,8 @@ netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
adapter->module_type = module;
adapter->link_autoneg = autoneg;
adapter->link_speed = link_speed;
+
+ netxen_advert_link_change(adapter, link_status);
}
static void
@@ -1532,8 +1532,6 @@ static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
} else
skb->ip_summed = CHECKSUM_NONE;
- skb->dev = adapter->netdev;
-
buffer->skb = NULL;
no_skb:
buffer->state = NETXEN_BUFFER_FREE;
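netxen_handle_linkevent() now updates duplex, module type, autoneg and speed before calling netxen_advert_link_change(), so anything triggered by the link notification reads fully populated values. A generic sketch of publishing state only after it is complete (the types and callback here are illustrative):

#include <linux/types.h>

struct link_state {
	int speed;
	int duplex;
	bool up;
};

/* Fill in every field first, then notify listeners of the change. */
static void update_and_advertise(struct link_state *ls, int speed, int duplex,
				 bool up, void (*advertise)(const struct link_state *))
{
	ls->speed = speed;
	ls->duplex = duplex;
	ls->up = up;

	advertise(ls);	/* observers now see a consistent snapshot */
}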
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 8680a5d..eaa1db9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -36,8 +36,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 28
-#define QLCNIC_LINUX_VERSIONID "5.0.28"
+#define _QLCNIC_LINUX_SUBVERSION 29
+#define QLCNIC_LINUX_VERSIONID "5.0.29"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -258,6 +258,8 @@ struct rcv_desc {
(((sts_data) >> 52) & 0x1)
#define qlcnic_get_lro_sts_seq_number(sts_data) \
((sts_data) & 0x0FFFFFFFF)
+#define qlcnic_get_lro_sts_mss(sts_data1) \
+ ((sts_data1 >> 32) & 0x0FFFF)
struct status_desc {
@@ -610,7 +612,11 @@ struct qlcnic_recv_context {
#define QLCNIC_CDRP_CMD_GET_MAC_STATS 0x00000037
#define QLCNIC_RCODE_SUCCESS 0
+#define QLCNIC_RCODE_INVALID_ARGS 6
#define QLCNIC_RCODE_NOT_SUPPORTED 9
+#define QLCNIC_RCODE_NOT_PERMITTED 10
+#define QLCNIC_RCODE_NOT_IMPL 15
+#define QLCNIC_RCODE_INVALID 16
#define QLCNIC_RCODE_TIMEOUT 17
#define QLCNIC_DESTROY_CTX_RESET 0
@@ -623,6 +629,7 @@ struct qlcnic_recv_context {
#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
#define QLCNIC_CAP0_VALIDOFF (1 << 11)
+#define QLCNIC_CAP0_LRO_MSS (1 << 21)
/*
* Context state
@@ -829,6 +836,9 @@ struct qlcnic_mac_list_s {
#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9
#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10
#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27
+#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31
+
+#define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2
/* module types */
#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -918,6 +928,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_NEED_FLR 0x1000
#define QLCNIC_FW_RESET_OWNER 0x2000
#define QLCNIC_FW_HANG 0x4000
+#define QLCNIC_FW_LRO_MSS_CAP 0x8000
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 8db8524..b8ead69 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -53,12 +53,39 @@ qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd)
rsp = qlcnic_poll_rsp(adapter);
if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
- dev_err(&pdev->dev, "card response timeout.\n");
+ dev_err(&pdev->dev, "CDRP response timeout.\n");
cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
cmd->rsp.cmd = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
- dev_err(&pdev->dev, "failed card response code:0x%x\n",
+ switch (cmd->rsp.cmd) {
+ case QLCNIC_RCODE_INVALID_ARGS:
+ dev_err(&pdev->dev, "CDRP invalid args: 0x%x.\n",
cmd->rsp.cmd);
+ break;
+ case QLCNIC_RCODE_NOT_SUPPORTED:
+ case QLCNIC_RCODE_NOT_IMPL:
+ dev_err(&pdev->dev,
+ "CDRP command not supported: 0x%x.\n",
+ cmd->rsp.cmd);
+ break;
+ case QLCNIC_RCODE_NOT_PERMITTED:
+ dev_err(&pdev->dev,
+ "CDRP requested action not permitted: 0x%x.\n",
+ cmd->rsp.cmd);
+ break;
+ case QLCNIC_RCODE_INVALID:
+ dev_err(&pdev->dev,
+ "CDRP invalid or unknown cmd received: 0x%x.\n",
+ cmd->rsp.cmd);
+ break;
+ case QLCNIC_RCODE_TIMEOUT:
+ dev_err(&pdev->dev, "CDRP command timeout: 0x%x.\n",
+ cmd->rsp.cmd);
+ break;
+ default:
+ dev_err(&pdev->dev, "CDRP command failed: 0x%x.\n",
+ cmd->rsp.cmd);
+ }
} else if (rsp == QLCNIC_CDRP_RSP_OK) {
cmd->rsp.cmd = QLCNIC_RCODE_SUCCESS;
if (cmd->rsp.arg2)
@@ -237,6 +264,9 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
| QLCNIC_CAP0_VALIDOFF);
cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+ cap |= QLCNIC_CAP0_LRO_MSS;
+
prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx,
msix_handler);
prq->txrx_sds_binding = nsds_rings - 1;
@@ -954,9 +984,6 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
- } else {
- dev_info(&adapter->pdev->dev,
- "%s: Get mac stats failed =%d.\n", __func__, err);
}
dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index 6ced319..28a6b28 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -588,6 +588,7 @@ enum {
#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
+#define CRB_FW_CAPABILITIES_2 (QLCNIC_CAM_RAM(0x12c))
#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
/*
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 799fd40..0bcda9c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -1488,8 +1488,6 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
skb_checksum_none_assert(skb);
}
- skb->dev = adapter->netdev;
-
buffer->skb = NULL;
return skb;
@@ -1653,6 +1651,9 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
length = skb->len;
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+ skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
+
if (vid != 0xffff)
__vlan_hwaccel_put_tag(skb, vid);
netif_receive_skb(skb);
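qlcnic starts forwarding the firmware-reported MSS for LRO'd frames: the new qlcnic_get_lro_sts_mss() macro pulls bits 47:32 out of the second status word and the Rx path stores the result in skb_shinfo(skb)->gso_size before handing the skb up. A small sketch of that field extraction, mirroring the shift and mask added in qlcnic.h:

#include <linux/types.h>

/* Extract a 16-bit MSS field from bits 47:32 of a 64-bit status word. */
static inline u16 lro_sts_mss(u64 sts_data1)
{
	return (sts_data1 >> 32) & 0xffff;
}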
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 46e77a2..212c121 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -479,7 +479,7 @@ qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
pfn = pci_info[i].id;
- if (pfn > QLCNIC_MAX_PCI_FUNC) {
+ if (pfn >= QLCNIC_MAX_PCI_FUNC) {
ret = QL_STATUS_INVALID_PARAM;
goto err_eswitch;
}
@@ -1136,6 +1136,8 @@ static int
__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
int ring;
+ u32 capab2;
+
struct qlcnic_host_rds_ring *rds_ring;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1146,6 +1148,12 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (qlcnic_set_eswitch_port_config(adapter))
return -EIO;
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
+ capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
+ if (capab2 & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
+ adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
+ }
+
if (qlcnic_fw_create_ctx(adapter))
return -EIO;
@@ -1215,6 +1223,7 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_napi_disable(adapter);
qlcnic_fw_destroy_ctx(adapter);
+ adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP;
qlcnic_reset_rx_buffers_list(adapter);
qlcnic_release_tx_buffers(adapter);
@@ -2024,6 +2033,7 @@ qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
vh = (struct vlan_ethhdr *)skb->data;
flags = FLAGS_VLAN_TAGGED;
vlan_tci = vh->h_vlan_TCI;
+ protocol = ntohs(vh->h_vlan_encapsulated_proto);
} else if (vlan_tx_tag_present(skb)) {
flags = FLAGS_VLAN_OOB;
vlan_tci = vlan_tx_tag_get(skb);
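__qlcnic_up() only reads the second capability register when the firmware sets the MORE_CAPS bit in the first one, and latches an adapter flag that the context setup and Rx path check later; __qlcnic_down() clears the flag again. A generic sketch of that two-level capability probe, with stand-in names for the register accessor and flag bits:

#include <linux/types.h>

#define CAP0_MORE_CAPS		(1U << 31)
#define CAP2_LRO_MAX_TCP_SEG	(1U << 2)
#define FLAG_LRO_MSS		0x8000

static u32 read_cap2(void)
{
	return 0;	/* stand-in for the driver's register read (e.g. QLCRD32) */
}

static void latch_lro_mss_flag(u32 cap0, u32 *flags)
{
	if (cap0 & CAP0_MORE_CAPS) {
		u32 cap2 = read_cap2();	/* only touch the register if it exists */

		if (cap2 & CAP2_LRO_MAX_TCP_SEG)
			*flags |= FLAG_LRO_MSS;
	}
}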
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 5a639df..a131d7b 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,13 +18,15 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.30.00.00-01"
+#define DRV_VERSION "v1.00.00.31"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
#define QLGE_VENDOR_ID 0x1077
#define QLGE_DEVICE_ID_8012 0x8012
#define QLGE_DEVICE_ID_8000 0x8000
+#define QLGE_MEZZ_SSYS_ID_068 0x0068
+#define QLGE_MEZZ_SSYS_ID_180 0x0180
#define MAX_CPUS 8
#define MAX_TX_RINGS MAX_CPUS
#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
@@ -1397,7 +1399,6 @@ struct tx_ring {
struct tx_ring_desc *q; /* descriptor list for the queue */
spinlock_t lock;
atomic_t tx_count; /* counts down for every outstanding IO */
- atomic_t queue_stopped; /* Turns queue off when full. */
struct delayed_work tx_work;
struct ql_adapter *qdev;
u64 tx_packets;
@@ -1535,6 +1536,14 @@ struct nic_stats {
u64 rx_1024_to_1518_pkts;
u64 rx_1519_to_max_pkts;
u64 rx_len_err_pkts;
+ /* Receive Mac Err stats */
+ u64 rx_code_err;
+ u64 rx_oversize_err;
+ u64 rx_undersize_err;
+ u64 rx_preamble_err;
+ u64 rx_frame_len_err;
+ u64 rx_crc_err;
+ u64 rx_err_count;
/*
* These stats come from offset 500h to 5C8h
* in the XGMAC register.
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 8e2c2a7..6f316ab 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -35,10 +35,152 @@
#include "qlge.h"
+struct ql_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define QL_SIZEOF(m) FIELD_SIZEOF(struct ql_adapter, m)
+#define QL_OFF(m) offsetof(struct ql_adapter, m)
+
+static const struct ql_stats ql_gstrings_stats[] = {
+ {"tx_pkts", QL_SIZEOF(nic_stats.tx_pkts), QL_OFF(nic_stats.tx_pkts)},
+ {"tx_bytes", QL_SIZEOF(nic_stats.tx_bytes), QL_OFF(nic_stats.tx_bytes)},
+ {"tx_mcast_pkts", QL_SIZEOF(nic_stats.tx_mcast_pkts),
+ QL_OFF(nic_stats.tx_mcast_pkts)},
+ {"tx_bcast_pkts", QL_SIZEOF(nic_stats.tx_bcast_pkts),
+ QL_OFF(nic_stats.tx_bcast_pkts)},
+ {"tx_ucast_pkts", QL_SIZEOF(nic_stats.tx_ucast_pkts),
+ QL_OFF(nic_stats.tx_ucast_pkts)},
+ {"tx_ctl_pkts", QL_SIZEOF(nic_stats.tx_ctl_pkts),
+ QL_OFF(nic_stats.tx_ctl_pkts)},
+ {"tx_pause_pkts", QL_SIZEOF(nic_stats.tx_pause_pkts),
+ QL_OFF(nic_stats.tx_pause_pkts)},
+ {"tx_64_pkts", QL_SIZEOF(nic_stats.tx_64_pkt),
+ QL_OFF(nic_stats.tx_64_pkt)},
+ {"tx_65_to_127_pkts", QL_SIZEOF(nic_stats.tx_65_to_127_pkt),
+ QL_OFF(nic_stats.tx_65_to_127_pkt)},
+ {"tx_128_to_255_pkts", QL_SIZEOF(nic_stats.tx_128_to_255_pkt),
+ QL_OFF(nic_stats.tx_128_to_255_pkt)},
+ {"tx_256_511_pkts", QL_SIZEOF(nic_stats.tx_256_511_pkt),
+ QL_OFF(nic_stats.tx_256_511_pkt)},
+ {"tx_512_to_1023_pkts", QL_SIZEOF(nic_stats.tx_512_to_1023_pkt),
+ QL_OFF(nic_stats.tx_512_to_1023_pkt)},
+ {"tx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.tx_1024_to_1518_pkt),
+ QL_OFF(nic_stats.tx_1024_to_1518_pkt)},
+ {"tx_1519_to_max_pkts", QL_SIZEOF(nic_stats.tx_1519_to_max_pkt),
+ QL_OFF(nic_stats.tx_1519_to_max_pkt)},
+ {"tx_undersize_pkts", QL_SIZEOF(nic_stats.tx_undersize_pkt),
+ QL_OFF(nic_stats.tx_undersize_pkt)},
+ {"tx_oversize_pkts", QL_SIZEOF(nic_stats.tx_oversize_pkt),
+ QL_OFF(nic_stats.tx_oversize_pkt)},
+ {"rx_bytes", QL_SIZEOF(nic_stats.rx_bytes), QL_OFF(nic_stats.rx_bytes)},
+ {"rx_bytes_ok", QL_SIZEOF(nic_stats.rx_bytes_ok),
+ QL_OFF(nic_stats.rx_bytes_ok)},
+ {"rx_pkts", QL_SIZEOF(nic_stats.rx_pkts), QL_OFF(nic_stats.rx_pkts)},
+ {"rx_pkts_ok", QL_SIZEOF(nic_stats.rx_pkts_ok),
+ QL_OFF(nic_stats.rx_pkts_ok)},
+ {"rx_bcast_pkts", QL_SIZEOF(nic_stats.rx_bcast_pkts),
+ QL_OFF(nic_stats.rx_bcast_pkts)},
+ {"rx_mcast_pkts", QL_SIZEOF(nic_stats.rx_mcast_pkts),
+ QL_OFF(nic_stats.rx_mcast_pkts)},
+ {"rx_ucast_pkts", QL_SIZEOF(nic_stats.rx_ucast_pkts),
+ QL_OFF(nic_stats.rx_ucast_pkts)},
+ {"rx_undersize_pkts", QL_SIZEOF(nic_stats.rx_undersize_pkts),
+ QL_OFF(nic_stats.rx_undersize_pkts)},
+ {"rx_oversize_pkts", QL_SIZEOF(nic_stats.rx_oversize_pkts),
+ QL_OFF(nic_stats.rx_oversize_pkts)},
+ {"rx_jabber_pkts", QL_SIZEOF(nic_stats.rx_jabber_pkts),
+ QL_OFF(nic_stats.rx_jabber_pkts)},
+ {"rx_undersize_fcerr_pkts",
+ QL_SIZEOF(nic_stats.rx_undersize_fcerr_pkts),
+ QL_OFF(nic_stats.rx_undersize_fcerr_pkts)},
+ {"rx_drop_events", QL_SIZEOF(nic_stats.rx_drop_events),
+ QL_OFF(nic_stats.rx_drop_events)},
+ {"rx_fcerr_pkts", QL_SIZEOF(nic_stats.rx_fcerr_pkts),
+ QL_OFF(nic_stats.rx_fcerr_pkts)},
+ {"rx_align_err", QL_SIZEOF(nic_stats.rx_align_err),
+ QL_OFF(nic_stats.rx_align_err)},
+ {"rx_symbol_err", QL_SIZEOF(nic_stats.rx_symbol_err),
+ QL_OFF(nic_stats.rx_symbol_err)},
+ {"rx_mac_err", QL_SIZEOF(nic_stats.rx_mac_err),
+ QL_OFF(nic_stats.rx_mac_err)},
+ {"rx_ctl_pkts", QL_SIZEOF(nic_stats.rx_ctl_pkts),
+ QL_OFF(nic_stats.rx_ctl_pkts)},
+ {"rx_pause_pkts", QL_SIZEOF(nic_stats.rx_pause_pkts),
+ QL_OFF(nic_stats.rx_pause_pkts)},
+ {"rx_64_pkts", QL_SIZEOF(nic_stats.rx_64_pkts),
+ QL_OFF(nic_stats.rx_64_pkts)},
+ {"rx_65_to_127_pkts", QL_SIZEOF(nic_stats.rx_65_to_127_pkts),
+ QL_OFF(nic_stats.rx_65_to_127_pkts)},
+ {"rx_128_255_pkts", QL_SIZEOF(nic_stats.rx_128_255_pkts),
+ QL_OFF(nic_stats.rx_128_255_pkts)},
+ {"rx_256_511_pkts", QL_SIZEOF(nic_stats.rx_256_511_pkts),
+ QL_OFF(nic_stats.rx_256_511_pkts)},
+ {"rx_512_to_1023_pkts", QL_SIZEOF(nic_stats.rx_512_to_1023_pkts),
+ QL_OFF(nic_stats.rx_512_to_1023_pkts)},
+ {"rx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.rx_1024_to_1518_pkts),
+ QL_OFF(nic_stats.rx_1024_to_1518_pkts)},
+ {"rx_1519_to_max_pkts", QL_SIZEOF(nic_stats.rx_1519_to_max_pkts),
+ QL_OFF(nic_stats.rx_1519_to_max_pkts)},
+ {"rx_len_err_pkts", QL_SIZEOF(nic_stats.rx_len_err_pkts),
+ QL_OFF(nic_stats.rx_len_err_pkts)},
+ {"rx_code_err", QL_SIZEOF(nic_stats.rx_code_err),
+ QL_OFF(nic_stats.rx_code_err)},
+ {"rx_oversize_err", QL_SIZEOF(nic_stats.rx_oversize_err),
+ QL_OFF(nic_stats.rx_oversize_err)},
+ {"rx_undersize_err", QL_SIZEOF(nic_stats.rx_undersize_err),
+ QL_OFF(nic_stats.rx_undersize_err)},
+ {"rx_preamble_err", QL_SIZEOF(nic_stats.rx_preamble_err),
+ QL_OFF(nic_stats.rx_preamble_err)},
+ {"rx_frame_len_err", QL_SIZEOF(nic_stats.rx_frame_len_err),
+ QL_OFF(nic_stats.rx_frame_len_err)},
+ {"rx_crc_err", QL_SIZEOF(nic_stats.rx_crc_err),
+ QL_OFF(nic_stats.rx_crc_err)},
+ {"rx_err_count", QL_SIZEOF(nic_stats.rx_err_count),
+ QL_OFF(nic_stats.rx_err_count)},
+ {"tx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames0),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames0)},
+ {"tx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames1),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames1)},
+ {"tx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames2),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames2)},
+ {"tx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames3),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames3)},
+ {"tx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames4),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames4)},
+ {"tx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames5),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames5)},
+ {"tx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames6),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames6)},
+ {"tx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames7),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames7)},
+ {"rx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames0),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames0)},
+ {"rx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames1),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames1)},
+ {"rx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames2),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames2)},
+ {"rx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames3),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames3)},
+ {"rx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames4),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames4)},
+ {"rx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames5),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames5)},
+ {"rx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames6),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames6)},
+ {"rx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames7),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames7)},
+ {"rx_nic_fifo_drop", QL_SIZEOF(nic_stats.rx_nic_fifo_drop),
+ QL_OFF(nic_stats.rx_nic_fifo_drop)},
+};
+
static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
"Loopback test (offline)"
};
#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
+#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
static int ql_update_ring_coalescing(struct ql_adapter *qdev)
{
@@ -183,73 +325,19 @@ quit:
QL_DUMP_STAT(qdev);
}
-static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
- {"tx_pkts"},
- {"tx_bytes"},
- {"tx_mcast_pkts"},
- {"tx_bcast_pkts"},
- {"tx_ucast_pkts"},
- {"tx_ctl_pkts"},
- {"tx_pause_pkts"},
- {"tx_64_pkts"},
- {"tx_65_to_127_pkts"},
- {"tx_128_to_255_pkts"},
- {"tx_256_511_pkts"},
- {"tx_512_to_1023_pkts"},
- {"tx_1024_to_1518_pkts"},
- {"tx_1519_to_max_pkts"},
- {"tx_undersize_pkts"},
- {"tx_oversize_pkts"},
- {"rx_bytes"},
- {"rx_bytes_ok"},
- {"rx_pkts"},
- {"rx_pkts_ok"},
- {"rx_bcast_pkts"},
- {"rx_mcast_pkts"},
- {"rx_ucast_pkts"},
- {"rx_undersize_pkts"},
- {"rx_oversize_pkts"},
- {"rx_jabber_pkts"},
- {"rx_undersize_fcerr_pkts"},
- {"rx_drop_events"},
- {"rx_fcerr_pkts"},
- {"rx_align_err"},
- {"rx_symbol_err"},
- {"rx_mac_err"},
- {"rx_ctl_pkts"},
- {"rx_pause_pkts"},
- {"rx_64_pkts"},
- {"rx_65_to_127_pkts"},
- {"rx_128_255_pkts"},
- {"rx_256_511_pkts"},
- {"rx_512_to_1023_pkts"},
- {"rx_1024_to_1518_pkts"},
- {"rx_1519_to_max_pkts"},
- {"rx_len_err_pkts"},
- {"tx_cbfc_pause_frames0"},
- {"tx_cbfc_pause_frames1"},
- {"tx_cbfc_pause_frames2"},
- {"tx_cbfc_pause_frames3"},
- {"tx_cbfc_pause_frames4"},
- {"tx_cbfc_pause_frames5"},
- {"tx_cbfc_pause_frames6"},
- {"tx_cbfc_pause_frames7"},
- {"rx_cbfc_pause_frames0"},
- {"rx_cbfc_pause_frames1"},
- {"rx_cbfc_pause_frames2"},
- {"rx_cbfc_pause_frames3"},
- {"rx_cbfc_pause_frames4"},
- {"rx_cbfc_pause_frames5"},
- {"rx_cbfc_pause_frames6"},
- {"rx_cbfc_pause_frames7"},
- {"rx_nic_fifo_drop"},
-};
-
static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
+ int index;
switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(buf, *ql_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN);
+ break;
case ETH_SS_STATS:
- memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr));
+ for (index = 0; index < QLGE_STATS_LEN; index++) {
+ memcpy(buf + index * ETH_GSTRING_LEN,
+ ql_gstrings_stats[index].stat_string,
+ ETH_GSTRING_LEN);
+ }
break;
}
}
@@ -260,7 +348,7 @@ static int ql_get_sset_count(struct net_device *dev, int sset)
case ETH_SS_TEST:
return QLGE_TEST_LEN;
case ETH_SS_STATS:
- return ARRAY_SIZE(ql_stats_str_arr);
+ return QLGE_STATS_LEN;
default:
return -EOPNOTSUPP;
}
@@ -271,69 +359,17 @@ ql_get_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *stats, u64 *data)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- struct nic_stats *s = &qdev->nic_stats;
+ int index, length;
+ length = QLGE_STATS_LEN;
ql_update_stats(qdev);
- *data++ = s->tx_pkts;
- *data++ = s->tx_bytes;
- *data++ = s->tx_mcast_pkts;
- *data++ = s->tx_bcast_pkts;
- *data++ = s->tx_ucast_pkts;
- *data++ = s->tx_ctl_pkts;
- *data++ = s->tx_pause_pkts;
- *data++ = s->tx_64_pkt;
- *data++ = s->tx_65_to_127_pkt;
- *data++ = s->tx_128_to_255_pkt;
- *data++ = s->tx_256_511_pkt;
- *data++ = s->tx_512_to_1023_pkt;
- *data++ = s->tx_1024_to_1518_pkt;
- *data++ = s->tx_1519_to_max_pkt;
- *data++ = s->tx_undersize_pkt;
- *data++ = s->tx_oversize_pkt;
- *data++ = s->rx_bytes;
- *data++ = s->rx_bytes_ok;
- *data++ = s->rx_pkts;
- *data++ = s->rx_pkts_ok;
- *data++ = s->rx_bcast_pkts;
- *data++ = s->rx_mcast_pkts;
- *data++ = s->rx_ucast_pkts;
- *data++ = s->rx_undersize_pkts;
- *data++ = s->rx_oversize_pkts;
- *data++ = s->rx_jabber_pkts;
- *data++ = s->rx_undersize_fcerr_pkts;
- *data++ = s->rx_drop_events;
- *data++ = s->rx_fcerr_pkts;
- *data++ = s->rx_align_err;
- *data++ = s->rx_symbol_err;
- *data++ = s->rx_mac_err;
- *data++ = s->rx_ctl_pkts;
- *data++ = s->rx_pause_pkts;
- *data++ = s->rx_64_pkts;
- *data++ = s->rx_65_to_127_pkts;
- *data++ = s->rx_128_255_pkts;
- *data++ = s->rx_256_511_pkts;
- *data++ = s->rx_512_to_1023_pkts;
- *data++ = s->rx_1024_to_1518_pkts;
- *data++ = s->rx_1519_to_max_pkts;
- *data++ = s->rx_len_err_pkts;
- *data++ = s->tx_cbfc_pause_frames0;
- *data++ = s->tx_cbfc_pause_frames1;
- *data++ = s->tx_cbfc_pause_frames2;
- *data++ = s->tx_cbfc_pause_frames3;
- *data++ = s->tx_cbfc_pause_frames4;
- *data++ = s->tx_cbfc_pause_frames5;
- *data++ = s->tx_cbfc_pause_frames6;
- *data++ = s->tx_cbfc_pause_frames7;
- *data++ = s->rx_cbfc_pause_frames0;
- *data++ = s->rx_cbfc_pause_frames1;
- *data++ = s->rx_cbfc_pause_frames2;
- *data++ = s->rx_cbfc_pause_frames3;
- *data++ = s->rx_cbfc_pause_frames4;
- *data++ = s->rx_cbfc_pause_frames5;
- *data++ = s->rx_cbfc_pause_frames6;
- *data++ = s->rx_cbfc_pause_frames7;
- *data++ = s->rx_nic_fifo_drop;
+ for (index = 0; index < length; index++) {
+ char *p = (char *)qdev +
+ ql_gstrings_stats[index].stat_offset;
+ *data++ = (ql_gstrings_stats[index].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : (*(u32 *)p);
+ }
}
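
The qlge ethtool rework above replaces a parallel string array plus a long hand-written list of "*data++ = ..." assignments with a single table whose entries carry the stat name, its size and its offset, so the strings and the values can no longer drift out of sync. A minimal userspace sketch of that table-driven pattern follows; the struct and field names here are illustrative, not the driver's exact ql_gstrings_stats definition.

#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdint.h>

#define GSTRING_LEN 32

struct demo_stats {
	uint64_t tx_pkts;
	uint64_t rx_pkts;
	uint64_t rx_crc_err;
};

struct demo_stats_entry {
	char name[GSTRING_LEN];
	int size;          /* sizeof() the field */
	size_t offset;     /* offsetof() within demo_stats */
};

#define DEMO_STAT(f) { #f, sizeof(((struct demo_stats *)0)->f), \
		       offsetof(struct demo_stats, f) }

static const struct demo_stats_entry demo_stats_tbl[] = {
	DEMO_STAT(tx_pkts),
	DEMO_STAT(rx_pkts),
	DEMO_STAT(rx_crc_err),
};

int main(void)
{
	struct demo_stats s = { .tx_pkts = 10, .rx_pkts = 7, .rx_crc_err = 1 };
	size_t i;

	/* Both the string list and the value dump walk the same table. */
	for (i = 0; i < sizeof(demo_stats_tbl) / sizeof(demo_stats_tbl[0]); i++) {
		const char *p = (const char *)&s + demo_stats_tbl[i].offset;
		uint64_t val = (demo_stats_tbl[i].size == sizeof(uint64_t)) ?
			       *(const uint64_t *)p : *(const uint32_t *)p;
		printf("%-16s %llu\n", demo_stats_tbl[i].name,
		       (unsigned long long)val);
	}
	return 0;
}

Adding a counter then means adding one table entry; ql_get_strings, ql_get_sset_count and ql_get_ethtool_stats all pick it up automatically.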
static int ql_get_settings(struct net_device *ndev,
@@ -388,30 +424,33 @@ static void ql_get_drvinfo(struct net_device *ndev,
static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- /* What we support. */
- wol->supported = WAKE_MAGIC;
- /* What we've currently got set. */
- wol->wolopts = qdev->wol;
+ unsigned short ssys_dev = qdev->pdev->subsystem_device;
+
+ /* WOL is only supported for mezz card. */
+ if (ssys_dev == QLGE_MEZZ_SSYS_ID_068 ||
+ ssys_dev == QLGE_MEZZ_SSYS_ID_180) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = qdev->wol;
+ }
}
static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- int status;
+ unsigned short ssys_dev = qdev->pdev->subsystem_device;
+ /* WOL is only supported for mezz card. */
+ if (ssys_dev != QLGE_MEZZ_SSYS_ID_068 &&
+ ssys_dev != QLGE_MEZZ_SSYS_ID_180) {
+ netif_info(qdev, drv, qdev->ndev,
+ "WOL is only supported for mezz card\n");
+ return -EOPNOTSUPP;
+ }
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
qdev->wol = wol->wolopts;
netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
- if (!qdev->wol) {
- u32 wol = 0;
- status = ql_mb_wol_mode(qdev, wol);
- netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n",
- status == 0 ? "cleared successfully" : "clear failed",
- wol);
- }
-
return 0;
}
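
The new get_wol/set_wol handlers gate the feature on the PCI subsystem device ID: only the mezzanine variants advertise WAKE_MAGIC, and everything else gets -EOPNOTSUPP. A hedged standalone sketch of that gate; the subsystem IDs below are placeholders, not the driver's QLGE_MEZZ_SSYS_ID_* values, and WAKE_MAGIC is shown with its usual ethtool bit value for illustration.

#include <stdio.h>
#include <errno.h>

#define WAKE_MAGIC        0x20      /* illustrative, mirrors ethtool's flag bit */
#define MEZZ_SSYS_ID_A    0x0068    /* placeholder subsystem IDs */
#define MEZZ_SSYS_ID_B    0x0180

static int demo_set_wol(unsigned short ssys_dev, unsigned int wolopts,
			unsigned int *saved_wol)
{
	/* Feature is only wired up on the mezz variants. */
	if (ssys_dev != MEZZ_SSYS_ID_A && ssys_dev != MEZZ_SSYS_ID_B)
		return -EOPNOTSUPP;
	/* Only magic-packet wake is supported. */
	if (wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	*saved_wol = wolopts;
	return 0;
}

int main(void)
{
	unsigned int wol = 0;

	printf("mezz + magic: %d\n", demo_set_wol(MEZZ_SSYS_ID_A, WAKE_MAGIC, &wol));
	printf("non-mezz:     %d\n", demo_set_wol(0x1234, WAKE_MAGIC, &wol));
	return 0;
}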
@@ -528,6 +567,8 @@ static void ql_self_test(struct net_device *ndev,
{
struct ql_adapter *qdev = netdev_priv(ndev);
+ memset(data, 0, sizeof(u64) * QLGE_TEST_LEN);
+
if (netif_running(ndev)) {
set_bit(QL_SELFTEST, &qdev->flags);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 09d8d33..3769f57 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1433,6 +1433,36 @@ map_error:
return NETDEV_TX_BUSY;
}
+/* Categorizing receive firmware frame errors */
+static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
+{
+ struct nic_stats *stats = &qdev->nic_stats;
+
+ stats->rx_err_count++;
+
+ switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
+ case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
+ stats->rx_code_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
+ stats->rx_oversize_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
+ stats->rx_undersize_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
+ stats->rx_preamble_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
+ stats->rx_frame_len_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_CRC:
+ stats->rx_crc_err++;
+ default:
+ break;
+ }
+}
+
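Instead of dropping error frames in each receive path with only a generic rx_errors bump, the series classifies the firmware error code once, at the top-level completion handler, and feeds per-cause counters that the new ethtool stats table can expose. The shape of that classification, reduced to a compilable sketch with made-up stand-ins for the IB_MAC_IOCB_RSP_* mask and codes:

#include <stdio.h>
#include <stdint.h>

#define ERR_MASK        0x0f    /* stand-in for IB_MAC_IOCB_RSP_ERR_MASK */
#define ERR_CODE_CRC    0x01    /* stand-in error codes */
#define ERR_CODE_LEN    0x02

struct demo_err_stats {
	uint64_t rx_err_count;
	uint64_t rx_crc_err;
	uint64_t rx_frame_len_err;
	uint64_t rx_other_err;
};

static void demo_categorize_rx_err(struct demo_err_stats *st, uint8_t flags)
{
	st->rx_err_count++;

	switch (flags & ERR_MASK) {
	case ERR_CODE_CRC:
		st->rx_crc_err++;
		break;
	case ERR_CODE_LEN:
		st->rx_frame_len_err++;
		break;
	default:
		st->rx_other_err++;
		break;
	}
}

int main(void)
{
	struct demo_err_stats st = { 0 };

	demo_categorize_rx_err(&st, ERR_CODE_CRC);
	demo_categorize_rx_err(&st, 0x0c);
	printf("errors=%llu crc=%llu other=%llu\n",
	       (unsigned long long)st.rx_err_count,
	       (unsigned long long)st.rx_crc_err,
	       (unsigned long long)st.rx_other_err);
	return 0;
}
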
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
@@ -1499,15 +1529,6 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
addr = lbq_desc->p.pg_chunk.va;
prefetch(addr);
-
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- netif_info(qdev, drv, qdev->ndev,
- "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
- rx_ring->rx_errors++;
- goto err_out;
- }
-
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
@@ -1546,7 +1567,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
struct iphdr *iph =
(struct iphdr *) ((u8 *)addr + ETH_HLEN);
if (!(iph->frag_off &
- cpu_to_be16(IP_MF|IP_OFFSET))) {
+ htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG,
qdev->ndev,
@@ -1593,15 +1614,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
memcpy(skb_put(new_skb, length), skb->data, length);
skb = new_skb;
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- netif_info(qdev, drv, qdev->ndev,
- "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
- dev_kfree_skb_any(skb);
- rx_ring->rx_errors++;
- return;
- }
-
/* loopback self test for ethtool */
if (test_bit(QL_SELFTEST, &qdev->flags)) {
ql_check_lb_frame(qdev, skb);
@@ -1619,7 +1631,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
}
prefetch(skb->data);
- skb->dev = ndev;
if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%s Multicast.\n",
@@ -1654,7 +1665,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph = (struct iphdr *) skb->data;
if (!(iph->frag_off &
- ntohs(IP_MF|IP_OFFSET))) {
+ htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG,
qdev->ndev,
@@ -1908,15 +1919,6 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
return;
}
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- netif_info(qdev, drv, qdev->ndev,
- "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
- dev_kfree_skb_any(skb);
- rx_ring->rx_errors++;
- return;
- }
-
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
@@ -1934,7 +1936,6 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
}
prefetch(skb->data);
- skb->dev = ndev;
if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
@@ -1968,7 +1969,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph = (struct iphdr *) skb->data;
if (!(iph->frag_off &
- ntohs(IP_MF|IP_OFFSET))) {
+ htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"TCP checksum done!\n");
@@ -1999,6 +2000,12 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
+ return (unsigned long)length;
+ }
+
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
/* The data and headers are split into
* separate buffers.
@@ -2173,8 +2180,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
ql_write_cq_idx(rx_ring);
tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
- if (atomic_read(&tx_ring->queue_stopped) &&
- (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
+ if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
/*
* The queue got stopped because the tx_ring was full.
* Wake it up, because it's now at least 25% empty.
@@ -2558,10 +2564,9 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
netif_info(qdev, tx_queued, qdev->ndev,
- "%s: shutting down tx queue %d du to lack of resources.\n",
+ "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
__func__, tx_ring_idx);
netif_stop_subqueue(ndev, tx_ring->wq_id);
- atomic_inc(&tx_ring->queue_stopped);
tx_ring->tx_errors++;
return NETDEV_TX_BUSY;
}
@@ -2612,6 +2617,16 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
tx_ring->prod_idx, skb->len);
atomic_dec(&tx_ring->tx_count);
+
+ if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
+ netif_stop_subqueue(ndev, tx_ring->wq_id);
+ if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
+ /*
+ * The queue got stopped because the tx_ring was full.
+ * Wake it up, because it's now at least 25% empty.
+ */
+ netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
+ }
return NETDEV_TX_OK;
}
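
The send path now stops the subqueue itself when descriptors run low and immediately re-checks the count, waking the queue if completions freed space in the meantime; the private queue_stopped atomic is dropped in favour of the stock __netif_subqueue_stopped() state. A minimal sketch of that stop/re-check/wake ordering using C11 atomics; the thresholds and names are illustrative only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_LEN     256
#define STOP_THRESH  2                 /* stop when fewer free slots than this */
#define WAKE_THRESH  (RING_LEN / 4)    /* wake once at least 25% is free */

static atomic_int free_slots = RING_LEN;
static atomic_bool queue_stopped;

/* Transmit side: consume a slot, then stop and re-check. */
static void demo_xmit_tail(void)
{
	atomic_fetch_sub(&free_slots, 1);

	if (atomic_load(&free_slots) < STOP_THRESH) {
		atomic_store(&queue_stopped, true);
		/* A completion may have run between the check and the stop:
		 * re-read the count and undo the stop if space came back. */
		if (atomic_load(&free_slots) > WAKE_THRESH)
			atomic_store(&queue_stopped, false);
	}
}

/* Completion side: release slots and wake the queue at the 25% mark. */
static void demo_tx_complete(int n)
{
	atomic_fetch_add(&free_slots, n);
	if (atomic_load(&queue_stopped) &&
	    atomic_load(&free_slots) > WAKE_THRESH)
		atomic_store(&queue_stopped, false);
}

int main(void)
{
	atomic_store(&free_slots, 1);     /* pretend the ring is nearly full */
	demo_xmit_tail();
	printf("stopped: %d\n", (int)atomic_load(&queue_stopped));
	demo_tx_complete(200);
	printf("stopped: %d\n", (int)atomic_load(&queue_stopped));
	return 0;
}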
@@ -2680,7 +2695,6 @@ static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
tx_ring_desc++;
}
atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
- atomic_set(&tx_ring->queue_stopped, 0);
}
static void ql_free_tx_resources(struct ql_adapter *qdev,
@@ -2703,10 +2717,9 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
&tx_ring->wq_base_dma);
if ((tx_ring->wq_base == NULL) ||
- tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
- netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
- return -ENOMEM;
- }
+ tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
+ goto pci_alloc_err;
+
tx_ring->q =
kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
if (tx_ring->q == NULL)
@@ -2716,6 +2729,9 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
err:
pci_free_consistent(qdev->pdev, tx_ring->wq_size,
tx_ring->wq_base, tx_ring->wq_base_dma);
+ tx_ring->wq_base = NULL;
+pci_alloc_err:
+ netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
return -ENOMEM;
}
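
The tx resource allocator above is reshaped so both failure points funnel through labels that free only what was actually allocated and report once, and wq_base is NULLed so a later free path cannot touch a stale pointer. The same staged goto unwinding in a standalone sketch:

#include <stdio.h>
#include <stdlib.h>

struct demo_ring {
	void *wq_base;   /* the DMA-coherent area in the real driver */
	void *q;         /* descriptor bookkeeping array */
};

static int demo_alloc_ring(struct demo_ring *r, size_t wq_size, size_t q_size)
{
	r->wq_base = malloc(wq_size);
	if (!r->wq_base)
		goto alloc_err;

	r->q = malloc(q_size);
	if (!r->q)
		goto free_wq;

	return 0;

free_wq:
	free(r->wq_base);
	r->wq_base = NULL;      /* avoid a stale pointer / double free later */
alloc_err:
	fprintf(stderr, "ring alloc failed\n");
	return -1;
}

int main(void)
{
	struct demo_ring r = { 0 };

	if (demo_alloc_ring(&r, 4096, 1024) == 0) {
		puts("allocated");
		free(r.q);
		free(r.wq_base);
	}
	return 0;
}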
@@ -4649,7 +4665,7 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
int err = 0;
ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
- min(MAX_CPUS, (int)num_online_cpus()));
+ min(MAX_CPUS, netif_get_num_default_rss_queues()));
if (!ndev)
return -ENOMEM;
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 4de7364..557a265 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1096,20 +1096,20 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
if (err) {
dev_err(&pdev->dev, "32-bit PCI DMA addresses"
"not supported by the card\n");
- goto err_out;
+ goto err_out_disable_dev;
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "32-bit PCI DMA addresses"
"not supported by the card\n");
- goto err_out;
+ goto err_out_disable_dev;
}
/* IO Size check */
if (pci_resource_len(pdev, bar) < io_size) {
dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
err = -EIO;
- goto err_out;
+ goto err_out_disable_dev;
}
pci_set_master(pdev);
@@ -1117,7 +1117,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(struct r6040_private));
if (!dev) {
err = -ENOMEM;
- goto err_out;
+ goto err_out_disable_dev;
}
SET_NETDEV_DEV(dev, &pdev->dev);
lp = netdev_priv(dev);
@@ -1233,11 +1233,15 @@ err_out_mdio_irq:
err_out_mdio:
mdiobus_free(lp->mii_bus);
err_out_unmap:
+ netif_napi_del(&lp->napi);
+ pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
err_out_free_dev:
free_netdev(dev);
+err_out_disable_dev:
+ pci_disable_device(pdev);
err_out:
return err;
}
@@ -1251,6 +1255,8 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev)
mdiobus_unregister(lp->mii_bus);
kfree(lp->mii_bus->irq);
mdiobus_free(lp->mii_bus);
+ netif_napi_del(&lp->napi);
+ pci_iounmap(pdev, lp->base);
pci_release_regions(pdev);
free_netdev(dev);
pci_disable_device(pdev);
@@ -1271,17 +1277,4 @@ static struct pci_driver r6040_driver = {
.remove = __devexit_p(r6040_remove_one),
};
-
-static int __init r6040_init(void)
-{
- return pci_register_driver(&r6040_driver);
-}
-
-
-static void __exit r6040_cleanup(void)
-{
- pci_unregister_driver(&r6040_driver);
-}
-
-module_init(r6040_init);
-module_exit(r6040_cleanup);
+module_pci_driver(r6040_driver);
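The module_pci_driver() helper removes exactly the init/exit boilerplate deleted above; it expands roughly as follows (see the module_driver()/module_pci_driver() definitions in the matching kernel tree for the exact form):

/* Approximate expansion of module_pci_driver(r6040_driver): */
static int __init r6040_driver_init(void)
{
	return pci_register_driver(&r6040_driver);
}
module_init(r6040_driver_init);

static void __exit r6040_driver_exit(void)
{
	pci_unregister_driver(&r6040_driver);
}
module_exit(r6040_driver_exit);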
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 5eef290..995d0cf 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -979,6 +979,17 @@ static void cp_init_hw (struct cp_private *cp)
cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
+ cpw32_f(HiTxRingAddr, 0);
+ cpw32_f(HiTxRingAddr + 4, 0);
+
+ ring_dma = cp->ring_dma;
+ cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+ cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+ ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+ cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+ cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
cp_start_hw(cp);
cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
@@ -992,17 +1003,6 @@ static void cp_init_hw (struct cp_private *cp)
cpw8(Config5, cpr8(Config5) & PMEStatus);
- cpw32_f(HiTxRingAddr, 0);
- cpw32_f(HiTxRingAddr + 4, 0);
-
- ring_dma = cp->ring_dma;
- cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
- cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
- ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
- cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
- cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
cpw16(MultiIntr, 0);
cpw8_f(Cfg9346, Cfg9346_Lock);
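
The functional change in the two cp_init_hw hunks is the reordering: the ring addresses are now programmed before cp_start_hw() runs rather than after. The "(ring_dma >> 16) >> 16" idiom carried along with the move is also worth a note: it extracts the high 32 bits while staying well defined even on builds where dma_addr_t is only 32 bits wide, where a single ">> 32" would be undefined behaviour. A compilable illustration:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for dma_addr_t, which may be 32 or 64 bits depending on the build. */
typedef uint64_t demo_dma_addr_t;

int main(void)
{
	demo_dma_addr_t ring_dma = 0x123456789abcdef0ULL;

	uint32_t lo = ring_dma & 0xffffffff;
	/* Two 16-bit shifts: yields the high word on a 64-bit address, and
	 * harmlessly yields 0 if the type were only 32 bits, without relying
	 * on an undefined ">> 32" of a 32-bit value. */
	uint32_t hi = (ring_dma >> 16) >> 16;

	printf("lo=0x%08x hi=0x%08x\n", lo, hi);
	return 0;
}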
@@ -1636,7 +1636,7 @@ static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
static void eeprom_cmd_end(void __iomem *ee_addr)
{
- writeb (~EE_CS, ee_addr);
+ writeb(0, ee_addr);
eeprom_delay ();
}
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 03df076..1d83565 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1173,7 +1173,7 @@ static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_l
}
/* Terminate the EEPROM access. */
- RTL_W8 (Cfg9346, ~EE_CS);
+ RTL_W8(Cfg9346, 0);
eeprom_delay ();
return retval;
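
Both EEPROM fixes (8139cp and 8139too) replace a write of ~EE_CS with a write of 0 when ending an access: as an 8-bit store, ~EE_CS does not mean "chip select low" but "every bit except chip select high", which can set unrelated control bits in the 9346CR register. A two-line demonstration of the value actually written, taking EE_CS as the bit-3 value these drivers use (illustrative):

#include <stdio.h>
#include <stdint.h>

#define EE_CS 0x08   /* chip-select bit as used by these drivers (illustrative) */

int main(void)
{
	uint8_t old_end = (uint8_t)~EE_CS;   /* what writeb(~EE_CS, ...) stored */
	uint8_t new_end = 0;                 /* what the patch stores instead  */

	printf("old: 0x%02x  new: 0x%02x\n", old_end, new_end);
	return 0;
}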
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 00b4f56..b47d5b3 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -46,6 +46,8 @@
#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
+#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
+#define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
#ifdef RTL8169_DEBUG
#define assert(expr) \
@@ -141,6 +143,9 @@ enum mac_version {
RTL_GIGA_MAC_VER_36,
RTL_GIGA_MAC_VER_37,
RTL_GIGA_MAC_VER_38,
+ RTL_GIGA_MAC_VER_39,
+ RTL_GIGA_MAC_VER_40,
+ RTL_GIGA_MAC_VER_41,
RTL_GIGA_MAC_NONE = 0xff,
};
@@ -259,6 +264,14 @@ static const struct {
[RTL_GIGA_MAC_VER_38] =
_R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_39] =
+ _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
+ JUMBO_1K, true),
+ [RTL_GIGA_MAC_VER_40] =
+ _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1,
+ JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_41] =
+ _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
};
#undef _R
@@ -389,8 +402,12 @@ enum rtl8168_8101_registers {
TWSI = 0xd2,
MCU = 0xd3,
#define NOW_IS_OOB (1 << 7)
+#define TX_EMPTY (1 << 5)
+#define RX_EMPTY (1 << 4)
+#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
#define EN_NDP (1 << 3)
#define EN_OOB_RESET (1 << 2)
+#define LINK_LIST_RDY (1 << 1)
EFUSEAR = 0xdc,
#define EFUSEAR_FLAG 0x80000000
#define EFUSEAR_WRITE_CMD 0x80000000
@@ -416,6 +433,7 @@ enum rtl8168_registers {
#define ERIAR_MASK_SHIFT 12
#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
+#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
EPHY_RXER_NUM = 0x7c,
OCPDR = 0xb0, /* OCP GPHY access */
@@ -428,10 +446,14 @@ enum rtl8168_registers {
#define OCPAR_FLAG 0x80000000
#define OCPAR_GPHY_WRITE_CMD 0x8000f060
#define OCPAR_GPHY_READ_CMD 0x0000f060
+ GPHY_OCP = 0xb8,
RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
MISC = 0xf0, /* 8168e only. */
#define TXPLA_RST (1 << 29)
+#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
#define PWM_EN (1 << 22)
+#define RXDV_GATED_EN (1 << 19)
+#define EARLY_TALLY_EN (1 << 16)
};
enum rtl_register_content {
@@ -721,8 +743,8 @@ struct rtl8169_private {
u16 event_slow;
struct mdio_ops {
- void (*write)(void __iomem *, int, int);
- int (*read)(void __iomem *, int);
+ void (*write)(struct rtl8169_private *, int, int);
+ int (*read)(struct rtl8169_private *, int);
} mdio_ops;
struct pll_power_ops {
@@ -736,8 +758,8 @@ struct rtl8169_private {
} jumbo_ops;
struct csi_ops {
- void (*write)(void __iomem *, int, int);
- u32 (*read)(void __iomem *, int);
+ void (*write)(struct rtl8169_private *, int, int);
+ u32 (*read)(struct rtl8169_private *, int);
} csi_ops;
int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
@@ -774,6 +796,8 @@ struct rtl8169_private {
} phy_action;
} *rtl_fw;
#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
+
+ u32 ocp_base;
};
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -794,6 +818,8 @@ MODULE_FIRMWARE(FIRMWARE_8168F_1);
MODULE_FIRMWARE(FIRMWARE_8168F_2);
MODULE_FIRMWARE(FIRMWARE_8402_1);
MODULE_FIRMWARE(FIRMWARE_8411_1);
+MODULE_FIRMWARE(FIRMWARE_8106E_1);
+MODULE_FIRMWARE(FIRMWARE_8168G_1);
static void rtl_lock_work(struct rtl8169_private *tp)
{
@@ -818,47 +844,114 @@ static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
}
}
+struct rtl_cond {
+ bool (*check)(struct rtl8169_private *);
+ const char *msg;
+};
+
+static void rtl_udelay(unsigned int d)
+{
+ udelay(d);
+}
+
+static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
+ void (*delay)(unsigned int), unsigned int d, int n,
+ bool high)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ delay(d);
+ if (c->check(tp) == high)
+ return true;
+ }
+ netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
+ c->msg, !high, n, d);
+ return false;
+}
+
+static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned int d, int n)
+{
+ return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
+}
+
+static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned int d, int n)
+{
+ return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
+}
+
+static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned int d, int n)
+{
+ return rtl_loop_wait(tp, c, msleep, d, n, true);
+}
+
+static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned int d, int n)
+{
+ return rtl_loop_wait(tp, c, msleep, d, n, false);
+}
+
+#define DECLARE_RTL_COND(name) \
+static bool name ## _check(struct rtl8169_private *); \
+ \
+static const struct rtl_cond name = { \
+ .check = name ## _check, \
+ .msg = #name \
+}; \
+ \
+static bool name ## _check(struct rtl8169_private *tp)
+
+DECLARE_RTL_COND(rtl_ocpar_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(OCPAR) & OCPAR_FLAG;
+}
+
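The r8169 rework funnels every "poll a flag N times with a fixed delay" loop through one helper driven by a small condition object, so the retry count, the delay function and the timeout message all live in one place; the rtl_udelay_/rtl_msleep_loop_wait_high/low wrappers and DECLARE_RTL_COND() then keep each call site to a single line. A standalone model of the pattern, with usleep standing in for the kernel's udelay/msleep:

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

struct demo_cond {
	bool (*check)(void *ctx);
	const char *msg;
};

static bool demo_loop_wait(void *ctx, const struct demo_cond *c,
			   unsigned int delay_us, int n, bool high)
{
	int i;

	for (i = 0; i < n; i++) {
		usleep(delay_us);
		if (c->check(ctx) == high)
			return true;
	}
	fprintf(stderr, "%s == %d (loop: %d, delay: %u)\n",
		c->msg, !high, n, delay_us);
	return false;
}

/* Example condition: a "hardware" flag stored in a plain int. */
static bool demo_flag_check(void *ctx)
{
	return *(int *)ctx != 0;
}

static const struct demo_cond demo_flag_cond = {
	.check = demo_flag_check,
	.msg   = "demo_flag_cond",
};

int main(void)
{
	int flag = 1;

	/* Wait for the flag to go high: succeeds on the first pass here. */
	printf("high: %d\n", demo_loop_wait(&flag, &demo_flag_cond, 10, 5, true));
	/* Wait for it to go low: times out and prints the condition name. */
	printf("low:  %d\n", demo_loop_wait(&flag, &demo_flag_cond, 10, 5, false));
	return 0;
}
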
static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
void __iomem *ioaddr = tp->mmio_addr;
- int i;
RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
- for (i = 0; i < 20; i++) {
- udelay(100);
- if (RTL_R32(OCPAR) & OCPAR_FLAG)
- break;
- }
- return RTL_R32(OCPDR);
+
+ return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
+ RTL_R32(OCPDR) : ~0;
}
static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
{
void __iomem *ioaddr = tp->mmio_addr;
- int i;
RTL_W32(OCPDR, data);
RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
- for (i = 0; i < 20; i++) {
- udelay(100);
- if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0)
- break;
- }
+
+ rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
+}
+
+DECLARE_RTL_COND(rtl_eriar_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(ERIAR) & ERIAR_FLAG;
}
static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
void __iomem *ioaddr = tp->mmio_addr;
- int i;
RTL_W8(ERIDR, cmd);
RTL_W32(ERIAR, 0x800010e8);
msleep(2);
- for (i = 0; i < 5; i++) {
- udelay(100);
- if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
- break;
- }
+
+ if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
+ return;
ocp_write(tp, 0x1, 0x30, 0x00000001);
}
@@ -872,36 +965,27 @@ static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
}
-static void rtl8168_driver_start(struct rtl8169_private *tp)
+DECLARE_RTL_COND(rtl_ocp_read_cond)
{
u16 reg;
- int i;
-
- rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
reg = rtl8168_get_ocp_reg(tp);
- for (i = 0; i < 10; i++) {
- msleep(10);
- if (ocp_read(tp, 0x0f, reg) & 0x00000800)
- break;
- }
+ return ocp_read(tp, 0x0f, reg) & 0x00000800;
}
-static void rtl8168_driver_stop(struct rtl8169_private *tp)
+static void rtl8168_driver_start(struct rtl8169_private *tp)
{
- u16 reg;
- int i;
+ rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
- rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
+ rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
+}
- reg = rtl8168_get_ocp_reg(tp);
+static void rtl8168_driver_stop(struct rtl8169_private *tp)
+{
+ rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
- for (i = 0; i < 10; i++) {
- msleep(10);
- if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0)
- break;
- }
+ rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
}
static int r8168dp_check_dash(struct rtl8169_private *tp)
@@ -911,21 +995,114 @@ static int r8168dp_check_dash(struct rtl8169_private *tp)
return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
}
-static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
{
- int i;
+ if (reg & 0xffff0001) {
+ netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
+ return true;
+ }
+ return false;
+}
- RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0x1f) << 16 | (value & 0xffff));
+DECLARE_RTL_COND(rtl_ocp_gphy_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
- for (i = 20; i > 0; i--) {
- /*
- * Check if the RTL8169 has completed writing to the specified
- * MII register.
- */
- if (!(RTL_R32(PHYAR) & 0x80000000))
- break;
- udelay(25);
+ return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
+}
+
+static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (rtl_ocp_reg_failure(tp, reg))
+ return;
+
+ RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
+
+ rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
+}
+
+static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (rtl_ocp_reg_failure(tp, reg))
+ return 0;
+
+ RTL_W32(GPHY_OCP, reg << 15);
+
+ return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
+ (RTL_R32(GPHY_OCP) & 0xffff) : ~0;
+}
+
+static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
+{
+ int val;
+
+ val = r8168_phy_ocp_read(tp, reg);
+ r8168_phy_ocp_write(tp, reg, (val | p) & ~m);
+}
+
+static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (rtl_ocp_reg_failure(tp, reg))
+ return;
+
+ RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
+}
+
+static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (rtl_ocp_reg_failure(tp, reg))
+ return 0;
+
+ RTL_W32(OCPDR, reg << 15);
+
+ return RTL_R32(OCPDR);
+}
+
+#define OCP_STD_PHY_BASE 0xa400
+
+static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
+{
+ if (reg == 0x1f) {
+ tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
+ return;
}
+
+ if (tp->ocp_base != OCP_STD_PHY_BASE)
+ reg -= 0x10;
+
+ r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
+}
+
+static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
+{
+ if (tp->ocp_base != OCP_STD_PHY_BASE)
+ reg -= 0x10;
+
+ return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
+}
+
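The 8168G MDIO ops map the classic paged MII register model onto the chip's OCP window: a write to page register 0x1f selects an OCP base (a value of 0 restores OCP_STD_PHY_BASE), every other register is addressed as base + reg * 2, and non-standard pages are offset by 0x10, exactly as the r8168g_mdio_* code above does. A small standalone model of just that address translation, with a printf standing in for the OCP register write:

#include <stdio.h>
#include <stdint.h>

#define OCP_STD_PHY_BASE 0xa400

struct demo_phy {
	uint32_t ocp_base;
};

/* Translate an MII (page, reg) access into an OCP address. */
static void demo_mdio_write(struct demo_phy *phy, int reg, int value)
{
	if (reg == 0x1f) {                    /* page select */
		phy->ocp_base = value ? (uint32_t)value << 4 : OCP_STD_PHY_BASE;
		return;
	}

	if (phy->ocp_base != OCP_STD_PHY_BASE)
		reg -= 0x10;

	printf("OCP write addr=0x%04x val=0x%04x\n",
	       phy->ocp_base + reg * 2, value);
}

int main(void)
{
	struct demo_phy phy = { .ocp_base = OCP_STD_PHY_BASE };

	demo_mdio_write(&phy, 0x00, 0x1234);  /* standard page: 0xa400 + 0   */
	demo_mdio_write(&phy, 0x1f, 0x0a43);  /* select page 0x0a43          */
	demo_mdio_write(&phy, 0x11, 0x5678);  /* 0xa430 + (0x11 - 0x10) * 2  */
	return 0;
}
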
+DECLARE_RTL_COND(rtl_phyar_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(PHYAR) & 0x80000000;
+}
+
+static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
+
+ rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
/*
* According to hardware specs a 20us delay is required after write
* complete indication, but before sending next command.
@@ -933,23 +1110,16 @@ static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
udelay(20);
}
-static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
+static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
{
- int i, value = -1;
+ void __iomem *ioaddr = tp->mmio_addr;
+ int value;
- RTL_W32(PHYAR, 0x0 | (reg_addr & 0x1f) << 16);
+ RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);
+
+ value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
+ RTL_R32(PHYAR) & 0xffff : ~0;
- for (i = 20; i > 0; i--) {
- /*
- * Check if the RTL8169 has completed retrieving data from
- * the specified MII register.
- */
- if (RTL_R32(PHYAR) & 0x80000000) {
- value = RTL_R32(PHYAR) & 0xffff;
- break;
- }
- udelay(25);
- }
/*
* According to hardware specs a 20us delay is required after read
* complete indication, but before sending next command.
@@ -959,45 +1129,35 @@ static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
return value;
}
-static void r8168dp_1_mdio_access(void __iomem *ioaddr, int reg_addr, u32 data)
+static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
{
- int i;
+ void __iomem *ioaddr = tp->mmio_addr;
- RTL_W32(OCPDR, data |
- ((reg_addr & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
+ RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
RTL_W32(EPHY_RXER_NUM, 0);
- for (i = 0; i < 100; i++) {
- mdelay(1);
- if (!(RTL_R32(OCPAR) & OCPAR_FLAG))
- break;
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
}
-static void r8168dp_1_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
- r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_WRITE_CMD |
- (value & OCPDR_DATA_MASK));
+ r8168dp_1_mdio_access(tp, reg,
+ OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
}
-static int r8168dp_1_mdio_read(void __iomem *ioaddr, int reg_addr)
+static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
{
- int i;
+ void __iomem *ioaddr = tp->mmio_addr;
- r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_READ_CMD);
+ r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
mdelay(1);
RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
RTL_W32(EPHY_RXER_NUM, 0);
- for (i = 0; i < 100; i++) {
- mdelay(1);
- if (RTL_R32(OCPAR) & OCPAR_FLAG)
- break;
- }
-
- return RTL_R32(OCPDR) & OCPDR_DATA_MASK;
+ return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
+ RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
}
#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
@@ -1012,22 +1172,25 @@ static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
}
-static void r8168dp_2_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
+ void __iomem *ioaddr = tp->mmio_addr;
+
r8168dp_2_mdio_start(ioaddr);
- r8169_mdio_write(ioaddr, reg_addr, value);
+ r8169_mdio_write(tp, reg, value);
r8168dp_2_mdio_stop(ioaddr);
}
-static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr)
+static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
{
+ void __iomem *ioaddr = tp->mmio_addr;
int value;
r8168dp_2_mdio_start(ioaddr);
- value = r8169_mdio_read(ioaddr, reg_addr);
+ value = r8169_mdio_read(tp, reg);
r8168dp_2_mdio_stop(ioaddr);
@@ -1036,12 +1199,12 @@ static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr)
static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
{
- tp->mdio_ops.write(tp->mmio_addr, location, val);
+ tp->mdio_ops.write(tp, location, val);
}
static int rtl_readphy(struct rtl8169_private *tp, int location)
{
- return tp->mdio_ops.read(tp->mmio_addr, location);
+ return tp->mdio_ops.read(tp, location);
}
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
@@ -1072,79 +1235,64 @@ static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
return rtl_readphy(tp, location);
}
-static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
+DECLARE_RTL_COND(rtl_ephyar_cond)
{
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(EPHYAR) & EPHYAR_FLAG;
+}
+
+static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
- for (i = 0; i < 100; i++) {
- if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
- break;
- udelay(10);
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
+
+ udelay(10);
}
-static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
+static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
{
- u16 value = 0xffff;
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
- for (i = 0; i < 100; i++) {
- if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
- value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
- break;
- }
- udelay(10);
- }
-
- return value;
+ return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
+ RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
-static
-void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type)
+static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
+ u32 val, int type)
{
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
BUG_ON((addr & 3) || (mask == 0));
RTL_W32(ERIDR, val);
RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
- for (i = 0; i < 100; i++) {
- if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
- break;
- udelay(100);
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
-static u32 rtl_eri_read(void __iomem *ioaddr, int addr, int type)
+static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
- u32 value = ~0x00;
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
- for (i = 0; i < 100; i++) {
- if (RTL_R32(ERIAR) & ERIAR_FLAG) {
- value = RTL_R32(ERIDR);
- break;
- }
- udelay(100);
- }
-
- return value;
+ return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
+ RTL_R32(ERIDR) : ~0;
}
-static void
-rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type)
+static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
+ u32 m, int type)
{
u32 val;
- val = rtl_eri_read(ioaddr, addr, type);
- rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type);
+ val = rtl_eri_read(tp, addr, type);
+ rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
}
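
rtl_w1w0_eri() above, like rtl_w1w0_phy_ocp() earlier, is a read-modify-write helper that combines a set mask (p) and a clear mask (m) in a single register update; the two variants differ only in which mask wins if p and m overlap. The generic shape, reduced to a compilable sketch where the "register" is a plain variable:

#include <stdio.h>
#include <stdint.h>

/* Read-modify-write with a set mask (p) and a clear mask (m). */
static uint32_t demo_w1w0(uint32_t reg, uint32_t p, uint32_t m)
{
	return (reg & ~m) | p;   /* clear m, then set p (set wins on overlap) */
}

int main(void)
{
	uint32_t reg = 0x00f0;

	reg = demo_w1w0(reg, 0x0c00, 0xff00);   /* mirrors a 0x0d4 ERI tweak */
	printf("reg=0x%04x\n", reg);            /* -> 0x0cf0 */
	return 0;
}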
struct exgmac_reg {
@@ -1153,31 +1301,30 @@ struct exgmac_reg {
u32 val;
};
-static void rtl_write_exgmac_batch(void __iomem *ioaddr,
+static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
const struct exgmac_reg *r, int len)
{
while (len-- > 0) {
- rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC);
+ rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
r++;
}
}
-static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
+DECLARE_RTL_COND(rtl_efusear_cond)
{
- u8 value = 0xff;
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
- RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
+ return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
+}
- for (i = 0; i < 300; i++) {
- if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) {
- value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK;
- break;
- }
- udelay(100);
- }
+static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
- return value;
+ RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
+
+ return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
+ RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
static u16 rtl_get_events(struct rtl8169_private *tp)
@@ -1276,48 +1423,48 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
if (RTL_R8(PHYstatus) & _1000bpsF) {
- rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
- 0x00000011, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
- 0x00000005, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
+ ERIAR_EXGMAC);
} else if (RTL_R8(PHYstatus) & _100bps) {
- rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
- 0x0000001f, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
- 0x00000005, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
+ ERIAR_EXGMAC);
} else {
- rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
- 0x0000001f, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
- 0x0000003f, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
+ ERIAR_EXGMAC);
}
/* Reset packet filter */
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
ERIAR_EXGMAC);
} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
tp->mac_version == RTL_GIGA_MAC_VER_36) {
if (RTL_R8(PHYstatus) & _1000bpsF) {
- rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
- 0x00000011, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
- 0x00000005, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
+ ERIAR_EXGMAC);
} else {
- rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
- 0x0000001f, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
- 0x0000003f, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
+ ERIAR_EXGMAC);
}
} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
if (RTL_R8(PHYstatus) & _10bps) {
- rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
- 0x4d02, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_0011,
- 0x0060, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
+ ERIAR_EXGMAC);
} else {
- rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
- 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
+ ERIAR_EXGMAC);
}
}
}
@@ -1784,6 +1931,13 @@ static int rtl8169_get_sset_count(struct net_device *dev, int sset)
}
}
+DECLARE_RTL_COND(rtl_counters_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(CounterAddrLow) & CounterDump;
+}
+
static void rtl8169_update_counters(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -1792,7 +1946,6 @@ static void rtl8169_update_counters(struct net_device *dev)
struct rtl8169_counters *counters;
dma_addr_t paddr;
u32 cmd;
- int wait = 1000;
/*
* Some chips are unable to dump tally counters when the receiver
@@ -1810,13 +1963,8 @@ static void rtl8169_update_counters(struct net_device *dev)
RTL_W32(CounterAddrLow, cmd);
RTL_W32(CounterAddrLow, cmd | CounterDump);
- while (wait--) {
- if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
- memcpy(&tp->counters, counters, sizeof(*counters));
- break;
- }
- udelay(10);
- }
+ if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
+ memcpy(&tp->counters, counters, sizeof(*counters));
RTL_W32(CounterAddrLow, 0);
RTL_W32(CounterAddrHigh, 0);
@@ -1894,6 +2042,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
u32 val;
int mac_version;
} mac_info[] = {
+ /* 8168G family. */
+ { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
+ { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
+
/* 8168F family. */
{ 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
{ 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
@@ -1933,6 +2085,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
{ 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
/* 8101 family. */
+ { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 },
+ { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 },
{ 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
{ 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
{ 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
@@ -2186,7 +2340,7 @@ static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
index -= regno;
break;
case PHY_READ_EFUSE:
- predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
+ predata = rtl8168d_efuse_read(tp, regno);
index++;
break;
case PHY_CLEAR_READCOUNT:
@@ -2626,7 +2780,6 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 },
{ 0x0d, 0xf880 }
};
- void __iomem *ioaddr = tp->mmio_addr;
rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
@@ -2638,7 +2791,7 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
- if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
+ if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
static const struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0002 },
{ 0x05, 0x669a },
@@ -2738,11 +2891,10 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 },
{ 0x0d, 0xf880 }
};
- void __iomem *ioaddr = tp->mmio_addr;
rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
- if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
+ if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
static const struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0002 },
{ 0x05, 0x669a },
@@ -3010,8 +3162,7 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* EEE setting */
- rtl_w1w0_eri(tp->mmio_addr, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003,
- ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
rtl_writephy(tp, 0x1f, 0x0005);
rtl_writephy(tp, 0x05, 0x8b85);
rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
@@ -3115,7 +3266,6 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct phy_reg phy_reg_init[] = {
/* Channel estimation fine tune */
{ 0x1f, 0x0003 },
@@ -3189,7 +3339,7 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* eee setting */
- rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
rtl_writephy(tp, 0x1f, 0x0005);
rtl_writephy(tp, 0x05, 0x8b85);
rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
@@ -3211,6 +3361,55 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
}
+static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const u16 mac_ocp_patch[] = {
+ 0xe008, 0xe01b, 0xe01d, 0xe01f,
+ 0xe021, 0xe023, 0xe025, 0xe027,
+ 0x49d2, 0xf10d, 0x766c, 0x49e2,
+ 0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
+
+ 0x77c0, 0x4870, 0x9fc0, 0x1ea0,
+ 0xc707, 0x8ee1, 0x9d6c, 0xc603,
+ 0xbe00, 0xb416, 0x0076, 0xe86c,
+ 0xc602, 0xbe00, 0x0000, 0xc602,
+
+ 0xbe00, 0x0000, 0xc602, 0xbe00,
+ 0x0000, 0xc602, 0xbe00, 0x0000,
+ 0xc602, 0xbe00, 0x0000, 0xc602,
+ 0xbe00, 0x0000, 0xc602, 0xbe00,
+
+ 0x0000, 0x0000, 0x0000, 0x0000
+ };
+ u32 i;
+
+ /* Patch code for GPHY reset */
+ for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
+ r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
+ r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
+ r8168_mac_ocp_write(tp, 0xfc28, 0x0075);
+
+ rtl_apply_firmware(tp);
+
+ if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
+ rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
+ else
+ rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);
+
+ if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
+ rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
+ else
+ rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);
+
+ rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
+ rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);
+
+ r8168_phy_ocp_write(tp, 0xa436, 0x8012);
+ rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);
+
+ rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
+}
+
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -3256,8 +3455,6 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
/* Disable ALDPS before setting firmware */
rtl_writephy(tp, 0x1f, 0x0000);
rtl_writephy(tp, 0x18, 0x0310);
@@ -3266,13 +3463,35 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
/* EEE setting */
- rtl_eri_write(ioaddr, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_writephy(tp, 0x1f, 0x0004);
rtl_writephy(tp, 0x10, 0x401f);
rtl_writephy(tp, 0x19, 0x7030);
rtl_writephy(tp, 0x1f, 0x0000);
}
+static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0004 },
+ { 0x10, 0xc07f },
+ { 0x19, 0x7030 },
+ { 0x1f, 0x0000 }
+ };
+
+ /* Disable ALDPS before ram code */
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x18, 0x0310);
+ msleep(100);
+
+ rtl_apply_firmware(tp);
+
+ rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+}
+
static void rtl_hw_phy_config(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -3369,6 +3588,15 @@ static void rtl_hw_phy_config(struct net_device *dev)
rtl8411_hw_phy_config(tp);
break;
+ case RTL_GIGA_MAC_VER_39:
+ rtl8106e_hw_phy_config(tp);
+ break;
+
+ case RTL_GIGA_MAC_VER_40:
+ rtl8168g_1_hw_phy_config(tp);
+ break;
+
+ case RTL_GIGA_MAC_VER_41:
default:
break;
}
@@ -3426,18 +3654,16 @@ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
free_netdev(dev);
}
+DECLARE_RTL_COND(rtl_phy_reset_cond)
+{
+ return tp->phy_reset_pending(tp);
+}
+
static void rtl8169_phy_reset(struct net_device *dev,
struct rtl8169_private *tp)
{
- unsigned int i;
-
tp->phy_reset_enable(tp);
- for (i = 0; i < 100; i++) {
- if (!tp->phy_reset_pending(tp))
- return;
- msleep(1);
- }
- netif_err(tp, link, dev, "PHY reset failed\n");
+ rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
}
static bool rtl_tbi_enabled(struct rtl8169_private *tp)
@@ -3512,7 +3738,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
low >> 16 },
};
- rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e));
+ rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
}
RTL_W8(Cfg9346, Cfg9346_Lock);
@@ -3589,6 +3815,11 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
ops->write = r8168dp_2_mdio_write;
ops->read = r8168dp_2_mdio_read;
break;
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
+ ops->write = r8168g_mdio_write;
+ ops->read = r8168g_mdio_read;
+ break;
default:
ops->write = r8169_mdio_write;
ops->read = r8169_mdio_read;
@@ -3608,6 +3839,9 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_38:
+ case RTL_GIGA_MAC_VER_39:
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
RTL_W32(RxConfig, RTL_R32(RxConfig) |
AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
break;
@@ -3761,7 +3995,7 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
tp->mac_version == RTL_GIGA_MAC_VER_33)
- rtl_ephy_write(ioaddr, 0x19, 0xff64);
+ rtl_ephy_write(tp, 0x19, 0xff64);
if (rtl_wol_pll_power_down(tp))
return;
@@ -3830,6 +4064,7 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_29:
case RTL_GIGA_MAC_VER_30:
case RTL_GIGA_MAC_VER_37:
+ case RTL_GIGA_MAC_VER_39:
ops->down = r810x_pll_power_down;
ops->up = r810x_pll_power_up;
break;
@@ -3855,6 +4090,8 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_35:
case RTL_GIGA_MAC_VER_36:
case RTL_GIGA_MAC_VER_38:
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
ops->down = r8168_pll_power_down;
ops->up = r8168_pll_power_up;
break;
@@ -3894,6 +4131,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_22:
case RTL_GIGA_MAC_VER_23:
case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_34:
RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
default:
@@ -4050,6 +4288,8 @@ static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
* No action needed for jumbo frames with 8169.
* No jumbo for 810x at all.
*/
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
default:
ops->disable = NULL;
ops->enable = NULL;
@@ -4057,20 +4297,20 @@ static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
}
}
+DECLARE_RTL_COND(rtl_chipcmd_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R8(ChipCmd) & CmdReset;
+}
+
static void rtl_hw_reset(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- int i;
- /* Soft reset the chip. */
RTL_W8(ChipCmd, CmdReset);
- /* Check that the chip has finished the reset. */
- for (i = 0; i < 100; i++) {
- if ((RTL_R8(ChipCmd) & CmdReset) == 0)
- break;
- udelay(100);
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -4124,6 +4364,20 @@ static void rtl_rx_close(struct rtl8169_private *tp)
RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
}
+DECLARE_RTL_COND(rtl_npq_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R8(TxPoll) & NPQ;
+}
+
+DECLARE_RTL_COND(rtl_txcfg_empty_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(TxConfig) & TXCFG_EMPTY;
+}
+
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -4136,16 +4390,16 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
tp->mac_version == RTL_GIGA_MAC_VER_28 ||
tp->mac_version == RTL_GIGA_MAC_VER_31) {
- while (RTL_R8(TxPoll) & NPQ)
- udelay(20);
+ rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
tp->mac_version == RTL_GIGA_MAC_VER_35 ||
tp->mac_version == RTL_GIGA_MAC_VER_36 ||
tp->mac_version == RTL_GIGA_MAC_VER_37 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_40 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_41 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
- while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
- udelay(100);
+ rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
} else {
RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
udelay(100);
@@ -4351,15 +4605,12 @@ static void rtl_hw_start_8169(struct net_device *dev)
static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
{
if (tp->csi_ops.write)
- tp->csi_ops.write(tp->mmio_addr, addr, value);
+ tp->csi_ops.write(tp, addr, value);
}
static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
{
- if (tp->csi_ops.read)
- return tp->csi_ops.read(tp->mmio_addr, addr);
- else
- return ~0;
+ return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
}
static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
@@ -4380,73 +4631,56 @@ static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
rtl_csi_access_enable(tp, 0x27000000);
}
-static void r8169_csi_write(void __iomem *ioaddr, int addr, int value)
+DECLARE_RTL_COND(rtl_csiar_cond)
{
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(CSIAR) & CSIAR_FLAG;
+}
+
+static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(CSIDR, value);
RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
- for (i = 0; i < 100; i++) {
- if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
- break;
- udelay(10);
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
-static u32 r8169_csi_read(void __iomem *ioaddr, int addr)
+static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
{
- u32 value = ~0x00;
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
- for (i = 0; i < 100; i++) {
- if (RTL_R32(CSIAR) & CSIAR_FLAG) {
- value = RTL_R32(CSIDR);
- break;
- }
- udelay(10);
- }
-
- return value;
+ return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
+ RTL_R32(CSIDR) : ~0;
}
-static void r8402_csi_write(void __iomem *ioaddr, int addr, int value)
+static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
{
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(CSIDR, value);
RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
CSIAR_FUNC_NIC);
- for (i = 0; i < 100; i++) {
- if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
- break;
- udelay(10);
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
-static u32 r8402_csi_read(void __iomem *ioaddr, int addr)
+static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
{
- u32 value = ~0x00;
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
- for (i = 0; i < 100; i++) {
- if (RTL_R32(CSIAR) & CSIAR_FLAG) {
- value = RTL_R32(CSIDR);
- break;
- }
- udelay(10);
- }
-
- return value;
+ return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
+ RTL_R32(CSIDR) : ~0;
}
static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
@@ -4491,13 +4725,14 @@ struct ephy_info {
u16 bits;
};
-static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len)
+static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
+ int len)
{
u16 w;
while (len-- > 0) {
- w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
- rtl_ephy_write(ioaddr, e->offset, w);
+ w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
+ rtl_ephy_write(tp, e->offset, w);
e++;
}
}
@@ -4581,7 +4816,6 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8168cp[] = {
{ 0x01, 0, 0x0001 },
{ 0x02, 0x0800, 0x1000 },
@@ -4592,7 +4826,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
rtl_csi_access_enable_2(tp);
- rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
+ rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
__rtl_hw_start_8168cp(tp);
}
@@ -4643,14 +4877,13 @@ static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
- rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
+ rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
__rtl_hw_start_8168cp(tp);
}
static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8168c_2[] = {
{ 0x01, 0, 0x0001 },
{ 0x03, 0x0400, 0x0220 }
@@ -4658,7 +4891,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
rtl_csi_access_enable_2(tp);
- rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
+ rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
__rtl_hw_start_8168cp(tp);
}
@@ -4726,8 +4959,8 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
const struct ephy_info *e = e_info_8168d_4 + i;
u16 w;
- w = rtl_ephy_read(ioaddr, e->offset);
- rtl_ephy_write(ioaddr, 0x03, (w & e->mask) | e->bits);
+ w = rtl_ephy_read(tp, e->offset);
+ rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits);
}
rtl_enable_clock_request(pdev);
@@ -4755,7 +4988,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
rtl_csi_access_enable_2(tp);
- rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
+ rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
@@ -4781,19 +5014,18 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
rtl_csi_access_enable_1(tp);
- rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
+ rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
RTL_W8(MaxTxPacketSize, EarlySize);
@@ -4819,16 +5051,16 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
RTL_W8(MaxTxPacketSize, EarlySize);
@@ -4853,10 +5085,9 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
rtl_hw_start_8168f(tp);
- rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+ rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
- rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
- ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
/* Adjust EEE LED frequency */
RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
@@ -4864,7 +5095,6 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
static void rtl_hw_start_8411(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8168f_1[] = {
{ 0x06, 0x00c0, 0x0020 },
{ 0x0f, 0xffff, 0x5200 },
@@ -4874,10 +5104,39 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
rtl_hw_start_8168f(tp);
- rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+ rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+
+ rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
+}
+
+static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
+
+ rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+
+ rtl_csi_access_enable_1(tp);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+ RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
+ RTL_W8(MaxTxPacketSize, EarlySize);
- rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+
+ /* Adjust EEE LED frequency */
+ RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+
+ rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
}
static void rtl_hw_start_8168(struct net_device *dev)
@@ -4981,6 +5240,11 @@ static void rtl_hw_start_8168(struct net_device *dev)
rtl_hw_start_8411(tp);
break;
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
+ rtl_hw_start_8168g_1(tp);
+ break;
+
default:
printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
dev->name, tp->mac_version);
@@ -5035,7 +5299,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
RTL_W8(Config1, cfg1 & ~LEDS0);
- rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
+ rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
@@ -5055,7 +5319,7 @@ static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
{
rtl_hw_start_8102e_2(tp);
- rtl_ephy_write(tp->mmio_addr, 0x03, 0xc2f9);
+ rtl_ephy_write(tp, 0x03, 0xc2f9);
}
static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
@@ -5081,15 +5345,13 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
- rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
+ rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
rtl_hw_start_8105e_1(tp);
- rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
+ rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
}
static void rtl_hw_start_8402(struct rtl8169_private *tp)
@@ -5108,18 +5370,29 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
- rtl_ephy_init(ioaddr, e_info_8402, ARRAY_SIZE(e_info_8402));
+ rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
- rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
+}
+
+static void rtl_hw_start_8106(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ /* Force LAN exit from ASPM if Rx/Tx are not idle */
+ RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
+
+ RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
+ RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
+ RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
}
static void rtl_hw_start_8101(struct net_device *dev)
@@ -5166,6 +5439,10 @@ static void rtl_hw_start_8101(struct net_device *dev)
case RTL_GIGA_MAC_VER_37:
rtl_hw_start_8402(tp);
break;
+
+ case RTL_GIGA_MAC_VER_39:
+ rtl_hw_start_8106(tp);
+ break;
}
RTL_W8(Cfg9346, Cfg9346_Lock);
@@ -5379,7 +5656,6 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
tp->cur_tx = tp->dirty_tx = 0;
- netdev_reset_queue(tp->dev);
}
static void rtl_reset_work(struct rtl8169_private *tp)
@@ -5534,8 +5810,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd->opts2 = cpu_to_le32(opts[1]);
- netdev_sent_queue(dev, skb->len);
-
skb_tx_timestamp(skb);
wmb();
@@ -5632,16 +5906,9 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
-struct rtl_txc {
- int packets;
- int bytes;
-};
-
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
- struct rtl8169_stats *tx_stats = &tp->tx_stats;
unsigned int dirty_tx, tx_left;
- struct rtl_txc txc = { 0, 0 };
dirty_tx = tp->dirty_tx;
smp_rmb();
@@ -5660,24 +5927,17 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
tp->TxDescArray + entry);
if (status & LastFrag) {
- struct sk_buff *skb = tx_skb->skb;
-
- txc.packets++;
- txc.bytes += skb->len;
- dev_kfree_skb(skb);
+ u64_stats_update_begin(&tp->tx_stats.syncp);
+ tp->tx_stats.packets++;
+ tp->tx_stats.bytes += tx_skb->skb->len;
+ u64_stats_update_end(&tp->tx_stats.syncp);
+ dev_kfree_skb(tx_skb->skb);
tx_skb->skb = NULL;
}
dirty_tx++;
tx_left--;
}
- u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->packets += txc.packets;
- tx_stats->bytes += txc.bytes;
- u64_stats_update_end(&tx_stats->syncp);
-
- netdev_completed_queue(dev, txc.packets, txc.bytes);
-
if (tp->dirty_tx != dirty_tx) {
tp->dirty_tx = dirty_tx;
/* Sync with rtl8169_start_xmit:
@@ -5889,11 +6149,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
if (status & LinkChg)
__rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
- napi_disable(&tp->napi);
- rtl_irq_disable(tp);
-
- napi_enable(&tp->napi);
- napi_schedule(&tp->napi);
+ rtl_irq_enable_all(tp);
}
static void rtl_task(struct work_struct *work)
@@ -6345,6 +6601,8 @@ static void __devexit rtl_remove_one(struct pci_dev *pdev)
cancel_work_sync(&tp->wk.work);
+ netif_napi_del(&tp->napi);
+
unregister_netdev(dev);
rtl_release_firmware(tp);
@@ -6436,6 +6694,67 @@ static unsigned rtl_try_msi(struct rtl8169_private *tp,
return msi;
}
+DECLARE_RTL_COND(rtl_link_list_ready_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R8(MCU) & LINK_LIST_RDY;
+}
+
+DECLARE_RTL_COND(rtl_rxtx_empty_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
+}
+
+static void __devinit rtl_hw_init_8168g(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ u32 data;
+
+ tp->ocp_base = OCP_STD_PHY_BASE;
+
+ RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);
+
+ if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
+ return;
+
+ if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
+ return;
+
+ RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
+ msleep(1);
+ RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+
+ data = r8168_mac_ocp_read(tp, 0xe8de);
+ data &= ~(1 << 14);
+ r8168_mac_ocp_write(tp, 0xe8de, data);
+
+ if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
+ return;
+
+ data = r8168_mac_ocp_read(tp, 0xe8de);
+ data |= (1 << 15);
+ r8168_mac_ocp_write(tp, 0xe8de, data);
+
+ if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
+ return;
+}
+
+static void __devinit rtl_hw_initialize(struct rtl8169_private *tp)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
+ rtl_hw_init_8168g(tp);
+ break;
+
+ default:
+ break;
+ }
+}
+
static int __devinit
rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -6545,6 +6864,8 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl_irq_disable(tp);
+ rtl_hw_initialize(tp);
+
rtl_hw_reset(tp);
rtl_ack_events(tp, 0xffff);
@@ -6668,6 +6989,7 @@ out:
return rc;
err_out_msi_4:
+ netif_napi_del(&tp->napi);
rtl_disable_msi(pdev, tp);
iounmap(ioaddr);
err_out_free_res_3:
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index be3c221..af0b867 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -49,6 +49,34 @@
NETIF_MSG_RX_ERR| \
NETIF_MSG_TX_ERR)
+#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7763) || \
+ defined(CONFIG_ARCH_R8A7740)
+static void sh_eth_select_mii(struct net_device *ndev)
+{
+ u32 value = 0x0;
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ switch (mdp->phy_interface) {
+ case PHY_INTERFACE_MODE_GMII:
+ value = 0x2;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ value = 0x1;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ value = 0x0;
+ break;
+ default:
+ pr_warn("PHY interface mode was not setup. Set to MII.\n");
+ value = 0x1;
+ break;
+ }
+
+ sh_eth_write(ndev, value, RMII_MII);
+}
+#endif
+
/* There is CPU dependent code */
#if defined(CONFIG_CPU_SUBTYPE_SH7724)
#define SH_ETH_RESET_DEFAULT 1
@@ -102,6 +130,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
#define SH_ETH_HAS_BOTH_MODULES 1
#define SH_ETH_HAS_TSU 1
+static int sh_eth_check_reset(struct net_device *ndev);
+
static void sh_eth_set_duplex(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -176,23 +206,19 @@ static void sh_eth_chip_reset_giga(struct net_device *ndev)
}
static int sh_eth_is_gether(struct sh_eth_private *mdp);
-static void sh_eth_reset(struct net_device *ndev)
+static int sh_eth_reset(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- int cnt = 100;
+ int ret = 0;
if (sh_eth_is_gether(mdp)) {
sh_eth_write(ndev, 0x03, EDSR);
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
EDMR);
- while (cnt > 0) {
- if (!(sh_eth_read(ndev, EDMR) & 0x3))
- break;
- mdelay(1);
- cnt--;
- }
- if (cnt < 0)
- printk(KERN_ERR "Device reset fail\n");
+
+ ret = sh_eth_check_reset(ndev);
+ if (ret)
+ goto out;
/* Table Init */
sh_eth_write(ndev, 0x0, TDLAR);
@@ -210,6 +236,9 @@ static void sh_eth_reset(struct net_device *ndev)
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
EDMR);
}
+
+out:
+ return ret;
}
static void sh_eth_set_duplex_giga(struct net_device *ndev)
@@ -282,7 +311,9 @@ static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
#define SH_ETH_HAS_TSU 1
+static int sh_eth_check_reset(struct net_device *ndev);
static void sh_eth_reset_hw_crc(struct net_device *ndev);
+
static void sh_eth_chip_reset(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -292,35 +323,6 @@ static void sh_eth_chip_reset(struct net_device *ndev)
mdelay(1);
}
-static void sh_eth_reset(struct net_device *ndev)
-{
- int cnt = 100;
-
- sh_eth_write(ndev, EDSR_ENALL, EDSR);
- sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
- while (cnt > 0) {
- if (!(sh_eth_read(ndev, EDMR) & 0x3))
- break;
- mdelay(1);
- cnt--;
- }
- if (cnt == 0)
- printk(KERN_ERR "Device reset fail\n");
-
- /* Table Init */
- sh_eth_write(ndev, 0x0, TDLAR);
- sh_eth_write(ndev, 0x0, TDFAR);
- sh_eth_write(ndev, 0x0, TDFXR);
- sh_eth_write(ndev, 0x0, TDFFR);
- sh_eth_write(ndev, 0x0, RDLAR);
- sh_eth_write(ndev, 0x0, RDFAR);
- sh_eth_write(ndev, 0x0, RDFXR);
- sh_eth_write(ndev, 0x0, RDFFR);
-
- /* Reset HW CRC register */
- sh_eth_reset_hw_crc(ndev);
-}
-
static void sh_eth_set_duplex(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -377,9 +379,41 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.tsu = 1,
#if defined(CONFIG_CPU_SUBTYPE_SH7734)
.hw_crc = 1,
+ .select_mii = 1,
#endif
};
+static int sh_eth_reset(struct net_device *ndev)
+{
+ int ret = 0;
+
+ sh_eth_write(ndev, EDSR_ENALL, EDSR);
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
+
+ ret = sh_eth_check_reset(ndev);
+ if (ret)
+ goto out;
+
+ /* Table Init */
+ sh_eth_write(ndev, 0x0, TDLAR);
+ sh_eth_write(ndev, 0x0, TDFAR);
+ sh_eth_write(ndev, 0x0, TDFXR);
+ sh_eth_write(ndev, 0x0, TDFFR);
+ sh_eth_write(ndev, 0x0, RDLAR);
+ sh_eth_write(ndev, 0x0, RDFAR);
+ sh_eth_write(ndev, 0x0, RDFXR);
+ sh_eth_write(ndev, 0x0, RDFFR);
+
+ /* Reset HW CRC register */
+ sh_eth_reset_hw_crc(ndev);
+
+ /* Select MII mode */
+ if (sh_eth_my_cpu_data.select_mii)
+ sh_eth_select_mii(ndev);
+out:
+ return ret;
+}
+
static void sh_eth_reset_hw_crc(struct net_device *ndev)
{
if (sh_eth_my_cpu_data.hw_crc)
@@ -388,44 +422,29 @@ static void sh_eth_reset_hw_crc(struct net_device *ndev)
#elif defined(CONFIG_ARCH_R8A7740)
#define SH_ETH_HAS_TSU 1
+static int sh_eth_check_reset(struct net_device *ndev);
+
static void sh_eth_chip_reset(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- unsigned long mii;
/* reset device */
sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
mdelay(1);
- switch (mdp->phy_interface) {
- case PHY_INTERFACE_MODE_GMII:
- mii = 2;
- break;
- case PHY_INTERFACE_MODE_MII:
- mii = 1;
- break;
- case PHY_INTERFACE_MODE_RMII:
- default:
- mii = 0;
- break;
- }
- sh_eth_write(ndev, mii, RMII_MII);
+ sh_eth_select_mii(ndev);
}
-static void sh_eth_reset(struct net_device *ndev)
+static int sh_eth_reset(struct net_device *ndev)
{
- int cnt = 100;
+ int ret = 0;
sh_eth_write(ndev, EDSR_ENALL, EDSR);
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
- while (cnt > 0) {
- if (!(sh_eth_read(ndev, EDMR) & 0x3))
- break;
- mdelay(1);
- cnt--;
- }
- if (cnt == 0)
- printk(KERN_ERR "Device reset fail\n");
+
+ ret = sh_eth_check_reset(ndev);
+ if (ret)
+ goto out;
/* Table Init */
sh_eth_write(ndev, 0x0, TDLAR);
@@ -436,6 +455,9 @@ static void sh_eth_reset(struct net_device *ndev)
sh_eth_write(ndev, 0x0, RDFAR);
sh_eth_write(ndev, 0x0, RDFXR);
sh_eth_write(ndev, 0x0, RDFFR);
+
+out:
+ return ret;
}
static void sh_eth_set_duplex(struct net_device *ndev)
@@ -492,6 +514,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.no_trimd = 1,
.no_ade = 1,
.tsu = 1,
+ .select_mii = 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
@@ -543,11 +566,31 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
#if defined(SH_ETH_RESET_DEFAULT)
/* Chip Reset */
-static void sh_eth_reset(struct net_device *ndev)
+static int sh_eth_reset(struct net_device *ndev)
{
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
mdelay(3);
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
+
+ return 0;
+}
+#else
+static int sh_eth_check_reset(struct net_device *ndev)
+{
+ int ret = 0;
+ int cnt = 100;
+
+ while (cnt > 0) {
+ if (!(sh_eth_read(ndev, EDMR) & 0x3))
+ break;
+ mdelay(1);
+ cnt--;
+ }
+ if (cnt <= 0) {
+ printk(KERN_ERR "Device reset fail\n");
+ ret = -ETIMEDOUT;
+ }
+ return ret;
}
#endif
@@ -739,21 +782,23 @@ static void sh_eth_ring_free(struct net_device *ndev)
/* Free Rx skb ringbuffer */
if (mdp->rx_skbuff) {
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_rx_ring; i++) {
if (mdp->rx_skbuff[i])
dev_kfree_skb(mdp->rx_skbuff[i]);
}
}
kfree(mdp->rx_skbuff);
+ mdp->rx_skbuff = NULL;
/* Free Tx skb ringbuffer */
if (mdp->tx_skbuff) {
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_tx_ring; i++) {
if (mdp->tx_skbuff[i])
dev_kfree_skb(mdp->tx_skbuff[i]);
}
}
kfree(mdp->tx_skbuff);
+ mdp->tx_skbuff = NULL;
}
/* format skb and descriptor buffer */
@@ -764,8 +809,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
struct sk_buff *skb;
struct sh_eth_rxdesc *rxdesc = NULL;
struct sh_eth_txdesc *txdesc = NULL;
- int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
- int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;
+ int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
+ int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
mdp->cur_rx = mdp->cur_tx = 0;
mdp->dirty_rx = mdp->dirty_tx = 0;
@@ -773,7 +818,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
memset(mdp->rx_ring, 0, rx_ringsize);
/* build Rx ring buffer */
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_rx_ring; i++) {
/* skb */
mdp->rx_skbuff[i] = NULL;
skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
@@ -799,7 +844,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
}
}
- mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
+ mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
/* Mark the last entry as wrapping the ring. */
rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
@@ -807,7 +852,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
memset(mdp->tx_ring, 0, tx_ringsize);
/* build Tx ring buffer */
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_tx_ring; i++) {
mdp->tx_skbuff[i] = NULL;
txdesc = &mdp->tx_ring[i];
txdesc->status = cpu_to_edmac(mdp, TD_TFP);
@@ -841,7 +886,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
mdp->rx_buf_sz += NET_IP_ALIGN;
/* Allocate RX and TX skb rings */
- mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
+ mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * mdp->num_rx_ring,
GFP_KERNEL);
if (!mdp->rx_skbuff) {
dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
@@ -849,7 +894,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
return ret;
}
- mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
+ mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * mdp->num_tx_ring,
GFP_KERNEL);
if (!mdp->tx_skbuff) {
dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
@@ -858,7 +903,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
}
/* Allocate all Rx descriptors. */
- rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
+ rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
GFP_KERNEL);
@@ -872,7 +917,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
mdp->dirty_rx = 0;
/* Allocate all Tx descriptors. */
- tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
+ tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
GFP_KERNEL);
if (!mdp->tx_ring) {
@@ -890,19 +935,41 @@ desc_ring_free:
skb_ring_free:
/* Free Rx and Tx skb ring buffer */
sh_eth_ring_free(ndev);
+ mdp->tx_ring = NULL;
+ mdp->rx_ring = NULL;
return ret;
}
-static int sh_eth_dev_init(struct net_device *ndev)
+static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
+{
+ int ringsize;
+
+ if (mdp->rx_ring) {
+ ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+ dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+ mdp->rx_desc_dma);
+ mdp->rx_ring = NULL;
+ }
+
+ if (mdp->tx_ring) {
+ ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
+ dma_free_coherent(NULL, ringsize, mdp->tx_ring,
+ mdp->tx_desc_dma);
+ mdp->tx_ring = NULL;
+ }
+}
+
+static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
int ret = 0;
struct sh_eth_private *mdp = netdev_priv(ndev);
- u_int32_t rx_int_var, tx_int_var;
u32 val;
/* Soft Reset */
- sh_eth_reset(ndev);
+ ret = sh_eth_reset(ndev);
+ if (ret)
+ goto out;
/* Descriptor format */
sh_eth_ring_format(ndev);
@@ -926,9 +993,7 @@ static int sh_eth_dev_init(struct net_device *ndev)
/* Frame recv control */
sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
- rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
- tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
- sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);
+ sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
if (mdp->cd->bculr)
sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */
@@ -943,7 +1008,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
RFLR);
sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
- sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+ if (start)
+ sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
/* PAUSE Prohibition */
val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -958,7 +1024,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
/* E-MAC Interrupt Enable register */
- sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
+ if (start)
+ sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
/* Set MAC address */
update_mac_address(ndev);
@@ -971,11 +1038,14 @@ static int sh_eth_dev_init(struct net_device *ndev)
if (mdp->cd->tpauser)
sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
- /* Setting the Rx mode will start the Rx process. */
- sh_eth_write(ndev, EDRRR_R, EDRRR);
+ if (start) {
+ /* Setting the Rx mode will start the Rx process. */
+ sh_eth_write(ndev, EDRRR_R, EDRRR);
- netif_start_queue(ndev);
+ netif_start_queue(ndev);
+ }
+out:
return ret;
}
@@ -988,7 +1058,7 @@ static int sh_eth_txfree(struct net_device *ndev)
int entry = 0;
for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
- entry = mdp->dirty_tx % TX_RING_SIZE;
+ entry = mdp->dirty_tx % mdp->num_tx_ring;
txdesc = &mdp->tx_ring[entry];
if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
break;
@@ -1001,7 +1071,7 @@ static int sh_eth_txfree(struct net_device *ndev)
freeNum++;
}
txdesc->status = cpu_to_edmac(mdp, TD_TFP);
- if (entry >= TX_RING_SIZE - 1)
+ if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
ndev->stats.tx_packets++;
@@ -1011,13 +1081,13 @@ static int sh_eth_txfree(struct net_device *ndev)
}
/* Packet receive function */
-static int sh_eth_rx(struct net_device *ndev)
+static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_rxdesc *rxdesc;
- int entry = mdp->cur_rx % RX_RING_SIZE;
- int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
+ int entry = mdp->cur_rx % mdp->num_rx_ring;
+ int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
struct sk_buff *skb;
u16 pkt_len = 0;
u32 desc_status;
@@ -1068,13 +1138,13 @@ static int sh_eth_rx(struct net_device *ndev)
ndev->stats.rx_bytes += pkt_len;
}
rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
- entry = (++mdp->cur_rx) % RX_RING_SIZE;
+ entry = (++mdp->cur_rx) % mdp->num_rx_ring;
rxdesc = &mdp->rx_ring[entry];
}
/* Refill the Rx ring buffers. */
for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
- entry = mdp->dirty_rx % RX_RING_SIZE;
+ entry = mdp->dirty_rx % mdp->num_rx_ring;
rxdesc = &mdp->rx_ring[entry];
/* The size of the buffer is 16 byte boundary. */
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
@@ -1091,7 +1161,7 @@ static int sh_eth_rx(struct net_device *ndev)
skb_checksum_none_assert(skb);
rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
}
- if (entry >= RX_RING_SIZE - 1)
+ if (entry >= mdp->num_rx_ring - 1)
rxdesc->status |=
cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
else
@@ -1101,8 +1171,14 @@ static int sh_eth_rx(struct net_device *ndev)
/* Restart Rx engine if stopped. */
/* If we don't need to check status, don't. -KDU */
- if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
+ if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
+ /* fix the values for the next receiving if RDE is set */
+ if (intr_status & EESR_RDE)
+ mdp->cur_rx = mdp->dirty_rx =
+ (sh_eth_read(ndev, RDFAR) -
+ sh_eth_read(ndev, RDLAR)) >> 4;
sh_eth_write(ndev, EDRRR_R, EDRRR);
+ }
return 0;
}
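Illustrative aside, not part of the patch: the recovery hunk above works because each Rx descriptor occupies 16 bytes, so the byte distance between the hardware's current fetch address (RDFAR) and the ring base (RDLAR), shifted right by 4, is the ring index at which reception stalled. A minimal standalone sketch of that arithmetic, using hypothetical register values:

/* Illustrative sketch only -- not from the patch. Assumes 16-byte Rx
 * descriptors, which is what the ">> 4" in sh_eth_rx() relies on. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rdlar = 0x20000000;           /* hypothetical ring base address */
	uint32_t rdfar = 0x20000000 + 5 * 16;  /* hardware stopped at entry 5 */
	uint32_t entry = (rdfar - rdlar) >> 4; /* 16-byte descriptors */

	printf("resume reception at ring entry %u\n", entry); /* prints 5 */
	return 0;
}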
@@ -1199,8 +1275,6 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
/* Receive Descriptor Empty int */
ndev->stats.rx_over_errors++;
- if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
- sh_eth_write(ndev, EDRRR_R, EDRRR);
if (netif_msg_rx_err(mdp))
dev_err(&ndev->dev, "Receive Descriptor Empty\n");
}
@@ -1271,7 +1345,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
EESR_RTSF | /* short frame recv */
EESR_PRE | /* PHY-LSI recv error */
EESR_CERF)){ /* recv frame CRC error */
- sh_eth_rx(ndev);
+ sh_eth_rx(ndev, intr_status);
}
/* Tx Check */
@@ -1289,14 +1363,6 @@ other_irq:
return ret;
}
-static void sh_eth_timer(unsigned long data)
-{
- struct net_device *ndev = (struct net_device *)data;
- struct sh_eth_private *mdp = netdev_priv(ndev);
-
- mod_timer(&mdp->timer, jiffies + (10 * HZ));
-}
-
/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
@@ -1495,6 +1561,71 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
}
}
+static void sh_eth_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ ring->rx_max_pending = RX_RING_MAX;
+ ring->tx_max_pending = TX_RING_MAX;
+ ring->rx_pending = mdp->num_rx_ring;
+ ring->tx_pending = mdp->num_tx_ring;
+}
+
+static int sh_eth_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int ret;
+
+ if (ring->tx_pending > TX_RING_MAX ||
+ ring->rx_pending > RX_RING_MAX ||
+ ring->tx_pending < TX_RING_MIN ||
+ ring->rx_pending < RX_RING_MIN)
+ return -EINVAL;
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (netif_running(ndev)) {
+ netif_tx_disable(ndev);
+ /* Disable interrupts by clearing the interrupt mask. */
+ sh_eth_write(ndev, 0x0000, EESIPR);
+ /* Stop the chip's Tx and Rx processes. */
+ sh_eth_write(ndev, 0, EDTRR);
+ sh_eth_write(ndev, 0, EDRRR);
+ synchronize_irq(ndev->irq);
+ }
+
+ /* Free all the skbuffs in the Rx queue. */
+ sh_eth_ring_free(ndev);
+ /* Free DMA buffer */
+ sh_eth_free_dma_buffer(mdp);
+
+ /* Set new parameters */
+ mdp->num_rx_ring = ring->rx_pending;
+ mdp->num_tx_ring = ring->tx_pending;
+
+ ret = sh_eth_ring_init(ndev);
+ if (ret < 0) {
+ dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
+ return ret;
+ }
+ ret = sh_eth_dev_init(ndev, false);
+ if (ret < 0) {
+ dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
+ return ret;
+ }
+
+ if (netif_running(ndev)) {
+ sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+ /* Setting the Rx mode will start the Rx process. */
+ sh_eth_write(ndev, EDRRR_R, EDRRR);
+ netif_wake_queue(ndev);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_settings = sh_eth_get_settings,
.set_settings = sh_eth_set_settings,
@@ -1505,6 +1636,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_strings = sh_eth_get_strings,
.get_ethtool_stats = sh_eth_get_ethtool_stats,
.get_sset_count = sh_eth_get_sset_count,
+ .get_ringparam = sh_eth_get_ringparam,
+ .set_ringparam = sh_eth_set_ringparam,
};
/* network device open function */
@@ -1535,7 +1668,7 @@ static int sh_eth_open(struct net_device *ndev)
goto out_free_irq;
/* device init */
- ret = sh_eth_dev_init(ndev);
+ ret = sh_eth_dev_init(ndev, true);
if (ret)
goto out_free_irq;
@@ -1544,11 +1677,6 @@ static int sh_eth_open(struct net_device *ndev)
if (ret)
goto out_free_irq;
- /* Set the timer to check for link beat. */
- init_timer(&mdp->timer);
- mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
- setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
-
return ret;
out_free_irq:
@@ -1573,11 +1701,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
/* tx_errors count up */
ndev->stats.tx_errors++;
- /* timer off */
- del_timer_sync(&mdp->timer);
-
/* Free all the skbuffs in the Rx queue. */
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_rx_ring; i++) {
rxdesc = &mdp->rx_ring[i];
rxdesc->status = 0;
rxdesc->addr = 0xBADF00D0;
@@ -1585,18 +1710,14 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
dev_kfree_skb(mdp->rx_skbuff[i]);
mdp->rx_skbuff[i] = NULL;
}
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_tx_ring; i++) {
if (mdp->tx_skbuff[i])
dev_kfree_skb(mdp->tx_skbuff[i]);
mdp->tx_skbuff[i] = NULL;
}
/* device init */
- sh_eth_dev_init(ndev);
-
- /* timer on */
- mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
- add_timer(&mdp->timer);
+ sh_eth_dev_init(ndev, true);
}
/* Packet transmit function */
@@ -1608,7 +1729,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
unsigned long flags;
spin_lock_irqsave(&mdp->lock, flags);
- if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
+ if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
if (!sh_eth_txfree(ndev)) {
if (netif_msg_tx_queued(mdp))
dev_warn(&ndev->dev, "TxFD exhausted.\n");
@@ -1619,7 +1740,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
spin_unlock_irqrestore(&mdp->lock, flags);
- entry = mdp->cur_tx % TX_RING_SIZE;
+ entry = mdp->cur_tx % mdp->num_tx_ring;
mdp->tx_skbuff[entry] = skb;
txdesc = &mdp->tx_ring[entry];
/* soft swap. */
@@ -1633,7 +1754,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
else
txdesc->buffer_length = skb->len;
- if (entry >= TX_RING_SIZE - 1)
+ if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
else
txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
@@ -1650,7 +1771,6 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
static int sh_eth_close(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- int ringsize;
netif_stop_queue(ndev);
@@ -1669,18 +1789,11 @@ static int sh_eth_close(struct net_device *ndev)
free_irq(ndev->irq, ndev);
- del_timer_sync(&mdp->timer);
-
/* Free all the skbuffs in the Rx queue. */
sh_eth_ring_free(ndev);
/* free DMA buffer */
- ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
- dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);
-
- /* free DMA buffer */
- ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
- dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
+ sh_eth_free_dma_buffer(mdp);
pm_runtime_put_sync(&mdp->pdev->dev);
@@ -2271,6 +2384,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ether_setup(ndev);
mdp = netdev_priv(ndev);
+ mdp->num_tx_ring = TX_RING_SIZE;
+ mdp->num_rx_ring = RX_RING_SIZE;
mdp->addr = ioremap(res->start, resource_size(res));
if (mdp->addr == NULL) {
ret = -ENOMEM;
@@ -2308,8 +2423,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* debug message level */
mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
- mdp->post_rx = POST_RX >> (devno << 1);
- mdp->post_fw = POST_FW >> (devno << 1);
/* read and set MAC address */
read_mac_address(ndev, pd->mac_addr);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 57b8e1f..bae84fd 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -27,6 +27,10 @@
#define TX_TIMEOUT (5*HZ)
#define TX_RING_SIZE 64 /* Tx ring size */
#define RX_RING_SIZE 64 /* Rx ring size */
+#define TX_RING_MIN 64
+#define RX_RING_MIN 64
+#define TX_RING_MAX 1024
+#define RX_RING_MAX 1024
#define ETHERSMALL 60
#define PKT_BUF_SZ 1538
#define SH_ETH_TSU_TIMEOUT_MS 500
@@ -585,71 +589,6 @@ enum RPADIR_BIT {
/* FDR */
#define DEFAULT_FDR_INIT 0x00000707
-enum phy_offsets {
- PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3,
- PHY_ANA = 4, PHY_ANL = 5, PHY_ANE = 6,
- PHY_16 = 16,
-};
-
-/* PHY_CTRL */
-enum PHY_CTRL_BIT {
- PHY_C_RESET = 0x8000, PHY_C_LOOPBK = 0x4000, PHY_C_SPEEDSL = 0x2000,
- PHY_C_ANEGEN = 0x1000, PHY_C_PWRDN = 0x0800, PHY_C_ISO = 0x0400,
- PHY_C_RANEG = 0x0200, PHY_C_DUPLEX = 0x0100, PHY_C_COLT = 0x0080,
-};
-#define DM9161_PHY_C_ANEGEN 0 /* auto nego special */
-
-/* PHY_STAT */
-enum PHY_STAT_BIT {
- PHY_S_100T4 = 0x8000, PHY_S_100X_F = 0x4000, PHY_S_100X_H = 0x2000,
- PHY_S_10T_F = 0x1000, PHY_S_10T_H = 0x0800, PHY_S_ANEGC = 0x0020,
- PHY_S_RFAULT = 0x0010, PHY_S_ANEGA = 0x0008, PHY_S_LINK = 0x0004,
- PHY_S_JAB = 0x0002, PHY_S_EXTD = 0x0001,
-};
-
-/* PHY_ANA */
-enum PHY_ANA_BIT {
- PHY_A_NP = 0x8000, PHY_A_ACK = 0x4000, PHY_A_RF = 0x2000,
- PHY_A_FCS = 0x0400, PHY_A_T4 = 0x0200, PHY_A_FDX = 0x0100,
- PHY_A_HDX = 0x0080, PHY_A_10FDX = 0x0040, PHY_A_10HDX = 0x0020,
- PHY_A_SEL = 0x001e,
-};
-/* PHY_ANL */
-enum PHY_ANL_BIT {
- PHY_L_NP = 0x8000, PHY_L_ACK = 0x4000, PHY_L_RF = 0x2000,
- PHY_L_FCS = 0x0400, PHY_L_T4 = 0x0200, PHY_L_FDX = 0x0100,
- PHY_L_HDX = 0x0080, PHY_L_10FDX = 0x0040, PHY_L_10HDX = 0x0020,
- PHY_L_SEL = 0x001f,
-};
-
-/* PHY_ANE */
-enum PHY_ANE_BIT {
- PHY_E_PDF = 0x0010, PHY_E_LPNPA = 0x0008, PHY_E_NPA = 0x0004,
- PHY_E_PRX = 0x0002, PHY_E_LPANEGA = 0x0001,
-};
-
-/* DM9161 */
-enum PHY_16_BIT {
- PHY_16_BP4B45 = 0x8000, PHY_16_BPSCR = 0x4000, PHY_16_BPALIGN = 0x2000,
- PHY_16_BP_ADPOK = 0x1000, PHY_16_Repeatmode = 0x0800,
- PHY_16_TXselect = 0x0400,
- PHY_16_Rsvd = 0x0200, PHY_16_RMIIEnable = 0x0100,
- PHY_16_Force100LNK = 0x0080,
- PHY_16_APDLED_CTL = 0x0040, PHY_16_COLLED_CTL = 0x0020,
- PHY_16_RPDCTR_EN = 0x0010,
- PHY_16_ResetStMch = 0x0008, PHY_16_PreamSupr = 0x0004,
- PHY_16_Sleepmode = 0x0002,
- PHY_16_RemoteLoopOut = 0x0001,
-};
-
-#define POST_RX 0x08
-#define POST_FW 0x04
-#define POST0_RX (POST_RX)
-#define POST0_FW (POST_FW)
-#define POST1_RX (POST_RX >> 2)
-#define POST1_FW (POST_FW >> 2)
-#define POST_ALL (POST0_RX | POST0_FW | POST1_RX | POST1_FW)
-
/* ARSTR */
enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, };
@@ -757,6 +696,7 @@ struct sh_eth_cpu_data {
unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */
unsigned hw_crc:1; /* E-DMAC have CSMR */
+ unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */
};
struct sh_eth_private {
@@ -765,13 +705,14 @@ struct sh_eth_private {
const u16 *reg_offset;
void __iomem *addr;
void __iomem *tsu_addr;
+ u32 num_rx_ring;
+ u32 num_tx_ring;
dma_addr_t rx_desc_dma;
dma_addr_t tx_desc_dma;
struct sh_eth_rxdesc *rx_ring;
struct sh_eth_txdesc *tx_ring;
struct sk_buff **rx_skbuff;
struct sk_buff **tx_skbuff;
- struct timer_list timer;
spinlock_t lock;
u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
u32 cur_tx, dirty_tx;
@@ -786,10 +727,6 @@ struct sh_eth_private {
int msg_enable;
int speed;
int duplex;
- u32 rx_int_var, tx_int_var; /* interrupt control variables */
- char post_rx; /* POST receive */
- char post_fw; /* POST forward */
- struct net_device_stats tsu_stats; /* TSU forward status */
int port; /* for TSU */
int vlan_num_ids; /* for VLAN tag filter */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index b95f2e1..70554a1 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1103,8 +1103,8 @@ static int efx_init_io(struct efx_nic *efx)
* masks event though they reject 46 bit masks.
*/
while (dma_mask > 0x7fffffffUL) {
- if (pci_dma_supported(pci_dev, dma_mask)) {
- rc = pci_set_dma_mask(pci_dev, dma_mask);
+ if (dma_supported(&pci_dev->dev, dma_mask)) {
+ rc = dma_set_mask(&pci_dev->dev, dma_mask);
if (rc == 0)
break;
}
@@ -1117,10 +1117,10 @@ static int efx_init_io(struct efx_nic *efx)
}
netif_dbg(efx, probe, efx->net_dev,
"using DMA mask %llx\n", (unsigned long long) dma_mask);
- rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
+ rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
if (rc) {
- /* pci_set_consistent_dma_mask() is not *allowed* to
- * fail with a mask that pci_set_dma_mask() accepted,
+ /* dma_set_coherent_mask() is not *allowed* to
+ * fail with a mask that dma_set_mask() accepted,
* but just in case...
*/
netif_err(efx, probe, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index d725a8f..182dbe2 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -136,10 +136,10 @@ enum efx_loopback_mode {
*
* Reset methods are numbered in order of increasing scope.
*
- * @RESET_TYPE_INVISIBLE: don't reset the PHYs or interrupts
- * @RESET_TYPE_ALL: reset everything but PCI core blocks
- * @RESET_TYPE_WORLD: reset everything, save & restore PCI config
- * @RESET_TYPE_DISABLE: disable NIC
+ * @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only)
+ * @RESET_TYPE_ALL: Reset datapath, MAC and PHY
+ * @RESET_TYPE_WORLD: Reset as much as possible
+ * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
* @RESET_TYPE_INT_ERROR: reset due to internal error
* @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 03ded36..10536f9 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -453,7 +453,7 @@ static void efx_ethtool_get_strings(struct net_device *net_dev,
switch (string_set) {
case ETH_SS_STATS:
for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
- strncpy(ethtool_strings[i].name,
+ strlcpy(ethtool_strings[i].name,
efx_ethtool_stats[i].name,
sizeof(ethtool_strings[i].name));
break;
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 3a1ca2b..12b573a 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -25,9 +25,12 @@
#include "io.h"
#include "phy.h"
#include "workarounds.h"
+#include "selftest.h"
/* Hardware control for SFC4000 (aka Falcon). */
+static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
+
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
* 8 KB, 16-bit address, 32 B write block */
@@ -1034,10 +1037,34 @@ static const struct efx_nic_register_test falcon_b0_register_tests[] = {
EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};
-static int falcon_b0_test_registers(struct efx_nic *efx)
+static int
+falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
- return efx_nic_test_registers(efx, falcon_b0_register_tests,
- ARRAY_SIZE(falcon_b0_register_tests));
+ enum reset_type reset_method = RESET_TYPE_INVISIBLE;
+ int rc, rc2;
+
+ mutex_lock(&efx->mac_lock);
+ if (efx->loopback_modes) {
+ /* We need the 312 clock from the PHY to test the XMAC
+ * registers, so move into XGMII loopback if available */
+ if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
+ efx->loopback_mode = LOOPBACK_XGMII;
+ else
+ efx->loopback_mode = __ffs(efx->loopback_modes);
+ }
+ __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ efx_reset_down(efx, reset_method);
+
+ tests->registers =
+ efx_nic_test_registers(efx, falcon_b0_register_tests,
+ ARRAY_SIZE(falcon_b0_register_tests))
+ ? -1 : 1;
+
+ rc = falcon_reset_hw(efx, reset_method);
+ rc2 = efx_reset_up(efx, reset_method, rc == 0);
+ return rc ? rc : rc2;
}
/**************************************************************************
@@ -1818,7 +1845,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.get_wol = falcon_get_wol,
.set_wol = falcon_set_wol,
.resume_wol = efx_port_dummy_op_void,
- .test_registers = falcon_b0_test_registers,
+ .test_chip = falcon_b0_test_chip,
.test_nvram = falcon_test_nvram,
.revision = EFX_REV_FALCON_B0,
diff --git a/drivers/net/ethernet/sfc/falcon_xmac.c b/drivers/net/ethernet/sfc/falcon_xmac.c
index 6106ef1..8333865 100644
--- a/drivers/net/ethernet/sfc/falcon_xmac.c
+++ b/drivers/net/ethernet/sfc/falcon_xmac.c
@@ -341,12 +341,12 @@ void falcon_update_stats_xmac(struct efx_nic *efx)
FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);
/* Update derived statistics */
- mac_stats->tx_good_bytes =
- (mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
- mac_stats->tx_control * 64);
- mac_stats->rx_bad_bytes =
- (mac_stats->rx_bytes - mac_stats->rx_good_bytes -
- mac_stats->rx_control * 64);
+ efx_update_diff_stat(&mac_stats->tx_good_bytes,
+ mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
+ mac_stats->tx_control * 64);
+ efx_update_diff_stat(&mac_stats->rx_bad_bytes,
+ mac_stats->rx_bytes - mac_stats->rx_good_bytes -
+ mac_stats->rx_control * 64);
}
void falcon_poll_xmac(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index fea7f73..c3fd61f 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -662,7 +662,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
struct efx_filter_table *table = efx_filter_spec_table(state, spec);
struct efx_filter_spec *saved_spec;
efx_oword_t filter;
- unsigned int filter_idx, depth;
+ unsigned int filter_idx, depth = 0;
u32 key;
int rc;
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 17b6463..fc5e7bb 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1001,12 +1001,17 @@ static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
u8 inbuf[MC_CMD_REBOOT_IN_LEN];
- /* Atomically reboot the mcfw out of the assertion handler */
+ /* If the MC is running debug firmware, it might now be
+ * waiting for a debugger to attach, but we just want it to
+ * reboot. We set a flag that makes the command a no-op if it
+ * has already done so. We don't know what return code to
+ * expect (0 or -EIO), so ignore it.
+ */
BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
- efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
- NULL, 0, NULL);
+ (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
+ NULL, 0, NULL);
}
int efx_mcdi_handle_assertion(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index fb7f65b..1d552f0 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -222,6 +222,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
attr->index = index;
attr->type = type;
attr->limit_value = limit_value;
+ sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.name = attr->name;
attr->dev_attr.attr.mode = S_IRUGO;
attr->dev_attr.show = reader;
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 0310b9f..db4beed 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -48,8 +48,7 @@
/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
-/**
- * MCDI version 1
+/* MCDI version 1
*
* Each MCDI request starts with an MCDI_HEADER, which is a 32byte
* structure, filled in by the client.
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 0e57535..cd9c0a9 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -68,6 +68,8 @@
#define EFX_TXQ_TYPES 4
#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
+struct efx_self_tests;
+
/**
* struct efx_special_buffer - An Efx special buffer
* @addr: CPU base address of the buffer
@@ -100,7 +102,7 @@ struct efx_special_buffer {
* @len: Length of this fragment.
* This field is zero when the queue slot is empty.
* @continuation: True if this fragment is not the end of a packet.
- * @unmap_single: True if pci_unmap_single should be used.
+ * @unmap_single: True if dma_unmap_single should be used.
* @unmap_len: Length of this fragment to unmap
*/
struct efx_tx_buffer {
@@ -527,7 +529,7 @@ struct efx_phy_operations {
};
/**
- * @enum efx_phy_mode - PHY operating mode flags
+ * enum efx_phy_mode - PHY operating mode flags
* @PHY_MODE_NORMAL: on and should pass traffic
* @PHY_MODE_TX_DISABLED: on with TX disabled
* @PHY_MODE_LOW_POWER: set to low power through MDIO
@@ -901,7 +903,8 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @get_wol: Get WoL configuration from driver state
* @set_wol: Push WoL configuration to the NIC
* @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
- * @test_registers: Test read/write functionality of control registers
+ * @test_chip: Test registers. Should use efx_nic_test_registers(), and is
+ * expected to reset the NIC.
* @test_nvram: Test validity of NVRAM contents
* @revision: Hardware architecture revision
* @mem_map_size: Memory BAR mapped size
@@ -946,7 +949,7 @@ struct efx_nic_type {
void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
int (*set_wol)(struct efx_nic *efx, u32 type);
void (*resume_wol)(struct efx_nic *efx);
- int (*test_registers)(struct efx_nic *efx);
+ int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
int (*test_nvram)(struct efx_nic *efx);
int revision;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 4a9a5be..326d799 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -124,9 +124,6 @@ int efx_nic_test_registers(struct efx_nic *efx,
unsigned address = 0, i, j;
efx_oword_t mask, imask, original, reg, buf;
- /* Falcon should be in loopback to isolate the XMAC from the PHY */
- WARN_ON(!LOOPBACK_INTERNAL(efx));
-
for (i = 0; i < n_regs; ++i) {
address = regs[i].address;
mask = imask = regs[i].mask;
@@ -308,8 +305,8 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len)
{
- buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
- &buffer->dma_addr);
+ buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, GFP_ATOMIC);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
@@ -320,8 +317,8 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
if (buffer->addr) {
- pci_free_consistent(efx->pci_dev, buffer->len,
- buffer->addr, buffer->dma_addr);
+ dma_free_coherent(&efx->pci_dev->dev, buffer->len,
+ buffer->addr, buffer->dma_addr);
buffer->addr = NULL;
}
}
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index f48ccf6..bab5cd9 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -294,6 +294,24 @@ extern bool falcon_xmac_check_fault(struct efx_nic *efx);
extern int falcon_reconfigure_xmac(struct efx_nic *efx);
extern void falcon_update_stats_xmac(struct efx_nic *efx);
+/* Some statistics are computed as A - B where A and B each increase
+ * linearly with some hardware counter(s) and the counters are read
+ * asynchronously. If the counters contributing to B are always read
+ * after those contributing to A, the computed value may be lower than
+ * the true value by some variable amount, and may decrease between
+ * subsequent computations.
+ *
+ * We should never allow statistics to decrease or to exceed the true
+ * value. Since the computed value will never be greater than the
+ * true value, we can achieve this by only storing the computed value
+ * when it increases.
+ */
+static inline void efx_update_diff_stat(u64 *stat, u64 diff)
+{
+ if ((s64)(diff - *stat) > 0)
+ *stat = diff;
+}
+
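Illustrative aside, not part of the patch: the helper above keeps a derived statistic monotonic by storing a newly computed difference only when it exceeds the stored value. A self-contained sketch (all names hypothetical, mirroring the inline function locally) shows the effect when the second counter sample lags:

/* Standalone illustration of the monotonic-difference idea above;
 * update_diff_stat() copies the logic of efx_update_diff_stat(). */
#include <stdint.h>
#include <stdio.h>

static void update_diff_stat(uint64_t *stat, uint64_t diff)
{
	if ((int64_t)(diff - *stat) > 0)	/* store only increases */
		*stat = diff;
}

int main(void)
{
	uint64_t tx_good_bytes = 0;
	/* Two samples of (tx_bytes, tx_bad_bytes); in the second one the
	 * bad-byte counter was read late, so the naive difference drops. */
	uint64_t samples[2][2] = { { 1000, 100 }, { 1500, 700 } };

	for (int i = 0; i < 2; i++) {
		update_diff_stat(&tx_good_bytes,
				 samples[i][0] - samples[i][1]);
		printf("tx_good_bytes = %llu\n",
		       (unsigned long long)tx_good_bytes);
	}
	return 0;	/* prints 900 twice: the derived stat never decreases */
}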
/* Interrupts and test events */
extern int efx_nic_init_interrupt(struct efx_nic *efx);
extern void efx_nic_enable_interrupts(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 243e91f..719319b 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -155,11 +155,11 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
rx_buf->len = skb_len - NET_IP_ALIGN;
rx_buf->flags = 0;
- rx_buf->dma_addr = pci_map_single(efx->pci_dev,
+ rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
skb->data, rx_buf->len,
- PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(efx->pci_dev,
- rx_buf->dma_addr))) {
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
+ rx_buf->dma_addr))) {
dev_kfree_skb_any(skb);
rx_buf->u.skb = NULL;
return -EIO;
@@ -200,10 +200,10 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
efx->rx_buffer_order);
if (unlikely(page == NULL))
return -ENOMEM;
- dma_addr = pci_map_page(efx->pci_dev, page, 0,
+ dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
efx_rx_buf_size(efx),
- PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
__free_pages(page, efx->rx_buffer_order);
return -EIO;
}
@@ -247,14 +247,14 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
state = page_address(rx_buf->u.page);
if (--state->refcnt == 0) {
- pci_unmap_page(efx->pci_dev,
+ dma_unmap_page(&efx->pci_dev->dev,
state->dma_addr,
efx_rx_buf_size(efx),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
- pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
- rx_buf->len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
+ rx_buf->len, DMA_FROM_DEVICE);
}
}
@@ -336,6 +336,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
/**
* efx_fast_push_rx_descriptors - push new RX descriptors quickly
* @rx_queue: RX descriptor queue
+ *
* This will aim to fill the RX descriptor queue up to
* @rx_queue->@max_fill. If there is insufficient atomic
* memory to do so, a slow fill will be scheduled.
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index de4c006..96068d1 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -120,19 +120,6 @@ static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
return rc;
}
-static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
-{
- int rc = 0;
-
- /* Test register access */
- if (efx->type->test_registers) {
- rc = efx->type->test_registers(efx);
- tests->registers = rc ? -1 : 1;
- }
-
- return rc;
-}
-
/**************************************************************************
*
* Interrupt and event queue testing
@@ -488,7 +475,7 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
skb = state->skbs[i];
if (skb && !skb_shared(skb))
++tx_done;
- dev_kfree_skb_any(skb);
+ dev_kfree_skb(skb);
}
netif_tx_unlock_bh(efx->net_dev);
@@ -699,8 +686,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
{
enum efx_loopback_mode loopback_mode = efx->loopback_mode;
int phy_mode = efx->phy_mode;
- enum reset_type reset_method = RESET_TYPE_INVISIBLE;
- int rc_test = 0, rc_reset = 0, rc;
+ int rc_test = 0, rc_reset, rc;
efx_selftest_async_cancel(efx);
@@ -737,44 +723,26 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
*/
netif_device_detach(efx->net_dev);
- mutex_lock(&efx->mac_lock);
- if (efx->loopback_modes) {
- /* We need the 312 clock from the PHY to test the XMAC
- * registers, so move into XGMII loopback if available */
- if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
- efx->loopback_mode = LOOPBACK_XGMII;
- else
- efx->loopback_mode = __ffs(efx->loopback_modes);
- }
-
- __efx_reconfigure_port(efx);
- mutex_unlock(&efx->mac_lock);
-
- /* free up all consumers of SRAM (including all the queues) */
- efx_reset_down(efx, reset_method);
-
- rc = efx_test_chip(efx, tests);
- if (rc && !rc_test)
- rc_test = rc;
+ if (efx->type->test_chip) {
+ rc_reset = efx->type->test_chip(efx, tests);
+ if (rc_reset) {
+ netif_err(efx, hw, efx->net_dev,
+ "Unable to recover from chip test\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ return rc_reset;
+ }
- /* reset the chip to recover from the register test */
- rc_reset = efx->type->reset(efx, reset_method);
+ if ((tests->registers < 0) && !rc_test)
+ rc_test = -EIO;
+ }
/* Ensure that the phy is powered and out of loopback
* for the bist and loopback tests */
+ mutex_lock(&efx->mac_lock);
efx->phy_mode &= ~PHY_MODE_LOW_POWER;
efx->loopback_mode = LOOPBACK_NONE;
-
- rc = efx_reset_up(efx, reset_method, rc_reset == 0);
- if (rc && !rc_reset)
- rc_reset = rc;
-
- if (rc_reset) {
- netif_err(efx, drv, efx->net_dev,
- "Unable to recover from chip test\n");
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
- return rc_reset;
- }
+ __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
rc = efx_test_phy(efx, tests, flags);
if (rc && !rc_test)
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 9f8d7ce..6bafd21 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -25,10 +25,12 @@
#include "workarounds.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
+#include "selftest.h"
/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
static void siena_init_wol(struct efx_nic *efx);
+static int siena_reset_hw(struct efx_nic *efx, enum reset_type method);
static void siena_push_irq_moderation(struct efx_channel *channel)
@@ -154,10 +156,29 @@ static const struct efx_nic_register_test siena_register_tests[] = {
EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) },
};
-static int siena_test_registers(struct efx_nic *efx)
+static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
- return efx_nic_test_registers(efx, siena_register_tests,
- ARRAY_SIZE(siena_register_tests));
+ enum reset_type reset_method = reset_method;
+ int rc, rc2;
+
+ efx_reset_down(efx, reset_method);
+
+ /* Reset the chip immediately so that it is completely
+ * quiescent regardless of what any VF driver does.
+ */
+ rc = siena_reset_hw(efx, reset_method);
+ if (rc)
+ goto out;
+
+ tests->registers =
+ efx_nic_test_registers(efx, siena_register_tests,
+ ARRAY_SIZE(siena_register_tests))
+ ? -1 : 1;
+
+ rc = siena_reset_hw(efx, reset_method);
+out:
+ rc2 = efx_reset_up(efx, reset_method, rc == 0);
+ return rc ? rc : rc2;
}
/**************************************************************************
@@ -437,8 +458,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
MAC_STAT(tx_bytes, TX_BYTES);
MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
- mac_stats->tx_good_bytes = (mac_stats->tx_bytes -
- mac_stats->tx_bad_bytes);
+ efx_update_diff_stat(&mac_stats->tx_good_bytes,
+ mac_stats->tx_bytes - mac_stats->tx_bad_bytes);
MAC_STAT(tx_packets, TX_PKTS);
MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
MAC_STAT(tx_pause, TX_PAUSE_PKTS);
@@ -471,8 +492,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
MAC_STAT(rx_bytes, RX_BYTES);
MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
- mac_stats->rx_good_bytes = (mac_stats->rx_bytes -
- mac_stats->rx_bad_bytes);
+ efx_update_diff_stat(&mac_stats->rx_good_bytes,
+ mac_stats->rx_bytes - mac_stats->rx_bad_bytes);
MAC_STAT(rx_packets, RX_PKTS);
MAC_STAT(rx_good, RX_GOOD_PKTS);
MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
@@ -649,7 +670,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.get_wol = siena_get_wol,
.set_wol = siena_set_wol,
.resume_wol = siena_init_wol,
- .test_registers = siena_test_registers,
+ .test_chip = siena_test_chip,
.test_nvram = efx_mcdi_nvram_test_all,
.revision = EFX_REV_SIENA_A0,
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 94d0365..9b225a7 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -36,15 +36,15 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
unsigned int *bytes_compl)
{
if (buffer->unmap_len) {
- struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+ struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
buffer->unmap_len);
if (buffer->unmap_single)
- pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
+ DMA_TO_DEVICE);
else
- pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
+ DMA_TO_DEVICE);
buffer->unmap_len = 0;
buffer->unmap_single = false;
}
@@ -138,7 +138,7 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
struct efx_nic *efx = tx_queue->efx;
- struct pci_dev *pci_dev = efx->pci_dev;
+ struct device *dma_dev = &efx->pci_dev->dev;
struct efx_tx_buffer *buffer;
skb_frag_t *fragment;
unsigned int len, unmap_len = 0, fill_level, insert_ptr;
@@ -167,17 +167,17 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
fill_level = tx_queue->insert_count - tx_queue->old_read_count;
q_space = efx->txq_entries - 1 - fill_level;
- /* Map for DMA. Use pci_map_single rather than pci_map_page
+ /* Map for DMA. Use dma_map_single rather than dma_map_page
* since this is more efficient on machines with sparse
* memory.
*/
unmap_single = true;
- dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+ dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
/* Process all fragments */
while (1) {
- if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
- goto pci_err;
+ if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+ goto dma_err;
/* Store fields for marking in the per-fragment final
* descriptor */
@@ -246,7 +246,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
i++;
/* Map for DMA */
unmap_single = false;
- dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len,
+ dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
DMA_TO_DEVICE);
}
@@ -261,7 +261,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
return NETDEV_TX_OK;
- pci_err:
+ dma_err:
netif_err(efx, tx_err, efx->net_dev,
" TX queue %d could not map skb with %d bytes %d "
"fragments for DMA\n", tx_queue->queue, skb->len,
@@ -284,11 +284,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Free the fragment we were mid-way through pushing */
if (unmap_len) {
if (unmap_single)
- pci_unmap_single(pci_dev, unmap_addr, unmap_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(dma_dev, unmap_addr, unmap_len,
+ DMA_TO_DEVICE);
else
- pci_unmap_page(pci_dev, unmap_addr, unmap_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(dma_dev, unmap_addr, unmap_len,
+ DMA_TO_DEVICE);
}
return rc;
@@ -651,17 +651,8 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
protocol);
if (protocol == htons(ETH_P_8021Q)) {
- /* Find the encapsulated protocol; reset network header
- * and transport header based on that. */
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
- skb_set_network_header(skb, sizeof(*veh));
- if (protocol == htons(ETH_P_IP))
- skb_set_transport_header(skb, sizeof(*veh) +
- 4 * ip_hdr(skb)->ihl);
- else if (protocol == htons(ETH_P_IPV6))
- skb_set_transport_header(skb, sizeof(*veh) +
- sizeof(struct ipv6hdr));
}
if (protocol == htons(ETH_P_IP)) {
@@ -684,20 +675,19 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
*/
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
-
- struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+ struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
struct efx_tso_header *tsoh;
dma_addr_t dma_addr;
u8 *base_kva, *kva;
- base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
+ base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
if (base_kva == NULL) {
netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
"Unable to allocate page for TSO headers\n");
return -ENOMEM;
}
- /* pci_alloc_consistent() allocates pages. */
+ /* dma_alloc_coherent() allocates pages. */
EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
@@ -714,7 +704,7 @@ static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
struct efx_tso_header *tsoh,
- struct pci_dev *pci_dev)
+ struct device *dma_dev)
{
struct efx_tso_header **p;
unsigned long base_kva;
@@ -731,7 +721,7 @@ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
p = &(*p)->next;
}
- pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
+ dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}
static struct efx_tso_header *
@@ -743,11 +733,11 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
if (unlikely(!tsoh))
return NULL;
- tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
+ tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
TSOH_BUFFER(tsoh), header_len,
- PCI_DMA_TODEVICE);
- if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
- tsoh->dma_addr))) {
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
+ tsoh->dma_addr))) {
kfree(tsoh);
return NULL;
}
@@ -759,9 +749,9 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
- pci_unmap_single(tx_queue->efx->pci_dev,
+ dma_unmap_single(&tx_queue->efx->pci_dev->dev,
tsoh->dma_addr, tsoh->unmap_len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
kfree(tsoh);
}
@@ -892,13 +882,13 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
unmap_addr = (buffer->dma_addr + buffer->len -
buffer->unmap_len);
if (buffer->unmap_single)
- pci_unmap_single(tx_queue->efx->pci_dev,
+ dma_unmap_single(&tx_queue->efx->pci_dev->dev,
unmap_addr, buffer->unmap_len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_page(tx_queue->efx->pci_dev,
+ dma_unmap_page(&tx_queue->efx->pci_dev->dev,
unmap_addr, buffer->unmap_len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer->unmap_len = 0;
}
buffer->len = 0;
@@ -927,7 +917,6 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
- st->packet_space = st->full_packet_size;
st->out_len = skb->len - st->header_len;
st->unmap_len = 0;
st->unmap_single = false;
@@ -954,9 +943,9 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
int hl = st->header_len;
int len = skb_headlen(skb) - hl;
- st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
- len, PCI_DMA_TODEVICE);
- if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
+ st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
+ len, DMA_TO_DEVICE);
+ if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
st->unmap_single = true;
st->unmap_len = len;
st->in_len = len;
@@ -1008,7 +997,7 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
buffer->continuation = !end_of_packet;
if (st->in_len == 0) {
- /* Transfer ownership of the pci mapping */
+ /* Transfer ownership of the DMA mapping */
buffer->unmap_len = st->unmap_len;
buffer->unmap_single = st->unmap_single;
st->unmap_len = 0;
@@ -1181,18 +1170,18 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
mem_err:
netif_err(efx, tx_err, efx->net_dev,
- "Out of memory for TSO headers, or PCI mapping error\n");
+ "Out of memory for TSO headers, or DMA mapping error\n");
dev_kfree_skb_any(skb);
unwind:
/* Free the DMA mapping we were in the process of writing out */
if (state.unmap_len) {
if (state.unmap_single)
- pci_unmap_single(efx->pci_dev, state.unmap_addr,
- state.unmap_len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
+ state.unmap_len, DMA_TO_DEVICE);
else
- pci_unmap_page(efx->pci_dev, state.unmap_addr,
- state.unmap_len, PCI_DMA_TODEVICE);
+ dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
+ state.unmap_len, DMA_TO_DEVICE);
}
efx_enqueue_unwind(tx_queue);
@@ -1216,5 +1205,5 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
while (tx_queue->tso_headers_free != NULL)
efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
- tx_queue->efx->pci_dev);
+ &tx_queue->efx->pci_dev->dev);
}
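The tx.c changes above are a mechanical conversion from the legacy pci_* DMA wrappers to the generic DMA API on &pci_dev->dev. For readers unfamiliar with the pattern, here is a small self-contained sketch of the streaming-DMA idiom the file now follows; example_map_for_tx()/example_unmap_for_tx() are illustrative names, only the dma_* calls are real API.
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Map a buffer for device reads, checking for mapping failure, then
 * unmap it later with the matching length and direction. */
static int example_map_for_tx(struct device *dev, void *buf, size_t len,
			      dma_addr_t *addr)
{
	*addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		return -EIO;
	return 0;
}

static void example_unmap_for_tx(struct device *dev, dma_addr_t addr,
				 size_t len)
{
	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
}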
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index ac149d9..b5ba308 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -583,7 +583,7 @@ static inline void ioc3_rx(struct net_device *dev)
unsigned long *rxr;
u32 w0, err;
- rxr = (unsigned long *) ip->rxr; /* Ring base */
+ rxr = ip->rxr; /* Ring base */
rx_entry = ip->rx_ci; /* RX consume index */
n_entry = ip->rx_pi;
@@ -903,7 +903,7 @@ static void ioc3_alloc_rings(struct net_device *dev)
if (ip->rxr == NULL) {
/* Allocate and initialize rx ring. 4kb = 512 entries */
ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
- rxr = (unsigned long *) ip->rxr;
+ rxr = ip->rxr;
if (!rxr)
printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n");
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 8814b2f..8d15f7a 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -773,7 +773,7 @@ static int smc911x_phy_fixed(struct net_device *dev)
return 1;
}
-/*
+/**
* smc911x_phy_reset - reset the phy
* @dev: net device
* @phy: phy address
@@ -819,7 +819,7 @@ static int smc911x_phy_reset(struct net_device *dev, int phy)
return reg & PMT_CTRL_PHY_RST_;
}
-/*
+/**
* smc911x_phy_powerdown - powerdown phy
* @dev: net device
* @phy: phy address
@@ -837,7 +837,7 @@ static void smc911x_phy_powerdown(struct net_device *dev, int phy)
SMC_SET_PHY_BMCR(lp, phy, bmcr);
}
-/*
+/**
* smc911x_phy_check_media - check the media status and adjust BMCR
* @dev: net device
* @init: set true for initialisation
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index fee4493..318adc9 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -942,7 +942,7 @@ static int smc_phy_fixed(struct net_device *dev)
return 1;
}
-/*
+/**
* smc_phy_reset - reset the phy
* @dev: net device
* @phy: phy address
@@ -976,7 +976,7 @@ static int smc_phy_reset(struct net_device *dev, int phy)
return bmcr & BMCR_RESET;
}
-/*
+/**
* smc_phy_powerdown - powerdown phy
* @dev: net device
*
@@ -1000,7 +1000,7 @@ static void smc_phy_powerdown(struct net_device *dev)
smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN);
}
-/*
+/**
* smc_phy_check_media - check the media status and adjust TCR
* @dev: net device
* @init: set true for initialisation
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index dab9c6f..62d1baf 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1442,6 +1442,14 @@ smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, u8 dev_addr[6])
smsc911x_mac_write(pdata, ADDRL, mac_low32);
}
+static void smsc911x_disable_irq_chip(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+
+ smsc911x_reg_write(pdata, INT_EN, 0);
+ smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
+}
+
static int smsc911x_open(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
@@ -1494,8 +1502,7 @@ static int smsc911x_open(struct net_device *dev)
spin_unlock_irq(&pdata->mac_lock);
/* Initialise irqs, but leave all sources disabled */
- smsc911x_reg_write(pdata, INT_EN, 0);
- smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
+ smsc911x_disable_irq_chip(dev);
/* Set interrupt deassertion to 100uS */
intcfg = ((10 << 24) | INT_CFG_IRQ_EN_);
@@ -2215,9 +2222,6 @@ static int __devinit smsc911x_init(struct net_device *dev)
if (smsc911x_soft_reset(pdata))
return -ENODEV;
- /* Disable all interrupt sources until we bring the device up */
- smsc911x_reg_write(pdata, INT_EN, 0);
-
ether_setup(dev);
dev->flags |= IFF_MULTICAST;
netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT);
@@ -2390,11 +2394,11 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
retval = smsc911x_request_resources(pdev);
if (retval)
- goto out_return_resources;
+ goto out_request_resources_fail;
retval = smsc911x_enable_resources(pdev);
if (retval)
- goto out_disable_resources;
+ goto out_enable_resources_fail;
if (pdata->ioaddr == NULL) {
SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
@@ -2434,8 +2438,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
smsc911x_reg_write(pdata, INT_CFG, intcfg);
/* Ensure interrupts are globally disabled before connecting ISR */
- smsc911x_reg_write(pdata, INT_EN, 0);
- smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
+ smsc911x_disable_irq_chip(dev);
retval = request_irq(dev->irq, smsc911x_irqhandler,
irq_flags | IRQF_SHARED, dev->name, dev);
@@ -2485,7 +2488,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
eth_hw_addr_random(dev);
smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
SMSC_TRACE(pdata, probe,
- "MAC Address is set to random_ether_addr");
+ "MAC Address is set to eth_random_addr");
}
}
@@ -2501,8 +2504,9 @@ out_free_irq:
free_irq(dev->irq, dev);
out_disable_resources:
(void)smsc911x_disable_resources(pdev);
-out_return_resources:
+out_enable_resources_fail:
smsc911x_free_resources(pdev);
+out_request_resources_fail:
platform_set_drvdata(pdev, NULL);
iounmap(pdata->ioaddr);
free_netdev(dev);
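The probe error-path relabelling above renames each goto target after the step that failed rather than after the cleanup it performs, so every failure site can be matched to its unwind at a glance. A generic, self-contained sketch of that shape, with hypothetical request_a()/enable_b()/free_a() standing in for the real smsc911x resource helpers:
/* Hypothetical stand-ins, defined only so the sketch compiles on its own. */
static int request_a(void) { return 0; }
static int enable_b(void) { return 0; }
static void free_a(void) { }

/* Each label is named after the failing step and undoes exactly the
 * steps that had already succeeded when the jump was taken. */
static int example_probe(void)
{
	int ret;

	ret = request_a();
	if (ret)
		goto out_request_fail;	/* nothing to undo yet */

	ret = enable_b();
	if (ret)
		goto out_enable_fail;	/* undo only step 1 */

	return 0;

out_enable_fail:
	free_a();
out_request_fail:
	return ret;
}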
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index fd33b21..1fcd914e 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1640,8 +1640,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_free_io_4;
/* descriptors are aligned due to the nature of pci_alloc_consistent */
- pd->tx_ring = (struct smsc9420_dma_desc *)
- (pd->rx_ring + RX_RING_SIZE);
+ pd->tx_ring = (pd->rx_ring + RX_RING_SIZE);
pd->tx_dma_addr = pd->rx_dma_addr +
sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 0364283..9f44827 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -13,7 +13,7 @@ config STMMAC_ETH
if STMMAC_ETH
config STMMAC_PLATFORM
- tristate "STMMAC platform bus support"
+ bool "STMMAC Platform bus support"
depends on STMMAC_ETH
default y
---help---
@@ -26,7 +26,7 @@ config STMMAC_PLATFORM
If unsure, say N.
config STMMAC_PCI
- tristate "STMMAC support on PCI bus (EXPERIMENTAL)"
+ bool "STMMAC PCI bus support (EXPERIMENTAL)"
depends on STMMAC_ETH && PCI && EXPERIMENTAL
---help---
This is to select the Synopsys DWMAC available on PCI devices,
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index bcd54d6..e2d0832 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -95,6 +95,16 @@ struct stmmac_extra_stats {
unsigned long poll_n;
unsigned long sched_timer_n;
unsigned long normal_irq_n;
+ unsigned long mmc_tx_irq_n;
+ unsigned long mmc_rx_irq_n;
+ unsigned long mmc_rx_csum_offload_irq_n;
+ /* EEE */
+ unsigned long irq_receive_pmt_irq_n;
+ unsigned long irq_tx_path_in_lpi_mode_n;
+ unsigned long irq_tx_path_exit_lpi_mode_n;
+ unsigned long irq_rx_path_in_lpi_mode_n;
+ unsigned long irq_rx_path_exit_lpi_mode_n;
+ unsigned long phy_eee_wakeup_error_n;
};
/* CSR Frequency Access Defines*/
@@ -162,6 +172,17 @@ enum tx_dma_irq_status {
handle_tx_rx = 3,
};
+enum core_specific_irq_mask {
+ core_mmc_tx_irq = 1,
+ core_mmc_rx_irq = 2,
+ core_mmc_rx_csum_offload_irq = 4,
+ core_irq_receive_pmt_irq = 8,
+ core_irq_tx_path_in_lpi_mode = 16,
+ core_irq_tx_path_exit_lpi_mode = 32,
+ core_irq_rx_path_in_lpi_mode = 64,
+ core_irq_rx_path_exit_lpi_mode = 128,
+};
+
/* DMA HW capabilities */
struct dma_features {
unsigned int mbps_10_100;
@@ -208,6 +229,10 @@ struct dma_features {
#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
+/* Default LPI timers */
+#define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8
+#define STMMAC_DEFAULT_TWT_LS_TIMER 0x0
+
struct stmmac_desc_ops {
/* DMA RX descriptor ring initialization */
void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
@@ -278,7 +303,7 @@ struct stmmac_ops {
/* Dump MAC registers */
void (*dump_regs) (void __iomem *ioaddr);
/* Handle extra events on specific interrupts hw dependent */
- void (*host_irq_status) (void __iomem *ioaddr);
+ int (*host_irq_status) (void __iomem *ioaddr);
/* Multicast filter setting */
void (*set_filter) (struct net_device *dev, int id);
/* Flow control setting */
@@ -291,6 +316,10 @@ struct stmmac_ops {
unsigned int reg_n);
void (*get_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
unsigned int reg_n);
+ void (*set_eee_mode) (void __iomem *ioaddr);
+ void (*reset_eee_mode) (void __iomem *ioaddr);
+ void (*set_eee_timer) (void __iomem *ioaddr, int ls, int tw);
+ void (*set_eee_pls) (void __iomem *ioaddr, int link);
};
struct mac_link {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 23478bf..f90fcb5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -36,6 +36,7 @@
#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
enum dwmac1000_irq_status {
+ lpiis_irq = 0x400,
time_stamp_irq = 0x0200,
mmc_rx_csum_offload_irq = 0x0080,
mmc_tx_irq = 0x0040,
@@ -60,6 +61,25 @@ enum power_event {
power_down = 0x00000001,
};
+/* Energy Efficient Ethernet (EEE)
+ *
+ * LPI status, timer and control register offset
+ */
+#define LPI_CTRL_STATUS 0x0030
+#define LPI_TIMER_CTRL 0x0034
+
+/* LPI control and status defines */
+#define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */
+#define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */
+#define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */
+#define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */
+#define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */
+#define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */
+#define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */
+#define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */
+#define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */
+#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
+
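Given the new LPI_CTRL_STATUS bit definitions above, a small hypothetical helper (not part of the patch, and assuming <linux/printk.h> is available) shows how the state and PHY-link bits would typically be decoded from a raw register read:
/* Illustrative debug helper only. */
static inline void example_dump_lpi(u32 lpi)
{
	pr_debug("LPI: tx %s, rx %s, phy link %s\n",
		 (lpi & LPI_CTRL_STATUS_TLPIST) ? "in LPI" : "active",
		 (lpi & LPI_CTRL_STATUS_RLPIST) ? "in LPI" : "active",
		 (lpi & LPI_CTRL_STATUS_PLS) ? "up" : "down");
}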
/* GMAC HW ADDR regs */
#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
(reg * 8))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index b5e4d02..bfe0226 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -194,26 +194,107 @@ static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
}
-static void dwmac1000_irq_status(void __iomem *ioaddr)
+static int dwmac1000_irq_status(void __iomem *ioaddr)
{
u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+ int status = 0;
/* Not used events (e.g. MMC interrupts) are not handled. */
- if ((intr_status & mmc_tx_irq))
- CHIP_DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
+ if ((intr_status & mmc_tx_irq)) {
+ CHIP_DBG(KERN_INFO "GMAC: MMC tx interrupt: 0x%08x\n",
readl(ioaddr + GMAC_MMC_TX_INTR));
- if (unlikely(intr_status & mmc_rx_irq))
- CHIP_DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
+ status |= core_mmc_tx_irq;
+ }
+ if (unlikely(intr_status & mmc_rx_irq)) {
+ CHIP_DBG(KERN_INFO "GMAC: MMC rx interrupt: 0x%08x\n",
readl(ioaddr + GMAC_MMC_RX_INTR));
- if (unlikely(intr_status & mmc_rx_csum_offload_irq))
- CHIP_DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
+ status |= core_mmc_rx_irq;
+ }
+ if (unlikely(intr_status & mmc_rx_csum_offload_irq)) {
+ CHIP_DBG(KERN_INFO "GMAC: MMC rx csum offload: 0x%08x\n",
readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
+ status |= core_mmc_rx_csum_offload_irq;
+ }
if (unlikely(intr_status & pmt_irq)) {
- CHIP_DBG(KERN_DEBUG "GMAC: received Magic frame\n");
+ CHIP_DBG(KERN_INFO "GMAC: received Magic frame\n");
/* clear the PMT bits 5 and 6 by reading the PMT
* status register. */
readl(ioaddr + GMAC_PMT);
+ status |= core_irq_receive_pmt_irq;
}
+ /* MAC tx/rx EEE LPI entry/exit interrupts */
+ if (intr_status & lpiis_irq) {
+ /* Clear the LPI interrupt by reading Reg 12 (LPI control/status) */
+ u32 lpi_status = readl(ioaddr + LPI_CTRL_STATUS);
+
+ if (lpi_status & LPI_CTRL_STATUS_TLPIEN) {
+ CHIP_DBG(KERN_INFO "GMAC TX entered in LPI\n");
+ status |= core_irq_tx_path_in_lpi_mode;
+ }
+ if (lpi_status & LPI_CTRL_STATUS_TLPIEX) {
+ CHIP_DBG(KERN_INFO "GMAC TX exit from LPI\n");
+ status |= core_irq_tx_path_exit_lpi_mode;
+ }
+ if (lpi_status & LPI_CTRL_STATUS_RLPIEN) {
+ CHIP_DBG(KERN_INFO "GMAC RX entered in LPI\n");
+ status |= core_irq_rx_path_in_lpi_mode;
+ }
+ if (lpi_status & LPI_CTRL_STATUS_RLPIEX) {
+ CHIP_DBG(KERN_INFO "GMAC RX exit from LPI\n");
+ status |= core_irq_rx_path_exit_lpi_mode;
+ }
+ }
+
+ return status;
+}
+
+static void dwmac1000_set_eee_mode(void __iomem *ioaddr)
+{
+ u32 value;
+
+ /* Enable the link status receive on the RGMII, SGMII or SMII
+ * receive path and instruct the transmitter to enter the LPI
+ * state. */
+ value = readl(ioaddr + LPI_CTRL_STATUS);
+ value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ writel(value, ioaddr + LPI_CTRL_STATUS);
+}
+
+static void dwmac1000_reset_eee_mode(void __iomem *ioaddr)
+{
+ u32 value;
+
+ value = readl(ioaddr + LPI_CTRL_STATUS);
+ value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
+ writel(value, ioaddr + LPI_CTRL_STATUS);
+}
+
+static void dwmac1000_set_eee_pls(void __iomem *ioaddr, int link)
+{
+ u32 value;
+
+ value = readl(ioaddr + LPI_CTRL_STATUS);
+
+ if (link)
+ value |= LPI_CTRL_STATUS_PLS;
+ else
+ value &= ~LPI_CTRL_STATUS_PLS;
+
+ writel(value, ioaddr + LPI_CTRL_STATUS);
+}
+
+static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
+{
+ int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16);
+
+ /* Program the timers in the LPI timer control register:
+ * LS: minimum time (ms) for which the link
+ * status from PHY should be ok before transmitting
+ * the LPI pattern.
+ * TW: minimum time (us) for which the core waits
+ * after it has stopped transmitting the LPI pattern.
+ */
+ writel(value, ioaddr + LPI_TIMER_CTRL);
}
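As a worked example of the packing done in dwmac1000_set_eee_timer() above, using the default timers defined in common.h (an assumption for illustration; at runtime the driver passes its own tx_lpi_timer):
/* ls = STMMAC_DEFAULT_LIT_LS_TIMER = 0x3E8 (1000 ms), tw = 0:
 *   value = ((0 & 0xffff)) | ((0x3E8 & 0x7ff) << 16) = 0x03E80000
 * i.e. LPI_TIMER_CTRL ends up with LS in bits 16-26 and TW in bits 0-15.
 */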
static const struct stmmac_ops dwmac1000_ops = {
@@ -226,6 +307,10 @@ static const struct stmmac_ops dwmac1000_ops = {
.pmt = dwmac1000_pmt,
.set_umac_addr = dwmac1000_set_umac_addr,
.get_umac_addr = dwmac1000_get_umac_addr,
+ .set_eee_mode = dwmac1000_set_eee_mode,
+ .reset_eee_mode = dwmac1000_reset_eee_mode,
+ .set_eee_timer = dwmac1000_set_eee_timer,
+ .set_eee_pls = dwmac1000_set_eee_pls,
};
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 19e0f4e..f83210e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -72,9 +72,9 @@ static int dwmac100_rx_ipc_enable(void __iomem *ioaddr)
return 0;
}
-static void dwmac100_irq_status(void __iomem *ioaddr)
+static int dwmac100_irq_status(void __iomem *ioaddr)
{
- return;
+ return 0;
}
static void dwmac100_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 6e0360f..e678ce3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -70,6 +70,7 @@
#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
/* DMA Status register defines */
+#define DMA_STATUS_GLPII 0x40000000 /* GMAC LPI interrupt */
#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index fb8377d..4b785e1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -51,7 +51,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 1, bmax,
csum);
-
+ wmb();
entry = (++priv->cur_tx) % txsize;
desc = priv->dma_tx + entry;
@@ -59,6 +59,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
len, DMA_TO_DEVICE);
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum);
+ wmb();
priv->hw->desc->set_tx_owner(desc);
priv->tx_skbuff[entry] = NULL;
} else {
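The wmb() calls added above enforce the usual descriptor-publication ordering: every descriptor field must be visible to the DMA engine before the owner bit is flipped, and the owner write must in turn be ordered before any subsequent doorbell. A sketch of the pattern with a made-up descriptor layout (not the stmmac one; assumes the usual driver includes such as <linux/kernel.h>):
/* Illustrative descriptor; bit 31 of ctrl means "owned by the DMA engine". */
struct example_desc {
	__le32 buf;
	__le32 ctrl;
};

static void example_publish(struct example_desc *d, u32 buf, u32 len)
{
	d->buf  = cpu_to_le32(buf);
	d->ctrl = cpu_to_le32(len & 0x1fff);
	wmb();					/* fields visible first ... */
	d->ctrl |= cpu_to_le32(1U << 31);	/* ... then hand over ownership */
	wmb();					/* ... before any doorbell write */
}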
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 6b5d060..ab4c376 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -26,6 +26,7 @@
#include <linux/clk.h>
#include <linux/stmmac.h>
#include <linux/phy.h>
+#include <linux/pci.h>
#include "common.h"
#ifdef CONFIG_STMMAC_TIMER
#include "stmmac_timer.h"
@@ -86,6 +87,12 @@ struct stmmac_priv {
#endif
int clk_csr;
int synopsys_id;
+ struct timer_list eee_ctrl_timer;
+ bool tx_path_in_lpi_mode;
+ int lpi_irq;
+ int eee_enabled;
+ int eee_active;
+ int tx_lpi_timer;
};
extern int phyaddr;
@@ -95,7 +102,6 @@ extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
-
int stmmac_freeze(struct net_device *ndev);
int stmmac_restore(struct net_device *ndev);
int stmmac_resume(struct net_device *ndev);
@@ -104,12 +110,14 @@ int stmmac_dvr_remove(struct net_device *ndev);
struct stmmac_priv *stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
void __iomem *addr);
+void stmmac_disable_eee_mode(struct stmmac_priv *priv);
+bool stmmac_eee_init(struct stmmac_priv *priv);
#ifdef CONFIG_HAVE_CLK
static inline int stmmac_clk_enable(struct stmmac_priv *priv)
{
if (!IS_ERR(priv->stmmac_clk))
- return clk_enable(priv->stmmac_clk);
+ return clk_prepare_enable(priv->stmmac_clk);
return 0;
}
@@ -119,7 +127,7 @@ static inline void stmmac_clk_disable(struct stmmac_priv *priv)
if (IS_ERR(priv->stmmac_clk))
return;
- clk_disable(priv->stmmac_clk);
+ clk_disable_unprepare(priv->stmmac_clk);
}
static inline int stmmac_clk_get(struct stmmac_priv *priv)
{
@@ -143,3 +151,60 @@ static inline int stmmac_clk_get(struct stmmac_priv *priv)
return 0;
}
#endif /* CONFIG_HAVE_CLK */
+
+
+#ifdef CONFIG_STMMAC_PLATFORM
+extern struct platform_driver stmmac_pltfr_driver;
+static inline int stmmac_register_platform(void)
+{
+ int err;
+
+ err = platform_driver_register(&stmmac_pltfr_driver);
+ if (err)
+ pr_err("stmmac: failed to register the platform driver\n");
+
+ return err;
+}
+static inline void stmmac_unregister_platform(void)
+{
+ platform_driver_unregister(&stmmac_pltfr_driver);
+}
+#else
+static inline int stmmac_register_platform(void)
+{
+ pr_debug("stmmac: do not register the platform driver\n");
+
+ return -EINVAL;
+}
+static inline void stmmac_unregister_platform(void)
+{
+}
+#endif /* CONFIG_STMMAC_PLATFORM */
+
+#ifdef CONFIG_STMMAC_PCI
+extern struct pci_driver stmmac_pci_driver;
+static inline int stmmac_register_pci(void)
+{
+ int err;
+
+ err = pci_register_driver(&stmmac_pci_driver);
+ if (err)
+ pr_err("stmmac: failed to register the PCI driver\n");
+
+ return err;
+}
+static inline void stmmac_unregister_pci(void)
+{
+ pci_unregister_driver(&stmmac_pci_driver);
+}
+#else
+static inline int stmmac_register_pci(void)
+{
+ pr_debug("stmmac: do not register the PCI driver\n");
+
+ return -EINVAL;
+}
+static inline void stmmac_unregister_pci(void)
+{
+}
+#endif /* CONFIG_STMMAC_PCI */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index ce43184..76fd61a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -93,6 +93,16 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(poll_n),
STMMAC_STAT(sched_timer_n),
STMMAC_STAT(normal_irq_n),
+ STMMAC_STAT(mmc_tx_irq_n),
+ STMMAC_STAT(mmc_rx_irq_n),
+ STMMAC_STAT(mmc_rx_csum_offload_irq_n),
+ STMMAC_STAT(irq_receive_pmt_irq_n),
+ STMMAC_STAT(irq_tx_path_in_lpi_mode_n),
+ STMMAC_STAT(irq_tx_path_exit_lpi_mode_n),
+ STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
+ STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
+ STMMAC_STAT(phy_eee_wakeup_error_n),
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
@@ -366,6 +376,11 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
(*(u32 *)p);
}
}
+ if (priv->eee_enabled) {
+ int val = phy_get_eee_err(priv->phydev);
+ if (val)
+ priv->xstats.phy_eee_wakeup_error_n = val;
+ }
}
for (i = 0; i < STMMAC_STATS_LEN; i++) {
char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
@@ -464,6 +479,46 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return 0;
}
+static int stmmac_ethtool_op_get_eee(struct net_device *dev,
+ struct ethtool_eee *edata)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (!priv->dma_cap.eee)
+ return -EOPNOTSUPP;
+
+ edata->eee_enabled = priv->eee_enabled;
+ edata->eee_active = priv->eee_active;
+ edata->tx_lpi_timer = priv->tx_lpi_timer;
+
+ return phy_ethtool_get_eee(priv->phydev, edata);
+}
+
+static int stmmac_ethtool_op_set_eee(struct net_device *dev,
+ struct ethtool_eee *edata)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ priv->eee_enabled = edata->eee_enabled;
+
+ if (!priv->eee_enabled)
+ stmmac_disable_eee_mode(priv);
+ else {
+ /* We are asking to enable EEE, but it is safe to verify
+ * this by invoking the eee_init function, which returns
+ * false on failure.
+ */
+ priv->eee_enabled = stmmac_eee_init(priv);
+ if (!priv->eee_enabled)
+ return -EOPNOTSUPP;
+
+ /* Do not change tx_lpi_timer in case of failure */
+ priv->tx_lpi_timer = edata->tx_lpi_timer;
+ }
+
+ return phy_ethtool_set_eee(priv->phydev, edata);
+}
+
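For reference, the phylib EEE helpers called by the two ethtool ops above are assumed here to have the following prototypes for this kernel generation; treat this as a quoted-from-memory assumption, not an authoritative API listing.
/* Assumed prototypes, normally provided by <linux/phy.h>: */
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable);
int phy_get_eee_err(struct phy_device *phydev);
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data);
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data);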
static const struct ethtool_ops stmmac_ethtool_ops = {
.begin = stmmac_check_if_running,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
@@ -480,6 +535,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_strings = stmmac_get_strings,
.get_wol = stmmac_get_wol,
.set_wol = stmmac_set_wol,
+ .get_eee = stmmac_ethtool_op_get_eee,
+ .set_eee = stmmac_ethtool_op_set_eee,
.get_sset_count = stmmac_get_sset_count,
.get_ts_info = ethtool_op_get_ts_info,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 7096633..f6b04c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -133,6 +133,12 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
+#define STMMAC_DEFAULT_LPI_TIMER 1000
+static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+module_param(eee_timer, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
+#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
+
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
#ifdef CONFIG_STMMAC_DEBUG_FS
@@ -161,6 +167,8 @@ static void stmmac_verify_args(void)
flow_ctrl = FLOW_OFF;
if (unlikely((pause < 0) || (pause > 0xffff)))
pause = PAUSE_TIME;
+ if (eee_timer < 0)
+ eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
@@ -229,6 +237,85 @@ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
phydev->speed);
}
+static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
+{
+ /* Check and enter LPI mode */
+ if ((priv->dirty_tx == priv->cur_tx) &&
+ (priv->tx_path_in_lpi_mode == false))
+ priv->hw->mac->set_eee_mode(priv->ioaddr);
+}
+
+void stmmac_disable_eee_mode(struct stmmac_priv *priv)
+{
+ /* Exit and disable EEE in case we are in the LPI state. */
+ priv->hw->mac->reset_eee_mode(priv->ioaddr);
+ del_timer_sync(&priv->eee_ctrl_timer);
+ priv->tx_path_in_lpi_mode = false;
+}
+
+/**
+ * stmmac_eee_ctrl_timer
+ * @arg: data hook
+ * Description:
+ * If there is no data transfer and if we are not in the LPI state,
+ * then the MAC transmitter can be moved to the LPI state.
+ */
+static void stmmac_eee_ctrl_timer(unsigned long arg)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)arg;
+
+ stmmac_enable_eee_mode(priv);
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
+}
+
+/**
+ * stmmac_eee_init
+ * @priv: private device pointer
+ * Description:
+ * If the EEE support has been enabled while configuring the driver,
+ * and if the GMAC actually supports the EEE (from the HW cap reg) and
+ * the PHY can also manage EEE, then enable the LPI state and start the
+ * timer to verify whether the tx path can enter the LPI state.
+ */
+bool stmmac_eee_init(struct stmmac_priv *priv)
+{
+ bool ret = false;
+
+ /* MAC core supports the EEE feature. */
+ if (priv->dma_cap.eee) {
+ /* Check if the PHY supports EEE */
+ if (phy_init_eee(priv->phydev, 1))
+ goto out;
+
+ priv->eee_active = 1;
+ init_timer(&priv->eee_ctrl_timer);
+ priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
+ priv->eee_ctrl_timer.data = (unsigned long)priv;
+ priv->eee_ctrl_timer.expires = STMMAC_LPI_TIMER(eee_timer);
+ add_timer(&priv->eee_ctrl_timer);
+
+ priv->hw->mac->set_eee_timer(priv->ioaddr,
+ STMMAC_DEFAULT_LIT_LS_TIMER,
+ priv->tx_lpi_timer);
+
+ pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
+
+ ret = true;
+ }
+out:
+ return ret;
+}
+
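A small style note on the timer setup in stmmac_eee_init() above: assuming the setup_timer() helper behaves as in mainline of this era, the open-coded field assignments plus add_timer() could equally be written as the two calls below; this is an observation, not a required change.
/* Equivalent, more compact form (sketch): */
setup_timer(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer,
	    (unsigned long)priv);
mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));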
+static void stmmac_eee_adjust(struct stmmac_priv *priv)
+{
+ /* When EEE has already been initialised we have to modify
+ * the PLS bit in the LPI ctrl & status reg according to the
+ * PHY link status.
+ */
+ if (priv->eee_enabled)
+ priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
+}
+
/**
* stmmac_adjust_link
* @dev: net device structure
@@ -249,6 +336,7 @@ static void stmmac_adjust_link(struct net_device *dev)
phydev->addr, phydev->link);
spin_lock_irqsave(&priv->lock, flags);
+
if (phydev->link) {
u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
@@ -315,6 +403,8 @@ static void stmmac_adjust_link(struct net_device *dev)
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
+ stmmac_eee_adjust(priv);
+
spin_unlock_irqrestore(&priv->lock, flags);
DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
@@ -332,7 +422,7 @@ static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phydev;
- char phy_id[MII_BUS_ID_SIZE + 3];
+ char phy_id_fmt[MII_BUS_ID_SIZE + 3];
char bus_id[MII_BUS_ID_SIZE];
int interface = priv->plat->interface;
priv->oldlink = 0;
@@ -346,11 +436,12 @@ static int stmmac_init_phy(struct net_device *dev)
snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
priv->plat->bus_id);
- snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+ snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
priv->plat->phy_addr);
- pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
+ pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt);
- phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0, interface);
+ phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, 0,
+ interface);
if (IS_ERR(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
@@ -677,7 +768,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
priv->hw->desc->release_tx_desc(p);
- entry = (++priv->dirty_tx) % txsize;
+ priv->dirty_tx++;
}
if (unlikely(netif_queue_stopped(priv->dev) &&
stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
@@ -689,6 +780,11 @@ static void stmmac_tx(struct stmmac_priv *priv)
}
netif_tx_unlock(priv->dev);
}
+
+ if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
+ stmmac_enable_eee_mode(priv);
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
+ }
spin_unlock(&priv->tx_lock);
}
@@ -833,8 +929,9 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
/**
* stmmac_selec_desc_mode
- * @dev : device pointer
- * Description: select the Enhanced/Alternate or Normal descriptors */
+ * @priv : private structure
+ * Description: select the Enhanced/Alternate or Normal descriptors
+ */
static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
{
if (priv->plat->enh_desc) {
@@ -1026,6 +1123,17 @@ static int stmmac_open(struct net_device *dev)
}
}
+ /* Request the IRQ lines */
+ if (priv->lpi_irq != -ENXIO) {
+ ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
+ dev->name, dev);
+ if (unlikely(ret < 0)) {
+ pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
+ __func__, priv->lpi_irq, ret);
+ goto open_error_lpiirq;
+ }
+ }
+
/* Enable the MAC Rx/Tx */
stmmac_set_mac(priv->ioaddr, true);
@@ -1061,12 +1169,19 @@ static int stmmac_open(struct net_device *dev)
if (priv->phydev)
phy_start(priv->phydev);
+ priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;
+ priv->eee_enabled = stmmac_eee_init(priv);
+
napi_enable(&priv->napi);
skb_queue_head_init(&priv->rx_recycle);
netif_start_queue(dev);
return 0;
+open_error_lpiirq:
+ if (priv->wol_irq != dev->irq)
+ free_irq(priv->wol_irq, dev);
+
open_error_wolirq:
free_irq(dev->irq, dev);
@@ -1092,6 +1207,9 @@ static int stmmac_release(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ if (priv->eee_enabled)
+ del_timer_sync(&priv->eee_ctrl_timer);
+
/* Stop and disconnect the PHY */
if (priv->phydev) {
phy_stop(priv->phydev);
@@ -1114,6 +1232,8 @@ static int stmmac_release(struct net_device *dev)
free_irq(dev->irq, dev);
if (priv->wol_irq != dev->irq)
free_irq(priv->wol_irq, dev);
+ if (priv->lpi_irq != -ENXIO)
+ free_irq(priv->lpi_irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
priv->hw->dma->stop_tx(priv->ioaddr);
@@ -1163,6 +1283,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock(&priv->tx_lock);
+ if (priv->tx_path_in_lpi_mode)
+ stmmac_disable_eee_mode(priv);
+
entry = priv->cur_tx % txsize;
#ifdef STMMAC_XMIT_DEBUG
@@ -1211,6 +1334,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
wmb();
priv->hw->desc->set_tx_owner(desc);
+ wmb();
}
/* Interrupt on completion only for the latest segment */
@@ -1226,6 +1350,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* To avoid a race condition */
priv->hw->desc->set_tx_owner(first);
+ wmb();
priv->cur_tx++;
@@ -1289,6 +1414,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
}
wmb();
priv->hw->desc->set_rx_owner(p + entry);
+ wmb();
}
}
@@ -1307,7 +1433,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
display_ring(priv->dma_rx, rxsize);
}
#endif
- count = 0;
while (!priv->hw->desc->get_rx_owner(p)) {
int status;
@@ -1540,10 +1665,37 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
return IRQ_NONE;
}
- if (priv->plat->has_gmac)
- /* To handle GMAC own interrupts */
- priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
+ /* To handle GMAC own interrupts */
+ if (priv->plat->has_gmac) {
+ int status = priv->hw->mac->host_irq_status((void __iomem *)
+ dev->base_addr);
+ if (unlikely(status)) {
+ if (status & core_mmc_tx_irq)
+ priv->xstats.mmc_tx_irq_n++;
+ if (status & core_mmc_rx_irq)
+ priv->xstats.mmc_rx_irq_n++;
+ if (status & core_mmc_rx_csum_offload_irq)
+ priv->xstats.mmc_rx_csum_offload_irq_n++;
+ if (status & core_irq_receive_pmt_irq)
+ priv->xstats.irq_receive_pmt_irq_n++;
+
+ /* For LPI we need to save the tx status */
+ if (status & core_irq_tx_path_in_lpi_mode) {
+ priv->xstats.irq_tx_path_in_lpi_mode_n++;
+ priv->tx_path_in_lpi_mode = true;
+ }
+ if (status & core_irq_tx_path_exit_lpi_mode) {
+ priv->xstats.irq_tx_path_exit_lpi_mode_n++;
+ priv->tx_path_in_lpi_mode = false;
+ }
+ if (status & core_irq_rx_path_in_lpi_mode)
+ priv->xstats.irq_rx_path_in_lpi_mode_n++;
+ if (status & core_irq_rx_path_exit_lpi_mode)
+ priv->xstats.irq_rx_path_exit_lpi_mode_n++;
+ }
+ }
+ /* To handle DMA interrupts */
stmmac_dma_interrupt(priv);
return IRQ_HANDLED;
@@ -1861,6 +2013,8 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
/**
* stmmac_dvr_probe
* @device: device pointer
+ * @plat_dat: platform data pointer
+ * @addr: iobase memory address
* Description: this is the main probe function used to
* call the alloc_etherdev, allocate the priv structure.
*/
@@ -2090,6 +2244,34 @@ int stmmac_restore(struct net_device *ndev)
}
#endif /* CONFIG_PM */
+/* Driver can be configured w/ and w/o both the PCI and the platform drivers
+ * depending on the configuration selected.
+ */
+static int __init stmmac_init(void)
+{
+ int err_plt = 0;
+ int err_pci = 0;
+
+ err_plt = stmmac_register_platform();
+ err_pci = stmmac_register_pci();
+
+ if ((err_pci) && (err_plt)) {
+ pr_err("stmmac: driver registration failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void __exit stmmac_exit(void)
+{
+ stmmac_unregister_platform();
+ stmmac_unregister_pci();
+}
+
+module_init(stmmac_init);
+module_exit(stmmac_exit);
+
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
@@ -2099,42 +2281,38 @@ static int __init stmmac_cmdline_opt(char *str)
return -EINVAL;
while ((opt = strsep(&str, ",")) != NULL) {
if (!strncmp(opt, "debug:", 6)) {
- if (strict_strtoul(opt + 6, 0, (unsigned long *)&debug))
+ if (kstrtoint(opt + 6, 0, &debug))
goto err;
} else if (!strncmp(opt, "phyaddr:", 8)) {
- if (strict_strtoul(opt + 8, 0,
- (unsigned long *)&phyaddr))
+ if (kstrtoint(opt + 8, 0, &phyaddr))
goto err;
} else if (!strncmp(opt, "dma_txsize:", 11)) {
- if (strict_strtoul(opt + 11, 0,
- (unsigned long *)&dma_txsize))
+ if (kstrtoint(opt + 11, 0, &dma_txsize))
goto err;
} else if (!strncmp(opt, "dma_rxsize:", 11)) {
- if (strict_strtoul(opt + 11, 0,
- (unsigned long *)&dma_rxsize))
+ if (kstrtoint(opt + 11, 0, &dma_rxsize))
goto err;
} else if (!strncmp(opt, "buf_sz:", 7)) {
- if (strict_strtoul(opt + 7, 0,
- (unsigned long *)&buf_sz))
+ if (kstrtoint(opt + 7, 0, &buf_sz))
goto err;
} else if (!strncmp(opt, "tc:", 3)) {
- if (strict_strtoul(opt + 3, 0, (unsigned long *)&tc))
+ if (kstrtoint(opt + 3, 0, &tc))
goto err;
} else if (!strncmp(opt, "watchdog:", 9)) {
- if (strict_strtoul(opt + 9, 0,
- (unsigned long *)&watchdog))
+ if (kstrtoint(opt + 9, 0, &watchdog))
goto err;
} else if (!strncmp(opt, "flow_ctrl:", 10)) {
- if (strict_strtoul(opt + 10, 0,
- (unsigned long *)&flow_ctrl))
+ if (kstrtoint(opt + 10, 0, &flow_ctrl))
goto err;
} else if (!strncmp(opt, "pause:", 6)) {
- if (strict_strtoul(opt + 6, 0, (unsigned long *)&pause))
+ if (kstrtoint(opt + 6, 0, &pause))
+ goto err;
+ } else if (!strncmp(opt, "eee_timer:", 10)) {
+ if (kstrtoint(opt + 10, 0, &eee_timer))
goto err;
#ifdef CONFIG_STMMAC_TIMER
} else if (!strncmp(opt, "tmrate:", 7)) {
- if (strict_strtoul(opt + 7, 0,
- (unsigned long *)&tmrate))
+ if (kstrtoint(opt + 7, 0, &tmrate))
goto err;
#endif
}
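The parsing changes above replace strict_strtoul() plus unsigned-long casts with kstrtoint(), which parses straight into an int and returns 0 on success or a negative errno on failure. A minimal, self-contained illustration (the "foo:" option name is hypothetical):
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>

/* Parse a "foo:<number>" option; base 0 also accepts 0x-prefixed values. */
static int example_parse_foo(const char *opt, int *val)
{
	if (strncmp(opt, "foo:", 4))
		return -EINVAL;
	return kstrtoint(opt + 4, 0, val);
}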
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 58fab53..13afb8e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -125,7 +125,7 @@ err_out_req_reg_failed:
}
/**
- * stmmac_dvr_remove
+ * stmmac_pci_remove
*
* @pdev: platform device pointer
* Description: this function calls the main to free the net resources
@@ -179,7 +179,7 @@ static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
MODULE_DEVICE_TABLE(pci, stmmac_id_table);
-static struct pci_driver stmmac_driver = {
+struct pci_driver stmmac_pci_driver = {
.name = STMMAC_RESOURCE_NAME,
.id_table = stmmac_id_table,
.probe = stmmac_pci_probe,
@@ -190,33 +190,6 @@ static struct pci_driver stmmac_driver = {
#endif
};
-/**
- * stmmac_init_module - Entry point for the driver
- * Description: This function is the entry point for the driver.
- */
-static int __init stmmac_init_module(void)
-{
- int ret;
-
- ret = pci_register_driver(&stmmac_driver);
- if (ret < 0)
- pr_err("%s: ERROR: driver registration failed\n", __func__);
-
- return ret;
-}
-
-/**
- * stmmac_cleanup_module - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver.
- */
-static void __exit stmmac_cleanup_module(void)
-{
- pci_unregister_driver(&stmmac_driver);
-}
-
-module_init(stmmac_init_module);
-module_exit(stmmac_cleanup_module);
-
MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3dd8f08..cd01ee7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -49,7 +49,9 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
* are provided. All other properties should be added
* once needed on other platforms.
*/
- if (of_device_is_compatible(np, "st,spear600-gmac")) {
+ if (of_device_is_compatible(np, "st,spear600-gmac") ||
+ of_device_is_compatible(np, "snps,dwmac-3.70a") ||
+ of_device_is_compatible(np, "snps,dwmac")) {
plat->has_gmac = 1;
plat->pmt = 1;
}
@@ -156,6 +158,8 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
if (priv->wol_irq == -ENXIO)
priv->wol_irq = priv->dev->irq;
+ priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+
platform_set_drvdata(pdev, priv->dev);
pr_debug("STMMAC platform driver registration completed");
@@ -190,7 +194,7 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- iounmap((void *)priv->ioaddr);
+ iounmap((void __force __iomem *)priv->ioaddr);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
@@ -250,12 +254,14 @@ static const struct dev_pm_ops stmmac_pltfr_pm_ops;
#endif /* CONFIG_PM */
static const struct of_device_id stmmac_dt_ids[] = {
- { .compatible = "st,spear600-gmac", },
+ { .compatible = "st,spear600-gmac"},
+ { .compatible = "snps,dwmac-3.70a"},
+ { .compatible = "snps,dwmac"},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
-static struct platform_driver stmmac_driver = {
+struct platform_driver stmmac_pltfr_driver = {
.probe = stmmac_pltfr_probe,
.remove = stmmac_pltfr_remove,
.driver = {
@@ -266,8 +272,6 @@ static struct platform_driver stmmac_driver = {
},
};
-module_platform_driver(stmmac_driver);
-
MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 703c8cc..c2a0fe3 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3335,6 +3335,10 @@ static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
addr = np->ops->map_page(np->device, page, 0,
PAGE_SIZE, DMA_FROM_DEVICE);
+ if (!addr) {
+ __free_page(page);
+ return -ENOMEM;
+ }
niu_hash_page(rp, page, addr);
if (rp->rbr_blocks_per_page > 1)
@@ -3513,7 +3517,7 @@ static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
err = 0;
while (index < (rp->rbr_table_size - blocks_per_page)) {
err = niu_rbr_add_page(np, rp, mask, index);
- if (err)
+ if (unlikely(err))
break;
index += blocks_per_page;
@@ -3598,7 +3602,6 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
{
struct netdev_queue *txq;
- unsigned int tx_bytes;
u16 pkt_cnt, tmp;
int cons, index;
u64 cs;
@@ -3621,18 +3624,12 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
netif_printk(np, tx_done, KERN_DEBUG, np->dev,
"%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
- tx_bytes = 0;
- tmp = pkt_cnt;
- while (tmp--) {
- tx_bytes += rp->tx_buffs[cons].skb->len;
+ while (pkt_cnt--)
cons = release_tx_packet(np, rp, cons);
- }
rp->cons = cons;
smp_mb();
- netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes);
-
out:
if (unlikely(netif_tx_queue_stopped(txq) &&
(niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
@@ -4333,7 +4330,6 @@ static void niu_free_channels(struct niu *np)
struct tx_ring_info *rp = &np->tx_rings[i];
niu_free_tx_ring_info(np, rp);
- netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i));
}
kfree(np->tx_rings);
np->tx_rings = NULL;
@@ -6739,8 +6735,6 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
prod = NEXT_TX(rp, prod);
}
- netdev_tx_sent_queue(txq, skb->len);
-
if (prod < rp->prod)
rp->wrap_bit ^= TX_RING_KICK_WRAP;
rp->prod = prod;
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 2a83fc5..967fe8c 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -233,7 +233,6 @@ static void bigmac_init_rings(struct bigmac *bp, int from_irq)
continue;
bp->rx_skbs[i] = skb;
- skb->dev = dev;
/* Because we reserve afterwards. */
skb_put(skb, ETH_FRAME_LEN);
@@ -838,7 +837,6 @@ static void bigmac_rx(struct bigmac *bp)
RX_BUF_ALLOC_SIZE - 34,
DMA_FROM_DEVICE);
bp->rx_skbs[elem] = new_skb;
- new_skb->dev = bp->dev;
skb_put(new_skb, ETH_FRAME_LEN);
skb_reserve(new_skb, 34);
this->rx_addr =
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 3cf4ab7..9ae12d0 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -752,7 +752,6 @@ static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size
if (likely(skb)) {
unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data);
skb_reserve(skb, offset);
- skb->dev = dev;
}
return skb;
}
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index dfc00c4..73f341b 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1249,7 +1249,6 @@ static void happy_meal_clean_rings(struct happy_meal *hp)
static void happy_meal_init_rings(struct happy_meal *hp)
{
struct hmeal_init_block *hb = hp->happy_block;
- struct net_device *dev = hp->dev;
int i;
HMD(("happy_meal_init_rings: counters to zero, "));
@@ -1270,7 +1269,6 @@ static void happy_meal_init_rings(struct happy_meal *hp)
continue;
}
hp->rx_skbs[i] = skb;
- skb->dev = dev;
/* Because we reserve afterwards. */
skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
@@ -2031,7 +2029,6 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
}
dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
hp->rx_skbs[elem] = new_skb;
- new_skb->dev = dev;
skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
hme_write_rxd(hp, this,
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 7d4a040..aeded7f 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -441,7 +441,7 @@ static void qe_rx(struct sunqe *qep)
} else {
skb_reserve(skb, 2);
skb_put(skb, len);
- skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf,
+ skb_copy_to_linear_data(skb, this_qbuf,
len);
skb->protocol = eth_type_trans(skb, qep->dev);
netif_rx(skb);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 447a693..6ce9edd 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -137,14 +137,15 @@ static void print_eth_id(struct net_device *ndev)
#define bdx_disable_interrupts(priv) \
do { WRITE_REG(priv, regIMR, 0); } while (0)
-/* bdx_fifo_init
- * create TX/RX descriptor fifo for host-NIC communication.
+/**
+ * bdx_fifo_init - create TX/RX descriptor fifo for host-NIC communication.
+ * @priv: NIC private structure
+ * @f: fifo to initialize
+ * @fsz_type: fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB
+ * @reg_XXX: offsets of registers relative to base address
+ *
* 1K extra space is allocated at the end of the fifo to simplify
* processing of descriptors that wrap around the fifo's end
- * @priv - NIC private structure
- * @f - fifo to initialize
- * @fsz_type - fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB
- * @reg_XXX - offsets of registers relative to base address
*
* Returns 0 on success, negative value on failure
*
@@ -177,9 +178,10 @@ bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
RET(0);
}
-/* bdx_fifo_free - free all resources used by fifo
- * @priv - NIC private structure
- * @f - fifo to release
+/**
+ * bdx_fifo_free - free all resources used by fifo
+ * @priv: NIC private structure
+ * @f: fifo to release
*/
static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
{
@@ -192,9 +194,9 @@ static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
RET();
}
-/*
+/**
* bdx_link_changed - notifies OS about hw link state.
- * @bdx_priv - hw adapter structure
+ * @priv: hw adapter structure
*/
static void bdx_link_changed(struct bdx_priv *priv)
{
@@ -233,10 +235,10 @@ static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
}
-/* bdx_isr - Interrupt Service Routine for Bordeaux NIC
- * @irq - interrupt number
- * @ndev - network device
- * @regs - CPU registers
+/**
+ * bdx_isr_napi - Interrupt Service Routine for Bordeaux NIC
+ * @irq: interrupt number
+ * @dev: network device
*
* Return IRQ_NONE if it was not our interrupt, IRQ_HANDLED - otherwise
*
@@ -307,8 +309,10 @@ static int bdx_poll(struct napi_struct *napi, int budget)
return work_done;
}
-/* bdx_fw_load - loads firmware to NIC
- * @priv - NIC private structure
+/**
+ * bdx_fw_load - loads firmware to NIC
+ * @priv: NIC private structure
+ *
* Firmware is loaded via TXD fifo, so it must be initialized first.
* Firmware must be loaded once per NIC, not per PCI device provided by the NIC
* (a NIC can have a few of them). So all drivers use the semaphore register to choose one
@@ -380,8 +384,9 @@ static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv)
RET();
}
-/* bdx_hw_start - inits registers and starts HW's Rx and Tx engines
- * @priv - NIC private structure
+/**
+ * bdx_hw_start - inits registers and starts HW's Rx and Tx engines
+ * @priv: NIC private structure
*/
static int bdx_hw_start(struct bdx_priv *priv)
{
@@ -691,12 +696,13 @@ static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
RET(-EOPNOTSUPP);
}
-/*
+/**
* __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid
- * by passing VLAN filter table to hardware
- * @ndev network device
- * @vid VLAN vid
- * @op add or kill operation
+ * @ndev: network device
+ * @vid: VLAN vid
+ * @op: add or kill operation
+ *
+ * Passes VLAN filter table to hardware
*/
static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
{
@@ -722,10 +728,10 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
RET();
}
-/*
+/**
* bdx_vlan_rx_add_vid - kernel hook for adding VLAN vid to hw filtering table
- * @ndev network device
- * @vid VLAN vid to add
+ * @ndev: network device
+ * @vid: VLAN vid to add
*/
static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
{
@@ -733,10 +739,10 @@ static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
return 0;
}
-/*
+/**
* bdx_vlan_rx_kill_vid - kernel hook for killing VLAN vid in hw filtering table
- * @ndev network device
- * @vid VLAN vid to kill
+ * @ndev: network device
+ * @vid: VLAN vid to kill
*/
static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
{
@@ -974,8 +980,9 @@ static inline void bdx_rxdb_free_elem(struct rxdb *db, int n)
* Rx Init *
*************************************************************************/
-/* bdx_rx_init - initialize RX all related HW and SW resources
- * @priv - NIC private structure
+/**
+ * bdx_rx_init - initialize RX all related HW and SW resources
+ * @priv: NIC private structure
*
* Returns 0 on success, negative value on failure
*
@@ -1016,9 +1023,10 @@ err_mem:
return -ENOMEM;
}
-/* bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo
- * @priv - NIC private structure
- * @f - RXF fifo
+/**
+ * bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo
+ * @priv: NIC private structure
+ * @f: RXF fifo
*/
static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
{
@@ -1045,8 +1053,10 @@ static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
}
}
-/* bdx_rx_free - release all Rx resources
- * @priv - NIC private structure
+/**
+ * bdx_rx_free - release all Rx resources
+ * @priv: NIC private structure
+ *
 * It assumes that Rx is disabled in HW
*/
static void bdx_rx_free(struct bdx_priv *priv)
@@ -1067,9 +1077,11 @@ static void bdx_rx_free(struct bdx_priv *priv)
* Rx Engine *
*************************************************************************/
-/* bdx_rx_alloc_skbs - fill rxf fifo with new skbs
- * @priv - nic's private structure
- * @f - RXF fifo that needs skbs
+/**
+ * bdx_rx_alloc_skbs - fill rxf fifo with new skbs
+ * @priv: nic's private structure
+ * @f: RXF fifo that needs skbs
+ *
 * It allocates skbs, builds rxf descriptors and pushes them into the rxf fifo.
* skb's virtual and physical addresses are stored in skb db.
* To calculate free space, func uses cached values of RPTR and WPTR
@@ -1179,13 +1191,15 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
RET();
}
-/* bdx_rx_receive - receives full packets from RXD fifo and pass them to OS
+/**
+ * bdx_rx_receive - receives full packets from RXD fifo and passes them to the OS
 * NOTE: special treatment is given to non-contiguous descriptors that
 * start near the end, wrap around and continue at the beginning. The second
 * part is copied right after the first, and the descriptor is then interpreted
 * as normal. The fifo has extra space to allow such operations.
- * @priv - nic's private structure
- * @f - RXF fifo that needs skbs
+ * @priv: nic's private structure
+ * @f: RXF fifo that needs skbs
+ * @budget: maximum number of packets to receive
*/
/* TBD: replace memcpy func call by explicit inline asm */
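/* Editorial sketch (not part of this patch): the wrap-around handling
 * described above amounts to copying the tail of a descriptor that runs
 * past the end of the ring into the spare area reserved after it, so the
 * descriptor can then be parsed as one contiguous object.  The helper name
 * and parameters below are hypothetical.
 */
static void *linearize_wrapped_desc(u8 *ring, int ring_size, int offset,
				    int desc_size)
{
	int tail = ring_size - offset;	/* bytes left before the wrap point */

	if (desc_size > tail)
		/* The second part sits at ring[0]; copy it into the extra
		 * space the fifo reserves beyond ring_size for this purpose.
		 */
		memcpy(ring + ring_size, ring, desc_size - tail);

	return ring + offset;	/* descriptor is now contiguous */
}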
@@ -1375,9 +1389,10 @@ static inline int bdx_tx_db_size(struct txdb *db)
return db->size - taken;
}
-/* __bdx_tx_ptr_next - helper function, increment read/write pointer + wrap
- * @d - tx data base
- * @ptr - read or write pointer
+/**
+ * __bdx_tx_db_ptr_next - helper function, increment read/write pointer + wrap
+ * @db: tx data base
+ * @pptr: read or write pointer
*/
static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
{
@@ -1394,8 +1409,9 @@ static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
*pptr = db->start;
}
-/* bdx_tx_db_inc_rptr - increment read pointer
- * @d - tx data base
+/**
+ * bdx_tx_db_inc_rptr - increment read pointer
+ * @db: tx data base
*/
static inline void bdx_tx_db_inc_rptr(struct txdb *db)
{
@@ -1403,8 +1419,9 @@ static inline void bdx_tx_db_inc_rptr(struct txdb *db)
__bdx_tx_db_ptr_next(db, &db->rptr);
}
-/* bdx_tx_db_inc_rptr - increment write pointer
- * @d - tx data base
+/**
+ * bdx_tx_db_inc_wptr - increment write pointer
+ * @db: tx data base
*/
static inline void bdx_tx_db_inc_wptr(struct txdb *db)
{
@@ -1413,9 +1430,11 @@ static inline void bdx_tx_db_inc_wptr(struct txdb *db)
a result of write */
}
-/* bdx_tx_db_init - creates and initializes tx db
- * @d - tx data base
- * @sz_type - size of tx fifo
+/**
+ * bdx_tx_db_init - creates and initializes tx db
+ * @d: tx data base
+ * @sz_type: size of tx fifo
+ *
* Returns 0 on success, error code otherwise
*/
static int bdx_tx_db_init(struct txdb *d, int sz_type)
@@ -1441,8 +1460,9 @@ static int bdx_tx_db_init(struct txdb *d, int sz_type)
return 0;
}
-/* bdx_tx_db_close - closes tx db and frees all memory
- * @d - tx data base
+/**
+ * bdx_tx_db_close - closes tx db and frees all memory
+ * @d: tx data base
*/
static void bdx_tx_db_close(struct txdb *d)
{
@@ -1463,9 +1483,11 @@ static struct {
u16 qwords; /* qword = 64 bit */
} txd_sizes[MAX_SKB_FRAGS + 1];
-/* txdb_map_skb - creates and stores dma mappings for skb's data blocks
- * @priv - NIC private structure
- * @skb - socket buffer to map
+/**
+ * bdx_tx_map_skb - creates and stores dma mappings for skb's data blocks
+ * @priv: NIC private structure
+ * @skb: socket buffer to map
+ * @txdd: TX descriptor to use
*
* It makes dma mappings for skb's data blocks and writes them to PBL of
* new tx descriptor. It also stores them in the tx db, so they could be
@@ -1562,9 +1584,10 @@ err_mem:
return -ENOMEM;
}
-/*
+/**
* bdx_tx_space - calculates available space in TX fifo
- * @priv - NIC private structure
+ * @priv: NIC private structure
+ *
* Returns available space in TX fifo in bytes
*/
static inline int bdx_tx_space(struct bdx_priv *priv)
@@ -1579,9 +1602,10 @@ static inline int bdx_tx_space(struct bdx_priv *priv)
return fsize;
}
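/* Editorial sketch (hypothetical helper, not from this driver): free space
 * in a circular fifo is normally derived from the read and write pointers
 * like this, which is what bdx_tx_space() is documented to return for the
 * TXD fifo (RPTR is read back from hardware, WPTR is cached).
 */
static inline int fifo_free_bytes(int rptr, int wptr, int fifo_size)
{
	int free = rptr - wptr;

	if (free <= 0)	/* writer is at or ahead of the reader in this lap */
		free += fifo_size;
	return free;
}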
-/* bdx_tx_transmit - send packet to NIC
- * @skb - packet to send
- * ndev - network device assigned to NIC
+/**
+ * bdx_tx_transmit - send packet to NIC
+ * @skb: packet to send
+ * @ndev: network device assigned to NIC
* Return codes:
* o NETDEV_TX_OK everything ok.
* o NETDEV_TX_BUSY Cannot transmit packet, try later
@@ -1699,8 +1723,10 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-/* bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ.
- * @priv - bdx adapter
+/**
+ * bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ.
+ * @priv: bdx adapter
+ *
* It scans TXF fifo for descriptors, frees DMA mappings and reports to OS
* that those packets were sent
*/
@@ -1761,7 +1787,8 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
spin_unlock(&priv->tx_lock);
}
-/* bdx_tx_free_skbs - frees all skbs from TXD fifo.
+/**
+ * bdx_tx_free_skbs - frees all skbs from TXD fifo.
 * It gets called when the OS stops this dev, e.g. upon "ifconfig down" or rmmod
*/
static void bdx_tx_free_skbs(struct bdx_priv *priv)
@@ -1790,10 +1817,11 @@ static void bdx_tx_free(struct bdx_priv *priv)
bdx_tx_db_close(&priv->txdb);
}
-/* bdx_tx_push_desc - push descriptor to TxD fifo
- * @priv - NIC private structure
- * @data - desc's data
- * @size - desc's size
+/**
+ * bdx_tx_push_desc - push descriptor to TxD fifo
+ * @priv: NIC private structure
+ * @data: desc's data
+ * @size: desc's size
*
* Pushes desc to TxD fifo and overlaps it if needed.
* NOTE: this func does not check for available space. this is responsibility
@@ -1819,10 +1847,11 @@ static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
}
-/* bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way
- * @priv - NIC private structure
- * @data - desc's data
- * @size - desc's size
+/**
+ * bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way
+ * @priv: NIC private structure
+ * @data: desc's data
+ * @size: desc's size
*
* NOTE: this func does check for available space and, if necessary, waits for
* NIC to read existing data before writing new one.
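/* Editorial sketch (names hypothetical): "pushing with overlap", as the two
 * comments above describe, is the classic split copy at the end of a ring
 * buffer; bdx_tx_push_desc_safe() additionally waits until enough of the
 * already-queued data has been consumed before copying.
 */
static void fifo_push_wrapped(u8 *fifo, int fifo_size, int wptr,
			      const u8 *data, int size)
{
	int tail = fifo_size - wptr;			/* room before the wrap */

	if (size <= tail) {
		memcpy(fifo + wptr, data, size);
	} else {
		memcpy(fifo + wptr, data, tail);	/* up to the end ...   */
		memcpy(fifo, data + tail, size - tail);	/* ... rest at offset 0 */
	}
}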
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 6685bbb..1e5d85b 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -27,6 +27,7 @@
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_data/cpsw.h>
@@ -494,11 +495,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
cpsw_intr_disable(priv);
netif_carrier_off(ndev);
- ret = clk_enable(priv->clk);
- if (ret < 0) {
- dev_err(priv->dev, "unable to turn on device clock\n");
- return ret;
- }
+ pm_runtime_get_sync(&priv->pdev->dev);
reg = __raw_readl(&priv->regs->id_ver);
@@ -569,7 +566,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
netif_carrier_off(priv->ndev);
cpsw_ale_stop(priv->ale);
for_each_slave(priv, cpsw_slave_stop, priv);
- clk_disable(priv->clk);
+ pm_runtime_put_sync(&priv->pdev->dev);
return 0;
}
@@ -748,7 +745,7 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
pr_info("Detected MACID = %pM", priv->mac_addr);
} else {
- random_ether_addr(priv->mac_addr);
+ eth_random_addr(priv->mac_addr);
pr_info("Random MACID = %pM", priv->mac_addr);
}
@@ -763,10 +760,12 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
for (i = 0; i < data->slaves; i++)
priv->slaves[i].slave_num = i;
- priv->clk = clk_get(&pdev->dev, NULL);
+ pm_runtime_enable(&pdev->dev);
+ priv->clk = clk_get(&pdev->dev, "fck");
if (IS_ERR(priv->clk)) {
- dev_err(priv->dev, "failed to get device clock)\n");
- ret = -EBUSY;
+ dev_err(&pdev->dev, "fck is not found\n");
+ ret = -ENODEV;
+ goto clean_slave_ret;
}
priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -935,6 +934,8 @@ clean_cpsw_iores_ret:
resource_size(priv->cpsw_res));
clean_clk_ret:
clk_put(priv->clk);
+clean_slave_ret:
+ pm_runtime_disable(&pdev->dev);
kfree(priv->slaves);
clean_ndev_ret:
free_netdev(ndev);
@@ -959,6 +960,7 @@ static int __devexit cpsw_remove(struct platform_device *pdev)
resource_size(priv->cpsw_res));
release_mem_region(priv->cpsw_ss_res->start,
resource_size(priv->cpsw_ss_res));
+ pm_runtime_disable(&pdev->dev);
clk_put(priv->clk);
kfree(priv->slaves);
free_netdev(ndev);
@@ -973,6 +975,8 @@ static int cpsw_suspend(struct device *dev)
if (netif_running(ndev))
cpsw_ndo_stop(ndev);
+ pm_runtime_put_sync(&pdev->dev);
+
return 0;
}
@@ -981,6 +985,7 @@ static int cpsw_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(&pdev->dev);
if (netif_running(ndev))
cpsw_ndo_open(ndev);
return 0;
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index d614c37..3b5c457 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 4da93a5..fce89a0 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -57,7 +57,12 @@
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
#include <linux/davinci_emac.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
#include <asm/irq.h>
#include <asm/page.h>
@@ -339,6 +344,9 @@ struct emac_priv {
u32 rx_addr_type;
atomic_t cur_tx;
const char *phy_id;
+#ifdef CONFIG_OF
+ struct device_node *phy_node;
+#endif
struct phy_device *phydev;
spinlock_t lock;
/*platform specific members*/
@@ -346,10 +354,6 @@ struct emac_priv {
void (*int_disable) (void);
};
-/* clock frequency for EMAC */
-static struct clk *emac_clk;
-static unsigned long emac_bus_frequency;
-
/* EMAC TX Host Error description strings */
static char *emac_txhost_errcodes[16] = {
"No error", "SOP error", "Ownership bit not set in SOP buffer",
@@ -375,7 +379,7 @@ static char *emac_rxhost_errcodes[16] = {
#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg)))
/**
- * emac_dump_regs: Dump important EMAC registers to debug terminal
+ * emac_dump_regs - Dump important EMAC registers to debug terminal
* @priv: The DaVinci EMAC private adapter structure
*
* Executes ethtool set cmd & sets phy mode
@@ -466,7 +470,7 @@ static void emac_dump_regs(struct emac_priv *priv)
}
/**
- * emac_get_drvinfo: Get EMAC driver information
+ * emac_get_drvinfo - Get EMAC driver information
* @ndev: The DaVinci EMAC network adapter
* @info: ethtool info structure containing name and version
*
@@ -481,7 +485,7 @@ static void emac_get_drvinfo(struct net_device *ndev,
}
/**
- * emac_get_settings: Get EMAC settings
+ * emac_get_settings - Get EMAC settings
* @ndev: The DaVinci EMAC network adapter
* @ecmd: ethtool command
*
@@ -500,7 +504,7 @@ static int emac_get_settings(struct net_device *ndev,
}
/**
- * emac_set_settings: Set EMAC settings
+ * emac_set_settings - Set EMAC settings
* @ndev: The DaVinci EMAC network adapter
* @ecmd: ethtool command
*
@@ -518,7 +522,7 @@ static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
}
/**
- * emac_get_coalesce : Get interrupt coalesce settings for this device
+ * emac_get_coalesce - Get interrupt coalesce settings for this device
* @ndev : The DaVinci EMAC network adapter
* @coal : ethtool coalesce settings structure
*
@@ -536,7 +540,7 @@ static int emac_get_coalesce(struct net_device *ndev,
}
/**
- * emac_set_coalesce : Set interrupt coalesce settings for this device
+ * emac_set_coalesce - Set interrupt coalesce settings for this device
* @ndev : The DaVinci EMAC network adapter
* @coal : ethtool coalesce settings structure
*
@@ -614,11 +618,9 @@ static int emac_set_coalesce(struct net_device *ndev,
}
-/**
- * ethtool_ops: DaVinci EMAC Ethtool structure
+/* ethtool_ops: DaVinci EMAC Ethtool structure
*
* Ethtool support for EMAC adapter
- *
*/
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = emac_get_drvinfo,
@@ -631,7 +633,7 @@ static const struct ethtool_ops ethtool_ops = {
};
/**
- * emac_update_phystatus: Update Phy status
+ * emac_update_phystatus - Update Phy status
* @priv: The DaVinci EMAC private adapter structure
*
* Updates phy status and takes action for network queue if required
@@ -697,7 +699,7 @@ static void emac_update_phystatus(struct emac_priv *priv)
}
/**
- * hash_get: Calculate hash value from mac address
+ * hash_get - Calculate hash value from mac address
* @addr: mac address to delete from hash table
*
* Calculates hash value from mac address
@@ -723,9 +725,9 @@ static u32 hash_get(u8 *addr)
}
/**
- * hash_add: Hash function to add mac addr from hash table
+ * hash_add - Hash function to add mac addr to hash table
* @priv: The DaVinci EMAC private adapter structure
- * mac_addr: mac address to delete from hash table
+ * @mac_addr: mac address to add to hash table
*
* Adds mac address to the internal hash table
*
@@ -765,9 +767,9 @@ static int hash_add(struct emac_priv *priv, u8 *mac_addr)
}
/**
- * hash_del: Hash function to delete mac addr from hash table
+ * hash_del - Hash function to delete mac addr from hash table
* @priv: The DaVinci EMAC private adapter structure
- * mac_addr: mac address to delete from hash table
+ * @mac_addr: mac address to delete from hash table
*
* Removes mac address from the internal hash table
*
@@ -807,7 +809,7 @@ static int hash_del(struct emac_priv *priv, u8 *mac_addr)
#define EMAC_ALL_MULTI_CLR 3
/**
- * emac_add_mcast: Set multicast address in the EMAC adapter (Internal)
+ * emac_add_mcast - Set multicast address in the EMAC adapter (Internal)
* @priv: The DaVinci EMAC private adapter structure
* @action: multicast operation to perform
 * @mac_addr: mac address to set
@@ -855,7 +857,7 @@ static void emac_add_mcast(struct emac_priv *priv, u32 action, u8 *mac_addr)
}
/**
- * emac_dev_mcast_set: Set multicast address in the EMAC adapter
+ * emac_dev_mcast_set - Set multicast address in the EMAC adapter
* @ndev: The DaVinci EMAC network adapter
*
* Set multicast addresses in EMAC adapter
@@ -901,7 +903,7 @@ static void emac_dev_mcast_set(struct net_device *ndev)
*************************************************************************/
/**
- * emac_int_disable: Disable EMAC module interrupt (from adapter)
+ * emac_int_disable - Disable EMAC module interrupt (from adapter)
* @priv: The DaVinci EMAC private adapter structure
*
* Disable EMAC interrupt on the adapter
@@ -931,7 +933,7 @@ static void emac_int_disable(struct emac_priv *priv)
}
/**
- * emac_int_enable: Enable EMAC module interrupt (from adapter)
+ * emac_int_enable - Enable EMAC module interrupt (from adapter)
* @priv: The DaVinci EMAC private adapter structure
*
* Enable EMAC interrupt on the adapter
@@ -967,7 +969,7 @@ static void emac_int_enable(struct emac_priv *priv)
}
/**
- * emac_irq: EMAC interrupt handler
+ * emac_irq - EMAC interrupt handler
* @irq: interrupt number
* @dev_id: EMAC network adapter data structure ptr
*
@@ -1060,7 +1062,7 @@ static void emac_tx_handler(void *token, int len, int status)
}
/**
- * emac_dev_xmit: EMAC Transmit function
+ * emac_dev_xmit - EMAC Transmit function
* @skb: SKB pointer
* @ndev: The DaVinci EMAC network adapter
*
@@ -1111,7 +1113,7 @@ fail_tx:
}
/**
- * emac_dev_tx_timeout: EMAC Transmit timeout function
+ * emac_dev_tx_timeout - EMAC Transmit timeout function
* @ndev: The DaVinci EMAC network adapter
*
* Called when system detects that a skb timeout period has expired
@@ -1138,7 +1140,7 @@ static void emac_dev_tx_timeout(struct net_device *ndev)
}
/**
- * emac_set_type0addr: Set EMAC Type0 mac address
+ * emac_set_type0addr - Set EMAC Type0 mac address
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
* @mac_addr: MAC address to set in device
@@ -1165,7 +1167,7 @@ static void emac_set_type0addr(struct emac_priv *priv, u32 ch, char *mac_addr)
}
/**
- * emac_set_type1addr: Set EMAC Type1 mac address
+ * emac_set_type1addr - Set EMAC Type1 mac address
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
* @mac_addr: MAC address to set in device
@@ -1187,7 +1189,7 @@ static void emac_set_type1addr(struct emac_priv *priv, u32 ch, char *mac_addr)
}
/**
- * emac_set_type2addr: Set EMAC Type2 mac address
+ * emac_set_type2addr - Set EMAC Type2 mac address
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
* @mac_addr: MAC address to set in device
@@ -1213,7 +1215,7 @@ static void emac_set_type2addr(struct emac_priv *priv, u32 ch,
}
/**
- * emac_setmac: Set mac address in the adapter (internal function)
+ * emac_setmac - Set mac address in the adapter (internal function)
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
* @mac_addr: MAC address to set in device
@@ -1242,7 +1244,7 @@ static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr)
}
/**
- * emac_dev_setmac_addr: Set mac address in the adapter
+ * emac_dev_setmac_addr - Set mac address in the adapter
* @ndev: The DaVinci EMAC network adapter
* @addr: MAC address to set in device
*
@@ -1277,7 +1279,7 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
}
/**
- * emac_hw_enable: Enable EMAC hardware for packet transmission/reception
+ * emac_hw_enable - Enable EMAC hardware for packet transmission/reception
* @priv: The DaVinci EMAC private adapter structure
*
* Enables EMAC hardware for packet processing - enables PHY, enables RX
@@ -1347,7 +1349,7 @@ static int emac_hw_enable(struct emac_priv *priv)
}
/**
- * emac_poll: EMAC NAPI Poll function
+ * emac_poll - EMAC NAPI Poll function
* @ndev: The DaVinci EMAC network adapter
* @budget: Number of receive packets to process (as told by NAPI layer)
*
@@ -1430,7 +1432,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
- * emac_poll_controller: EMAC Poll controller function
+ * emac_poll_controller - EMAC Poll controller function
* @ndev: The DaVinci EMAC network adapter
*
* Polled functionality used by netconsole and others in non interrupt mode
@@ -1489,7 +1491,7 @@ static void emac_adjust_link(struct net_device *ndev)
*************************************************************************/
/**
- * emac_devioctl: EMAC adapter ioctl
+ * emac_devioctl - EMAC adapter ioctl
* @ndev: The DaVinci EMAC network adapter
* @ifrq: request parameter
* @cmd: command parameter
@@ -1516,7 +1518,7 @@ static int match_first_device(struct device *dev, void *data)
}
/**
- * emac_dev_open: EMAC device open
+ * emac_dev_open - EMAC device open
* @ndev: The DaVinci EMAC network adapter
*
* Called when system wants to start the interface. We init TX/RX channels
@@ -1535,6 +1537,8 @@ static int emac_dev_open(struct net_device *ndev)
int k = 0;
struct emac_priv *priv = netdev_priv(ndev);
+ pm_runtime_get(&priv->pdev->dev);
+
netif_carrier_off(ndev);
for (cnt = 0; cnt < ETH_ALEN; cnt++)
ndev->dev_addr[cnt] = priv->mac_addr[cnt];
@@ -1604,7 +1608,7 @@ static int emac_dev_open(struct net_device *ndev)
priv->phy_id);
ret = PTR_ERR(priv->phydev);
priv->phydev = NULL;
- return ret;
+ goto err;
}
priv->link = 0;
@@ -1645,11 +1649,15 @@ rollback:
res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k-1);
m = res->end;
}
- return -EBUSY;
+
+ ret = -EBUSY;
+err:
+ pm_runtime_put(&priv->pdev->dev);
+ return ret;
}
/**
- * emac_dev_stop: EMAC device stop
+ * emac_dev_stop - EMAC device stop
* @ndev: The DaVinci EMAC network adapter
*
* Called when system wants to stop or down the interface. We stop the network
@@ -1687,11 +1695,12 @@ static int emac_dev_stop(struct net_device *ndev)
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
+ pm_runtime_put(&priv->pdev->dev);
return 0;
}
/**
- * emac_dev_getnetstats: EMAC get statistics function
+ * emac_dev_getnetstats - EMAC get statistics function
* @ndev: The DaVinci EMAC network adapter
*
* Called when system wants to get statistics from the device.
@@ -1762,8 +1771,79 @@ static const struct net_device_ops emac_netdev_ops = {
#endif
};
+#ifdef CONFIG_OF
+static struct emac_platform_data
+ *davinci_emac_of_get_pdata(struct platform_device *pdev,
+ struct emac_priv *priv)
+{
+ struct device_node *np;
+ struct emac_platform_data *pdata = NULL;
+ const u8 *mac_addr;
+ u32 data;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ goto nodata;
+ }
+
+ np = pdev->dev.of_node;
+ if (!np)
+ goto nodata;
+ else
+ pdata->version = EMAC_VERSION_2;
+
+ if (!is_valid_ether_addr(pdata->mac_addr)) {
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr)
+ memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
+ }
+
+ ret = of_property_read_u32(np, "ti,davinci-ctrl-reg-offset", &data);
+ if (!ret)
+ pdata->ctrl_reg_offset = data;
+
+ ret = of_property_read_u32(np, "ti,davinci-ctrl-mod-reg-offset",
+ &data);
+ if (!ret)
+ pdata->ctrl_mod_reg_offset = data;
+
+ ret = of_property_read_u32(np, "ti,davinci-ctrl-ram-offset", &data);
+ if (!ret)
+ pdata->ctrl_ram_offset = data;
+
+ ret = of_property_read_u32(np, "ti,davinci-ctrl-ram-size", &data);
+ if (!ret)
+ pdata->ctrl_ram_size = data;
+
+ ret = of_property_read_u32(np, "ti,davinci-rmii-en", &data);
+ if (!ret)
+ pdata->rmii_en = data;
+
+ ret = of_property_read_u32(np, "ti,davinci-no-bd-ram", &data);
+ if (!ret)
+ pdata->no_bd_ram = data;
+
+ priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!priv->phy_node)
+ pdata->phy_id = "";
+
+ pdev->dev.platform_data = pdata;
+nodata:
+ return pdata;
+}
+#else
+static struct emac_platform_data
+ *davinci_emac_of_get_pdata(struct platform_device *pdev,
+ struct emac_priv *priv)
+{
+ return pdev->dev.platform_data;
+}
+#endif
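/* Editorial illustration: a device-tree node matching the parsing code above
 * might look like the fragment below.  Only the compatible string and the
 * property names are taken from this file; the node name, unit address and
 * values are hypothetical.
 *
 *	emac: ethernet@1e20000 {
 *		compatible = "ti,davinci-dm6467-emac";
 *		ti,davinci-ctrl-reg-offset = <0x3000>;
 *		ti,davinci-ctrl-mod-reg-offset = <0x2000>;
 *		ti,davinci-ctrl-ram-offset = <0>;
 *		ti,davinci-ctrl-ram-size = <0x2000>;
 *		ti,davinci-rmii-en = <1>;
 *		phy-handle = <&phy0>;
 *	};
 */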
/**
- * davinci_emac_probe: EMAC device probe
+ * davinci_emac_probe - EMAC device probe
* @pdev: The DaVinci EMAC device that we are removing
*
 * Called when probing for emac devices. We get details of instances and
@@ -1780,6 +1860,9 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
struct emac_platform_data *pdata;
struct device *emac_dev;
struct cpdma_params dma_params;
+ struct clk *emac_clk;
+ unsigned long emac_bus_frequency;
+
/* obtain emac clock from kernel */
emac_clk = clk_get(&pdev->dev, NULL);
@@ -1788,12 +1871,14 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
return -EBUSY;
}
emac_bus_frequency = clk_get_rate(emac_clk);
+ clk_put(emac_clk);
+
/* TODO: Probe PHY here if possible */
ndev = alloc_etherdev(sizeof(struct emac_priv));
if (!ndev) {
rc = -ENOMEM;
- goto free_clk;
+ goto no_ndev;
}
platform_set_drvdata(pdev, ndev);
@@ -1804,7 +1889,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
- pdata = pdev->dev.platform_data;
+ pdata = davinci_emac_of_get_pdata(pdev, priv);
if (!pdata) {
dev_err(&pdev->dev, "no platform data\n");
rc = -ENODEV;
@@ -1909,15 +1994,13 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
SET_ETHTOOL_OPS(ndev, &ethtool_ops);
netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
- clk_enable(emac_clk);
-
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
rc = register_netdev(ndev);
if (rc) {
dev_err(&pdev->dev, "error in register_netdev\n");
rc = -ENODEV;
- goto netdev_reg_err;
+ goto no_irq_res;
}
@@ -1926,10 +2009,12 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
"(regs: %p, irq: %d)\n",
(void *)priv->emac_base_phys, ndev->irq);
}
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
return 0;
-netdev_reg_err:
- clk_disable(emac_clk);
no_irq_res:
if (priv->txchan)
cpdma_chan_destroy(priv->txchan);
@@ -1943,13 +2028,12 @@ no_dma:
probe_quit:
free_netdev(ndev);
-free_clk:
- clk_put(emac_clk);
+no_ndev:
return rc;
}
/**
- * davinci_emac_remove: EMAC device remove
+ * davinci_emac_remove - EMAC device remove
* @pdev: The DaVinci EMAC device that we are removing
*
* Called when removing the device driver. We disable clock usage and release
@@ -1978,9 +2062,6 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev)
iounmap(priv->remap_addr);
free_netdev(ndev);
- clk_disable(emac_clk);
- clk_put(emac_clk);
-
return 0;
}
@@ -1992,8 +2073,6 @@ static int davinci_emac_suspend(struct device *dev)
if (netif_running(ndev))
emac_dev_stop(ndev);
- clk_disable(emac_clk);
-
return 0;
}
@@ -2002,8 +2081,6 @@ static int davinci_emac_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
- clk_enable(emac_clk);
-
if (netif_running(ndev))
emac_dev_open(ndev);
@@ -2015,21 +2092,26 @@ static const struct dev_pm_ops davinci_emac_pm_ops = {
.resume = davinci_emac_resume,
};
-/**
- * davinci_emac_driver: EMAC platform driver structure
- */
+static const struct of_device_id davinci_emac_of_match[] = {
+ {.compatible = "ti,davinci-dm6467-emac", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
+
+/* davinci_emac_driver: EMAC platform driver structure */
static struct platform_driver davinci_emac_driver = {
.driver = {
.name = "davinci_emac",
.owner = THIS_MODULE,
.pm = &davinci_emac_pm_ops,
+ .of_match_table = of_match_ptr(davinci_emac_of_match),
},
.probe = davinci_emac_probe,
.remove = __devexit_p(davinci_emac_remove),
};
/**
- * davinci_emac_init: EMAC driver module init
+ * davinci_emac_init - EMAC driver module init
*
* Called when initializing the driver. We register the driver with
* the platform.
@@ -2041,7 +2123,7 @@ static int __init davinci_emac_init(void)
late_initcall(davinci_emac_init);
/**
- * davinci_emac_exit: EMAC driver module exit
+ * davinci_emac_exit - EMAC driver module exit
*
* Called when exiting the driver completely. We unregister the driver with
* the platform and exit
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index e4e4708..cd7ee20 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -34,6 +34,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/pm_runtime.h>
#include <linux/davinci_emac.h>
/*
@@ -321,7 +322,9 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
pdev->name, pdev->id);
- data->clk = clk_get(dev, NULL);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+ data->clk = clk_get(&pdev->dev, "fck");
if (IS_ERR(data->clk)) {
dev_err(dev, "failed to get device clock\n");
ret = PTR_ERR(data->clk);
@@ -329,8 +332,6 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
goto bail_out;
}
- clk_enable(data->clk);
-
dev_set_drvdata(dev, data);
data->dev = dev;
spin_lock_init(&data->lock);
@@ -378,10 +379,10 @@ bail_out:
if (data->bus)
mdiobus_free(data->bus);
- if (data->clk) {
- clk_disable(data->clk);
+ if (data->clk)
clk_put(data->clk);
- }
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
kfree(data);
@@ -396,10 +397,10 @@ static int __devexit davinci_mdio_remove(struct platform_device *pdev)
if (data->bus)
mdiobus_free(data->bus);
- if (data->clk) {
- clk_disable(data->clk);
+ if (data->clk)
clk_put(data->clk);
- }
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
dev_set_drvdata(dev, NULL);
@@ -421,8 +422,7 @@ static int davinci_mdio_suspend(struct device *dev)
__raw_writel(ctrl, &data->regs->control);
wait_for_idle(data);
- if (data->clk)
- clk_disable(data->clk);
+ pm_runtime_put_sync(data->dev);
data->suspended = true;
spin_unlock(&data->lock);
@@ -436,8 +436,7 @@ static int davinci_mdio_resume(struct device *dev)
u32 ctrl;
spin_lock(&data->lock);
- if (data->clk)
- clk_enable(data->clk);
+	pm_runtime_get_sync(data->dev);
/* restart the scan state machine */
ctrl = __raw_readl(&data->regs->control);
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index 2d9218f..098b1c4 100644
--- a/drivers/net/ethernet/tile/Kconfig
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -7,6 +7,8 @@ config TILE_NET
depends on TILE
default y
select CRC32
+ select TILE_GXIO_MPIPE if TILEGX
+ select HIGH_RES_TIMERS if TILEGX
---help---
This is a standard Linux network device driver for the
on-chip Tilera Gigabit Ethernet and XAUI interfaces.
diff --git a/drivers/net/ethernet/tile/Makefile b/drivers/net/ethernet/tile/Makefile
index f634f14..0ef9eef 100644
--- a/drivers/net/ethernet/tile/Makefile
+++ b/drivers/net/ethernet/tile/Makefile
@@ -4,7 +4,7 @@
obj-$(CONFIG_TILE_NET) += tile_net.o
ifdef CONFIG_TILEGX
-tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o
+tile_net-y := tilegx.o
else
-tile_net-objs := tilepro.o
+tile_net-y := tilepro.o
endif
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
new file mode 100644
index 0000000..4e2a162
--- /dev/null
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -0,0 +1,1905 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/kernel.h> /* printk() */
+#include <linux/slab.h> /* kmalloc() */
+#include <linux/errno.h> /* error codes */
+#include <linux/types.h> /* size_t */
+#include <linux/interrupt.h>
+#include <linux/in.h>
+#include <linux/irq.h>
+#include <linux/netdevice.h> /* struct device, and other headers */
+#include <linux/etherdevice.h> /* eth_type_trans */
+#include <linux/skbuff.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/hugetlb.h>
+#include <linux/in6.h>
+#include <linux/timer.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/io.h>
+#include <linux/ctype.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+
+#include <asm/checksum.h>
+#include <asm/homecache.h>
+#include <gxio/mpipe.h>
+#include <arch/sim.h>
+
+/* Default transmit lockup timeout period, in jiffies. */
+#define TILE_NET_TIMEOUT (5 * HZ)
+
+/* The maximum number of distinct channels (idesc.channel is 5 bits). */
+#define TILE_NET_CHANNELS 32
+
+/* Maximum number of idescs to handle per "poll". */
+#define TILE_NET_BATCH 128
+
+/* Maximum number of packets to handle per "poll". */
+#define TILE_NET_WEIGHT 64
+
+/* Number of entries in each iqueue. */
+#define IQUEUE_ENTRIES 512
+
+/* Number of entries in each equeue. */
+#define EQUEUE_ENTRIES 2048
+
+/* Total header bytes per equeue slot. Must be big enough for 2 bytes
+ * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to
+ * 60 bytes of actual TCP header. We round up to align to cache lines.
+ */
+#define HEADER_BYTES 128
+
+/* Maximum completions per cpu per device (must be a power of two).
+ * ISSUE: What is the right number here? If this is too small, then
+ * egress might block waiting for free space in a completions array.
+ * ISSUE: At the least, allocate these only for initialized echannels.
+ */
+#define TILE_NET_MAX_COMPS 64
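/* Editorial aside: because TILE_NET_MAX_COMPS is a power of two, the
 * "% TILE_NET_MAX_COMPS" indexing used on the completion queues below is
 * equivalent to the cheaper mask form, e.g.
 *
 *	cid = comps->comp_next & (TILE_NET_MAX_COMPS - 1);
 */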
+
+#define MAX_FRAGS (MAX_SKB_FRAGS + 1)
+
+/* Size of completions data to allocate.
+ * ISSUE: Probably more than needed since we don't use all the channels.
+ */
+#define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps))
+
+/* Size of NotifRing data to allocate. */
+#define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t))
+
+/* Timeout to wake the per-device TX timer after we stop the queue.
+ * We don't want the timeout too short (adds overhead, and might end
+ * up causing stop/wake/stop/wake cycles) or too long (affects performance).
+ * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets.
+ */
+#define TX_TIMER_DELAY_USEC 30
+
+/* Timeout to wake the per-cpu egress timer to free completions. */
+#define EGRESS_TIMER_DELAY_USEC 1000
+
+MODULE_AUTHOR("Tilera Corporation");
+MODULE_LICENSE("GPL");
+
+/* A "packet fragment" (a chunk of memory). */
+struct frag {
+ void *buf;
+ size_t length;
+};
+
+/* A single completion. */
+struct tile_net_comp {
+ /* The "complete_count" when the completion will be complete. */
+ s64 when;
+ /* The buffer to be freed when the completion is complete. */
+ struct sk_buff *skb;
+};
+
+/* The completions for a given cpu and echannel. */
+struct tile_net_comps {
+ /* The completions. */
+ struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS];
+ /* The number of completions used. */
+ unsigned long comp_next;
+ /* The number of completions freed. */
+ unsigned long comp_last;
+};
+
+/* The transmit wake timer for a given cpu and echannel. */
+struct tile_net_tx_wake {
+ int tx_queue_idx;
+ struct hrtimer timer;
+ struct net_device *dev;
+};
+
+/* Info for a specific cpu. */
+struct tile_net_info {
+ /* The NAPI struct. */
+ struct napi_struct napi;
+ /* Packet queue. */
+ gxio_mpipe_iqueue_t iqueue;
+ /* Our cpu. */
+ int my_cpu;
+ /* True if iqueue is valid. */
+ bool has_iqueue;
+ /* NAPI flags. */
+ bool napi_added;
+ bool napi_enabled;
+ /* Number of small sk_buffs which must still be provided. */
+ unsigned int num_needed_small_buffers;
+ /* Number of large sk_buffs which must still be provided. */
+ unsigned int num_needed_large_buffers;
+ /* A timer for handling egress completions. */
+ struct hrtimer egress_timer;
+ /* True if "egress_timer" is scheduled. */
+ bool egress_timer_scheduled;
+ /* Comps for each egress channel. */
+ struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
+ /* Transmit wake timer for each egress channel. */
+ struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
+};
+
+/* Info for egress on a particular egress channel. */
+struct tile_net_egress {
+ /* The "equeue". */
+ gxio_mpipe_equeue_t *equeue;
+ /* The headers for TSO. */
+ unsigned char *headers;
+};
+
+/* Info for a specific device. */
+struct tile_net_priv {
+ /* Our network device. */
+ struct net_device *dev;
+ /* The primary link. */
+ gxio_mpipe_link_t link;
+ /* The primary channel, if open, else -1. */
+ int channel;
+ /* The "loopify" egress link, if needed. */
+ gxio_mpipe_link_t loopify_link;
+ /* The "loopify" egress channel, if open, else -1. */
+ int loopify_channel;
+ /* The egress channel (channel or loopify_channel). */
+ int echannel;
+ /* Total stats. */
+ struct net_device_stats stats;
+};
+
+/* Egress info, indexed by "priv->echannel" (lazily created as needed). */
+static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS];
+
+/* Devices currently associated with each channel.
+ * NOTE: The array entry can become NULL after ifconfig down, but
+ * we do not free the underlying net_device structures, so it is
+ * safe to use a pointer after reading it from this array.
+ */
+static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS];
+
+/* A mutex for "tile_net_devs_for_channel". */
+static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
+
+/* The per-cpu info. */
+static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
+
+/* The "context" for all devices. */
+static gxio_mpipe_context_t context;
+
+/* Buffer sizes and mpipe enum codes for buffer stacks.
+ * See arch/tile/include/gxio/mpipe.h for the set of possible values.
+ */
+#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128
+#define BUFFER_SIZE_SMALL 128
+#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664
+#define BUFFER_SIZE_LARGE 1664
+
+/* The small/large "buffer stacks". */
+static int small_buffer_stack = -1;
+static int large_buffer_stack = -1;
+
+/* Amount of memory allocated for each buffer stack. */
+static size_t buffer_stack_size;
+
+/* The actual memory allocated for the buffer stacks. */
+static void *small_buffer_stack_va;
+static void *large_buffer_stack_va;
+
+/* The buckets. */
+static int first_bucket = -1;
+static int num_buckets = 1;
+
+/* The ingress irq. */
+static int ingress_irq = -1;
+
+/* Text value of tile_net.cpus if passed as a module parameter. */
+static char *network_cpus_string;
+
+/* The actual cpus in "network_cpus". */
+static struct cpumask network_cpus_map;
+
+/* If "loopify=LINK" was specified, this is "LINK". */
+static char *loopify_link_name;
+
+/* If "tile_net.custom" was specified, this is non-NULL. */
+static char *custom_str;
+
+/* The "tile_net.cpus" argument specifies the cpus that are dedicated
+ * to handle ingress packets.
+ *
+ * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where
+ * m, n, x and y are cpu numbers; the listed cpus must be neither
+ * dedicated cpus nor dataplane cpus.
+ */
+static bool network_cpus_init(void)
+{
+ char buf[1024];
+ int rc;
+
+ if (network_cpus_string == NULL)
+ return false;
+
+ rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map);
+ if (rc != 0) {
+ pr_warn("tile_net.cpus=%s: malformed cpu list\n",
+ network_cpus_string);
+ return false;
+ }
+
+ /* Remove dedicated cpus. */
+ cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask);
+
+ if (cpumask_empty(&network_cpus_map)) {
+ pr_warn("Ignoring empty tile_net.cpus='%s'.\n",
+ network_cpus_string);
+ return false;
+ }
+
+ cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
+ pr_info("Linux network CPUs: %s\n", buf);
+ return true;
+}
+
+module_param_named(cpus, network_cpus_string, charp, 0444);
+MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts");
+
+/* The "tile_net.loopify=LINK" argument causes the named device to
+ * actually use "loop0" for ingress, and "loop1" for egress. This
+ * allows an app to sit between the actual link and linux, passing
+ * (some) packets along to linux, and forwarding (some) packets sent
+ * out by linux.
+ */
+module_param_named(loopify, loopify_link_name, charp, 0444);
+MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");
+
+/* The "tile_net.custom" argument causes us to ignore the "conventional"
+ * classifier metadata, in particular, the "l2_offset".
+ */
+module_param_named(custom, custom_str, charp, 0444);
+MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
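/* Editorial example (all values hypothetical): combining the three module
 * parameters above, a kernel command line could contain
 *
 *	tile_net.cpus=1-3,6 tile_net.loopify=gbe0 tile_net.custom=1
 *
 * to dedicate cpus 1, 2, 3 and 6 to ingress, route the "gbe0" device through
 * loop0/loop1, and tell the driver the classifier is customized.
 */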
+
+/* Atomically update a statistics field.
+ * Note that on TILE-Gx, this operation is fire-and-forget on the
+ * issuing core (single-cycle dispatch) and takes only a few cycles
+ * longer than a regular store when the request reaches the home cache.
+ * No expensive bus management overhead is required.
+ */
+static void tile_net_stats_add(unsigned long value, unsigned long *field)
+{
+ BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long));
+ atomic_long_add(value, (atomic_long_t *)field);
+}
+
+/* Allocate and push a buffer. */
+static bool tile_net_provide_buffer(bool small)
+{
+ int stack = small ? small_buffer_stack : large_buffer_stack;
+ const unsigned long buffer_alignment = 128;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(struct sk_buff **) + buffer_alignment;
+ len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE);
+ skb = dev_alloc_skb(len);
+ if (skb == NULL)
+ return false;
+
+ /* Make room for a back-pointer to 'skb' and guarantee alignment. */
+ skb_reserve(skb, sizeof(struct sk_buff **));
+ skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1));
+
+ /* Save a back-pointer to 'skb'. */
+ *(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb;
+
+ /* Make sure "skb" and the back-pointer have been flushed. */
+ wmb();
+
+ gxio_mpipe_push_buffer(&context, stack,
+ (void *)va_to_tile_io_addr(skb->data));
+
+ return true;
+}
+
+/* Convert a raw mpipe buffer to its matching skb pointer. */
+static struct sk_buff *mpipe_buf_to_skb(void *va)
+{
+ /* Acquire the associated "skb". */
+ struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
+ struct sk_buff *skb = *skb_ptr;
+
+ /* Paranoia. */
+ if (skb->data != va) {
+ /* Panic here since there's a reasonable chance
+ * that corrupt buffers means generic memory
+ * corruption, with unpredictable system effects.
+ */
+ panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p",
+ va, skb, skb->data);
+ }
+
+ return skb;
+}
+
+static void tile_net_pop_all_buffers(int stack)
+{
+ for (;;) {
+ tile_io_addr_t addr =
+ (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack);
+ if (addr == 0)
+ break;
+ dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
+ }
+}
+
+/* Provide linux buffers to mPIPE. */
+static void tile_net_provide_needed_buffers(void)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+
+ while (info->num_needed_small_buffers != 0) {
+ if (!tile_net_provide_buffer(true))
+ goto oops;
+ info->num_needed_small_buffers--;
+ }
+
+ while (info->num_needed_large_buffers != 0) {
+ if (!tile_net_provide_buffer(false))
+ goto oops;
+ info->num_needed_large_buffers--;
+ }
+
+ return;
+
+oops:
+ /* Add a description to the page allocation failure dump. */
+ pr_notice("Tile %d still needs some buffers\n", info->my_cpu);
+}
+
+static inline bool filter_packet(struct net_device *dev, void *buf)
+{
+ /* Filter packets received before we're up. */
+ if (dev == NULL || !(dev->flags & IFF_UP))
+ return true;
+
+ /* Filter out packets that aren't for us. */
+ if (!(dev->flags & IFF_PROMISC) &&
+ !is_multicast_ether_addr(buf) &&
+ compare_ether_addr(dev->dev_addr, buf) != 0)
+ return true;
+
+ return false;
+}
+
+static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
+ gxio_mpipe_idesc_t *idesc, unsigned long len)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_priv *priv = netdev_priv(dev);
+
+ /* Encode the actual packet length. */
+ skb_put(skb, len);
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* Acknowledge "good" hardware checksums. */
+ if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ netif_receive_skb(skb);
+
+ /* Update stats. */
+ tile_net_stats_add(1, &priv->stats.rx_packets);
+ tile_net_stats_add(len, &priv->stats.rx_bytes);
+
+ /* Need a new buffer. */
+ if (idesc->size == BUFFER_SIZE_SMALL_ENUM)
+ info->num_needed_small_buffers++;
+ else
+ info->num_needed_large_buffers++;
+}
+
+/* Handle a packet. Return true if "processed", false if "filtered". */
+static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct net_device *dev = tile_net_devs_for_channel[idesc->channel];
+ uint8_t l2_offset;
+ void *va;
+ void *buf;
+ unsigned long len;
+ bool filter;
+
+ /* Drop packets for which no buffer was available.
+ * NOTE: This happens under heavy load.
+ */
+ if (idesc->be) {
+ struct tile_net_priv *priv = netdev_priv(dev);
+ tile_net_stats_add(1, &priv->stats.rx_dropped);
+ gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+ if (net_ratelimit())
+ pr_info("Dropping packet (insufficient buffers).\n");
+ return false;
+ }
+
+ /* Get the "l2_offset", if allowed. */
+ l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
+
+ /* Get the raw buffer VA (includes "headroom"). */
+ va = tile_io_addr_to_va((unsigned long)(long)idesc->va);
+
+ /* Get the actual packet start/length. */
+ buf = va + l2_offset;
+ len = idesc->l2_size - l2_offset;
+
+ /* Point "va" at the raw buffer. */
+ va -= NET_IP_ALIGN;
+
+ filter = filter_packet(dev, buf);
+ if (filter) {
+ gxio_mpipe_iqueue_drop(&info->iqueue, idesc);
+ } else {
+ struct sk_buff *skb = mpipe_buf_to_skb(va);
+
+ /* Skip headroom, and any custom header. */
+ skb_reserve(skb, NET_IP_ALIGN + l2_offset);
+
+ tile_net_receive_skb(dev, skb, idesc, len);
+ }
+
+ gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+ return !filter;
+}
+
+/* Handle some packets for the current CPU.
+ *
+ * This function handles up to TILE_NET_BATCH idescs per call.
+ *
+ * ISSUE: Since we do not provide new buffers until this function is
+ * complete, we must initially provide enough buffers for each network
+ * cpu to fill its iqueue and also its batched idescs.
+ *
+ * ISSUE: The "rotting packet" race condition occurs if a packet
+ * arrives after the queue appears to be empty, and before the
+ * hypervisor interrupt is re-enabled.
+ */
+static int tile_net_poll(struct napi_struct *napi, int budget)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ unsigned int work = 0;
+ gxio_mpipe_idesc_t *idesc;
+ int i, n;
+
+ /* Process packets. */
+ while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) {
+ for (i = 0; i < n; i++) {
+ if (i == TILE_NET_BATCH)
+ goto done;
+ if (tile_net_handle_packet(idesc + i)) {
+ if (++work >= budget)
+ goto done;
+ }
+ }
+ }
+
+ /* There are no packets left. */
+ napi_complete(&info->napi);
+
+ /* Re-enable hypervisor interrupts. */
+ gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring);
+
+ /* HACK: Avoid the "rotting packet" problem. */
+ if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0)
+ napi_schedule(&info->napi);
+
+ /* ISSUE: Handle completions? */
+
+done:
+ tile_net_provide_needed_buffers();
+
+ return work;
+}
+
+/* Handle an ingress interrupt on the current cpu. */
+static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ napi_schedule(&info->napi);
+ return IRQ_HANDLED;
+}
+
+/* Free some completions. This must be called with interrupts blocked. */
+static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue,
+ struct tile_net_comps *comps,
+ int limit, bool force_update)
+{
+ int n = 0;
+ while (comps->comp_last < comps->comp_next) {
+ unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS;
+ struct tile_net_comp *comp = &comps->comp_queue[cid];
+ if (!gxio_mpipe_equeue_is_complete(equeue, comp->when,
+ force_update || n == 0))
+ break;
+ dev_kfree_skb_irq(comp->skb);
+ comps->comp_last++;
+ if (++n == limit)
+ break;
+ }
+ return n;
+}
+
+/* Add a completion. This must be called with interrupts blocked.
+ * tile_net_equeue_try_reserve() will have ensured a free completion entry.
+ */
+static void add_comp(gxio_mpipe_equeue_t *equeue,
+ struct tile_net_comps *comps,
+ uint64_t when, struct sk_buff *skb)
+{
+ int cid = comps->comp_next % TILE_NET_MAX_COMPS;
+ comps->comp_queue[cid].when = when;
+ comps->comp_queue[cid].skb = skb;
+ comps->comp_next++;
+}
+
+static void tile_net_schedule_tx_wake_timer(struct net_device *dev,
+ int tx_queue_idx)
+{
+ struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx);
+ struct tile_net_priv *priv = netdev_priv(dev);
+ struct tile_net_tx_wake *tx_wake = &info->tx_wake[priv->echannel];
+
+ hrtimer_start(&tx_wake->timer,
+ ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
+ HRTIMER_MODE_REL_PINNED);
+}
+
+static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t)
+{
+ struct tile_net_tx_wake *tx_wake =
+ container_of(t, struct tile_net_tx_wake, timer);
+ netif_wake_subqueue(tx_wake->dev, tx_wake->tx_queue_idx);
+ return HRTIMER_NORESTART;
+}
+
+/* Make sure the egress timer is scheduled. */
+static void tile_net_schedule_egress_timer(void)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+
+ if (!info->egress_timer_scheduled) {
+ hrtimer_start(&info->egress_timer,
+ ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL),
+ HRTIMER_MODE_REL_PINNED);
+ info->egress_timer_scheduled = true;
+ }
+}
+
+/* The "function" for "info->egress_timer".
+ *
+ * This timer will reschedule itself as long as there are any pending
+ * completions expected for this tile.
+ */
+static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ unsigned long irqflags;
+ bool pending = false;
+ int i;
+
+ local_irq_save(irqflags);
+
+ /* The timer is no longer scheduled. */
+ info->egress_timer_scheduled = false;
+
+ /* Free all possible comps for this tile. */
+ for (i = 0; i < TILE_NET_CHANNELS; i++) {
+ struct tile_net_egress *egress = &egress_for_echannel[i];
+ struct tile_net_comps *comps = info->comps_for_echannel[i];
+ if (comps->comp_last >= comps->comp_next)
+ continue;
+ tile_net_free_comps(egress->equeue, comps, -1, true);
+ pending = pending || (comps->comp_last < comps->comp_next);
+ }
+
+ /* Reschedule timer if needed. */
+ if (pending)
+ tile_net_schedule_egress_timer();
+
+ local_irq_restore(irqflags);
+
+ return HRTIMER_NORESTART;
+}
+
+/* Helper function for "tile_net_update()".
+ * "dev" (i.e. arg) is the device being brought up or down,
+ * or NULL if all devices are now down.
+ */
+static void tile_net_update_cpu(void *arg)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct net_device *dev = arg;
+
+ if (!info->has_iqueue)
+ return;
+
+ if (dev != NULL) {
+ if (!info->napi_added) {
+ netif_napi_add(dev, &info->napi,
+ tile_net_poll, TILE_NET_WEIGHT);
+ info->napi_added = true;
+ }
+ if (!info->napi_enabled) {
+ napi_enable(&info->napi);
+ info->napi_enabled = true;
+ }
+ enable_percpu_irq(ingress_irq, 0);
+ } else {
+ disable_percpu_irq(ingress_irq);
+ if (info->napi_enabled) {
+ napi_disable(&info->napi);
+ info->napi_enabled = false;
+ }
+ /* FIXME: Drain the iqueue. */
+ }
+}
+
+/* Helper function for tile_net_open() and tile_net_stop().
+ * Always called under tile_net_devs_for_channel_mutex.
+ */
+static int tile_net_update(struct net_device *dev)
+{
+ static gxio_mpipe_rules_t rules; /* too big to fit on the stack */
+ bool saw_channel = false;
+ int channel;
+ int rc;
+ int cpu;
+
+ gxio_mpipe_rules_init(&rules, &context);
+
+ for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
+ if (tile_net_devs_for_channel[channel] == NULL)
+ continue;
+ if (!saw_channel) {
+ saw_channel = true;
+ gxio_mpipe_rules_begin(&rules, first_bucket,
+ num_buckets, NULL);
+ gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
+ }
+ gxio_mpipe_rules_add_channel(&rules, channel);
+ }
+
+ /* NOTE: This can fail if there is no classifier.
+ * ISSUE: Can anything else cause it to fail?
+ */
+ rc = gxio_mpipe_rules_commit(&rules);
+ if (rc != 0) {
+ netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc);
+ return -EIO;
+ }
+
+ /* Update all cpus, sequentially (to protect "netif_napi_add()"). */
+ for_each_online_cpu(cpu)
+ smp_call_function_single(cpu, tile_net_update_cpu,
+ (saw_channel ? dev : NULL), 1);
+
+ /* HACK: Allow packets to flow in the simulator. */
+ if (saw_channel)
+ sim_enable_mpipe_links(0, -1);
+
+ return 0;
+}
+
+/* Allocate and initialize mpipe buffer stacks, and register them in
+ * the mPIPE TLBs, for both small and large packet sizes.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int init_buffer_stacks(struct net_device *dev, int num_buffers)
+{
+ pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
+ int rc;
+
+ /* Compute stack bytes; we round up to 64KB and then use
+ * alloc_pages() so we get the required 64KB alignment as well.
+ */
+ buffer_stack_size =
+ ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers),
+ 64 * 1024);
+
+ /* Allocate two buffer stack indices. */
+ rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0);
+ if (rc < 0) {
+ netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n",
+ rc);
+ return rc;
+ }
+ small_buffer_stack = rc;
+ large_buffer_stack = rc + 1;
+
+ /* Allocate the small memory stack. */
+ small_buffer_stack_va =
+ alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
+ if (small_buffer_stack_va == NULL) {
+ netdev_err(dev,
+ "Could not alloc %zd bytes for buffer stacks\n",
+ buffer_stack_size);
+ return -ENOMEM;
+ }
+ rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack,
+ BUFFER_SIZE_SMALL_ENUM,
+ small_buffer_stack_va,
+ buffer_stack_size, 0);
+ if (rc != 0) {
+ netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc);
+ return rc;
+ }
+ rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack,
+ hash_pte, 0);
+ if (rc != 0) {
+ netdev_err(dev,
+ "gxio_mpipe_register_buffer_memory failed: %d\n",
+ rc);
+ return rc;
+ }
+
+ /* Allocate the large buffer stack. */
+ large_buffer_stack_va =
+ alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
+ if (large_buffer_stack_va == NULL) {
+ netdev_err(dev,
+ "Could not alloc %zd bytes for buffer stacks\n",
+ buffer_stack_size);
+ return -ENOMEM;
+ }
+ rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack,
+ BUFFER_SIZE_LARGE_ENUM,
+ large_buffer_stack_va,
+ buffer_stack_size, 0);
+ if (rc != 0) {
+ netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n",
+ rc);
+ return rc;
+ }
+ rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack,
+ hash_pte, 0);
+ if (rc != 0) {
+ netdev_err(dev,
+ "gxio_mpipe_register_buffer_memory failed: %d\n",
+ rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/* Allocate per-cpu resources (memory for completions and idescs).
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int alloc_percpu_mpipe_resources(struct net_device *dev,
+ int cpu, int ring)
+{
+ struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+ int order, i, rc;
+ struct page *page;
+ void *addr;
+
+ /* Allocate the "comps". */
+ order = get_order(COMPS_SIZE);
+ page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
+ if (page == NULL) {
+ netdev_err(dev, "Failed to alloc %zd bytes comps memory\n",
+ COMPS_SIZE);
+ return -ENOMEM;
+ }
+ addr = pfn_to_kaddr(page_to_pfn(page));
+ memset(addr, 0, COMPS_SIZE);
+ for (i = 0; i < TILE_NET_CHANNELS; i++)
+ info->comps_for_echannel[i] =
+ addr + i * sizeof(struct tile_net_comps);
+
+ /* If this is a network cpu, create an iqueue. */
+ if (cpu_isset(cpu, network_cpus_map)) {
+ order = get_order(NOTIF_RING_SIZE);
+ page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
+ if (page == NULL) {
+ netdev_err(dev,
+ "Failed to alloc %zd bytes iqueue memory\n",
+ NOTIF_RING_SIZE);
+ return -ENOMEM;
+ }
+ addr = pfn_to_kaddr(page_to_pfn(page));
+ rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++,
+ addr, NOTIF_RING_SIZE, 0);
+ if (rc < 0) {
+ netdev_err(dev,
+ "gxio_mpipe_iqueue_init failed: %d\n", rc);
+ return rc;
+ }
+ info->has_iqueue = true;
+ }
+
+ return ring;
+}
+
+/* Initialize NotifGroup and buckets.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int init_notif_group_and_buckets(struct net_device *dev,
+ int ring, int network_cpus_count)
+{
+ int group, rc;
+
+ /* Allocate one NotifGroup. */
+ rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0);
+ if (rc < 0) {
+ netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n",
+ rc);
+ return rc;
+ }
+ group = rc;
+
+ /* Initialize global num_buckets value. */
+ if (network_cpus_count > 4)
+ num_buckets = 256;
+ else if (network_cpus_count > 1)
+ num_buckets = 16;
+
+ /* Allocate some buckets, and set global first_bucket value. */
+ rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0);
+ if (rc < 0) {
+ netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc);
+ return rc;
+ }
+ first_bucket = rc;
+
+ /* Init group and buckets. */
+ rc = gxio_mpipe_init_notif_group_and_buckets(
+ &context, group, ring, network_cpus_count,
+ first_bucket, num_buckets,
+ GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
+ if (rc != 0) {
+ netdev_err(
+ dev,
+ "gxio_mpipe_init_notif_group_and_buckets failed: %d\n",
+ rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/* Create an irq and register it, then activate the irq and request
+ * interrupts on all cores. Note that "ingress_irq" being initialized
+ * is how we know not to call tile_net_init_mpipe() again.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int tile_net_setup_interrupts(struct net_device *dev)
+{
+ int cpu, rc;
+
+ rc = create_irq();
+ if (rc < 0) {
+ netdev_err(dev, "create_irq failed: %d\n", rc);
+ return rc;
+ }
+ ingress_irq = rc;
+ tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
+ rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
+ 0, NULL, NULL);
+ if (rc != 0) {
+ netdev_err(dev, "request_irq failed: %d\n", rc);
+ destroy_irq(ingress_irq);
+ ingress_irq = -1;
+ return rc;
+ }
+
+ for_each_online_cpu(cpu) {
+ struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+ if (info->has_iqueue) {
+ gxio_mpipe_request_notif_ring_interrupt(
+ &context, cpu_x(cpu), cpu_y(cpu),
+ 1, ingress_irq, info->iqueue.ring);
+ }
+ }
+
+ return 0;
+}
+
+/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
+static void tile_net_init_mpipe_fail(void)
+{
+ int cpu;
+
+ /* Do cleanups that require the mpipe context first. */
+ if (small_buffer_stack >= 0)
+ tile_net_pop_all_buffers(small_buffer_stack);
+ if (large_buffer_stack >= 0)
+ tile_net_pop_all_buffers(large_buffer_stack);
+
+ /* Destroy mpipe context so the hardware no longer owns any memory. */
+ gxio_mpipe_destroy(&context);
+
+ for_each_online_cpu(cpu) {
+ struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+ free_pages((unsigned long)(info->comps_for_echannel[0]),
+ get_order(COMPS_SIZE));
+ info->comps_for_echannel[0] = NULL;
+ free_pages((unsigned long)(info->iqueue.idescs),
+ get_order(NOTIF_RING_SIZE));
+ info->iqueue.idescs = NULL;
+ }
+
+ if (small_buffer_stack_va)
+ free_pages_exact(small_buffer_stack_va, buffer_stack_size);
+ if (large_buffer_stack_va)
+ free_pages_exact(large_buffer_stack_va, buffer_stack_size);
+
+ small_buffer_stack_va = NULL;
+ large_buffer_stack_va = NULL;
+ large_buffer_stack = -1;
+ small_buffer_stack = -1;
+ first_bucket = -1;
+}
+
+/* The first time any tilegx network device is opened, we initialize
+ * the global mpipe state. If this step fails, we fail to open the
+ * device, but if it succeeds, we never need to do it again, and since
+ * tile_net can't be unloaded, we never undo it.
+ *
+ * Note that some resources in this path (buffer stack indices,
+ * bindings from init_buffer_stack, etc.) are hypervisor resources
+ * that are freed implicitly by gxio_mpipe_destroy().
+ */
+static int tile_net_init_mpipe(struct net_device *dev)
+{
+ int i, num_buffers, rc;
+ int cpu;
+ int first_ring, ring;
+ int network_cpus_count = cpus_weight(network_cpus_map);
+
+ if (!hash_default) {
+ netdev_err(dev, "Networking requires hash_default!\n");
+ return -EIO;
+ }
+
+ rc = gxio_mpipe_init(&context, 0);
+ if (rc != 0) {
+ netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc);
+ return -EIO;
+ }
+
+ /* Set up the buffer stacks. */
+ num_buffers =
+ network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
+ rc = init_buffer_stacks(dev, num_buffers);
+ if (rc != 0)
+ goto fail;
+
+ /* Provide initial buffers. */
+ rc = -ENOMEM;
+ for (i = 0; i < num_buffers; i++) {
+ if (!tile_net_provide_buffer(true)) {
+ netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
+ goto fail;
+ }
+ }
+ for (i = 0; i < num_buffers; i++) {
+ if (!tile_net_provide_buffer(false)) {
+ netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
+ goto fail;
+ }
+ }
+
+ /* Allocate one NotifRing for each network cpu. */
+ rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
+ if (rc < 0) {
+ netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
+ rc);
+ goto fail;
+ }
+
+ /* Init NotifRings per-cpu. */
+ first_ring = rc;
+ ring = first_ring;
+ for_each_online_cpu(cpu) {
+ rc = alloc_percpu_mpipe_resources(dev, cpu, ring);
+ if (rc < 0)
+ goto fail;
+ ring = rc;
+ }
+
+ /* Initialize NotifGroup and buckets. */
+ rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count);
+ if (rc != 0)
+ goto fail;
+
+ /* Create and enable interrupts. */
+ rc = tile_net_setup_interrupts(dev);
+ if (rc != 0)
+ goto fail;
+
+ return 0;
+
+fail:
+ tile_net_init_mpipe_fail();
+ return rc;
+}
+
+/* Create persistent egress info for a given egress channel.
+ * Note that this may be shared between, say, "gbe0" and "xgbe0".
+ * ISSUE: Defer header allocation until TSO is actually needed?
+ */
+static int tile_net_init_egress(struct net_device *dev, int echannel)
+{
+ struct page *headers_page, *edescs_page, *equeue_page;
+ gxio_mpipe_edesc_t *edescs;
+ gxio_mpipe_equeue_t *equeue;
+ unsigned char *headers;
+ int headers_order, edescs_order, equeue_order;
+ size_t edescs_size;
+ int edma;
+ int rc = -ENOMEM;
+
+ /* Only initialize once. */
+ if (egress_for_echannel[echannel].equeue != NULL)
+ return 0;
+
+ /* Allocate memory for the "headers". */
+ headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES);
+ headers_page = alloc_pages(GFP_KERNEL, headers_order);
+ if (headers_page == NULL) {
+ netdev_warn(dev,
+ "Could not alloc %zd bytes for TSO headers.\n",
+ PAGE_SIZE << headers_order);
+ goto fail;
+ }
+ headers = pfn_to_kaddr(page_to_pfn(headers_page));
+
+ /* Allocate memory for the "edescs". */
+ edescs_size = EQUEUE_ENTRIES * sizeof(*edescs);
+ edescs_order = get_order(edescs_size);
+ edescs_page = alloc_pages(GFP_KERNEL, edescs_order);
+ if (edescs_page == NULL) {
+ netdev_warn(dev,
+ "Could not alloc %zd bytes for eDMA ring.\n",
+ edescs_size);
+ goto fail_headers;
+ }
+ edescs = pfn_to_kaddr(page_to_pfn(edescs_page));
+
+ /* Allocate memory for the "equeue". */
+ equeue_order = get_order(sizeof(*equeue));
+ equeue_page = alloc_pages(GFP_KERNEL, equeue_order);
+ if (equeue_page == NULL) {
+ netdev_warn(dev,
+ "Could not alloc %zd bytes for equeue info.\n",
+ PAGE_SIZE << equeue_order);
+ goto fail_edescs;
+ }
+ equeue = pfn_to_kaddr(page_to_pfn(equeue_page));
+
+ /* Allocate an edma ring. Note that in practice this can't
+ * fail, which is good, because the error paths below never free
+ * an allocated edma ring, so a later failure would leak it.
+ */
+ rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
+ if (rc < 0) {
+ netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n",
+ rc);
+ goto fail_equeue;
+ }
+ edma = rc;
+
+ /* Initialize the equeue. */
+ rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel,
+ edescs, edescs_size, 0);
+ if (rc != 0) {
+ netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
+ goto fail_equeue;
+ }
+
+ /* Done. */
+ egress_for_echannel[echannel].equeue = equeue;
+ egress_for_echannel[echannel].headers = headers;
+ return 0;
+
+fail_equeue:
+ __free_pages(equeue_page, equeue_order);
+
+fail_edescs:
+ __free_pages(edescs_page, edescs_order);
+
+fail_headers:
+ __free_pages(headers_page, headers_order);
+
+fail:
+ return rc;
+}
+
+/* Return channel number for a newly-opened link. */
+static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
+ const char *link_name)
+{
+ int rc = gxio_mpipe_link_open(link, &context, link_name, 0);
+ if (rc < 0) {
+ netdev_err(dev, "Failed to open '%s'\n", link_name);
+ return rc;
+ }
+ rc = gxio_mpipe_link_channel(link);
+ if (rc < 0 || rc >= TILE_NET_CHANNELS) {
+ netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
+ gxio_mpipe_link_close(link);
+ return -EINVAL;
+ }
+ return rc;
+}
+
+/* Help the kernel activate the given network interface. */
+static int tile_net_open(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int cpu, rc;
+
+ mutex_lock(&tile_net_devs_for_channel_mutex);
+
+ /* Do one-time initialization the first time any device is opened. */
+ if (ingress_irq < 0) {
+ rc = tile_net_init_mpipe(dev);
+ if (rc != 0)
+ goto fail;
+ }
+
+ /* Determine if this is the "loopify" device. */
+ if (unlikely((loopify_link_name != NULL) &&
+ !strcmp(dev->name, loopify_link_name))) {
+ rc = tile_net_link_open(dev, &priv->link, "loop0");
+ if (rc < 0)
+ goto fail;
+ priv->channel = rc;
+ rc = tile_net_link_open(dev, &priv->loopify_link, "loop1");
+ if (rc < 0)
+ goto fail;
+ priv->loopify_channel = rc;
+ priv->echannel = rc;
+ } else {
+ rc = tile_net_link_open(dev, &priv->link, dev->name);
+ if (rc < 0)
+ goto fail;
+ priv->channel = rc;
+ priv->echannel = rc;
+ }
+
+ /* Initialize egress info (if needed). Once ever, per echannel. */
+ rc = tile_net_init_egress(dev, priv->echannel);
+ if (rc != 0)
+ goto fail;
+
+ tile_net_devs_for_channel[priv->channel] = dev;
+
+ rc = tile_net_update(dev);
+ if (rc != 0)
+ goto fail;
+
+ mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+ /* Initialize the transmit wake timer for this device for each cpu. */
+ for_each_online_cpu(cpu) {
+ struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+ struct tile_net_tx_wake *tx_wake =
+ &info->tx_wake[priv->echannel];
+
+ hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ tx_wake->tx_queue_idx = cpu;
+ tx_wake->timer.function = tile_net_handle_tx_wake_timer;
+ tx_wake->dev = dev;
+ }
+
+ for_each_online_cpu(cpu)
+ netif_start_subqueue(dev, cpu);
+ netif_carrier_on(dev);
+ return 0;
+
+fail:
+ if (priv->loopify_channel >= 0) {
+ if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
+ netdev_warn(dev, "Failed to close loopify link!\n");
+ priv->loopify_channel = -1;
+ }
+ if (priv->channel >= 0) {
+ if (gxio_mpipe_link_close(&priv->link) != 0)
+ netdev_warn(dev, "Failed to close link!\n");
+ tile_net_devs_for_channel[priv->channel] = NULL;
+ priv->channel = -1;
+ }
+ priv->echannel = -1;
+ mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+ /* Don't return raw gxio error codes to generic Linux. */
+ return (rc > -512) ? rc : -EIO;
+}
+
+/* Help the kernel deactivate the given network interface. */
+static int tile_net_stop(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+ struct tile_net_tx_wake *tx_wake =
+ &info->tx_wake[priv->echannel];
+
+ hrtimer_cancel(&tx_wake->timer);
+ netif_stop_subqueue(dev, cpu);
+ }
+
+ mutex_lock(&tile_net_devs_for_channel_mutex);
+ tile_net_devs_for_channel[priv->channel] = NULL;
+ (void)tile_net_update(dev);
+ if (priv->loopify_channel >= 0) {
+ if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
+ netdev_warn(dev, "Failed to close loopify link!\n");
+ priv->loopify_channel = -1;
+ }
+ if (priv->channel >= 0) {
+ if (gxio_mpipe_link_close(&priv->link) != 0)
+ netdev_warn(dev, "Failed to close link!\n");
+ priv->channel = -1;
+ }
+ priv->echannel = -1;
+ mutex_unlock(&tile_net_devs_for_channel_mutex);
+
+ return 0;
+}
+
+/* Determine the VA for a fragment. */
+static inline void *tile_net_frag_buf(skb_frag_t *f)
+{
+ unsigned long pfn = page_to_pfn(skb_frag_page(f));
+ return pfn_to_kaddr(pfn) + f->page_offset;
+}
+
+/* Acquire a completion entry and an egress slot, or if we can't,
+ * stop the queue and schedule the tx_wake timer.
+ */
+static s64 tile_net_equeue_try_reserve(struct net_device *dev,
+ int tx_queue_idx,
+ struct tile_net_comps *comps,
+ gxio_mpipe_equeue_t *equeue,
+ int num_edescs)
+{
+ /* Try to acquire a completion entry. */
+ if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 ||
+ tile_net_free_comps(equeue, comps, 32, false) != 0) {
+
+ /* Try to acquire an egress slot. */
+ s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
+ if (slot >= 0)
+ return slot;
+
+ /* Freeing some completions gives the equeue time to drain. */
+ tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false);
+
+ slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
+ if (slot >= 0)
+ return slot;
+ }
+
+ /* Still nothing; give up and stop the queue for a short while. */
+ netif_stop_subqueue(dev, tx_queue_idx);
+ tile_net_schedule_tx_wake_timer(dev, tx_queue_idx);
+ return -1;
+}
+
+/* Determine how many edescs are needed for TSO.
+ *
+ * Sometimes, if "sendfile()" requires copying, we will be called with
+ * "data" containing the header and payload, with "frags" being empty.
+ * Sometimes, for example when using NFS over TCP, a single segment can
+ * span 3 fragments. This requires special care.
+ */
+static int tso_count_edescs(struct sk_buff *skb)
+{
+ struct skb_shared_info *sh = skb_shinfo(skb);
+ unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ unsigned int data_len = skb->data_len + skb->hdr_len - sh_len;
+ unsigned int p_len = sh->gso_size;
+ long f_id = -1; /* id of the current fragment */
+ long f_size = skb->hdr_len; /* size of the current fragment */
+ long f_used = sh_len; /* bytes used from the current fragment */
+ long n; /* size of the current piece of payload */
+ int num_edescs = 0;
+ int segment;
+
+ for (segment = 0; segment < sh->gso_segs; segment++) {
+
+ unsigned int p_used = 0;
+
+ /* One edesc for header and for each piece of the payload. */
+ for (num_edescs++; p_used < p_len; num_edescs++) {
+
+ /* Advance as needed. */
+ while (f_used >= f_size) {
+ f_id++;
+ f_size = sh->frags[f_id].size;
+ f_used = 0;
+ }
+
+ /* Use bytes from the current fragment. */
+ n = p_len - p_used;
+ if (n > f_size - f_used)
+ n = f_size - f_used;
+ f_used += n;
+ p_used += n;
+ }
+
+ /* The last segment may be less than gso_size. */
+ data_len -= p_len;
+ if (data_len < p_len)
+ p_len = data_len;
+ }
+
+ return num_edescs;
+}
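/*
 * Editor's sketch, not part of the patch: a standalone model of the
 * counting above, using plain arrays instead of an skb.  "hdr_len" is
 * the length of the linear header area, "frag[]" holds the payload
 * fragment sizes, and "mss" plays the role of gso_size.  All names and
 * numbers here are illustrative only.
 */
static int model_count_edescs(int hdr_len, const int *frag,
			      int payload_len, int mss)
{
	int f_id = -1, f_size = hdr_len, f_used = hdr_len;
	int num = 0;

	while (payload_len > 0) {
		int seg = (payload_len < mss) ? payload_len : mss;
		int p_used = 0;

		num++;			/* one edesc for the header copy */
		while (p_used < seg) {	/* one edesc per payload piece */
			int n;

			while (f_used >= f_size) {
				f_id++;
				f_size = frag[f_id];
				f_used = 0;
			}
			n = seg - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
			num++;
		}
		payload_len -= seg;
	}
	return num;
}

/*
 * Example: hdr_len = 54, frags of { 700, 700, 1400 }, payload 2800,
 * mss 1400.  Segment 1 spans two fragments (header + 2 payload edescs)
 * and segment 2 fits in one (header + 1), so the model returns 5.
 */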
+
+/* Prepare modified copies of the skbuff headers.
+ * FIXME: add support for IPv6.
+ */
+static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
+ s64 slot)
+{
+ struct skb_shared_info *sh = skb_shinfo(skb);
+ struct iphdr *ih;
+ struct tcphdr *th;
+ unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ unsigned int data_len = skb->data_len + skb->hdr_len - sh_len;
+ unsigned char *data = skb->data;
+ unsigned int ih_off, th_off, p_len;
+ unsigned int isum_seed, tsum_seed, id, seq;
+ long f_id = -1; /* id of the current fragment */
+ long f_size = skb->hdr_len; /* size of the current fragment */
+ long f_used = sh_len; /* bytes used from the current fragment */
+ long n; /* size of the current piece of payload */
+ int segment;
+
+ /* Locate original headers and compute various lengths. */
+ ih = ip_hdr(skb);
+ th = tcp_hdr(skb);
+ ih_off = skb_network_offset(skb);
+ th_off = skb_transport_offset(skb);
+ p_len = sh->gso_size;
+
+ /* Set up seed values for IP and TCP csum and initialize id and seq. */
+ isum_seed = ((0xFFFF - ih->check) +
+ (0xFFFF - ih->tot_len) +
+ (0xFFFF - ih->id));
+ tsum_seed = th->check + (0xFFFF ^ htons(sh_len + data_len));
+ id = ntohs(ih->id);
+ seq = ntohl(th->seq);
+
+ /* Prepare all the headers. */
+ for (segment = 0; segment < sh->gso_segs; segment++) {
+ unsigned char *buf;
+ unsigned int p_used = 0;
+
+ /* Copy to the header memory for this segment. */
+ buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
+ NET_IP_ALIGN;
+ memcpy(buf, data, sh_len);
+
+ /* Update copied ip header. */
+ ih = (struct iphdr *)(buf + ih_off);
+ ih->tot_len = htons(sh_len + p_len - ih_off);
+ ih->id = htons(id);
+ ih->check = csum_long(isum_seed + ih->tot_len +
+ ih->id) ^ 0xffff;
+
+ /* Update copied tcp header. */
+ th = (struct tcphdr *)(buf + th_off);
+ th->seq = htonl(seq);
+ th->check = csum_long(tsum_seed + htons(sh_len + p_len));
+ if (segment != sh->gso_segs - 1) {
+ th->fin = 0;
+ th->psh = 0;
+ }
+
+ /* Skip past the header. */
+ slot++;
+
+ /* Skip past the payload. */
+ while (p_used < p_len) {
+
+ /* Advance as needed. */
+ while (f_used >= f_size) {
+ f_id++;
+ f_size = sh->frags[f_id].size;
+ f_used = 0;
+ }
+
+ /* Use bytes from the current fragment. */
+ n = p_len - p_used;
+ if (n > f_size - f_used)
+ n = f_size - f_used;
+ f_used += n;
+ p_used += n;
+
+ slot++;
+ }
+
+ id++;
+ seq += p_len;
+
+ /* The last segment may be less than gso_size. */
+ data_len -= p_len;
+ if (data_len < p_len)
+ p_len = data_len;
+ }
+
+ /* Flush the headers so they are ready for hardware DMA. */
+ wmb();
+}
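/*
 * Editor's note, not part of the patch: the isum_seed/tsum_seed math
 * above is incremental ones'-complement arithmetic -- subtract (via
 * 0xFFFF - x) the fields that change per segment, then add the new
 * values back in and fold.  A minimal fold helper, assuming csum_long()
 * behaves roughly like this (an assumption, not the tile API):
 */
static inline unsigned int model_csum_fold(unsigned long sum)
{
	/* Fold the carries back into the low 16 bits. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}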
+
+/* Pass all the data to mpipe for egress. */
+static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
+ struct sk_buff *skb, unsigned char *headers, s64 slot)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ struct skb_shared_info *sh = skb_shinfo(skb);
+ unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ unsigned int data_len = skb->data_len + skb->hdr_len - sh_len;
+ unsigned int p_len = sh->gso_size;
+ gxio_mpipe_edesc_t edesc_head = { { 0 } };
+ gxio_mpipe_edesc_t edesc_body = { { 0 } };
+ long f_id = -1; /* id of the current fragment */
+ long f_size = skb->hdr_len; /* size of the current fragment */
+ long f_used = sh_len; /* bytes used from the current fragment */
+ void *f_data = skb->data;
+ long n; /* size of the current piece of payload */
+ unsigned long tx_packets = 0, tx_bytes = 0;
+ unsigned int csum_start;
+ int segment;
+
+ /* Prepare to egress the headers: set up header edesc. */
+ csum_start = skb_checksum_start_offset(skb);
+ edesc_head.csum = 1;
+ edesc_head.csum_start = csum_start;
+ edesc_head.csum_dest = csum_start + skb->csum_offset;
+ edesc_head.xfer_size = sh_len;
+
+ /* This is only used to specify the TLB. */
+ edesc_head.stack_idx = large_buffer_stack;
+ edesc_body.stack_idx = large_buffer_stack;
+
+ /* Egress all the edescs. */
+ for (segment = 0; segment < sh->gso_segs; segment++) {
+ unsigned char *buf;
+ unsigned int p_used = 0;
+
+ /* Egress the header. */
+ buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
+ NET_IP_ALIGN;
+ edesc_head.va = va_to_tile_io_addr(buf);
+ gxio_mpipe_equeue_put_at(equeue, edesc_head, slot);
+ slot++;
+
+ /* Egress the payload. */
+ while (p_used < p_len) {
+
+ /* Advance as needed. */
+ while (f_used >= f_size) {
+ f_id++;
+ f_size = sh->frags[f_id].size;
+ f_used = 0;
+ f_data = tile_net_frag_buf(&sh->frags[f_id]);
+ }
+
+ /* Use bytes from the current fragment. */
+ n = p_len - p_used;
+ if (n > f_size - f_used)
+ n = f_size - f_used;
+ f_used += n;
+ p_used += n;
+
+ /* Egress a piece of the payload. */
+ edesc_body.va = va_to_tile_io_addr(f_data) + f_used;
+ edesc_body.xfer_size = n;
+ edesc_body.bound = !(p_used < p_len);
+ gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
+ slot++;
+ }
+
+ tx_packets++;
+ tx_bytes += sh_len + p_len;
+
+ /* The last segment may be less than gso_size. */
+ data_len -= p_len;
+ if (data_len < p_len)
+ p_len = data_len;
+ }
+
+ /* Update stats. */
+ tile_net_stats_add(tx_packets, &priv->stats.tx_packets);
+ tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes);
+}
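/*
 * Editor's illustration, not part of the patch: for a segment whose
 * payload spans two fragments, the descriptors pushed above are
 *
 *   [ header, csum ] [ payload piece 1 ] [ payload piece 2, bound=1 ]
 *
 * i.e. "bound" is set only on the final edesc of each segment, marking
 * the packet boundary for the hardware.
 */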
+
+/* Do "TSO" handling for egress.
+ *
+ * Normally drivers set NETIF_F_TSO only to support hardware TSO;
+ * otherwise the stack uses scatter-gather to implement GSO in software.
+ * In our testing, enabling GSO support (via NETIF_F_SG) drops network
+ * performance to around 7.5 Gbps on the 10G interfaces, while also
+ * dropping cpu utilization to under 8%. But implementing "TSO" in the
+ * driver brings performance back up to line rate, while dropping cpu
+ * usage even further, to less than 4%. In
+ * practice, profiling of GSO shows that skb_segment() is what causes
+ * the performance overheads; we benefit in the driver from using
+ * preallocated memory to duplicate the TCP/IP headers.
+ */
+static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_priv *priv = netdev_priv(dev);
+ int channel = priv->echannel;
+ struct tile_net_egress *egress = &egress_for_echannel[channel];
+ struct tile_net_comps *comps = info->comps_for_echannel[channel];
+ gxio_mpipe_equeue_t *equeue = egress->equeue;
+ unsigned long irqflags;
+ int num_edescs;
+ s64 slot;
+
+ /* Determine how many mpipe edescs are needed. */
+ num_edescs = tso_count_edescs(skb);
+
+ local_irq_save(irqflags);
+
+ /* Try to acquire a completion entry and an egress slot. */
+ slot = tile_net_equeue_try_reserve(dev, skb->queue_mapping, comps,
+ equeue, num_edescs);
+ if (slot < 0) {
+ local_irq_restore(irqflags);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Set up copies of header data properly. */
+ tso_headers_prepare(skb, egress->headers, slot);
+
+ /* Actually pass the data to the network hardware. */
+ tso_egress(dev, equeue, skb, egress->headers, slot);
+
+ /* Add a completion record. */
+ add_comp(equeue, comps, slot + num_edescs - 1, skb);
+
+ local_irq_restore(irqflags);
+
+ /* Make sure the egress timer is scheduled. */
+ tile_net_schedule_egress_timer();
+
+ return NETDEV_TX_OK;
+}
+
+/* Analyze the body and frags for a transmit request. */
+static unsigned int tile_net_tx_frags(struct frag *frags,
+ struct sk_buff *skb,
+ void *b_data, unsigned int b_len)
+{
+ unsigned int i, n = 0;
+
+ struct skb_shared_info *sh = skb_shinfo(skb);
+
+ if (b_len != 0) {
+ frags[n].buf = b_data;
+ frags[n++].length = b_len;
+ }
+
+ for (i = 0; i < sh->nr_frags; i++) {
+ skb_frag_t *f = &sh->frags[i];
+ frags[n].buf = tile_net_frag_buf(f);
+ frags[n++].length = skb_frag_size(f);
+ }
+
+ return n;
+}
+
+/* Help the kernel transmit a packet. */
+static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_priv *priv = netdev_priv(dev);
+ struct tile_net_egress *egress = &egress_for_echannel[priv->echannel];
+ gxio_mpipe_equeue_t *equeue = egress->equeue;
+ struct tile_net_comps *comps =
+ info->comps_for_echannel[priv->echannel];
+ unsigned int len = skb->len;
+ unsigned char *data = skb->data;
+ unsigned int num_edescs;
+ struct frag frags[MAX_FRAGS];
+ gxio_mpipe_edesc_t edescs[MAX_FRAGS];
+ unsigned long irqflags;
+ gxio_mpipe_edesc_t edesc = { { 0 } };
+ unsigned int i;
+ s64 slot;
+
+ if (skb_is_gso(skb))
+ return tile_net_tx_tso(skb, dev);
+
+ num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
+
+ /* This is only used to specify the TLB. */
+ edesc.stack_idx = large_buffer_stack;
+
+ /* Prepare the edescs. */
+ for (i = 0; i < num_edescs; i++) {
+ edesc.xfer_size = frags[i].length;
+ edesc.va = va_to_tile_io_addr(frags[i].buf);
+ edescs[i] = edesc;
+ }
+
+ /* Mark the final edesc. */
+ edescs[num_edescs - 1].bound = 1;
+
+ /* Add checksum info to the initial edesc, if needed. */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ unsigned int csum_start = skb_checksum_start_offset(skb);
+ edescs[0].csum = 1;
+ edescs[0].csum_start = csum_start;
+ edescs[0].csum_dest = csum_start + skb->csum_offset;
+ }
+
+ local_irq_save(irqflags);
+
+ /* Try to acquire a completion entry and an egress slot. */
+ slot = tile_net_equeue_try_reserve(dev, skb->queue_mapping, comps,
+ equeue, num_edescs);
+ if (slot < 0) {
+ local_irq_restore(irqflags);
+ return NETDEV_TX_BUSY;
+ }
+
+ for (i = 0; i < num_edescs; i++)
+ gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);
+
+ /* Add a completion record. */
+ add_comp(equeue, comps, slot - 1, skb);
+
+ /* NOTE: Count short packets as ETH_ZLEN bytes (e.g. 42 counts as 60). */
+ tile_net_stats_add(1, &priv->stats.tx_packets);
+ tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
+ &priv->stats.tx_bytes);
+
+ local_irq_restore(irqflags);
+
+ /* Make sure the egress timer is scheduled. */
+ tile_net_schedule_egress_timer();
+
+ return NETDEV_TX_OK;
+}
+
+/* Return subqueue id on this core (one per core). */
+static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ return smp_processor_id();
+}
+
+/* Deal with a transmit timeout. */
+static void tile_net_tx_timeout(struct net_device *dev)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ netif_wake_subqueue(dev, cpu);
+}
+
+/* Ioctl commands. */
+static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ return -EOPNOTSUPP;
+}
+
+/* Get system network statistics for device. */
+static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ return &priv->stats;
+}
+
+/* Change the MTU. */
+static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if ((new_mtu < 68) || (new_mtu > 1500))
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+/* Change the Ethernet address of the NIC.
+ *
+ * The hypervisor driver does not support changing MAC address. However,
+ * the hardware does not do anything with the MAC address, so the address
+ * which gets used on outgoing packets, and which is accepted on incoming
+ * packets, is completely up to us.
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int tile_net_set_mac_address(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void tile_net_netpoll(struct net_device *dev)
+{
+ disable_percpu_irq(ingress_irq);
+ tile_net_handle_ingress_irq(ingress_irq, NULL);
+ enable_percpu_irq(ingress_irq, 0);
+}
+#endif
+
+static const struct net_device_ops tile_net_ops = {
+ .ndo_open = tile_net_open,
+ .ndo_stop = tile_net_stop,
+ .ndo_start_xmit = tile_net_tx,
+ .ndo_select_queue = tile_net_select_queue,
+ .ndo_do_ioctl = tile_net_ioctl,
+ .ndo_get_stats = tile_net_get_stats,
+ .ndo_change_mtu = tile_net_change_mtu,
+ .ndo_tx_timeout = tile_net_tx_timeout,
+ .ndo_set_mac_address = tile_net_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = tile_net_netpoll,
+#endif
+};
+
+/* The setup function.
+ *
+ * This uses ether_setup() to assign various fields in dev, including
+ * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
+ */
+static void tile_net_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+ dev->netdev_ops = &tile_net_ops;
+ dev->watchdog_timeo = TILE_NET_TIMEOUT;
+ dev->features |= NETIF_F_LLTX;
+ dev->features |= NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_SG;
+ dev->features |= NETIF_F_TSO;
+ dev->mtu = 1500;
+}
+
+/* Allocate the device structure, register the device, and obtain the
+ * MAC address from the hypervisor.
+ */
+static void tile_net_dev_init(const char *name, const uint8_t *mac)
+{
+ int ret;
+ int i;
+ int nz_addr = 0;
+ struct net_device *dev;
+ struct tile_net_priv *priv;
+
+ /* HACK: Ignore "loop" links. */
+ if (strncmp(name, "loop", 4) == 0)
+ return;
+
+ /* Allocate the device structure. Normally, "name" is a
+ * template, instantiated by register_netdev(), but not for us.
+ */
+ dev = alloc_netdev_mqs(sizeof(*priv), name, tile_net_setup,
+ NR_CPUS, 1);
+ if (!dev) {
+ pr_err("alloc_netdev_mqs(%s) failed\n", name);
+ return;
+ }
+
+ /* Initialize "priv". */
+ priv = netdev_priv(dev);
+ memset(priv, 0, sizeof(*priv));
+ priv->dev = dev;
+ priv->channel = -1;
+ priv->loopify_channel = -1;
+ priv->echannel = -1;
+
+ /* Get the MAC address and set it in the device struct; this must
+ * be done before the device is opened. If the MAC is all zeroes,
+ * we use a random address, since we're probably on the simulator.
+ */
+ for (i = 0; i < 6; i++)
+ nz_addr |= mac[i];
+
+ if (nz_addr) {
+ memcpy(dev->dev_addr, mac, 6);
+ dev->addr_len = 6;
+ } else {
+ eth_hw_addr_random(dev);
+ }
+
+ /* Register the network device. */
+ ret = register_netdev(dev);
+ if (ret) {
+ netdev_err(dev, "register_netdev failed %d\n", ret);
+ free_netdev(dev);
+ return;
+ }
+}
+
+/* Per-cpu module initialization. */
+static void tile_net_init_module_percpu(void *unused)
+{
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ int my_cpu = smp_processor_id();
+
+ info->has_iqueue = false;
+
+ info->my_cpu = my_cpu;
+
+ /* Initialize the egress timer. */
+ hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ info->egress_timer.function = tile_net_handle_egress_timer;
+}
+
+/* Module initialization. */
+static int __init tile_net_init_module(void)
+{
+ int i;
+ char name[GXIO_MPIPE_LINK_NAME_LEN];
+ uint8_t mac[6];
+
+ pr_info("Tilera Network Driver\n");
+
+ mutex_init(&tile_net_devs_for_channel_mutex);
+
+ /* Initialize each CPU. */
+ on_each_cpu(tile_net_init_module_percpu, NULL, 1);
+
+ /* Find out what devices we have, and initialize them. */
+ for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++)
+ tile_net_dev_init(name, mac);
+
+ if (!network_cpus_init())
+ network_cpus_map = *cpu_online_mask;
+
+ return 0;
+}
+
+module_init(tile_net_init_module);
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 6199f6b..c1ebfe9 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -114,7 +114,8 @@ spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
out_be32(card->regs + reg, value);
}
-/** spider_net_write_phy - write to phy register
+/**
+ * spider_net_write_phy - write to phy register
* @netdev: adapter to be written to
* @mii_id: id of MII
* @reg: PHY register
@@ -137,7 +138,8 @@ spider_net_write_phy(struct net_device *netdev, int mii_id,
spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
}
-/** spider_net_read_phy - read from phy register
+/**
+ * spider_net_read_phy - read from phy register
* @netdev: network device to be read from
* @mii_id: id of MII
* @reg: PHY register
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ea3e0a2..a46c198 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -486,7 +486,7 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index,
velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
- velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
+ velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
opts->numrx = (opts->numrx & ~3);
}
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index a75e9ef..a5826a3 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -637,7 +637,7 @@ static int __devinit w5100_hw_probe(struct platform_device *pdev)
if (data && is_valid_ether_addr(data->mac_addr)) {
memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
} else {
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
ndev->addr_assign_type |= NET_ADDR_RANDOM;
}
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 3306a20..bdd8891 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -557,7 +557,7 @@ static int __devinit w5300_hw_probe(struct platform_device *pdev)
if (data && is_valid_ether_addr(data->mac_addr)) {
memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
} else {
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
ndev->addr_assign_type |= NET_ADDR_RANDOM;
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 1eaf712..f8e3518 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -197,7 +197,7 @@ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
#endif
/**
- * * temac_dma_bd_release - Release buffer descriptor rings
+ * temac_dma_bd_release - Release buffer descriptor rings
*/
static void temac_dma_bd_release(struct net_device *ndev)
{
@@ -768,7 +768,6 @@ static void ll_temac_recv(struct net_device *ndev)
DMA_FROM_DEVICE);
skb_put(skb, length);
- skb->dev = ndev;
skb->protocol = eth_type_trans(skb, ndev);
skb_checksum_none_assert(skb);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 9c365e1..0793299 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -312,7 +312,7 @@ static void axienet_set_mac_address(struct net_device *ndev, void *address)
if (address)
memcpy(ndev->dev_addr, address, ETH_ALEN);
if (!is_valid_ether_addr(ndev->dev_addr))
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
/* Set up unicast MAC address filter set its mac address */
axienet_iow(lp, XAE_UAW0_OFFSET,
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 4ad80f77..6695a1d 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -2962,7 +2962,7 @@ static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
- bp->p_rcv_buff_va[i+j] = (char *) (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
+ bp->p_rcv_buff_va[i+j] = (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
}
#endif
}
@@ -3030,7 +3030,7 @@ static void dfx_rcv_queue_process(
#ifdef DYNAMIC_BUFFERS
p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
#else
- p_buff = (char *) bp->p_rcv_buff_va[entry];
+ p_buff = bp->p_rcv_buff_va[entry];
#endif
memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c
index 9ac4665..24d8566 100644
--- a/drivers/net/fddi/skfp/pmf.c
+++ b/drivers/net/fddi/skfp/pmf.c
@@ -1242,7 +1242,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
if (len < 8)
goto len_error ;
if (set)
- memcpy((char *) to,(char *) from+2,6) ;
+ memcpy(to,from+2,6) ;
to += 8 ;
from += 8 ;
len -= 8 ;
@@ -1251,7 +1251,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
if (len < 4)
goto len_error ;
if (set)
- memcpy((char *) to,(char *) from,4) ;
+ memcpy(to,from,4) ;
to += 4 ;
from += 4 ;
len -= 4 ;
@@ -1260,7 +1260,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
if (len < 8)
goto len_error ;
if (set)
- memcpy((char *) to,(char *) from,8) ;
+ memcpy(to,from,8) ;
to += 8 ;
from += 8 ;
len -= 8 ;
@@ -1269,7 +1269,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
if (len < 32)
goto len_error ;
if (set)
- memcpy((char *) to,(char *) from,32) ;
+ memcpy(to,from,32) ;
to += 32 ;
from += 32 ;
len -= 32 ;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index aed1a61..2c0894a 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -485,7 +485,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
return;
default:
- count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
+ count = kiss_esc(p, ax->xbuff, len);
}
} else {
unsigned short crc;
@@ -497,7 +497,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
case CRC_MODE_SMACK:
*p |= 0x80;
crc = swab16(crc16(0, p, len));
- count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2);
+ count = kiss_esc_crc(p, ax->xbuff, crc, len+2);
break;
case CRC_MODE_FLEX_TEST:
ax->crcmode = CRC_MODE_NONE;
@@ -506,11 +506,11 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
case CRC_MODE_FLEX:
*p |= 0x20;
crc = calc_crc_flex(p, len);
- count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2);
+ count = kiss_esc_crc(p, ax->xbuff, crc, len+2);
break;
default:
- count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
+ count = kiss_esc(p, ax->xbuff, len);
}
}
spin_unlock_bh(&ax->buflock);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 4ffcd57..95ceb35 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -131,6 +131,7 @@ int rndis_filter_send(struct hv_device *dev,
struct hv_netvsc_packet *pkt);
int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
+int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
#define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF)
@@ -478,6 +479,7 @@ struct netvsc_device {
u32 nvsp_version;
atomic_t num_outstanding_sends;
+ wait_queue_head_t wait_drain;
bool start_remove;
bool destroy;
/*
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 8b91947..6cee291 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -42,6 +42,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
if (!net_device)
return NULL;
+ init_waitqueue_head(&net_device->wait_drain);
net_device->start_remove = false;
net_device->destroy = false;
net_device->dev = device;
@@ -387,12 +388,8 @@ int netvsc_device_remove(struct hv_device *device)
spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
/* Wait for all send completions */
- while (atomic_read(&net_device->num_outstanding_sends)) {
- dev_info(&device->device,
- "waiting for %d requests to complete...\n",
- atomic_read(&net_device->num_outstanding_sends));
- udelay(100);
- }
+ wait_event(net_device->wait_drain,
+ atomic_read(&net_device->num_outstanding_sends) == 0);
netvsc_disconnect_vsp(net_device);
@@ -486,6 +483,9 @@ static void netvsc_send_completion(struct hv_device *device,
num_outstanding_sends =
atomic_dec_return(&net_device->num_outstanding_sends);
+ if (net_device->destroy && num_outstanding_sends == 0)
+ wake_up(&net_device->wait_drain);
+
if (netif_queue_stopped(ndev) && !net_device->start_remove &&
(hv_ringbuf_avail_percent(&device->channel->outbound)
> RING_AVAIL_PERCENT_HIWATER ||
@@ -614,7 +614,7 @@ retry_send_cmplt:
static void netvsc_receive_completion(void *context)
{
struct hv_netvsc_packet *packet = context;
- struct hv_device *device = (struct hv_device *)packet->device;
+ struct hv_device *device = packet->device;
struct netvsc_device *net_device;
u64 transaction_id = 0;
bool fsend_receive_comp = false;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 8f8ed33..8e23c08 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -341,6 +341,34 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
return 0;
}
+
+static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
+{
+ struct net_device_context *ndevctx = netdev_priv(ndev);
+ struct hv_device *hdev = ndevctx->device_ctx;
+ struct sockaddr *addr = p;
+ char save_adr[14];
+ unsigned char save_aatype;
+ int err;
+
+ memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
+ save_aatype = ndev->addr_assign_type;
+
+ err = eth_mac_addr(ndev, p);
+ if (err != 0)
+ return err;
+
+ err = rndis_filter_set_device_mac(hdev, addr->sa_data);
+ if (err != 0) {
+ /* roll back to saved MAC */
+ memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
+ ndev->addr_assign_type = save_aatype;
+ }
+
+ return err;
+}
+
+
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = netvsc_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -353,7 +381,7 @@ static const struct net_device_ops device_ops = {
.ndo_set_rx_mode = netvsc_set_multicast_list,
.ndo_change_mtu = netvsc_change_mtu,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = netvsc_set_mac_addr,
};
/*
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 981ebb1..fbf5394 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -27,6 +27,7 @@
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
+#include <linux/nls.h>
#include "hyperv_net.h"
@@ -47,6 +48,7 @@ struct rndis_request {
struct hv_page_buffer buf;
/* FIXME: We assumed a fixed size request here. */
struct rndis_message request_msg;
+ u8 ext[100];
};
static void rndis_filter_send_completion(void *ctx);
@@ -511,6 +513,83 @@ static int rndis_filter_query_device_mac(struct rndis_device *dev)
dev->hw_mac_adr, &size);
}
+#define NWADR_STR "NetworkAddress"
+#define NWADR_STRLEN 14
+
+int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac)
+{
+ struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+ struct rndis_device *rdev = nvdev->extension;
+ struct net_device *ndev = nvdev->ndev;
+ struct rndis_request *request;
+ struct rndis_set_request *set;
+ struct rndis_config_parameter_info *cpi;
+ wchar_t *cfg_nwadr, *cfg_mac;
+ struct rndis_set_complete *set_complete;
+ char macstr[2*ETH_ALEN+1];
+ u32 extlen = sizeof(struct rndis_config_parameter_info) +
+ 2*NWADR_STRLEN + 4*ETH_ALEN;
+ int ret, t;
+
+ request = get_rndis_request(rdev, RNDIS_MSG_SET,
+ RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
+ if (!request)
+ return -ENOMEM;
+
+ set = &request->request_msg.msg.set_req;
+ set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER;
+ set->info_buflen = extlen;
+ set->info_buf_offset = sizeof(struct rndis_set_request);
+ set->dev_vc_handle = 0;
+
+ cpi = (struct rndis_config_parameter_info *)((ulong)set +
+ set->info_buf_offset);
+ cpi->parameter_name_offset =
+ sizeof(struct rndis_config_parameter_info);
+ /* Multiply by 2 because host needs 2 bytes (utf16) for each char */
+ cpi->parameter_name_length = 2*NWADR_STRLEN;
+ cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING;
+ cpi->parameter_value_offset =
+ cpi->parameter_name_offset + cpi->parameter_name_length;
+ /* Multiply by 4 because each MAC byte displayed as 2 utf16 chars */
+ cpi->parameter_value_length = 4*ETH_ALEN;
+
+ cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset);
+ cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset);
+ ret = utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN,
+ cfg_nwadr, NWADR_STRLEN);
+ if (ret < 0)
+ goto cleanup;
+ snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac);
+ ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN,
+ cfg_mac, 2*ETH_ALEN);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = rndis_filter_send_request(rdev, request);
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ netdev_err(ndev, "timeout before we got a set response...\n");
+ /*
+ * can't put_rndis_request, since we may still receive a
+ * send-completion.
+ */
+ return -EBUSY;
+ } else {
+ set_complete = &request->response_msg.msg.set_complete;
+ if (set_complete->status != RNDIS_STATUS_SUCCESS)
+ ret = -EINVAL;
+ }
+
+cleanup:
+ put_rndis_request(rdev, request);
+ return ret;
+}
+
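/*
 * Editor's note, not part of the patch: the request built above packs,
 * in order,
 *
 *   struct rndis_set_request
 *   struct rndis_config_parameter_info
 *   "NetworkAddress" encoded as UTF-16   (2 * NWADR_STRLEN bytes)
 *   the MAC as 12 hex digits in UTF-16   (4 * ETH_ALEN bytes)
 *
 * which is why extlen adds 2*NWADR_STRLEN + 4*ETH_ALEN on top of the
 * parameter-info header.
 */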
+
static int rndis_filter_query_device_link_status(struct rndis_device *dev)
{
u32 size = sizeof(u32);
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index dcc80d6..8487204 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -1017,7 +1017,7 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
{
int iobase;
- struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
+ struct ali_ircc_cb *self = priv;
struct net_device *dev;
IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
@@ -1052,7 +1052,7 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
*/
static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
{
- struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
+ struct ali_ircc_cb *self = priv;
unsigned long flags;
int iobase;
int fcr; /* FIFO control reg */
@@ -1121,7 +1121,7 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
{
- struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
+ struct ali_ircc_cb *self = priv;
int iobase,dongle_id;
int tmp = 0;
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index fc503aa..e09417d 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -794,7 +794,7 @@ static int __devinit au1k_irda_net_init(struct net_device *dev)
/* allocate the data buffers */
aup->db[0].vaddr =
- (void *)dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp);
+ dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp);
if (!aup->db[0].vaddr)
goto out3;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 32eb94e..e2a06fd 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -107,10 +107,10 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
lb_stats = per_cpu_ptr(dev->lstats, i);
do {
- start = u64_stats_fetch_begin(&lb_stats->syncp);
+ start = u64_stats_fetch_begin_bh(&lb_stats->syncp);
tbytes = lb_stats->bytes;
tpackets = lb_stats->packets;
- } while (u64_stats_fetch_retry(&lb_stats->syncp, start));
+ } while (u64_stats_fetch_retry_bh(&lb_stats->syncp, start));
bytes += tbytes;
packets += tpackets;
}
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 2ee56de..0737bd4 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -847,13 +847,12 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
const struct iovec *iv, unsigned long len,
int noblock)
{
- DECLARE_WAITQUEUE(wait, current);
+ DEFINE_WAIT(wait);
struct sk_buff *skb;
ssize_t ret = 0;
- add_wait_queue(sk_sleep(&q->sk), &wait);
while (len) {
- current->state = TASK_INTERRUPTIBLE;
+ prepare_to_wait(sk_sleep(&q->sk), &wait, TASK_INTERRUPTIBLE);
/* Read frames from the queue */
skb = skb_dequeue(&q->sk.sk_receive_queue);
@@ -875,8 +874,7 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
break;
}
- current->state = TASK_RUNNING;
- remove_wait_queue(sk_sleep(&q->sk), &wait);
+ finish_wait(sk_sleep(&q->sk), &wait);
return ret;
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 944cdfb..3090dc6 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -67,6 +67,11 @@ config BCM63XX_PHY
---help---
Currently supports the 6348 and 6358 PHYs.
+config BCM87XX_PHY
+ tristate "Driver for Broadcom BCM8706 and BCM8727 PHYs"
+ help
+ Currently supports the BCM8706 and BCM8727 10G Ethernet PHYs.
+
config ICPLUS_PHY
tristate "Drivers for ICPlus PHYs"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index f51af68..6d2dc6c 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_SMSC_PHY) += smsc.o
obj-$(CONFIG_VITESSE_PHY) += vitesse.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
+obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o
obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index cfabd5f..a3fb5ce 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -77,13 +77,7 @@ static struct phy_driver am79c_driver = {
static int __init am79c_init(void)
{
- int ret;
-
- ret = phy_driver_register(&am79c_driver);
- if (ret)
- return ret;
-
- return 0;
+ return phy_driver_register(&am79c_driver);
}
static void __exit am79c_exit(void)
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index cd802eb..84c7a39 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -71,7 +71,8 @@ static int bcm63xx_config_intr(struct phy_device *phydev)
return err;
}
-static struct phy_driver bcm63xx_1_driver = {
+static struct phy_driver bcm63xx_driver[] = {
+{
.phy_id = 0x00406000,
.phy_id_mask = 0xfffffc00,
.name = "Broadcom BCM63XX (1)",
@@ -84,10 +85,8 @@ static struct phy_driver bcm63xx_1_driver = {
.ack_interrupt = bcm63xx_ack_interrupt,
.config_intr = bcm63xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-/* same phy as above, with just a different OUI */
-static struct phy_driver bcm63xx_2_driver = {
+}, {
+ /* same phy as above, with just a different OUI */
.phy_id = 0x002bdc00,
.phy_id_mask = 0xfffffc00,
.name = "Broadcom BCM63XX (2)",
@@ -99,30 +98,18 @@ static struct phy_driver bcm63xx_2_driver = {
.ack_interrupt = bcm63xx_ack_interrupt,
.config_intr = bcm63xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
+} };
static int __init bcm63xx_phy_init(void)
{
- int ret;
-
- ret = phy_driver_register(&bcm63xx_1_driver);
- if (ret)
- goto out_63xx_1;
- ret = phy_driver_register(&bcm63xx_2_driver);
- if (ret)
- goto out_63xx_2;
- return ret;
-
-out_63xx_2:
- phy_driver_unregister(&bcm63xx_1_driver);
-out_63xx_1:
- return ret;
+ return phy_drivers_register(bcm63xx_driver,
+ ARRAY_SIZE(bcm63xx_driver));
}
static void __exit bcm63xx_phy_exit(void)
{
- phy_driver_unregister(&bcm63xx_1_driver);
- phy_driver_unregister(&bcm63xx_2_driver);
+ phy_drivers_unregister(bcm63xx_driver,
+ ARRAY_SIZE(bcm63xx_driver));
}
module_init(bcm63xx_phy_init);
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
new file mode 100644
index 0000000..2346b38
--- /dev/null
+++ b/drivers/net/phy/bcm87xx.c
@@ -0,0 +1,231 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011 - 2012 Cavium, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+
+#define PHY_ID_BCM8706 0x0143bdc1
+#define PHY_ID_BCM8727 0x0143bff0
+
+#define BCM87XX_PMD_RX_SIGNAL_DETECT (MII_ADDR_C45 | 0x1000a)
+#define BCM87XX_10GBASER_PCS_STATUS (MII_ADDR_C45 | 0x30020)
+#define BCM87XX_XGXS_LANE_STATUS (MII_ADDR_C45 | 0x40018)
+
+#define BCM87XX_LASI_CONTROL (MII_ADDR_C45 | 0x39002)
+#define BCM87XX_LASI_STATUS (MII_ADDR_C45 | 0x39005)
+
+#if IS_ENABLED(CONFIG_OF_MDIO)
+/* Set and/or override some configuration registers based on the
+ * broadcom,c45-reg-init property stored in the of_node for the phydev.
+ *
+ * broadcom,c45-reg-init = <devid reg mask value>,...;
+ *
+ * There may be one or more sets of <devid reg mask value>:
+ *
+ * devid: which sub-device to use.
+ * reg: the register.
+ * mask: if non-zero, ANDed with existing register value.
+ * value: ORed with the masked value and written to the register.
+ *
+ */
+static int bcm87xx_of_reg_init(struct phy_device *phydev)
+{
+ const __be32 *paddr;
+ const __be32 *paddr_end;
+ int len, ret;
+
+ if (!phydev->dev.of_node)
+ return 0;
+
+ paddr = of_get_property(phydev->dev.of_node,
+ "broadcom,c45-reg-init", &len);
+ if (!paddr)
+ return 0;
+
+ paddr_end = paddr + (len /= sizeof(*paddr));
+
+ ret = 0;
+
+ while (paddr + 3 < paddr_end) {
+ u16 devid = be32_to_cpup(paddr++);
+ u16 reg = be32_to_cpup(paddr++);
+ u16 mask = be32_to_cpup(paddr++);
+ u16 val_bits = be32_to_cpup(paddr++);
+ int val;
+ u32 regnum = MII_ADDR_C45 | (devid << 16) | reg;
+ val = 0;
+ if (mask) {
+ val = phy_read(phydev, regnum);
+ if (val < 0) {
+ ret = val;
+ goto err;
+ }
+ val &= mask;
+ }
+ val |= val_bits;
+
+ ret = phy_write(phydev, regnum, val);
+ if (ret < 0)
+ goto err;
+ }
+err:
+ return ret;
+}
+#else
+static int bcm87xx_of_reg_init(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF_MDIO */
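/*
 * Editor's example, not part of the patch, with made-up values: a node
 * carrying
 *
 *   broadcom,c45-reg-init = <1 0xc808 0xff8f 0x70>;
 *
 * would make bcm87xx_of_reg_init() read register 0xc808 of MDIO
 * device 1, AND it with 0xff8f, OR in 0x70, and write the result back.
 */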
+
+static int bcm87xx_config_init(struct phy_device *phydev)
+{
+ phydev->supported = SUPPORTED_10000baseR_FEC;
+ phydev->advertising = ADVERTISED_10000baseR_FEC;
+ phydev->state = PHY_NOLINK;
+ phydev->autoneg = AUTONEG_DISABLE;
+
+ bcm87xx_of_reg_init(phydev);
+
+ return 0;
+}
+
+static int bcm87xx_config_aneg(struct phy_device *phydev)
+{
+ return -EINVAL;
+}
+
+static int bcm87xx_read_status(struct phy_device *phydev)
+{
+ int rx_signal_detect;
+ int pcs_status;
+ int xgxs_lane_status;
+
+ rx_signal_detect = phy_read(phydev, BCM87XX_PMD_RX_SIGNAL_DETECT);
+ if (rx_signal_detect < 0)
+ return rx_signal_detect;
+
+ if ((rx_signal_detect & 1) == 0)
+ goto no_link;
+
+ pcs_status = phy_read(phydev, BCM87XX_10GBASER_PCS_STATUS);
+ if (pcs_status < 0)
+ return pcs_status;
+
+ if ((pcs_status & 1) == 0)
+ goto no_link;
+
+ xgxs_lane_status = phy_read(phydev, BCM87XX_XGXS_LANE_STATUS);
+ if (xgxs_lane_status < 0)
+ return xgxs_lane_status;
+
+ if ((xgxs_lane_status & 0x1000) == 0)
+ goto no_link;
+
+ phydev->speed = 10000;
+ phydev->link = 1;
+ phydev->duplex = 1;
+ return 0;
+
+no_link:
+ phydev->link = 0;
+ return 0;
+}
+
+static int bcm87xx_config_intr(struct phy_device *phydev)
+{
+ int reg, err;
+
+ reg = phy_read(phydev, BCM87XX_LASI_CONTROL);
+
+ if (reg < 0)
+ return reg;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ reg |= 1;
+ else
+ reg &= ~1;
+
+ err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg);
+ return err;
+}
+
+static int bcm87xx_did_interrupt(struct phy_device *phydev)
+{
+ int reg;
+
+ reg = phy_read(phydev, BCM87XX_LASI_STATUS);
+
+ if (reg < 0) {
+ dev_err(&phydev->dev,
+ "Error: Read of BCM87XX_LASI_STATUS failed: %d\n", reg);
+ return 0;
+ }
+ return (reg & 1) != 0;
+}
+
+static int bcm87xx_ack_interrupt(struct phy_device *phydev)
+{
+ /* Reading the LASI status clears it. */
+ bcm87xx_did_interrupt(phydev);
+ return 0;
+}
+
+static int bcm8706_match_phy_device(struct phy_device *phydev)
+{
+ return phydev->c45_ids.device_ids[4] == PHY_ID_BCM8706;
+}
+
+static int bcm8727_match_phy_device(struct phy_device *phydev)
+{
+ return phydev->c45_ids.device_ids[4] == PHY_ID_BCM8727;
+}
+
+static struct phy_driver bcm87xx_driver[] = {
+{
+ .phy_id = PHY_ID_BCM8706,
+ .phy_id_mask = 0xffffffff,
+ .name = "Broadcom BCM8706",
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = bcm87xx_config_init,
+ .config_aneg = bcm87xx_config_aneg,
+ .read_status = bcm87xx_read_status,
+ .ack_interrupt = bcm87xx_ack_interrupt,
+ .config_intr = bcm87xx_config_intr,
+ .did_interrupt = bcm87xx_did_interrupt,
+ .match_phy_device = bcm8706_match_phy_device,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .phy_id = PHY_ID_BCM8727,
+ .phy_id_mask = 0xffffffff,
+ .name = "Broadcom BCM8727",
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = bcm87xx_config_init,
+ .config_aneg = bcm87xx_config_aneg,
+ .read_status = bcm87xx_read_status,
+ .ack_interrupt = bcm87xx_ack_interrupt,
+ .config_intr = bcm87xx_config_intr,
+ .did_interrupt = bcm87xx_did_interrupt,
+ .match_phy_device = bcm8727_match_phy_device,
+ .driver = { .owner = THIS_MODULE },
+} };
+
+static int __init bcm87xx_init(void)
+{
+ return phy_drivers_register(bcm87xx_driver,
+ ARRAY_SIZE(bcm87xx_driver));
+}
+module_init(bcm87xx_init);
+
+static void __exit bcm87xx_exit(void)
+{
+ phy_drivers_unregister(bcm87xx_driver,
+ ARRAY_SIZE(bcm87xx_driver));
+}
+module_exit(bcm87xx_exit);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 60338ff..f8c90ea 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -682,7 +682,8 @@ static int brcm_fet_config_intr(struct phy_device *phydev)
return err;
}
-static struct phy_driver bcm5411_driver = {
+static struct phy_driver broadcom_drivers[] = {
+{
.phy_id = PHY_ID_BCM5411,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5411",
@@ -695,9 +696,7 @@ static struct phy_driver bcm5411_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm5421_driver = {
+}, {
.phy_id = PHY_ID_BCM5421,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5421",
@@ -710,9 +709,7 @@ static struct phy_driver bcm5421_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm5461_driver = {
+}, {
.phy_id = PHY_ID_BCM5461,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5461",
@@ -725,9 +722,7 @@ static struct phy_driver bcm5461_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm5464_driver = {
+}, {
.phy_id = PHY_ID_BCM5464,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5464",
@@ -740,9 +735,7 @@ static struct phy_driver bcm5464_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm5481_driver = {
+}, {
.phy_id = PHY_ID_BCM5481,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5481",
@@ -755,9 +748,7 @@ static struct phy_driver bcm5481_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm5482_driver = {
+}, {
.phy_id = PHY_ID_BCM5482,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5482",
@@ -770,9 +761,7 @@ static struct phy_driver bcm5482_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm50610_driver = {
+}, {
.phy_id = PHY_ID_BCM50610,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610",
@@ -785,9 +774,7 @@ static struct phy_driver bcm50610_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm50610m_driver = {
+}, {
.phy_id = PHY_ID_BCM50610M,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610M",
@@ -800,9 +787,7 @@ static struct phy_driver bcm50610m_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm57780_driver = {
+}, {
.phy_id = PHY_ID_BCM57780,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM57780",
@@ -815,9 +800,7 @@ static struct phy_driver bcm57780_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcmac131_driver = {
+}, {
.phy_id = PHY_ID_BCMAC131,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCMAC131",
@@ -830,9 +813,7 @@ static struct phy_driver bcmac131_driver = {
.ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm5241_driver = {
+}, {
.phy_id = PHY_ID_BCM5241,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5241",
@@ -845,84 +826,18 @@ static struct phy_driver bcm5241_driver = {
.ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
.driver = { .owner = THIS_MODULE },
-};
+} };
static int __init broadcom_init(void)
{
- int ret;
-
- ret = phy_driver_register(&bcm5411_driver);
- if (ret)
- goto out_5411;
- ret = phy_driver_register(&bcm5421_driver);
- if (ret)
- goto out_5421;
- ret = phy_driver_register(&bcm5461_driver);
- if (ret)
- goto out_5461;
- ret = phy_driver_register(&bcm5464_driver);
- if (ret)
- goto out_5464;
- ret = phy_driver_register(&bcm5481_driver);
- if (ret)
- goto out_5481;
- ret = phy_driver_register(&bcm5482_driver);
- if (ret)
- goto out_5482;
- ret = phy_driver_register(&bcm50610_driver);
- if (ret)
- goto out_50610;
- ret = phy_driver_register(&bcm50610m_driver);
- if (ret)
- goto out_50610m;
- ret = phy_driver_register(&bcm57780_driver);
- if (ret)
- goto out_57780;
- ret = phy_driver_register(&bcmac131_driver);
- if (ret)
- goto out_ac131;
- ret = phy_driver_register(&bcm5241_driver);
- if (ret)
- goto out_5241;
- return ret;
-
-out_5241:
- phy_driver_unregister(&bcmac131_driver);
-out_ac131:
- phy_driver_unregister(&bcm57780_driver);
-out_57780:
- phy_driver_unregister(&bcm50610m_driver);
-out_50610m:
- phy_driver_unregister(&bcm50610_driver);
-out_50610:
- phy_driver_unregister(&bcm5482_driver);
-out_5482:
- phy_driver_unregister(&bcm5481_driver);
-out_5481:
- phy_driver_unregister(&bcm5464_driver);
-out_5464:
- phy_driver_unregister(&bcm5461_driver);
-out_5461:
- phy_driver_unregister(&bcm5421_driver);
-out_5421:
- phy_driver_unregister(&bcm5411_driver);
-out_5411:
- return ret;
+ return phy_drivers_register(broadcom_drivers,
+ ARRAY_SIZE(broadcom_drivers));
}
static void __exit broadcom_exit(void)
{
- phy_driver_unregister(&bcm5241_driver);
- phy_driver_unregister(&bcmac131_driver);
- phy_driver_unregister(&bcm57780_driver);
- phy_driver_unregister(&bcm50610m_driver);
- phy_driver_unregister(&bcm50610_driver);
- phy_driver_unregister(&bcm5482_driver);
- phy_driver_unregister(&bcm5481_driver);
- phy_driver_unregister(&bcm5464_driver);
- phy_driver_unregister(&bcm5461_driver);
- phy_driver_unregister(&bcm5421_driver);
- phy_driver_unregister(&bcm5411_driver);
+ phy_drivers_unregister(broadcom_drivers,
+ ARRAY_SIZE(broadcom_drivers));
}
module_init(broadcom_init);
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index d281731..db472ff 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -102,7 +102,8 @@ static int cis820x_config_intr(struct phy_device *phydev)
}
/* Cicada 8201, a.k.a Vitesse VSC8201 */
-static struct phy_driver cis8201_driver = {
+static struct phy_driver cis820x_driver[] = {
+{
.phy_id = 0x000fc410,
.name = "Cicada Cis8201",
.phy_id_mask = 0x000ffff0,
@@ -113,11 +114,8 @@ static struct phy_driver cis8201_driver = {
.read_status = &genphy_read_status,
.ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
- .driver = { .owner = THIS_MODULE,},
-};
-
-/* Cicada 8204 */
-static struct phy_driver cis8204_driver = {
+ .driver = { .owner = THIS_MODULE,},
+}, {
.phy_id = 0x000fc440,
.name = "Cicada Cis8204",
.phy_id_mask = 0x000fffc0,
@@ -128,32 +126,19 @@ static struct phy_driver cis8204_driver = {
.read_status = &genphy_read_status,
.ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
- .driver = { .owner = THIS_MODULE,},
-};
+ .driver = { .owner = THIS_MODULE,},
+} };
static int __init cicada_init(void)
{
- int ret;
-
- ret = phy_driver_register(&cis8204_driver);
- if (ret)
- goto err1;
-
- ret = phy_driver_register(&cis8201_driver);
- if (ret)
- goto err2;
- return 0;
-
-err2:
- phy_driver_unregister(&cis8204_driver);
-err1:
- return ret;
+ return phy_drivers_register(cis820x_driver,
+ ARRAY_SIZE(cis820x_driver));
}
static void __exit cicada_exit(void)
{
- phy_driver_unregister(&cis8204_driver);
- phy_driver_unregister(&cis8201_driver);
+ phy_drivers_unregister(cis820x_driver,
+ ARRAY_SIZE(cis820x_driver));
}
module_init(cicada_init);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 5f59cc0..81c7bc0 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -144,7 +144,8 @@ static int dm9161_ack_interrupt(struct phy_device *phydev)
return (err < 0) ? err : 0;
}
-static struct phy_driver dm9161e_driver = {
+static struct phy_driver dm91xx_driver[] = {
+{
.phy_id = 0x0181b880,
.name = "Davicom DM9161E",
.phy_id_mask = 0x0ffffff0,
@@ -153,9 +154,7 @@ static struct phy_driver dm9161e_driver = {
.config_aneg = dm9161_config_aneg,
.read_status = genphy_read_status,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver dm9161a_driver = {
+}, {
.phy_id = 0x0181b8a0,
.name = "Davicom DM9161A",
.phy_id_mask = 0x0ffffff0,
@@ -164,9 +163,7 @@ static struct phy_driver dm9161a_driver = {
.config_aneg = dm9161_config_aneg,
.read_status = genphy_read_status,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver dm9131_driver = {
+}, {
.phy_id = 0x00181b80,
.name = "Davicom DM9131",
.phy_id_mask = 0x0ffffff0,
@@ -177,38 +174,18 @@ static struct phy_driver dm9131_driver = {
.ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
.driver = { .owner = THIS_MODULE,},
-};
+} };
static int __init davicom_init(void)
{
- int ret;
-
- ret = phy_driver_register(&dm9161e_driver);
- if (ret)
- goto err1;
-
- ret = phy_driver_register(&dm9161a_driver);
- if (ret)
- goto err2;
-
- ret = phy_driver_register(&dm9131_driver);
- if (ret)
- goto err3;
- return 0;
-
- err3:
- phy_driver_unregister(&dm9161a_driver);
- err2:
- phy_driver_unregister(&dm9161e_driver);
- err1:
- return ret;
+ return phy_drivers_register(dm91xx_driver,
+ ARRAY_SIZE(dm91xx_driver));
}
static void __exit davicom_exit(void)
{
- phy_driver_unregister(&dm9161e_driver);
- phy_driver_unregister(&dm9161a_driver);
- phy_driver_unregister(&dm9131_driver);
+ phy_drivers_unregister(dm91xx_driver,
+ ARRAY_SIZE(dm91xx_driver));
}
module_init(davicom_init);
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 940b290..b0da022 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -17,6 +17,9 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -453,16 +456,16 @@ static void enable_status_frames(struct phy_device *phydev, bool on)
ext_write(0, phydev, PAGE6, PSF_CFG1, ver);
if (!phydev->attached_dev) {
- pr_warning("dp83640: expected to find an attached netdevice\n");
+ pr_warn("expected to find an attached netdevice\n");
return;
}
if (on) {
if (dev_mc_add(phydev->attached_dev, status_frame_dst))
- pr_warning("dp83640: failed to add mc address\n");
+ pr_warn("failed to add mc address\n");
} else {
if (dev_mc_del(phydev->attached_dev, status_frame_dst))
- pr_warning("dp83640: failed to delete mc address\n");
+ pr_warn("failed to delete mc address\n");
}
}
@@ -582,9 +585,9 @@ static void recalibrate(struct dp83640_clock *clock)
* read out and correct offsets
*/
val = ext_read(master, PAGE4, PTP_STS);
- pr_info("master PTP_STS 0x%04hx", val);
+ pr_info("master PTP_STS 0x%04hx\n", val);
val = ext_read(master, PAGE4, PTP_ESTS);
- pr_info("master PTP_ESTS 0x%04hx", val);
+ pr_info("master PTP_ESTS 0x%04hx\n", val);
event_ts.ns_lo = ext_read(master, PAGE4, PTP_EDATA);
event_ts.ns_hi = ext_read(master, PAGE4, PTP_EDATA);
event_ts.sec_lo = ext_read(master, PAGE4, PTP_EDATA);
@@ -594,9 +597,9 @@ static void recalibrate(struct dp83640_clock *clock)
list_for_each(this, &clock->phylist) {
tmp = list_entry(this, struct dp83640_private, list);
val = ext_read(tmp->phydev, PAGE4, PTP_STS);
- pr_info("slave PTP_STS 0x%04hx", val);
+ pr_info("slave PTP_STS 0x%04hx\n", val);
val = ext_read(tmp->phydev, PAGE4, PTP_ESTS);
- pr_info("slave PTP_ESTS 0x%04hx", val);
+ pr_info("slave PTP_ESTS 0x%04hx\n", val);
event_ts.ns_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
event_ts.ns_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
event_ts.sec_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
@@ -686,7 +689,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
prune_rx_ts(dp83640);
if (list_empty(&dp83640->rxpool)) {
- pr_debug("dp83640: rx timestamp pool is empty\n");
+ pr_debug("rx timestamp pool is empty\n");
goto out;
}
rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
@@ -709,7 +712,7 @@ static void decode_txts(struct dp83640_private *dp83640,
skb = skb_dequeue(&dp83640->tx_queue);
if (!skb) {
- pr_debug("dp83640: have timestamp but tx_queue empty\n");
+ pr_debug("have timestamp but tx_queue empty\n");
return;
}
ns = phy2txts(phy_txts);
@@ -847,7 +850,7 @@ static void dp83640_free_clocks(void)
list_for_each_safe(this, next, &phyter_clocks) {
clock = list_entry(this, struct dp83640_clock, list);
if (!list_empty(&clock->phylist)) {
- pr_warning("phy list non-empty while unloading");
+ pr_warn("phy list non-empty while unloading\n");
BUG();
}
list_del(&clock->list);
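dp83640.c now defines pr_fmt() ahead of its includes and drops the hand-written "dp83640:" prefixes from its messages; several other files in this series get the same treatment. A minimal sketch of how that prefixing works; the function is illustrative and KBUILD_MODNAME is supplied by kbuild.

/* Must appear before any header that pulls in printk.h so that every
 * pr_*() call in this file picks up the prefix.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static void example_report(int err)
{
	/* Emits "<modname>: probe failed: <err>" at KERN_WARNING level */
	pr_warn("probe failed: %d\n", err);
}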
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index 633680d..ba55adf 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -70,7 +70,7 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
lpa |= LPA_10FULL;
break;
default:
- printk(KERN_WARNING "fixed phy: unknown speed\n");
+ pr_warn("fixed phy: unknown speed\n");
return -EINVAL;
}
} else {
@@ -90,7 +90,7 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
lpa |= LPA_10HALF;
break;
default:
- printk(KERN_WARNING "fixed phy: unknown speed\n");
+ pr_warn("fixed phy: unknown speed\n");
return -EINVAL;
}
}
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 5ac46f5..d5199cb 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -41,6 +41,8 @@ MODULE_LICENSE("GPL");
#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */
#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */
+#define IP101A_G_IRQ_PIN_USED (1<<15) /* INTR pin used */
+#define IP101A_G_IRQ_DEFAULT IP101A_G_IRQ_PIN_USED
static int ip175c_config_init(struct phy_device *phydev)
{
@@ -136,6 +138,11 @@ static int ip1001_config_init(struct phy_device *phydev)
if (c < 0)
return c;
+ /* INTR pin used: speed/link/duplex will cause an interrupt */
+ c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
+ if (c < 0)
+ return c;
+
if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
/* Additional delay (2ns) used to adjust RX clock phase
* at RGMII interface */
@@ -195,7 +202,8 @@ static int ip101a_g_ack_interrupt(struct phy_device *phydev)
return 0;
}
-static struct phy_driver ip175c_driver = {
+static struct phy_driver icplus_driver[] = {
+{
.phy_id = 0x02430d80,
.name = "ICPlus IP175C",
.phy_id_mask = 0x0ffffff0,
@@ -206,9 +214,7 @@ static struct phy_driver ip175c_driver = {
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver ip1001_driver = {
+}, {
.phy_id = 0x02430d90,
.name = "ICPlus IP1001",
.phy_id_mask = 0x0ffffff0,
@@ -220,9 +226,7 @@ static struct phy_driver ip1001_driver = {
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver ip101a_g_driver = {
+}, {
.phy_id = 0x02430c54,
.name = "ICPlus IP101A/G",
.phy_id_mask = 0x0ffffff0,
@@ -236,28 +240,18 @@ static struct phy_driver ip101a_g_driver = {
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
-};
+} };
static int __init icplus_init(void)
{
- int ret = 0;
-
- ret = phy_driver_register(&ip1001_driver);
- if (ret < 0)
- return -ENODEV;
-
- ret = phy_driver_register(&ip101a_g_driver);
- if (ret < 0)
- return -ENODEV;
-
- return phy_driver_register(&ip175c_driver);
+ return phy_drivers_register(icplus_driver,
+ ARRAY_SIZE(icplus_driver));
}
static void __exit icplus_exit(void)
{
- phy_driver_unregister(&ip1001_driver);
- phy_driver_unregister(&ip101a_g_driver);
- phy_driver_unregister(&ip175c_driver);
+ phy_drivers_unregister(icplus_driver,
+ ARRAY_SIZE(icplus_driver));
}
module_init(icplus_init);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 6f6e8b6..6d1e3fc 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -149,7 +149,8 @@ static int lxt973_config_aneg(struct phy_device *phydev)
return phydev->priv ? 0 : genphy_config_aneg(phydev);
}
-static struct phy_driver lxt970_driver = {
+static struct phy_driver lxt97x_driver[] = {
+{
.phy_id = 0x78100000,
.name = "LXT970",
.phy_id_mask = 0xfffffff0,
@@ -160,10 +161,8 @@ static struct phy_driver lxt970_driver = {
.read_status = genphy_read_status,
.ack_interrupt = lxt970_ack_interrupt,
.config_intr = lxt970_config_intr,
- .driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver lxt971_driver = {
+ .driver = { .owner = THIS_MODULE,},
+}, {
.phy_id = 0x001378e0,
.name = "LXT971",
.phy_id_mask = 0xfffffff0,
@@ -173,10 +172,8 @@ static struct phy_driver lxt971_driver = {
.read_status = genphy_read_status,
.ack_interrupt = lxt971_ack_interrupt,
.config_intr = lxt971_config_intr,
- .driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver lxt973_driver = {
+ .driver = { .owner = THIS_MODULE,},
+}, {
.phy_id = 0x00137a10,
.name = "LXT973",
.phy_id_mask = 0xfffffff0,
@@ -185,39 +182,19 @@ static struct phy_driver lxt973_driver = {
.probe = lxt973_probe,
.config_aneg = lxt973_config_aneg,
.read_status = genphy_read_status,
- .driver = { .owner = THIS_MODULE,},
-};
+ .driver = { .owner = THIS_MODULE,},
+} };
static int __init lxt_init(void)
{
- int ret;
-
- ret = phy_driver_register(&lxt970_driver);
- if (ret)
- goto err1;
-
- ret = phy_driver_register(&lxt971_driver);
- if (ret)
- goto err2;
-
- ret = phy_driver_register(&lxt973_driver);
- if (ret)
- goto err3;
- return 0;
-
- err3:
- phy_driver_unregister(&lxt971_driver);
- err2:
- phy_driver_unregister(&lxt970_driver);
- err1:
- return ret;
+ return phy_drivers_register(lxt97x_driver,
+ ARRAY_SIZE(lxt97x_driver));
}
static void __exit lxt_exit(void)
{
- phy_driver_unregister(&lxt970_driver);
- phy_driver_unregister(&lxt971_driver);
- phy_driver_unregister(&lxt973_driver);
+ phy_drivers_unregister(lxt97x_driver,
+ ARRAY_SIZE(lxt97x_driver));
}
module_init(lxt_init);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 418928d..5d2a3f2 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -826,28 +826,14 @@ static struct phy_driver marvell_drivers[] = {
static int __init marvell_init(void)
{
- int ret;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(marvell_drivers); i++) {
- ret = phy_driver_register(&marvell_drivers[i]);
-
- if (ret) {
- while (i-- > 0)
- phy_driver_unregister(&marvell_drivers[i]);
- return ret;
- }
- }
-
- return 0;
+ return phy_drivers_register(marvell_drivers,
+ ARRAY_SIZE(marvell_drivers));
}
static void __exit marvell_exit(void)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(marvell_drivers); i++)
- phy_driver_unregister(&marvell_drivers[i]);
+ phy_drivers_unregister(marvell_drivers,
+ ARRAY_SIZE(marvell_drivers));
}
module_init(marvell_init);
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 39ea067..5c12018 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -46,7 +46,13 @@ static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
struct mdio_mux_parent_bus *pb = cb->parent;
int r;
- mutex_lock(&pb->mii_bus->mdio_lock);
+ /* In theory multiple mdio_mux could be stacked, thus creating
+ * more than a single level of nesting. But in practice,
+ * SINGLE_DEPTH_NESTING will cover the vast majority of use
+ * cases. We use it, instead of trying to handle the general
+ * case.
+ */
+ mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
if (r)
goto out;
@@ -71,7 +77,7 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id,
int r;
- mutex_lock(&pb->mii_bus->mdio_lock);
+ mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
if (r)
goto out;
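Both mux accessors take the parent bus lock with mutex_lock_nested() because the caller already holds an mdio_lock of the same lockdep class (the child bus), and a plain mutex_lock() would trigger a false recursive-locking report. A self-contained sketch of the annotation; the structure is illustrative.

#include <linux/mutex.h>

struct example_bus {
	struct mutex lock;
};

/* Mutexes initialised at the same call site share a lockdep class, so
 * taking lower->lock while holding upper->lock looks recursive to lockdep.
 * The subclass marks this single level of nesting as intentional.
 */
static void example_nested_access(struct example_bus *upper,
				  struct example_bus *lower)
{
	mutex_lock(&upper->lock);
	mutex_lock_nested(&lower->lock, SINGLE_DEPTH_NESTING);

	/* ... forward the access from one bus to the other ... */

	mutex_unlock(&lower->lock);
	mutex_unlock(&upper->lock);
}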
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 683ef1c..170eb41 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -13,6 +13,9 @@
* option) any later version.
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
@@ -22,6 +25,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/of_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -96,7 +100,7 @@ static int of_mdio_bus_match(struct device *dev, void *mdio_bus_np)
}
/**
* of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
- * @mdio_np: Pointer to the mii_bus.
+ * @mdio_bus_np: Pointer to the mii_bus.
*
* Returns a pointer to the mii_bus, or NULL if none found.
*
@@ -148,7 +152,7 @@ int mdiobus_register(struct mii_bus *bus)
err = device_register(&bus->dev);
if (err) {
- printk(KERN_ERR "mii_bus %s failed to register\n", bus->id);
+ pr_err("mii_bus %s failed to register\n", bus->id);
return -EINVAL;
}
@@ -229,7 +233,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
struct phy_device *phydev;
int err;
- phydev = get_phy_device(bus, addr);
+ phydev = get_phy_device(bus, addr, false);
if (IS_ERR(phydev) || phydev == NULL)
return phydev;
@@ -305,6 +309,12 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
struct phy_device *phydev = to_phy_device(dev);
struct phy_driver *phydrv = to_phy_driver(drv);
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ if (phydrv->match_phy_device)
+ return phydrv->match_phy_device(phydev);
+
return ((phydrv->phy_id & phydrv->phy_id_mask) ==
(phydev->phy_id & phydrv->phy_id_mask));
}
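With this change a driver can claim a PHY either via an OF match or via its own match_phy_device() callback; only when neither applies does the core fall back to comparing the masked phy_id. A sketch of such a callback for a clause-45 device, in the spirit of bcm8727_match_phy_device() in bcm87xx.c above; the ID constant is illustrative.

#include <linux/module.h>
#include <linux/phy.h>

#define EXAMPLE_C45_PMA_ID	0x03625e90	/* illustrative device identifier */

static int example_match_phy_device(struct phy_device *phydev)
{
	/* Clause-45 PHYs carry no single phy_id, so match on the PMA/PMD
	 * (MMD 1) identifier collected by get_phy_c45_ids() during the
	 * bus scan instead.
	 */
	return phydev->c45_ids.device_ids[1] == EXAMPLE_C45_PMA_ID;
}

static struct phy_driver example_c45_driver = {
	.name			= "Example 10G PHY",
	.match_phy_device	= example_match_phy_device,
	.driver			= { .owner = THIS_MODULE },
};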
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 590f902..cf287e0 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -114,7 +114,8 @@ static int ks8051_config_init(struct phy_device *phydev)
return 0;
}
-static struct phy_driver ks8737_driver = {
+static struct phy_driver ksphy_driver[] = {
+{
.phy_id = PHY_ID_KS8737,
.phy_id_mask = 0x00fffff0,
.name = "Micrel KS8737",
@@ -126,9 +127,7 @@ static struct phy_driver ks8737_driver = {
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = ks8737_config_intr,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver ks8041_driver = {
+}, {
.phy_id = PHY_ID_KS8041,
.phy_id_mask = 0x00fffff0,
.name = "Micrel KS8041",
@@ -141,9 +140,7 @@ static struct phy_driver ks8041_driver = {
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver ks8051_driver = {
+}, {
.phy_id = PHY_ID_KS8051,
.phy_id_mask = 0x00fffff0,
.name = "Micrel KS8051",
@@ -156,12 +153,10 @@ static struct phy_driver ks8051_driver = {
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver ks8001_driver = {
+}, {
.phy_id = PHY_ID_KS8001,
.name = "Micrel KS8001 or KS8721",
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = 0x00ffffff,
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
@@ -170,11 +165,9 @@ static struct phy_driver ks8001_driver = {
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver ksz9021_driver = {
+}, {
.phy_id = PHY_ID_KSZ9021,
- .phy_id_mask = 0x000fff10,
+ .phy_id_mask = 0x000ffffe,
.name = "Micrel KSZ9021 Gigabit PHY",
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
@@ -185,51 +178,18 @@ static struct phy_driver ksz9021_driver = {
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = ksz9021_config_intr,
.driver = { .owner = THIS_MODULE, },
-};
+} };
static int __init ksphy_init(void)
{
- int ret;
-
- ret = phy_driver_register(&ks8001_driver);
- if (ret)
- goto err1;
-
- ret = phy_driver_register(&ksz9021_driver);
- if (ret)
- goto err2;
-
- ret = phy_driver_register(&ks8737_driver);
- if (ret)
- goto err3;
- ret = phy_driver_register(&ks8041_driver);
- if (ret)
- goto err4;
- ret = phy_driver_register(&ks8051_driver);
- if (ret)
- goto err5;
-
- return 0;
-
-err5:
- phy_driver_unregister(&ks8041_driver);
-err4:
- phy_driver_unregister(&ks8737_driver);
-err3:
- phy_driver_unregister(&ksz9021_driver);
-err2:
- phy_driver_unregister(&ks8001_driver);
-err1:
- return ret;
+ return phy_drivers_register(ksphy_driver,
+ ARRAY_SIZE(ksphy_driver));
}
static void __exit ksphy_exit(void)
{
- phy_driver_unregister(&ks8001_driver);
- phy_driver_unregister(&ks8737_driver);
- phy_driver_unregister(&ksz9021_driver);
- phy_driver_unregister(&ks8041_driver);
- phy_driver_unregister(&ks8051_driver);
+ phy_drivers_unregister(ksphy_driver,
+ ARRAY_SIZE(ksphy_driver));
}
module_init(ksphy_init);
@@ -240,8 +200,8 @@ MODULE_AUTHOR("David J. Choi");
MODULE_LICENSE("GPL");
static struct mdio_device_id __maybe_unused micrel_tbl[] = {
- { PHY_ID_KSZ9021, 0x000fff10 },
- { PHY_ID_KS8001, 0x00fffff0 },
+ { PHY_ID_KSZ9021, 0x000ffffe },
+ { PHY_ID_KS8001, 0x00ffffff },
{ PHY_ID_KS8737, 0x00fffff0 },
{ PHY_ID_KS8041, 0x00fffff0 },
{ PHY_ID_KS8051, 0x00fffff0 },
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 04bb8fc..9a5f234 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -15,6 +15,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
@@ -22,6 +24,8 @@
#include <linux/phy.h>
#include <linux/netdevice.h>
+#define DEBUG
+
/* DP83865 phy identifier values */
#define DP83865_PHY_ID 0x20005c7a
@@ -112,8 +116,8 @@ static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
ns_exp_write(phydev, 0x1c0,
ns_exp_read(phydev, 0x1c0) & 0xfffe);
- printk(KERN_DEBUG "DP83865 PHY: 10BASE-T HDX loopback %s\n",
- (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
+ pr_debug("10BASE-T HDX loopback %s\n",
+ (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
}
static int ns_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3cbda08..7ca2ff9 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -15,6 +15,9 @@
* option) any later version.
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
@@ -32,6 +35,7 @@
#include <linux/phy.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include <linux/mdio.h>
#include <linux/atomic.h>
#include <asm/io.h>
@@ -44,18 +48,16 @@
*/
void phy_print_status(struct phy_device *phydev)
{
- pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev),
- phydev->link ? "Up" : "Down");
if (phydev->link)
- printk(KERN_CONT " - %d/%s", phydev->speed,
- DUPLEX_FULL == phydev->duplex ?
- "Full" : "Half");
-
- printk(KERN_CONT "\n");
+ pr_info("%s - Link is Up - %d/%s\n",
+ dev_name(&phydev->dev),
+ phydev->speed,
+ DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
+ else
+ pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
}
EXPORT_SYMBOL(phy_print_status);
-
/**
* phy_clear_interrupt - Ack the phy device's interrupt
* @phydev: the phy_device struct
@@ -482,9 +484,8 @@ static void phy_force_reduction(struct phy_device *phydev)
phydev->speed = settings[idx].speed;
phydev->duplex = settings[idx].duplex;
- pr_info("Trying %d/%s\n", phydev->speed,
- DUPLEX_FULL == phydev->duplex ?
- "FULL" : "HALF");
+ pr_info("Trying %d/%s\n",
+ phydev->speed, DUPLEX_FULL == phydev->duplex ? "FULL" : "HALF");
}
@@ -598,9 +599,8 @@ int phy_start_interrupts(struct phy_device *phydev)
IRQF_SHARED,
"phy_interrupt",
phydev) < 0) {
- printk(KERN_WARNING "%s: Can't get IRQ %d (PHY)\n",
- phydev->bus->name,
- phydev->irq);
+ pr_warn("%s: Can't get IRQ %d (PHY)\n",
+ phydev->bus->name, phydev->irq);
phydev->irq = PHY_POLL;
return 0;
}
@@ -838,10 +838,10 @@ void phy_state_machine(struct work_struct *work)
phydev->autoneg = AUTONEG_DISABLE;
- pr_info("Trying %d/%s\n", phydev->speed,
- DUPLEX_FULL ==
- phydev->duplex ?
- "FULL" : "HALF");
+ pr_info("Trying %d/%s\n",
+ phydev->speed,
+ DUPLEX_FULL == phydev->duplex ?
+ "FULL" : "HALF");
}
break;
case PHY_NOLINK:
@@ -968,3 +968,283 @@ void phy_state_machine(struct work_struct *work)
schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ);
}
+
+static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
+ int addr)
+{
+ /* Write the desired MMD Devad */
+ bus->write(bus, addr, MII_MMD_CTRL, devad);
+
+ /* Write the desired MMD register address */
+ bus->write(bus, addr, MII_MMD_DATA, prtad);
+
+ /* Select the Function : DATA with no post increment */
+ bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
+}
+
+/**
+ * phy_read_mmd_indirect - reads data from the MMD registers
+ * @bus: the target MII bus
+ * @prtad: MMD Address
+ * @devad: MMD DEVAD
+ * @addr: PHY address on the MII bus
+ *
+ * Description: reads data from the MMD registers (clause 22 access to the
+ * clause 45 registers) of the specified phy address.
+ * To read these registers:
+ * 1) Write reg 13 // DEVAD
+ * 2) Write reg 14 // MMD Address
+ * 3) Write reg 13 // MMD Data Command for MMD DEVAD
+ * 4) Read reg 14 // Read MMD data
+ */
+static int phy_read_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
+ int addr)
+{
+ u32 ret;
+
+ mmd_phy_indirect(bus, prtad, devad, addr);
+
+ /* Read the content of the MMD's selected register */
+ ret = bus->read(bus, addr, MII_MMD_DATA);
+
+ return ret;
+}
+
+/**
+ * phy_write_mmd_indirect - writes data to the MMD registers
+ * @bus: the target MII bus
+ * @prtad: MMD Address
+ * @devad: MMD DEVAD
+ * @addr: PHY address on the MII bus
+ * @data: data to write in the MMD register
+ *
+ * Description: writes data to the MMD registers of the specified
+ * phy address.
+ * To write these registers:
+ * 1) Write reg 13 // DEVAD
+ * 2) Write reg 14 // MMD Address
+ * 3) Write reg 13 // MMD Data Command for MMD DEVAD
+ * 4) Write reg 14 // Write MMD data
+ */
+static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
+ int addr, u32 data)
+{
+ mmd_phy_indirect(bus, prtad, devad, addr);
+
+ /* Write the data into MMD's selected register */
+ bus->write(bus, addr, MII_MMD_DATA, data);
+}
+
+static u32 phy_eee_to_adv(u16 eee_adv)
+{
+ u32 adv = 0;
+
+ if (eee_adv & MDIO_EEE_100TX)
+ adv |= ADVERTISED_100baseT_Full;
+ if (eee_adv & MDIO_EEE_1000T)
+ adv |= ADVERTISED_1000baseT_Full;
+ if (eee_adv & MDIO_EEE_10GT)
+ adv |= ADVERTISED_10000baseT_Full;
+ if (eee_adv & MDIO_EEE_1000KX)
+ adv |= ADVERTISED_1000baseKX_Full;
+ if (eee_adv & MDIO_EEE_10GKX4)
+ adv |= ADVERTISED_10000baseKX4_Full;
+ if (eee_adv & MDIO_EEE_10GKR)
+ adv |= ADVERTISED_10000baseKR_Full;
+
+ return adv;
+}
+
+static u32 phy_eee_to_supported(u16 eee_caps)
+{
+ u32 supported = 0;
+
+ if (eee_caps & MDIO_EEE_100TX)
+ supported |= SUPPORTED_100baseT_Full;
+ if (eee_caps & MDIO_EEE_1000T)
+ supported |= SUPPORTED_1000baseT_Full;
+ if (eee_caps & MDIO_EEE_10GT)
+ supported |= SUPPORTED_10000baseT_Full;
+ if (eee_caps & MDIO_EEE_1000KX)
+ supported |= SUPPORTED_1000baseKX_Full;
+ if (eee_caps & MDIO_EEE_10GKX4)
+ supported |= SUPPORTED_10000baseKX4_Full;
+ if (eee_caps & MDIO_EEE_10GKR)
+ supported |= SUPPORTED_10000baseKR_Full;
+
+ return supported;
+}
+
+static u16 phy_adv_to_eee(u32 adv)
+{
+ u16 reg = 0;
+
+ if (adv & ADVERTISED_100baseT_Full)
+ reg |= MDIO_EEE_100TX;
+ if (adv & ADVERTISED_1000baseT_Full)
+ reg |= MDIO_EEE_1000T;
+ if (adv & ADVERTISED_10000baseT_Full)
+ reg |= MDIO_EEE_10GT;
+ if (adv & ADVERTISED_1000baseKX_Full)
+ reg |= MDIO_EEE_1000KX;
+ if (adv & ADVERTISED_10000baseKX4_Full)
+ reg |= MDIO_EEE_10GKX4;
+ if (adv & ADVERTISED_10000baseKR_Full)
+ reg |= MDIO_EEE_10GKR;
+
+ return reg;
+}
+
+/**
+ * phy_init_eee - init and check the EEE feature
+ * @phydev: target phy_device struct
+ * @clk_stop_enable: PHY may stop the clock during LPI
+ *
+ * Description: checks whether Energy-Efficient Ethernet (EEE) is
+ * supported by looking at MMD registers 3.20 and 7.60/61, and programs
+ * the "Clock stop enable" bit in MMD register 3.0 if required.
+ */
+int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
+{
+ int ret = -EPROTONOSUPPORT;
+
+ /* According to 802.3az, EEE is supported only in full-duplex mode.
+ * The EEE feature is also only active when the core operates with an
+ * MII, GMII or RGMII interface.
+ */
+ if ((phydev->duplex == DUPLEX_FULL) &&
+ ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
+ (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
+ int eee_lp, eee_cap, eee_adv;
+ u32 lp, cap, adv;
+ int idx, status;
+
+ /* Read phy status to properly get the right settings */
+ status = phy_read_status(phydev);
+ if (status)
+ return status;
+
+ /* First check if the EEE ability is supported */
+ eee_cap = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
+ MDIO_MMD_PCS, phydev->addr);
+ if (eee_cap < 0)
+ return eee_cap;
+
+ cap = phy_eee_to_supported(eee_cap);
+ if (!cap)
+ goto eee_exit;
+
+ /* Check which link settings negotiated and verify it in
+ * the EEE advertising registers.
+ */
+ eee_lp = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
+ MDIO_MMD_AN, phydev->addr);
+ if (eee_lp < 0)
+ return eee_lp;
+
+ eee_adv = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
+ MDIO_MMD_AN, phydev->addr);
+ if (eee_adv < 0)
+ return eee_adv;
+
+ adv = phy_eee_to_adv(eee_adv);
+ lp = phy_eee_to_adv(eee_lp);
+ idx = phy_find_setting(phydev->speed, phydev->duplex);
+ if ((lp & adv & settings[idx].setting))
+ goto eee_exit;
+
+ if (clk_stop_enable) {
+ /* Configure the PHY to stop receiving xMII
+ * clock while it is signaling LPI.
+ */
+ int val = phy_read_mmd_indirect(phydev->bus, MDIO_CTRL1,
+ MDIO_MMD_PCS,
+ phydev->addr);
+ if (val < 0)
+ return val;
+
+ val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
+ phy_write_mmd_indirect(phydev->bus, MDIO_CTRL1,
+ MDIO_MMD_PCS, phydev->addr, val);
+ }
+
+ ret = 0; /* EEE supported */
+ }
+
+eee_exit:
+ return ret;
+}
+EXPORT_SYMBOL(phy_init_eee);
+
+/**
+ * phy_get_eee_err - report the EEE wake error count
+ * @phydev: target phy_device struct
+ *
+ * Description: reports the number of times the PHY
+ * failed to complete its normal wake sequence.
+ */
+int phy_get_eee_err(struct phy_device *phydev)
+{
+ return phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_WK_ERR,
+ MDIO_MMD_PCS, phydev->addr);
+
+}
+EXPORT_SYMBOL(phy_get_eee_err);
+
+/**
+ * phy_ethtool_get_eee - get EEE supported and status
+ * @phydev: target phy_device struct
+ * @data: ethtool_eee data
+ *
+ * Description: reports the Supported/Advertisement/LP Advertisement
+ * capabilities.
+ */
+int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
+{
+ int val;
+
+ /* Get Supported EEE */
+ val = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
+ MDIO_MMD_PCS, phydev->addr);
+ if (val < 0)
+ return val;
+ data->supported = phy_eee_to_supported(val);
+
+ /* Get advertisement EEE */
+ val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
+ MDIO_MMD_AN, phydev->addr);
+ if (val < 0)
+ return val;
+ data->advertised = phy_eee_to_adv(val);
+
+ /* Get LP advertisement EEE */
+ val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
+ MDIO_MMD_AN, phydev->addr);
+ if (val < 0)
+ return val;
+ data->lp_advertised = phy_eee_to_adv(val);
+
+ return 0;
+}
+EXPORT_SYMBOL(phy_ethtool_get_eee);
+
+/**
+ * phy_ethtool_set_eee - set EEE supported and status
+ * @phydev: target phy_device struct
+ * @data: ethtool_eee data
+ *
+ * Description: programs the EEE Advertisement register.
+ */
+int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
+{
+ int val;
+
+ val = phy_adv_to_eee(data->advertised);
+ phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
+ phydev->addr, val);
+
+ return 0;
+}
+EXPORT_SYMBOL(phy_ethtool_set_eee);
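The new EEE helpers are meant to be driven from a MAC driver's ethtool path: phy_init_eee() validates support against MMD 3.20 and 7.60/61 and can set the clock-stop bit, while the get/set pair services the ETHTOOL_GEEE/ETHTOOL_SEEE operations. A sketch of how a MAC driver might wire them up; the private structure and its fields are illustrative.

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

struct example_priv {
	struct phy_device *phydev;
	bool eee_enabled;
};

static int example_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	struct example_priv *priv = netdev_priv(ndev);

	edata->eee_enabled = priv->eee_enabled;

	/* Fills supported/advertised/lp_advertised from the PHY's MMDs. */
	return phy_ethtool_get_eee(priv->phydev, edata);
}

static int example_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	struct example_priv *priv = netdev_priv(ndev);
	int ret;

	if (edata->eee_enabled) {
		/* Re-check the negotiated link mode and ask the PHY to stop
		 * the xMII clock while it signals LPI.
		 */
		ret = phy_init_eee(priv->phydev, true);
		if (ret)
			return ret;
	}
	priv->eee_enabled = edata->eee_enabled;

	/* Programs the EEE advertisement MMD register (7.60). */
	return phy_ethtool_set_eee(priv->phydev, edata);
}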
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index de86a55..8af46e8 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -14,6 +14,9 @@
* option) any later version.
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
@@ -149,8 +152,8 @@ int phy_scan_fixups(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_scan_fixups);
-static struct phy_device* phy_device_create(struct mii_bus *bus,
- int addr, int phy_id)
+struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
+ bool is_c45, struct phy_c45_device_ids *c45_ids)
{
struct phy_device *dev;
@@ -171,8 +174,11 @@ static struct phy_device* phy_device_create(struct mii_bus *bus,
dev->autoneg = AUTONEG_ENABLE;
+ dev->is_c45 = is_c45;
dev->addr = addr;
dev->phy_id = phy_id;
+ if (c45_ids)
+ dev->c45_ids = *c45_ids;
dev->bus = bus;
dev->dev.parent = bus->parent;
dev->dev.bus = &mdio_bus_type;
@@ -197,20 +203,99 @@ static struct phy_device* phy_device_create(struct mii_bus *bus,
return dev;
}
+EXPORT_SYMBOL(phy_device_create);
+
+/**
+ * get_phy_c45_ids - reads the specified addr for its 802.3-c45 IDs.
+ * @bus: the target MII bus
+ * @addr: PHY address on the MII bus
+ * @phy_id: where to store the ID retrieved.
+ * @c45_ids: where to store the c45 ID information.
+ *
+ * If the PHY's devices-in-package value appears to be valid, it and the
+ * corresponding device identifiers are stored in @c45_ids and zero is
+ * stored in @phy_id. Otherwise 0xffffffff is stored in @phy_id. Returns
+ * zero on success.
+ *
+ */
+static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
+ struct phy_c45_device_ids *c45_ids) {
+ int phy_reg;
+ int i, reg_addr;
+ const int num_ids = ARRAY_SIZE(c45_ids->device_ids);
+
+ /* Find first non-zero Devices In package. Device
+ * zero is reserved, so don't probe it.
+ */
+ for (i = 1;
+ i < num_ids && c45_ids->devices_in_package == 0;
+ i++) {
+ reg_addr = MII_ADDR_C45 | i << 16 | 6;
+ phy_reg = mdiobus_read(bus, addr, reg_addr);
+ if (phy_reg < 0)
+ return -EIO;
+ c45_ids->devices_in_package = (phy_reg & 0xffff) << 16;
+
+ reg_addr = MII_ADDR_C45 | i << 16 | 5;
+ phy_reg = mdiobus_read(bus, addr, reg_addr);
+ if (phy_reg < 0)
+ return -EIO;
+ c45_ids->devices_in_package |= (phy_reg & 0xffff);
+
+ /* If mostly Fs, there is no device there,
+ * let's get out of here.
+ */
+ if ((c45_ids->devices_in_package & 0x1fffffff) == 0x1fffffff) {
+ *phy_id = 0xffffffff;
+ return 0;
+ }
+ }
+
+ /* Now probe Device Identifiers for each device present. */
+ for (i = 1; i < num_ids; i++) {
+ if (!(c45_ids->devices_in_package & (1 << i)))
+ continue;
+
+ reg_addr = MII_ADDR_C45 | i << 16 | MII_PHYSID1;
+ phy_reg = mdiobus_read(bus, addr, reg_addr);
+ if (phy_reg < 0)
+ return -EIO;
+ c45_ids->device_ids[i] = (phy_reg & 0xffff) << 16;
+
+ reg_addr = MII_ADDR_C45 | i << 16 | MII_PHYSID2;
+ phy_reg = mdiobus_read(bus, addr, reg_addr);
+ if (phy_reg < 0)
+ return -EIO;
+ c45_ids->device_ids[i] |= (phy_reg & 0xffff);
+ }
+ *phy_id = 0;
+ return 0;
+}
/**
* get_phy_id - reads the specified addr for its ID.
* @bus: the target MII bus
* @addr: PHY address on the MII bus
* @phy_id: where to store the ID retrieved.
+ * @is_c45: If true the PHY uses the 802.3 clause 45 protocol
+ * @c45_ids: where to store the c45 ID information.
+ *
+ * Description: In the case of a 802.3-c22 PHY, reads the ID registers
+ * of the PHY at @addr on the @bus, stores it in @phy_id and returns
+ * zero on success.
+ *
+ * In the case of a 802.3-c45 PHY, get_phy_c45_ids() is invoked, and
+ * its return value is in turn returned.
*
- * Description: Reads the ID registers of the PHY at @addr on the
- * @bus, stores it in @phy_id and returns zero on success.
*/
-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
+static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
+ bool is_c45, struct phy_c45_device_ids *c45_ids)
{
int phy_reg;
+ if (is_c45)
+ return get_phy_c45_ids(bus, addr, phy_id, c45_ids);
+
/* Grab the bits from PHYIR1, and put them
* in the upper half */
phy_reg = mdiobus_read(bus, addr, MII_PHYSID1);
@@ -235,17 +320,19 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
* get_phy_device - reads the specified PHY device and returns its @phy_device struct
* @bus: the target MII bus
* @addr: PHY address on the MII bus
+ * @is_c45: If true the PHY uses the 802.3 clause 45 protocol
*
* Description: Reads the ID registers of the PHY at @addr on the
* @bus, then allocates and returns the phy_device to represent it.
*/
-struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
+struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
{
+ struct phy_c45_device_ids c45_ids = {0};
struct phy_device *dev = NULL;
- u32 phy_id;
+ u32 phy_id = 0;
int r;
- r = get_phy_id(bus, addr, &phy_id);
+ r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
if (r)
return ERR_PTR(r);
@@ -253,7 +340,7 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
if ((phy_id & 0x1fffffff) == 0x1fffffff)
return NULL;
- dev = phy_device_create(bus, addr, phy_id);
+ dev = phy_device_create(bus, addr, phy_id, is_c45, &c45_ids);
return dev;
}
@@ -446,6 +533,11 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
/* Assume that if there is no driver, that it doesn't
* exist, and we should use the genphy driver. */
if (NULL == d->driver) {
+ if (phydev->is_c45) {
+ pr_err("No driver for phy %x\n", phydev->phy_id);
+ return -ENODEV;
+ }
+
d->driver = &genphy_driver.driver;
err = d->driver->probe(d);
@@ -975,8 +1067,8 @@ int phy_driver_register(struct phy_driver *new_driver)
retval = driver_register(&new_driver->driver);
if (retval) {
- printk(KERN_ERR "%s: Error %d in registering driver\n",
- new_driver->name, retval);
+ pr_err("%s: Error %d in registering driver\n",
+ new_driver->name, retval);
return retval;
}
@@ -987,12 +1079,37 @@ int phy_driver_register(struct phy_driver *new_driver)
}
EXPORT_SYMBOL(phy_driver_register);
+int phy_drivers_register(struct phy_driver *new_driver, int n)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < n; i++) {
+ ret = phy_driver_register(new_driver + i);
+ if (ret) {
+ while (i-- > 0)
+ phy_driver_unregister(new_driver + i);
+ break;
+ }
+ }
+ return ret;
+}
+EXPORT_SYMBOL(phy_drivers_register);
+
void phy_driver_unregister(struct phy_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(phy_driver_unregister);
+void phy_drivers_unregister(struct phy_driver *drv, int n)
+{
+ int i;
+ for (i = 0; i < n; i++) {
+ phy_driver_unregister(drv + i);
+ }
+}
+EXPORT_SYMBOL(phy_drivers_unregister);
+
static struct phy_driver genphy_driver = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
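get_phy_c45_ids() above forms clause-45 register addresses by OR-ing MII_ADDR_C45 with the MMD device address in bits 16-20 and the register number in the low 16 bits, then hands them to mdiobus_read(). The same encoding, pulled into a standalone helper for clarity; the helper name is illustrative.

#include <linux/mdio.h>
#include <linux/phy.h>

/* Read one clause-45 register: devad selects the MMD (e.g. MDIO_MMD_PMAPMD),
 * regnum is the 16-bit register address inside that MMD.
 */
static int example_c45_read(struct mii_bus *bus, int phy_addr, int devad,
			    u16 regnum)
{
	u32 addr = MII_ADDR_C45 | (devad << 16) | regnum;

	return mdiobus_read(bus, phy_addr, addr);
}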
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index f414ffb..72f9347 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -65,11 +65,7 @@ static struct phy_driver rtl821x_driver = {
static int __init realtek_init(void)
{
- int ret;
-
- ret = phy_driver_register(&rtl821x_driver);
-
- return ret;
+ return phy_driver_register(&rtl821x_driver);
}
static void __exit realtek_exit(void)
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 42c43e7..6d61923 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -61,7 +61,8 @@ static int lan911x_config_init(struct phy_device *phydev)
return smsc_phy_ack_interrupt(phydev);
}
-static struct phy_driver lan83c185_driver = {
+static struct phy_driver smsc_phy_driver[] = {
+{
.phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN83C185",
@@ -83,9 +84,7 @@ static struct phy_driver lan83c185_driver = {
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE, }
-};
-
-static struct phy_driver lan8187_driver = {
+}, {
.phy_id = 0x0007c0b0, /* OUI=0x00800f, Model#=0x0b */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8187",
@@ -107,9 +106,7 @@ static struct phy_driver lan8187_driver = {
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE, }
-};
-
-static struct phy_driver lan8700_driver = {
+}, {
.phy_id = 0x0007c0c0, /* OUI=0x00800f, Model#=0x0c */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8700",
@@ -131,9 +128,7 @@ static struct phy_driver lan8700_driver = {
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE, }
-};
-
-static struct phy_driver lan911x_int_driver = {
+}, {
.phy_id = 0x0007c0d0, /* OUI=0x00800f, Model#=0x0d */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN911x Internal PHY",
@@ -155,9 +150,7 @@ static struct phy_driver lan911x_int_driver = {
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE, }
-};
-
-static struct phy_driver lan8710_driver = {
+}, {
.phy_id = 0x0007c0f0, /* OUI=0x00800f, Model#=0x0f */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8710/LAN8720",
@@ -179,53 +172,18 @@ static struct phy_driver lan8710_driver = {
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE, }
-};
+} };
static int __init smsc_init(void)
{
- int ret;
-
- ret = phy_driver_register (&lan83c185_driver);
- if (ret)
- goto err1;
-
- ret = phy_driver_register (&lan8187_driver);
- if (ret)
- goto err2;
-
- ret = phy_driver_register (&lan8700_driver);
- if (ret)
- goto err3;
-
- ret = phy_driver_register (&lan911x_int_driver);
- if (ret)
- goto err4;
-
- ret = phy_driver_register (&lan8710_driver);
- if (ret)
- goto err5;
-
- return 0;
-
-err5:
- phy_driver_unregister (&lan911x_int_driver);
-err4:
- phy_driver_unregister (&lan8700_driver);
-err3:
- phy_driver_unregister (&lan8187_driver);
-err2:
- phy_driver_unregister (&lan83c185_driver);
-err1:
- return ret;
+ return phy_drivers_register(smsc_phy_driver,
+ ARRAY_SIZE(smsc_phy_driver));
}
static void __exit smsc_exit(void)
{
- phy_driver_unregister (&lan8710_driver);
- phy_driver_unregister (&lan911x_int_driver);
- phy_driver_unregister (&lan8700_driver);
- phy_driver_unregister (&lan8187_driver);
- phy_driver_unregister (&lan83c185_driver);
+ return phy_drivers_unregister(smsc_phy_driver,
+ ARRAY_SIZE(smsc_phy_driver));
}
MODULE_DESCRIPTION("SMSC PHY driver");
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 4eb98bc..1c3abce 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -11,6 +11,8 @@
* by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -356,7 +358,7 @@ static struct spi_driver ks8995_driver = {
static int __init ks8995_init(void)
{
- printk(KERN_INFO DRV_DESC " version " DRV_VERSION"\n");
+ pr_info(DRV_DESC " version " DRV_VERSION "\n");
return spi_register_driver(&ks8995_driver);
}
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 187a2fa..5e1eb13 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -81,7 +81,8 @@ static int ste10Xp_ack_interrupt(struct phy_device *phydev)
return 0;
}
-static struct phy_driver ste101p_pdriver = {
+static struct phy_driver ste10xp_pdriver[] = {
+{
.phy_id = STE101P_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "STe101p",
@@ -95,9 +96,7 @@ static struct phy_driver ste101p_pdriver = {
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = {.owner = THIS_MODULE,}
-};
-
-static struct phy_driver ste100p_pdriver = {
+}, {
.phy_id = STE100P_PHY_ID,
.phy_id_mask = 0xffffffff,
.name = "STe100p",
@@ -111,22 +110,18 @@ static struct phy_driver ste100p_pdriver = {
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = {.owner = THIS_MODULE,}
-};
+} };
static int __init ste10Xp_init(void)
{
- int retval;
-
- retval = phy_driver_register(&ste100p_pdriver);
- if (retval < 0)
- return retval;
- return phy_driver_register(&ste101p_pdriver);
+ return phy_drivers_register(ste10xp_pdriver,
+ ARRAY_SIZE(ste10xp_pdriver));
}
static void __exit ste10Xp_exit(void)
{
- phy_driver_unregister(&ste100p_pdriver);
- phy_driver_unregister(&ste101p_pdriver);
+ phy_drivers_unregister(ste10xp_pdriver,
+ ARRAY_SIZE(ste10xp_pdriver));
}
module_init(ste10Xp_init);
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 0ec8e09..2585c38 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -138,21 +138,6 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
return err;
}
-/* Vitesse 824x */
-static struct phy_driver vsc8244_driver = {
- .phy_id = PHY_ID_VSC8244,
- .name = "Vitesse VSC8244",
- .phy_id_mask = 0x000fffc0,
- .features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
- .config_init = &vsc824x_config_init,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
- .ack_interrupt = &vsc824x_ack_interrupt,
- .config_intr = &vsc82xx_config_intr,
- .driver = { .owner = THIS_MODULE,},
-};
-
static int vsc8221_config_init(struct phy_device *phydev)
{
int err;
@@ -165,8 +150,22 @@ static int vsc8221_config_init(struct phy_device *phydev)
Options are 802.3Z SerDes or SGMII */
}
-/* Vitesse 8221 */
-static struct phy_driver vsc8221_driver = {
+/* Vitesse 824x */
+static struct phy_driver vsc82xx_driver[] = {
+{
+ .phy_id = PHY_ID_VSC8244,
+ .name = "Vitesse VSC8244",
+ .phy_id_mask = 0x000fffc0,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &vsc824x_config_init,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &vsc824x_ack_interrupt,
+ .config_intr = &vsc82xx_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+}, {
+ /* Vitesse 8221 */
.phy_id = PHY_ID_VSC8221,
.phy_id_mask = 0x000ffff0,
.name = "Vitesse VSC8221",
@@ -177,26 +176,19 @@ static struct phy_driver vsc8221_driver = {
.read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
- .driver = { .owner = THIS_MODULE,},
-};
+ .driver = { .owner = THIS_MODULE,},
+} };
static int __init vsc82xx_init(void)
{
- int err;
-
- err = phy_driver_register(&vsc8244_driver);
- if (err < 0)
- return err;
- err = phy_driver_register(&vsc8221_driver);
- if (err < 0)
- phy_driver_unregister(&vsc8244_driver);
- return err;
+ return phy_drivers_register(vsc82xx_driver,
+ ARRAY_SIZE(vsc82xx_driver));
}
static void __exit vsc82xx_exit(void)
{
- phy_driver_unregister(&vsc8244_driver);
- phy_driver_unregister(&vsc8221_driver);
+ return phy_drivers_unregister(vsc82xx_driver,
+ ARRAY_SIZE(vsc82xx_driver));
}
module_init(vsc82xx_init);
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index d4c9db3..a34d6bf 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -390,10 +390,10 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
#endif
#ifdef CONFIG_SLIP_MODE_SLIP6
if (sl->mode & SL_MODE_SLIP6)
- count = slip_esc6(p, (unsigned char *) sl->xbuff, len);
+ count = slip_esc6(p, sl->xbuff, len);
else
#endif
- count = slip_esc(p, (unsigned char *) sl->xbuff, len);
+ count = slip_esc(p, sl->xbuff, len);
/* Order of next two lines is *very* important.
* When we are sending a little amount of data,
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
index 89024d5..6a7260b 100644
--- a/drivers/net/team/Kconfig
+++ b/drivers/net/team/Kconfig
@@ -15,6 +15,17 @@ menuconfig NET_TEAM
if NET_TEAM
+config NET_TEAM_MODE_BROADCAST
+ tristate "Broadcast mode support"
+ depends on NET_TEAM
+ ---help---
+ Basic mode where packets are always transmitted by all suitable ports.
+
+ All added ports are setup to have team's mac address.
+
+ To compile this team mode as a module, choose M here: the module
+ will be called team_mode_broadcast.
+
config NET_TEAM_MODE_ROUNDROBIN
tristate "Round-robin mode support"
depends on NET_TEAM
@@ -22,7 +33,7 @@ config NET_TEAM_MODE_ROUNDROBIN
Basic mode where port used for transmitting packets is selected in
round-robin fashion using packet counter.
- All added ports are setup to have bond's mac address.
+ All added ports are setup to have team's mac address.
To compile this team mode as a module, choose M here: the module
will be called team_mode_roundrobin.
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
index fb9f4c1..9757630 100644
--- a/drivers/net/team/Makefile
+++ b/drivers/net/team/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_NET_TEAM) += team.o
+obj-$(CONFIG_NET_TEAM_MODE_BROADCAST) += team_mode_broadcast.o
obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
obj-$(CONFIG_NET_TEAM_MODE_LOADBALANCE) += team_mode_loadbalance.o
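The new broadcast mode is built as its own module and plugs into team.c through team_mode_register()/team_mode_unregister(), which after this series take a const struct team_mode and track registrations on an internal list. A sketch of the registration boilerplate such a mode module uses, assuming the team_mode/team_mode_ops layout seen in team.c below; the mode here is a stub, whereas the real team_mode_broadcast transmits each frame on every enabled port.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_team.h>

static bool example_transmit(struct team *team, struct sk_buff *skb)
{
	/* A real mode hands skb to one or more ports here; returning
	 * false tells the core the frame was not transmitted, and the
	 * mode is responsible for freeing it on that path.
	 */
	dev_kfree_skb_any(skb);
	return false;
}

static const struct team_mode_ops example_mode_ops = {
	.transmit	= example_transmit,
};

static const struct team_mode example_mode = {
	.kind	= "example",
	.owner	= THIS_MODULE,
	.ops	= &example_mode_ops,
};

static int __init example_mode_init(void)
{
	return team_mode_register(&example_mode);
}
module_init(example_mode_init);

static void __exit example_mode_exit(void)
{
	team_mode_unregister(&example_mode);
}
module_exit(example_mode_exit);

MODULE_LICENSE("GPL");
/* Lets team.c's request_module("team-mode-%s", kind) autoload this mode. */
MODULE_ALIAS("team-mode-example");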
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index c61ae35..b104c05 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1,5 +1,5 @@
/*
- * net/drivers/team/team.c - Network team device driver
+ * drivers/net/team/team.c - Network team device driver
* Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -18,6 +18,7 @@
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
+#include <linux/netpoll.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
@@ -26,6 +27,7 @@
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
+#include <net/sch_generic.h>
#include <linux/if_team.h>
#define DRV_NAME "team"
@@ -82,14 +84,16 @@ static void team_refresh_port_linkup(struct team_port *port)
port->state.linkup;
}
+
/*******************
* Options handling
*******************/
struct team_option_inst { /* One for each option instance */
struct list_head list;
+ struct list_head tmp_list;
struct team_option *option;
- struct team_port *port; /* != NULL if per-port */
+ struct team_option_inst_info info;
bool changed;
bool removed;
};
@@ -106,22 +110,6 @@ static struct team_option *__team_find_option(struct team *team,
return NULL;
}
-static int __team_option_inst_add(struct team *team, struct team_option *option,
- struct team_port *port)
-{
- struct team_option_inst *opt_inst;
-
- opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
- if (!opt_inst)
- return -ENOMEM;
- opt_inst->option = option;
- opt_inst->port = port;
- opt_inst->changed = true;
- opt_inst->removed = false;
- list_add_tail(&opt_inst->list, &team->option_inst_list);
- return 0;
-}
-
static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
list_del(&opt_inst->list);
@@ -139,14 +127,49 @@ static void __team_option_inst_del_option(struct team *team,
}
}
+static int __team_option_inst_add(struct team *team, struct team_option *option,
+ struct team_port *port)
+{
+ struct team_option_inst *opt_inst;
+ unsigned int array_size;
+ unsigned int i;
+ int err;
+
+ array_size = option->array_size;
+ if (!array_size)
+ array_size = 1; /* No array but still need one instance */
+
+ for (i = 0; i < array_size; i++) {
+ opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
+ if (!opt_inst)
+ return -ENOMEM;
+ opt_inst->option = option;
+ opt_inst->info.port = port;
+ opt_inst->info.array_index = i;
+ opt_inst->changed = true;
+ opt_inst->removed = false;
+ list_add_tail(&opt_inst->list, &team->option_inst_list);
+ if (option->init) {
+ err = option->init(team, &opt_inst->info);
+ if (err)
+ return err;
+ }
+
+ }
+ return 0;
+}
+
static int __team_option_inst_add_option(struct team *team,
struct team_option *option)
{
struct team_port *port;
int err;
- if (!option->per_port)
- return __team_option_inst_add(team, option, 0);
+ if (!option->per_port) {
+ err = __team_option_inst_add(team, option, NULL);
+ if (err)
+ goto inst_del_option;
+ }
list_for_each_entry(port, &team->port_list, list) {
err = __team_option_inst_add(team, option, port);
@@ -180,7 +203,7 @@ static void __team_option_inst_del_port(struct team *team,
list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
if (opt_inst->option->per_port &&
- opt_inst->port == port)
+ opt_inst->info.port == port)
__team_option_inst_del(opt_inst);
}
}
@@ -211,7 +234,7 @@ static void __team_option_inst_mark_removed_port(struct team *team,
struct team_option_inst *opt_inst;
list_for_each_entry(opt_inst, &team->option_inst_list, list) {
- if (opt_inst->port == port) {
+ if (opt_inst->info.port == port) {
opt_inst->changed = true;
opt_inst->removed = true;
}
@@ -324,28 +347,12 @@ void team_options_unregister(struct team *team,
}
EXPORT_SYMBOL(team_options_unregister);
-static int team_option_port_add(struct team *team, struct team_port *port)
-{
- int err;
-
- err = __team_option_inst_add_port(team, port);
- if (err)
- return err;
- __team_options_change_check(team);
- return 0;
-}
-
-static void team_option_port_del(struct team *team, struct team_port *port)
-{
- __team_option_inst_mark_removed_port(team, port);
- __team_options_change_check(team);
- __team_option_inst_del_port(team, port);
-}
-
static int team_option_get(struct team *team,
struct team_option_inst *opt_inst,
struct team_gsetter_ctx *ctx)
{
+ if (!opt_inst->option->getter)
+ return -EOPNOTSUPP;
return opt_inst->option->getter(team, ctx);
}
@@ -353,16 +360,26 @@ static int team_option_set(struct team *team,
struct team_option_inst *opt_inst,
struct team_gsetter_ctx *ctx)
{
- int err;
+ if (!opt_inst->option->setter)
+ return -EOPNOTSUPP;
+ return opt_inst->option->setter(team, ctx);
+}
- err = opt_inst->option->setter(team, ctx);
- if (err)
- return err;
+void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
+{
+ struct team_option_inst *opt_inst;
+ opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
opt_inst->changed = true;
+}
+EXPORT_SYMBOL(team_option_inst_set_change);
+
+void team_options_change_check(struct team *team)
+{
__team_options_change_check(team);
- return err;
}
+EXPORT_SYMBOL(team_options_change_check);
+
/****************
* Mode handling
@@ -371,13 +388,18 @@ static int team_option_set(struct team *team,
static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);
-static struct team_mode *__find_mode(const char *kind)
+struct team_mode_item {
+ struct list_head list;
+ const struct team_mode *mode;
+};
+
+static struct team_mode_item *__find_mode(const char *kind)
{
- struct team_mode *mode;
+ struct team_mode_item *mitem;
- list_for_each_entry(mode, &mode_list, list) {
- if (strcmp(mode->kind, kind) == 0)
- return mode;
+ list_for_each_entry(mitem, &mode_list, list) {
+ if (strcmp(mitem->mode->kind, kind) == 0)
+ return mitem;
}
return NULL;
}
@@ -392,49 +414,65 @@ static bool is_good_mode_name(const char *name)
return true;
}
-int team_mode_register(struct team_mode *mode)
+int team_mode_register(const struct team_mode *mode)
{
int err = 0;
+ struct team_mode_item *mitem;
if (!is_good_mode_name(mode->kind) ||
mode->priv_size > TEAM_MODE_PRIV_SIZE)
return -EINVAL;
+
+ mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
+ if (!mitem)
+ return -ENOMEM;
+
spin_lock(&mode_list_lock);
if (__find_mode(mode->kind)) {
err = -EEXIST;
+ kfree(mitem);
goto unlock;
}
- list_add_tail(&mode->list, &mode_list);
+ mitem->mode = mode;
+ list_add_tail(&mitem->list, &mode_list);
unlock:
spin_unlock(&mode_list_lock);
return err;
}
EXPORT_SYMBOL(team_mode_register);
-int team_mode_unregister(struct team_mode *mode)
+void team_mode_unregister(const struct team_mode *mode)
{
+ struct team_mode_item *mitem;
+
spin_lock(&mode_list_lock);
- list_del_init(&mode->list);
+ mitem = __find_mode(mode->kind);
+ if (mitem) {
+ list_del_init(&mitem->list);
+ kfree(mitem);
+ }
spin_unlock(&mode_list_lock);
- return 0;
}
EXPORT_SYMBOL(team_mode_unregister);
-static struct team_mode *team_mode_get(const char *kind)
+static const struct team_mode *team_mode_get(const char *kind)
{
- struct team_mode *mode;
+ struct team_mode_item *mitem;
+ const struct team_mode *mode = NULL;
spin_lock(&mode_list_lock);
- mode = __find_mode(kind);
- if (!mode) {
+ mitem = __find_mode(kind);
+ if (!mitem) {
spin_unlock(&mode_list_lock);
request_module("team-mode-%s", kind);
spin_lock(&mode_list_lock);
- mode = __find_mode(kind);
+ mitem = __find_mode(kind);
}
- if (mode)
+ if (mitem) {
+ mode = mitem->mode;
if (!try_module_get(mode->owner))
mode = NULL;
+ }
spin_unlock(&mode_list_lock);
return mode;
@@ -458,26 +496,45 @@ rx_handler_result_t team_dummy_receive(struct team *team,
return RX_HANDLER_ANOTHER;
}
-static void team_adjust_ops(struct team *team)
+static const struct team_mode __team_no_mode = {
+ .kind = "*NOMODE*",
+};
+
+static bool team_is_mode_set(struct team *team)
+{
+ return team->mode != &__team_no_mode;
+}
+
+static void team_set_no_mode(struct team *team)
+{
+ team->mode = &__team_no_mode;
+}
+
+static void __team_adjust_ops(struct team *team, int en_port_count)
{
/*
* To avoid checks in rx/tx skb paths, ensure here that non-null and
* correct ops are always set.
*/
- if (list_empty(&team->port_list) ||
- !team->mode || !team->mode->ops->transmit)
+ if (!en_port_count || !team_is_mode_set(team) ||
+ !team->mode->ops->transmit)
team->ops.transmit = team_dummy_transmit;
else
team->ops.transmit = team->mode->ops->transmit;
- if (list_empty(&team->port_list) ||
- !team->mode || !team->mode->ops->receive)
+ if (!en_port_count || !team_is_mode_set(team) ||
+ !team->mode->ops->receive)
team->ops.receive = team_dummy_receive;
else
team->ops.receive = team->mode->ops->receive;
}
+static void team_adjust_ops(struct team *team)
+{
+ __team_adjust_ops(team, team->en_port_count);
+}
+
/*
* We can benefit from the fact that it's ensured no port is present
* at the time of mode change. Therefore no packets are in flight, so there's no
@@ -487,7 +544,7 @@ static int __team_change_mode(struct team *team,
const struct team_mode *new_mode)
{
/* Check if mode was previously set and do cleanup if so */
- if (team->mode) {
+ if (team_is_mode_set(team)) {
void (*exit_op)(struct team *team) = team->ops.exit;
/* Clear ops area so no callback is called any longer */
@@ -497,7 +554,7 @@ static int __team_change_mode(struct team *team,
if (exit_op)
exit_op(team);
team_mode_put(team->mode);
- team->mode = NULL;
+ team_set_no_mode(team);
/* zero private data area */
memset(&team->mode_priv, 0,
sizeof(struct team) - offsetof(struct team, mode_priv));
@@ -523,7 +580,7 @@ static int __team_change_mode(struct team *team,
static int team_change_mode(struct team *team, const char *kind)
{
- struct team_mode *new_mode;
+ const struct team_mode *new_mode;
struct net_device *dev = team->dev;
int err;
@@ -532,7 +589,7 @@ static int team_change_mode(struct team *team, const char *kind)
return -EBUSY;
}
- if (team->mode && strcmp(team->mode->kind, kind) == 0) {
+ if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
netdev_err(dev, "Unable to change to the same mode the team is in\n");
return -EINVAL;
}
@@ -559,8 +616,6 @@ static int team_change_mode(struct team *team, const char *kind)
* Rx path frame handler
************************/
-static bool team_port_enabled(struct team_port *port);
-
/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
@@ -618,11 +673,6 @@ static bool team_port_find(const struct team *team,
return false;
}
-static bool team_port_enabled(struct team_port *port)
-{
- return port->index != -1;
-}
-
/*
* Enable/disable port by adding to enabled port hashlist and setting
* port->index (Might be racy so reader could see incorrect ifindex when
@@ -637,6 +687,9 @@ static void team_port_enable(struct team *team,
port->index = team->en_port_count++;
hlist_add_head_rcu(&port->hlist,
team_port_index_hash(team, port->index));
+ team_adjust_ops(team);
+ if (team->ops.port_enabled)
+ team->ops.port_enabled(team, port);
}
static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -656,14 +709,20 @@ static void __reconstruct_port_hlist(struct team *team, int rm_index)
static void team_port_disable(struct team *team,
struct team_port *port)
{
- int rm_index = port->index;
-
if (!team_port_enabled(port))
return;
+ if (team->ops.port_disabled)
+ team->ops.port_disabled(team, port);
hlist_del_rcu(&port->hlist);
- __reconstruct_port_hlist(team, rm_index);
- team->en_port_count--;
+ __reconstruct_port_hlist(team, port->index);
port->index = -1;
+ __team_adjust_ops(team, team->en_port_count - 1);
+ /*
+ * Wait until readers see adjusted ops. This ensures that
+ * readers never see team->en_port_count == 0
+ */
+ synchronize_rcu();
+ team->en_port_count--;
}
#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -675,12 +734,14 @@ static void __team_compute_features(struct team *team)
struct team_port *port;
u32 vlan_features = TEAM_VLAN_FEATURES;
unsigned short max_hard_header_len = ETH_HLEN;
+ unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
list_for_each_entry(port, &team->port_list, list) {
vlan_features = netdev_increment_features(vlan_features,
port->dev->vlan_features,
TEAM_VLAN_FEATURES);
+ dst_release_flag &= port->dev->priv_flags;
if (port->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = port->dev->hard_header_len;
}
@@ -688,6 +749,9 @@ static void __team_compute_features(struct team *team)
team->dev->vlan_features = vlan_features;
team->dev->hard_header_len = max_hard_header_len;
+ flags = team->dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
+ team->dev->priv_flags = flags | dst_release_flag;
+
netdev_change_features(team->dev);
}
@@ -730,6 +794,58 @@ static void team_port_leave(struct team *team, struct team_port *port)
dev_put(team->dev);
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+{
+ struct netpoll *np;
+ int err;
+
+ np = kzalloc(sizeof(*np), GFP_KERNEL);
+ if (!np)
+ return -ENOMEM;
+
+ err = __netpoll_setup(np, port->dev);
+ if (err) {
+ kfree(np);
+ return err;
+ }
+ port->np = np;
+ return err;
+}
+
+static void team_port_disable_netpoll(struct team_port *port)
+{
+ struct netpoll *np = port->np;
+
+ if (!np)
+ return;
+ port->np = NULL;
+
+ /* Wait for transmitting packets to finish before freeing. */
+ synchronize_rcu_bh();
+ __netpoll_cleanup(np);
+ kfree(np);
+}
+
+static struct netpoll_info *team_netpoll_info(struct team *team)
+{
+ return team->dev->npinfo;
+}
+
+#else
+static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+{
+ return 0;
+}
+static void team_port_disable_netpoll(struct team_port *port)
+{
+}
+static struct netpoll_info *team_netpoll_info(struct team *team)
+{
+ return NULL;
+}
+#endif
+
static void __team_port_change_check(struct team_port *port, bool linkup);
static int team_port_add(struct team *team, struct net_device *port_dev)
@@ -758,7 +874,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
return -EBUSY;
}
- port = kzalloc(sizeof(struct team_port), GFP_KERNEL);
+ port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
+ GFP_KERNEL);
if (!port)
return -ENOMEM;
@@ -795,6 +912,15 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_vids_add;
}
+ if (team_netpoll_info(team)) {
+ err = team_port_enable_netpoll(team, port);
+ if (err) {
+ netdev_err(dev, "Failed to enable netpoll on device %s\n",
+ portname);
+ goto err_enable_netpoll;
+ }
+ }
+
err = netdev_set_master(port_dev, dev);
if (err) {
netdev_err(dev, "Device %s failed to set master\n", portname);
@@ -809,7 +935,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_handler_register;
}
- err = team_option_port_add(team, port);
+ err = __team_option_inst_add_port(team, port);
if (err) {
netdev_err(dev, "Device %s failed to add per-port options\n",
portname);
@@ -819,9 +945,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
port->index = -1;
team_port_enable(team, port);
list_add_tail_rcu(&port->list, &team->port_list);
- team_adjust_ops(team);
__team_compute_features(team);
__team_port_change_check(port, !!netif_carrier_ok(port_dev));
+ __team_options_change_check(team);
netdev_info(dev, "Port device %s added\n", portname);
@@ -834,6 +960,9 @@ err_handler_register:
netdev_set_master(port_dev, NULL);
err_set_master:
+ team_port_disable_netpoll(port);
+
+err_enable_netpoll:
vlan_vids_del_by_dev(port_dev, dev);
err_vids_add:
@@ -865,14 +994,16 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
return -ENOENT;
}
+ __team_option_inst_mark_removed_port(team, port);
+ __team_options_change_check(team);
+ __team_option_inst_del_port(team, port);
port->removed = true;
__team_port_change_check(port, false);
team_port_disable(team, port);
list_del_rcu(&port->list);
- team_adjust_ops(team);
- team_option_port_del(team, port);
netdev_rx_handler_unregister(port_dev);
netdev_set_master(port_dev, NULL);
+ team_port_disable_netpoll(port);
vlan_vids_del_by_dev(port_dev, dev);
dev_close(port_dev);
team_port_leave(team, port);
@@ -891,11 +1022,9 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
* Net device ops
*****************/
-static const char team_no_mode_kind[] = "*NOMODE*";
-
static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
- ctx->data.str_val = team->mode ? team->mode->kind : team_no_mode_kind;
+ ctx->data.str_val = team->mode->kind;
return 0;
}
@@ -907,39 +1036,47 @@ static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
static int team_port_en_option_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
- ctx->data.bool_val = team_port_enabled(ctx->port);
+ struct team_port *port = ctx->info->port;
+
+ ctx->data.bool_val = team_port_enabled(port);
return 0;
}
static int team_port_en_option_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
+ struct team_port *port = ctx->info->port;
+
if (ctx->data.bool_val)
- team_port_enable(team, ctx->port);
+ team_port_enable(team, port);
else
- team_port_disable(team, ctx->port);
+ team_port_disable(team, port);
return 0;
}
static int team_user_linkup_option_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
- ctx->data.bool_val = ctx->port->user.linkup;
+ struct team_port *port = ctx->info->port;
+
+ ctx->data.bool_val = port->user.linkup;
return 0;
}
static int team_user_linkup_option_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
- ctx->port->user.linkup = ctx->data.bool_val;
- team_refresh_port_linkup(ctx->port);
+ struct team_port *port = ctx->info->port;
+
+ port->user.linkup = ctx->data.bool_val;
+ team_refresh_port_linkup(port);
return 0;
}
static int team_user_linkup_en_option_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
- struct team_port *port = ctx->port;
+ struct team_port *port = ctx->info->port;
ctx->data.bool_val = port->user.linkup_enabled;
return 0;
@@ -948,10 +1085,10 @@ static int team_user_linkup_en_option_get(struct team *team,
static int team_user_linkup_en_option_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
- struct team_port *port = ctx->port;
+ struct team_port *port = ctx->info->port;
port->user.linkup_enabled = ctx->data.bool_val;
- team_refresh_port_linkup(ctx->port);
+ team_refresh_port_linkup(port);
return 0;
}
@@ -985,6 +1122,22 @@ static const struct team_option team_options[] = {
},
};
+static struct lock_class_key team_netdev_xmit_lock_key;
+static struct lock_class_key team_netdev_addr_lock_key;
+
+static void team_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *unused)
+{
+ lockdep_set_class(&txq->_xmit_lock, &team_netdev_xmit_lock_key);
+}
+
+static void team_set_lockdep_class(struct net_device *dev)
+{
+ lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key);
+ netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL);
+}
+
static int team_init(struct net_device *dev)
{
struct team *team = netdev_priv(dev);
@@ -993,6 +1146,7 @@ static int team_init(struct net_device *dev)
team->dev = dev;
mutex_init(&team->lock);
+ team_set_no_mode(team);
team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
if (!team->pcpu_stats)
@@ -1011,6 +1165,8 @@ static int team_init(struct net_device *dev)
goto err_options_register;
netif_carrier_off(dev);
+ team_set_lockdep_class(dev);
+
return 0;
err_options_register:
@@ -1079,6 +1235,29 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ /*
+ * This helper function exists to help dev_pick_tx get the correct
+ * destination queue. Using a helper function skips a call to
+ * skb_tx_hash and will put the skbs in the queue we expect on their
+ * way down to the team driver.
+ */
+ u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
+
+ /*
+ * Save the original txq to restore before passing to the driver
+ */
+ qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
+
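+ /* Fold an out-of-range queue index back into the valid range by
+ * repeated subtraction, avoiding a modulo on the transmit fast path.
+ */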
+ if (unlikely(txq >= dev->real_num_tx_queues)) {
+ do {
+ txq -= dev->real_num_tx_queues;
+ } while (txq >= dev->real_num_tx_queues);
+ }
+ return txq;
+}
+
static void team_change_rx_flags(struct net_device *dev, int change)
{
struct team *team = netdev_priv(dev);
@@ -1116,10 +1295,11 @@ static int team_set_mac_address(struct net_device *dev, void *p)
{
struct team *team = netdev_priv(dev);
struct team_port *port;
- struct sockaddr *addr = p;
+ int err;
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ err = eth_mac_addr(dev, p);
+ if (err)
+ return err;
rcu_read_lock();
list_for_each_entry_rcu(port, &team->port_list, list)
if (team->ops.port_change_mac)
@@ -1240,6 +1420,48 @@ static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
return 0;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void team_poll_controller(struct net_device *dev)
+{
+}
+
+static void __team_netpoll_cleanup(struct team *team)
+{
+ struct team_port *port;
+
+ list_for_each_entry(port, &team->port_list, list)
+ team_port_disable_netpoll(port);
+}
+
+static void team_netpoll_cleanup(struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+
+ mutex_lock(&team->lock);
+ __team_netpoll_cleanup(team);
+ mutex_unlock(&team->lock);
+}
+
+static int team_netpoll_setup(struct net_device *dev,
+ struct netpoll_info *npinfo)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ int err = 0;
+
+ mutex_lock(&team->lock);
+ list_for_each_entry(port, &team->port_list, list) {
+ err = team_port_enable_netpoll(team, port);
+ if (err) {
+ __team_netpoll_cleanup(team);
+ break;
+ }
+ }
+ mutex_unlock(&team->lock);
+ return err;
+}
+#endif
+
static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
{
struct team *team = netdev_priv(dev);
@@ -1289,6 +1511,7 @@ static const struct net_device_ops team_netdev_ops = {
.ndo_open = team_open,
.ndo_stop = team_close,
.ndo_start_xmit = team_xmit,
+ .ndo_select_queue = team_select_queue,
.ndo_change_rx_flags = team_change_rx_flags,
.ndo_set_rx_mode = team_set_rx_mode,
.ndo_set_mac_address = team_set_mac_address,
@@ -1296,6 +1519,11 @@ static const struct net_device_ops team_netdev_ops = {
.ndo_get_stats64 = team_get_stats64,
.ndo_vlan_rx_add_vid = team_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = team_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = team_poll_controller,
+ .ndo_netpoll_setup = team_netpoll_setup,
+ .ndo_netpoll_cleanup = team_netpoll_cleanup,
+#endif
.ndo_add_slave = team_add_slave,
.ndo_del_slave = team_del_slave,
.ndo_fix_features = team_fix_features,
@@ -1321,7 +1549,7 @@ static void team_setup(struct net_device *dev)
* bring us to promisc mode in case a unicast addr is added.
* Let this up to underlay drivers.
*/
- dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GRO;
@@ -1358,12 +1586,24 @@ static int team_validate(struct nlattr *tb[], struct nlattr *data[])
return 0;
}
+static unsigned int team_get_num_tx_queues(void)
+{
+ return TEAM_DEFAULT_NUM_TX_QUEUES;
+}
+
+static unsigned int team_get_num_rx_queues(void)
+{
+ return TEAM_DEFAULT_NUM_RX_QUEUES;
+}
+
static struct rtnl_link_ops team_link_ops __read_mostly = {
- .kind = DRV_NAME,
- .priv_size = sizeof(struct team),
- .setup = team_setup,
- .newlink = team_newlink,
- .validate = team_validate,
+ .kind = DRV_NAME,
+ .priv_size = sizeof(struct team),
+ .setup = team_setup,
+ .newlink = team_newlink,
+ .validate = team_validate,
+ .get_num_tx_queues = team_get_num_tx_queues,
+ .get_num_rx_queues = team_get_num_rx_queues,
};
@@ -1404,7 +1644,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
void *hdr;
int err;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -1466,7 +1706,7 @@ static int team_nl_send_generic(struct genl_info *info, struct team *team,
struct sk_buff *skb;
int err;
- skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -1482,16 +1722,128 @@ err_fill:
return err;
}
-static int team_nl_fill_options_get(struct sk_buff *skb,
- u32 pid, u32 seq, int flags,
- struct team *team, bool fillall)
+typedef int team_nl_send_func_t(struct sk_buff *skb,
+ struct team *team, u32 pid);
+
+static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 pid)
+{
+ return genlmsg_unicast(dev_net(team->dev), skb, pid);
+}
+
+static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
+ struct team_option_inst *opt_inst)
+{
+ struct nlattr *option_item;
+ struct team_option *option = opt_inst->option;
+ struct team_option_inst_info *opt_inst_info = &opt_inst->info;
+ struct team_gsetter_ctx ctx;
+ int err;
+
+ ctx.info = opt_inst_info;
+ err = team_option_get(team, opt_inst, &ctx);
+ if (err)
+ return err;
+
+ option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
+ if (!option_item)
+ return -EMSGSIZE;
+
+ if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
+ goto nest_cancel;
+ if (opt_inst_info->port &&
+ nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
+ opt_inst_info->port->dev->ifindex))
+ goto nest_cancel;
+ if (opt_inst->option->array_size &&
+ nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
+ opt_inst_info->array_index))
+ goto nest_cancel;
+
+ switch (option->type) {
+ case TEAM_OPTION_TYPE_U32:
+ if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
+ goto nest_cancel;
+ if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
+ goto nest_cancel;
+ break;
+ case TEAM_OPTION_TYPE_STRING:
+ if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
+ goto nest_cancel;
+ if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
+ ctx.data.str_val))
+ goto nest_cancel;
+ break;
+ case TEAM_OPTION_TYPE_BINARY:
+ if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
+ goto nest_cancel;
+ if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
+ ctx.data.bin_val.ptr))
+ goto nest_cancel;
+ break;
+ case TEAM_OPTION_TYPE_BOOL:
+ if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
+ goto nest_cancel;
+ if (ctx.data.bool_val &&
+ nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
+ goto nest_cancel;
+ break;
+ default:
+ BUG();
+ }
+ if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
+ goto nest_cancel;
+ if (opt_inst->changed) {
+ if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
+ goto nest_cancel;
+ opt_inst->changed = false;
+ }
+ nla_nest_end(skb, option_item);
+ return 0;
+
+nest_cancel:
+ nla_nest_cancel(skb, option_item);
+ return -EMSGSIZE;
+}
+
+static int __send_and_alloc_skb(struct sk_buff **pskb,
+ struct team *team, u32 pid,
+ team_nl_send_func_t *send_func)
+{
+ int err;
+
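+ /* Flush the currently filled skb (if any) through send_func, then
+ * allocate a fresh one so the caller can keep building a multipart
+ * message.
+ */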
+ if (*pskb) {
+ err = send_func(*pskb, team, pid);
+ if (err)
+ return err;
+ }
+ *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!*pskb)
+ return -ENOMEM;
+ return 0;
+}
+
+static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq,
+ int flags, team_nl_send_func_t *send_func,
+ struct list_head *sel_opt_inst_list)
{
struct nlattr *option_list;
+ struct nlmsghdr *nlh;
void *hdr;
struct team_option_inst *opt_inst;
int err;
+ struct sk_buff *skb = NULL;
+ bool incomplete;
+ int i;
- hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+ opt_inst = list_first_entry(sel_opt_inst_list,
+ struct team_option_inst, tmp_list);
+
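+ /* Emit the selected option instances as NLM_F_MULTI messages; when an
+ * skb fills up, continue from the current list position in a fresh skb
+ * and terminate the sequence with NLMSG_DONE.
+ */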
+start_again:
+ err = __send_and_alloc_skb(&skb, team, pid, send_func);
+ if (err)
+ return err;
+
+ hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI,
TEAM_CMD_OPTIONS_GET);
if (IS_ERR(hdr))
return PTR_ERR(hdr);
@@ -1500,122 +1852,80 @@ static int team_nl_fill_options_get(struct sk_buff *skb,
goto nla_put_failure;
option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
if (!option_list)
- return -EMSGSIZE;
-
- list_for_each_entry(opt_inst, &team->option_inst_list, list) {
- struct nlattr *option_item;
- struct team_option *option = opt_inst->option;
- struct team_gsetter_ctx ctx;
+ goto nla_put_failure;
- /* Include only changed options if fill all mode is not on */
- if (!fillall && !opt_inst->changed)
- continue;
- option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
- if (!option_item)
- goto nla_put_failure;
- if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
- goto nla_put_failure;
- if (opt_inst->changed) {
- if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
- goto nla_put_failure;
- opt_inst->changed = false;
- }
- if (opt_inst->removed &&
- nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
- goto nla_put_failure;
- if (opt_inst->port &&
- nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
- opt_inst->port->dev->ifindex))
- goto nla_put_failure;
- ctx.port = opt_inst->port;
- switch (option->type) {
- case TEAM_OPTION_TYPE_U32:
- if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
- goto nla_put_failure;
- err = team_option_get(team, opt_inst, &ctx);
- if (err)
- goto errout;
- if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA,
- ctx.data.u32_val))
- goto nla_put_failure;
- break;
- case TEAM_OPTION_TYPE_STRING:
- if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
- goto nla_put_failure;
- err = team_option_get(team, opt_inst, &ctx);
- if (err)
- goto errout;
- if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
- ctx.data.str_val))
- goto nla_put_failure;
- break;
- case TEAM_OPTION_TYPE_BINARY:
- if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
- goto nla_put_failure;
- err = team_option_get(team, opt_inst, &ctx);
- if (err)
- goto errout;
- if (nla_put(skb, TEAM_ATTR_OPTION_DATA,
- ctx.data.bin_val.len, ctx.data.bin_val.ptr))
- goto nla_put_failure;
- break;
- case TEAM_OPTION_TYPE_BOOL:
- if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
- goto nla_put_failure;
- err = team_option_get(team, opt_inst, &ctx);
- if (err)
- goto errout;
- if (ctx.data.bool_val &&
- nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
- goto nla_put_failure;
- break;
- default:
- BUG();
+ i = 0;
+ incomplete = false;
+ list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
+ err = team_nl_fill_one_option_get(skb, team, opt_inst);
+ if (err) {
+ if (err == -EMSGSIZE) {
+ if (!i)
+ goto errout;
+ incomplete = true;
+ break;
+ }
+ goto errout;
}
- nla_nest_end(skb, option_item);
+ i++;
}
nla_nest_end(skb, option_list);
- return genlmsg_end(skb, hdr);
+ genlmsg_end(skb, hdr);
+ if (incomplete)
+ goto start_again;
+
+send_done:
+ nlh = nlmsg_put(skb, pid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
+ if (!nlh) {
+ err = __send_and_alloc_skb(&skb, team, pid, send_func);
+ if (err)
+ goto errout;
+ goto send_done;
+ }
+
+ return send_func(skb, team, pid);
nla_put_failure:
err = -EMSGSIZE;
errout:
genlmsg_cancel(skb, hdr);
+ nlmsg_free(skb);
return err;
}
-static int team_nl_fill_options_get_all(struct sk_buff *skb,
- struct genl_info *info, int flags,
- struct team *team)
-{
- return team_nl_fill_options_get(skb, info->snd_pid,
- info->snd_seq, NLM_F_ACK,
- team, true);
-}
-
static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
struct team *team;
+ struct team_option_inst *opt_inst;
int err;
+ LIST_HEAD(sel_opt_inst_list);
team = team_nl_team_get(info);
if (!team)
return -EINVAL;
- err = team_nl_send_generic(info, team, team_nl_fill_options_get_all);
+ list_for_each_entry(opt_inst, &team->option_inst_list, list)
+ list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
+ err = team_nl_send_options_get(team, info->snd_pid, info->snd_seq,
+ NLM_F_ACK, team_nl_send_unicast,
+ &sel_opt_inst_list);
team_nl_team_put(team);
return err;
}
+static int team_nl_send_event_options_get(struct team *team,
+ struct list_head *sel_opt_inst_list);
+
static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
struct team *team;
int err = 0;
int i;
struct nlattr *nl_option;
+ LIST_HEAD(opt_inst_list);
team = team_nl_team_get(info);
if (!team)
@@ -1629,10 +1939,12 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
- struct nlattr *attr_port_ifindex;
+ struct nlattr *attr;
struct nlattr *attr_data;
enum team_option_type opt_type;
int opt_port_ifindex = 0; /* != 0 for per-port options */
+ u32 opt_array_index = 0;
+ bool opt_is_array = false;
struct team_option_inst *opt_inst;
char *opt_name;
bool opt_found = false;
@@ -1674,23 +1986,33 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
}
opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
- attr_port_ifindex = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
- if (attr_port_ifindex)
- opt_port_ifindex = nla_get_u32(attr_port_ifindex);
+ attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
+ if (attr)
+ opt_port_ifindex = nla_get_u32(attr);
+
+ attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
+ if (attr) {
+ opt_is_array = true;
+ opt_array_index = nla_get_u32(attr);
+ }
list_for_each_entry(opt_inst, &team->option_inst_list, list) {
struct team_option *option = opt_inst->option;
struct team_gsetter_ctx ctx;
+ struct team_option_inst_info *opt_inst_info;
int tmp_ifindex;
- tmp_ifindex = opt_inst->port ?
- opt_inst->port->dev->ifindex : 0;
+ opt_inst_info = &opt_inst->info;
+ tmp_ifindex = opt_inst_info->port ?
+ opt_inst_info->port->dev->ifindex : 0;
if (option->type != opt_type ||
strcmp(option->name, opt_name) ||
- tmp_ifindex != opt_port_ifindex)
+ tmp_ifindex != opt_port_ifindex ||
+ (option->array_size && !opt_is_array) ||
+ opt_inst_info->array_index != opt_array_index)
continue;
opt_found = true;
- ctx.port = opt_inst->port;
+ ctx.info = opt_inst_info;
switch (opt_type) {
case TEAM_OPTION_TYPE_U32:
ctx.data.u32_val = nla_get_u32(attr_data);
@@ -1715,6 +2037,8 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
err = team_option_set(team, opt_inst, &ctx);
if (err)
goto team_put;
+ opt_inst->changed = true;
+ list_add(&opt_inst->tmp_list, &opt_inst_list);
}
if (!opt_found) {
err = -ENOENT;
@@ -1722,6 +2046,8 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
}
}
+ err = team_nl_send_event_options_get(team, &opt_inst_list);
+
team_put:
team_nl_team_put(team);
@@ -1746,7 +2072,7 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
goto nla_put_failure;
port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
if (!port_list)
- return -EMSGSIZE;
+ goto nla_put_failure;
list_for_each_entry(port, &team->port_list, list) {
struct nlattr *port_item;
@@ -1838,27 +2164,18 @@ static struct genl_multicast_group team_change_event_mcgrp = {
.name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
};
-static int team_nl_send_event_options_get(struct team *team)
+static int team_nl_send_multicast(struct sk_buff *skb,
+ struct team *team, u32 pid)
{
- struct sk_buff *skb;
- int err;
- struct net *net = dev_net(team->dev);
-
- skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- err = team_nl_fill_options_get(skb, 0, 0, 0, team, false);
- if (err < 0)
- goto err_fill;
-
- err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
- GFP_KERNEL);
- return err;
+ return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
+ team_change_event_mcgrp.id, GFP_KERNEL);
+}
-err_fill:
- nlmsg_free(skb);
- return err;
+static int team_nl_send_event_options_get(struct team *team,
+ struct list_head *sel_opt_inst_list)
+{
+ return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
+ sel_opt_inst_list);
}
static int team_nl_send_event_port_list_get(struct team *team)
@@ -1867,7 +2184,7 @@ static int team_nl_send_event_port_list_get(struct team *team)
int err;
struct net *net = dev_net(team->dev);
- skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -1918,10 +2235,17 @@ static void team_nl_fini(void)
static void __team_options_change_check(struct team *team)
{
int err;
+ struct team_option_inst *opt_inst;
+ LIST_HEAD(sel_opt_inst_list);
- err = team_nl_send_event_options_get(team);
+ list_for_each_entry(opt_inst, &team->option_inst_list, list) {
+ if (opt_inst->changed)
+ list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
+ }
+ err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
if (err)
- netdev_warn(team->dev, "Failed to send options change via netlink\n");
+ netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
+ err);
}
/* rtnl lock is held */
@@ -1965,6 +2289,7 @@ static void team_port_change_check(struct team_port *port, bool linkup)
mutex_unlock(&team->lock);
}
+
/************************************
* Net device notifier event handler
************************************/
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
index fd6bd03..6262b4d 100644
--- a/drivers/net/team/team_mode_activebackup.c
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -1,5 +1,5 @@
/*
- * net/drivers/team/team_mode_activebackup.c - Active-backup mode for team
+ * drivers/net/team/team_mode_activebackup.c - Active-backup mode for team
* Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -40,11 +40,10 @@ static bool ab_transmit(struct team *team, struct sk_buff *skb)
{
struct team_port *active_port;
- active_port = rcu_dereference(ab_priv(team)->active_port);
+ active_port = rcu_dereference_bh(ab_priv(team)->active_port);
if (unlikely(!active_port))
goto drop;
- skb->dev = active_port->dev;
- if (dev_queue_xmit(skb))
+ if (team_dev_queue_xmit(team, active_port, skb))
return false;
return true;
@@ -61,8 +60,12 @@ static void ab_port_leave(struct team *team, struct team_port *port)
static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx)
{
- if (ab_priv(team)->active_port)
- ctx->data.u32_val = ab_priv(team)->active_port->dev->ifindex;
+ struct team_port *active_port;
+
+ active_port = rcu_dereference_protected(ab_priv(team)->active_port,
+ lockdep_is_held(&team->lock));
+ if (active_port)
+ ctx->data.u32_val = active_port->dev->ifindex;
else
ctx->data.u32_val = 0;
return 0;
@@ -108,7 +111,7 @@ static const struct team_mode_ops ab_mode_ops = {
.port_leave = ab_port_leave,
};
-static struct team_mode ab_mode = {
+static const struct team_mode ab_mode = {
.kind = "activebackup",
.owner = THIS_MODULE,
.priv_size = sizeof(struct ab_priv),
diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c
new file mode 100644
index 0000000..c96e4d2
--- /dev/null
+++ b/drivers/net/team/team_mode_broadcast.c
@@ -0,0 +1,87 @@
+/*
+ * drivers/net/team/team_mode_broadcast.c - Broadcast mode for team
+ * Copyright (c) 2012 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/if_team.h>
+
+static bool bc_transmit(struct team *team, struct sk_buff *skb)
+{
+ struct team_port *cur;
+ struct team_port *last = NULL;
+ struct sk_buff *skb2;
+ bool ret;
+ bool sum_ret = false;
+
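+ /* Send a clone of the skb to every txable port except the last one
+ * found, which transmits the original skb.
+ */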
+ list_for_each_entry_rcu(cur, &team->port_list, list) {
+ if (team_port_txable(cur)) {
+ if (last) {
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (skb2) {
+ ret = team_dev_queue_xmit(team, last,
+ skb2);
+ if (!sum_ret)
+ sum_ret = ret;
+ }
+ }
+ last = cur;
+ }
+ }
+ if (last) {
+ ret = team_dev_queue_xmit(team, last, skb);
+ if (!sum_ret)
+ sum_ret = ret;
+ }
+ return sum_ret;
+}
+
+static int bc_port_enter(struct team *team, struct team_port *port)
+{
+ return team_port_set_team_mac(port);
+}
+
+static void bc_port_change_mac(struct team *team, struct team_port *port)
+{
+ team_port_set_team_mac(port);
+}
+
+static const struct team_mode_ops bc_mode_ops = {
+ .transmit = bc_transmit,
+ .port_enter = bc_port_enter,
+ .port_change_mac = bc_port_change_mac,
+};
+
+static const struct team_mode bc_mode = {
+ .kind = "broadcast",
+ .owner = THIS_MODULE,
+ .ops = &bc_mode_ops,
+};
+
+static int __init bc_init_module(void)
+{
+ return team_mode_register(&bc_mode);
+}
+
+static void __exit bc_cleanup_module(void)
+{
+ team_mode_unregister(&bc_mode);
+}
+
+module_init(bc_init_module);
+module_exit(bc_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Broadcast mode for team");
+MODULE_ALIAS("team-mode-broadcast");
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index 86e8183..cdc31b5 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -17,34 +17,209 @@
#include <linux/filter.h>
#include <linux/if_team.h>
+struct lb_priv;
+
+typedef struct team_port *lb_select_tx_port_func_t(struct team *,
+ struct lb_priv *,
+ struct sk_buff *,
+ unsigned char);
+
+#define LB_TX_HASHTABLE_SIZE 256 /* hash is a char */
+
+struct lb_stats {
+ u64 tx_bytes;
+};
+
+struct lb_pcpu_stats {
+ struct lb_stats hash_stats[LB_TX_HASHTABLE_SIZE];
+ struct u64_stats_sync syncp;
+};
+
+struct lb_stats_info {
+ struct lb_stats stats;
+ struct lb_stats last_stats;
+ struct team_option_inst_info *opt_inst_info;
+};
+
+struct lb_port_mapping {
+ struct team_port __rcu *port;
+ struct team_option_inst_info *opt_inst_info;
+};
+
+struct lb_priv_ex {
+ struct team *team;
+ struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
+ struct sock_fprog *orig_fprog;
+ struct {
+ unsigned int refresh_interval; /* in tenths of second */
+ struct delayed_work refresh_dw;
+ struct lb_stats_info info[LB_TX_HASHTABLE_SIZE];
+ } stats;
+};
+
struct lb_priv {
struct sk_filter __rcu *fp;
- struct sock_fprog *orig_fprog;
+ lb_select_tx_port_func_t __rcu *select_tx_port_func;
+ struct lb_pcpu_stats __percpu *pcpu_stats;
+ struct lb_priv_ex *ex; /* priv extension */
};
-static struct lb_priv *lb_priv(struct team *team)
+static struct lb_priv *get_lb_priv(struct team *team)
{
return (struct lb_priv *) &team->mode_priv;
}
-static bool lb_transmit(struct team *team, struct sk_buff *skb)
+struct lb_port_priv {
+ struct lb_stats __percpu *pcpu_stats;
+ struct lb_stats_info stats_info;
+};
+
+static struct lb_port_priv *get_lb_port_priv(struct team_port *port)
+{
+ return (struct lb_port_priv *) &port->mode_priv;
+}
+
+#define LB_HTPM_PORT_BY_HASH(lb_priv, hash) \
+ (lb_priv)->ex->tx_hash_to_port_mapping[hash].port
+
+#define LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) \
+ (lb_priv)->ex->tx_hash_to_port_mapping[hash].opt_inst_info
+
+static void lb_tx_hash_to_port_mapping_null_port(struct team *team,
+ struct team_port *port)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ bool changed = false;
+ int i;
+
+ for (i = 0; i < LB_TX_HASHTABLE_SIZE; i++) {
+ struct lb_port_mapping *pm;
+
+ pm = &lb_priv->ex->tx_hash_to_port_mapping[i];
+ if (rcu_access_pointer(pm->port) == port) {
+ RCU_INIT_POINTER(pm->port, NULL);
+ team_option_inst_set_change(pm->opt_inst_info);
+ changed = true;
+ }
+ }
+ if (changed)
+ team_options_change_check(team);
+}
+
+/* Basic tx selection based solely by hash */
+static struct team_port *lb_hash_select_tx_port(struct team *team,
+ struct lb_priv *lb_priv,
+ struct sk_buff *skb,
+ unsigned char hash)
{
- struct sk_filter *fp;
- struct team_port *port;
- unsigned int hash;
int port_index;
- fp = rcu_dereference(lb_priv(team)->fp);
- if (unlikely(!fp))
- goto drop;
- hash = SK_RUN_FILTER(fp, skb);
port_index = hash % team->en_port_count;
- port = team_get_port_by_index_rcu(team, port_index);
+ return team_get_port_by_index_rcu(team, port_index);
+}
+
+/* Hash to port mapping select tx port */
+static struct team_port *lb_htpm_select_tx_port(struct team *team,
+ struct lb_priv *lb_priv,
+ struct sk_buff *skb,
+ unsigned char hash)
+{
+ return rcu_dereference_bh(LB_HTPM_PORT_BY_HASH(lb_priv, hash));
+}
+
+struct lb_select_tx_port {
+ char *name;
+ lb_select_tx_port_func_t *func;
+};
+
+static const struct lb_select_tx_port lb_select_tx_port_list[] = {
+ {
+ .name = "hash",
+ .func = lb_hash_select_tx_port,
+ },
+ {
+ .name = "hash_to_port_mapping",
+ .func = lb_htpm_select_tx_port,
+ },
+};
+#define LB_SELECT_TX_PORT_LIST_COUNT ARRAY_SIZE(lb_select_tx_port_list)
+
+static char *lb_select_tx_port_get_name(lb_select_tx_port_func_t *func)
+{
+ int i;
+
+ for (i = 0; i < LB_SELECT_TX_PORT_LIST_COUNT; i++) {
+ const struct lb_select_tx_port *item;
+
+ item = &lb_select_tx_port_list[i];
+ if (item->func == func)
+ return item->name;
+ }
+ return NULL;
+}
+
+static lb_select_tx_port_func_t *lb_select_tx_port_get_func(const char *name)
+{
+ int i;
+
+ for (i = 0; i < LB_SELECT_TX_PORT_LIST_COUNT; i++) {
+ const struct lb_select_tx_port *item;
+
+ item = &lb_select_tx_port_list[i];
+ if (!strcmp(item->name, name))
+ return item->func;
+ }
+ return NULL;
+}
+
+static unsigned int lb_get_skb_hash(struct lb_priv *lb_priv,
+ struct sk_buff *skb)
+{
+ struct sk_filter *fp;
+ uint32_t lhash;
+ unsigned char *c;
+
+ fp = rcu_dereference_bh(lb_priv->fp);
+ if (unlikely(!fp))
+ return 0;
+ lhash = SK_RUN_FILTER(fp, skb);
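+ /* Fold the 32-bit BPF result into an 8-bit hash by XORing its bytes */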
+ c = (char *) &lhash;
+ return c[0] ^ c[1] ^ c[2] ^ c[3];
+}
+
+static void lb_update_tx_stats(unsigned int tx_bytes, struct lb_priv *lb_priv,
+ struct lb_port_priv *lb_port_priv,
+ unsigned char hash)
+{
+ struct lb_pcpu_stats *pcpu_stats;
+ struct lb_stats *port_stats;
+ struct lb_stats *hash_stats;
+
+ pcpu_stats = this_cpu_ptr(lb_priv->pcpu_stats);
+ port_stats = this_cpu_ptr(lb_port_priv->pcpu_stats);
+ hash_stats = &pcpu_stats->hash_stats[hash];
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ port_stats->tx_bytes += tx_bytes;
+ hash_stats->tx_bytes += tx_bytes;
+ u64_stats_update_end(&pcpu_stats->syncp);
+}
+
+static bool lb_transmit(struct team *team, struct sk_buff *skb)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ lb_select_tx_port_func_t *select_tx_port_func;
+ struct team_port *port;
+ unsigned char hash;
+ unsigned int tx_bytes = skb->len;
+
+ hash = lb_get_skb_hash(lb_priv, skb);
+ select_tx_port_func = rcu_dereference_bh(lb_priv->select_tx_port_func);
+ port = select_tx_port_func(team, lb_priv, skb, hash);
if (unlikely(!port))
goto drop;
- skb->dev = port->dev;
- if (dev_queue_xmit(skb))
+ if (team_dev_queue_xmit(team, port, skb))
return false;
+ lb_update_tx_stats(tx_bytes, lb_priv, get_lb_port_priv(port), hash);
return true;
drop:
@@ -54,14 +229,16 @@ drop:
static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
{
- if (!lb_priv(team)->orig_fprog) {
+ struct lb_priv *lb_priv = get_lb_priv(team);
+
+ if (!lb_priv->ex->orig_fprog) {
ctx->data.bin_val.len = 0;
ctx->data.bin_val.ptr = NULL;
return 0;
}
- ctx->data.bin_val.len = lb_priv(team)->orig_fprog->len *
+ ctx->data.bin_val.len = lb_priv->ex->orig_fprog->len *
sizeof(struct sock_filter);
- ctx->data.bin_val.ptr = lb_priv(team)->orig_fprog->filter;
+ ctx->data.bin_val.ptr = lb_priv->ex->orig_fprog->filter;
return 0;
}
@@ -94,7 +271,9 @@ static void __fprog_destroy(struct sock_fprog *fprog)
static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
{
+ struct lb_priv *lb_priv = get_lb_priv(team);
struct sk_filter *fp = NULL;
+ struct sk_filter *orig_fp;
struct sock_fprog *fprog = NULL;
int err;
@@ -110,14 +289,238 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
}
}
- if (lb_priv(team)->orig_fprog) {
+ if (lb_priv->ex->orig_fprog) {
/* Clear old filter data */
- __fprog_destroy(lb_priv(team)->orig_fprog);
- sk_unattached_filter_destroy(lb_priv(team)->fp);
+ __fprog_destroy(lb_priv->ex->orig_fprog);
+ orig_fp = rcu_dereference_protected(lb_priv->fp,
+ lockdep_is_held(&team->lock));
+ sk_unattached_filter_destroy(orig_fp);
}
- rcu_assign_pointer(lb_priv(team)->fp, fp);
- lb_priv(team)->orig_fprog = fprog;
+ rcu_assign_pointer(lb_priv->fp, fp);
+ lb_priv->ex->orig_fprog = fprog;
+ return 0;
+}
+
+static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ lb_select_tx_port_func_t *func;
+ char *name;
+
+ func = rcu_dereference_protected(lb_priv->select_tx_port_func,
+ lockdep_is_held(&team->lock));
+ name = lb_select_tx_port_get_name(func);
+ BUG_ON(!name);
+ ctx->data.str_val = name;
+ return 0;
+}
+
+static int lb_tx_method_set(struct team *team, struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ lb_select_tx_port_func_t *func;
+
+ func = lb_select_tx_port_get_func(ctx->data.str_val);
+ if (!func)
+ return -EINVAL;
+ rcu_assign_pointer(lb_priv->select_tx_port_func, func);
+ return 0;
+}
+
+static int lb_tx_hash_to_port_mapping_init(struct team *team,
+ struct team_option_inst_info *info)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ unsigned char hash = info->array_index;
+
+ LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) = info;
+ return 0;
+}
+
+static int lb_tx_hash_to_port_mapping_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ struct team_port *port;
+ unsigned char hash = ctx->info->array_index;
+
+ port = LB_HTPM_PORT_BY_HASH(lb_priv, hash);
+ ctx->data.u32_val = port ? port->dev->ifindex : 0;
+ return 0;
+}
+
+static int lb_tx_hash_to_port_mapping_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ struct team_port *port;
+ unsigned char hash = ctx->info->array_index;
+
+ list_for_each_entry(port, &team->port_list, list) {
+ if (ctx->data.u32_val == port->dev->ifindex &&
+ team_port_enabled(port)) {
+ rcu_assign_pointer(LB_HTPM_PORT_BY_HASH(lb_priv, hash),
+ port);
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+static int lb_hash_stats_init(struct team *team,
+ struct team_option_inst_info *info)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ unsigned char hash = info->array_index;
+
+ lb_priv->ex->stats.info[hash].opt_inst_info = info;
+ return 0;
+}
+
+static int lb_hash_stats_get(struct team *team, struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ unsigned char hash = ctx->info->array_index;
+
+ ctx->data.bin_val.ptr = &lb_priv->ex->stats.info[hash].stats;
+ ctx->data.bin_val.len = sizeof(struct lb_stats);
+ return 0;
+}
+
+static int lb_port_stats_init(struct team *team,
+ struct team_option_inst_info *info)
+{
+ struct team_port *port = info->port;
+ struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
+
+ lb_port_priv->stats_info.opt_inst_info = info;
+ return 0;
+}
+
+static int lb_port_stats_get(struct team *team, struct team_gsetter_ctx *ctx)
+{
+ struct team_port *port = ctx->info->port;
+ struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
+
+ ctx->data.bin_val.ptr = &lb_port_priv->stats_info.stats;
+ ctx->data.bin_val.len = sizeof(struct lb_stats);
+ return 0;
+}
+
+static void __lb_stats_info_refresh_prepare(struct lb_stats_info *s_info)
+{
+ memcpy(&s_info->last_stats, &s_info->stats, sizeof(struct lb_stats));
+ memset(&s_info->stats, 0, sizeof(struct lb_stats));
+}
+
+static bool __lb_stats_info_refresh_check(struct lb_stats_info *s_info,
+ struct team *team)
+{
+ if (memcmp(&s_info->last_stats, &s_info->stats,
+ sizeof(struct lb_stats))) {
+ team_option_inst_set_change(s_info->opt_inst_info);
+ return true;
+ }
+ return false;
+}
+
+static void __lb_one_cpu_stats_add(struct lb_stats *acc_stats,
+ struct lb_stats *cpu_stats,
+ struct u64_stats_sync *syncp)
+{
+ unsigned int start;
+ struct lb_stats tmp;
+
+ do {
+ start = u64_stats_fetch_begin_bh(syncp);
+ tmp.tx_bytes = cpu_stats->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(syncp, start));
+ acc_stats->tx_bytes += tmp.tx_bytes;
+}
+
+static void lb_stats_refresh(struct work_struct *work)
+{
+ struct team *team;
+ struct lb_priv *lb_priv;
+ struct lb_priv_ex *lb_priv_ex;
+ struct lb_pcpu_stats *pcpu_stats;
+ struct lb_stats *stats;
+ struct lb_stats_info *s_info;
+ struct team_port *port;
+ bool changed = false;
+ int i;
+ int j;
+
+ lb_priv_ex = container_of(work, struct lb_priv_ex,
+ stats.refresh_dw.work);
+
+ team = lb_priv_ex->team;
+ lb_priv = get_lb_priv(team);
+
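+ /* Do not block the workqueue on team->lock; if the lock is contended,
+ * reschedule the refresh and try again.
+ */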
+ if (!mutex_trylock(&team->lock)) {
+ schedule_delayed_work(&lb_priv_ex->stats.refresh_dw, 0);
+ return;
+ }
+
+ for (j = 0; j < LB_TX_HASHTABLE_SIZE; j++) {
+ s_info = &lb_priv->ex->stats.info[j];
+ __lb_stats_info_refresh_prepare(s_info);
+ for_each_possible_cpu(i) {
+ pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
+ stats = &pcpu_stats->hash_stats[j];
+ __lb_one_cpu_stats_add(&s_info->stats, stats,
+ &pcpu_stats->syncp);
+ }
+ changed |= __lb_stats_info_refresh_check(s_info, team);
+ }
+
+ list_for_each_entry(port, &team->port_list, list) {
+ struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
+
+ s_info = &lb_port_priv->stats_info;
+ __lb_stats_info_refresh_prepare(s_info);
+ for_each_possible_cpu(i) {
+ pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
+ stats = per_cpu_ptr(lb_port_priv->pcpu_stats, i);
+ __lb_one_cpu_stats_add(&s_info->stats, stats,
+ &pcpu_stats->syncp);
+ }
+ changed |= __lb_stats_info_refresh_check(s_info, team);
+ }
+
+ if (changed)
+ team_options_change_check(team);
+
+ schedule_delayed_work(&lb_priv_ex->stats.refresh_dw,
+ (lb_priv_ex->stats.refresh_interval * HZ) / 10);
+
+ mutex_unlock(&team->lock);
+}
+
+static int lb_stats_refresh_interval_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+
+ ctx->data.u32_val = lb_priv->ex->stats.refresh_interval;
+ return 0;
+}
+
+static int lb_stats_refresh_interval_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ unsigned int interval;
+
+ interval = ctx->data.u32_val;
+ if (lb_priv->ex->stats.refresh_interval == interval)
+ return 0;
+ lb_priv->ex->stats.refresh_interval = interval;
+ if (interval)
+ schedule_delayed_work(&lb_priv->ex->stats.refresh_dw, 0);
+ else
+ cancel_delayed_work(&lb_priv->ex->stats.refresh_dw);
return 0;
}
@@ -128,30 +531,125 @@ static const struct team_option lb_options[] = {
.getter = lb_bpf_func_get,
.setter = lb_bpf_func_set,
},
+ {
+ .name = "lb_tx_method",
+ .type = TEAM_OPTION_TYPE_STRING,
+ .getter = lb_tx_method_get,
+ .setter = lb_tx_method_set,
+ },
+ {
+ .name = "lb_tx_hash_to_port_mapping",
+ .array_size = LB_TX_HASHTABLE_SIZE,
+ .type = TEAM_OPTION_TYPE_U32,
+ .init = lb_tx_hash_to_port_mapping_init,
+ .getter = lb_tx_hash_to_port_mapping_get,
+ .setter = lb_tx_hash_to_port_mapping_set,
+ },
+ {
+ .name = "lb_hash_stats",
+ .array_size = LB_TX_HASHTABLE_SIZE,
+ .type = TEAM_OPTION_TYPE_BINARY,
+ .init = lb_hash_stats_init,
+ .getter = lb_hash_stats_get,
+ },
+ {
+ .name = "lb_port_stats",
+ .per_port = true,
+ .type = TEAM_OPTION_TYPE_BINARY,
+ .init = lb_port_stats_init,
+ .getter = lb_port_stats_get,
+ },
+ {
+ .name = "lb_stats_refresh_interval",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = lb_stats_refresh_interval_get,
+ .setter = lb_stats_refresh_interval_set,
+ },
};
static int lb_init(struct team *team)
{
- return team_options_register(team, lb_options,
- ARRAY_SIZE(lb_options));
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ lb_select_tx_port_func_t *func;
+ int err;
+
+ /* set default tx port selector */
+ func = lb_select_tx_port_get_func("hash");
+ BUG_ON(!func);
+ rcu_assign_pointer(lb_priv->select_tx_port_func, func);
+
+ lb_priv->ex = kzalloc(sizeof(*lb_priv->ex), GFP_KERNEL);
+ if (!lb_priv->ex)
+ return -ENOMEM;
+ lb_priv->ex->team = team;
+
+ lb_priv->pcpu_stats = alloc_percpu(struct lb_pcpu_stats);
+ if (!lb_priv->pcpu_stats) {
+ err = -ENOMEM;
+ goto err_alloc_pcpu_stats;
+ }
+
+ INIT_DELAYED_WORK(&lb_priv->ex->stats.refresh_dw, lb_stats_refresh);
+
+ err = team_options_register(team, lb_options, ARRAY_SIZE(lb_options));
+ if (err)
+ goto err_options_register;
+ return 0;
+
+err_options_register:
+ free_percpu(lb_priv->pcpu_stats);
+err_alloc_pcpu_stats:
+ kfree(lb_priv->ex);
+ return err;
}
static void lb_exit(struct team *team)
{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+
team_options_unregister(team, lb_options,
ARRAY_SIZE(lb_options));
+ cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw);
+ free_percpu(lb_priv->pcpu_stats);
+ kfree(lb_priv->ex);
+}
+
+static int lb_port_enter(struct team *team, struct team_port *port)
+{
+ struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
+
+ lb_port_priv->pcpu_stats = alloc_percpu(struct lb_stats);
+ if (!lb_port_priv->pcpu_stats)
+ return -ENOMEM;
+ return 0;
+}
+
+static void lb_port_leave(struct team *team, struct team_port *port)
+{
+ struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
+
+ free_percpu(lb_port_priv->pcpu_stats);
+}
+
+static void lb_port_disabled(struct team *team, struct team_port *port)
+{
+ lb_tx_hash_to_port_mapping_null_port(team, port);
}
static const struct team_mode_ops lb_mode_ops = {
.init = lb_init,
.exit = lb_exit,
+ .port_enter = lb_port_enter,
+ .port_leave = lb_port_leave,
+ .port_disabled = lb_port_disabled,
.transmit = lb_transmit,
};
-static struct team_mode lb_mode = {
+static const struct team_mode lb_mode = {
.kind = "loadbalance",
.owner = THIS_MODULE,
.priv_size = sizeof(struct lb_priv),
+ .port_priv_size = sizeof(struct lb_port_priv),
.ops = &lb_mode_ops,
};
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index 6abfbdc..ad7ed0e 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -1,5 +1,5 @@
/*
- * net/drivers/team/team_mode_roundrobin.c - Round-robin mode for team
+ * drivers/net/team/team_mode_roundrobin.c - Round-robin mode for team
* Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -30,16 +30,16 @@ static struct team_port *__get_first_port_up(struct team *team,
{
struct team_port *cur;
- if (port->linkup)
+ if (team_port_txable(port))
return port;
cur = port;
list_for_each_entry_continue_rcu(cur, &team->port_list, list)
- if (cur->linkup)
+ if (team_port_txable(cur))
return cur;
list_for_each_entry_rcu(cur, &team->port_list, list) {
if (cur == port)
break;
- if (cur->linkup)
+ if (team_port_txable(cur))
return cur;
}
return NULL;
@@ -55,8 +55,7 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
port = __get_first_port_up(team, port);
if (unlikely(!port))
goto drop;
- skb->dev = port->dev;
- if (dev_queue_xmit(skb))
+ if (team_dev_queue_xmit(team, port, skb))
return false;
return true;
@@ -81,7 +80,7 @@ static const struct team_mode_ops rr_mode_ops = {
.port_change_mac = rr_port_change_mac,
};
-static struct team_mode rr_mode = {
+static const struct team_mode rr_mode = {
.kind = "roundrobin",
.owner = THIS_MODULE,
.priv_size = sizeof(struct rr_priv),
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 987aeef..c62163e 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -22,7 +22,7 @@
* Add TUNSETLINK ioctl to set the link encapsulation
*
* Mark Smith <markzzzsmith@yahoo.com.au>
- * Use random_ether_addr() for tap MAC address.
+ * Use eth_random_addr() for tap MAC address.
*
* Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20
* Fixes in packet dropping, queue length setting and queue wakeup.
@@ -100,6 +100,8 @@ do { \
} while (0)
#endif
+#define GOODCOPY_LEN 128
+
#define FLT_EXACT_COUNT 8
struct tap_filter {
unsigned int count; /* Number of addrs. Zero means disabled */
@@ -358,6 +360,8 @@ static void tun_free_netdev(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
+ BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED, &tun->socket.flags));
+
sk_release_kernel(tun->socket.sk);
}
@@ -414,6 +418,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Orphan the skb - required as we might hang on to it
* for indefinite time. */
+ if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
+ goto drop;
skb_orphan(skb);
/* Enqueue packet */
@@ -600,19 +606,100 @@ static struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
return skb;
}
+/* set skb frags from iovec, this can move to core network code for reuse */
+static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+ int offset, size_t count)
+{
+ int len = iov_length(from, count) - offset;
+ int copy = skb_headlen(skb);
+ int size, offset1 = 0;
+ int i = 0;
+
+ /* Skip over from offset */
+ while (count && (offset >= from->iov_len)) {
+ offset -= from->iov_len;
+ ++from;
+ --count;
+ }
+
+ /* copy up to skb headlen */
+ while (count && (copy > 0)) {
+ size = min_t(unsigned int, copy, from->iov_len - offset);
+ if (copy_from_user(skb->data + offset1, from->iov_base + offset,
+ size))
+ return -EFAULT;
+ if (copy > size) {
+ ++from;
+ --count;
+ offset = 0;
+ } else
+ offset += size;
+ copy -= size;
+ offset1 += size;
+ }
+
+ if (len == offset1)
+ return 0;
+
+ while (count--) {
+ struct page *page[MAX_SKB_FRAGS];
+ int num_pages;
+ unsigned long base;
+ unsigned long truesize;
+
+ len = from->iov_len - offset;
+ if (!len) {
+ offset = 0;
+ ++from;
+ continue;
+ }
+ base = (unsigned long)from->iov_base + offset;
+ size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+ if (i + size > MAX_SKB_FRAGS)
+ return -EMSGSIZE;
+ num_pages = get_user_pages_fast(base, size, 0, &page[i]);
+ if (num_pages != size) {
+ for (i = 0; i < num_pages; i++)
+ put_page(page[i]);
+ return -EFAULT;
+ }
+ truesize = size * PAGE_SIZE;
+ skb->data_len += len;
+ skb->len += len;
+ skb->truesize += truesize;
+ atomic_add(truesize, &skb->sk->sk_wmem_alloc);
+ while (len) {
+ int off = base & ~PAGE_MASK;
+ int size = min_t(int, len, PAGE_SIZE - off);
+ __skb_fill_page_desc(skb, i, page[i], off, size);
+ skb_shinfo(skb)->nr_frags++;
+ /* increase sk_wmem_alloc */
+ base += size;
+ len -= size;
+ i++;
+ }
+ offset = 0;
+ ++from;
+ }
+ return 0;
+}
+
/* Get packet from user space buffer */
-static ssize_t tun_get_user(struct tun_struct *tun,
- const struct iovec *iv, size_t count,
- int noblock)
+static ssize_t tun_get_user(struct tun_struct *tun, void *msg_control,
+ const struct iovec *iv, size_t total_len,
+ size_t count, int noblock)
{
struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
struct sk_buff *skb;
- size_t len = count, align = NET_SKB_PAD;
+ size_t len = total_len, align = NET_SKB_PAD;
struct virtio_net_hdr gso = { 0 };
int offset = 0;
+ int copylen;
+ bool zerocopy = false;
+ int err;
if (!(tun->flags & TUN_NO_PI)) {
- if ((len -= sizeof(pi)) > count)
+ if ((len -= sizeof(pi)) > total_len)
return -EINVAL;
if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
@@ -621,7 +708,7 @@ static ssize_t tun_get_user(struct tun_struct *tun,
}
if (tun->flags & TUN_VNET_HDR) {
- if ((len -= tun->vnet_hdr_sz) > count)
+ if ((len -= tun->vnet_hdr_sz) > total_len)
return -EINVAL;
if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
@@ -643,14 +730,46 @@ static ssize_t tun_get_user(struct tun_struct *tun,
return -EINVAL;
}
- skb = tun_alloc_skb(tun, align, len, gso.hdr_len, noblock);
+ if (msg_control)
+ zerocopy = true;
+
+ if (zerocopy) {
+ /* Userspace may produce vectors with count greater than
+ * MAX_SKB_FRAGS, so we need to linearize part of the skb
+ * so that the rest of the data fits in the frags.
+ */
+ if (count > MAX_SKB_FRAGS) {
+ copylen = iov_length(iv, count - MAX_SKB_FRAGS);
+ if (copylen < offset)
+ copylen = 0;
+ else
+ copylen -= offset;
+ } else
+ copylen = 0;
+ /* There are 256 bytes to be copied in skb, so there is enough
+ * room for skb expand head in case it is used.
+ * The rest of the buffer is mapped from userspace.
+ */
+ if (copylen < gso.hdr_len)
+ copylen = gso.hdr_len;
+ if (!copylen)
+ copylen = GOODCOPY_LEN;
+ } else
+ copylen = len;
+
+ skb = tun_alloc_skb(tun, align, copylen, gso.hdr_len, noblock);
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EAGAIN)
tun->dev->stats.rx_dropped++;
return PTR_ERR(skb);
}
- if (skb_copy_datagram_from_iovec(skb, 0, iv, offset, len)) {
+ if (zerocopy)
+ err = zerocopy_sg_from_iovec(skb, iv, offset, count);
+ else
+ err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);
+
+ if (err) {
tun->dev->stats.rx_dropped++;
kfree_skb(skb);
return -EFAULT;
@@ -724,12 +843,18 @@ static ssize_t tun_get_user(struct tun_struct *tun,
skb_shinfo(skb)->gso_segs = 0;
}
+ /* copy skb_ubuf_info for callback when skb has no error */
+ if (zerocopy) {
+ skb_shinfo(skb)->destructor_arg = msg_control;
+ skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ }
+
netif_rx_ni(skb);
tun->dev->stats.rx_packets++;
tun->dev->stats.rx_bytes += len;
- return count;
+ return total_len;
}
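The copylen logic above decides how much of the payload is linearized into the skb head versus mapped zero-copy into frags. A compact restatement as a hypothetical standalone helper; excess stands in for iov_length(iv, count - MAX_SKB_FRAGS), and GOODCOPY_LEN (defined elsewhere in the driver) is passed in rather than assumed:

#include <stddef.h>

static size_t choose_copylen(size_t count, size_t max_frags, size_t excess,
			     size_t offset, size_t hdr_len, size_t goodcopy)
{
	size_t copylen = 0;

	if (count > max_frags && excess > offset)
		copylen = excess - offset;	/* linearize what cannot become a frag */
	if (copylen < hdr_len)
		copylen = hdr_len;		/* keep at least the GSO header linear */
	if (!copylen)
		copylen = goodcopy;		/* small default linear area */
	return copylen;
}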
static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
@@ -744,7 +869,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
- result = tun_get_user(tun, iv, iov_length(iv, count),
+ result = tun_get_user(tun, NULL, iv, iov_length(iv, count), count,
file->f_flags & O_NONBLOCK);
tun_put(tun);
@@ -958,8 +1083,8 @@ static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len)
{
struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
- return tun_get_user(tun, m->msg_iov, total_len,
- m->msg_flags & MSG_DONTWAIT);
+ return tun_get_user(tun, m->msg_control, m->msg_iov, total_len,
+ m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
}
static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
@@ -1115,6 +1240,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->flags = flags;
tun->txflt.count = 0;
tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
+ set_bit(SOCK_EXTERNALLY_ALLOCATED, &tun->socket.flags);
err = -ENOMEM;
sk = sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
@@ -1128,6 +1254,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
sock_init_data(&tun->socket, sk);
sk->sk_write_space = tun_sock_write_space;
sk->sk_sndbuf = INT_MAX;
+ sock_set_flag(sk, SOCK_ZEROCOPY);
tun_sk(sk)->tun = tun;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 833e32f..c1ae769 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -134,6 +134,7 @@ config USB_NET_AX8817X
tristate "ASIX AX88xxx Based USB 2.0 Ethernet Adapters"
depends on USB_USBNET
select CRC32
+ select PHYLIB
default y
help
This option adds support for ASIX AX88xxx based USB 2.0
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index a2e2d72..bf06300 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_USB_PEGASUS) += pegasus.o
obj-$(CONFIG_USB_RTL8150) += rtl8150.o
obj-$(CONFIG_USB_HSO) += hso.o
obj-$(CONFIG_USB_NET_AX8817X) += asix.o
+asix-y := asix_devices.o asix_common.o ax88172a.o
obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
new file mode 100644
index 0000000..e889631
--- /dev/null
+++ b/drivers/net/usb/asix.h
@@ -0,0 +1,218 @@
+/*
+ * ASIX AX8817X based USB 2.0 Ethernet Devices
+ * Copyright (C) 2003-2006 David Hollis <dhollis@davehollis.com>
+ * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
+ * Copyright (C) 2006 James Painter <jamie.painter@iname.com>
+ * Copyright (c) 2002-2003 TiVo Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASIX_H
+#define _ASIX_H
+
+// #define DEBUG // error path messages, extra info
+// #define VERBOSE // more; success messages
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/usbnet.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+
+#define DRIVER_VERSION "22-Dec-2011"
+#define DRIVER_NAME "asix"
+
+/* ASIX AX8817X based USB 2.0 Ethernet Devices */
+
+#define AX_CMD_SET_SW_MII 0x06
+#define AX_CMD_READ_MII_REG 0x07
+#define AX_CMD_WRITE_MII_REG 0x08
+#define AX_CMD_SET_HW_MII 0x0a
+#define AX_CMD_READ_EEPROM 0x0b
+#define AX_CMD_WRITE_EEPROM 0x0c
+#define AX_CMD_WRITE_ENABLE 0x0d
+#define AX_CMD_WRITE_DISABLE 0x0e
+#define AX_CMD_READ_RX_CTL 0x0f
+#define AX_CMD_WRITE_RX_CTL 0x10
+#define AX_CMD_READ_IPG012 0x11
+#define AX_CMD_WRITE_IPG0 0x12
+#define AX_CMD_WRITE_IPG1 0x13
+#define AX_CMD_READ_NODE_ID 0x13
+#define AX_CMD_WRITE_NODE_ID 0x14
+#define AX_CMD_WRITE_IPG2 0x14
+#define AX_CMD_WRITE_MULTI_FILTER 0x16
+#define AX88172_CMD_READ_NODE_ID 0x17
+#define AX_CMD_READ_PHY_ID 0x19
+#define AX_CMD_READ_MEDIUM_STATUS 0x1a
+#define AX_CMD_WRITE_MEDIUM_MODE 0x1b
+#define AX_CMD_READ_MONITOR_MODE 0x1c
+#define AX_CMD_WRITE_MONITOR_MODE 0x1d
+#define AX_CMD_READ_GPIOS 0x1e
+#define AX_CMD_WRITE_GPIOS 0x1f
+#define AX_CMD_SW_RESET 0x20
+#define AX_CMD_SW_PHY_STATUS 0x21
+#define AX_CMD_SW_PHY_SELECT 0x22
+
+#define AX_PHY_SELECT_MASK (BIT(3) | BIT(2))
+#define AX_PHY_SELECT_INTERNAL 0
+#define AX_PHY_SELECT_EXTERNAL BIT(2)
+
+#define AX_MONITOR_MODE 0x01
+#define AX_MONITOR_LINK 0x02
+#define AX_MONITOR_MAGIC 0x04
+#define AX_MONITOR_HSFS 0x10
+
+/* AX88172 Medium Status Register values */
+#define AX88172_MEDIUM_FD 0x02
+#define AX88172_MEDIUM_TX 0x04
+#define AX88172_MEDIUM_FC 0x10
+#define AX88172_MEDIUM_DEFAULT \
+ ( AX88172_MEDIUM_FD | AX88172_MEDIUM_TX | AX88172_MEDIUM_FC )
+
+#define AX_MCAST_FILTER_SIZE 8
+#define AX_MAX_MCAST 64
+
+#define AX_SWRESET_CLEAR 0x00
+#define AX_SWRESET_RR 0x01
+#define AX_SWRESET_RT 0x02
+#define AX_SWRESET_PRTE 0x04
+#define AX_SWRESET_PRL 0x08
+#define AX_SWRESET_BZ 0x10
+#define AX_SWRESET_IPRL 0x20
+#define AX_SWRESET_IPPD 0x40
+
+#define AX88772_IPG0_DEFAULT 0x15
+#define AX88772_IPG1_DEFAULT 0x0c
+#define AX88772_IPG2_DEFAULT 0x12
+
+/* AX88772 & AX88178 Medium Mode Register */
+#define AX_MEDIUM_PF 0x0080
+#define AX_MEDIUM_JFE 0x0040
+#define AX_MEDIUM_TFC 0x0020
+#define AX_MEDIUM_RFC 0x0010
+#define AX_MEDIUM_ENCK 0x0008
+#define AX_MEDIUM_AC 0x0004
+#define AX_MEDIUM_FD 0x0002
+#define AX_MEDIUM_GM 0x0001
+#define AX_MEDIUM_SM 0x1000
+#define AX_MEDIUM_SBP 0x0800
+#define AX_MEDIUM_PS 0x0200
+#define AX_MEDIUM_RE 0x0100
+
+#define AX88178_MEDIUM_DEFAULT \
+ (AX_MEDIUM_PS | AX_MEDIUM_FD | AX_MEDIUM_AC | \
+ AX_MEDIUM_RFC | AX_MEDIUM_TFC | AX_MEDIUM_JFE | \
+ AX_MEDIUM_RE)
+
+#define AX88772_MEDIUM_DEFAULT \
+ (AX_MEDIUM_FD | AX_MEDIUM_RFC | \
+ AX_MEDIUM_TFC | AX_MEDIUM_PS | \
+ AX_MEDIUM_AC | AX_MEDIUM_RE)
+
+/* AX88772 & AX88178 RX_CTL values */
+#define AX_RX_CTL_SO 0x0080
+#define AX_RX_CTL_AP 0x0020
+#define AX_RX_CTL_AM 0x0010
+#define AX_RX_CTL_AB 0x0008
+#define AX_RX_CTL_SEP 0x0004
+#define AX_RX_CTL_AMALL 0x0002
+#define AX_RX_CTL_PRO 0x0001
+#define AX_RX_CTL_MFB_2048 0x0000
+#define AX_RX_CTL_MFB_4096 0x0100
+#define AX_RX_CTL_MFB_8192 0x0200
+#define AX_RX_CTL_MFB_16384 0x0300
+
+#define AX_DEFAULT_RX_CTL (AX_RX_CTL_SO | AX_RX_CTL_AB)
+
+/* GPIO 0 .. 2 toggles */
+#define AX_GPIO_GPO0EN 0x01 /* GPIO0 Output enable */
+#define AX_GPIO_GPO_0 0x02 /* GPIO0 Output value */
+#define AX_GPIO_GPO1EN 0x04 /* GPIO1 Output enable */
+#define AX_GPIO_GPO_1 0x08 /* GPIO1 Output value */
+#define AX_GPIO_GPO2EN 0x10 /* GPIO2 Output enable */
+#define AX_GPIO_GPO_2 0x20 /* GPIO2 Output value */
+#define AX_GPIO_RESERVED 0x40 /* Reserved */
+#define AX_GPIO_RSE 0x80 /* Reload serial EEPROM */
+
+#define AX_EEPROM_MAGIC 0xdeadbeef
+#define AX_EEPROM_LEN 0x200
+
+/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
+struct asix_data {
+ u8 multi_filter[AX_MCAST_FILTER_SIZE];
+ u8 mac_addr[ETH_ALEN];
+ u8 phymode;
+ u8 ledmode;
+ u8 res;
+};
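The comment above caps struct asix_data at the size of the 20-byte dev->data scratch area that every (struct asix_data *)&dev->data cast in this driver relies on. The constraint is only documented, not enforced; a hedged sketch of how it could be checked at compile time (not something the driver itself does):

#include <linux/bug.h>

static inline void asix_data_fits_in_dev_data(void)
{
	/* struct asix_data is overlaid on usbnet's unsigned long data[5] */
	BUILD_BUG_ON(sizeof(struct asix_data) > sizeof(unsigned long [5]));
}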
+
+int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data);
+
+int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data);
+
+void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 size, void *data);
+
+int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb);
+
+struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags);
+
+int asix_set_sw_mii(struct usbnet *dev);
+int asix_set_hw_mii(struct usbnet *dev);
+
+int asix_read_phy_addr(struct usbnet *dev, int internal);
+int asix_get_phy_addr(struct usbnet *dev);
+
+int asix_sw_reset(struct usbnet *dev, u8 flags);
+
+u16 asix_read_rx_ctl(struct usbnet *dev);
+int asix_write_rx_ctl(struct usbnet *dev, u16 mode);
+
+u16 asix_read_medium_status(struct usbnet *dev);
+int asix_write_medium_mode(struct usbnet *dev, u16 mode);
+
+int asix_write_gpio(struct usbnet *dev, u16 value, int sleep);
+
+void asix_set_multicast(struct net_device *net);
+
+int asix_mdio_read(struct net_device *netdev, int phy_id, int loc);
+void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val);
+
+void asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo);
+int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo);
+
+int asix_get_eeprom_len(struct net_device *net);
+int asix_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
+ u8 *data);
+int asix_set_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
+ u8 *data);
+
+void asix_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info);
+
+int asix_set_mac_address(struct net_device *net, void *p);
+
+#endif /* _ASIX_H */
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
new file mode 100644
index 0000000..774d9ce
--- /dev/null
+++ b/drivers/net/usb/asix_common.c
@@ -0,0 +1,631 @@
+/*
+ * ASIX AX8817X based USB 2.0 Ethernet Devices
+ * Copyright (C) 2003-2006 David Hollis <dhollis@davehollis.com>
+ * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
+ * Copyright (C) 2006 James Painter <jamie.painter@iname.com>
+ * Copyright (c) 2002-2003 TiVo Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "asix.h"
+
+int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ void *buf;
+ int err = -ENOMEM;
+
+ netdev_dbg(dev->net, "asix_read_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
+ cmd, value, index, size);
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ goto out;
+
+ err = usb_control_msg(
+ dev->udev,
+ usb_rcvctrlpipe(dev->udev, 0),
+ cmd,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value,
+ index,
+ buf,
+ size,
+ USB_CTRL_GET_TIMEOUT);
+ if (err == size)
+ memcpy(data, buf, size);
+ else if (err >= 0)
+ err = -EINVAL;
+ kfree(buf);
+
+out:
+ return err;
+}
+
+int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ void *buf = NULL;
+ int err = -ENOMEM;
+
+ netdev_dbg(dev->net, "asix_write_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
+ cmd, value, index, size);
+
+ if (data) {
+ buf = kmemdup(data, size, GFP_KERNEL);
+ if (!buf)
+ goto out;
+ }
+
+ err = usb_control_msg(
+ dev->udev,
+ usb_sndctrlpipe(dev->udev, 0),
+ cmd,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value,
+ index,
+ buf,
+ size,
+ USB_CTRL_SET_TIMEOUT);
+ kfree(buf);
+
+out:
+ return err;
+}
+
+static void asix_async_cmd_callback(struct urb *urb)
+{
+ struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
+ int status = urb->status;
+
+ if (status < 0)
+ printk(KERN_DEBUG "asix_async_cmd_callback() failed with %d",
+ status);
+
+ kfree(req);
+ usb_free_urb(urb);
+}
+
+void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ struct usb_ctrlrequest *req;
+ int status;
+ struct urb *urb;
+
+ netdev_dbg(dev->net, "asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
+ cmd, value, index, size);
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ netdev_err(dev->net, "Error allocating URB in write_cmd_async!\n");
+ return;
+ }
+
+ req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
+ if (!req) {
+ netdev_err(dev->net, "Failed to allocate memory for control request\n");
+ usb_free_urb(urb);
+ return;
+ }
+
+ req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ req->bRequest = cmd;
+ req->wValue = cpu_to_le16(value);
+ req->wIndex = cpu_to_le16(index);
+ req->wLength = cpu_to_le16(size);
+
+ usb_fill_control_urb(urb, dev->udev,
+ usb_sndctrlpipe(dev->udev, 0),
+ (void *)req, data, size,
+ asix_async_cmd_callback, req);
+
+ status = usb_submit_urb(urb, GFP_ATOMIC);
+ if (status < 0) {
+ netdev_err(dev->net, "Error submitting the control message: status=%d\n",
+ status);
+ kfree(req);
+ usb_free_urb(urb);
+ }
+}
+
+int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ int offset = 0;
+
+ while (offset + sizeof(u32) < skb->len) {
+ struct sk_buff *ax_skb;
+ u16 size;
+ u32 header = get_unaligned_le32(skb->data + offset);
+
+ offset += sizeof(u32);
+
+ /* get the packet length */
+ size = (u16) (header & 0x7ff);
+ if (size != ((~header >> 16) & 0x07ff)) {
+ netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
+ return 0;
+ }
+
+ if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
+ (size + offset > skb->len)) {
+ netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
+ size);
+ return 0;
+ }
+ ax_skb = netdev_alloc_skb_ip_align(dev->net, size);
+ if (!ax_skb)
+ return 0;
+
+ skb_put(ax_skb, size);
+ memcpy(ax_skb->data, skb->data + offset, size);
+ usbnet_skb_return(dev, ax_skb);
+
+ offset += (size + 1) & 0xfffe;
+ }
+
+ if (skb->len != offset) {
+ netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d\n",
+ skb->len);
+ return 0;
+ }
+ return 1;
+}
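For reference, the 32-bit header parsed in asix_rx_fixup() above packs the frame length into bits 0-10 and the bitwise complement of that length into bits 16-26, and frames are 16-bit aligned inside the bulk transfer. A sketch of the same checks with hypothetical helper names:

#include <stdint.h>
#include <stdbool.h>

/* Validate a header and extract the frame length. */
static bool rx_header_ok(uint32_t header, uint16_t *len)
{
	uint16_t size  = header & 0x7ff;		/* bits 0..10: length   */
	uint16_t csize = (~header >> 16) & 0x7ff;	/* bits 16..26: ~length */

	if (size != csize)
		return false;
	*len = size;
	return true;
}

/* The read cursor advances by the length rounded up to an even byte count. */
static inline unsigned int rx_frame_stride(uint16_t size)
{
	return (size + 1) & 0xfffe;
}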
+
+struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+{
+ int padlen;
+ int headroom = skb_headroom(skb);
+ int tailroom = skb_tailroom(skb);
+ u32 packet_len;
+ u32 padbytes = 0xffff0000;
+
+ padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4;
+
+ /* We need to push 4 bytes in front of frame (packet_len)
+ * and maybe add 4 bytes after the end (if padlen is 4)
+ *
+ * Avoid skb_copy_expand() expensive call, using following rules :
+ * - We are allowed to push 4 bytes in headroom if skb_header_cloned()
+ * is false (and if we have 4 bytes of headroom)
+ * - We are allowed to put 4 bytes at tail if skb_cloned()
+ * is false (and if we have 4 bytes of tailroom)
+ *
+ * TCP packets for example are cloned, but skb_header_release()
+ * was called in tcp stack, allowing us to use headroom for our needs.
+ */
+ if (!skb_header_cloned(skb) &&
+ !(padlen && skb_cloned(skb)) &&
+ headroom + tailroom >= 4 + padlen) {
+ /* following should not happen, but better be safe */
+ if (headroom < 4 ||
+ tailroom < padlen) {
+ skb->data = memmove(skb->head + 4, skb->data, skb->len);
+ skb_set_tail_pointer(skb, skb->len);
+ }
+ } else {
+ struct sk_buff *skb2;
+
+ skb2 = skb_copy_expand(skb, 4, padlen, flags);
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+ if (!skb)
+ return NULL;
+ }
+
+ packet_len = ((skb->len ^ 0x0000ffff) << 16) + skb->len;
+ skb_push(skb, 4);
+ cpu_to_le32s(&packet_len);
+ skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
+
+ if (padlen) {
+ cpu_to_le32s(&padbytes);
+ memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
+ skb_put(skb, sizeof(padbytes));
+ }
+ return skb;
+}
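asix_tx_fixup() above prepends a 4-byte header whose low 16 bits carry the frame length and whose high 16 bits carry its complement, and it appends a 4-byte 0xffff0000 marker only when length + 4 would land exactly on a USB maxpacket boundary. A minimal sketch of both rules (hypothetical helpers, not driver API):

#include <stdint.h>

static uint32_t tx_header(uint16_t frame_len)
{
	/* length in the low half, its complement in the high half */
	return ((uint32_t)(frame_len ^ 0xffff) << 16) | frame_len;
}

static unsigned int tx_padlen(unsigned int frame_len, unsigned int maxpacket)
{
	/* pad only when the transfer would otherwise end on a packet boundary */
	return ((frame_len + 4) & (maxpacket - 1)) ? 0 : 4;
}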
+
+int asix_set_sw_mii(struct usbnet *dev)
+{
+ int ret;
+ ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to enable software MII access\n");
+ return ret;
+}
+
+int asix_set_hw_mii(struct usbnet *dev)
+{
+ int ret;
+ ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to enable hardware MII access\n");
+ return ret;
+}
+
+int asix_read_phy_addr(struct usbnet *dev, int internal)
+{
+ int offset = (internal ? 1 : 0);
+ u8 buf[2];
+ int ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf);
+
+ netdev_dbg(dev->net, "asix_get_phy_addr()\n");
+
+ if (ret < 0) {
+ netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
+ goto out;
+ }
+ netdev_dbg(dev->net, "asix_get_phy_addr() returning 0x%04x\n",
+ *((__le16 *)buf));
+ ret = buf[offset];
+
+out:
+ return ret;
+}
+
+int asix_get_phy_addr(struct usbnet *dev)
+{
+ /* return the address of the internal phy */
+ return asix_read_phy_addr(dev, 1);
+}
+
+
+int asix_sw_reset(struct usbnet *dev, u8 flags)
+{
+ int ret;
+
+ ret = asix_write_cmd(dev, AX_CMD_SW_RESET, flags, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to send software reset: %02x\n", ret);
+
+ return ret;
+}
+
+u16 asix_read_rx_ctl(struct usbnet *dev)
+{
+ __le16 v;
+ int ret = asix_read_cmd(dev, AX_CMD_READ_RX_CTL, 0, 0, 2, &v);
+
+ if (ret < 0) {
+ netdev_err(dev->net, "Error reading RX_CTL register: %02x\n", ret);
+ goto out;
+ }
+ ret = le16_to_cpu(v);
+out:
+ return ret;
+}
+
+int asix_write_rx_ctl(struct usbnet *dev, u16 mode)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "asix_write_rx_ctl() - mode = 0x%04x\n", mode);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to write RX_CTL mode to 0x%04x: %02x\n",
+ mode, ret);
+
+ return ret;
+}
+
+u16 asix_read_medium_status(struct usbnet *dev)
+{
+ __le16 v;
+ int ret = asix_read_cmd(dev, AX_CMD_READ_MEDIUM_STATUS, 0, 0, 2, &v);
+
+ if (ret < 0) {
+ netdev_err(dev->net, "Error reading Medium Status register: %02x\n",
+ ret);
+ return ret; /* TODO: callers not checking for error ret */
+ }
+
+ return le16_to_cpu(v);
+
+}
+
+int asix_write_medium_mode(struct usbnet *dev, u16 mode)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "asix_write_medium_mode() - mode = 0x%04x\n", mode);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to write Medium Mode mode to 0x%04x: %02x\n",
+ mode, ret);
+
+ return ret;
+}
+
+int asix_write_gpio(struct usbnet *dev, u16 value, int sleep)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "asix_write_gpio() - value = 0x%04x\n", value);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS, value, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to write GPIO value 0x%04x: %02x\n",
+ value, ret);
+
+ if (sleep)
+ msleep(sleep);
+
+ return ret;
+}
+
+/*
+ * AX88772 & AX88178 have a 16-bit RX_CTL value
+ */
+void asix_set_multicast(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct asix_data *data = (struct asix_data *)&dev->data;
+ u16 rx_ctl = AX_DEFAULT_RX_CTL;
+
+ if (net->flags & IFF_PROMISC) {
+ rx_ctl |= AX_RX_CTL_PRO;
+ } else if (net->flags & IFF_ALLMULTI ||
+ netdev_mc_count(net) > AX_MAX_MCAST) {
+ rx_ctl |= AX_RX_CTL_AMALL;
+ } else if (netdev_mc_empty(net)) {
+ /* just broadcast and directed */
+ } else {
+ /* We use the 20 byte dev->data
+ * for our 8 byte filter buffer
+ * to avoid allocating memory that
+ * is tricky to free later */
+ struct netdev_hw_addr *ha;
+ u32 crc_bits;
+
+ memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
+
+ /* Build the multicast hash filter. */
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ data->multi_filter[crc_bits >> 3] |=
+ 1 << (crc_bits & 7);
+ }
+
+ asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
+ AX_MCAST_FILTER_SIZE, data->multi_filter);
+
+ rx_ctl |= AX_RX_CTL_AM;
+ }
+
+ asix_write_cmd_async(dev, AX_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
+}
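The multicast filter built above is an 8-byte (64-bit) bitmap indexed by the top 6 bits of the Ethernet CRC of each address; a minimal sketch of that bit selection (illustrative only, the CRC itself coming from ether_crc() in the driver):

#include <stdint.h>

static void filter_set_bit(uint8_t filter[8], uint32_t crc)
{
	uint32_t idx = crc >> 26;		/* top 6 bits: 0..63 */

	filter[idx >> 3] |= 1u << (idx & 7);	/* byte = idx / 8, bit = idx % 8 */
}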
+
+int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ __le16 res;
+
+ mutex_lock(&dev->phy_mutex);
+ asix_set_sw_mii(dev);
+ asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
+ (__u16)loc, 2, &res);
+ asix_set_hw_mii(dev);
+ mutex_unlock(&dev->phy_mutex);
+
+ netdev_dbg(dev->net, "asix_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
+ phy_id, loc, le16_to_cpu(res));
+
+ return le16_to_cpu(res);
+}
+
+void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ __le16 res = cpu_to_le16(val);
+
+ netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
+ phy_id, loc, val);
+ mutex_lock(&dev->phy_mutex);
+ asix_set_sw_mii(dev);
+ asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, &res);
+ asix_set_hw_mii(dev);
+ mutex_unlock(&dev->phy_mutex);
+}
+
+void asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u8 opt;
+
+ if (asix_read_cmd(dev, AX_CMD_READ_MONITOR_MODE, 0, 0, 1, &opt) < 0) {
+ wolinfo->supported = 0;
+ wolinfo->wolopts = 0;
+ return;
+ }
+ wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
+ wolinfo->wolopts = 0;
+ if (opt & AX_MONITOR_LINK)
+ wolinfo->wolopts |= WAKE_PHY;
+ if (opt & AX_MONITOR_MAGIC)
+ wolinfo->wolopts |= WAKE_MAGIC;
+}
+
+int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u8 opt = 0;
+
+ if (wolinfo->wolopts & WAKE_PHY)
+ opt |= AX_MONITOR_LINK;
+ if (wolinfo->wolopts & WAKE_MAGIC)
+ opt |= AX_MONITOR_MAGIC;
+
+ if (asix_write_cmd(dev, AX_CMD_WRITE_MONITOR_MODE,
+ opt, 0, 0, NULL) < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+int asix_get_eeprom_len(struct net_device *net)
+{
+ return AX_EEPROM_LEN;
+}
+
+int asix_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u16 *eeprom_buff;
+ int first_word, last_word;
+ int i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = AX_EEPROM_MAGIC;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
+ GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ /* ax8817x returns 2 bytes from eeprom on read */
+ for (i = first_word; i <= last_word; i++) {
+ if (asix_read_cmd(dev, AX_CMD_READ_EEPROM, i, 0, 2,
+ &(eeprom_buff[i - first_word])) < 0) {
+ kfree(eeprom_buff);
+ return -EIO;
+ }
+ }
+
+ memcpy(data, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+ kfree(eeprom_buff);
+ return 0;
+}
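Because the EEPROM is read in 16-bit words, asix_get_eeprom() above maps the requested byte range onto whole words and copies out of the buffered words at a byte offset. A small illustrative helper for that mapping (hypothetical, not driver API):

static void eeprom_range_to_words(unsigned int offset, unsigned int len,
				  unsigned int *first_word,
				  unsigned int *last_word,
				  unsigned int *skip)
{
	*first_word = offset >> 1;		/* word holding the first byte */
	*last_word  = (offset + len - 1) >> 1;	/* word holding the last byte  */
	*skip       = offset & 1;		/* bytes to skip in first word */
}

/* e.g. offset = 3, len = 4: words 1..3 are read (6 bytes buffered) and the
 * 4 requested bytes are copied starting at byte 1 of that buffer. */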
+
+int asix_set_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u16 *eeprom_buff;
+ int first_word, last_word;
+ int i;
+ int ret;
+
+ netdev_dbg(net, "write EEPROM len %d, offset %d, magic 0x%x\n",
+ eeprom->len, eeprom->offset, eeprom->magic);
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (eeprom->magic != AX_EEPROM_MAGIC)
+ return -EINVAL;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
+ GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ /* align data to 16 bit boundaries, read the missing data from
+ the EEPROM */
+ if (eeprom->offset & 1) {
+ ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, first_word, 0, 2,
+ &(eeprom_buff[0]));
+ if (ret < 0) {
+ netdev_err(net, "Failed to read EEPROM at offset 0x%02x.\n", first_word);
+ goto free;
+ }
+ }
+
+ if ((eeprom->offset + eeprom->len) & 1) {
+ ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, last_word, 0, 2,
+ &(eeprom_buff[last_word - first_word]));
+ if (ret < 0) {
+ netdev_err(net, "Failed to read EEPROM at offset 0x%02x.\n", last_word);
+ goto free;
+ }
+ }
+
+ memcpy((u8 *)eeprom_buff + (eeprom->offset & 1), data, eeprom->len);
+
+ /* write data to EEPROM */
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0x0000, 0, 0, NULL);
+ if (ret < 0) {
+ netdev_err(net, "Failed to enable EEPROM write\n");
+ goto free;
+ }
+ msleep(20);
+
+ for (i = first_word; i <= last_word; i++) {
+ netdev_dbg(net, "write to EEPROM at offset 0x%02x, data 0x%04x\n",
+ i, eeprom_buff[i - first_word]);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_EEPROM, i,
+ eeprom_buff[i - first_word], 0, NULL);
+ if (ret < 0) {
+ netdev_err(net, "Failed to write EEPROM at offset 0x%02x.\n",
+ i);
+ goto free;
+ }
+ msleep(20);
+ }
+
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0x0000, 0, 0, NULL);
+ if (ret < 0) {
+ netdev_err(net, "Failed to disable EEPROM write\n");
+ goto free;
+ }
+
+ ret = 0;
+free:
+ kfree(eeprom_buff);
+ return ret;
+}
+
+void asix_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+ /* Inherit standard device info */
+ usbnet_get_drvinfo(net, info);
+ strncpy (info->driver, DRIVER_NAME, sizeof info->driver);
+ strncpy (info->version, DRIVER_VERSION, sizeof info->version);
+ info->eedump_len = AX_EEPROM_LEN;
+}
+
+int asix_set_mac_address(struct net_device *net, void *p)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct asix_data *data = (struct asix_data *)&dev->data;
+ struct sockaddr *addr = p;
+
+ if (netif_running(net))
+ return -EBUSY;
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+
+ /* We use the 20 byte dev->data
+ * for our 6 byte mac buffer
+ * to avoid allocating memory that
+ * is tricky to free later */
+ memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
+ asix_write_cmd_async(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+ data->mac_addr);
+
+ return 0;
+}
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix_devices.c
index 71e2b05..4fd48df 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix_devices.c
@@ -20,136 +20,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-// #define DEBUG // error path messages, extra info
-// #define VERBOSE // more; success messages
-
-#include <linux/module.h>
-#include <linux/kmod.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/workqueue.h>
-#include <linux/mii.h>
-#include <linux/usb.h>
-#include <linux/crc32.h>
-#include <linux/usb/usbnet.h>
-#include <linux/slab.h>
-
-#define DRIVER_VERSION "22-Dec-2011"
-#define DRIVER_NAME "asix"
-
-/* ASIX AX8817X based USB 2.0 Ethernet Devices */
-
-#define AX_CMD_SET_SW_MII 0x06
-#define AX_CMD_READ_MII_REG 0x07
-#define AX_CMD_WRITE_MII_REG 0x08
-#define AX_CMD_SET_HW_MII 0x0a
-#define AX_CMD_READ_EEPROM 0x0b
-#define AX_CMD_WRITE_EEPROM 0x0c
-#define AX_CMD_WRITE_ENABLE 0x0d
-#define AX_CMD_WRITE_DISABLE 0x0e
-#define AX_CMD_READ_RX_CTL 0x0f
-#define AX_CMD_WRITE_RX_CTL 0x10
-#define AX_CMD_READ_IPG012 0x11
-#define AX_CMD_WRITE_IPG0 0x12
-#define AX_CMD_WRITE_IPG1 0x13
-#define AX_CMD_READ_NODE_ID 0x13
-#define AX_CMD_WRITE_NODE_ID 0x14
-#define AX_CMD_WRITE_IPG2 0x14
-#define AX_CMD_WRITE_MULTI_FILTER 0x16
-#define AX88172_CMD_READ_NODE_ID 0x17
-#define AX_CMD_READ_PHY_ID 0x19
-#define AX_CMD_READ_MEDIUM_STATUS 0x1a
-#define AX_CMD_WRITE_MEDIUM_MODE 0x1b
-#define AX_CMD_READ_MONITOR_MODE 0x1c
-#define AX_CMD_WRITE_MONITOR_MODE 0x1d
-#define AX_CMD_READ_GPIOS 0x1e
-#define AX_CMD_WRITE_GPIOS 0x1f
-#define AX_CMD_SW_RESET 0x20
-#define AX_CMD_SW_PHY_STATUS 0x21
-#define AX_CMD_SW_PHY_SELECT 0x22
-
-#define AX_MONITOR_MODE 0x01
-#define AX_MONITOR_LINK 0x02
-#define AX_MONITOR_MAGIC 0x04
-#define AX_MONITOR_HSFS 0x10
-
-/* AX88172 Medium Status Register values */
-#define AX88172_MEDIUM_FD 0x02
-#define AX88172_MEDIUM_TX 0x04
-#define AX88172_MEDIUM_FC 0x10
-#define AX88172_MEDIUM_DEFAULT \
- ( AX88172_MEDIUM_FD | AX88172_MEDIUM_TX | AX88172_MEDIUM_FC )
-
-#define AX_MCAST_FILTER_SIZE 8
-#define AX_MAX_MCAST 64
-
-#define AX_SWRESET_CLEAR 0x00
-#define AX_SWRESET_RR 0x01
-#define AX_SWRESET_RT 0x02
-#define AX_SWRESET_PRTE 0x04
-#define AX_SWRESET_PRL 0x08
-#define AX_SWRESET_BZ 0x10
-#define AX_SWRESET_IPRL 0x20
-#define AX_SWRESET_IPPD 0x40
-
-#define AX88772_IPG0_DEFAULT 0x15
-#define AX88772_IPG1_DEFAULT 0x0c
-#define AX88772_IPG2_DEFAULT 0x12
-
-/* AX88772 & AX88178 Medium Mode Register */
-#define AX_MEDIUM_PF 0x0080
-#define AX_MEDIUM_JFE 0x0040
-#define AX_MEDIUM_TFC 0x0020
-#define AX_MEDIUM_RFC 0x0010
-#define AX_MEDIUM_ENCK 0x0008
-#define AX_MEDIUM_AC 0x0004
-#define AX_MEDIUM_FD 0x0002
-#define AX_MEDIUM_GM 0x0001
-#define AX_MEDIUM_SM 0x1000
-#define AX_MEDIUM_SBP 0x0800
-#define AX_MEDIUM_PS 0x0200
-#define AX_MEDIUM_RE 0x0100
-
-#define AX88178_MEDIUM_DEFAULT \
- (AX_MEDIUM_PS | AX_MEDIUM_FD | AX_MEDIUM_AC | \
- AX_MEDIUM_RFC | AX_MEDIUM_TFC | AX_MEDIUM_JFE | \
- AX_MEDIUM_RE)
-
-#define AX88772_MEDIUM_DEFAULT \
- (AX_MEDIUM_FD | AX_MEDIUM_RFC | \
- AX_MEDIUM_TFC | AX_MEDIUM_PS | \
- AX_MEDIUM_AC | AX_MEDIUM_RE)
-
-/* AX88772 & AX88178 RX_CTL values */
-#define AX_RX_CTL_SO 0x0080
-#define AX_RX_CTL_AP 0x0020
-#define AX_RX_CTL_AM 0x0010
-#define AX_RX_CTL_AB 0x0008
-#define AX_RX_CTL_SEP 0x0004
-#define AX_RX_CTL_AMALL 0x0002
-#define AX_RX_CTL_PRO 0x0001
-#define AX_RX_CTL_MFB_2048 0x0000
-#define AX_RX_CTL_MFB_4096 0x0100
-#define AX_RX_CTL_MFB_8192 0x0200
-#define AX_RX_CTL_MFB_16384 0x0300
-
-#define AX_DEFAULT_RX_CTL (AX_RX_CTL_SO | AX_RX_CTL_AB)
-
-/* GPIO 0 .. 2 toggles */
-#define AX_GPIO_GPO0EN 0x01 /* GPIO0 Output enable */
-#define AX_GPIO_GPO_0 0x02 /* GPIO0 Output value */
-#define AX_GPIO_GPO1EN 0x04 /* GPIO1 Output enable */
-#define AX_GPIO_GPO_1 0x08 /* GPIO1 Output value */
-#define AX_GPIO_GPO2EN 0x10 /* GPIO2 Output enable */
-#define AX_GPIO_GPO_2 0x20 /* GPIO2 Output value */
-#define AX_GPIO_RESERVED 0x40 /* Reserved */
-#define AX_GPIO_RSE 0x80 /* Reload serial EEPROM */
-
-#define AX_EEPROM_MAGIC 0xdeadbeef
-#define AX88172_EEPROM_LEN 0x40
-#define AX88772_EEPROM_LEN 0xff
+#include "asix.h"
#define PHY_MODE_MARVELL 0x0000
#define MII_MARVELL_LED_CTRL 0x0018
@@ -165,15 +36,6 @@
#define PHY_MODE_RTL8211CL 0x000C
-/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
-struct asix_data {
- u8 multi_filter[AX_MCAST_FILTER_SIZE];
- u8 mac_addr[ETH_ALEN];
- u8 phymode;
- u8 ledmode;
- u8 eeprom_len;
-};
-
struct ax88172_int_data {
__le16 res1;
u8 link;
@@ -182,209 +44,6 @@ struct ax88172_int_data {
__le16 res3;
} __packed;
-static int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data)
-{
- void *buf;
- int err = -ENOMEM;
-
- netdev_dbg(dev->net, "asix_read_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
- cmd, value, index, size);
-
- buf = kmalloc(size, GFP_KERNEL);
- if (!buf)
- goto out;
-
- err = usb_control_msg(
- dev->udev,
- usb_rcvctrlpipe(dev->udev, 0),
- cmd,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value,
- index,
- buf,
- size,
- USB_CTRL_GET_TIMEOUT);
- if (err == size)
- memcpy(data, buf, size);
- else if (err >= 0)
- err = -EINVAL;
- kfree(buf);
-
-out:
- return err;
-}
-
-static int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data)
-{
- void *buf = NULL;
- int err = -ENOMEM;
-
- netdev_dbg(dev->net, "asix_write_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
- cmd, value, index, size);
-
- if (data) {
- buf = kmemdup(data, size, GFP_KERNEL);
- if (!buf)
- goto out;
- }
-
- err = usb_control_msg(
- dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- cmd,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value,
- index,
- buf,
- size,
- USB_CTRL_SET_TIMEOUT);
- kfree(buf);
-
-out:
- return err;
-}
-
-static void asix_async_cmd_callback(struct urb *urb)
-{
- struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
- int status = urb->status;
-
- if (status < 0)
- printk(KERN_DEBUG "asix_async_cmd_callback() failed with %d",
- status);
-
- kfree(req);
- usb_free_urb(urb);
-}
-
-static void
-asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data)
-{
- struct usb_ctrlrequest *req;
- int status;
- struct urb *urb;
-
- netdev_dbg(dev->net, "asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
- cmd, value, index, size);
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- netdev_err(dev->net, "Error allocating URB in write_cmd_async!\n");
- return;
- }
-
- req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
- if (!req) {
- netdev_err(dev->net, "Failed to allocate memory for control request\n");
- usb_free_urb(urb);
- return;
- }
-
- req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
- req->bRequest = cmd;
- req->wValue = cpu_to_le16(value);
- req->wIndex = cpu_to_le16(index);
- req->wLength = cpu_to_le16(size);
-
- usb_fill_control_urb(urb, dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- (void *)req, data, size,
- asix_async_cmd_callback, req);
-
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status < 0) {
- netdev_err(dev->net, "Error submitting the control message: status=%d\n",
- status);
- kfree(req);
- usb_free_urb(urb);
- }
-}
-
-static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
-{
- int offset = 0;
-
- while (offset + sizeof(u32) < skb->len) {
- struct sk_buff *ax_skb;
- u16 size;
- u32 header = get_unaligned_le32(skb->data + offset);
-
- offset += sizeof(u32);
-
- /* get the packet length */
- size = (u16) (header & 0x7ff);
- if (size != ((~header >> 16) & 0x07ff)) {
- netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
- return 0;
- }
-
- if ((size > dev->net->mtu + ETH_HLEN) ||
- (size + offset > skb->len)) {
- netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
- size);
- return 0;
- }
- ax_skb = netdev_alloc_skb_ip_align(dev->net, size);
- if (!ax_skb)
- return 0;
-
- skb_put(ax_skb, size);
- memcpy(ax_skb->data, skb->data + offset, size);
- usbnet_skb_return(dev, ax_skb);
-
- offset += (size + 1) & 0xfffe;
- }
-
- if (skb->len != offset) {
- netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d\n",
- skb->len);
- return 0;
- }
- return 1;
-}
-
-static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
- gfp_t flags)
-{
- int padlen;
- int headroom = skb_headroom(skb);
- int tailroom = skb_tailroom(skb);
- u32 packet_len;
- u32 padbytes = 0xffff0000;
-
- padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4;
-
- if ((!skb_cloned(skb)) &&
- ((headroom + tailroom) >= (4 + padlen))) {
- if ((headroom < 4) || (tailroom < padlen)) {
- skb->data = memmove(skb->head + 4, skb->data, skb->len);
- skb_set_tail_pointer(skb, skb->len);
- }
- } else {
- struct sk_buff *skb2;
- skb2 = skb_copy_expand(skb, 4, padlen, flags);
- dev_kfree_skb_any(skb);
- skb = skb2;
- if (!skb)
- return NULL;
- }
-
- skb_push(skb, 4);
- packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4);
- cpu_to_le32s(&packet_len);
- skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
-
- if (padlen) {
- cpu_to_le32s(&padbytes);
- memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
- skb_put(skb, sizeof(padbytes));
- }
- return skb;
-}
-
static void asix_status(struct usbnet *dev, struct urb *urb)
{
struct ax88172_int_data *event;
@@ -405,200 +64,6 @@ static void asix_status(struct usbnet *dev, struct urb *urb)
}
}
-static inline int asix_set_sw_mii(struct usbnet *dev)
-{
- int ret;
- ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL);
- if (ret < 0)
- netdev_err(dev->net, "Failed to enable software MII access\n");
- return ret;
-}
-
-static inline int asix_set_hw_mii(struct usbnet *dev)
-{
- int ret;
- ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL);
- if (ret < 0)
- netdev_err(dev->net, "Failed to enable hardware MII access\n");
- return ret;
-}
-
-static inline int asix_get_phy_addr(struct usbnet *dev)
-{
- u8 buf[2];
- int ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf);
-
- netdev_dbg(dev->net, "asix_get_phy_addr()\n");
-
- if (ret < 0) {
- netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
- goto out;
- }
- netdev_dbg(dev->net, "asix_get_phy_addr() returning 0x%04x\n",
- *((__le16 *)buf));
- ret = buf[1];
-
-out:
- return ret;
-}
-
-static int asix_sw_reset(struct usbnet *dev, u8 flags)
-{
- int ret;
-
- ret = asix_write_cmd(dev, AX_CMD_SW_RESET, flags, 0, 0, NULL);
- if (ret < 0)
- netdev_err(dev->net, "Failed to send software reset: %02x\n", ret);
-
- return ret;
-}
-
-static u16 asix_read_rx_ctl(struct usbnet *dev)
-{
- __le16 v;
- int ret = asix_read_cmd(dev, AX_CMD_READ_RX_CTL, 0, 0, 2, &v);
-
- if (ret < 0) {
- netdev_err(dev->net, "Error reading RX_CTL register: %02x\n", ret);
- goto out;
- }
- ret = le16_to_cpu(v);
-out:
- return ret;
-}
-
-static int asix_write_rx_ctl(struct usbnet *dev, u16 mode)
-{
- int ret;
-
- netdev_dbg(dev->net, "asix_write_rx_ctl() - mode = 0x%04x\n", mode);
- ret = asix_write_cmd(dev, AX_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
- if (ret < 0)
- netdev_err(dev->net, "Failed to write RX_CTL mode to 0x%04x: %02x\n",
- mode, ret);
-
- return ret;
-}
-
-static u16 asix_read_medium_status(struct usbnet *dev)
-{
- __le16 v;
- int ret = asix_read_cmd(dev, AX_CMD_READ_MEDIUM_STATUS, 0, 0, 2, &v);
-
- if (ret < 0) {
- netdev_err(dev->net, "Error reading Medium Status register: %02x\n",
- ret);
- return ret; /* TODO: callers not checking for error ret */
- }
-
- return le16_to_cpu(v);
-
-}
-
-static int asix_write_medium_mode(struct usbnet *dev, u16 mode)
-{
- int ret;
-
- netdev_dbg(dev->net, "asix_write_medium_mode() - mode = 0x%04x\n", mode);
- ret = asix_write_cmd(dev, AX_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL);
- if (ret < 0)
- netdev_err(dev->net, "Failed to write Medium Mode mode to 0x%04x: %02x\n",
- mode, ret);
-
- return ret;
-}
-
-static int asix_write_gpio(struct usbnet *dev, u16 value, int sleep)
-{
- int ret;
-
- netdev_dbg(dev->net, "asix_write_gpio() - value = 0x%04x\n", value);
- ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS, value, 0, 0, NULL);
- if (ret < 0)
- netdev_err(dev->net, "Failed to write GPIO value 0x%04x: %02x\n",
- value, ret);
-
- if (sleep)
- msleep(sleep);
-
- return ret;
-}
-
-/*
- * AX88772 & AX88178 have a 16-bit RX_CTL value
- */
-static void asix_set_multicast(struct net_device *net)
-{
- struct usbnet *dev = netdev_priv(net);
- struct asix_data *data = (struct asix_data *)&dev->data;
- u16 rx_ctl = AX_DEFAULT_RX_CTL;
-
- if (net->flags & IFF_PROMISC) {
- rx_ctl |= AX_RX_CTL_PRO;
- } else if (net->flags & IFF_ALLMULTI ||
- netdev_mc_count(net) > AX_MAX_MCAST) {
- rx_ctl |= AX_RX_CTL_AMALL;
- } else if (netdev_mc_empty(net)) {
- /* just broadcast and directed */
- } else {
- /* We use the 20 byte dev->data
- * for our 8 byte filter buffer
- * to avoid allocating memory that
- * is tricky to free later */
- struct netdev_hw_addr *ha;
- u32 crc_bits;
-
- memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
-
- /* Build the multicast hash filter. */
- netdev_for_each_mc_addr(ha, net) {
- crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
- data->multi_filter[crc_bits >> 3] |=
- 1 << (crc_bits & 7);
- }
-
- asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
- AX_MCAST_FILTER_SIZE, data->multi_filter);
-
- rx_ctl |= AX_RX_CTL_AM;
- }
-
- asix_write_cmd_async(dev, AX_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
-}
-
-static int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
-{
- struct usbnet *dev = netdev_priv(netdev);
- __le16 res;
-
- mutex_lock(&dev->phy_mutex);
- asix_set_sw_mii(dev);
- asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
- (__u16)loc, 2, &res);
- asix_set_hw_mii(dev);
- mutex_unlock(&dev->phy_mutex);
-
- netdev_dbg(dev->net, "asix_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
- phy_id, loc, le16_to_cpu(res));
-
- return le16_to_cpu(res);
-}
-
-static void
-asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
-{
- struct usbnet *dev = netdev_priv(netdev);
- __le16 res = cpu_to_le16(val);
-
- netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
- phy_id, loc, val);
- mutex_lock(&dev->phy_mutex);
- asix_set_sw_mii(dev);
- asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, &res);
- asix_set_hw_mii(dev);
- mutex_unlock(&dev->phy_mutex);
-}
-
/* Get the PHY Identifier from the PHYSID1 & PHYSID2 MII registers */
static u32 asix_get_phyid(struct usbnet *dev)
{
@@ -628,88 +93,6 @@ static u32 asix_get_phyid(struct usbnet *dev)
return phy_id;
}
-static void
-asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
-{
- struct usbnet *dev = netdev_priv(net);
- u8 opt;
-
- if (asix_read_cmd(dev, AX_CMD_READ_MONITOR_MODE, 0, 0, 1, &opt) < 0) {
- wolinfo->supported = 0;
- wolinfo->wolopts = 0;
- return;
- }
- wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
- wolinfo->wolopts = 0;
- if (opt & AX_MONITOR_LINK)
- wolinfo->wolopts |= WAKE_PHY;
- if (opt & AX_MONITOR_MAGIC)
- wolinfo->wolopts |= WAKE_MAGIC;
-}
-
-static int
-asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
-{
- struct usbnet *dev = netdev_priv(net);
- u8 opt = 0;
-
- if (wolinfo->wolopts & WAKE_PHY)
- opt |= AX_MONITOR_LINK;
- if (wolinfo->wolopts & WAKE_MAGIC)
- opt |= AX_MONITOR_MAGIC;
-
- if (asix_write_cmd(dev, AX_CMD_WRITE_MONITOR_MODE,
- opt, 0, 0, NULL) < 0)
- return -EINVAL;
-
- return 0;
-}
-
-static int asix_get_eeprom_len(struct net_device *net)
-{
- struct usbnet *dev = netdev_priv(net);
- struct asix_data *data = (struct asix_data *)&dev->data;
-
- return data->eeprom_len;
-}
-
-static int asix_get_eeprom(struct net_device *net,
- struct ethtool_eeprom *eeprom, u8 *data)
-{
- struct usbnet *dev = netdev_priv(net);
- __le16 *ebuf = (__le16 *)data;
- int i;
-
- /* Crude hack to ensure that we don't overwrite memory
- * if an odd length is supplied
- */
- if (eeprom->len % 2)
- return -EINVAL;
-
- eeprom->magic = AX_EEPROM_MAGIC;
-
- /* ax8817x returns 2 bytes from eeprom on read */
- for (i=0; i < eeprom->len / 2; i++) {
- if (asix_read_cmd(dev, AX_CMD_READ_EEPROM,
- eeprom->offset + i, 0, 2, &ebuf[i]) < 0)
- return -EINVAL;
- }
- return 0;
-}
-
-static void asix_get_drvinfo (struct net_device *net,
- struct ethtool_drvinfo *info)
-{
- struct usbnet *dev = netdev_priv(net);
- struct asix_data *data = (struct asix_data *)&dev->data;
-
- /* Inherit standard device info */
- usbnet_get_drvinfo(net, info);
- strncpy (info->driver, DRIVER_NAME, sizeof info->driver);
- strncpy (info->version, DRIVER_VERSION, sizeof info->version);
- info->eedump_len = data->eeprom_len;
-}
-
static u32 asix_get_link(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
@@ -724,30 +107,6 @@ static int asix_ioctl (struct net_device *net, struct ifreq *rq, int cmd)
return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
}
-static int asix_set_mac_address(struct net_device *net, void *p)
-{
- struct usbnet *dev = netdev_priv(net);
- struct asix_data *data = (struct asix_data *)&dev->data;
- struct sockaddr *addr = p;
-
- if (netif_running(net))
- return -EBUSY;
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
-
- /* We use the 20 byte dev->data
- * for our 6 byte mac buffer
- * to avoid allocating memory that
- * is tricky to free later */
- memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
- asix_write_cmd_async(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
- data->mac_addr);
-
- return 0;
-}
-
/* We need to override some ethtool_ops so we require our
own structure so we don't interfere with other usbnet
devices that may be connected at the same time. */
@@ -760,6 +119,7 @@ static const struct ethtool_ops ax88172_ethtool_ops = {
.set_wol = asix_set_wol,
.get_eeprom_len = asix_get_eeprom_len,
.get_eeprom = asix_get_eeprom,
+ .set_eeprom = asix_set_eeprom,
.get_settings = usbnet_get_settings,
.set_settings = usbnet_set_settings,
.nway_reset = usbnet_nway_reset,
@@ -842,9 +202,6 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
u8 buf[ETH_ALEN];
int i;
unsigned long gpio_bits = dev->driver_info->data;
- struct asix_data *data = (struct asix_data *)&dev->data;
-
- data->eeprom_len = AX88172_EEPROM_LEN;
usbnet_get_endpoints(dev,intf);
@@ -879,6 +236,8 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->netdev_ops = &ax88172_netdev_ops;
dev->net->ethtool_ops = &ax88172_ethtool_ops;
+ dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
+ dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */
asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
@@ -900,6 +259,7 @@ static const struct ethtool_ops ax88772_ethtool_ops = {
.set_wol = asix_set_wol,
.get_eeprom_len = asix_get_eeprom_len,
.get_eeprom = asix_get_eeprom,
+ .set_eeprom = asix_set_eeprom,
.get_settings = usbnet_get_settings,
.set_settings = usbnet_set_settings,
.nway_reset = usbnet_nway_reset,
@@ -1048,12 +408,9 @@ static const struct net_device_ops ax88772_netdev_ops = {
static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret, embd_phy;
- struct asix_data *data = (struct asix_data *)&dev->data;
u8 buf[ETH_ALEN];
u32 phyid;
- data->eeprom_len = AX88772_EEPROM_LEN;
-
usbnet_get_endpoints(dev,intf);
/* Get the MAC address */
@@ -1074,6 +431,8 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->netdev_ops = &ax88772_netdev_ops;
dev->net->ethtool_ops = &ax88772_ethtool_ops;
+ dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
+ dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */
embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
@@ -1121,6 +480,7 @@ static const struct ethtool_ops ax88178_ethtool_ops = {
.set_wol = asix_set_wol,
.get_eeprom_len = asix_get_eeprom_len,
.get_eeprom = asix_get_eeprom,
+ .set_eeprom = asix_set_eeprom,
.get_settings = usbnet_get_settings,
.set_settings = usbnet_set_settings,
.nway_reset = usbnet_nway_reset,
@@ -1404,9 +764,6 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret;
u8 buf[ETH_ALEN];
- struct asix_data *data = (struct asix_data *)&dev->data;
-
- data->eeprom_len = AX88772_EEPROM_LEN;
usbnet_get_endpoints(dev,intf);
@@ -1509,6 +866,8 @@ static const struct driver_info ax88178_info = {
.tx_fixup = asix_tx_fixup,
};
+extern const struct driver_info ax88172a_info;
+
static const struct usb_device_id products [] = {
{
// Linksys USB200M
@@ -1634,6 +993,10 @@ static const struct usb_device_id products [] = {
// Asus USB Ethernet Adapter
USB_DEVICE (0x0b95, 0x7e2b),
.driver_info = (unsigned long) &ax88772_info,
+}, {
+ /* ASIX 88172a demo board */
+ USB_DEVICE(0x0b95, 0x172a),
+ .driver_info = (unsigned long) &ax88172a_info,
},
{ }, // END
};
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
new file mode 100644
index 0000000..c8e0aa8
--- /dev/null
+++ b/drivers/net/usb/ax88172a.c
@@ -0,0 +1,414 @@
+/*
+ * ASIX AX88172A based USB 2.0 Ethernet Devices
+ * Copyright (C) 2012 OMICRON electronics GmbH
+ *
+ * Supports external PHYs via phylib. Based on the driver for the
+ * AX88772. Original copyrights follow:
+ *
+ * Copyright (C) 2003-2006 David Hollis <dhollis@davehollis.com>
+ * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
+ * Copyright (C) 2006 James Painter <jamie.painter@iname.com>
+ * Copyright (c) 2002-2003 TiVo Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "asix.h"
+#include <linux/phy.h>
+
+struct ax88172a_private {
+ struct mii_bus *mdio;
+ struct phy_device *phydev;
+ char phy_name[20];
+ u16 phy_addr;
+ u16 oldmode;
+ int use_embdphy;
+};
+
+/* MDIO read and write wrappers for phylib */
+static int asix_mdio_bus_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+ return asix_mdio_read(((struct usbnet *)bus->priv)->net, phy_id,
+ regnum);
+}
+
+static int asix_mdio_bus_write(struct mii_bus *bus, int phy_id, int regnum,
+ u16 val)
+{
+ asix_mdio_write(((struct usbnet *)bus->priv)->net, phy_id, regnum, val);
+ return 0;
+}
+
+static int ax88172a_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
+{
+ if (!netif_running(net))
+ return -EINVAL;
+
+ if (!net->phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(net->phydev, rq, cmd);
+}
+
+/* set MAC link settings according to information from phylib */
+static void ax88172a_adjust_link(struct net_device *netdev)
+{
+ struct phy_device *phydev = netdev->phydev;
+ struct usbnet *dev = netdev_priv(netdev);
+ struct ax88172a_private *priv = dev->driver_priv;
+ u16 mode = 0;
+
+ if (phydev->link) {
+ mode = AX88772_MEDIUM_DEFAULT;
+
+ if (phydev->duplex == DUPLEX_HALF)
+ mode &= ~AX_MEDIUM_FD;
+
+ if (phydev->speed != SPEED_100)
+ mode &= ~AX_MEDIUM_PS;
+ }
+
+ if (mode != priv->oldmode) {
+ asix_write_medium_mode(dev, mode);
+ priv->oldmode = mode;
+ netdev_dbg(netdev, "speed %u duplex %d, setting mode to 0x%04x\n",
+ phydev->speed, phydev->duplex, mode);
+ phy_print_status(phydev);
+ }
+}
+
+static void ax88172a_status(struct usbnet *dev, struct urb *urb)
+{
+ /* link changes are detected by polling the phy */
+}
+
+/* use phylib infrastructure */
+static int ax88172a_init_mdio(struct usbnet *dev)
+{
+ struct ax88172a_private *priv = dev->driver_priv;
+ int ret, i;
+
+ priv->mdio = mdiobus_alloc();
+ if (!priv->mdio) {
+ netdev_err(dev->net, "Could not allocate MDIO bus\n");
+ return -ENOMEM;
+ }
+
+ priv->mdio->priv = (void *)dev;
+ priv->mdio->read = &asix_mdio_bus_read;
+ priv->mdio->write = &asix_mdio_bus_write;
+ priv->mdio->name = "Asix MDIO Bus";
+ /* mii bus name is usb-<usb bus number>:<usb device number> */
+ snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
+ dev->udev->bus->busnum, dev->udev->devnum);
+
+ priv->mdio->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!priv->mdio->irq) {
+ netdev_err(dev->net, "Could not allocate mdio->irq\n");
+ ret = -ENOMEM;
+ goto mfree;
+ }
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ priv->mdio->irq[i] = PHY_POLL;
+
+ ret = mdiobus_register(priv->mdio);
+ if (ret) {
+ netdev_err(dev->net, "Could not register MDIO bus\n");
+ goto ifree;
+ }
+
+ netdev_info(dev->net, "registered mdio bus %s\n", priv->mdio->id);
+ return 0;
+
+ifree:
+ kfree(priv->mdio->irq);
+mfree:
+ mdiobus_free(priv->mdio);
+ return ret;
+}
+
+static void ax88172a_remove_mdio(struct usbnet *dev)
+{
+ struct ax88172a_private *priv = dev->driver_priv;
+
+ netdev_info(dev->net, "deregistering mdio bus %s\n", priv->mdio->id);
+ mdiobus_unregister(priv->mdio);
+ kfree(priv->mdio->irq);
+ mdiobus_free(priv->mdio);
+}
+
+static const struct net_device_ops ax88172a_netdev_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_change_mtu = usbnet_change_mtu,
+ .ndo_set_mac_address = asix_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = ax88172a_ioctl,
+ .ndo_set_rx_mode = asix_set_multicast,
+};
+
+int ax88172a_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+ if (!net->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(net->phydev, cmd);
+}
+
+int ax88172a_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+ if (!net->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(net->phydev, cmd);
+}
+
+int ax88172a_nway_reset(struct net_device *net)
+{
+ if (!net->phydev)
+ return -ENODEV;
+
+ return phy_start_aneg(net->phydev);
+}
+
+static const struct ethtool_ops ax88172a_ethtool_ops = {
+ .get_drvinfo = asix_get_drvinfo,
+ .get_link = usbnet_get_link,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_wol = asix_get_wol,
+ .set_wol = asix_set_wol,
+ .get_eeprom_len = asix_get_eeprom_len,
+ .get_eeprom = asix_get_eeprom,
+ .set_eeprom = asix_set_eeprom,
+ .get_settings = ax88172a_get_settings,
+ .set_settings = ax88172a_set_settings,
+ .nway_reset = ax88172a_nway_reset,
+};
+
+static int ax88172a_reset_phy(struct usbnet *dev, int embd_phy)
+{
+ int ret;
+
+ ret = asix_sw_reset(dev, AX_SWRESET_IPPD);
+ if (ret < 0)
+ goto err;
+
+ msleep(150);
+ ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
+ if (ret < 0)
+ goto err;
+
+ msleep(150);
+
+ ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_IPPD);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+
+err:
+ return ret;
+}
+
+
+static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int ret;
+ u8 buf[ETH_ALEN];
+ struct ax88172a_private *priv;
+
+ usbnet_get_endpoints(dev, intf);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ netdev_err(dev->net, "Could not allocate memory for private data\n");
+ return -ENOMEM;
+ }
+ dev->driver_priv = priv;
+
+ /* Get the MAC address */
+ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to read MAC address: %d\n", ret);
+ goto free;
+ }
+ memcpy(dev->net->dev_addr, buf, ETH_ALEN);
+
+ dev->net->netdev_ops = &ax88172a_netdev_ops;
+ dev->net->ethtool_ops = &ax88172a_ethtool_ops;
+
+ /* are we using the internal or the external phy? */
+ ret = asix_read_cmd(dev, AX_CMD_SW_PHY_STATUS, 0, 0, 1, buf);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to read software interface selection register: %d\n",
+ ret);
+ goto free;
+ }
+
+ netdev_dbg(dev->net, "AX_CMD_SW_PHY_STATUS = 0x%02x\n", buf[0]);
+ switch (buf[0] & AX_PHY_SELECT_MASK) {
+ case AX_PHY_SELECT_INTERNAL:
+ netdev_dbg(dev->net, "use internal phy\n");
+ priv->use_embdphy = 1;
+ break;
+ case AX_PHY_SELECT_EXTERNAL:
+ netdev_dbg(dev->net, "use external phy\n");
+ priv->use_embdphy = 0;
+ break;
+ default:
+ netdev_err(dev->net, "Interface mode not supported by driver\n");
+ ret = -ENOTSUPP;
+ goto free;
+ }
+
+ priv->phy_addr = asix_read_phy_addr(dev, priv->use_embdphy);
+ ax88172a_reset_phy(dev, priv->use_embdphy);
+
+ /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
+ if (dev->driver_info->flags & FLAG_FRAMING_AX) {
+ /* hard_mtu is still the default - the device does not support
+ jumbo eth frames */
+ dev->rx_urb_size = 2048;
+ }
+
+ /* init MDIO bus */
+ ret = ax88172a_init_mdio(dev);
+ if (ret)
+ goto free;
+
+ return 0;
+
+free:
+ kfree(priv);
+ return ret;
+}
+
+static int ax88172a_stop(struct usbnet *dev)
+{
+ struct ax88172a_private *priv = dev->driver_priv;
+
+ netdev_dbg(dev->net, "Stopping interface\n");
+
+ if (priv->phydev) {
+ netdev_info(dev->net, "Disconnecting from phy %s\n",
+ priv->phy_name);
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ }
+
+ return 0;
+}
+
+static void ax88172a_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct ax88172a_private *priv = dev->driver_priv;
+
+ ax88172a_remove_mdio(dev);
+ kfree(priv);
+}
+
+static int ax88172a_reset(struct usbnet *dev)
+{
+ struct asix_data *data = (struct asix_data *)&dev->data;
+ struct ax88172a_private *priv = dev->driver_priv;
+ int ret;
+ u16 rx_ctl;
+
+ ax88172a_reset_phy(dev, priv->use_embdphy);
+
+ msleep(150);
+ rx_ctl = asix_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
+ ret = asix_write_rx_ctl(dev, 0x0000);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = asix_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
+
+ msleep(150);
+
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_IPG0,
+ AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT,
+ AX88772_IPG2_DEFAULT, 0, NULL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
+ goto out;
+ }
+
+ /* Rewrite MAC address */
+ memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+ data->mac_addr);
+ if (ret < 0)
+ goto out;
+
+ /* Set RX_CTL to default values with 2k buffer, and enable cactus */
+ ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = asix_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
+ rx_ctl);
+
+ rx_ctl = asix_read_medium_status(dev);
+ netdev_dbg(dev->net, "Medium Status is 0x%04x after all initializations\n",
+ rx_ctl);
+
+ /* Connect to PHY */
+ snprintf(priv->phy_name, 20, PHY_ID_FMT,
+ priv->mdio->id, priv->phy_addr);
+
+ priv->phydev = phy_connect(dev->net, priv->phy_name,
+ &ax88172a_adjust_link,
+ 0, PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(priv->phydev)) {
+ netdev_err(dev->net, "Could not connect to PHY device %s\n",
+ priv->phy_name);
+ ret = PTR_ERR(priv->phydev);
+ goto out;
+ }
+
+ netdev_info(dev->net, "Connected to phy %s\n", priv->phy_name);
+
+ /* During power-up, the AX88172A sets the power down (BMCR_PDOWN)
+ * bit of the PHY. Bring the PHY up again.
+ */
+ genphy_resume(priv->phydev);
+ phy_start(priv->phydev);
+
+ return 0;
+
+out:
+ return ret;
+
+}
+
+const struct driver_info ax88172a_info = {
+ .description = "ASIX AX88172A USB 2.0 Ethernet",
+ .bind = ax88172a_bind,
+ .reset = ax88172a_reset,
+ .stop = ax88172a_stop,
+ .unbind = ax88172a_unbind,
+ .status = ax88172a_status,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+ FLAG_MULTI_PACKET,
+ .rx_fixup = asix_rx_fixup,
+ .tx_fixup = asix_tx_fixup,
+};
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index d848d4d..187c144 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -394,7 +394,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
SET_NETDEV_DEV(dev, &intf->dev);
pnd->dev = dev;
- pnd->usb = usb_get_dev(usbdev);
+ pnd->usb = usbdev;
pnd->intf = intf;
pnd->data_intf = data_intf;
spin_lock_init(&pnd->tx_lock);
@@ -440,7 +440,6 @@ out:
static void usbpn_disconnect(struct usb_interface *intf)
{
struct usbpn_dev *pnd = usb_get_intfdata(intf);
- struct usb_device *usb = pnd->usb;
if (pnd->disconnected)
return;
@@ -449,7 +448,6 @@ static void usbpn_disconnect(struct usb_interface *intf)
usb_driver_release_interface(&usbpn_driver,
(pnd->intf == intf) ? pnd->data_intf : pnd->intf);
unregister_netdev(pnd->dev);
- usb_put_dev(usb);
}
static struct usb_driver usbpn_driver = {
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 964031e..a28a983 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -59,6 +59,7 @@
#define USB_PRODUCT_IPHONE_3G 0x1292
#define USB_PRODUCT_IPHONE_3GS 0x1294
#define USB_PRODUCT_IPHONE_4 0x1297
+#define USB_PRODUCT_IPAD 0x129a
#define USB_PRODUCT_IPHONE_4_VZW 0x129c
#define USB_PRODUCT_IPHONE_4S 0x12a0
@@ -101,6 +102,10 @@ static struct usb_device_id ipheth_table[] = {
IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
IPHETH_USBINTF_PROTO) },
{ USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPAD,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
+ { USB_DEVICE_AND_INTERFACE_INFO(
USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
IPHETH_USBINTF_PROTO) },
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index add1064..03c2d8d 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -629,11 +629,31 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return skb->len > 0;
}
+static void mcs7830_status(struct usbnet *dev, struct urb *urb)
+{
+ u8 *buf = urb->transfer_buffer;
+ bool link;
+
+ if (urb->actual_length < 16)
+ return;
+
+ link = !(buf[1] & 0x20);
+ if (netif_carrier_ok(dev->net) != link) {
+ if (link) {
+ netif_carrier_on(dev->net);
+ usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+ } else
+ netif_carrier_off(dev->net);
+ netdev_dbg(dev->net, "Link Status is: %d\n", link);
+ }
+}
+
static const struct driver_info moschip_info = {
.description = "MOSCHIP 7830/7832/7730 usb-NET adapter",
.bind = mcs7830_bind,
.rx_fixup = mcs7830_rx_fixup,
- .flags = FLAG_ETHER,
+ .flags = FLAG_ETHER | FLAG_LINK_INTR,
+ .status = mcs7830_status,
.in = 1,
.out = 2,
};
@@ -642,7 +662,8 @@ static const struct driver_info sitecom_info = {
.description = "Sitecom LN-30 usb-NET adapter",
.bind = mcs7830_bind,
.rx_fixup = mcs7830_rx_fixup,
- .flags = FLAG_ETHER,
+ .flags = FLAG_ETHER | FLAG_LINK_INTR,
+ .status = mcs7830_status,
.in = 1,
.out = 2,
};
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 7023220..a0b5807 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1329,8 +1329,6 @@ static int pegasus_probe(struct usb_interface *intf,
}
pegasus_count++;
- usb_get_dev(dev);
-
net = alloc_etherdev(sizeof(struct pegasus));
if (!net)
goto out;
@@ -1407,7 +1405,6 @@ out2:
out1:
free_netdev(net);
out:
- usb_put_dev(dev);
pegasus_dec_workqueue();
return res;
}
@@ -1425,7 +1422,6 @@ static void pegasus_disconnect(struct usb_interface *intf)
pegasus->flags |= PEGASUS_UNPLUG;
cancel_delayed_work(&pegasus->carrier_check);
unregister_netdev(pegasus->net);
- usb_put_dev(interface_to_usbdev(intf));
unlink_all_urbs(pegasus);
free_all_urbs(pegasus);
free_skb_pool(pegasus);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 380dbea..2ea126a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1,6 +1,10 @@
/*
* Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
*
+ * The probing code is heavily inspired by cdc_ether, which is:
+ * Copyright (C) 2003-2005 by David Brownell
+ * Copyright (C) 2006 by Ole Andre Vadla Ravnas (ActiveSync)
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
@@ -15,11 +19,7 @@
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc-wdm.h>
-/* The name of the CDC Device Management driver */
-#define DM_DRIVER "cdc_wdm"
-
-/*
- * This driver supports wwan (3G/LTE/?) devices using a vendor
+/* This driver supports wwan (3G/LTE/?) devices using a vendor
* specific management protocol called Qualcomm MSM Interface (QMI) -
* in addition to the more common AT commands over serial interface
* management
@@ -31,59 +31,117 @@
* management protocol is used in place of the standard CDC
* notifications NOTIFY_NETWORK_CONNECTION and NOTIFY_SPEED_CHANGE
*
+ * Alternatively, control and data functions can be combined in a
+ * single USB interface.
+ *
* Handling a protocol like QMI is outside the scope of any driver.
- * It can be exported as a character device using the cdc-wdm driver,
- * which will enable userspace applications ("modem managers") to
- * handle it. This may be required to use the network interface
- * provided by the driver.
+ * It is exported as a character device using the cdc-wdm driver as
+ * a subdriver, enabling userspace applications ("modem managers") to
+ * handle it.
*
* These devices may alternatively/additionally be configured using AT
- * commands on any of the serial interfaces driven by the option driver
- *
- * This driver binds only to the data ("slave") interface to enable
- * the cdc-wdm driver to bind to the control interface. It still
- * parses the CDC functional descriptors on the control interface to
- * a) verify that this is indeed a handled interface (CDC Union
- * header lists it as slave)
- * b) get MAC address and other ethernet config from the CDC Ethernet
- * header
- * c) enable user bind requests against the control interface, which
- * is the common way to bind to CDC Ethernet Control Model type
- * interfaces
- * d) provide a hint to the user about which interface is the
- * corresponding management interface
+ * commands on a serial interface
*/
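
The rewritten description above says the QMI control channel is exported to userspace as a cdc-wdm character device. As a rough illustration of what that means for a "modem manager" application (not part of this patch; the node name and message bytes are placeholders, not real QMI traffic), such a program simply read()s and write()s raw management messages on /dev/cdc-wdmN:

	/* user-space sketch only: exchange one placeholder message on a
	 * cdc-wdm character device node */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char req[16] = { 0 };	/* placeholder request buffer */
		unsigned char resp[512];
		ssize_t n;
		int fd = open("/dev/cdc-wdm0", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, req, sizeof(req)) < 0)	/* send a management message */
			perror("write");
		n = read(fd, resp, sizeof(resp));	/* block for the reply */
		if (n > 0)
			printf("got %zd byte response\n", n);
		close(fd);
		return 0;
	}
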
+/* driver specific data */
+struct qmi_wwan_state {
+ struct usb_driver *subdriver;
+ atomic_t pmcount;
+ unsigned long unused;
+ struct usb_interface *control;
+ struct usb_interface *data;
+};
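
struct qmi_wwan_state is overlaid on the fixed-size data[] scratch area of struct usbnet via a cast, with the BUILD_BUG_ON later in this hunk guarding the size. A standalone sketch of the same "private state in a scratch buffer, checked at compile time" pattern, with all names (scratch_dev, my_state, ...) hypothetical:

	#include <stdatomic.h>
	#include <stdio.h>

	struct scratch_dev {
		unsigned long data[5];	/* fixed scratch area, like usbnet->data */
	};

	struct my_state {
		void *subdriver;
		atomic_int pmcount;
		void *control;
		void *data_intf;
	};

	/* compile-time guarantee that the state fits in the scratch area */
	_Static_assert(sizeof(struct my_state) <= sizeof(((struct scratch_dev *)0)->data),
		       "state too large for scratch area");

	int main(void)
	{
		struct scratch_dev dev = { { 0 } };
		struct my_state *st = (void *)&dev.data;	/* same cast idiom as the patch */

		atomic_store(&st->pmcount, 0);
		printf("pmcount=%d\n", atomic_load(&st->pmcount));
		return 0;
	}
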
+
+/* using a counter to merge subdriver requests with our own into a combined state */
+static int qmi_wwan_manage_power(struct usbnet *dev, int on)
+{
+ struct qmi_wwan_state *info = (void *)&dev->data;
+ int rv = 0;
+
+ dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on);
+
+ if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
+ /* need autopm_get/put here to ensure the usbcore sees the new value */
+ rv = usb_autopm_get_interface(dev->intf);
+ if (rv < 0)
+ goto err;
+ dev->intf->needs_remote_wakeup = on;
+ usb_autopm_put_interface(dev->intf);
+ }
+err:
+ return rv;
+}
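
The comment above describes merging two independent power-management requesters (usbnet itself and the cdc-wdm subdriver) through one counter, so that only the 0->1 and 1->0 transitions touch the interface. A minimal user-space sketch of that counting pattern, with made-up names and printf standing in for the autopm/remote-wakeup calls:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int pmcount;
	static int remote_wakeup_enabled;

	static void manage_power(int on)
	{
		if ((on && atomic_fetch_add(&pmcount, 1) + 1 == 1) ||
		    (!on && atomic_fetch_sub(&pmcount, 1) - 1 == 0)) {
			/* edge transition: only here would the driver toggle
			 * needs_remote_wakeup under an autopm reference */
			remote_wakeup_enabled = on;
			printf("remote wakeup -> %d\n", on);
		}
	}

	int main(void)
	{
		manage_power(1);	/* requester A: 0 -> 1, state changes */
		manage_power(1);	/* requester B: 1 -> 2, no hardware touch */
		manage_power(0);	/* 2 -> 1, no hardware touch */
		manage_power(0);	/* 1 -> 0, state changes */
		return 0;
	}
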
+
+static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+
+ /* can be called while disconnecting */
+ if (!dev)
+ return 0;
+ return qmi_wwan_manage_power(dev, on);
+}
+
+/* collect all three endpoints and register subdriver */
+static int qmi_wwan_register_subdriver(struct usbnet *dev)
+{
+ int rv;
+ struct usb_driver *subdriver = NULL;
+ struct qmi_wwan_state *info = (void *)&dev->data;
+
+ /* collect bulk endpoints */
+ rv = usbnet_get_endpoints(dev, info->data);
+ if (rv < 0)
+ goto err;
+
+ /* update status endpoint if separate control interface */
+ if (info->control != info->data)
+ dev->status = &info->control->cur_altsetting->endpoint[0];
+
+ /* require interrupt endpoint for subdriver */
+ if (!dev->status) {
+ rv = -EINVAL;
+ goto err;
+ }
+
+ /* for subdriver power management */
+ atomic_set(&info->pmcount, 0);
+
+ /* register subdriver */
+ subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power);
+ if (IS_ERR(subdriver)) {
+ dev_err(&info->control->dev, "subdriver registration failed\n");
+ rv = PTR_ERR(subdriver);
+ goto err;
+ }
+
+ /* prevent usbnet from using status endpoint */
+ dev->status = NULL;
+
+ /* save subdriver struct for suspend/resume wrappers */
+ info->subdriver = subdriver;
+
+err:
+ return rv;
+}
+
static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
{
int status = -1;
- struct usb_interface *control = NULL;
u8 *buf = intf->cur_altsetting->extra;
int len = intf->cur_altsetting->extralen;
struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
struct usb_cdc_union_desc *cdc_union = NULL;
struct usb_cdc_ether_desc *cdc_ether = NULL;
- u32 required = 1 << USB_CDC_HEADER_TYPE | 1 << USB_CDC_UNION_TYPE;
u32 found = 0;
- atomic_t *pmcount = (void *)&dev->data[1];
+ struct usb_driver *driver = driver_of(intf);
+ struct qmi_wwan_state *info = (void *)&dev->data;
- atomic_set(pmcount, 0);
+ BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
- /*
- * assume a data interface has no additional descriptors and
- * that the control and data interface are numbered
- * consecutively - this holds for the Huawei device at least
- */
- if (len == 0 && desc->bInterfaceNumber > 0) {
- control = usb_ifnum_to_if(dev->udev, desc->bInterfaceNumber - 1);
- if (!control)
- goto err;
-
- buf = control->cur_altsetting->extra;
- len = control->cur_altsetting->extralen;
- dev_dbg(&intf->dev, "guessing \"control\" => %s, \"data\" => this\n",
- dev_name(&control->dev));
- }
+ /* require a single interrupt status endpoint for subdriver */
+ if (intf->cur_altsetting->desc.bNumEndpoints != 1)
+ goto err;
while (len > 3) {
struct usb_descriptor_header *h = (void *)buf;
@@ -142,15 +200,23 @@ next_desc:
}
/* did we find all the required ones? */
- if ((found & required) != required) {
+ if (!(found & (1 << USB_CDC_HEADER_TYPE)) ||
+ !(found & (1 << USB_CDC_UNION_TYPE))) {
dev_err(&intf->dev, "CDC functional descriptors missing\n");
goto err;
}
- /* give the user a helpful hint if trying to bind to the wrong interface */
- if (cdc_union && desc->bInterfaceNumber == cdc_union->bMasterInterface0) {
- dev_err(&intf->dev, "leaving \"control\" interface for " DM_DRIVER " - try binding to %s instead!\n",
- dev_name(&usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0)->dev));
+ /* verify CDC Union */
+ if (desc->bInterfaceNumber != cdc_union->bMasterInterface0) {
+ dev_err(&intf->dev, "bogus CDC Union: master=%u\n", cdc_union->bMasterInterface0);
+ goto err;
+ }
+
+ /* need to save these for unbind */
+ info->control = intf;
+ info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
+ if (!info->data) {
+ dev_err(&intf->dev, "bogus CDC Union: slave=%u\n", cdc_union->bSlaveInterface0);
goto err;
}
@@ -160,59 +226,29 @@ next_desc:
usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress);
}
- /* success! point the user to the management interface */
- if (control)
- dev_info(&intf->dev, "Use \"" DM_DRIVER "\" for QMI interface %s\n",
- dev_name(&control->dev));
-
- /* XXX: add a sysfs symlink somewhere to help management applications find it? */
+ /* claim data interface and set it up */
+ status = usb_driver_claim_interface(driver, info->data, dev);
+ if (status < 0)
+ goto err;
- /* collect bulk endpoints now that we know intf == "data" interface */
- status = usbnet_get_endpoints(dev, intf);
+ status = qmi_wwan_register_subdriver(dev);
+ if (status < 0) {
+ usb_set_intfdata(info->data, NULL);
+ usb_driver_release_interface(driver, info->data);
+ }
err:
return status;
}
-/* using a counter to merge subdriver requests with our own into a combined state */
-static int qmi_wwan_manage_power(struct usbnet *dev, int on)
-{
- atomic_t *pmcount = (void *)&dev->data[1];
- int rv = 0;
-
- dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(pmcount), on);
-
- if ((on && atomic_add_return(1, pmcount) == 1) || (!on && atomic_dec_and_test(pmcount))) {
- /* need autopm_get/put here to ensure the usbcore sees the new value */
- rv = usb_autopm_get_interface(dev->intf);
- if (rv < 0)
- goto err;
- dev->intf->needs_remote_wakeup = on;
- usb_autopm_put_interface(dev->intf);
- }
-err:
- return rv;
-}
-
-static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
-{
- struct usbnet *dev = usb_get_intfdata(intf);
- return qmi_wwan_manage_power(dev, on);
-}
-
/* Some devices combine the "control" and "data" functions into a
* single interface with all three endpoints: interrupt + bulk in and
* out
- *
- * Setting up cdc-wdm as a subdriver owning the interrupt endpoint
- * will let it provide userspace access to the encapsulated QMI
- * protocol without interfering with the usbnet operations.
- */
+ */
static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
{
int rv;
- struct usb_driver *subdriver = NULL;
- atomic_t *pmcount = (void *)&dev->data[1];
+ struct qmi_wwan_state *info = (void *)&dev->data;
/* ZTE makes devices where the interface descriptors and endpoint
* configurations of two or more interfaces are identical, even
@@ -228,66 +264,39 @@ static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
goto err;
}
- atomic_set(pmcount, 0);
-
- /* collect all three endpoints */
- rv = usbnet_get_endpoints(dev, intf);
- if (rv < 0)
- goto err;
-
- /* require interrupt endpoint for subdriver */
- if (!dev->status) {
- rv = -EINVAL;
- goto err;
- }
-
- subdriver = usb_cdc_wdm_register(intf, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power);
- if (IS_ERR(subdriver)) {
- rv = PTR_ERR(subdriver);
- goto err;
- }
-
- /* can't let usbnet use the interrupt endpoint */
- dev->status = NULL;
-
- /* save subdriver struct for suspend/resume wrappers */
- dev->data[0] = (unsigned long)subdriver;
+ /* control and data is shared */
+ info->control = intf;
+ info->data = intf;
+ rv = qmi_wwan_register_subdriver(dev);
err:
return rv;
}
-/* Gobi devices uses identical class/protocol codes for all interfaces regardless
- * of function. Some of these are CDC ACM like and have the exact same endpoints
- * we are looking for. This leaves two possible strategies for identifying the
- * correct interface:
- * a) hardcoding interface number, or
- * b) use the fact that the wwan interface is the only one lacking additional
- * (CDC functional) descriptors
- *
- * Let's see if we can get away with the generic b) solution.
- */
-static int qmi_wwan_bind_gobi(struct usbnet *dev, struct usb_interface *intf)
+static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf)
{
- int rv = -EINVAL;
-
- /* ignore any interface with additional descriptors */
- if (intf->cur_altsetting->extralen)
- goto err;
-
- rv = qmi_wwan_bind_shared(dev, intf);
-err:
- return rv;
-}
-
-static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *intf)
-{
- struct usb_driver *subdriver = (void *)dev->data[0];
-
- if (subdriver && subdriver->disconnect)
- subdriver->disconnect(intf);
+ struct qmi_wwan_state *info = (void *)&dev->data;
+ struct usb_driver *driver = driver_of(intf);
+ struct usb_interface *other;
+
+ if (info->subdriver && info->subdriver->disconnect)
+ info->subdriver->disconnect(info->control);
+
+ /* allow user to unbind using either control or data */
+ if (intf == info->control)
+ other = info->data;
+ else
+ other = info->control;
+
+ /* only if not shared */
+ if (other && intf != other) {
+ usb_set_intfdata(other, NULL);
+ usb_driver_release_interface(driver, other);
+ }
- dev->data[0] = (unsigned long)NULL;
+ info->subdriver = NULL;
+ info->data = NULL;
+ info->control = NULL;
}
/* suspend/resume wrappers calling both usbnet and the cdc-wdm
@@ -299,15 +308,15 @@ static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *int
static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
- struct usb_driver *subdriver = (void *)dev->data[0];
+ struct qmi_wwan_state *info = (void *)&dev->data;
int ret;
ret = usbnet_suspend(intf, message);
if (ret < 0)
goto err;
- if (subdriver && subdriver->suspend)
- ret = subdriver->suspend(intf, message);
+ if (info->subdriver && info->subdriver->suspend)
+ ret = info->subdriver->suspend(intf, message);
if (ret < 0)
usbnet_resume(intf);
err:
@@ -317,59 +326,77 @@ err:
static int qmi_wwan_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
- struct usb_driver *subdriver = (void *)dev->data[0];
+ struct qmi_wwan_state *info = (void *)&dev->data;
int ret = 0;
- if (subdriver && subdriver->resume)
- ret = subdriver->resume(intf);
+ if (info->subdriver && info->subdriver->resume)
+ ret = info->subdriver->resume(intf);
if (ret < 0)
goto err;
ret = usbnet_resume(intf);
- if (ret < 0 && subdriver && subdriver->resume && subdriver->suspend)
- subdriver->suspend(intf, PMSG_SUSPEND);
+ if (ret < 0 && info->subdriver && info->subdriver->resume && info->subdriver->suspend)
+ info->subdriver->suspend(intf, PMSG_SUSPEND);
err:
return ret;
}
-
static const struct driver_info qmi_wwan_info = {
- .description = "QMI speaking wwan device",
+ .description = "WWAN/QMI device",
.flags = FLAG_WWAN,
.bind = qmi_wwan_bind,
+ .unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
};
static const struct driver_info qmi_wwan_shared = {
- .description = "QMI speaking wwan device with combined interface",
+ .description = "WWAN/QMI device",
.flags = FLAG_WWAN,
.bind = qmi_wwan_bind_shared,
- .unbind = qmi_wwan_unbind_shared,
+ .unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
};
-static const struct driver_info qmi_wwan_gobi = {
- .description = "Qualcomm Gobi wwan/QMI device",
+static const struct driver_info qmi_wwan_force_int0 = {
+ .description = "Qualcomm WWAN/QMI device",
.flags = FLAG_WWAN,
- .bind = qmi_wwan_bind_gobi,
- .unbind = qmi_wwan_unbind_shared,
+ .bind = qmi_wwan_bind_shared,
+ .unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
+ .data = BIT(0), /* interface whitelist bitmap */
};
-/* ZTE suck at making USB descriptors */
static const struct driver_info qmi_wwan_force_int1 = {
.description = "Qualcomm WWAN/QMI device",
.flags = FLAG_WWAN,
.bind = qmi_wwan_bind_shared,
- .unbind = qmi_wwan_unbind_shared,
+ .unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
.data = BIT(1), /* interface whitelist bitmap */
};
+static const struct driver_info qmi_wwan_force_int2 = {
+ .description = "Qualcomm WWAN/QMI device",
+ .flags = FLAG_WWAN,
+ .bind = qmi_wwan_bind_shared,
+ .unbind = qmi_wwan_unbind,
+ .manage_power = qmi_wwan_manage_power,
+ .data = BIT(2), /* interface whitelist bitmap */
+};
+
+static const struct driver_info qmi_wwan_force_int3 = {
+ .description = "Qualcomm WWAN/QMI device",
+ .flags = FLAG_WWAN,
+ .bind = qmi_wwan_bind_shared,
+ .unbind = qmi_wwan_unbind,
+ .manage_power = qmi_wwan_manage_power,
+ .data = BIT(3), /* interface whitelist bitmap */
+};
+
static const struct driver_info qmi_wwan_force_int4 = {
.description = "Qualcomm WWAN/QMI device",
.flags = FLAG_WWAN,
.bind = qmi_wwan_bind_shared,
- .unbind = qmi_wwan_unbind_shared,
+ .unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
.data = BIT(4), /* interface whitelist bitmap */
};
@@ -390,16 +417,23 @@ static const struct driver_info qmi_wwan_force_int4 = {
static const struct driver_info qmi_wwan_sierra = {
.description = "Sierra Wireless wwan/QMI device",
.flags = FLAG_WWAN,
- .bind = qmi_wwan_bind_gobi,
- .unbind = qmi_wwan_unbind_shared,
+ .bind = qmi_wwan_bind_shared,
+ .unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
.data = BIT(8) | BIT(19), /* interface whitelist bitmap */
};
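
The ".data = BIT(n) /* interface whitelist bitmap */" entries above encode which interface numbers a given driver_info may bind to. The driver's actual test lives in qmi_wwan_bind_shared(), outside this hunk, so the following is only a conceptual sketch of such a bitmap check:

	#include <stdio.h>

	#define BIT(n) (1UL << (n))

	static int ifnum_allowed(unsigned long whitelist, unsigned int ifnum)
	{
		/* an empty whitelist means "no restriction" */
		if (!whitelist)
			return 1;
		return !!(whitelist & BIT(ifnum));
	}

	int main(void)
	{
		unsigned long wl = BIT(8) | BIT(19);	/* e.g. the Sierra entry above */

		printf("if 8:  %d\n", ifnum_allowed(wl, 8));	/* 1 */
		printf("if 3:  %d\n", ifnum_allowed(wl, 3));	/* 0 */
		printf("if 19: %d\n", ifnum_allowed(wl, 19));	/* 1 */
		return 0;
	}
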
#define HUAWEI_VENDOR_ID 0x12D1
+
+/* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
+#define QMI_GOBI1K_DEVICE(vend, prod) \
+ USB_DEVICE(vend, prod), \
+ .driver_info = (unsigned long)&qmi_wwan_force_int3
+
+/* Gobi 2000 and Gobi 3000 QMI/wwan interface number is 0 according to qcserial */
#define QMI_GOBI_DEVICE(vend, prod) \
USB_DEVICE(vend, prod), \
- .driver_info = (unsigned long)&qmi_wwan_gobi
+ .driver_info = (unsigned long)&qmi_wwan_force_int0
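
QMI_GOBI1K_DEVICE and QMI_GOBI_DEVICE pair a (vendor, product) ID with a driver_info pointer selecting the interface-3 or interface-0 configuration. A self-contained sketch of that table-plus-lookup pattern, with made-up types rather than the kernel's usb_device_id machinery:

	#include <stdio.h>

	struct drv_info {
		const char *description;
		unsigned long whitelist;	/* interface whitelist bitmap */
	};

	static const struct drv_info gobi1k = { "Gobi 1K",    1UL << 3 };
	static const struct drv_info gobi   = { "Gobi 2K/3K", 1UL << 0 };

	struct dev_id {
		unsigned short vendor, product;
		const struct drv_info *info;
	};

	static const struct dev_id table[] = {
		{ 0x05c6, 0x9212, &gobi1k },	/* Acer Gobi 1000 */
		{ 0x413c, 0x8186, &gobi },	/* Dell Gobi 2000 */
		{ 0, 0, NULL }			/* terminator, like the { } entry */
	};

	static const struct drv_info *lookup(unsigned short v, unsigned short p)
	{
		const struct dev_id *id;

		for (id = table; id->info; id++)
			if (id->vendor == v && id->product == p)
				return id->info;
		return NULL;
	}

	int main(void)
	{
		const struct drv_info *info = lookup(0x05c6, 0x9212);

		if (info)
			printf("%s, whitelist=0x%lx\n", info->description, info->whitelist);
		return 0;
	}
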
static const struct usb_device_id products[] = {
{ /* Huawei E392, E398 and possibly others sharing both device id and more... */
@@ -407,7 +441,7 @@ static const struct usb_device_id products[] = {
.idVendor = HUAWEI_VENDOR_ID,
.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
.bInterfaceSubClass = 1,
- .bInterfaceProtocol = 8, /* NOTE: This is the *slave* interface of the CDC Union! */
+ .bInterfaceProtocol = 9, /* CDC Ethernet *control* interface */
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
@@ -415,7 +449,7 @@ static const struct usb_device_id products[] = {
.idVendor = HUAWEI_VENDOR_ID,
.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
.bInterfaceSubClass = 1,
- .bInterfaceProtocol = 56, /* NOTE: This is the *slave* interface of the CDC Union! */
+ .bInterfaceProtocol = 57, /* CDC Ethernet *control* interface */
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* Huawei E392, E398 and possibly others in "Windows mode"
@@ -447,6 +481,15 @@ static const struct usb_device_id products[] = {
.bInterfaceProtocol = 0xff,
.driver_info = (unsigned long)&qmi_wwan_force_int4,
},
+ { /* ZTE MF821D */
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x19d2,
+ .idProduct = 0x0326,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0xff,
+ .bInterfaceProtocol = 0xff,
+ .driver_info = (unsigned long)&qmi_wwan_force_int4,
+ },
{ /* ZTE (Vodafone) K3520-Z */
.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x19d2,
@@ -501,6 +544,15 @@ static const struct usb_device_id products[] = {
.bInterfaceProtocol = 0xff,
.driver_info = (unsigned long)&qmi_wwan_force_int4,
},
+ { /* ZTE MF60 */
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x19d2,
+ .idProduct = 0x1402,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0xff,
+ .bInterfaceProtocol = 0xff,
+ .driver_info = (unsigned long)&qmi_wwan_force_int2,
+ },
{ /* Sierra Wireless MC77xx in QMI mode */
.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x1199,
@@ -510,20 +562,24 @@ static const struct usb_device_id products[] = {
.bInterfaceProtocol = 0xff,
.driver_info = (unsigned long)&qmi_wwan_sierra,
},
- {QMI_GOBI_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
- {QMI_GOBI_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
- {QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */
- {QMI_GOBI_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
- {QMI_GOBI_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */
- {QMI_GOBI_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
- {QMI_GOBI_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
- {QMI_GOBI_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
- {QMI_GOBI_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */
- {QMI_GOBI_DEVICE(0x05c6, 0x9002)}, /* Generic Gobi Modem device */
- {QMI_GOBI_DEVICE(0x05c6, 0x9202)}, /* Generic Gobi Modem device */
- {QMI_GOBI_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */
- {QMI_GOBI_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
- {QMI_GOBI_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */
+
+ /* Gobi 1000 devices */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
+ {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
+ {QMI_GOBI1K_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */
+ {QMI_GOBI1K_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9002)}, /* Generic Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9202)}, /* Generic Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */
+ {QMI_GOBI1K_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */
+
+ /* Gobi 2000 and 3000 devices */
{QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
{QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */
{QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
@@ -547,14 +603,33 @@ static const struct usb_device_id products[] = {
{QMI_GOBI_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
{QMI_GOBI_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
{QMI_GOBI_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
+ {QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */
+ {QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
{ } /* END */
};
MODULE_DEVICE_TABLE(usb, products);
+static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id *prod)
+{
+ struct usb_device_id *id = (struct usb_device_id *)prod;
+
+ /* Workaround to enable dynamic IDs. This disables usbnet
+ * blacklisting functionality, which, if required, can be
+ * reimplemented here by using a magic "blacklist" value
+ * instead of 0 in the static device id table
+ */
+ if (!id->driver_info) {
+ dev_dbg(&intf->dev, "setting defaults for dynamic device id\n");
+ id->driver_info = (unsigned long)&qmi_wwan_shared;
+ }
+
+ return usbnet_probe(intf, id);
+}
+
static struct usb_driver qmi_wwan_driver = {
.name = "qmi_wwan",
.id_table = products,
- .probe = usbnet_probe,
+ .probe = qmi_wwan_probe,
.disconnect = usbnet_disconnect,
.suspend = qmi_wwan_suspend,
.resume = qmi_wwan_resume,
@@ -563,17 +638,7 @@ static struct usb_driver qmi_wwan_driver = {
.disable_hub_initiated_lpm = 1,
};
-static int __init qmi_wwan_init(void)
-{
- return usb_register(&qmi_wwan_driver);
-}
-module_init(qmi_wwan_init);
-
-static void __exit qmi_wwan_exit(void)
-{
- usb_deregister(&qmi_wwan_driver);
-}
-module_exit(qmi_wwan_exit);
+module_usb_driver(qmi_wwan_driver);
MODULE_AUTHOR("Bjørn Mork <bjorn@mork.no>");
MODULE_DESCRIPTION("Qualcomm MSM Interface (QMI) WWAN driver");
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 3faef56..d75d1f5 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -946,7 +946,7 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
}
static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
-static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
+static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
.rx_urb_size = 8 * 1024,
.whitelist = {
.infolen = ARRAY_SIZE(sierra_net_ifnum_list),
@@ -954,7 +954,7 @@ static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
}
};
-static const struct driver_info sierra_net_info_68A3 = {
+static const struct driver_info sierra_net_info_direct_ip = {
.description = "Sierra Wireless USB-to-WWAN Modem",
.flags = FLAG_WWAN | FLAG_SEND_ZLP,
.bind = sierra_net_bind,
@@ -962,12 +962,18 @@ static const struct driver_info sierra_net_info_68A3 = {
.status = sierra_net_status,
.rx_fixup = sierra_net_rx_fixup,
.tx_fixup = sierra_net_tx_fixup,
- .data = (unsigned long)&sierra_net_info_data_68A3,
+ .data = (unsigned long)&sierra_net_info_data_direct_ip,
};
static const struct usb_device_id products[] = {
{USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
- .driver_info = (unsigned long) &sierra_net_info_68A3},
+ .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+ {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */
+ .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+ {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */
+ .driver_info = (unsigned long) &sierra_net_info_direct_ip},
+ {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */
+ .driver_info = (unsigned long) &sierra_net_info_direct_ip},
{}, /* last item */
};
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 75bbfdb..f5ab6e6 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -616,7 +616,7 @@ static void smsc75xx_init_mac_address(struct usbnet *dev)
/* no eeprom, or eeprom values are invalid. generate random MAC */
eth_hw_addr_random(dev->net);
- netif_dbg(dev, ifup, dev->net, "MAC address set to random_ether_addr");
+ netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr");
}
static int smsc75xx_set_mac_address(struct usbnet *dev)
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 24c8f37..d45e539 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -578,6 +578,36 @@ static int smsc95xx_ethtool_set_eeprom(struct net_device *netdev,
return smsc95xx_write_eeprom(dev, ee->offset, ee->len, data);
}
+static int smsc95xx_ethtool_getregslen(struct net_device *netdev)
+{
+ /* all smsc95xx registers */
+ return COE_CR - ID_REV + 1;
+}
+
+static void
+smsc95xx_ethtool_getregs(struct net_device *netdev, struct ethtool_regs *regs,
+ void *buf)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ unsigned int i, j;
+ int retval;
+ u32 *data = buf;
+
+ retval = smsc95xx_read_reg(dev, ID_REV, &regs->version);
+ if (retval < 0) {
+ netdev_warn(netdev, "REGS: cannot read ID_REV\n");
+ return;
+ }
+
+ for (i = ID_REV, j = 0; i <= COE_CR; i += (sizeof(u32)), j++) {
+ retval = smsc95xx_read_reg(dev, i, &data[j]);
+ if (retval < 0) {
+ netdev_warn(netdev, "REGS: cannot read reg[%x]\n", i);
+ return;
+ }
+ }
+}
+
static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_link = usbnet_get_link,
.nway_reset = usbnet_nway_reset,
@@ -589,6 +619,8 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_eeprom_len = smsc95xx_ethtool_get_eeprom_len,
.get_eeprom = smsc95xx_ethtool_get_eeprom,
.set_eeprom = smsc95xx_ethtool_set_eeprom,
+ .get_regs_len = smsc95xx_ethtool_getregslen,
+ .get_regs = smsc95xx_ethtool_getregs,
};
static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -615,7 +647,7 @@ static void smsc95xx_init_mac_address(struct usbnet *dev)
/* no eeprom, or eeprom values are invalid. generate random MAC */
eth_hw_addr_random(dev->net);
- netif_dbg(dev, ifup, dev->net, "MAC address set to random_ether_addr\n");
+ netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
}
static int smsc95xx_set_mac_address(struct usbnet *dev)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 9f58330..8531c1c 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -180,7 +180,40 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
}
EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
-static void intr_complete (struct urb *urb);
+static void intr_complete (struct urb *urb)
+{
+ struct usbnet *dev = urb->context;
+ int status = urb->status;
+
+ switch (status) {
+ /* success */
+ case 0:
+ dev->driver_info->status(dev, urb);
+ break;
+
+ /* software-driven interface shutdown */
+ case -ENOENT: /* urb killed */
+ case -ESHUTDOWN: /* hardware gone */
+ netif_dbg(dev, ifdown, dev->net,
+ "intr shutdown, code %d\n", status);
+ return;
+
+ /* NOTE: not throttling like RX/TX, since this endpoint
+ * already polls infrequently
+ */
+ default:
+ netdev_dbg(dev->net, "intr status %d\n", status);
+ break;
+ }
+
+ if (!netif_running (dev->net))
+ return;
+
+ status = usb_submit_urb (urb, GFP_ATOMIC);
+ if (status != 0)
+ netif_err(dev, timer, dev->net,
+ "intr resubmit --> %d\n", status);
+}
static int init_status (struct usbnet *dev, struct usb_interface *intf)
{
@@ -519,42 +552,6 @@ block:
netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
-static void intr_complete (struct urb *urb)
-{
- struct usbnet *dev = urb->context;
- int status = urb->status;
-
- switch (status) {
- /* success */
- case 0:
- dev->driver_info->status(dev, urb);
- break;
-
- /* software-driven interface shutdown */
- case -ENOENT: /* urb killed */
- case -ESHUTDOWN: /* hardware gone */
- netif_dbg(dev, ifdown, dev->net,
- "intr shutdown, code %d\n", status);
- return;
-
- /* NOTE: not throttling like RX/TX, since this endpoint
- * already polls infrequently
- */
- default:
- netdev_dbg(dev->net, "intr status %d\n", status);
- break;
- }
-
- if (!netif_running (dev->net))
- return;
-
- memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
- status = usb_submit_urb (urb, GFP_ATOMIC);
- if (status != 0)
- netif_err(dev, timer, dev->net,
- "intr resubmit --> %d\n", status);
-}
-
/*-------------------------------------------------------------------------*/
void usbnet_pause_rx(struct usbnet *dev)
{
@@ -796,11 +793,13 @@ int usbnet_open (struct net_device *net)
if (info->manage_power) {
retval = info->manage_power(dev, 1);
if (retval < 0)
- goto done;
+ goto done_manage_power_error;
usb_autopm_put_interface(dev->intf);
}
return retval;
+done_manage_power_error:
+ clear_bit(EVENT_DEV_OPEN, &dev->flags);
done:
usb_autopm_put_interface(dev->intf);
done_nopm:
@@ -876,9 +875,9 @@ void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
{
struct usbnet *dev = netdev_priv(net);
- strncpy (info->driver, dev->driver_name, sizeof info->driver);
- strncpy (info->version, DRIVER_VERSION, sizeof info->version);
- strncpy (info->fw_version, dev->driver_info->description,
+ strlcpy (info->driver, dev->driver_name, sizeof info->driver);
+ strlcpy (info->version, DRIVER_VERSION, sizeof info->version);
+ strlcpy (info->fw_version, dev->driver_info->description,
sizeof info->fw_version);
usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
}
@@ -1202,6 +1201,21 @@ deferred:
}
EXPORT_SYMBOL_GPL(usbnet_start_xmit);
+static void rx_alloc_submit(struct usbnet *dev, gfp_t flags)
+{
+ struct urb *urb;
+ int i;
+
+ /* don't refill the queue all at once */
+ for (i = 0; i < 10 && dev->rxq.qlen < RX_QLEN(dev); i++) {
+ urb = usb_alloc_urb(0, flags);
+ if (urb != NULL) {
+ if (rx_submit(dev, urb, flags) == -ENOLINK)
+ return;
+ }
+ }
+}
+
/*-------------------------------------------------------------------------*/
// tasklet (work deferred from completions, in_irq) or timer
@@ -1241,26 +1255,14 @@ static void usbnet_bh (unsigned long param)
!timer_pending (&dev->delay) &&
!test_bit (EVENT_RX_HALT, &dev->flags)) {
int temp = dev->rxq.qlen;
- int qlen = RX_QLEN (dev);
-
- if (temp < qlen) {
- struct urb *urb;
- int i;
-
- // don't refill the queue all at once
- for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
- urb = usb_alloc_urb (0, GFP_ATOMIC);
- if (urb != NULL) {
- if (rx_submit (dev, urb, GFP_ATOMIC) ==
- -ENOLINK)
- return;
- }
- }
+
+ if (temp < RX_QLEN(dev)) {
+ rx_alloc_submit(dev, GFP_ATOMIC);
if (temp != dev->rxq.qlen)
netif_dbg(dev, link, dev->net,
"rxqlen %d --> %d\n",
temp, dev->rxq.qlen);
- if (dev->rxq.qlen < qlen)
+ if (dev->rxq.qlen < RX_QLEN(dev))
tasklet_schedule (&dev->bh);
}
if (dev->txq.qlen < TX_QLEN (dev))
@@ -1307,7 +1309,6 @@ void usbnet_disconnect (struct usb_interface *intf)
usb_free_urb(dev->interrupt);
free_netdev(net);
- usb_put_dev (xdev);
}
EXPORT_SYMBOL_GPL(usbnet_disconnect);
@@ -1363,8 +1364,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
xdev = interface_to_usbdev (udev);
interface = udev->cur_altsetting;
- usb_get_dev (xdev);
-
status = -ENOMEM;
// set up our own records
@@ -1493,7 +1492,6 @@ out3:
out1:
free_netdev(net);
out:
- usb_put_dev(xdev);
return status;
}
EXPORT_SYMBOL_GPL(usbnet_probe);
@@ -1513,6 +1511,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message)
spin_lock_irq(&dev->txq.lock);
/* don't autosuspend while transmitting */
if (dev->txq.qlen && PMSG_IS_AUTO(message)) {
+ dev->suspend_count--;
spin_unlock_irq(&dev->txq.lock);
return -EBUSY;
} else {
@@ -1569,6 +1568,13 @@ int usbnet_resume (struct usb_interface *intf)
spin_unlock_irq(&dev->txq.lock);
if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+ /* handle remote wakeup ASAP */
+ if (!dev->wait &&
+ netif_device_present(dev->net) &&
+ !timer_pending(&dev->delay) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags))
+ rx_alloc_submit(dev, GFP_KERNEL);
+
if (!(dev->txq.qlen >= TX_QLEN(dev)))
netif_tx_wake_all_queues(dev->net);
tasklet_schedule (&dev->bh);
@@ -1587,7 +1593,7 @@ static int __init usbnet_init(void)
BUILD_BUG_ON(
FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));
- random_ether_addr(node_id);
+ eth_random_addr(node_id);
return 0;
}
module_init(usbnet_init);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9ce6995..83d2b0c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -42,7 +42,8 @@ module_param(gso, bool, 0444);
#define VIRTNET_DRIVER_VERSION "1.0.0"
struct virtnet_stats {
- struct u64_stats_sync syncp;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync rx_syncp;
u64 tx_bytes;
u64 tx_packets;
@@ -300,10 +301,10 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
hdr = skb_vnet_hdr(skb);
- u64_stats_update_begin(&stats->syncp);
+ u64_stats_update_begin(&stats->rx_syncp);
stats->rx_bytes += skb->len;
stats->rx_packets++;
- u64_stats_update_end(&stats->syncp);
+ u64_stats_update_end(&stats->rx_syncp);
if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
pr_debug("Needs csum!\n");
@@ -565,10 +566,10 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
- u64_stats_update_begin(&stats->syncp);
+ u64_stats_update_begin(&stats->tx_syncp);
stats->tx_bytes += skb->len;
stats->tx_packets++;
- u64_stats_update_end(&stats->syncp);
+ u64_stats_update_end(&stats->tx_syncp);
tot_sgs += skb_vnet_hdr(skb)->num_sg;
dev_kfree_skb_any(skb);
@@ -703,12 +704,16 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
u64 tpackets, tbytes, rpackets, rbytes;
do {
- start = u64_stats_fetch_begin(&stats->syncp);
+ start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
tpackets = stats->tx_packets;
tbytes = stats->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
rpackets = stats->rx_packets;
rbytes = stats->rx_bytes;
- } while (u64_stats_fetch_retry(&stats->syncp, start));
+ } while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));
tot->rx_packets += rpackets;
tot->tx_packets += tpackets;
@@ -1057,7 +1062,7 @@ static int virtnet_probe(struct virtio_device *vdev)
return -ENOMEM;
/* Set up network device as normal. */
- dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
dev->netdev_ops = &virtnet_netdev;
dev->features = NETIF_F_HIGHDMA;
@@ -1231,11 +1236,6 @@ static int virtnet_freeze(struct virtio_device *vdev)
vi->config_enable = false;
mutex_unlock(&vi->config_lock);
- virtqueue_disable_cb(vi->rvq);
- virtqueue_disable_cb(vi->svq);
- if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
- virtqueue_disable_cb(vi->cvq);
-
netif_device_detach(vi->dev);
cancel_delayed_work_sync(&vi->refill);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3f04ba0..93e0cfb 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1037,7 +1037,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
#endif
dev_dbg(&adapter->netdev->dev,
"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
- (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
+ (u32)(ctx.sop_txd -
tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index d7a65e1..44db8b7 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -231,7 +231,7 @@ static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
}
p = icp;
- count = x25_asy_esc(p, (unsigned char *) sl->xbuff, len);
+ count = x25_asy_esc(p, sl->xbuff, len);
/* Order of next two lines is *very* important.
* When we are sending a little amount of data,
diff --git a/drivers/net/wimax/i2400m/Kconfig b/drivers/net/wimax/i2400m/Kconfig
index 672de18..71453db 100644
--- a/drivers/net/wimax/i2400m/Kconfig
+++ b/drivers/net/wimax/i2400m/Kconfig
@@ -7,9 +7,6 @@ config WIMAX_I2400M
comment "Enable USB support to see WiMAX USB drivers"
depends on USB = n
-comment "Enable MMC support to see WiMAX SDIO drivers"
- depends on MMC = n
-
config WIMAX_I2400M_USB
tristate "Intel Wireless WiMAX Connection 2400 over USB (including 5x50)"
depends on WIMAX && USB
@@ -21,25 +18,6 @@ config WIMAX_I2400M_USB
If unsure, it is safe to select M (module).
-config WIMAX_I2400M_SDIO
- tristate "Intel Wireless WiMAX Connection 2400 over SDIO"
- depends on WIMAX && MMC
- select WIMAX_I2400M
- help
- Select if you have a device based on the Intel WiMAX
- Connection 2400 over SDIO.
-
- If unsure, it is safe to select M (module).
-
-config WIMAX_IWMC3200_SDIO
- bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO (EXPERIMENTAL)"
- depends on WIMAX_I2400M_SDIO
- depends on EXPERIMENTAL
- select IWMC3200TOP
- help
- Select if you have a device based on the Intel Multicom WiMAX
- Connection 3200 over SDIO.
-
config WIMAX_I2400M_DEBUG_LEVEL
int "WiMAX i2400m debug level"
depends on WIMAX_I2400M
diff --git a/drivers/net/wimax/i2400m/Makefile b/drivers/net/wimax/i2400m/Makefile
index 5d9e018..f6d19c3 100644
--- a/drivers/net/wimax/i2400m/Makefile
+++ b/drivers/net/wimax/i2400m/Makefile
@@ -1,7 +1,6 @@
obj-$(CONFIG_WIMAX_I2400M) += i2400m.o
obj-$(CONFIG_WIMAX_I2400M_USB) += i2400m-usb.o
-obj-$(CONFIG_WIMAX_I2400M_SDIO) += i2400m-sdio.o
i2400m-y := \
control.o \
@@ -21,10 +20,3 @@ i2400m-usb-y := \
usb-tx.o \
usb-rx.o \
usb.o
-
-
-i2400m-sdio-y := \
- sdio.o \
- sdio-tx.o \
- sdio-fw.o \
- sdio-rx.o
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 2fea02b..4a01e5c 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -130,7 +130,7 @@ ssize_t i2400m_tlv_match(const struct i2400m_tlv_hdr *tlv,
&& le16_to_cpu(tlv->length) + sizeof(*tlv) != tlv_size) {
size_t size = le16_to_cpu(tlv->length) + sizeof(*tlv);
printk(KERN_WARNING "W: tlv type 0x%x mismatched because of "
- "size (got %zu vs %zu expected)\n",
+ "size (got %zu vs %zd expected)\n",
tlv_type, size, tlv_size);
return size;
}
@@ -235,7 +235,7 @@ const struct i2400m_tlv_hdr *i2400m_tlv_find(
break;
if (match > 0)
dev_warn(dev, "TLV type 0x%04x found with size "
- "mismatch (%zu vs %zu needed)\n",
+ "mismatch (%zu vs %zd needed)\n",
tlv_type, match, tlv_size);
}
return tlv;
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 47cae71..0254261 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -754,8 +754,7 @@ EXPORT_SYMBOL_GPL(i2400m_error_recovery);
/*
* Alloc the command and ack buffers for boot mode
*
- * Get the buffers needed to deal with boot mode messages. These
- * buffers need to be allocated before the sdio receive irq is setup.
+ * Get the buffers needed to deal with boot mode messages.
*/
static
int i2400m_bm_buf_alloc(struct i2400m *i2400m)
@@ -897,7 +896,7 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
result = i2400m_read_mac_addr(i2400m);
if (result < 0)
goto error_read_mac_addr;
- random_ether_addr(i2400m->src_mac_addr);
+ eth_random_addr(i2400m->src_mac_addr);
i2400m->pm_notifier.notifier_call = i2400m_pm_notifier;
register_pm_notifier(&i2400m->pm_notifier);
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 7cbd7d2..283237f 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -51,8 +51,7 @@
* firmware. Normal hardware takes only signed firmware.
*
* On boot mode, in USB, we write to the device using the bulk out
- * endpoint and read from it in the notification endpoint. In SDIO we
- * talk to it via the write address and read from the read address.
+ * endpoint and read from it in the notification endpoint.
*
* Upon entrance to boot mode, the device sends (preceded with a few
* zero length packets (ZLPs) on the notification endpoint in USB) a
@@ -1268,7 +1267,7 @@ int i2400m_fw_check(struct i2400m *i2400m, const void *bcf, size_t bcf_size)
size_t leftover, offset, header_len, size;
leftover = top - itr;
- offset = itr - (const void *) bcf;
+ offset = itr - bcf;
if (leftover <= sizeof(*bcf_hdr)) {
dev_err(dev, "firmware %s: %zu B left at @%zx, "
"not enough for BCF header\n",
diff --git a/drivers/net/wimax/i2400m/i2400m-sdio.h b/drivers/net/wimax/i2400m/i2400m-sdio.h
deleted file mode 100644
index 1d63ffd..0000000
--- a/drivers/net/wimax/i2400m/i2400m-sdio.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Intel Wireless WiMAX Connection 2400m
- * SDIO-specific i2400m driver definitions
- *
- *
- * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <linux-wimax@intel.com>
- * Brian Bian <brian.bian@intel.com>
- * Dirk Brandewie <dirk.j.brandewie@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- * Yanir Lubetkin <yanirx.lubetkin@intel.com>
- * - Initial implementation
- *
- *
- * This driver implements the bus-specific part of the i2400m for
- * SDIO. Check i2400m.h for a generic driver description.
- *
- * ARCHITECTURE
- *
- * This driver sits under the bus-generic i2400m driver, providing the
- * connection to the device.
- *
- * When probed, all the function pointers are setup and then the
- * bus-generic code called. The generic driver will then use the
- * provided pointers for uploading firmware (i2400ms_bus_bm*() in
- * sdio-fw.c) and then setting up the device (i2400ms_dev_*() in
- * sdio.c).
- *
- * Once firmware is uploaded, TX functions (sdio-tx.c) are called when
- * data is ready for transmission in the TX fifo; then the SDIO IRQ is
- * fired and data is available (sdio-rx.c), it is sent to the generic
- * driver for processing with i2400m_rx.
- */
-
-#ifndef __I2400M_SDIO_H__
-#define __I2400M_SDIO_H__
-
-#include "i2400m.h"
-
-/* Host-Device interface for SDIO */
-enum {
- I2400M_SDIO_BOOT_RETRIES = 3,
- I2400MS_BLK_SIZE = 256,
- I2400MS_PL_SIZE_MAX = 0x3E00,
-
- I2400MS_DATA_ADDR = 0x0,
- I2400MS_INTR_STATUS_ADDR = 0x13,
- I2400MS_INTR_CLEAR_ADDR = 0x13,
- I2400MS_INTR_ENABLE_ADDR = 0x14,
- I2400MS_INTR_GET_SIZE_ADDR = 0x2C,
- /* The number of ticks to wait for the device to signal that
- * it is ready */
- I2400MS_INIT_SLEEP_INTERVAL = 100,
- /* How long to wait for the device to settle after reset */
- I2400MS_SETTLE_TIME = 40,
- /* The number of msec to wait for IOR after sending IOE */
- IWMC3200_IOR_TIMEOUT = 10,
-};
-
-
-/**
- * struct i2400ms - descriptor for a SDIO connected i2400m
- *
- * @i2400m: bus-generic i2400m implementation; has to be first (see
- * it's documentation in i2400m.h).
- *
- * @func: pointer to our SDIO function
- *
- * @tx_worker: workqueue struct used to TX data when the bus-generic
- * code signals packets are pending for transmission to the device.
- *
- * @tx_workqueue: workqeueue used for data TX; we don't use the
- * system's workqueue as that might cause deadlocks with code in
- * the bus-generic driver. The read/write operation to the queue
- * is protected with spinlock (tx_lock in struct i2400m) to avoid
- * the queue being destroyed in the middle of a the queue read/write
- * operation.
- *
- * @debugfs_dentry: dentry for the SDIO specific debugfs files
- *
- * Note this value is set to NULL upon destruction; this is
- * because some routinges use it to determine if we are inside the
- * probe() path or some other path. When debugfs is disabled,
- * creation sets the dentry to '(void*) -ENODEV', which is valid
- * for the test.
- */
-struct i2400ms {
- struct i2400m i2400m; /* FIRST! See doc */
- struct sdio_func *func;
-
- struct work_struct tx_worker;
- struct workqueue_struct *tx_workqueue;
- char tx_wq_name[32];
-
- struct dentry *debugfs_dentry;
-
- wait_queue_head_t bm_wfa_wq;
- int bm_wait_result;
- size_t bm_ack_size;
-
- /* Device is any of the iwmc3200 SKUs */
- unsigned iwmc3200:1;
-};
-
-
-static inline
-void i2400ms_init(struct i2400ms *i2400ms)
-{
- i2400m_init(&i2400ms->i2400m);
-}
-
-
-extern int i2400ms_rx_setup(struct i2400ms *);
-extern void i2400ms_rx_release(struct i2400ms *);
-
-extern int i2400ms_tx_setup(struct i2400ms *);
-extern void i2400ms_tx_release(struct i2400ms *);
-extern void i2400ms_bus_tx_kick(struct i2400m *);
-
-extern ssize_t i2400ms_bus_bm_cmd_send(struct i2400m *,
- const struct i2400m_bootrom_header *,
- size_t, int);
-extern ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *,
- struct i2400m_bootrom_header *,
- size_t);
-extern void i2400ms_bus_bm_release(struct i2400m *);
-extern int i2400ms_bus_bm_setup(struct i2400m *);
-
-#endif /* #ifndef __I2400M_SDIO_H__ */
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index c806d45..79c6505 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -46,7 +46,7 @@
* - bus generic driver (this part)
*
* The bus specific driver sets up stuff specific to the bus the
- * device is connected to (USB, SDIO, PCI, tam-tam...non-authoritative
+ * device is connected to (USB, PCI, tam-tam...non-authoritative
* nor binding list) which is basically the device-model management
* (probe/disconnect, etc), moving data from device to kernel and
* back, doing the power saving details and resetting the device.
@@ -238,14 +238,13 @@ struct i2400m_barker_db;
* amount needed for loading firmware, where us dev_start/stop setup
* the rest needed to do full data/control traffic.
*
- * @bus_tx_block_size: [fill] SDIO imposes a 256 block size, USB 16,
- * so we have a tx_blk_size variable that the bus layer sets to
- * tell the engine how much of that we need.
+ * @bus_tx_block_size: [fill] USB imposes a 16 block size, but other
+ * busses will differ. So we have a tx_blk_size variable that the
+ * bus layer sets to tell the engine how much of that we need.
*
* @bus_tx_room_min: [fill] Minimum room required while allocating
- * TX queue's buffer space for message header. SDIO requires
- * 224 bytes and USB 16 bytes. Refer bus specific driver code
- * for details.
+ * TX queue's buffer space for message header. USB requires
+ * 16 bytes. Refer to bus specific driver code for details.
*
* @bus_pl_size_max: [fill] Maximum payload size.
*
diff --git a/drivers/net/wimax/i2400m/sdio-debug-levels.h b/drivers/net/wimax/i2400m/sdio-debug-levels.h
deleted file mode 100644
index c519987..0000000
--- a/drivers/net/wimax/i2400m/sdio-debug-levels.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * debug levels control file for the i2400m module's
- */
-#ifndef __debug_levels__h__
-#define __debug_levels__h__
-
-/* Maximum compile and run time debug level for all submodules */
-#define D_MODULENAME i2400m_sdio
-#define D_MASTER CONFIG_WIMAX_I2400M_DEBUG_LEVEL
-
-#include <linux/wimax/debug.h>
-
-/* List of all the enabled modules */
-enum d_module {
- D_SUBMODULE_DECLARE(main),
- D_SUBMODULE_DECLARE(tx),
- D_SUBMODULE_DECLARE(rx),
- D_SUBMODULE_DECLARE(fw)
-};
-
-
-#endif /* #ifndef __debug_levels__h__ */
diff --git a/drivers/net/wimax/i2400m/sdio-fw.c b/drivers/net/wimax/i2400m/sdio-fw.c
deleted file mode 100644
index 8e02541..0000000
--- a/drivers/net/wimax/i2400m/sdio-fw.c
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Intel Wireless WiMAX Connection 2400m
- * Firmware uploader's SDIO specifics
- *
- *
- * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <linux-wimax@intel.com>
- * Yanir Lubetkin <yanirx.lubetkin@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- * - Initial implementation
- *
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- * - Bus generic/specific split for USB
- *
- * Dirk Brandewie <dirk.j.brandewie@intel.com>
- * - Initial implementation for SDIO
- *
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- * - SDIO rehash for changes in the bus-driver model
- *
- * Dirk Brandewie <dirk.j.brandewie@intel.com>
- * - Make it IRQ based, not polling
- *
- * THE PROCEDURE
- *
- * See fw.c for the generic description of this procedure.
- *
- * This file implements only the SDIO specifics. It boils down to how
- * to send a command and waiting for an acknowledgement from the
- * device.
- *
- * All this code is sequential -- all i2400ms_bus_bm_*() functions are
- * executed in the same thread, except i2400ms_bm_irq() [on its own by
- * the SDIO driver]. This makes it possible to avoid locking.
- *
- * COMMAND EXECUTION
- *
- * The generic firmware upload code will call i2400m_bus_bm_cmd_send()
- * to send commands.
- *
- * The SDIO device expects things in 256-byte blocks, so it will pad
- * it, compute the checksum (if needed) and pass it to SDIO.
- *
- * ACK RECEPTION
- *
- * This works in IRQ mode -- the fw loader says when to wait for data
- * and for that it calls i2400ms_bus_bm_wait_for_ack().
- *
- * This checks if there is any data available (RX size > 0); if not,
- * waits for the IRQ handler to notify about it. Once there is data,
- * it is read and passed to the caller. Doing it this way we don't
- * need much coordination/locking, and it makes it much more difficult
- * for an interrupt to be lost and for the wait_for_ack() function to
- * get stuck even when data is pending.
- */
-#include <linux/mmc/sdio_func.h>
-#include "i2400m-sdio.h"
-
-
-#define D_SUBMODULE fw
-#include "sdio-debug-levels.h"
-
-
-/*
- * Send a boot-mode command to the SDIO function
- *
- * We use a bounce buffer (i2400m->bm_cmd_buf) because we need to
- * touch the header if the RAW flag is not set.
- *
- * @flags: pass thru from i2400m_bm_cmd()
- * @return: cmd_size if ok, < 0 errno code on error.
- *
- * Note the command is padded to the SDIO block size for the device.
- */
-ssize_t i2400ms_bus_bm_cmd_send(struct i2400m *i2400m,
- const struct i2400m_bootrom_header *_cmd,
- size_t cmd_size, int flags)
-{
- ssize_t result;
- struct device *dev = i2400m_dev(i2400m);
- struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
- int opcode = _cmd == NULL ? -1 : i2400m_brh_get_opcode(_cmd);
- struct i2400m_bootrom_header *cmd;
- /* SDIO restriction */
- size_t cmd_size_a = ALIGN(cmd_size, I2400MS_BLK_SIZE);
-
- d_fnstart(5, dev, "(i2400m %p cmd %p size %zu)\n",
- i2400m, _cmd, cmd_size);
- result = -E2BIG;
- if (cmd_size > I2400M_BM_CMD_BUF_SIZE)
- goto error_too_big;
-
- if (_cmd != i2400m->bm_cmd_buf)
- memmove(i2400m->bm_cmd_buf, _cmd, cmd_size);
- cmd = i2400m->bm_cmd_buf;
- if (cmd_size_a > cmd_size) /* Zero pad space */
- memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size);
- if ((flags & I2400M_BM_CMD_RAW) == 0) {
- if (WARN_ON(i2400m_brh_get_response_required(cmd) == 0))
- dev_warn(dev, "SW BUG: response_required == 0\n");
- i2400m_bm_cmd_prepare(cmd);
- }
- d_printf(4, dev, "BM cmd %d: %zu bytes (%zu padded)\n",
- opcode, cmd_size, cmd_size_a);
- d_dump(5, dev, cmd, cmd_size);
-
- sdio_claim_host(i2400ms->func); /* Send & check */
- result = sdio_memcpy_toio(i2400ms->func, I2400MS_DATA_ADDR,
- i2400m->bm_cmd_buf, cmd_size_a);
- sdio_release_host(i2400ms->func);
- if (result < 0) {
- dev_err(dev, "BM cmd %d: cannot send: %ld\n",
- opcode, (long) result);
- goto error_cmd_send;
- }
- result = cmd_size;
-error_cmd_send:
-error_too_big:
- d_fnend(5, dev, "(i2400m %p cmd %p size %zu) = %d\n",
- i2400m, _cmd, cmd_size, (int) result);
- return result;
-}
-
-
-/*
- * Read an ack from the device's boot-mode
- *
- * @i2400m:
- * @_ack: pointer to where to store the read data
- * @ack_size: how many bytes we should read
- *
- * Returns: < 0 errno code on error; otherwise, amount of received bytes.
- *
- * The ACK for a BM command is always at least sizeof(*ack) bytes, so
- * check for that. We don't need to check for device reboots.
- *
- */
-ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *i2400m,
- struct i2400m_bootrom_header *ack,
- size_t ack_size)
-{
- ssize_t result;
- struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
- struct sdio_func *func = i2400ms->func;
- struct device *dev = &func->dev;
- int size;
-
- BUG_ON(sizeof(*ack) > ack_size);
-
- d_fnstart(5, dev, "(i2400m %p ack %p size %zu)\n",
- i2400m, ack, ack_size);
-
- result = wait_event_timeout(i2400ms->bm_wfa_wq,
- i2400ms->bm_ack_size != -EINPROGRESS,
- 2 * HZ);
- if (result == 0) {
- result = -ETIMEDOUT;
- dev_err(dev, "BM: error waiting for an ack\n");
- goto error_timeout;
- }
-
- spin_lock(&i2400m->rx_lock);
- result = i2400ms->bm_ack_size;
- BUG_ON(result == -EINPROGRESS);
- if (result < 0) /* so we exit when rx_release() is called */
- dev_err(dev, "BM: %s failed: %zd\n", __func__, result);
- else {
- size = min(ack_size, i2400ms->bm_ack_size);
- memcpy(ack, i2400m->bm_ack_buf, size);
- }
- /*
- * Remember always to clear the bm_ack_size to -EINPROGRESS
- * after the RX data is processed
- */
- i2400ms->bm_ack_size = -EINPROGRESS;
- spin_unlock(&i2400m->rx_lock);
-
-error_timeout:
- d_fnend(5, dev, "(i2400m %p ack %p size %zu) = %zd\n",
- i2400m, ack, ack_size, result);
- return result;
-}
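
A minimal standalone sketch of the bounce-buffer padding that i2400ms_bus_bm_cmd_send() performed above: copy the command into the buffer, then zero-fill up to the next block boundary before handing it to the bus. Buffer and helper names here are made up for the sketch.

#include <stdio.h>
#include <string.h>

#define BLK_SIZE 256                       /* SDIO block size used above */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

static unsigned char bounce[4 * BLK_SIZE]; /* stand-in for bm_cmd_buf */

static size_t pad_cmd(const void *cmd, size_t cmd_size)
{
        size_t padded = ALIGN_UP(cmd_size, BLK_SIZE);

        memmove(bounce, cmd, cmd_size);
        if (padded > cmd_size)             /* zero-fill the padding */
                memset(bounce + cmd_size, 0, padded - cmd_size);
        return padded;                     /* amount actually sent  */
}

int main(void)
{
        unsigned char cmd[300] = { 0x55 };

        printf("a 300-byte command is sent as %zu bytes\n",
               pad_cmd(cmd, sizeof(cmd)));
        return 0;
}
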
diff --git a/drivers/net/wimax/i2400m/sdio-rx.c b/drivers/net/wimax/i2400m/sdio-rx.c
deleted file mode 100644
index fb6396d..0000000
--- a/drivers/net/wimax/i2400m/sdio-rx.c
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Intel Wireless WiMAX Connection 2400m
- * SDIO RX handling
- *
- *
- * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <linux-wimax@intel.com>
- * Dirk Brandewie <dirk.j.brandewie@intel.com>
- * - Initial implementation
- *
- *
- * This handles the RX path on SDIO.
- *
- * The SDIO bus driver calls the "irq" routine when data is available.
- * This is not a traditional interrupt routine since the SDIO bus
- * driver calls us from its irq thread context. Because of this,
- * sleeping in the SDIO RX IRQ routine is okay.
- *
- * From there on, we obtain the size of the data that is available,
- * allocate an skb, copy it and then pass it to the generic driver's
- * RX routine [i2400m_rx()].
- *
- * ROADMAP
- *
- * i2400ms_irq()
- * i2400ms_rx()
- * __i2400ms_rx_get_size()
- * i2400m_is_boot_barker()
- * i2400m_rx()
- *
- * i2400ms_rx_setup()
- *
- * i2400ms_rx_release()
- */
-#include <linux/workqueue.h>
-#include <linux/wait.h>
-#include <linux/skbuff.h>
-#include <linux/mmc/sdio.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/slab.h>
-#include "i2400m-sdio.h"
-
-#define D_SUBMODULE rx
-#include "sdio-debug-levels.h"
-
-static const __le32 i2400m_ACK_BARKER[4] = {
- __constant_cpu_to_le32(I2400M_ACK_BARKER),
- __constant_cpu_to_le32(I2400M_ACK_BARKER),
- __constant_cpu_to_le32(I2400M_ACK_BARKER),
- __constant_cpu_to_le32(I2400M_ACK_BARKER)
-};
-
-
-/*
- * Read and return the amount of bytes available for RX
- *
- * The RX size has to be read like this: byte reads of three
- * sequential locations; then glue'em together.
- *
- * sdio_readl() doesn't work.
- */
-static ssize_t __i2400ms_rx_get_size(struct i2400ms *i2400ms)
-{
- int ret, cnt, val;
- ssize_t rx_size;
- unsigned xfer_size_addr;
- struct sdio_func *func = i2400ms->func;
- struct device *dev = &i2400ms->func->dev;
-
- d_fnstart(7, dev, "(i2400ms %p)\n", i2400ms);
- xfer_size_addr = I2400MS_INTR_GET_SIZE_ADDR;
- rx_size = 0;
- for (cnt = 0; cnt < 3; cnt++) {
- val = sdio_readb(func, xfer_size_addr + cnt, &ret);
- if (ret < 0) {
- dev_err(dev, "RX: Can't read byte %d of RX size from "
- "0x%08x: %d\n", cnt, xfer_size_addr + cnt, ret);
- rx_size = ret;
- goto error_read;
- }
- rx_size = rx_size << 8 | (val & 0xff);
- }
- d_printf(6, dev, "RX: rx_size is %ld\n", (long) rx_size);
-error_read:
- d_fnend(7, dev, "(i2400ms %p) = %ld\n", i2400ms, (long) rx_size);
- return rx_size;
-}
-
-
-/*
- * Read data from the device (when in normal operation)
- *
- * Allocate an SKB of the right size, read the data in and then
- * deliver it to the generic layer.
- *
- * We also check for a reboot barker. That means the device died and
- * we have to reboot it.
- */
-static
-void i2400ms_rx(struct i2400ms *i2400ms)
-{
- int ret;
- struct sdio_func *func = i2400ms->func;
- struct device *dev = &func->dev;
- struct i2400m *i2400m = &i2400ms->i2400m;
- struct sk_buff *skb;
- ssize_t rx_size;
-
- d_fnstart(7, dev, "(i2400ms %p)\n", i2400ms);
- rx_size = __i2400ms_rx_get_size(i2400ms);
- if (rx_size < 0) {
- ret = rx_size;
- goto error_get_size;
- }
- /*
- * Hardware quirk: make sure to clear the INTR status register
- * AFTER getting the data transfer size.
- */
- sdio_writeb(func, 1, I2400MS_INTR_CLEAR_ADDR, &ret);
-
- ret = -ENOMEM;
- skb = alloc_skb(rx_size, GFP_ATOMIC);
- if (NULL == skb) {
- dev_err(dev, "RX: unable to alloc skb\n");
- goto error_alloc_skb;
- }
- ret = sdio_memcpy_fromio(func, skb->data,
- I2400MS_DATA_ADDR, rx_size);
- if (ret < 0) {
- dev_err(dev, "RX: SDIO data read failed: %d\n", ret);
- goto error_memcpy_fromio;
- }
-
- rmb(); /* make sure we get boot_mode from dev_reset_handle */
- if (unlikely(i2400m->boot_mode == 1)) {
- spin_lock(&i2400m->rx_lock);
- i2400ms->bm_ack_size = rx_size;
- spin_unlock(&i2400m->rx_lock);
- memcpy(i2400m->bm_ack_buf, skb->data, rx_size);
- wake_up(&i2400ms->bm_wfa_wq);
- d_printf(5, dev, "RX: SDIO boot mode message\n");
- kfree_skb(skb);
- goto out;
- }
- ret = -EIO;
- if (unlikely(rx_size < sizeof(__le32))) {
- dev_err(dev, "HW BUG? only %zu bytes received\n", rx_size);
- goto error_bad_size;
- }
- if (likely(i2400m_is_d2h_barker(skb->data))) {
- skb_put(skb, rx_size);
- i2400m_rx(i2400m, skb);
- } else if (unlikely(i2400m_is_boot_barker(i2400m,
- skb->data, rx_size))) {
- ret = i2400m_dev_reset_handle(i2400m, "device rebooted");
- dev_err(dev, "RX: SDIO reboot barker\n");
- kfree_skb(skb);
- } else {
- i2400m_unknown_barker(i2400m, skb->data, rx_size);
- kfree_skb(skb);
- }
-out:
- d_fnend(7, dev, "(i2400ms %p) = void\n", i2400ms);
- return;
-
-error_memcpy_fromio:
- kfree_skb(skb);
-error_alloc_skb:
-error_get_size:
-error_bad_size:
- d_fnend(7, dev, "(i2400ms %p) = %d\n", i2400ms, ret);
-}
-
-
-/*
- * Process an interrupt from the SDIO card
- *
- * FIXME: need to process other events that are not just ready-to-read
- *
- * Checks there is data ready and then proceeds to read it.
- */
-static
-void i2400ms_irq(struct sdio_func *func)
-{
- int ret;
- struct i2400ms *i2400ms = sdio_get_drvdata(func);
- struct device *dev = &func->dev;
- int val;
-
- d_fnstart(6, dev, "(i2400ms %p)\n", i2400ms);
- val = sdio_readb(func, I2400MS_INTR_STATUS_ADDR, &ret);
- if (ret < 0) {
- dev_err(dev, "RX: Can't read interrupt status: %d\n", ret);
- goto error_no_irq;
- }
- if (!val) {
- dev_err(dev, "RX: BUG? got IRQ but no interrupt ready?\n");
- goto error_no_irq;
- }
- i2400ms_rx(i2400ms);
-error_no_irq:
- d_fnend(6, dev, "(i2400ms %p) = void\n", i2400ms);
-}
-
-
-/*
- * Setup SDIO RX
- *
- * Hooks up the IRQ handler and then enables IRQs.
- */
-int i2400ms_rx_setup(struct i2400ms *i2400ms)
-{
- int result;
- struct sdio_func *func = i2400ms->func;
- struct device *dev = &func->dev;
- struct i2400m *i2400m = &i2400ms->i2400m;
-
- d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
-
- init_waitqueue_head(&i2400ms->bm_wfa_wq);
- spin_lock(&i2400m->rx_lock);
- i2400ms->bm_wait_result = -EINPROGRESS;
- /*
-	 * Before enabling the RX interrupt, make sure bm_ack_size is
-	 * cleared to -EINPROGRESS, which indicates that no RX interrupt
-	 * has happened yet or that the previous one has been handled,
-	 * so we are ready to take a new interrupt.
- */
- i2400ms->bm_ack_size = -EINPROGRESS;
- spin_unlock(&i2400m->rx_lock);
-
- sdio_claim_host(func);
- result = sdio_claim_irq(func, i2400ms_irq);
- if (result < 0) {
- dev_err(dev, "Cannot claim IRQ: %d\n", result);
- goto error_irq_claim;
- }
- result = 0;
- sdio_writeb(func, 1, I2400MS_INTR_ENABLE_ADDR, &result);
- if (result < 0) {
- sdio_release_irq(func);
- dev_err(dev, "Failed to enable interrupts %d\n", result);
- }
-error_irq_claim:
- sdio_release_host(func);
- d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
- return result;
-}
-
-
-/*
- * Tear down SDIO RX
- *
- * Disables IRQs in the device and removes the IRQ handler.
- */
-void i2400ms_rx_release(struct i2400ms *i2400ms)
-{
- int result;
- struct sdio_func *func = i2400ms->func;
- struct device *dev = &func->dev;
- struct i2400m *i2400m = &i2400ms->i2400m;
-
- d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
- spin_lock(&i2400m->rx_lock);
- i2400ms->bm_ack_size = -EINTR;
- spin_unlock(&i2400m->rx_lock);
- wake_up_all(&i2400ms->bm_wfa_wq);
- sdio_claim_host(func);
- sdio_writeb(func, 0, I2400MS_INTR_ENABLE_ADDR, &result);
- sdio_release_irq(func);
- sdio_release_host(func);
- d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
-}
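
As a standalone sketch of the byte-glue that __i2400ms_rx_get_size() did above: three sequential 8-bit register reads combined into one size, most significant byte first. The register values below are invented for the demonstration.

#include <stdio.h>

static const unsigned char xfer_size_regs[3] = { 0x00, 0x01, 0x40 };

int main(void)
{
        long rx_size = 0;

        /* Glue the three bytes together, first byte is most significant. */
        for (int i = 0; i < 3; i++)
                rx_size = rx_size << 8 | (xfer_size_regs[i] & 0xff);

        printf("RX size = %ld bytes\n", rx_size);   /* 0x000140 = 320 */
        return 0;
}
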
diff --git a/drivers/net/wimax/i2400m/sdio-tx.c b/drivers/net/wimax/i2400m/sdio-tx.c
deleted file mode 100644
index b53cd1c..0000000
--- a/drivers/net/wimax/i2400m/sdio-tx.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Intel Wireless WiMAX Connection 2400m
- * SDIO TX transaction backends
- *
- *
- * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <linux-wimax@intel.com>
- * Dirk Brandewie <dirk.j.brandewie@intel.com>
- * - Initial implementation
- *
- *
- * Takes the TX messages in the i2400m's driver TX FIFO and sends them
- * to the device until there are no more.
- *
- * If we fail sending the message, we just drop it. There isn't much
- * we can do at this point. Most of the traffic is network, which has
- * recovery methods for dropped packets.
- *
- * The SDIO functions are not atomic, so we can't run from the context
- * where i2400m->bus_tx_kick() [i2400ms_bus_tx_kick()] is being called
- * (some times atomic). Thus, the actual TX work is deferred to a
- * workqueue.
- *
- * ROADMAP
- *
- * i2400ms_bus_tx_kick()
- * i2400ms_tx_submit() [through workqueue]
- *
- * i2400m_tx_setup()
- *
- * i2400m_tx_release()
- */
-#include <linux/mmc/sdio_func.h>
-#include "i2400m-sdio.h"
-
-#define D_SUBMODULE tx
-#include "sdio-debug-levels.h"
-
-
-/*
- * Pull TX transactions from the TX FIFO and send them to the device
- * until there are no more.
- */
-static
-void i2400ms_tx_submit(struct work_struct *ws)
-{
- int result;
- struct i2400ms *i2400ms = container_of(ws, struct i2400ms, tx_worker);
- struct i2400m *i2400m = &i2400ms->i2400m;
- struct sdio_func *func = i2400ms->func;
- struct device *dev = &func->dev;
- struct i2400m_msg_hdr *tx_msg;
- size_t tx_msg_size;
-
-	d_fnstart(4, dev, "(i2400ms %p, i2400m %p)\n", i2400ms, i2400m);
-
- while (NULL != (tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size))) {
- d_printf(2, dev, "TX: submitting %zu bytes\n", tx_msg_size);
- d_dump(5, dev, tx_msg, tx_msg_size);
-
- sdio_claim_host(func);
- result = sdio_memcpy_toio(func, 0, tx_msg, tx_msg_size);
- sdio_release_host(func);
-
- i2400m_tx_msg_sent(i2400m);
-
- if (result < 0) {
- dev_err(dev, "TX: cannot submit TX; tx_msg @%zu %zu B:"
- " %d\n", (void *) tx_msg - i2400m->tx_buf,
- tx_msg_size, result);
- }
-
- if (result == -ETIMEDOUT) {
- i2400m_error_recovery(i2400m);
- break;
- }
- d_printf(2, dev, "TX: %zub submitted\n", tx_msg_size);
- }
-
- d_fnend(4, dev, "(i2400ms %p) = void\n", i2400ms);
-}
-
-
-/*
- * The generic driver notifies us that there is data ready for TX
- *
- * Schedule a run of i2400ms_tx_submit() to handle it.
- */
-void i2400ms_bus_tx_kick(struct i2400m *i2400m)
-{
- struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
- struct device *dev = &i2400ms->func->dev;
- unsigned long flags;
-
- d_fnstart(3, dev, "(i2400m %p) = void\n", i2400m);
-
- /* schedule tx work, this is because tx may block, therefore
- * it has to run in a thread context.
- */
- spin_lock_irqsave(&i2400m->tx_lock, flags);
- if (i2400ms->tx_workqueue != NULL)
- queue_work(i2400ms->tx_workqueue, &i2400ms->tx_worker);
- spin_unlock_irqrestore(&i2400m->tx_lock, flags);
-
- d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
-}
-
-int i2400ms_tx_setup(struct i2400ms *i2400ms)
-{
- int result;
- struct device *dev = &i2400ms->func->dev;
- struct i2400m *i2400m = &i2400ms->i2400m;
- struct workqueue_struct *tx_workqueue;
- unsigned long flags;
-
- d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
-
- INIT_WORK(&i2400ms->tx_worker, i2400ms_tx_submit);
- snprintf(i2400ms->tx_wq_name, sizeof(i2400ms->tx_wq_name),
- "%s-tx", i2400m->wimax_dev.name);
- tx_workqueue =
- create_singlethread_workqueue(i2400ms->tx_wq_name);
- if (tx_workqueue == NULL) {
- dev_err(dev, "TX: failed to create workqueue\n");
- result = -ENOMEM;
- } else
- result = 0;
- spin_lock_irqsave(&i2400m->tx_lock, flags);
- i2400ms->tx_workqueue = tx_workqueue;
- spin_unlock_irqrestore(&i2400m->tx_lock, flags);
- d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
- return result;
-}
-
-void i2400ms_tx_release(struct i2400ms *i2400ms)
-{
- struct i2400m *i2400m = &i2400ms->i2400m;
- struct workqueue_struct *tx_workqueue;
- unsigned long flags;
-
- tx_workqueue = i2400ms->tx_workqueue;
-
- spin_lock_irqsave(&i2400m->tx_lock, flags);
- i2400ms->tx_workqueue = NULL;
- spin_unlock_irqrestore(&i2400m->tx_lock, flags);
-
- if (tx_workqueue)
- destroy_workqueue(tx_workqueue);
-}
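
A kernel-style sketch of the pattern the removed file used: the TX kick may run in atomic context, so it only queues work on a single-threaded workqueue where the (possibly sleeping) bus transfer happens. This is the editor's illustration with hypothetical names, not the removed driver's code.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

static struct workqueue_struct *tx_wq;
static struct work_struct tx_work;
static DEFINE_SPINLOCK(tx_lock);

static void tx_submit(struct work_struct *ws)
{
        /* Safe to sleep here: pull messages from a FIFO and send them. */
}

/* May be called from atomic context, so only queue the work. */
static void tx_kick(void)
{
        unsigned long flags;

        spin_lock_irqsave(&tx_lock, flags);
        if (tx_wq)
                queue_work(tx_wq, &tx_work);
        spin_unlock_irqrestore(&tx_lock, flags);
}

static int __init tx_sketch_init(void)
{
        INIT_WORK(&tx_work, tx_submit);
        tx_wq = create_singlethread_workqueue("sketch-tx");
        if (!tx_wq)
                return -ENOMEM;
        tx_kick();
        return 0;
}

static void __exit tx_sketch_exit(void)
{
        destroy_workqueue(tx_wq);   /* flushes pending work first */
}

module_init(tx_sketch_init);
module_exit(tx_sketch_exit);
MODULE_LICENSE("GPL");
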
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
deleted file mode 100644
index 21a9edd..0000000
--- a/drivers/net/wimax/i2400m/sdio.c
+++ /dev/null
@@ -1,602 +0,0 @@
-/*
- * Intel Wireless WiMAX Connection 2400m
- * Linux driver model glue for the SDIO device, reset & fw upload
- *
- *
- * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
- * Dirk Brandewie <dirk.j.brandewie@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- * Yanir Lubetkin <yanirx.lubetkin@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
- * See i2400m-sdio.h for a general description of this driver.
- *
- * This file implements driver model glue, and hook ups for the
- * generic driver to implement the bus-specific functions (device
- * communication setup/tear down, firmware upload and resetting).
- *
- * ROADMAP
- *
- * i2400m_probe()
- * alloc_netdev()
- * i2400ms_netdev_setup()
- * i2400ms_init()
- * i2400m_netdev_setup()
- * i2400ms_enable_function()
- * i2400m_setup()
- *
- * i2400m_remove()
- * i2400m_release()
- * free_netdev(net_dev)
- *
- * i2400ms_bus_reset() Called by i2400m_reset
- * __i2400ms_reset()
- * __i2400ms_send_barker()
- */
-
-#include <linux/slab.h>
-#include <linux/debugfs.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/sdio.h>
-#include <linux/mmc/sdio_func.h>
-#include "i2400m-sdio.h"
-#include <linux/wimax/i2400m.h>
-#include <linux/module.h>
-
-#define D_SUBMODULE main
-#include "sdio-debug-levels.h"
-
-/* IOE WiMAX function timeout in seconds */
-static int ioe_timeout = 2;
-module_param(ioe_timeout, int, 0);
-
-static char i2400ms_debug_params[128];
-module_param_string(debug, i2400ms_debug_params, sizeof(i2400ms_debug_params),
- 0644);
-MODULE_PARM_DESC(debug,
- "String of space-separated NAME:VALUE pairs, where NAMEs "
- "are the different debug submodules and VALUE are the "
- "initial debug value to set.");
-
-/* Our firmware file name list */
-static const char *i2400ms_bus_fw_names[] = {
-#define I2400MS_FW_FILE_NAME "i2400m-fw-sdio-1.3.sbcf"
- I2400MS_FW_FILE_NAME,
- NULL
-};
-
-
-static const struct i2400m_poke_table i2400ms_pokes[] = {
- I2400M_FW_POKE(0x6BE260, 0x00000088),
- I2400M_FW_POKE(0x080550, 0x00000005),
- I2400M_FW_POKE(0xAE0000, 0x00000000),
- I2400M_FW_POKE(0x000000, 0x00000000), /* MUST be 0 terminated or bad
- * things will happen */
-};
-
-/*
- * Enable the SDIO function
- *
- * Tries to enable the SDIO function; might fail if it is still not
- * ready (in some hardware, the SDIO WiMAX function is only enabled
- * when we explicitly ask it to). Tries until a timeout is
- * reached.
- *
- * The @maxtries argument indicates how many times (at most) enabling
- * the function should be tried. 0 means forever. This works together
- * with the timeout (i.e. it stops trying as soon as the maximum
- * number of tries is reached _or_ as soon as the timeout is reached).
- *
- * The reverse of this is...sdio_disable_function()
- *
- * Returns: 0 if the SDIO function was enabled, < 0 errno code on
- * error (-ENODEV when it was unable to enable the function).
- */
-static
-int i2400ms_enable_function(struct i2400ms *i2400ms, unsigned maxtries)
-{
- struct sdio_func *func = i2400ms->func;
- u64 timeout;
- int err;
- struct device *dev = &func->dev;
- unsigned tries = 0;
-
- d_fnstart(3, dev, "(func %p)\n", func);
- /* Setup timeout (FIXME: This needs to read the CIS table to
- * get a real timeout) and then wait for the device to signal
- * it is ready */
- timeout = get_jiffies_64() + ioe_timeout * HZ;
- err = -ENODEV;
- while (err != 0 && time_before64(get_jiffies_64(), timeout)) {
- sdio_claim_host(func);
- /*
-		 * There is a silicon bug on the IWMC3200, where the
- * IOE timeout will cause problems on Moorestown
- * platforms (system hang). We explicitly overwrite
- * func->enable_timeout here to work around the issue.
- */
- if (i2400ms->iwmc3200)
- func->enable_timeout = IWMC3200_IOR_TIMEOUT;
- err = sdio_enable_func(func);
- if (0 == err) {
- sdio_release_host(func);
- d_printf(2, dev, "SDIO function enabled\n");
- goto function_enabled;
- }
- d_printf(2, dev, "SDIO function failed to enable: %d\n", err);
- sdio_release_host(func);
- if (maxtries > 0 && ++tries >= maxtries) {
- err = -ETIME;
- break;
- }
- msleep(I2400MS_INIT_SLEEP_INTERVAL);
- }
- /* If timed out, device is not there yet -- get -ENODEV so
- * the device driver core will retry later on. */
- if (err == -ETIME) {
- dev_err(dev, "Can't enable WiMAX function; "
- " has the function been enabled?\n");
- err = -ENODEV;
- }
-function_enabled:
- d_fnend(3, dev, "(func %p) = %d\n", func, err);
- return err;
-}
-
-
-/*
- * Setup minimal device communication infrastructure needed to at
- * least be able to update the firmware.
- *
- * Note the ugly trick: if we are in the probe path
- * (i2400ms->debugfs_dentry == NULL), we only retry function
- * enablement once, to avoid racing with the iwmc3200 top controller.
- */
-static
-int i2400ms_bus_setup(struct i2400m *i2400m)
-{
- int result;
- struct i2400ms *i2400ms =
- container_of(i2400m, struct i2400ms, i2400m);
- struct device *dev = i2400m_dev(i2400m);
- struct sdio_func *func = i2400ms->func;
- int retries;
-
- sdio_claim_host(func);
- result = sdio_set_block_size(func, I2400MS_BLK_SIZE);
- sdio_release_host(func);
- if (result < 0) {
- dev_err(dev, "Failed to set block size: %d\n", result);
- goto error_set_blk_size;
- }
-
- if (i2400ms->iwmc3200 && i2400ms->debugfs_dentry == NULL)
- retries = 1;
- else
- retries = 0;
- result = i2400ms_enable_function(i2400ms, retries);
- if (result < 0) {
- dev_err(dev, "Cannot enable SDIO function: %d\n", result);
- goto error_func_enable;
- }
-
- result = i2400ms_tx_setup(i2400ms);
- if (result < 0)
- goto error_tx_setup;
- result = i2400ms_rx_setup(i2400ms);
- if (result < 0)
- goto error_rx_setup;
- return 0;
-
-error_rx_setup:
- i2400ms_tx_release(i2400ms);
-error_tx_setup:
- sdio_claim_host(func);
- sdio_disable_func(func);
- sdio_release_host(func);
-error_func_enable:
-error_set_blk_size:
- return result;
-}
-
-
-/*
- * Tear down minimal device communication infrastructure needed to at
- * least be able to update the firmware.
- */
-static
-void i2400ms_bus_release(struct i2400m *i2400m)
-{
- struct i2400ms *i2400ms =
- container_of(i2400m, struct i2400ms, i2400m);
- struct sdio_func *func = i2400ms->func;
-
- i2400ms_rx_release(i2400ms);
- i2400ms_tx_release(i2400ms);
- sdio_claim_host(func);
- sdio_disable_func(func);
- sdio_release_host(func);
-}
-
-
-/*
- * Setup driver resources needed to communicate with the device
- *
- * The fw needs some time to settle, and it was just uploaded,
- * so give it a break first. I'd prefer to just wait for the device to
- * send something, but it seems the poking we do to enable SDIO stuff
- * interferes with it, so just give it a break before starting...
- */
-static
-int i2400ms_bus_dev_start(struct i2400m *i2400m)
-{
- struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
- struct sdio_func *func = i2400ms->func;
- struct device *dev = &func->dev;
-
- d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
- msleep(200);
- d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, 0);
- return 0;
-}
-
-
-/*
- * Sends a barker buffer to the device
- *
- * This helper will allocate a kmalloced buffer and use it to transmit
- * (then free it). Reason for this is that the SDIO host controller
- * expects alignment (unknown exactly which) which the stack won't
- * really provide and certain arches/host-controller combinations
- * cannot use stack/vmalloc/text areas for DMA transfers.
- */
-static
-int __i2400ms_send_barker(struct i2400ms *i2400ms,
- const __le32 *barker, size_t barker_size)
-{
- int ret;
- struct sdio_func *func = i2400ms->func;
- struct device *dev = &func->dev;
- void *buffer;
-
- ret = -ENOMEM;
- buffer = kmalloc(I2400MS_BLK_SIZE, GFP_KERNEL);
- if (buffer == NULL)
- goto error_kzalloc;
-
- memcpy(buffer, barker, barker_size);
- sdio_claim_host(func);
- ret = sdio_memcpy_toio(func, 0, buffer, I2400MS_BLK_SIZE);
- sdio_release_host(func);
-
- if (ret < 0)
- d_printf(0, dev, "E: barker error: %d\n", ret);
-
- kfree(buffer);
-error_kzalloc:
- return ret;
-}
-
-
-/*
- * Reset a device at different levels (warm, cold or bus)
- *
- * @i2400ms: device descriptor
- * @reset_type: soft, warm or bus reset (I2400M_RT_WARM/SOFT/BUS)
- *
- * FIXME: not tested -- need to confirm expected effects
- *
- * Warm and cold resets get an SDIO reset if they fail (unimplemented)
- *
- * Warm reset:
- *
- * The device will be fully reset internally, but won't be
- * disconnected from the bus (so no reenumeration will
- * happen). Firmware upload will be necessary.
- *
- * The device will send a reboot barker that will trigger the driver
- * to reinitialize the state via __i2400m_dev_reset_handle.
- *
- *
- * Cold and bus reset:
- *
- * The device will be fully reset internally, disconnected from the
- * bus and a reenumeration will happen. Firmware upload will be
- * necessary. Thus, we don't do any locking or struct
- * reinitialization, as we are going to be fully disconnected and
- * reenumerated.
- *
- * Note we need to return -ENODEV if a warm reset was requested and we
- * had to resort to a bus reset. See i2400m_op_reset(), wimax_reset()
- * and wimax_dev->op_reset.
- *
- * WARNING: no driver state saved/fixed
- */
-static
-int i2400ms_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
-{
- int result = 0;
- struct i2400ms *i2400ms =
- container_of(i2400m, struct i2400ms, i2400m);
- struct device *dev = i2400m_dev(i2400m);
- static const __le32 i2400m_WARM_BOOT_BARKER[4] = {
- cpu_to_le32(I2400M_WARM_RESET_BARKER),
- cpu_to_le32(I2400M_WARM_RESET_BARKER),
- cpu_to_le32(I2400M_WARM_RESET_BARKER),
- cpu_to_le32(I2400M_WARM_RESET_BARKER),
- };
- static const __le32 i2400m_COLD_BOOT_BARKER[4] = {
- cpu_to_le32(I2400M_COLD_RESET_BARKER),
- cpu_to_le32(I2400M_COLD_RESET_BARKER),
- cpu_to_le32(I2400M_COLD_RESET_BARKER),
- cpu_to_le32(I2400M_COLD_RESET_BARKER),
- };
-
- if (rt == I2400M_RT_WARM)
- result = __i2400ms_send_barker(i2400ms, i2400m_WARM_BOOT_BARKER,
- sizeof(i2400m_WARM_BOOT_BARKER));
- else if (rt == I2400M_RT_COLD)
- result = __i2400ms_send_barker(i2400ms, i2400m_COLD_BOOT_BARKER,
- sizeof(i2400m_COLD_BOOT_BARKER));
- else if (rt == I2400M_RT_BUS) {
-do_bus_reset:
-
- i2400ms_bus_release(i2400m);
-
- /* Wait for the device to settle */
- msleep(40);
-
- result = i2400ms_bus_setup(i2400m);
- } else
- BUG();
- if (result < 0 && rt != I2400M_RT_BUS) {
- dev_err(dev, "%s reset failed (%d); trying SDIO reset\n",
- rt == I2400M_RT_WARM ? "warm" : "cold", result);
- rt = I2400M_RT_BUS;
- goto do_bus_reset;
- }
- return result;
-}
-
-
-static
-void i2400ms_netdev_setup(struct net_device *net_dev)
-{
- struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
- struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
- i2400ms_init(i2400ms);
- i2400m_netdev_setup(net_dev);
-}
-
-
-/*
- * Debug levels control; see debug.h
- */
-struct d_level D_LEVEL[] = {
- D_SUBMODULE_DEFINE(main),
- D_SUBMODULE_DEFINE(tx),
- D_SUBMODULE_DEFINE(rx),
- D_SUBMODULE_DEFINE(fw),
-};
-size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
-
-
-#define __debugfs_register(prefix, name, parent) \
-do { \
- result = d_level_register_debugfs(prefix, name, parent); \
- if (result < 0) \
- goto error; \
-} while (0)
-
-
-static
-int i2400ms_debugfs_add(struct i2400ms *i2400ms)
-{
- int result;
- struct dentry *dentry = i2400ms->i2400m.wimax_dev.debugfs_dentry;
-
- dentry = debugfs_create_dir("i2400m-sdio", dentry);
- result = PTR_ERR(dentry);
- if (IS_ERR(dentry)) {
- if (result == -ENODEV)
- result = 0; /* No debugfs support */
- goto error;
- }
- i2400ms->debugfs_dentry = dentry;
- __debugfs_register("dl_", main, dentry);
- __debugfs_register("dl_", tx, dentry);
- __debugfs_register("dl_", rx, dentry);
- __debugfs_register("dl_", fw, dentry);
-
- return 0;
-
-error:
- debugfs_remove_recursive(i2400ms->debugfs_dentry);
- i2400ms->debugfs_dentry = NULL;
- return result;
-}
-
-
-static struct device_type i2400ms_type = {
- .name = "wimax",
-};
-
-/*
- * Probe a i2400m interface and register it
- *
- * @func: SDIO function
- * @id: SDIO device ID
- * @returns: 0 if ok, < 0 errno code on error.
- *
- * Alloc a net device, initialize the bus-specific details and then
- * calls the bus-generic initialization routine. That will register
- * the wimax and netdev devices, upload the firmware [using
- * _bus_bm_*()], call _bus_dev_start() to finalize the setup of the
- * communication with the device and then will start to talk to it to
- * finnish setting it up.
- *
- * Initialization is tricky; some instances of the hw are packed with
- * others in a way that requires a third driver that enables the WiMAX
- * function. In those cases, we can't enable the SDIO function and
- * we'll return with -ENODEV. When the driver that enables the WiMAX
- * function does its thing, it has to do a bus_rescan_devices() on the
- * SDIO bus so this driver is called again to enumerate the WiMAX
- * function.
- */
-static
-int i2400ms_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
-{
- int result;
- struct net_device *net_dev;
- struct device *dev = &func->dev;
- struct i2400m *i2400m;
- struct i2400ms *i2400ms;
-
- /* Allocate instance [calls i2400m_netdev_setup() on it]. */
- result = -ENOMEM;
- net_dev = alloc_netdev(sizeof(*i2400ms), "wmx%d",
- i2400ms_netdev_setup);
- if (net_dev == NULL) {
- dev_err(dev, "no memory for network device instance\n");
- goto error_alloc_netdev;
- }
- SET_NETDEV_DEV(net_dev, dev);
- SET_NETDEV_DEVTYPE(net_dev, &i2400ms_type);
- i2400m = net_dev_to_i2400m(net_dev);
- i2400ms = container_of(i2400m, struct i2400ms, i2400m);
- i2400m->wimax_dev.net_dev = net_dev;
- i2400ms->func = func;
- sdio_set_drvdata(func, i2400ms);
-
- i2400m->bus_tx_block_size = I2400MS_BLK_SIZE;
- /*
-	 * Room required in the TX queue for an SDIO message to accommodate
-	 * the smallest payload while allocating header space is 224 bytes,
-	 * which is the smallest message size (the block size, 256 bytes)
-	 * minus the smallest message header size (32 bytes).
- */
- i2400m->bus_tx_room_min = I2400MS_BLK_SIZE - I2400M_PL_ALIGN * 2;
- i2400m->bus_pl_size_max = I2400MS_PL_SIZE_MAX;
- i2400m->bus_setup = i2400ms_bus_setup;
- i2400m->bus_dev_start = i2400ms_bus_dev_start;
- i2400m->bus_dev_stop = NULL;
- i2400m->bus_release = i2400ms_bus_release;
- i2400m->bus_tx_kick = i2400ms_bus_tx_kick;
- i2400m->bus_reset = i2400ms_bus_reset;
- /* The iwmc3200-wimax sometimes requires the driver to try
- * hard when we paint it into a corner. */
- i2400m->bus_bm_retries = I2400M_SDIO_BOOT_RETRIES;
- i2400m->bus_bm_cmd_send = i2400ms_bus_bm_cmd_send;
- i2400m->bus_bm_wait_for_ack = i2400ms_bus_bm_wait_for_ack;
- i2400m->bus_fw_names = i2400ms_bus_fw_names;
- i2400m->bus_bm_mac_addr_impaired = 1;
- i2400m->bus_bm_pokes_table = &i2400ms_pokes[0];
-
- switch (func->device) {
- case SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX:
- case SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5:
- i2400ms->iwmc3200 = 1;
- break;
- default:
- i2400ms->iwmc3200 = 0;
- }
-
- result = i2400m_setup(i2400m, I2400M_BRI_NO_REBOOT);
- if (result < 0) {
- dev_err(dev, "cannot setup device: %d\n", result);
- goto error_setup;
- }
-
- result = i2400ms_debugfs_add(i2400ms);
- if (result < 0) {
- dev_err(dev, "cannot create SDIO debugfs: %d\n",
- result);
- goto error_debugfs_add;
- }
- return 0;
-
-error_debugfs_add:
- i2400m_release(i2400m);
-error_setup:
- sdio_set_drvdata(func, NULL);
- free_netdev(net_dev);
-error_alloc_netdev:
- return result;
-}
-
-
-static
-void i2400ms_remove(struct sdio_func *func)
-{
- struct device *dev = &func->dev;
- struct i2400ms *i2400ms = sdio_get_drvdata(func);
- struct i2400m *i2400m = &i2400ms->i2400m;
- struct net_device *net_dev = i2400m->wimax_dev.net_dev;
-
- d_fnstart(3, dev, "SDIO func %p\n", func);
- debugfs_remove_recursive(i2400ms->debugfs_dentry);
- i2400ms->debugfs_dentry = NULL;
- i2400m_release(i2400m);
- sdio_set_drvdata(func, NULL);
- free_netdev(net_dev);
- d_fnend(3, dev, "SDIO func %p\n", func);
-}
-
-static
-const struct sdio_device_id i2400ms_sdio_ids[] = {
- /* Intel: i2400m WiMAX (iwmc3200) over SDIO */
- { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL,
- SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX) },
- { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL,
- SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5) },
- { /* end: all zeroes */ },
-};
-MODULE_DEVICE_TABLE(sdio, i2400ms_sdio_ids);
-
-
-static
-struct sdio_driver i2400m_sdio_driver = {
- .name = KBUILD_MODNAME,
- .probe = i2400ms_probe,
- .remove = i2400ms_remove,
- .id_table = i2400ms_sdio_ids,
-};
-
-
-static
-int __init i2400ms_driver_init(void)
-{
- d_parse_params(D_LEVEL, D_LEVEL_SIZE, i2400ms_debug_params,
- "i2400m_sdio.debug");
- return sdio_register_driver(&i2400m_sdio_driver);
-}
-module_init(i2400ms_driver_init);
-
-
-static
-void __exit i2400ms_driver_exit(void)
-{
- sdio_unregister_driver(&i2400m_sdio_driver);
-}
-module_exit(i2400ms_driver_exit);
-
-
-MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
-MODULE_DESCRIPTION("Intel 2400M WiMAX networking for SDIO");
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(I2400MS_FW_FILE_NAME);
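
A standalone sketch of the bounded retry loop that i2400ms_enable_function() implemented above: give up on whichever comes first, a deadline or a maximum number of attempts (0 meaning no attempt limit). The fake try_enable() and the timing values are invented for the demonstration.

#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <errno.h>

static int try_enable(void)
{
        static int calls;
        return ++calls < 3 ? -ENODEV : 0;   /* pretend it works on try 3 */
}

static int enable_with_retries(unsigned maxtries, unsigned timeout_s)
{
        time_t deadline = time(NULL) + timeout_s;
        unsigned tries = 0;
        int err = -ENODEV;

        while (err != 0 && time(NULL) < deadline) {
                err = try_enable();
                if (err == 0)
                        break;
                if (maxtries > 0 && ++tries >= maxtries)
                        return -ETIME;
                usleep(100 * 1000);          /* back off before retrying */
        }
        return err;
}

int main(void)
{
        printf("enable: %d\n", enable_with_retries(0, 2));
        return 0;
}
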
diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
index 1fda46c..e74664b 100644
--- a/drivers/net/wimax/i2400m/usb-fw.c
+++ b/drivers/net/wimax/i2400m/usb-fw.c
@@ -212,7 +212,7 @@ ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *i2400m,
}
if (result != cmd_size) { /* all was transferred? */
dev_err(dev, "boot-mode cmd %d: incomplete transfer "
- "(%zu vs %zu submitted)\n", opcode, result, cmd_size);
+ "(%zd vs %zu submitted)\n", opcode, result, cmd_size);
result = -EIO;
goto error_cmd_size;
}
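
A tiny standalone sketch of why the hunk above switches %zu to %zd: the printed value is a ssize_t, which is signed and may be negative, and %zu is the wrong conversion for it. The value here is made up for the demonstration.

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
        ssize_t result = -5;

        printf("as %%zd: %zd\n", result);   /* -5                               */
        printf("as %%zu: %zu\n", result);   /* mismatched: prints a huge value  */
        return 0;
}
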
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 5f58fa5..6deaae1 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -276,7 +276,6 @@ source "drivers/net/wireless/hostap/Kconfig"
source "drivers/net/wireless/ipw2x00/Kconfig"
source "drivers/net/wireless/iwlwifi/Kconfig"
source "drivers/net/wireless/iwlegacy/Kconfig"
-source "drivers/net/wireless/iwmc3200wifi/Kconfig"
source "drivers/net/wireless/libertas/Kconfig"
source "drivers/net/wireless/orinoco/Kconfig"
source "drivers/net/wireless/p54/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 0ce218b..062dfdf 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -53,8 +53,6 @@ obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
obj-$(CONFIG_WL_TI) += ti/
-obj-$(CONFIG_IWM) += iwmc3200wifi/
-
obj-$(CONFIG_MWIFIEX) += mwifiex/
obj-$(CONFIG_BRCMFMAC) += brcm80211/
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 0ac09a2..689a71c 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1738,8 +1738,7 @@ static int adm8211_alloc_rings(struct ieee80211_hw *dev)
return -ENOMEM;
}
- priv->tx_ring = (struct adm8211_desc *)(priv->rx_ring +
- priv->rx_ring_size);
+ priv->tx_ring = priv->rx_ring + priv->rx_ring_size;
priv->tx_ring_dma = priv->rx_ring_dma +
sizeof(struct adm8211_desc) * priv->rx_ring_size;
@@ -1855,7 +1854,7 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
if (!is_valid_ether_addr(perm_addr)) {
printk(KERN_WARNING "%s (adm8211): Invalid hwaddr in EEPROM!\n",
pci_name(pdev));
- random_ether_addr(perm_addr);
+ eth_random_addr(perm_addr);
}
SET_IEEE80211_PERM_ADDR(dev, perm_addr);
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 520a4b2..f9f15bb 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1997,7 +1997,7 @@ static int mpi_send_packet (struct net_device *dev)
* ------------------------------------------------
*/
- memcpy((char *)ai->txfids[0].virtual_host_addr,
+ memcpy(ai->txfids[0].virtual_host_addr,
(char *)&wifictlhdr8023, sizeof(wifictlhdr8023));
payloadLen = (__le16 *)(ai->txfids[0].virtual_host_addr +
@@ -4212,7 +4212,7 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid,
airo_print_err(ai->dev->name, "%s: len=%d", __func__, len);
rc = -1;
} else {
- memcpy((char *)ai->config_desc.virtual_host_addr,
+ memcpy(ai->config_desc.virtual_host_addr,
pBuf, len);
rc = issuecommand(ai, &cmd, &rsp);
@@ -7233,8 +7233,8 @@ static int airo_get_aplist(struct net_device *dev,
}
} else {
dwrq->flags = 1; /* Should be define'd */
- memcpy(extra + sizeof(struct sockaddr)*i,
- &qual, sizeof(struct iw_quality)*i);
+ memcpy(extra + sizeof(struct sockaddr) * i, qual,
+ sizeof(struct iw_quality) * i);
}
dwrq->length = i;
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index c54b7d37..6169fbd 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -143,6 +143,7 @@ struct ath_common {
u32 keymax;
DECLARE_BITMAP(keymap, ATH_KEYMAX);
DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
+ DECLARE_BITMAP(ccmp_keymap, ATH_KEYMAX);
enum ath_crypt_caps crypt_caps;
unsigned int clockrate;
@@ -215,6 +216,7 @@ void ath_printk(const char *level, const struct ath_common *common,
* used exclusively for WLAN-BT coexistence starting from
* AR9462.
* @ATH_DBG_DFS: radar detection
+ * @ATH_DBG_WOW: Wake on Wireless
* @ATH_DBG_ANY: enable all debugging
*
* The debug level is used to control the amount and type of debugging output
@@ -242,6 +244,7 @@ enum ATH_DEBUG {
ATH_DBG_BSTUCK = 0x00008000,
ATH_DBG_MCI = 0x00010000,
ATH_DBG_DFS = 0x00020000,
+ ATH_DBG_WOW = 0x00040000,
ATH_DBG_ANY = 0xffffffff
};
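
A standalone sketch of the bitmask debug scheme the hunk above extends with ATH_DBG_WOW: each area gets one bit, and a message is emitted only when its bit is set in the current mask. The mask value and macro name below are invented for the illustration.

#include <stdio.h>

enum dbg_flags {
        DBG_DFS = 0x00020000,
        DBG_WOW = 0x00040000,
};

static unsigned int debug_mask = DBG_WOW;

#define dbg_printf(flag, ...)                  \
        do {                                   \
                if (debug_mask & (flag))       \
                        printf(__VA_ARGS__);   \
        } while (0)

int main(void)
{
        dbg_printf(DBG_WOW, "wake-on-wireless event\n");   /* printed  */
        dbg_printf(DBG_DFS, "radar detected\n");           /* filtered */
        return 0;
}
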
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index e18a9aa..338c5c4 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -64,3 +64,11 @@ config ATH5K_PCI
---help---
This adds support for PCI type chipsets of the 5xxx Atheros
family.
+
+config ATH5K_TEST_CHANNELS
+ bool "Enables testing channels on ath5k"
+ depends on ATH5K && CFG80211_CERTIFICATION_ONUS
+ ---help---
+ This enables non-standard IEEE 802.11 channels on ath5k, which
+ can be used for research purposes. This option should be disabled
+ unless doing research.
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 0ba81a6..8c4c040 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -74,10 +74,6 @@ bool ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
-static bool modparam_all_channels;
-module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
-MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
-
static bool modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
@@ -258,8 +254,15 @@ static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *re
\********************/
/*
- * Returns true for the channel numbers used without all_channels modparam.
+ * Returns true for the channel numbers used.
*/
+#ifdef CONFIG_ATH5K_TEST_CHANNELS
+static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
+{
+ return true;
+}
+
+#else
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
{
if (band == IEEE80211_BAND_2GHZ && chan <= 14)
@@ -276,6 +279,7 @@ static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
/* 802.11j 4.9GHz (20MHz) */
(chan == 184 || chan == 188 || chan == 192 || chan == 196));
}
+#endif
static unsigned int
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
@@ -316,8 +320,7 @@ ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
if (!ath5k_channel_ok(ah, &channels[count]))
continue;
- if (!modparam_all_channels &&
- !ath5k_is_standard_channel(ch, band))
+ if (!ath5k_is_standard_channel(ch, band))
continue;
count++;
@@ -1045,11 +1048,11 @@ ath5k_drain_tx_buffs(struct ath5k_hw *ah)
ath5k_txbuf_free_skb(ah, bf);
- spin_lock_bh(&ah->txbuflock);
+ spin_lock(&ah->txbuflock);
list_move_tail(&bf->list, &ah->txbuf);
ah->txbuf_len++;
txq->txq_len--;
- spin_unlock_bh(&ah->txbuflock);
+ spin_unlock(&ah->txbuflock);
}
txq->link = NULL;
txq->txq_poll_mark = false;
@@ -2415,6 +2418,22 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
* Initialization routines *
\*************************/
+static const struct ieee80211_iface_limit if_limits[] = {
+ { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) },
+ { .max = 4, .types =
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_AP) },
+};
+
+static const struct ieee80211_iface_combination if_comb = {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 2048,
+ .num_different_channels = 1,
+};
+
int __devinit
ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
{
@@ -2436,6 +2455,9 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT);
+ hw->wiphy->iface_combinations = &if_comb;
+ hw->wiphy->n_iface_combinations = 1;
+
/* SW support for IBSS_RSN is provided by mac80211 */
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 22b80af..260e7dc 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -594,7 +594,7 @@ ath5k_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
qi.tqi_aifs = params->aifs;
qi.tqi_cw_min = params->cw_min;
qi.tqi_cw_max = params->cw_max;
- qi.tqi_burst_time = params->txop;
+ qi.tqi_burst_time = params->txop * 32;
ATH5K_DBG(ah, ATH5K_DEBUG_ANY,
"Configure tx [queue %d], "
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index b869a35..86aeef4 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -53,6 +53,11 @@
#define DEFAULT_BG_SCAN_PERIOD 60
+struct ath6kl_cfg80211_match_probe_ssid {
+ struct cfg80211_ssid ssid;
+ u8 flag;
+};
+
static struct ieee80211_rate ath6kl_rates[] = {
RATETAB_ENT(10, 0x1, 0),
RATETAB_ENT(20, 0x2, 0),
@@ -576,6 +581,9 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
vif->nw_type = vif->next_mode;
+ /* enable enhanced bmiss detection if applicable */
+ ath6kl_cfg80211_sta_bmiss_enhance(vif, true);
+
if (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)
nw_subtype = SUBTYPE_P2PCLIENT;
@@ -852,20 +860,6 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
}
}
- /*
- * Send a disconnect command to target when a disconnect event is
- * received with reason code other than 3 (DISCONNECT_CMD - disconnect
- * request from host) to make the firmware stop trying to connect even
- * after giving disconnect event. There will be one more disconnect
- * event for this disconnect command with reason code DISCONNECT_CMD
- * which will be notified to cfg80211.
- */
-
- if (reason != DISCONNECT_CMD) {
- ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
- return;
- }
-
clear_bit(CONNECT_PEND, &vif->flags);
if (vif->sme_state == SME_CONNECTING) {
@@ -875,32 +869,96 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
WLAN_STATUS_UNSPECIFIED_FAILURE,
GFP_KERNEL);
} else if (vif->sme_state == SME_CONNECTED) {
- cfg80211_disconnected(vif->ndev, reason,
+ cfg80211_disconnected(vif->ndev, proto_reason,
NULL, 0, GFP_KERNEL);
}
vif->sme_state = SME_DISCONNECTED;
+
+ /*
+ * Send a disconnect command to target when a disconnect event is
+ * received with reason code other than 3 (DISCONNECT_CMD - disconnect
+ * request from host) to make the firmware stop trying to connect even
+ * after giving disconnect event. There will be one more disconnect
+ * event for this disconnect command with reason code DISCONNECT_CMD
+ * which won't be notified to cfg80211.
+ */
+ if (reason != DISCONNECT_CMD)
+ ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
}
static int ath6kl_set_probed_ssids(struct ath6kl *ar,
struct ath6kl_vif *vif,
- struct cfg80211_ssid *ssids, int n_ssids)
+ struct cfg80211_ssid *ssids, int n_ssids,
+ struct cfg80211_match_set *match_set,
+ int n_match_ssid)
{
- u8 i;
+ u8 i, j, index_to_add, ssid_found = false;
+ struct ath6kl_cfg80211_match_probe_ssid ssid_list[MAX_PROBED_SSIDS];
+
+ memset(ssid_list, 0, sizeof(ssid_list));
- if (n_ssids > MAX_PROBED_SSID_INDEX)
+ if (n_ssids > MAX_PROBED_SSIDS ||
+ n_match_ssid > MAX_PROBED_SSIDS)
return -EINVAL;
for (i = 0; i < n_ssids; i++) {
+ memcpy(ssid_list[i].ssid.ssid,
+ ssids[i].ssid,
+ ssids[i].ssid_len);
+ ssid_list[i].ssid.ssid_len = ssids[i].ssid_len;
+
+ if (ssids[i].ssid_len)
+ ssid_list[i].flag = SPECIFIC_SSID_FLAG;
+ else
+ ssid_list[i].flag = ANY_SSID_FLAG;
+
+ if (n_match_ssid == 0)
+ ssid_list[i].flag |= MATCH_SSID_FLAG;
+ }
+
+ index_to_add = i;
+
+ for (i = 0; i < n_match_ssid; i++) {
+ ssid_found = false;
+
+ for (j = 0; j < n_ssids; j++) {
+ if ((match_set[i].ssid.ssid_len ==
+ ssid_list[j].ssid.ssid_len) &&
+ (!memcmp(ssid_list[j].ssid.ssid,
+ match_set[i].ssid.ssid,
+ match_set[i].ssid.ssid_len))) {
+ ssid_list[j].flag |= MATCH_SSID_FLAG;
+ ssid_found = true;
+ break;
+ }
+ }
+
+ if (ssid_found)
+ continue;
+
+ if (index_to_add >= MAX_PROBED_SSIDS)
+ continue;
+
+ ssid_list[index_to_add].ssid.ssid_len =
+ match_set[i].ssid.ssid_len;
+ memcpy(ssid_list[index_to_add].ssid.ssid,
+ match_set[i].ssid.ssid,
+ match_set[i].ssid.ssid_len);
+ ssid_list[index_to_add].flag |= MATCH_SSID_FLAG;
+ index_to_add++;
+ }
+
+ for (i = 0; i < index_to_add; i++) {
ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i,
- ssids[i].ssid_len ?
- SPECIFIC_SSID_FLAG : ANY_SSID_FLAG,
- ssids[i].ssid_len,
- ssids[i].ssid);
+ ssid_list[i].flag,
+ ssid_list[i].ssid.ssid_len,
+ ssid_list[i].ssid.ssid);
+
}
/* Make sure no old entries are left behind */
- for (i = n_ssids; i < MAX_PROBED_SSID_INDEX; i++) {
+ for (i = index_to_add; i < MAX_PROBED_SSIDS; i++) {
ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i,
DISABLE_SSID_FLAG, 0, NULL);
}
@@ -908,11 +966,11 @@ static int ath6kl_set_probed_ssids(struct ath6kl *ar,
return 0;
}
-static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+static int ath6kl_cfg80211_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *request)
{
- struct ath6kl *ar = ath6kl_priv(ndev);
- struct ath6kl_vif *vif = netdev_priv(ndev);
+ struct ath6kl_vif *vif = ath6kl_vif_from_wdev(request->wdev);
+ struct ath6kl *ar = ath6kl_priv(vif->ndev);
s8 n_channels = 0;
u16 *channels = NULL;
int ret = 0;
@@ -934,7 +992,7 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
}
ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
- request->n_ssids);
+ request->n_ssids, NULL, 0);
if (ret < 0)
return ret;
@@ -943,7 +1001,7 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
WMI_FRAME_PROBE_REQ,
request->ie, request->ie_len);
if (ret) {
- ath6kl_err("failed to set Probe Request appie for scan");
+ ath6kl_err("failed to set Probe Request appie for scan\n");
return ret;
}
@@ -1429,14 +1487,14 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
return 0;
}
-static struct net_device *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
- char *name,
- enum nl80211_iftype type,
- u32 *flags,
- struct vif_params *params)
+static struct wireless_dev *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
+ char *name,
+ enum nl80211_iftype type,
+ u32 *flags,
+ struct vif_params *params)
{
struct ath6kl *ar = wiphy_priv(wiphy);
- struct net_device *ndev;
+ struct wireless_dev *wdev;
u8 if_idx, nw_type;
if (ar->num_vif == ar->vif_max) {
@@ -1449,20 +1507,20 @@ static struct net_device *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
return ERR_PTR(-EINVAL);
}
- ndev = ath6kl_interface_add(ar, name, type, if_idx, nw_type);
- if (!ndev)
+ wdev = ath6kl_interface_add(ar, name, type, if_idx, nw_type);
+ if (!wdev)
return ERR_PTR(-ENOMEM);
ar->num_vif++;
- return ndev;
+ return wdev;
}
static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,
- struct net_device *ndev)
+ struct wireless_dev *wdev)
{
struct ath6kl *ar = wiphy_priv(wiphy);
- struct ath6kl_vif *vif = netdev_priv(ndev);
+ struct ath6kl_vif *vif = netdev_priv(wdev->netdev);
spin_lock_bh(&ar->list_lock);
list_del(&vif->list);
@@ -1512,6 +1570,9 @@ static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
}
}
+ /* need to clean up enhanced bmiss detection fw state */
+ ath6kl_cfg80211_sta_bmiss_enhance(vif, false);
+
set_iface_type:
switch (type) {
case NL80211_IFTYPE_STATION:
@@ -2074,7 +2135,9 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
if (wow && (wow->n_patterns > WOW_MAX_FILTERS_PER_LIST))
return -EINVAL;
- if (!test_bit(NETDEV_MCAST_ALL_ON, &vif->flags)) {
+ if (!test_bit(NETDEV_MCAST_ALL_ON, &vif->flags) &&
+ test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
+ ar->fw_capabilities)) {
ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi,
vif->fw_vif_idx, false);
if (ret)
@@ -2209,7 +2272,9 @@ static int ath6kl_wow_resume(struct ath6kl *ar)
ar->state = ATH6KL_STATE_ON;
- if (!test_bit(NETDEV_MCAST_ALL_OFF, &vif->flags)) {
+ if (!test_bit(NETDEV_MCAST_ALL_OFF, &vif->flags) &&
+ test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
+ ar->fw_capabilities)) {
ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi,
vif->fw_vif_idx, true);
if (ret)
@@ -2475,7 +2540,7 @@ void ath6kl_check_wow_status(struct ath6kl *ar)
static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band,
bool ht_enable)
{
- struct ath6kl_htcap *htcap = &vif->htcap;
+ struct ath6kl_htcap *htcap = &vif->htcap[band];
if (htcap->ht_enable == ht_enable)
return 0;
@@ -2585,33 +2650,28 @@ static int ath6kl_set_ies(struct ath6kl_vif *vif,
return 0;
}
-static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type)
+void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif, bool enable)
{
- struct ath6kl_vif *vif;
+ int err;
- /*
- * 'dev' could be NULL if a channel change is required for the hardware
- * device itself, instead of a particular VIF.
- *
- * FIXME: To be handled properly when monitor mode is supported.
- */
- if (!dev)
- return -EBUSY;
+ if (WARN_ON(!test_bit(WMI_READY, &vif->ar->flag)))
+ return;
- vif = netdev_priv(dev);
+ if (vif->nw_type != INFRA_NETWORK)
+ return;
- if (!ath6kl_cfg80211_ready(vif))
- return -EIO;
+ if (!test_bit(ATH6KL_FW_CAPABILITY_BMISS_ENHANCE,
+ vif->ar->fw_capabilities))
+ return;
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n",
- __func__, chan->center_freq, chan->hw_value);
- vif->next_chan = chan->center_freq;
- vif->next_ch_type = channel_type;
- vif->next_ch_band = chan->band;
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s fw bmiss enhance\n",
+ enable ? "enable" : "disable");
- return 0;
+ err = ath6kl_wmi_sta_bmiss_enhance_cmd(vif->ar->wmi,
+ vif->fw_vif_idx, enable);
+ if (err)
+ ath6kl_err("failed to %s enhanced bmiss detection: %d\n",
+ enable ? "enable" : "disable", err);
}
static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon,
@@ -2694,9 +2754,15 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
/* TODO:
* info->interval
- * info->dtim_period
*/
+ ret = ath6kl_wmi_ap_set_dtim_cmd(ar->wmi, vif->fw_vif_idx,
+ info->dtim_period);
+
+ /* ignore error, just print a warning and continue normally */
+ if (ret)
+ ath6kl_warn("Failed to set dtim_period in beacon: %d\n", ret);
+
if (info->beacon.head == NULL)
return -EINVAL;
mgmt = (struct ieee80211_mgmt *) info->beacon.head;
@@ -2791,7 +2857,7 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
p.ssid_len = vif->ssid_len;
memcpy(p.ssid, vif->ssid, vif->ssid_len);
p.dot11_auth_mode = vif->dot11_auth_mode;
- p.ch = cpu_to_le16(vif->next_chan);
+ p.ch = cpu_to_le16(info->channel->center_freq);
/* Enable uAPSD support by default */
res = ath6kl_wmi_ap_set_apsd(ar->wmi, vif->fw_vif_idx, true);
@@ -2815,8 +2881,8 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
return res;
}
- if (ath6kl_set_htcap(vif, vif->next_ch_band,
- vif->next_ch_type != NL80211_CHAN_NO_HT))
+ if (ath6kl_set_htcap(vif, info->channel->band,
+ info->channel_type != NL80211_CHAN_NO_HT))
return -EIO;
/*
@@ -2909,14 +2975,14 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
}
static int ath6kl_remain_on_channel(struct wiphy *wiphy,
- struct net_device *dev,
+ struct wireless_dev *wdev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
unsigned int duration,
u64 *cookie)
{
- struct ath6kl *ar = ath6kl_priv(dev);
- struct ath6kl_vif *vif = netdev_priv(dev);
+ struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
+ struct ath6kl *ar = ath6kl_priv(vif->ndev);
u32 id;
/* TODO: if already pending or ongoing remain-on-channel,
@@ -2933,11 +2999,11 @@ static int ath6kl_remain_on_channel(struct wiphy *wiphy,
}
static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy,
- struct net_device *dev,
+ struct wireless_dev *wdev,
u64 cookie)
{
- struct ath6kl *ar = ath6kl_priv(dev);
- struct ath6kl_vif *vif = netdev_priv(dev);
+ struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
+ struct ath6kl *ar = ath6kl_priv(vif->ndev);
if (cookie != vif->last_roc_id)
return -ENOENT;
@@ -3068,15 +3134,15 @@ static bool ath6kl_is_p2p_go_ssid(const u8 *buf, size_t len)
return false;
}
-static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
+static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
const u8 *buf, size_t len, bool no_cck,
bool dont_wait_for_ack, u64 *cookie)
{
- struct ath6kl *ar = ath6kl_priv(dev);
- struct ath6kl_vif *vif = netdev_priv(dev);
+ struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
+ struct ath6kl *ar = ath6kl_priv(vif->ndev);
u32 id;
const struct ieee80211_mgmt *mgmt;
bool more_data, queued;
@@ -3121,10 +3187,10 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
}
static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
- struct net_device *dev,
+ struct wireless_dev *wdev,
u16 frame_type, bool reg)
{
- struct ath6kl_vif *vif = netdev_priv(dev);
+ struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: frame_type=0x%x reg=%d\n",
__func__, frame_type, reg);
@@ -3160,10 +3226,24 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
ath6kl_cfg80211_scan_complete_event(vif, true);
ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
- request->n_ssids);
+ request->n_ssids,
+ request->match_sets,
+ request->n_match_sets);
if (ret < 0)
return ret;
+ if (!request->n_match_sets) {
+ ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ ALL_BSS_FILTER, 0);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ MATCHED_SSID_FILTER, 0);
+ if (ret < 0)
+ return ret;
+ }
+
/* fw uses seconds, also make sure that it's >0 */
interval = max_t(u16, 1, request->interval / 1000);
@@ -3185,7 +3265,7 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
WMI_FRAME_PROBE_REQ,
request->ie, request->ie_len);
if (ret) {
- ath6kl_warn("Failed to set probe request IE for scheduled scan: %d",
+ ath6kl_warn("Failed to set probe request IE for scheduled scan: %d\n",
ret);
return ret;
}
@@ -3217,6 +3297,18 @@ static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy,
return 0;
}
+static int ath6kl_cfg80211_set_bitrate(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *addr,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+
+ return ath6kl_wmi_set_bitrate_mask(ar->wmi, vif->fw_vif_idx,
+ mask);
+}
+
static const struct ieee80211_txrx_stypes
ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = {
[NL80211_IFTYPE_STATION] = {
@@ -3271,7 +3363,6 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
.suspend = __ath6kl_cfg80211_suspend,
.resume = __ath6kl_cfg80211_resume,
#endif
- .set_channel = ath6kl_set_channel,
.start_ap = ath6kl_start_ap,
.change_beacon = ath6kl_change_beacon,
.stop_ap = ath6kl_stop_ap,
@@ -3283,6 +3374,7 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
.mgmt_frame_register = ath6kl_mgmt_frame_register,
.sched_scan_start = ath6kl_cfg80211_sscan_start,
.sched_scan_stop = ath6kl_cfg80211_sscan_stop,
+ .set_bitrate_mask = ath6kl_cfg80211_set_bitrate,
};
void ath6kl_cfg80211_stop(struct ath6kl_vif *vif)
@@ -3385,9 +3477,9 @@ void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif)
ar->num_vif--;
}
-struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
- enum nl80211_iftype type, u8 fw_vif_idx,
- u8 nw_type)
+struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, char *name,
+ enum nl80211_iftype type,
+ u8 fw_vif_idx, u8 nw_type)
{
struct net_device *ndev;
struct ath6kl_vif *vif;
@@ -3410,7 +3502,8 @@ struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL;
vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME;
vif->bg_scan_period = 0;
- vif->htcap.ht_enable = true;
+ vif->htcap[IEEE80211_BAND_2GHZ].ht_enable = true;
+ vif->htcap[IEEE80211_BAND_5GHZ].ht_enable = true;
memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
if (fw_vif_idx != 0)
@@ -3440,7 +3533,7 @@ struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
list_add_tail(&vif->list, &ar->vif_list);
spin_unlock_bh(&ar->list_lock);
- return ndev;
+ return &vif->wdev;
err:
aggr_module_destroy(vif->aggr_cntxt);
@@ -3470,7 +3563,13 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
}
/* max num of ssids that can be probed during scanning */
- wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
+ wiphy->max_scan_ssids = MAX_PROBED_SSIDS;
+
+ /* max num of ssids that can be matched after scan */
+ if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST,
+ ar->fw_capabilities))
+ wiphy->max_match_sets = MAX_PROBED_SSIDS;
+
wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
switch (ar->hw.cap) {
case WMI_11AN_CAP:
@@ -3507,6 +3606,17 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
ath6kl_band_5ghz.ht_cap.cap = 0;
ath6kl_band_5ghz.ht_cap.ht_supported = false;
}
+
+ if (ar->hw.flags & ATH6KL_HW_FLAG_64BIT_RATES) {
+ ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff;
+ ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff;
+ ath6kl_band_2ghz.ht_cap.mcs.rx_mask[1] = 0xff;
+ ath6kl_band_5ghz.ht_cap.mcs.rx_mask[1] = 0xff;
+ } else {
+ ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff;
+ ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff;
+ }
+
if (band_2gig)
wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
if (band_5gig)
@@ -3517,6 +3627,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
wiphy->cipher_suites = cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+#ifdef CONFIG_PM
wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
WIPHY_WOWLAN_GTK_REKEY_FAILURE |
@@ -3526,8 +3637,9 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST;
wiphy->wowlan.pattern_min_len = 1;
wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE;
+#endif
- wiphy->max_sched_scan_ssids = MAX_PROBED_SSID_INDEX;
+ wiphy->max_sched_scan_ssids = MAX_PROBED_SSIDS;
ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
WIPHY_FLAG_HAVE_AP_SME |
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index 5ea8cbb..56b1ebe 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -25,9 +25,9 @@ enum ath6kl_cfg_suspend_mode {
ATH6KL_CFG_SUSPEND_SCHED_SCAN,
};
-struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
- enum nl80211_iftype type,
- u8 fw_vif_idx, u8 nw_type);
+struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, char *name,
+ enum nl80211_iftype type,
+ u8 fw_vif_idx, u8 nw_type);
void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
enum wmi_phy_mode mode);
void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted);
@@ -62,5 +62,7 @@ void ath6kl_cfg80211_cleanup(struct ath6kl *ar);
struct ath6kl *ath6kl_cfg80211_create(void);
void ath6kl_cfg80211_destroy(struct ath6kl *ar);
+/* TODO: remove this once ath6kl_vif_cleanup() is moved to cfg80211.c */
+void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif, bool enable);
#endif /* ATH6KL_CFG80211_H */
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
index fdb3b1d..82c4dd2 100644
--- a/drivers/net/wireless/ath/ath6kl/core.c
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL(ath6kl_core_rx_complete);
int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
{
struct ath6kl_bmi_target_info targ_info;
- struct net_device *ndev;
+ struct wireless_dev *wdev;
int ret = 0, i;
switch (htc_type) {
@@ -187,12 +187,12 @@ int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
rtnl_lock();
/* Add an initial station interface */
- ndev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0,
+ wdev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0,
INFRA_NETWORK);
rtnl_unlock();
- if (!ndev) {
+ if (!wdev) {
ath6kl_err("Failed to instantiate a network device\n");
ret = -ENOMEM;
wiphy_unregister(ar->wiphy);
@@ -200,7 +200,7 @@ int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
}
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
- __func__, ndev->name, ndev, ar);
+ __func__, wdev->netdev->name, wdev->netdev, ar);
return ret;
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 4d9c6f1..cec49a3 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -100,6 +100,21 @@ enum ath6kl_fw_capability {
/* Firmware has support to override rsn cap of rsn ie */
ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
+ /*
+ * Multicast support in WOW and host-awake mode:
+ * allow all multicast traffic while the host is awake and
+ * apply the multicast filter in WOW mode.
+ */
+ ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
+
+ /* Firmware supports enhanced bmiss detection */
+ ATH6KL_FW_CAPABILITY_BMISS_ENHANCE,
+
+ /*
+ * Firmware supports SSID matching in scheduled scan
+ */
+ ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST,
+
/* this needs to be last */
ATH6KL_FW_CAPABILITY_MAX,
};
@@ -112,6 +127,10 @@ struct ath6kl_fw_ie {
u8 data[0];
};
+enum ath6kl_hw_flags {
+ ATH6KL_HW_FLAG_64BIT_RATES = BIT(0),
+};
+
#define ATH6KL_FW_API2_FILE "fw-2.bin"
#define ATH6KL_FW_API3_FILE "fw-3.bin"
@@ -196,7 +215,7 @@ struct ath6kl_fw_ie {
#define AGGR_NUM_OF_FREE_NETBUFS 16
-#define AGGR_RX_TIMEOUT 400 /* in ms */
+#define AGGR_RX_TIMEOUT 100 /* in ms */
#define WMI_TIMEOUT (2 * HZ)
@@ -245,7 +264,6 @@ struct skb_hold_q {
struct rxtid {
bool aggr;
- bool progress;
bool timer_mon;
u16 win_sz;
u16 seq_next;
@@ -254,9 +272,15 @@ struct rxtid {
struct sk_buff_head q;
/*
- * FIXME: No clue what this should protect. Apparently it should
- * protect some of the fields above but they are also accessed
- * without taking the lock.
+ * The lock mainly protects seq_next and hold_q. Movement of seq_next
+ * needs to be protected between aggr_timeout() and
+ * aggr_process_recv_frm(). hold_q holds the pending reorder frames
+ * and its access should also be protected.
+ * Some of the other fields like hold_q_sz, win_sz and aggr are
+ * initialized/reset when receiving addba/delba req. Also, while
+ * deleting aggr state, all the pending buffers are flushed before
+ * these fields are reset, so there should not be any race in
+ * accessing them.
*/
spinlock_t lock;
};
@@ -541,7 +565,7 @@ struct ath6kl_vif {
struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
struct aggr_info *aggr_cntxt;
- struct ath6kl_htcap htcap;
+ struct ath6kl_htcap htcap[IEEE80211_NUM_BANDS];
struct timer_list disconnect_timer;
struct timer_list sched_scan_timer;
@@ -553,9 +577,6 @@ struct ath6kl_vif {
u32 last_cancel_roc_id;
u32 send_action_id;
bool probe_req_report;
- u16 next_chan;
- enum nl80211_channel_type next_ch_type;
- enum ieee80211_band next_ch_band;
u16 assoc_bss_beacon_int;
u16 listen_intvl_t;
u16 bmiss_time_t;
@@ -568,6 +589,11 @@ struct ath6kl_vif {
struct list_head mc_filter;
};
+static inline struct ath6kl_vif *ath6kl_vif_from_wdev(struct wireless_dev *wdev)
+{
+ return container_of(wdev, struct ath6kl_vif, wdev);
+}
+
#define WOW_LIST_ID 0
#define WOW_HOST_REQ_DELAY 500 /* ms */
@@ -687,6 +713,8 @@ struct ath6kl {
u32 testscript_addr;
enum wmi_phy_cap cap;
+ u32 flags;
+
struct ath6kl_hw_fw {
const char *dir;
const char *otp;
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index 2798624d..cd0e1ba 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -1309,7 +1309,7 @@ static int ath6kl_htc_rx_packet(struct htc_target *target,
}
ath6kl_dbg(ATH6KL_DBG_HTC,
- "htc rx 0x%p hdr x%x len %d mbox 0x%x\n",
+ "htc rx 0x%p hdr 0x%x len %d mbox 0x%x\n",
packet, packet->info.rx.exp_hdr,
padded_len, dev->ar->mbox_info.htc_addr);
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 7eb0515..f90b5db 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -42,6 +42,7 @@ static const struct ath6kl_hw hw_list[] = {
.reserved_ram_size = 6912,
.refclk_hz = 26000000,
.uarttx_pin = 8,
+ .flags = 0,
/* hw2.0 needs override address hardcoded */
.app_start_override_addr = 0x944C00,
@@ -67,6 +68,7 @@ static const struct ath6kl_hw hw_list[] = {
.refclk_hz = 26000000,
.uarttx_pin = 8,
.testscript_addr = 0x57ef74,
+ .flags = 0,
.fw = {
.dir = AR6003_HW_2_1_1_FW_DIR,
@@ -91,6 +93,7 @@ static const struct ath6kl_hw hw_list[] = {
.board_addr = 0x433900,
.refclk_hz = 26000000,
.uarttx_pin = 11,
+ .flags = ATH6KL_HW_FLAG_64BIT_RATES,
.fw = {
.dir = AR6004_HW_1_0_FW_DIR,
@@ -110,6 +113,7 @@ static const struct ath6kl_hw hw_list[] = {
.board_addr = 0x43d400,
.refclk_hz = 40000000,
.uarttx_pin = 11,
+ .flags = ATH6KL_HW_FLAG_64BIT_RATES,
.fw = {
.dir = AR6004_HW_1_1_FW_DIR,
@@ -129,6 +133,7 @@ static const struct ath6kl_hw hw_list[] = {
.board_addr = 0x435c00,
.refclk_hz = 40000000,
.uarttx_pin = 11,
+ .flags = ATH6KL_HW_FLAG_64BIT_RATES,
.fw = {
.dir = AR6004_HW_1_2_FW_DIR,
@@ -938,6 +943,14 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
}
switch (ie_id) {
+ case ATH6KL_FW_IE_FW_VERSION:
+ strlcpy(ar->wiphy->fw_version, data,
+ sizeof(ar->wiphy->fw_version));
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "found fw version %s\n",
+ ar->wiphy->fw_version);
+ break;
case ATH6KL_FW_IE_OTP_IMAGE:
ath6kl_dbg(ATH6KL_DBG_BOOT, "found otp image ie (%zd B)\n",
ie_len);
@@ -991,9 +1004,6 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
ar->hw.reserved_ram_size);
break;
case ATH6KL_FW_IE_CAPABILITIES:
- if (ie_len < DIV_ROUND_UP(ATH6KL_FW_CAPABILITY_MAX, 8))
- break;
-
ath6kl_dbg(ATH6KL_DBG_BOOT,
"found firmware capabilities ie (%zd B)\n",
ie_len);
@@ -1002,6 +1012,9 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
index = i / 8;
bit = i % 8;
+ if (index == ie_len)
+ break;
+
if (data[index] & (1 << bit))
__set_bit(i, ar->fw_capabilities);
}
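A standalone sketch (not part of the patch) of the index/bit math in the hunk above, showing how the relaxed length check lets a shorter capabilities IE from older firmware simply stop contributing bits instead of being rejected outright:

/* Illustrative only -- standard C, not part of the patch. */
#include <stdio.h>

int main(void)
{
	unsigned char data[] = { 0x05 };	/* example: a 1-byte capabilities IE */
	unsigned int ie_len = sizeof(data);
	unsigned int i;

	for (i = 0; i < 16; i++) {		/* pretend there are 16 capability flags */
		unsigned int index = i / 8;
		unsigned int bit = i % 8;

		if (index == ie_len)		/* same guard the patch adds */
			break;			/* shorter IE: remaining flags stay unset */

		if (data[index] & (1 << bit))
			printf("capability %u is set\n", i);
	}

	return 0;
}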
@@ -1392,6 +1405,12 @@ static int ath6kl_init_upload(struct ath6kl *ar)
ar->version.target_ver == AR6003_HW_2_1_1_VERSION) {
ath6kl_err("temporary war to avoid sdio crc error\n");
+ param = 0x28;
+ address = GPIO_BASE_ADDRESS + GPIO_PIN9_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
param = 0x20;
address = GPIO_BASE_ADDRESS + GPIO_PIN10_ADDRESS;
@@ -1659,6 +1678,9 @@ void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
cfg80211_scan_done(vif->scan_req, true);
vif->scan_req = NULL;
}
+
+ /* need to clean up enhanced bmiss detection fw state */
+ ath6kl_cfg80211_sta_bmiss_enhance(vif, false);
}
void ath6kl_stop_txrx(struct ath6kl *ar)
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index e552447..c189e28 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -554,20 +554,24 @@ void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver,
struct ath6kl *ar = devt;
memcpy(ar->mac_addr, datap, ETH_ALEN);
- ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n",
- __func__, ar->mac_addr);
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "ready event mac addr %pM sw_ver 0x%x abi_ver 0x%x cap 0x%x\n",
+ ar->mac_addr, sw_ver, abi_ver, cap);
ar->version.wlan_ver = sw_ver;
ar->version.abi_ver = abi_ver;
ar->hw.cap = cap;
- snprintf(ar->wiphy->fw_version,
- sizeof(ar->wiphy->fw_version),
- "%u.%u.%u.%u",
- (ar->version.wlan_ver & 0xf0000000) >> 28,
- (ar->version.wlan_ver & 0x0f000000) >> 24,
- (ar->version.wlan_ver & 0x00ff0000) >> 16,
- (ar->version.wlan_ver & 0x0000ffff));
+ if (strlen(ar->wiphy->fw_version) == 0) {
+ snprintf(ar->wiphy->fw_version,
+ sizeof(ar->wiphy->fw_version),
+ "%u.%u.%u.%u",
+ (ar->version.wlan_ver & 0xf0000000) >> 28,
+ (ar->version.wlan_ver & 0x0f000000) >> 24,
+ (ar->version.wlan_ver & 0x00ff0000) >> 16,
+ (ar->version.wlan_ver & 0x0000ffff));
+ }
/* indicate to the waiting thread that the ready event was received */
set_bit(WMI_READY, &ar->flag);
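A quick worked example (illustrative, not from the patch) of the fallback formatting above: the dotted string is just the four bit-fields of wlan_ver, and after this change it is only generated when the firmware did not already supply a version string through the new ATH6KL_FW_IE_FW_VERSION IE.

/* Illustrative only -- standard C, not part of the patch. */
#include <stdio.h>

int main(void)
{
	unsigned int wlan_ver = 0x340000b2;	/* made-up example value */
	char buf[32];

	snprintf(buf, sizeof(buf), "%u.%u.%u.%u",
		 (wlan_ver & 0xf0000000) >> 28,
		 (wlan_ver & 0x0f000000) >> 24,
		 (wlan_ver & 0x00ff0000) >> 16,
		 (wlan_ver & 0x0000ffff));

	printf("%s\n", buf);			/* prints 3.4.0.178 */

	return 0;
}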
@@ -598,7 +602,6 @@ static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
struct ath6kl *ar = vif->ar;
- vif->next_chan = channel;
vif->profile.ch = cpu_to_le16(channel);
switch (vif->nw_type) {
@@ -1167,7 +1170,10 @@ static void ath6kl_set_multicast_list(struct net_device *ndev)
else
clear_bit(NETDEV_MCAST_ALL_ON, &vif->flags);
- mc_all_on = mc_all_on || (vif->ar->state == ATH6KL_STATE_ON);
+ if (test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
+ vif->ar->fw_capabilities)) {
+ mc_all_on = mc_all_on || (vif->ar->state == ATH6KL_STATE_ON);
+ }
if (!(ndev->flags & IFF_MULTICAST)) {
mc_all_on = false;
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
index 78e0ef4..a98c12b 100644
--- a/drivers/net/wireless/ath/ath6kl/target.h
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -45,6 +45,7 @@
#define LPO_CAL_ENABLE_S 20
#define LPO_CAL_ENABLE 0x00100000
+#define GPIO_PIN9_ADDRESS 0x0000004c
#define GPIO_PIN10_ADDRESS 0x00000050
#define GPIO_PIN11_ADDRESS 0x00000054
#define GPIO_PIN12_ADDRESS 0x00000058
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 67206ae..7dfa0fd 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -1036,6 +1036,7 @@ static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
rxtid = &agg_conn->rx_tid[tid];
stats = &agg_conn->stat[tid];
+ spin_lock_bh(&rxtid->lock);
idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
/*
@@ -1054,8 +1055,6 @@ static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
seq_end = seq_no ? seq_no : rxtid->seq_next;
idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);
- spin_lock_bh(&rxtid->lock);
-
do {
node = &rxtid->hold_q[idx];
if ((order == 1) && (!node->skb))
@@ -1127,11 +1126,13 @@ static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
((end > extended_end) && (cur > extended_end) &&
(cur < end))) {
aggr_deque_frms(agg_conn, tid, 0, 0);
+ spin_lock_bh(&rxtid->lock);
if (cur >= rxtid->hold_q_sz - 1)
rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
else
rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
(rxtid->hold_q_sz - 2 - cur);
+ spin_unlock_bh(&rxtid->lock);
} else {
/*
* Dequeue only those frames that are outside the
@@ -1185,25 +1186,25 @@ static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
aggr_deque_frms(agg_conn, tid, 0, 1);
if (agg_conn->timer_scheduled)
- rxtid->progress = true;
- else
- for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
- if (rxtid->hold_q[idx].skb) {
- /*
- * There is a frame in the queue and no
- * timer so start a timer to ensure that
- * the frame doesn't remain stuck
- * forever.
- */
- agg_conn->timer_scheduled = true;
- mod_timer(&agg_conn->timer,
- (jiffies +
- HZ * (AGGR_RX_TIMEOUT) / 1000));
- rxtid->progress = false;
- rxtid->timer_mon = true;
- break;
- }
+ return is_queued;
+
+ spin_lock_bh(&rxtid->lock);
+ for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
+ if (rxtid->hold_q[idx].skb) {
+ /*
+ * There is a frame in the queue and no
+ * timer so start a timer to ensure that
+ * the frame doesn't remain stuck
+ * forever.
+ */
+ agg_conn->timer_scheduled = true;
+ mod_timer(&agg_conn->timer,
+ (jiffies + (HZ * AGGR_RX_TIMEOUT) / 1000));
+ rxtid->timer_mon = true;
+ break;
}
+ }
+ spin_unlock_bh(&rxtid->lock);
return is_queued;
}
@@ -1608,7 +1609,7 @@ static void aggr_timeout(unsigned long arg)
rxtid = &aggr_conn->rx_tid[i];
stats = &aggr_conn->stat[i];
- if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
+ if (!rxtid->aggr || !rxtid->timer_mon)
continue;
stats->num_timeouts++;
@@ -1626,14 +1627,15 @@ static void aggr_timeout(unsigned long arg)
rxtid = &aggr_conn->rx_tid[i];
if (rxtid->aggr && rxtid->hold_q) {
+ spin_lock_bh(&rxtid->lock);
for (j = 0; j < rxtid->hold_q_sz; j++) {
if (rxtid->hold_q[j].skb) {
aggr_conn->timer_scheduled = true;
rxtid->timer_mon = true;
- rxtid->progress = false;
break;
}
}
+ spin_unlock_bh(&rxtid->lock);
if (j >= rxtid->hold_q_sz)
rxtid->timer_mon = false;
@@ -1660,7 +1662,6 @@ static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
aggr_deque_frms(aggr_conn, tid, 0, 0);
rxtid->aggr = false;
- rxtid->progress = false;
rxtid->timer_mon = false;
rxtid->win_sz = 0;
rxtid->seq_next = 0;
@@ -1739,7 +1740,6 @@ void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
for (i = 0; i < NUM_OF_TIDS; i++) {
rxtid = &aggr_conn->rx_tid[i];
rxtid->aggr = false;
- rxtid->progress = false;
rxtid->timer_mon = false;
skb_queue_head_init(&rxtid->q);
spin_lock_init(&rxtid->lock);
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index ee8ec23..c30ab4b 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -474,7 +474,7 @@ static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap,
return -EINVAL;
}
id = vif->last_roc_id;
- cfg80211_ready_on_channel(vif->ndev, id, chan, NL80211_CHAN_NO_HT,
+ cfg80211_ready_on_channel(&vif->wdev, id, chan, NL80211_CHAN_NO_HT,
dur, GFP_ATOMIC);
return 0;
@@ -513,7 +513,7 @@ static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi,
else
id = vif->last_roc_id; /* timeout on uncanceled r-o-c */
vif->last_cancel_roc_id = 0;
- cfg80211_remain_on_channel_expired(vif->ndev, id, chan,
+ cfg80211_remain_on_channel_expired(&vif->wdev, id, chan,
NL80211_CHAN_NO_HT, GFP_ATOMIC);
return 0;
@@ -533,7 +533,7 @@ static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len,
ath6kl_dbg(ATH6KL_DBG_WMI, "tx_status: id=%x ack_status=%u\n",
id, ev->ack_status);
if (wmi->last_mgmt_tx_frame) {
- cfg80211_mgmt_tx_status(vif->ndev, id,
+ cfg80211_mgmt_tx_status(&vif->wdev, id,
wmi->last_mgmt_tx_frame,
wmi->last_mgmt_tx_frame_len,
!!ev->ack_status, GFP_ATOMIC);
@@ -568,7 +568,7 @@ static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len,
dlen, freq, vif->probe_req_report);
if (vif->probe_req_report || vif->nw_type == AP_NETWORK)
- cfg80211_rx_mgmt(vif->ndev, freq, 0,
+ cfg80211_rx_mgmt(&vif->wdev, freq, 0,
ev->data, dlen, GFP_ATOMIC);
return 0;
@@ -608,7 +608,7 @@ static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len,
return -EINVAL;
}
ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq);
- cfg80211_rx_mgmt(vif->ndev, freq, 0,
+ cfg80211_rx_mgmt(&vif->wdev, freq, 0,
ev->data, dlen, GFP_ATOMIC);
return 0;
@@ -743,7 +743,6 @@ int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
return -ENOMEM;
cmd = (struct roam_ctrl_cmd *) skb->data;
- memset(cmd, 0, sizeof(*cmd));
memcpy(cmd->info.bssid, bssid, ETH_ALEN);
cmd->roam_ctrl = WMI_FORCE_ROAM;
@@ -753,6 +752,22 @@ int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
NO_SYNC_WMIFLAG);
}
+int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period)
+{
+ struct sk_buff *skb;
+ struct set_dtim_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct set_dtim_cmd *) skb->data;
+
+ cmd->dtim_period = cpu_to_le32(dtim_period);
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_AP_SET_DTIM_CMDID, NO_SYNC_WMIFLAG);
+}
+
int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode)
{
struct sk_buff *skb;
@@ -763,7 +778,6 @@ int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode)
return -ENOMEM;
cmd = (struct roam_ctrl_cmd *) skb->data;
- memset(cmd, 0, sizeof(*cmd));
cmd->info.roam_mode = mode;
cmd->roam_ctrl = WMI_SET_ROAM_MODE;
@@ -1995,7 +2009,7 @@ int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
struct wmi_probed_ssid_cmd *cmd;
int ret;
- if (index > MAX_PROBED_SSID_INDEX)
+ if (index >= MAX_PROBED_SSIDS)
return -EINVAL;
if (ssid_len > sizeof(cmd->ssid))
@@ -2599,6 +2613,115 @@ static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi)
spin_unlock_bh(&wmi->lock);
}
+static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct sk_buff *skb;
+ int ret, mode, band;
+ u64 mcsrate, ratemask[IEEE80211_NUM_BANDS];
+ struct wmi_set_tx_select_rates64_cmd *cmd;
+
+ memset(&ratemask, 0, sizeof(ratemask));
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ /* copy legacy rate mask */
+ ratemask[band] = mask->control[band].legacy;
+ if (band == IEEE80211_BAND_5GHZ)
+ ratemask[band] =
+ mask->control[band].legacy << 4;
+
+ /* copy mcs rate mask */
+ mcsrate = mask->control[band].mcs[1];
+ mcsrate <<= 8;
+ mcsrate |= mask->control[band].mcs[0];
+ ratemask[band] |= mcsrate << 12;
+ ratemask[band] |= mcsrate << 28;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "Ratemask 64 bit: 2.4:%llx 5:%llx\n",
+ ratemask[0], ratemask[1]);
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd) * WMI_RATES_MODE_MAX);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_tx_select_rates64_cmd *) skb->data;
+ for (mode = 0; mode < WMI_RATES_MODE_MAX; mode++) {
+ /* A mode operate in 5GHZ band */
+ if (mode == WMI_RATES_MODE_11A ||
+ mode == WMI_RATES_MODE_11A_HT20 ||
+ mode == WMI_RATES_MODE_11A_HT40)
+ band = IEEE80211_BAND_5GHZ;
+ else
+ band = IEEE80211_BAND_2GHZ;
+ cmd->ratemask[mode] = cpu_to_le64(ratemask[band]);
+ }
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_SET_TX_SELECT_RATES_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct sk_buff *skb;
+ int ret, mode, band;
+ u32 mcsrate, ratemask[IEEE80211_NUM_BANDS];
+ struct wmi_set_tx_select_rates32_cmd *cmd;
+
+ memset(&ratemask, 0, sizeof(ratemask));
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ /* copy legacy rate mask */
+ ratemask[band] = mask->control[band].legacy;
+ if (band == IEEE80211_BAND_5GHZ)
+ ratemask[band] =
+ mask->control[band].legacy << 4;
+
+ /* copy mcs rate mask */
+ mcsrate = mask->control[band].mcs[0];
+ ratemask[band] |= mcsrate << 12;
+ ratemask[band] |= mcsrate << 20;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "Ratemask 32 bit: 2.4:%x 5:%x\n",
+ ratemask[0], ratemask[1]);
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd) * WMI_RATES_MODE_MAX);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_tx_select_rates32_cmd *) skb->data;
+ for (mode = 0; mode < WMI_RATES_MODE_MAX; mode++) {
+ /* A mode operate in 5GHZ band */
+ if (mode == WMI_RATES_MODE_11A ||
+ mode == WMI_RATES_MODE_11A_HT20 ||
+ mode == WMI_RATES_MODE_11A_HT40)
+ band = IEEE80211_BAND_5GHZ;
+ else
+ band = IEEE80211_BAND_2GHZ;
+ cmd->ratemask[mode] = cpu_to_le32(ratemask[band]);
+ }
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_SET_TX_SELECT_RATES_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_set_bitrate_mask(struct wmi *wmi, u8 if_idx,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ath6kl *ar = wmi->parent_dev;
+
+ if (ar->hw.flags & ATH6KL_HW_FLAG_64BIT_RATES)
+ return ath6kl_set_bitrate_mask64(wmi, if_idx, mask);
+ else
+ return ath6kl_set_bitrate_mask32(wmi, if_idx, mask);
+}
+
int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
enum ath6kl_host_mode host_mode)
{
@@ -2997,6 +3120,25 @@ int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx,
return ret;
}
+int ath6kl_wmi_sta_bmiss_enhance_cmd(struct wmi *wmi, u8 if_idx, bool enhance)
+{
+ struct sk_buff *skb;
+ struct wmi_sta_bmiss_enhance_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_bmiss_enhance_cmd *) skb->data;
+ cmd->enable = enhance ? 1 : 0;
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_STA_BMISS_ENHANCE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
s32 ath6kl_wmi_get_rate(s8 rate_index)
{
if (rate_index == RATE_AUTO)
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 9076bec..43339ac 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -624,6 +624,10 @@ enum wmi_cmd_id {
WMI_SEND_MGMT_CMDID,
WMI_BEGIN_SCAN_CMDID,
+ WMI_SET_BLACK_LIST,
+ WMI_SET_MCASTRATE,
+
+ WMI_STA_BMISS_ENHANCE_CMDID,
};
enum wmi_mgmt_frame_type {
@@ -960,6 +964,9 @@ enum wmi_bss_filter {
/* beacons matching probed ssid */
PROBED_SSID_FILTER,
+ /* beacons matching matched ssid */
+ MATCHED_SSID_FILTER,
+
/* marker only */
LAST_BSS_FILTER,
};
@@ -978,7 +985,7 @@ struct wmi_bss_filter_cmd {
} __packed;
/* WMI_SET_PROBED_SSID_CMDID */
-#define MAX_PROBED_SSID_INDEX 9
+#define MAX_PROBED_SSIDS 16
enum wmi_ssid_flag {
/* disables entry */
@@ -989,10 +996,13 @@ enum wmi_ssid_flag {
/* probes for any ssid */
ANY_SSID_FLAG = 0x02,
+
+ /* match for ssid */
+ MATCH_SSID_FLAG = 0x08,
};
struct wmi_probed_ssid_cmd {
- /* 0 to MAX_PROBED_SSID_INDEX */
+ /* 0 to MAX_PROBED_SSIDS - 1 */
u8 entry_index;
/* see, enum wmi_ssid_flg */
@@ -1017,6 +1027,11 @@ struct wmi_bmiss_time_cmd {
__le16 num_beacons;
};
+/* WMI_STA_BMISS_ENHANCE_CMDID */
+struct wmi_sta_bmiss_enhance_cmd {
+ u8 enable;
+} __packed;
+
/* WMI_SET_POWER_MODE_CMDID */
enum wmi_power_mode {
REC_POWER = 0x01,
@@ -1048,6 +1063,36 @@ struct wmi_power_params_cmd {
__le16 ps_fail_event_policy;
} __packed;
+/*
+ * The ratemask for the modes below should be passed
+ * to WMI_SET_TX_SELECT_RATES_CMDID.
+ * AR6003 has a 32 bit mask for each mode.
+ * The first 12 bits are for legacy rates, bits 13 to 20
+ * for HT20 rates and bits 21 to 28 for HT40 rates.
+ */
+enum wmi_mode_phy {
+ WMI_RATES_MODE_11A = 0,
+ WMI_RATES_MODE_11G,
+ WMI_RATES_MODE_11B,
+ WMI_RATES_MODE_11GONLY,
+ WMI_RATES_MODE_11A_HT20,
+ WMI_RATES_MODE_11G_HT20,
+ WMI_RATES_MODE_11A_HT40,
+ WMI_RATES_MODE_11G_HT40,
+ WMI_RATES_MODE_MAX
+};
+
+/* WMI_SET_TX_SELECT_RATES_CMDID */
+struct wmi_set_tx_select_rates32_cmd {
+ __le32 ratemask[WMI_RATES_MODE_MAX];
+} __packed;
+
+/* WMI_SET_TX_SELECT_RATES_CMDID */
+struct wmi_set_tx_select_rates64_cmd {
+ __le64 ratemask[WMI_RATES_MODE_MAX];
+} __packed;
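/*
 * Worked example (illustrative, not part of the patch), following the
 * layout comment above for the 32-bit AR6003 format on 2.4 GHz with a
 * legacy mask of 0xff and HT mcs[0] of 0xff:
 *
 *     0x000000ff   legacy rates, bits 0-11
 *   | 0x000ff000   HT20 rates,   bits 12-19  (0xff << 12)
 *   | 0x0ff00000   HT40 rates,   bits 20-27  (0xff << 20)
 *   = 0x0ffff0ff
 *
 * On 5 GHz the legacy part is first shifted left by 4, matching
 * ath6kl_set_bitrate_mask32() earlier in this patch.
 */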
+
/* WMI_SET_DISC_TIMEOUT_CMDID */
struct wmi_disc_timeout_cmd {
/* seconds */
@@ -1572,6 +1617,10 @@ struct roam_ctrl_cmd {
u8 roam_ctrl;
} __packed;
+struct set_dtim_cmd {
+ __le32 dtim_period;
+} __packed;
+
/* BSS INFO HDR version 2.0 */
struct wmi_bss_info_hdr2 {
__le16 ch; /* frequency in MHz */
@@ -2532,6 +2581,8 @@ int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, u8 if_idx,
__be32 ips0, __be32 ips1);
int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
enum ath6kl_host_mode host_mode);
+int ath6kl_wmi_set_bitrate_mask(struct wmi *wmi, u8 if_idx,
+ const struct cfg80211_bitrate_mask *mask);
int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
enum ath6kl_wow_mode wow_mode,
u32 filter, u16 host_req_delay);
@@ -2542,11 +2593,14 @@ int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
u16 list_id, u16 filter_id);
int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
+int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period);
int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode);
int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on);
int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx,
u8 *filter, bool add_filter);
+int ath6kl_wmi_sta_bmiss_enhance_cmd(struct wmi *wmi, u8 if_idx, bool enable);
+
/* AP mode uAPSD */
int ath6kl_wmi_ap_set_apsd(struct wmi *wmi, u8 if_idx, u8 enable);
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index e507e78..c7aa664 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -64,7 +64,7 @@ config ATH9K_DEBUGFS
config ATH9K_DFS_CERTIFIED
bool "Atheros DFS support for certified platforms"
- depends on ATH9K && EXPERT
+ depends on ATH9K && CFG80211_CERTIFICATION_ONUS
default n
---help---
This option enables DFS support for initiating radiation on
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 3f0b8472..2ad8f94 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -3,7 +3,9 @@ ath9k-y += beacon.o \
init.o \
main.o \
recv.o \
- xmit.o
+ xmit.o \
+ link.o \
+ antenna.o
ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
@@ -15,6 +17,7 @@ ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += \
dfs.o \
dfs_pattern_detector.o \
dfs_pri_detector.o
+ath9k-$(CONFIG_PM_SLEEP) += wow.o
obj-$(CONFIG_ATH9K) += ath9k.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 5e47ca6..3a69804 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -35,6 +35,10 @@ static const struct platform_device_id ath9k_platform_id_table[] = {
.name = "ar934x_wmac",
.driver_data = AR9300_DEVID_AR9340,
},
+ {
+ .name = "qca955x_wmac",
+ .driver_data = AR9300_DEVID_QCA955X,
+ },
{},
};
@@ -126,7 +130,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
sc->irq = irq;
/* Will be cleared in ath9k_start() */
- sc->sc_flags |= SC_OP_INVALID;
+ set_bit(SC_OP_INVALID, &sc->sc_flags);
ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index b4c77f9..ff007f5 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -104,11 +104,6 @@ static const struct ani_cck_level_entry cck_level_table[] = {
#define ATH9K_ANI_CCK_DEF_LEVEL \
2 /* default level - matches the INI settings */
-static bool use_new_ani(struct ath_hw *ah)
-{
- return AR_SREV_9300_20_OR_LATER(ah) || modparam_force_new_ani;
-}
-
static void ath9k_hw_update_mibstats(struct ath_hw *ah,
struct ath9k_mib_stats *stats)
{
@@ -122,8 +117,6 @@ static void ath9k_hw_update_mibstats(struct ath_hw *ah,
static void ath9k_ani_restart(struct ath_hw *ah)
{
struct ar5416AniState *aniState;
- struct ath_common *common = ath9k_hw_common(ah);
- u32 ofdm_base = 0, cck_base = 0;
if (!DO_ANI(ah))
return;
@@ -131,18 +124,10 @@ static void ath9k_ani_restart(struct ath_hw *ah)
aniState = &ah->curchan->ani;
aniState->listenTime = 0;
- if (!use_new_ani(ah)) {
- ofdm_base = AR_PHY_COUNTMAX - ah->config.ofdm_trig_high;
- cck_base = AR_PHY_COUNTMAX - ah->config.cck_trig_high;
- }
-
- ath_dbg(common, ANI, "Writing ofdmbase=%u cckbase=%u\n",
- ofdm_base, cck_base);
-
ENABLE_REGWRITE_BUFFER(ah);
- REG_WRITE(ah, AR_PHY_ERR_1, ofdm_base);
- REG_WRITE(ah, AR_PHY_ERR_2, cck_base);
+ REG_WRITE(ah, AR_PHY_ERR_1, 0);
+ REG_WRITE(ah, AR_PHY_ERR_2, 0);
REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
@@ -154,129 +139,23 @@ static void ath9k_ani_restart(struct ath_hw *ah)
aniState->cckPhyErrCount = 0;
}
-static void ath9k_hw_ani_ofdm_err_trigger_old(struct ath_hw *ah)
-{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
- struct ar5416AniState *aniState;
- int32_t rssi;
-
- aniState = &ah->curchan->ani;
-
- if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
- if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
- aniState->noiseImmunityLevel + 1)) {
- return;
- }
- }
-
- if (aniState->spurImmunityLevel < HAL_SPUR_IMMUNE_MAX) {
- if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
- aniState->spurImmunityLevel + 1)) {
- return;
- }
- }
-
- if (ah->opmode == NL80211_IFTYPE_AP) {
- if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
- ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel + 1);
- }
- return;
- }
- rssi = BEACON_RSSI(ah);
- if (rssi > aniState->rssiThrHigh) {
- if (!aniState->ofdmWeakSigDetectOff) {
- if (ath9k_hw_ani_control(ah,
- ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
- false)) {
- ath9k_hw_ani_control(ah,
- ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0);
- return;
- }
- }
- if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
- ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel + 1);
- return;
- }
- } else if (rssi > aniState->rssiThrLow) {
- if (aniState->ofdmWeakSigDetectOff)
- ath9k_hw_ani_control(ah,
- ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
- true);
- if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
- ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel + 1);
- return;
- } else {
- if ((conf->channel->band == IEEE80211_BAND_2GHZ) &&
- !conf_is_ht(conf)) {
- if (!aniState->ofdmWeakSigDetectOff)
- ath9k_hw_ani_control(ah,
- ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
- false);
- if (aniState->firstepLevel > 0)
- ath9k_hw_ani_control(ah,
- ATH9K_ANI_FIRSTEP_LEVEL, 0);
- return;
- }
- }
-}
-
-static void ath9k_hw_ani_cck_err_trigger_old(struct ath_hw *ah)
-{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
- struct ar5416AniState *aniState;
- int32_t rssi;
-
- aniState = &ah->curchan->ani;
- if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
- if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
- aniState->noiseImmunityLevel + 1)) {
- return;
- }
- }
- if (ah->opmode == NL80211_IFTYPE_AP) {
- if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
- ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel + 1);
- }
- return;
- }
- rssi = BEACON_RSSI(ah);
- if (rssi > aniState->rssiThrLow) {
- if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
- ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel + 1);
- } else {
- if ((conf->channel->band == IEEE80211_BAND_2GHZ) &&
- !conf_is_ht(conf)) {
- if (aniState->firstepLevel > 0)
- ath9k_hw_ani_control(ah,
- ATH9K_ANI_FIRSTEP_LEVEL, 0);
- }
- }
-}
-
/* Adjust the OFDM Noise Immunity Level */
-static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
+static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
+ bool scan)
{
struct ar5416AniState *aniState = &ah->curchan->ani;
struct ath_common *common = ath9k_hw_common(ah);
const struct ani_ofdm_level_entry *entry_ofdm;
const struct ani_cck_level_entry *entry_cck;
-
- aniState->noiseFloor = BEACON_RSSI(ah);
+ bool weak_sig;
ath_dbg(common, ANI, "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
aniState->ofdmNoiseImmunityLevel,
- immunityLevel, aniState->noiseFloor,
+ immunityLevel, BEACON_RSSI(ah),
aniState->rssiThrLow, aniState->rssiThrHigh);
- if (aniState->update_ani)
- aniState->ofdmNoiseImmunityLevel =
- (immunityLevel > ATH9K_ANI_OFDM_DEF_LEVEL) ?
- immunityLevel : ATH9K_ANI_OFDM_DEF_LEVEL;
+ if (!scan)
+ aniState->ofdmNoiseImmunityLevel = immunityLevel;
entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
@@ -292,12 +171,22 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
ATH9K_ANI_FIRSTEP_LEVEL,
entry_ofdm->fir_step_level);
- if ((aniState->noiseFloor >= aniState->rssiThrHigh) &&
- (!aniState->ofdmWeakSigDetectOff !=
- entry_ofdm->ofdm_weak_signal_on)) {
+ weak_sig = entry_ofdm->ofdm_weak_signal_on;
+ if (ah->opmode == NL80211_IFTYPE_STATION &&
+ BEACON_RSSI(ah) <= aniState->rssiThrHigh)
+ weak_sig = true;
+
+ if (aniState->ofdmWeakSigDetect != weak_sig)
ath9k_hw_ani_control(ah,
ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
entry_ofdm->ofdm_weak_signal_on);
+
+ if (aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL) {
+ ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
+ ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI;
+ } else {
+ ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI;
+ ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW;
}
}
@@ -308,43 +197,35 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
if (!DO_ANI(ah))
return;
- if (!use_new_ani(ah)) {
- ath9k_hw_ani_ofdm_err_trigger_old(ah);
- return;
- }
-
aniState = &ah->curchan->ani;
if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL)
- ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1);
+ ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1, false);
}
/*
* Set the ANI settings to match an CCK level.
*/
-static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
+static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
+ bool scan)
{
struct ar5416AniState *aniState = &ah->curchan->ani;
struct ath_common *common = ath9k_hw_common(ah);
const struct ani_ofdm_level_entry *entry_ofdm;
const struct ani_cck_level_entry *entry_cck;
- aniState->noiseFloor = BEACON_RSSI(ah);
ath_dbg(common, ANI, "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
aniState->cckNoiseImmunityLevel, immunityLevel,
- aniState->noiseFloor, aniState->rssiThrLow,
+ BEACON_RSSI(ah), aniState->rssiThrLow,
aniState->rssiThrHigh);
- if ((ah->opmode == NL80211_IFTYPE_STATION ||
- ah->opmode == NL80211_IFTYPE_ADHOC) &&
- aniState->noiseFloor <= aniState->rssiThrLow &&
+ if (ah->opmode == NL80211_IFTYPE_STATION &&
+ BEACON_RSSI(ah) <= aniState->rssiThrLow &&
immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI)
immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI;
- if (aniState->update_ani)
- aniState->cckNoiseImmunityLevel =
- (immunityLevel > ATH9K_ANI_CCK_DEF_LEVEL) ?
- immunityLevel : ATH9K_ANI_CCK_DEF_LEVEL;
+ if (!scan)
+ aniState->cckNoiseImmunityLevel = immunityLevel;
entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
@@ -359,7 +240,7 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah))
return;
- if (aniState->mrcCCKOff == entry_cck->mrc_cck_on)
+ if (aniState->mrcCCK != entry_cck->mrc_cck_on)
ath9k_hw_ani_control(ah,
ATH9K_ANI_MRC_CCK,
entry_cck->mrc_cck_on);
@@ -372,68 +253,11 @@ static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
if (!DO_ANI(ah))
return;
- if (!use_new_ani(ah)) {
- ath9k_hw_ani_cck_err_trigger_old(ah);
- return;
- }
-
aniState = &ah->curchan->ani;
if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL)
- ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1);
-}
-
-static void ath9k_hw_ani_lower_immunity_old(struct ath_hw *ah)
-{
- struct ar5416AniState *aniState;
- int32_t rssi;
-
- aniState = &ah->curchan->ani;
-
- if (ah->opmode == NL80211_IFTYPE_AP) {
- if (aniState->firstepLevel > 0) {
- if (ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel - 1))
- return;
- }
- } else {
- rssi = BEACON_RSSI(ah);
- if (rssi > aniState->rssiThrHigh) {
- /* XXX: Handle me */
- } else if (rssi > aniState->rssiThrLow) {
- if (aniState->ofdmWeakSigDetectOff) {
- if (ath9k_hw_ani_control(ah,
- ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
- true))
- return;
- }
- if (aniState->firstepLevel > 0) {
- if (ath9k_hw_ani_control(ah,
- ATH9K_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel - 1))
- return;
- }
- } else {
- if (aniState->firstepLevel > 0) {
- if (ath9k_hw_ani_control(ah,
- ATH9K_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel - 1))
- return;
- }
- }
- }
-
- if (aniState->spurImmunityLevel > 0) {
- if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
- aniState->spurImmunityLevel - 1))
- return;
- }
-
- if (aniState->noiseImmunityLevel > 0) {
- ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
- aniState->noiseImmunityLevel - 1);
- return;
- }
+ ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1,
+ false);
}
/*
@@ -446,87 +270,18 @@ static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
aniState = &ah->curchan->ani;
- if (!use_new_ani(ah)) {
- ath9k_hw_ani_lower_immunity_old(ah);
- return;
- }
-
/* lower OFDM noise immunity */
if (aniState->ofdmNoiseImmunityLevel > 0 &&
(aniState->ofdmsTurn || aniState->cckNoiseImmunityLevel == 0)) {
- ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel - 1);
+ ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel - 1,
+ false);
return;
}
/* lower CCK noise immunity */
if (aniState->cckNoiseImmunityLevel > 0)
- ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel - 1);
-}
-
-static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
-{
- struct ar5416AniState *aniState;
- struct ath9k_channel *chan = ah->curchan;
- struct ath_common *common = ath9k_hw_common(ah);
-
- if (!DO_ANI(ah))
- return;
-
- aniState = &ah->curchan->ani;
-
- if (ah->opmode != NL80211_IFTYPE_STATION
- && ah->opmode != NL80211_IFTYPE_ADHOC) {
- ath_dbg(common, ANI, "Reset ANI state opmode %u\n", ah->opmode);
- ah->stats.ast_ani_reset++;
-
- if (ah->opmode == NL80211_IFTYPE_AP) {
- /*
- * ath9k_hw_ani_control() will only process items set on
- * ah->ani_function
- */
- if (IS_CHAN_2GHZ(chan))
- ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL |
- ATH9K_ANI_FIRSTEP_LEVEL);
- else
- ah->ani_function = 0;
- }
-
- ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 0);
- ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0);
- ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 0);
- ath9k_hw_ani_control(ah, ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
- !ATH9K_ANI_USE_OFDM_WEAK_SIG);
- ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
- ATH9K_ANI_CCK_WEAK_SIG_THR);
-
- ath9k_ani_restart(ah);
- return;
- }
-
- if (aniState->noiseImmunityLevel != 0)
- ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
- aniState->noiseImmunityLevel);
- if (aniState->spurImmunityLevel != 0)
- ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
- aniState->spurImmunityLevel);
- if (aniState->ofdmWeakSigDetectOff)
- ath9k_hw_ani_control(ah, ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
- !aniState->ofdmWeakSigDetectOff);
- if (aniState->cckWeakSigThreshold)
- ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
- aniState->cckWeakSigThreshold);
- if (aniState->firstepLevel != 0)
- ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel);
-
- ath9k_ani_restart(ah);
-
- ENABLE_REGWRITE_BUFFER(ah);
-
- REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
- REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
-
- REGWRITE_BUFFER_FLUSH(ah);
+ ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel - 1,
+ false);
}
/*
@@ -539,13 +294,11 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
struct ar5416AniState *aniState = &ah->curchan->ani;
struct ath9k_channel *chan = ah->curchan;
struct ath_common *common = ath9k_hw_common(ah);
+ int ofdm_nil, cck_nil;
if (!DO_ANI(ah))
return;
- if (!use_new_ani(ah))
- return ath9k_ani_reset_old(ah, is_scanning);
-
BUG_ON(aniState == NULL);
ah->stats.ast_ani_reset++;
@@ -563,6 +316,11 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
/* always allow mode (on/off) to be controlled */
ah->ani_function |= ATH9K_ANI_MODE;
+ ofdm_nil = max_t(int, ATH9K_ANI_OFDM_DEF_LEVEL,
+ aniState->ofdmNoiseImmunityLevel);
+ cck_nil = max_t(int, ATH9K_ANI_CCK_DEF_LEVEL,
+ aniState->cckNoiseImmunityLevel);
+
if (is_scanning ||
(ah->opmode != NL80211_IFTYPE_STATION &&
ah->opmode != NL80211_IFTYPE_ADHOC)) {
@@ -585,9 +343,8 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
aniState->ofdmNoiseImmunityLevel,
aniState->cckNoiseImmunityLevel);
- aniState->update_ani = false;
- ath9k_hw_set_ofdm_nil(ah, ATH9K_ANI_OFDM_DEF_LEVEL);
- ath9k_hw_set_cck_nil(ah, ATH9K_ANI_CCK_DEF_LEVEL);
+ ofdm_nil = ATH9K_ANI_OFDM_DEF_LEVEL;
+ cck_nil = ATH9K_ANI_CCK_DEF_LEVEL;
}
} else {
/*
@@ -601,13 +358,9 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
is_scanning,
aniState->ofdmNoiseImmunityLevel,
aniState->cckNoiseImmunityLevel);
-
- aniState->update_ani = true;
- ath9k_hw_set_ofdm_nil(ah,
- aniState->ofdmNoiseImmunityLevel);
- ath9k_hw_set_cck_nil(ah,
- aniState->cckNoiseImmunityLevel);
}
+ ath9k_hw_set_ofdm_nil(ah, ofdm_nil, is_scanning);
+ ath9k_hw_set_cck_nil(ah, cck_nil, is_scanning);
/*
* enable phy counters if hw supports or if not, enable phy
@@ -627,9 +380,6 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ar5416AniState *aniState = &ah->curchan->ani;
- u32 ofdm_base = 0;
- u32 cck_base = 0;
- u32 ofdmPhyErrCnt, cckPhyErrCnt;
u32 phyCnt1, phyCnt2;
int32_t listenTime;
@@ -642,11 +392,6 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
return false;
}
- if (!use_new_ani(ah)) {
- ofdm_base = AR_PHY_COUNTMAX - ah->config.ofdm_trig_high;
- cck_base = AR_PHY_COUNTMAX - ah->config.cck_trig_high;
- }
-
aniState->listenTime += listenTime;
ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
@@ -654,35 +399,12 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
- if (!use_new_ani(ah) && (phyCnt1 < ofdm_base || phyCnt2 < cck_base)) {
- if (phyCnt1 < ofdm_base) {
- ath_dbg(common, ANI,
- "phyCnt1 0x%x, resetting counter value to 0x%x\n",
- phyCnt1, ofdm_base);
- REG_WRITE(ah, AR_PHY_ERR_1, ofdm_base);
- REG_WRITE(ah, AR_PHY_ERR_MASK_1,
- AR_PHY_ERR_OFDM_TIMING);
- }
- if (phyCnt2 < cck_base) {
- ath_dbg(common, ANI,
- "phyCnt2 0x%x, resetting counter value to 0x%x\n",
- phyCnt2, cck_base);
- REG_WRITE(ah, AR_PHY_ERR_2, cck_base);
- REG_WRITE(ah, AR_PHY_ERR_MASK_2,
- AR_PHY_ERR_CCK_TIMING);
- }
- return false;
- }
+ ah->stats.ast_ani_ofdmerrs += phyCnt1 - aniState->ofdmPhyErrCount;
+ aniState->ofdmPhyErrCount = phyCnt1;
- ofdmPhyErrCnt = phyCnt1 - ofdm_base;
- ah->stats.ast_ani_ofdmerrs +=
- ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
- aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
+ ah->stats.ast_ani_cckerrs += phyCnt2 - aniState->cckPhyErrCount;
+ aniState->cckPhyErrCount = phyCnt2;
- cckPhyErrCnt = phyCnt2 - cck_base;
- ah->stats.ast_ani_cckerrs +=
- cckPhyErrCnt - aniState->cckPhyErrCount;
- aniState->cckPhyErrCount = cckPhyErrCnt;
return true;
}
@@ -716,21 +438,10 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
if (aniState->listenTime > ah->aniperiod) {
if (cckPhyErrRate < ah->config.cck_trig_low &&
- ((ofdmPhyErrRate < ah->config.ofdm_trig_low &&
- aniState->ofdmNoiseImmunityLevel <
- ATH9K_ANI_OFDM_DEF_LEVEL) ||
- (ofdmPhyErrRate < ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI &&
- aniState->ofdmNoiseImmunityLevel >=
- ATH9K_ANI_OFDM_DEF_LEVEL))) {
+ ofdmPhyErrRate < ah->config.ofdm_trig_low) {
ath9k_hw_ani_lower_immunity(ah);
aniState->ofdmsTurn = !aniState->ofdmsTurn;
- } else if ((ofdmPhyErrRate > ah->config.ofdm_trig_high &&
- aniState->ofdmNoiseImmunityLevel >=
- ATH9K_ANI_OFDM_DEF_LEVEL) ||
- (ofdmPhyErrRate >
- ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI &&
- aniState->ofdmNoiseImmunityLevel <
- ATH9K_ANI_OFDM_DEF_LEVEL)) {
+ } else if (ofdmPhyErrRate > ah->config.ofdm_trig_high) {
ath9k_hw_ani_ofdm_err_trigger(ah);
aniState->ofdmsTurn = false;
} else if (cckPhyErrRate > ah->config.cck_trig_high) {
@@ -778,49 +489,6 @@ void ath9k_hw_disable_mib_counters(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_disable_mib_counters);
-/*
- * Process a MIB interrupt. We may potentially be invoked because
- * any of the MIB counters overflow/trigger so don't assume we're
- * here because a PHY error counter triggered.
- */
-void ath9k_hw_proc_mib_event(struct ath_hw *ah)
-{
- u32 phyCnt1, phyCnt2;
-
- /* Reset these counters regardless */
- REG_WRITE(ah, AR_FILT_OFDM, 0);
- REG_WRITE(ah, AR_FILT_CCK, 0);
- if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING))
- REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR);
-
- /* Clear the mib counters and save them in the stats */
- ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
-
- if (!DO_ANI(ah)) {
- /*
- * We must always clear the interrupt cause by
- * resetting the phy error regs.
- */
- REG_WRITE(ah, AR_PHY_ERR_1, 0);
- REG_WRITE(ah, AR_PHY_ERR_2, 0);
- return;
- }
-
- /* NB: these are not reset-on-read */
- phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
- phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
- if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) ||
- ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK)) {
-
- if (!use_new_ani(ah))
- ath9k_hw_ani_read_counters(ah);
-
- /* NB: always restart to insure the h/w counters are reset */
- ath9k_ani_restart(ah);
- }
-}
-EXPORT_SYMBOL(ath9k_hw_proc_mib_event);
-
void ath9k_hw_ani_setup(struct ath_hw *ah)
{
int i;
@@ -845,66 +513,37 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
ath_dbg(common, ANI, "Initialize ANI\n");
- if (use_new_ani(ah)) {
- ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_NEW;
- ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_NEW;
+ ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
+ ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW;
- ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH_NEW;
- ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW_NEW;
- } else {
- ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_OLD;
- ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_OLD;
-
- ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH_OLD;
- ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW_OLD;
- }
+ ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH;
+ ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW;
for (i = 0; i < ARRAY_SIZE(ah->channels); i++) {
struct ath9k_channel *chan = &ah->channels[i];
struct ar5416AniState *ani = &chan->ani;
- if (use_new_ani(ah)) {
- ani->spurImmunityLevel =
- ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
+ ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
- ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
+ ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
- if (AR_SREV_9300_20_OR_LATER(ah))
- ani->mrcCCKOff =
- !ATH9K_ANI_ENABLE_MRC_CCK;
- else
- ani->mrcCCKOff = true;
-
- ani->ofdmsTurn = true;
- } else {
- ani->spurImmunityLevel =
- ATH9K_ANI_SPUR_IMMUNE_LVL_OLD;
- ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_OLD;
-
- ani->cckWeakSigThreshold =
- ATH9K_ANI_CCK_WEAK_SIG_THR;
- }
+ ani->mrcCCK = AR_SREV_9300_20_OR_LATER(ah) ? true : false;
+
+ ani->ofdmsTurn = true;
ani->rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH;
ani->rssiThrLow = ATH9K_ANI_RSSI_THR_LOW;
- ani->ofdmWeakSigDetectOff =
- !ATH9K_ANI_USE_OFDM_WEAK_SIG;
+ ani->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
- ani->update_ani = false;
}
/*
* since we expect some ongoing maintenance on the tables, let's sanity
* check here default level should not modify INI setting.
*/
- if (use_new_ani(ah)) {
- ah->aniperiod = ATH9K_ANI_PERIOD_NEW;
- ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_NEW;
- } else {
- ah->aniperiod = ATH9K_ANI_PERIOD_OLD;
- ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_OLD;
- }
+ ah->aniperiod = ATH9K_ANI_PERIOD;
+ ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL;
if (ah->config.enable_ani)
ah->proc_phyerr |= HAL_PROCESS_ANI;
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 72e2b87..1485bf5 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -24,42 +24,34 @@
#define BEACON_RSSI(ahp) (ahp->stats.avgbrssi)
/* units are errors per second */
-#define ATH9K_ANI_OFDM_TRIG_HIGH_OLD 500
-#define ATH9K_ANI_OFDM_TRIG_HIGH_NEW 3500
+#define ATH9K_ANI_OFDM_TRIG_HIGH 3500
#define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000
/* units are errors per second */
-#define ATH9K_ANI_OFDM_TRIG_LOW_OLD 200
-#define ATH9K_ANI_OFDM_TRIG_LOW_NEW 400
+#define ATH9K_ANI_OFDM_TRIG_LOW 400
#define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900
/* units are errors per second */
-#define ATH9K_ANI_CCK_TRIG_HIGH_OLD 200
-#define ATH9K_ANI_CCK_TRIG_HIGH_NEW 600
+#define ATH9K_ANI_CCK_TRIG_HIGH 600
/* units are errors per second */
-#define ATH9K_ANI_CCK_TRIG_LOW_OLD 100
-#define ATH9K_ANI_CCK_TRIG_LOW_NEW 300
+#define ATH9K_ANI_CCK_TRIG_LOW 300
#define ATH9K_ANI_NOISE_IMMUNE_LVL 4
#define ATH9K_ANI_USE_OFDM_WEAK_SIG true
#define ATH9K_ANI_CCK_WEAK_SIG_THR false
-#define ATH9K_ANI_SPUR_IMMUNE_LVL_OLD 7
-#define ATH9K_ANI_SPUR_IMMUNE_LVL_NEW 3
+#define ATH9K_ANI_SPUR_IMMUNE_LVL 3
-#define ATH9K_ANI_FIRSTEP_LVL_OLD 0
-#define ATH9K_ANI_FIRSTEP_LVL_NEW 2
+#define ATH9K_ANI_FIRSTEP_LVL 2
#define ATH9K_ANI_RSSI_THR_HIGH 40
#define ATH9K_ANI_RSSI_THR_LOW 7
-#define ATH9K_ANI_PERIOD_OLD 100
-#define ATH9K_ANI_PERIOD_NEW 300
+#define ATH9K_ANI_PERIOD 300
/* in ms */
-#define ATH9K_ANI_POLLINTERVAL_OLD 100
-#define ATH9K_ANI_POLLINTERVAL_NEW 1000
+#define ATH9K_ANI_POLLINTERVAL 1000
#define HAL_NOISE_IMMUNE_MAX 4
#define HAL_SPUR_IMMUNE_MAX 7
@@ -70,8 +62,6 @@
#define ATH9K_SIG_SPUR_IMM_SETTING_MIN 0
#define ATH9K_SIG_SPUR_IMM_SETTING_MAX 22
-#define ATH9K_ANI_ENABLE_MRC_CCK true
-
/* values here are relative to the INI */
enum ath9k_ani_cmd {
@@ -119,16 +109,14 @@ struct ar5416AniState {
u8 ofdmNoiseImmunityLevel;
u8 cckNoiseImmunityLevel;
bool ofdmsTurn;
- u8 mrcCCKOff;
+ u8 mrcCCK;
u8 spurImmunityLevel;
u8 firstepLevel;
- u8 ofdmWeakSigDetectOff;
+ u8 ofdmWeakSigDetect;
u8 cckWeakSigThreshold;
- bool update_ani;
u32 listenTime;
int32_t rssiThrLow;
int32_t rssiThrHigh;
- u32 noiseFloor;
u32 ofdmPhyErrCount;
u32 cckPhyErrCount;
int16_t pktRssi[2];
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
new file mode 100644
index 0000000..bbcfeb3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -0,0 +1,776 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
+ int mindelta, int main_rssi_avg,
+ int alt_rssi_avg, int pkt_count)
+{
+ return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
+ (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
+ (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
+}
+
+static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
+ int curr_main_set, int curr_alt_set,
+ int alt_rssi_avg, int main_rssi_avg)
+{
+ bool result = false;
+ switch (div_group) {
+ case 0:
+ if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
+ result = true;
+ break;
+ case 1:
+ case 2:
+ if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
+ (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
+ (alt_rssi_avg >= (main_rssi_avg - 5))) ||
+ ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
+ (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
+ (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
+ (alt_rssi_avg >= 4))
+ result = true;
+ else
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
+ struct ath_hw_antcomb_conf ant_conf,
+ int main_rssi_avg)
+{
+ antcomb->quick_scan_cnt = 0;
+
+ if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ antcomb->rssi_lna2 = main_rssi_avg;
+ else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
+ antcomb->rssi_lna1 = main_rssi_avg;
+
+ switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
+ case 0x10: /* LNA2 A-B */
+ antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ antcomb->first_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
+ break;
+ case 0x20: /* LNA1 A-B */
+ antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ antcomb->first_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
+ break;
+ case 0x21: /* LNA1 LNA2 */
+ antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
+ antcomb->first_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ antcomb->second_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ break;
+ case 0x12: /* LNA2 LNA1 */
+ antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
+ antcomb->first_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ antcomb->second_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ break;
+ case 0x13: /* LNA2 A+B */
+ antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ antcomb->first_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
+ break;
+ case 0x23: /* LNA1 A+B */
+ antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ antcomb->first_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
+ struct ath_hw_antcomb_conf *div_ant_conf,
+ int main_rssi_avg, int alt_rssi_avg,
+ int alt_ratio)
+{
+ /* alt_good */
+ switch (antcomb->quick_scan_cnt) {
+ case 0:
+ /* set main to the stored main conf, and alt to the first conf */
+ div_ant_conf->main_lna_conf = antcomb->main_conf;
+ div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
+ break;
+ case 1:
+ /* set main to the stored main conf, and alt to the second conf */
+ div_ant_conf->main_lna_conf = antcomb->main_conf;
+ div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
+ antcomb->rssi_first = main_rssi_avg;
+ antcomb->rssi_second = alt_rssi_avg;
+
+ if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
+ /* main is LNA1 */
+ if (ath_is_alt_ant_ratio_better(alt_ratio,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
+ main_rssi_avg, alt_rssi_avg,
+ antcomb->total_pkt_count))
+ antcomb->first_ratio = true;
+ else
+ antcomb->first_ratio = false;
+ } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
+ if (ath_is_alt_ant_ratio_better(alt_ratio,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
+ main_rssi_avg, alt_rssi_avg,
+ antcomb->total_pkt_count))
+ antcomb->first_ratio = true;
+ else
+ antcomb->first_ratio = false;
+ } else {
+ if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
+ (alt_rssi_avg > main_rssi_avg +
+ ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
+ (alt_rssi_avg > main_rssi_avg)) &&
+ (antcomb->total_pkt_count > 50))
+ antcomb->first_ratio = true;
+ else
+ antcomb->first_ratio = false;
+ }
+ break;
+ case 2:
+ antcomb->alt_good = false;
+ antcomb->scan_not_start = false;
+ antcomb->scan = false;
+ antcomb->rssi_first = main_rssi_avg;
+ antcomb->rssi_third = alt_rssi_avg;
+
+ if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
+ antcomb->rssi_lna1 = alt_rssi_avg;
+ else if (antcomb->second_quick_scan_conf ==
+ ATH_ANT_DIV_COMB_LNA2)
+ antcomb->rssi_lna2 = alt_rssi_avg;
+ else if (antcomb->second_quick_scan_conf ==
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
+ if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
+ antcomb->rssi_lna2 = main_rssi_avg;
+ else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
+ antcomb->rssi_lna1 = main_rssi_avg;
+ }
+
+ if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
+ ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
+ div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ else
+ div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+
+ if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
+ if (ath_is_alt_ant_ratio_better(alt_ratio,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
+ main_rssi_avg, alt_rssi_avg,
+ antcomb->total_pkt_count))
+ antcomb->second_ratio = true;
+ else
+ antcomb->second_ratio = false;
+ } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
+ if (ath_is_alt_ant_ratio_better(alt_ratio,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
+ main_rssi_avg, alt_rssi_avg,
+ antcomb->total_pkt_count))
+ antcomb->second_ratio = true;
+ else
+ antcomb->second_ratio = false;
+ } else {
+ if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
+ (alt_rssi_avg > main_rssi_avg +
+ ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
+ (alt_rssi_avg > main_rssi_avg)) &&
+ (antcomb->total_pkt_count > 50))
+ antcomb->second_ratio = true;
+ else
+ antcomb->second_ratio = false;
+ }
+
+ /* set alt to the conf with maximum ratio */
+ if (antcomb->first_ratio && antcomb->second_ratio) {
+ if (antcomb->rssi_second > antcomb->rssi_third) {
+ /* first alt */
+ if ((antcomb->first_quick_scan_conf ==
+ ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->first_quick_scan_conf ==
+ ATH_ANT_DIV_COMB_LNA2))
+ /* Set alt LNA1 or LNA2 */
+ if (div_ant_conf->main_lna_conf ==
+ ATH_ANT_DIV_COMB_LNA2)
+ div_ant_conf->alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ else
+ div_ant_conf->alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ else
+ /* Set alt to A+B or A-B */
+ div_ant_conf->alt_lna_conf =
+ antcomb->first_quick_scan_conf;
+ } else if ((antcomb->second_quick_scan_conf ==
+ ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->second_quick_scan_conf ==
+ ATH_ANT_DIV_COMB_LNA2)) {
+ /* Set alt LNA1 or LNA2 */
+ if (div_ant_conf->main_lna_conf ==
+ ATH_ANT_DIV_COMB_LNA2)
+ div_ant_conf->alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ else
+ div_ant_conf->alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ } else {
+ /* Set alt to A+B or A-B */
+ div_ant_conf->alt_lna_conf =
+ antcomb->second_quick_scan_conf;
+ }
+ } else if (antcomb->first_ratio) {
+ /* first alt */
+ if ((antcomb->first_quick_scan_conf ==
+ ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->first_quick_scan_conf ==
+ ATH_ANT_DIV_COMB_LNA2))
+ /* Set alt LNA1 or LNA2 */
+ if (div_ant_conf->main_lna_conf ==
+ ATH_ANT_DIV_COMB_LNA2)
+ div_ant_conf->alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ else
+ div_ant_conf->alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ else
+ /* Set alt to A+B or A-B */
+ div_ant_conf->alt_lna_conf =
+ antcomb->first_quick_scan_conf;
+ } else if (antcomb->second_ratio) {
+ /* second alt */
+ if ((antcomb->second_quick_scan_conf ==
+ ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->second_quick_scan_conf ==
+ ATH_ANT_DIV_COMB_LNA2))
+ /* Set alt LNA1 or LNA2 */
+ if (div_ant_conf->main_lna_conf ==
+ ATH_ANT_DIV_COMB_LNA2)
+ div_ant_conf->alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ else
+ div_ant_conf->alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ else
+ /* Set alt to A+B or A-B */
+ div_ant_conf->alt_lna_conf =
+ antcomb->second_quick_scan_conf;
+ } else {
+ /* main is largest */
+ if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
+ /* Set alt LNA1 or LNA2 */
+ if (div_ant_conf->main_lna_conf ==
+ ATH_ANT_DIV_COMB_LNA2)
+ div_ant_conf->alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ else
+ div_ant_conf->alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ else
+ /* Set alt to A+B or A-B */
+ div_ant_conf->alt_lna_conf = antcomb->main_conf;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
+ struct ath_ant_comb *antcomb,
+ int alt_ratio)
+{
+ if (ant_conf->div_group == 0) {
+ /* Adjust the fast_div_bias based on main and alt lna conf */
+ switch ((ant_conf->main_lna_conf << 4) |
+ ant_conf->alt_lna_conf) {
+ case 0x01: /* A-B LNA2 */
+ ant_conf->fast_div_bias = 0x3b;
+ break;
+ case 0x02: /* A-B LNA1 */
+ ant_conf->fast_div_bias = 0x3d;
+ break;
+ case 0x03: /* A-B A+B */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x10: /* LNA2 A-B */
+ ant_conf->fast_div_bias = 0x7;
+ break;
+ case 0x12: /* LNA2 LNA1 */
+ ant_conf->fast_div_bias = 0x2;
+ break;
+ case 0x13: /* LNA2 A+B */
+ ant_conf->fast_div_bias = 0x7;
+ break;
+ case 0x20: /* LNA1 A-B */
+ ant_conf->fast_div_bias = 0x6;
+ break;
+ case 0x21: /* LNA1 LNA2 */
+ ant_conf->fast_div_bias = 0x0;
+ break;
+ case 0x23: /* LNA1 A+B */
+ ant_conf->fast_div_bias = 0x6;
+ break;
+ case 0x30: /* A+B A-B */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x31: /* A+B LNA2 */
+ ant_conf->fast_div_bias = 0x3b;
+ break;
+ case 0x32: /* A+B LNA1 */
+ ant_conf->fast_div_bias = 0x3d;
+ break;
+ default:
+ break;
+ }
+ } else if (ant_conf->div_group == 1) {
+ /* Adjust the fast_div_bias based on main and alt_lna_conf */
+ switch ((ant_conf->main_lna_conf << 4) |
+ ant_conf->alt_lna_conf) {
+ case 0x01: /* A-B LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x02: /* A-B LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x03: /* A-B A+B */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x10: /* LNA2 A-B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x3f;
+ else
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x12: /* LNA2 LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x13: /* LNA2 A+B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x3f;
+ else
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x20: /* LNA1 A-B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x3f;
+ else
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x21: /* LNA1 LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x23: /* LNA1 A+B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x3f;
+ else
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x30: /* A+B A-B */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x31: /* A+B LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x32: /* A+B LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ default:
+ break;
+ }
+ } else if (ant_conf->div_group == 2) {
+ /* Adjust the fast_div_bias based on main and alt_lna_conf */
+ switch ((ant_conf->main_lna_conf << 4) |
+ ant_conf->alt_lna_conf) {
+ case 0x01: /* A-B LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x02: /* A-B LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x03: /* A-B A+B */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x10: /* LNA2 A-B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x1;
+ else
+ ant_conf->fast_div_bias = 0x2;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x12: /* LNA2 LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x13: /* LNA2 A+B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x1;
+ else
+ ant_conf->fast_div_bias = 0x2;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x20: /* LNA1 A-B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x1;
+ else
+ ant_conf->fast_div_bias = 0x2;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x21: /* LNA1 LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x23: /* LNA1 A+B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x1;
+ else
+ ant_conf->fast_div_bias = 0x2;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x30: /* A+B A-B */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x31: /* A+B LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x32: /* A+B LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
+{
+ struct ath_hw_antcomb_conf div_ant_conf;
+ struct ath_ant_comb *antcomb = &sc->ant_comb;
+ int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
+ int curr_main_set;
+ int main_rssi = rs->rs_rssi_ctl0;
+ int alt_rssi = rs->rs_rssi_ctl1;
+ int rx_ant_conf, main_ant_conf;
+ bool short_scan = false;
+
+ rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
+ ATH_ANT_RX_MASK;
+ main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
+ ATH_ANT_RX_MASK;
+
+ /* Record packet only when both main_rssi and alt_rssi are positive */
+ if (main_rssi > 0 && alt_rssi > 0) {
+ antcomb->total_pkt_count++;
+ antcomb->main_total_rssi += main_rssi;
+ antcomb->alt_total_rssi += alt_rssi;
+ if (main_ant_conf == rx_ant_conf)
+ antcomb->main_recv_cnt++;
+ else
+ antcomb->alt_recv_cnt++;
+ }
+
+ /* Short scan check */
+ if (antcomb->scan && antcomb->alt_good) {
+ if (time_after(jiffies, antcomb->scan_start_time +
+ msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
+ short_scan = true;
+ else
+ if (antcomb->total_pkt_count ==
+ ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
+ alt_ratio = ((antcomb->alt_recv_cnt * 100) /
+ antcomb->total_pkt_count);
+ if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
+ short_scan = true;
+ }
+ }
+
+ if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
+ rs->rs_moreaggr) && !short_scan)
+ return;
+
+ if (antcomb->total_pkt_count) {
+ alt_ratio = ((antcomb->alt_recv_cnt * 100) /
+ antcomb->total_pkt_count);
+ main_rssi_avg = (antcomb->main_total_rssi /
+ antcomb->total_pkt_count);
+ alt_rssi_avg = (antcomb->alt_total_rssi /
+ antcomb->total_pkt_count);
+ }
+
+ ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
+ curr_alt_set = div_ant_conf.alt_lna_conf;
+ curr_main_set = div_ant_conf.main_lna_conf;
+
+ antcomb->count++;
+
+ if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
+ if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
+ ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
+ main_rssi_avg);
+ antcomb->alt_good = true;
+ } else {
+ antcomb->alt_good = false;
+ }
+
+ antcomb->count = 0;
+ antcomb->scan = true;
+ antcomb->scan_not_start = true;
+ }
+
+ if (!antcomb->scan) {
+ if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
+ alt_ratio, curr_main_set, curr_alt_set,
+ alt_rssi_avg, main_rssi_avg)) {
+ if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
+ /* Switch main and alt LNA */
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ }
+
+ goto div_comb_done;
+ } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
+ (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
+ /* Set alt to another LNA */
+ if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+
+ goto div_comb_done;
+ }
+
+ if ((alt_rssi_avg < (main_rssi_avg +
+ div_ant_conf.lna1_lna2_delta)))
+ goto div_comb_done;
+ }
+
+ if (!antcomb->scan_not_start) {
+ switch (curr_alt_set) {
+ case ATH_ANT_DIV_COMB_LNA2:
+ antcomb->rssi_lna2 = alt_rssi_avg;
+ antcomb->rssi_lna1 = main_rssi_avg;
+ antcomb->scan = true;
+ /* set to A+B */
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ break;
+ case ATH_ANT_DIV_COMB_LNA1:
+ antcomb->rssi_lna1 = alt_rssi_avg;
+ antcomb->rssi_lna2 = main_rssi_avg;
+ antcomb->scan = true;
+ /* set to A+B */
+ div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ break;
+ case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
+ antcomb->rssi_add = alt_rssi_avg;
+ antcomb->scan = true;
+ /* set to A-B */
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ break;
+ case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
+ antcomb->rssi_sub = alt_rssi_avg;
+ antcomb->scan = false;
+ if (antcomb->rssi_lna2 >
+ (antcomb->rssi_lna1 +
+ ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
+ /* use LNA2 as main LNA */
+ if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
+ (antcomb->rssi_add > antcomb->rssi_sub)) {
+ /* set to A+B */
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ } else if (antcomb->rssi_sub >
+ antcomb->rssi_lna1) {
+ /* set to A-B */
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ } else {
+ /* set to LNA1 */
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ }
+ } else {
+ /* use LNA1 as main LNA */
+ if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
+ (antcomb->rssi_add > antcomb->rssi_sub)) {
+ /* set to A+B */
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ } else if (antcomb->rssi_sub >
+ antcomb->rssi_lna1) {
+ /* set to A-B */
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ } else {
+ /* set to LNA2 */
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ } else {
+ if (!antcomb->alt_good) {
+ antcomb->scan_not_start = false;
+ /* Set alt to another LNA */
+ if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ } else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ }
+ goto div_comb_done;
+ }
+ }
+
+ ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
+ main_rssi_avg, alt_rssi_avg,
+ alt_ratio);
+
+ antcomb->quick_scan_cnt++;
+
+div_comb_done:
+ ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
+ ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
+
+ antcomb->scan_start_time = jiffies;
+ antcomb->total_pkt_count = 0;
+ antcomb->main_total_rssi = 0;
+ antcomb->alt_total_rssi = 0;
+ antcomb->main_recv_cnt = 0;
+ antcomb->alt_recv_cnt = 0;
+}
+
+void ath_ant_comb_update(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_hw_antcomb_conf div_ant_conf;
+ u8 lna_conf;
+
+ ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
+
+ if (sc->ant_rx == 1)
+ lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else
+ lna_conf = ATH_ANT_DIV_COMB_LNA2;
+
+ div_ant_conf.main_lna_conf = lna_conf;
+ div_ant_conf.alt_lna_conf = lna_conf;
+
+ ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf);
+}
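
The switch statements in ath_lnaconf_alt_good_scan() and ath_ant_div_conf_fast_divbias() key on a packed byte: the main LNA configuration in the high nibble and the alternate one in the low nibble, so 0x21 is main LNA1, alt LNA2, matching the case comments. A small illustrative helper (hypothetical, named example_lna_key() here) makes the encoding explicit:

/* Illustration of the packed key used by the switch statements above:
 * main config in bits 7:4, alt config in bits 3:0. */
static inline u8 example_lna_key(u8 main_lna_conf, u8 alt_lna_conf)
{
	return (main_lna_conf << 4) | alt_lna_conf;
}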
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index c7492c6..874186b 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -995,141 +995,6 @@ static u32 ar5008_hw_compute_pll_control(struct ath_hw *ah,
return pll;
}
-static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
- enum ath9k_ani_cmd cmd,
- int param)
-{
- struct ar5416AniState *aniState = &ah->curchan->ani;
- struct ath_common *common = ath9k_hw_common(ah);
-
- switch (cmd & ah->ani_function) {
- case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
- u32 level = param;
-
- if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
- ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
- level, ARRAY_SIZE(ah->totalSizeDesired));
- return false;
- }
-
- REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
- AR_PHY_DESIRED_SZ_TOT_DES,
- ah->totalSizeDesired[level]);
- REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
- AR_PHY_AGC_CTL1_COARSE_LOW,
- ah->coarse_low[level]);
- REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
- AR_PHY_AGC_CTL1_COARSE_HIGH,
- ah->coarse_high[level]);
- REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
- AR_PHY_FIND_SIG_FIRPWR,
- ah->firpwr[level]);
-
- if (level > aniState->noiseImmunityLevel)
- ah->stats.ast_ani_niup++;
- else if (level < aniState->noiseImmunityLevel)
- ah->stats.ast_ani_nidown++;
- aniState->noiseImmunityLevel = level;
- break;
- }
- case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
- u32 on = param ? 1 : 0;
-
- if (on)
- REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
- else
- REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
-
- if (!on != aniState->ofdmWeakSigDetectOff) {
- if (on)
- ah->stats.ast_ani_ofdmon++;
- else
- ah->stats.ast_ani_ofdmoff++;
- aniState->ofdmWeakSigDetectOff = !on;
- }
- break;
- }
- case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
- static const int weakSigThrCck[] = { 8, 6 };
- u32 high = param ? 1 : 0;
-
- REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
- AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
- weakSigThrCck[high]);
- if (high != aniState->cckWeakSigThreshold) {
- if (high)
- ah->stats.ast_ani_cckhigh++;
- else
- ah->stats.ast_ani_ccklow++;
- aniState->cckWeakSigThreshold = high;
- }
- break;
- }
- case ATH9K_ANI_FIRSTEP_LEVEL:{
- static const int firstep[] = { 0, 4, 8 };
- u32 level = param;
-
- if (level >= ARRAY_SIZE(firstep)) {
- ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
- level, ARRAY_SIZE(firstep));
- return false;
- }
- REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
- AR_PHY_FIND_SIG_FIRSTEP,
- firstep[level]);
- if (level > aniState->firstepLevel)
- ah->stats.ast_ani_stepup++;
- else if (level < aniState->firstepLevel)
- ah->stats.ast_ani_stepdown++;
- aniState->firstepLevel = level;
- break;
- }
- case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
- static const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
- u32 level = param;
-
- if (level >= ARRAY_SIZE(cycpwrThr1)) {
- ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
- level, ARRAY_SIZE(cycpwrThr1));
- return false;
- }
- REG_RMW_FIELD(ah, AR_PHY_TIMING5,
- AR_PHY_TIMING5_CYCPWR_THR1,
- cycpwrThr1[level]);
- if (level > aniState->spurImmunityLevel)
- ah->stats.ast_ani_spurup++;
- else if (level < aniState->spurImmunityLevel)
- ah->stats.ast_ani_spurdown++;
- aniState->spurImmunityLevel = level;
- break;
- }
- case ATH9K_ANI_PRESENT:
- break;
- default:
- ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
- return false;
- }
-
- ath_dbg(common, ANI, "ANI parameters:\n");
- ath_dbg(common, ANI,
- "noiseImmunityLevel=%d, spurImmunityLevel=%d, ofdmWeakSigDetectOff=%d\n",
- aniState->noiseImmunityLevel,
- aniState->spurImmunityLevel,
- !aniState->ofdmWeakSigDetectOff);
- ath_dbg(common, ANI,
- "cckWeakSigThreshold=%d, firstepLevel=%d, listenTime=%d\n",
- aniState->cckWeakSigThreshold,
- aniState->firstepLevel,
- aniState->listenTime);
- ath_dbg(common, ANI, "ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
- aniState->ofdmPhyErrCount,
- aniState->cckPhyErrCount);
-
- return true;
-}
-
static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
enum ath9k_ani_cmd cmd,
int param)
@@ -1206,18 +1071,18 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
- if (!on != aniState->ofdmWeakSigDetectOff) {
+ if (on != aniState->ofdmWeakSigDetect) {
ath_dbg(common, ANI,
"** ch %d: ofdm weak signal: %s=>%s\n",
chan->channel,
- !aniState->ofdmWeakSigDetectOff ?
+ aniState->ofdmWeakSigDetect ?
"on" : "off",
on ? "on" : "off");
if (on)
ah->stats.ast_ani_ofdmon++;
else
ah->stats.ast_ani_ofdmoff++;
- aniState->ofdmWeakSigDetectOff = !on;
+ aniState->ofdmWeakSigDetect = on;
}
break;
}
@@ -1236,7 +1101,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
* from INI file & cap value
*/
value = firstep_table[level] -
- firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
+ firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
aniState->iniDef.firstep;
if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
@@ -1251,7 +1116,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
* from INI file & cap value
*/
value2 = firstep_table[level] -
- firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
+ firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
aniState->iniDef.firstepLow;
if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
@@ -1267,7 +1132,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
chan->channel,
aniState->firstepLevel,
level,
- ATH9K_ANI_FIRSTEP_LVL_NEW,
+ ATH9K_ANI_FIRSTEP_LVL,
value,
aniState->iniDef.firstep);
ath_dbg(common, ANI,
@@ -1275,7 +1140,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
chan->channel,
aniState->firstepLevel,
level,
- ATH9K_ANI_FIRSTEP_LVL_NEW,
+ ATH9K_ANI_FIRSTEP_LVL,
value2,
aniState->iniDef.firstepLow);
if (level > aniState->firstepLevel)
@@ -1300,7 +1165,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
* from INI file & cap value
*/
value = cycpwrThr1_table[level] -
- cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
+ cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
aniState->iniDef.cycpwrThr1;
if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
@@ -1316,7 +1181,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
* from INI file & cap value
*/
value2 = cycpwrThr1_table[level] -
- cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
+ cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
aniState->iniDef.cycpwrThr1Ext;
if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
@@ -1331,7 +1196,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
chan->channel,
aniState->spurImmunityLevel,
level,
- ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+ ATH9K_ANI_SPUR_IMMUNE_LVL,
value,
aniState->iniDef.cycpwrThr1);
ath_dbg(common, ANI,
@@ -1339,7 +1204,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
chan->channel,
aniState->spurImmunityLevel,
level,
- ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+ ATH9K_ANI_SPUR_IMMUNE_LVL,
value2,
aniState->iniDef.cycpwrThr1Ext);
if (level > aniState->spurImmunityLevel)
@@ -1367,9 +1232,9 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
ath_dbg(common, ANI,
"ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
aniState->spurImmunityLevel,
- !aniState->ofdmWeakSigDetectOff ? "on" : "off",
+ aniState->ofdmWeakSigDetect ? "on" : "off",
aniState->firstepLevel,
- !aniState->mrcCCKOff ? "on" : "off",
+ aniState->mrcCCK ? "on" : "off",
aniState->listenTime,
aniState->ofdmPhyErrCount,
aniState->cckPhyErrCount);
@@ -1454,10 +1319,10 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
AR_PHY_EXT_TIMING5_CYCPWR_THR1);
/* these levels just got reset to defaults by the INI */
- aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
- aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
- aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG;
- aniState->mrcCCKOff = true; /* not available on pre AR9003 */
+ aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
+ aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
+ aniState->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
+ aniState->mrcCCK = false; /* not available on pre AR9003 */
}
static void ar5008_hw_set_nf_limits(struct ath_hw *ah)
@@ -1545,11 +1410,8 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
priv_ops->do_getnf = ar5008_hw_do_getnf;
priv_ops->set_radar_params = ar5008_hw_set_radar_params;
- if (modparam_force_new_ani) {
- priv_ops->ani_control = ar5008_hw_ani_control_new;
- priv_ops->ani_cache_ini_regs = ar5008_hw_ani_cache_ini_regs;
- } else
- priv_ops->ani_control = ar5008_hw_ani_control_old;
+ priv_ops->ani_control = ar5008_hw_ani_control_new;
+ priv_ops->ani_cache_ini_regs = ar5008_hw_ani_cache_ini_regs;
if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
priv_ops->compute_pll_control = ar9160_hw_compute_pll_control;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index d9a69fc..648da3e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -21,110 +21,79 @@
#include "ar9002_initvals.h"
#include "ar9002_phy.h"
-int modparam_force_new_ani;
-module_param_named(force_new_ani, modparam_force_new_ani, int, 0444);
-MODULE_PARM_DESC(force_new_ani, "Force new ANI for AR5008, AR9001, AR9002");
-
/* General hardware code for the AR5008/AR9001/AR9002 hardware families */
static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
{
if (AR_SREV_9271(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271,
- ARRAY_SIZE(ar9271Modes_9271), 5);
- INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
- ARRAY_SIZE(ar9271Common_9271), 2);
- INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg,
- ARRAY_SIZE(ar9271Modes_9271_ANI_reg), 5);
+ INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271);
+ INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg);
return;
}
if (ah->config.pcie_clock_req)
INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9280PciePhy_clkreq_off_L1_9280,
- ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280), 2);
+ ar9280PciePhy_clkreq_off_L1_9280);
else
INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9280PciePhy_clkreq_always_on_L1_9280,
- ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2);
+ ar9280PciePhy_clkreq_always_on_L1_9280);
+#ifdef CONFIG_PM_SLEEP
+ INIT_INI_ARRAY(&ah->iniPcieSerdesWow,
+ ar9280PciePhy_awow);
+#endif
if (AR_SREV_9287_11_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1,
- ARRAY_SIZE(ar9287Modes_9287_1_1), 5);
- INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1,
- ARRAY_SIZE(ar9287Common_9287_1_1), 2);
+ INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1);
} else if (AR_SREV_9285_12_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2,
- ARRAY_SIZE(ar9285Modes_9285_1_2), 5);
- INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2,
- ARRAY_SIZE(ar9285Common_9285_1_2), 2);
+ INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2);
} else if (AR_SREV_9280_20_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2,
- ARRAY_SIZE(ar9280Modes_9280_2), 5);
- INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2,
- ARRAY_SIZE(ar9280Common_9280_2), 2);
+ INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2);
INIT_INI_ARRAY(&ah->iniModesFastClock,
- ar9280Modes_fast_clock_9280_2,
- ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
+ ar9280Modes_fast_clock_9280_2);
} else if (AR_SREV_9160_10_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160,
- ARRAY_SIZE(ar5416Modes_9160), 5);
- INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160,
- ARRAY_SIZE(ar5416Common_9160), 2);
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160);
if (AR_SREV_9160_11(ah)) {
INIT_INI_ARRAY(&ah->iniAddac,
- ar5416Addac_9160_1_1,
- ARRAY_SIZE(ar5416Addac_9160_1_1), 2);
+ ar5416Addac_9160_1_1);
} else {
- INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160,
- ARRAY_SIZE(ar5416Addac_9160), 2);
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160);
}
} else if (AR_SREV_9100_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100,
- ARRAY_SIZE(ar5416Modes_9100), 5);
- INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100,
- ARRAY_SIZE(ar5416Common_9100), 2);
- INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100,
- ARRAY_SIZE(ar5416Bank6_9100), 3);
- INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100,
- ARRAY_SIZE(ar5416Addac_9100), 2);
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100);
+ INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100);
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100);
} else {
- INIT_INI_ARRAY(&ah->iniModes, ar5416Modes,
- ARRAY_SIZE(ar5416Modes), 5);
- INIT_INI_ARRAY(&ah->iniCommon, ar5416Common,
- ARRAY_SIZE(ar5416Common), 2);
- INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC,
- ARRAY_SIZE(ar5416Bank6TPC), 3);
- INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
- ARRAY_SIZE(ar5416Addac), 2);
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common);
+ INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC);
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac);
}
if (!AR_SREV_9280_20_OR_LATER(ah)) {
/* Common for AR5416, AR913x, AR9160 */
- INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain,
- ARRAY_SIZE(ar5416BB_RfGain), 3);
-
- INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0,
- ARRAY_SIZE(ar5416Bank0), 2);
- INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1,
- ARRAY_SIZE(ar5416Bank1), 2);
- INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2,
- ARRAY_SIZE(ar5416Bank2), 2);
- INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3,
- ARRAY_SIZE(ar5416Bank3), 3);
- INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7,
- ARRAY_SIZE(ar5416Bank7), 2);
+ INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain);
+
+ INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0);
+ INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1);
+ INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2);
+ INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3);
+ INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7);
/* Common for AR5416, AR9160 */
if (!AR_SREV_9100(ah))
- INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6,
- ARRAY_SIZE(ar5416Bank6), 3);
+ INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6);
/* Common for AR913x, AR9160 */
if (!AR_SREV_5416(ah))
- INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100,
- ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
+ INIT_INI_ARRAY(&ah->iniBank6TPC,
+ ar5416Bank6TPC_9100);
}
/* iniAddac needs to be modified for these chips */
@@ -147,13 +116,9 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
}
if (AR_SREV_9287_11_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniCckfirNormal,
- ar9287Common_normal_cck_fir_coeff_9287_1_1,
- ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_9287_1_1),
- 2);
+ ar9287Common_normal_cck_fir_coeff_9287_1_1);
INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
- ar9287Common_japan_2484_cck_fir_coeff_9287_1_1,
- ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_9287_1_1),
- 2);
+ ar9287Common_japan_2484_cck_fir_coeff_9287_1_1);
}
}
@@ -167,20 +132,16 @@ static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah)
if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_backoff_13db_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 5);
+ ar9280Modes_backoff_13db_rxgain_9280_2);
else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF)
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_backoff_23db_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 5);
+ ar9280Modes_backoff_23db_rxgain_9280_2);
else
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_original_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 5);
+ ar9280Modes_original_rxgain_9280_2);
} else {
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_original_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 5);
+ ar9280Modes_original_rxgain_9280_2);
}
}
@@ -190,16 +151,13 @@ static void ar9280_20_hw_init_txgain_ini(struct ath_hw *ah, u32 txgain_type)
AR5416_EEP_MINOR_VER_19) {
if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9280Modes_high_power_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 5);
+ ar9280Modes_high_power_tx_gain_9280_2);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9280Modes_original_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 5);
+ ar9280Modes_original_tx_gain_9280_2);
} else {
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9280Modes_original_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 5);
+ ar9280Modes_original_tx_gain_9280_2);
}
}
@@ -207,12 +165,10 @@ static void ar9271_hw_init_txgain_ini(struct ath_hw *ah, u32 txgain_type)
{
if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9271Modes_high_power_tx_gain_9271,
- ARRAY_SIZE(ar9271Modes_high_power_tx_gain_9271), 5);
+ ar9271Modes_high_power_tx_gain_9271);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9271Modes_normal_power_tx_gain_9271,
- ARRAY_SIZE(ar9271Modes_normal_power_tx_gain_9271), 5);
+ ar9271Modes_normal_power_tx_gain_9271);
}
static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
@@ -221,8 +177,7 @@ static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
if (AR_SREV_9287_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9287Modes_rx_gain_9287_1_1,
- ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 5);
+ ar9287Modes_rx_gain_9287_1_1);
else if (AR_SREV_9280_20(ah))
ar9280_20_hw_init_rxgain_ini(ah);
@@ -230,8 +185,7 @@ static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
ar9271_hw_init_txgain_ini(ah, txgain_type);
} else if (AR_SREV_9287_11_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9287Modes_tx_gain_9287_1_1,
- ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 5);
+ ar9287Modes_tx_gain_9287_1_1);
} else if (AR_SREV_9280_20(ah)) {
ar9280_20_hw_init_txgain_ini(ah, txgain_type);
} else if (AR_SREV_9285_12_OR_LATER(ah)) {
@@ -239,26 +193,18 @@ static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) {
if (AR_SREV_9285E_20(ah)) {
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9285Modes_XE2_0_high_power,
- ARRAY_SIZE(
- ar9285Modes_XE2_0_high_power), 5);
+ ar9285Modes_XE2_0_high_power);
} else {
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9285Modes_high_power_tx_gain_9285_1_2,
- ARRAY_SIZE(
- ar9285Modes_high_power_tx_gain_9285_1_2), 5);
+ ar9285Modes_high_power_tx_gain_9285_1_2);
}
} else {
if (AR_SREV_9285E_20(ah)) {
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9285Modes_XE2_0_normal_power,
- ARRAY_SIZE(
- ar9285Modes_XE2_0_normal_power), 5);
+ ar9285Modes_XE2_0_normal_power);
} else {
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9285Modes_original_tx_gain_9285_1_2,
- ARRAY_SIZE(
- ar9285Modes_original_tx_gain_9285_1_2), 5);
+ ar9285Modes_original_tx_gain_9285_1_2);
}
}
}
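
The conversions above drop the explicit row and column arguments from INIT_INI_ARRAY(). One plausible way to support that, shown as a sketch under the assumption that struct ar5416IniArray keeps ia_array/ia_rows/ia_columns fields (not necessarily the macro's real definition), is to derive both counts from the array type at the call site:

/* Hypothetical two-argument variant, shown for illustration only. */
#define EXAMPLE_INIT_INI_ARRAY(iniarray, array) do {			\
		(iniarray)->ia_array = (u32 *)(array);			\
		(iniarray)->ia_rows = ARRAY_SIZE(array);		\
		(iniarray)->ia_columns = ARRAY_SIZE((array)[0]);	\
	} while (0)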
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
index 4d18c66..beb6162 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
@@ -925,6 +925,20 @@ static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
{0x00004044, 0x00000000},
};
+static const u32 ar9280PciePhy_awow[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x9248fd00},
+ {0x00004040, 0x24924924},
+ {0x00004040, 0xa8000019},
+ {0x00004040, 0x13160820},
+ {0x00004040, 0xe5980560},
+ {0x00004040, 0xc01dcffd},
+ {0x00004040, 0x1aaabe41},
+ {0x00004040, 0xbe105554},
+ {0x00004040, 0x00043007},
+ {0x00004044, 0x00000000},
+};
+
static const u32 ar9285Modes_9285_1_2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 952cb2b..89bf94d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 9fdd70f..84b558d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -159,14 +159,11 @@ static bool ar9003_hw_calibrate(struct ath_hw *ah,
}
}
- /* Do NF cal only at longer intervals */
- if (longcal) {
- /*
- * Get the value from the previous NF cal and update
- * history buffer.
- */
- ath9k_hw_getnf(ah, chan);
-
+ /*
+ * Do NF cal only at longer intervals. Get the value from
+ * the previous NF cal and update history buffer.
+ */
+ if (longcal && ath9k_hw_getnf(ah, chan)) {
/*
* Load the NF from history buffer of the current channel.
* NF is slow time-variant, so it is OK to use a historical
@@ -653,7 +650,6 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
}
static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
- u8 num_chains,
struct coeff *coeff,
bool is_reusable)
{
@@ -677,7 +673,9 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
}
/* Load the average of 2 passes */
- for (i = 0; i < num_chains; i++) {
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->txchainmask & (1 << i)))
+ continue;
nmeasurement = REG_READ_FIELD(ah,
AR_PHY_TX_IQCAL_STATUS_B0,
AR_PHY_CALIBRATED_GAINS_0);
@@ -767,16 +765,13 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
};
struct coeff coeff;
s32 iq_res[6];
- u8 num_chains = 0;
int i, im, j;
int nmeasurement;
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
- if (ah->txchainmask & (1 << i))
- num_chains++;
- }
+ if (!(ah->txchainmask & (1 << i)))
+ continue;
- for (i = 0; i < num_chains; i++) {
nmeasurement = REG_READ_FIELD(ah,
AR_PHY_TX_IQCAL_STATUS_B0,
AR_PHY_CALIBRATED_GAINS_0);
@@ -839,8 +834,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
coeff.phs_coeff[i][im] -= 128;
}
}
- ar9003_hw_tx_iqcal_load_avg_2_passes(ah, num_chains,
- &coeff, is_reusable);
+ ar9003_hw_tx_iqcal_load_avg_2_passes(ah, &coeff, is_reusable);
return;
@@ -901,7 +895,6 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
bool is_reusable = true, status = true;
bool run_rtt_cal = false, run_agc_cal;
bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
- bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
AR_PHY_AGC_CONTROL_FLTR_CAL |
AR_PHY_AGC_CONTROL_PKDET_CAL;
@@ -970,7 +963,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
} else if (caldata && !caldata->done_txiqcal_once)
run_agc_cal = true;
- if (mci && IS_CHAN_2GHZ(chan) && run_agc_cal)
+ if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
ar9003_mci_init_cal_req(ah, &is_reusable);
if (!(IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))) {
@@ -993,7 +986,7 @@ skip_tx_iqcal:
0, AH_WAIT_TIMEOUT);
}
- if (mci && IS_CHAN_2GHZ(chan) && run_agc_cal)
+ if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
ar9003_mci_init_cal_done(ah);
if (rtt && !run_rtt_cal) {
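
The calibration hunks above replace the separate chain count with a direct walk over the set bits of txchainmask. The loop shape, shown in isolation (example_for_each_tx_chain() is just an illustrative name), is:

/* Visit only the chains whose bit is set in the tx chainmask,
 * mirroring the loops used in the calibration code above. */
static void example_for_each_tx_chain(struct ath_hw *ah)
{
	int i;

	for (i = 0; i < AR9300_MAX_CHAINS; i++) {
		if (!(ah->txchainmask & (1 << i)))
			continue;
		/* per-chain calibration work goes here */
	}
}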
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index ac53d90..2588848 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -131,8 +131,9 @@ static const struct ar9300_eeprom ar9300_default = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .xlna_bias_strength = 0,
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext1 = {
@@ -331,8 +332,9 @@ static const struct ar9300_eeprom ar9300_default = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
+ .xlna_bias_strength = 0,
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext2 = {
@@ -704,8 +706,9 @@ static const struct ar9300_eeprom ar9300_x113 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
+ .xlna_bias_strength = 0,
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext1 = {
@@ -904,8 +907,9 @@ static const struct ar9300_eeprom ar9300_x113 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .xlna_bias_strength = 0,
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext2 = {
@@ -1278,8 +1282,9 @@ static const struct ar9300_eeprom ar9300_h112 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
+ .xlna_bias_strength = 0,
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext1 = {
@@ -1478,8 +1483,9 @@ static const struct ar9300_eeprom ar9300_h112 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .xlna_bias_strength = 0,
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext2 = {
@@ -1852,8 +1858,9 @@ static const struct ar9300_eeprom ar9300_x112 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
+ .xlna_bias_strength = 0,
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext1 = {
@@ -2052,8 +2059,9 @@ static const struct ar9300_eeprom ar9300_x112 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .xlna_bias_strength = 0,
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext2 = {
@@ -2425,8 +2433,9 @@ static const struct ar9300_eeprom ar9300_h116 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0c80C080),
.papdRateMaskHt40 = LE32(0x0080C080),
+ .xlna_bias_strength = 0,
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext1 = {
@@ -2625,8 +2634,9 @@ static const struct ar9300_eeprom ar9300_h116 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .xlna_bias_strength = 0,
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext2 = {
@@ -2971,14 +2981,6 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
return (pBase->txrxMask >> 4) & 0xf;
case EEP_RX_MASK:
return pBase->txrxMask & 0xf;
- case EEP_DRIVE_STRENGTH:
-#define AR9300_EEP_BASE_DRIV_STRENGTH 0x1
- return pBase->miscConfiguration & AR9300_EEP_BASE_DRIV_STRENGTH;
- case EEP_INTERNAL_REGULATOR:
- /* Bit 4 is internal regulator flag */
- return (pBase->featureEnable & 0x10) >> 4;
- case EEP_SWREG:
- return le32_to_cpu(pBase->swreg);
case EEP_PAPRD:
return !!(pBase->featureEnable & BIT(5));
case EEP_CHAIN_MASK_REDUCE:
@@ -2989,8 +2991,6 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
return eep->modalHeader5G.antennaGain;
case EEP_ANTENNA_GAIN_2G:
return eep->modalHeader2G.antennaGain;
- case EEP_QUICK_DROP:
- return pBase->miscConfiguration & BIT(1);
default:
return 0;
}
@@ -3178,7 +3178,7 @@ static int ar9300_compress_decision(struct ath_hw *ah,
mdata_size, length);
return -1;
}
- memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), length);
+ memcpy(mptr, word + COMP_HDR_LEN, length);
ath_dbg(common, EEPROM,
"restored eeprom %d: uncompressed, length %d\n",
it, length);
@@ -3199,7 +3199,7 @@ static int ar9300_compress_decision(struct ath_hw *ah,
"restore eeprom %d: block, reference %d, length %d\n",
it, reference, length);
ar9300_uncompress_block(ah, mptr, mdata_size,
- (u8 *) (word + COMP_HDR_LEN), length);
+ (word + COMP_HDR_LEN), length);
break;
default:
ath_dbg(common, EEPROM, "unknown compression code %d\n", code);
@@ -3260,10 +3260,20 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
int it;
u16 checksum, mchecksum;
struct ath_common *common = ath9k_hw_common(ah);
+ struct ar9300_eeprom *eep;
eeprom_read_op read;
- if (ath9k_hw_use_flash(ah))
- return ar9300_eeprom_restore_flash(ah, mptr, mdata_size);
+ if (ath9k_hw_use_flash(ah)) {
+ u8 txrx;
+
+ ar9300_eeprom_restore_flash(ah, mptr, mdata_size);
+
+ /* check if eeprom contains valid data */
+ eep = (struct ar9300_eeprom *) mptr;
+ txrx = eep->baseEepHeader.txrxMask;
+ if (txrx != 0 && txrx != 0xff)
+ return 0;
+ }
word = kzalloc(2048, GFP_KERNEL);
if (!word)
@@ -3412,11 +3422,11 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
if (!dump_base_hdr) {
len += snprintf(buf + len, size - len,
"%20s :\n", "2GHz modal Header");
- len += ar9003_dump_modal_eeprom(buf, len, size,
+ len = ar9003_dump_modal_eeprom(buf, len, size,
&eep->modalHeader2G);
len += snprintf(buf + len, size - len,
"%20s :\n", "5GHz modal Header");
- len += ar9003_dump_modal_eeprom(buf, len, size,
+ len = ar9003_dump_modal_eeprom(buf, len, size,
&eep->modalHeader5G);
goto out;
}
@@ -3493,23 +3503,24 @@ static int ath9k_hw_ar9300_get_eeprom_rev(struct ath_hw *ah)
return 0;
}
-static s32 ar9003_hw_xpa_bias_level_get(struct ath_hw *ah, bool is2ghz)
+static struct ar9300_modal_eep_header *ar9003_modal_header(struct ath_hw *ah,
+ bool is2ghz)
{
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
if (is2ghz)
- return eep->modalHeader2G.xpaBiasLvl;
+ return &eep->modalHeader2G;
else
- return eep->modalHeader5G.xpaBiasLvl;
+ return &eep->modalHeader5G;
}
static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
{
- int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz);
+ int bias = ar9003_modal_header(ah, is2ghz)->xpaBiasLvl;
if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
- else if (AR_SREV_9462(ah))
+ else if (AR_SREV_9462(ah) || AR_SREV_9550(ah))
REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
else {
REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
@@ -3521,57 +3532,26 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
}
}
-static u16 ar9003_switch_com_spdt_get(struct ath_hw *ah, bool is_2ghz)
+static u16 ar9003_switch_com_spdt_get(struct ath_hw *ah, bool is2ghz)
{
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- __le16 val;
-
- if (is_2ghz)
- val = eep->modalHeader2G.switchcomspdt;
- else
- val = eep->modalHeader5G.switchcomspdt;
- return le16_to_cpu(val);
+ return le16_to_cpu(ar9003_modal_header(ah, is2ghz)->switchcomspdt);
}
static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
{
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- __le32 val;
-
- if (is2ghz)
- val = eep->modalHeader2G.antCtrlCommon;
- else
- val = eep->modalHeader5G.antCtrlCommon;
- return le32_to_cpu(val);
+ return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->antCtrlCommon);
}
static u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz)
{
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- __le32 val;
-
- if (is2ghz)
- val = eep->modalHeader2G.antCtrlCommon2;
- else
- val = eep->modalHeader5G.antCtrlCommon2;
- return le32_to_cpu(val);
+ return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->antCtrlCommon2);
}
-static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah,
- int chain,
+static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah, int chain,
bool is2ghz)
{
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- __le16 val = 0;
-
- if (chain >= 0 && chain < AR9300_MAX_CHAINS) {
- if (is2ghz)
- val = eep->modalHeader2G.antCtrlChain[chain];
- else
- val = eep->modalHeader5G.antCtrlChain[chain];
- }
-
+ __le16 val = ar9003_modal_header(ah, is2ghz)->antCtrlChain[chain];
return le16_to_cpu(val);
}
@@ -3591,6 +3571,9 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
if (AR_SREV_9462(ah)) {
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
AR_SWITCH_TABLE_COM_AR9462_ALL, value);
+ } else if (AR_SREV_9550(ah)) {
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
+ AR_SWITCH_TABLE_COM_AR9550_ALL, value);
} else
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
AR_SWITCH_TABLE_COM_ALL, value);
@@ -3613,6 +3596,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
value = ar9003_switch_com_spdt_get(ah, is2ghz);
REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
AR_SWITCH_TABLE_COM_SPDT_ALL, value);
+ REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, AR_BTCOEX_CTRL_SPDT_ENABLE);
}
value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
@@ -3677,11 +3661,12 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
int drive_strength;
unsigned long reg;
- drive_strength = ath9k_hw_ar9300_get_eeprom(ah, EEP_DRIVE_STRENGTH);
-
+ drive_strength = pBase->miscConfiguration & BIT(0);
if (!drive_strength)
return;
@@ -3809,13 +3794,13 @@ static bool is_pmu_set(struct ath_hw *ah, u32 pmu_reg, int pmu_set)
return true;
}
-static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
+void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
{
- int internal_regulator =
- ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
u32 reg_val;
- if (internal_regulator) {
+ if (pBase->featureEnable & BIT(4)) {
if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
int reg_pmu_set;
@@ -3859,11 +3844,11 @@ static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
return;
} else if (AR_SREV_9462(ah)) {
- reg_val = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG);
+ reg_val = le32_to_cpu(pBase->swreg);
REG_WRITE(ah, AR_PHY_PMU1, reg_val);
} else {
/* Internal regulator is ON. Write swreg register. */
- reg_val = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG);
+ reg_val = le32_to_cpu(pBase->swreg);
REG_WRITE(ah, AR_RTC_REG_CONTROL1,
REG_READ(ah, AR_RTC_REG_CONTROL1) &
(~AR_RTC_REG_CONTROL1_SWREG_PROGRAM));
@@ -3905,6 +3890,9 @@ static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah)
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
u8 tuning_caps_param = eep->baseEepHeader.params_for_tuning_caps[0];
+ if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
+ return;
+
if (eep->baseEepHeader.featureEnable & 0x40) {
tuning_caps_param &= 0x7f;
REG_RMW_FIELD(ah, AR_CH0_XTAL, AR_CH0_XTAL_CAPINDAC,
@@ -3917,10 +3905,11 @@ static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah)
static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq)
{
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- int quick_drop = ath9k_hw_ar9300_get_eeprom(ah, EEP_QUICK_DROP);
+ struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
+ int quick_drop;
s32 t[3], f[3] = {5180, 5500, 5785};
- if (!quick_drop)
+ if (!(pBase->miscConfiguration & BIT(1)))
return;
if (freq < 4000)
@@ -3934,13 +3923,11 @@ static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq)
REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
}
-static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, u16 freq)
+static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, bool is2ghz)
{
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
u32 value;
- value = (freq < 4000) ? eep->modalHeader2G.txEndToXpaOff :
- eep->modalHeader5G.txEndToXpaOff;
+ value = ar9003_modal_header(ah, is2ghz)->txEndToXpaOff;
REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF, value);
@@ -3948,19 +3935,63 @@ static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, u16 freq)
AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF, value);
}
+static void ar9003_hw_xpa_timing_control_apply(struct ath_hw *ah, bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ u8 xpa_ctl;
+
+ if (!(eep->baseEepHeader.featureEnable & 0x80))
+ return;
+
+ if (!AR_SREV_9300(ah) && !AR_SREV_9340(ah) && !AR_SREV_9580(ah))
+ return;
+
+ xpa_ctl = ar9003_modal_header(ah, is2ghz)->txFrameToXpaOn;
+ if (is2ghz)
+ REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
+ AR_PHY_XPA_TIMING_CTL_FRAME_XPAB_ON, xpa_ctl);
+ else
+ REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
+ AR_PHY_XPA_TIMING_CTL_FRAME_XPAA_ON, xpa_ctl);
+}
+
+static void ar9003_hw_xlna_bias_strength_apply(struct ath_hw *ah, bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ u8 bias;
+
+ if (!(eep->baseEepHeader.featureEnable & 0x40))
+ return;
+
+ if (!AR_SREV_9300(ah))
+ return;
+
+ bias = ar9003_modal_header(ah, is2ghz)->xlna_bias_strength;
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4, AR_PHY_65NM_RXTX4_XLNA_BIAS,
+ bias & 0x3);
+ bias >>= 2;
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4, AR_PHY_65NM_RXTX4_XLNA_BIAS,
+ bias & 0x3);
+ bias >>= 2;
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4, AR_PHY_65NM_RXTX4_XLNA_BIAS,
+ bias & 0x3);
+}
+
static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
struct ath9k_channel *chan)
{
- ar9003_hw_xpa_bias_level_apply(ah, IS_CHAN_2GHZ(chan));
- ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
+ bool is2ghz = IS_CHAN_2GHZ(chan);
+ ar9003_hw_xpa_timing_control_apply(ah, is2ghz);
+ ar9003_hw_xpa_bias_level_apply(ah, is2ghz);
+ ar9003_hw_ant_ctrl_apply(ah, is2ghz);
ar9003_hw_drive_strength_apply(ah);
+ ar9003_hw_xlna_bias_strength_apply(ah, is2ghz);
ar9003_hw_atten_apply(ah, chan);
ar9003_hw_quick_drop_apply(ah, chan->channel);
- if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah))
+ if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah) && !AR_SREV_9550(ah))
ar9003_hw_internal_regulator_apply(ah);
- if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
- ar9003_hw_apply_tuning_caps(ah);
- ar9003_hw_txend_to_xpa_off_apply(ah, chan->channel);
+ ar9003_hw_apply_tuning_caps(ah);
+ ar9003_hw_txend_to_xpa_off_apply(ah, is2ghz);
}
static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
@@ -5096,14 +5127,9 @@ s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah)
return (eep->baseEepHeader.txrxgain) & 0xf; /* bits 3:0 */
}
-u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz)
+u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is2ghz)
{
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
-
- if (is_2ghz)
- return eep->modalHeader2G.spurChans;
- else
- return eep->modalHeader5G.spurChans;
+ return ar9003_modal_header(ah, is2ghz)->spurChans;
}
unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 2505ac4..3a1ff55 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -231,7 +231,8 @@ struct ar9300_modal_eep_header {
__le32 papdRateMaskHt20;
__le32 papdRateMaskHt40;
__le16 switchcomspdt;
- u8 futureModal[8];
+ u8 xlna_bias_strength;
+ u8 futureModal[7];
} __packed;
struct ar9300_cal_data_per_freq_op_loop {
@@ -334,4 +335,7 @@ u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz);
unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
struct ath9k_channel *chan);
+
+void ar9003_hw_internal_regulator_apply(struct ath_hw *ah);
+
#endif
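/*
 * Editor's note: the ar9003_eeprom.c hunks above fold the repeated
 * "if (is2ghz) ... modalHeader2G else ... modalHeader5G" branches into a
 * single ar9003_modal_header() accessor that returns a pointer to the
 * selected modal header.  The stand-alone sketch below (simplified,
 * hypothetical field set -- not the real ar9300_modal_eep_header layout)
 * only illustrates that refactoring pattern.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct modal_hdr {                      /* trimmed-down stand-in */
	uint8_t xpaBiasLvl;
	uint8_t txEndToXpaOff;
};

struct eeprom {
	struct modal_hdr modal2g;
	struct modal_hdr modal5g;
};

/* One accessor replaces several nearly identical per-field getters. */
static struct modal_hdr *modal_header(struct eeprom *eep, bool is2ghz)
{
	return is2ghz ? &eep->modal2g : &eep->modal5g;
}

int main(void)
{
	struct eeprom eep = {
		.modal2g = { .xpaBiasLvl = 3, .txEndToXpaOff = 10 },
		.modal5g = { .xpaBiasLvl = 1, .txEndToXpaOff = 12 },
	};

	/* Callers now dereference the field they need directly. */
	printf("2G xpa bias: %u\n", modal_header(&eep, true)->xpaBiasLvl);
	printf("5G tx-end-to-xpa-off: %u\n",
	       modal_header(&eep, false)->txEndToXpaOff);
	return 0;
}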
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index a0e3394..1e8a4da 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -21,6 +21,7 @@
#include "ar9340_initvals.h"
#include "ar9330_1p1_initvals.h"
#include "ar9330_1p2_initvals.h"
+#include "ar955x_1p0_initvals.h"
#include "ar9580_1p0_initvals.h"
#include "ar9462_2p0_initvals.h"
@@ -43,408 +44,310 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
ar9462_2p0_baseband_core_txfir_coeff_japan_2484
if (AR_SREV_9330_11(ah)) {
/* mac */
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
- ar9331_1p1_mac_core,
- ARRAY_SIZE(ar9331_1p1_mac_core), 2);
+ ar9331_1p1_mac_core);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
- ar9331_1p1_mac_postamble,
- ARRAY_SIZE(ar9331_1p1_mac_postamble), 5);
+ ar9331_1p1_mac_postamble);
/* bb */
- INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
- ar9331_1p1_baseband_core,
- ARRAY_SIZE(ar9331_1p1_baseband_core), 2);
+ ar9331_1p1_baseband_core);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
- ar9331_1p1_baseband_postamble,
- ARRAY_SIZE(ar9331_1p1_baseband_postamble), 5);
+ ar9331_1p1_baseband_postamble);
/* radio */
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
- ar9331_1p1_radio_core,
- ARRAY_SIZE(ar9331_1p1_radio_core), 2);
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST], NULL, 0, 0);
+ ar9331_1p1_radio_core);
/* soc */
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
- ar9331_1p1_soc_preamble,
- ARRAY_SIZE(ar9331_1p1_soc_preamble), 2);
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ ar9331_1p1_soc_preamble);
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
- ar9331_1p1_soc_postamble,
- ARRAY_SIZE(ar9331_1p1_soc_postamble), 2);
+ ar9331_1p1_soc_postamble);
/* rx/tx gain */
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9331_common_rx_gain_1p1,
- ARRAY_SIZE(ar9331_common_rx_gain_1p1), 2);
+ ar9331_common_rx_gain_1p1);
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_lowest_ob_db_tx_gain_1p1,
- ARRAY_SIZE(ar9331_modes_lowest_ob_db_tx_gain_1p1),
- 5);
+ ar9331_modes_lowest_ob_db_tx_gain_1p1);
/* additional clock settings */
if (ah->is_clk_25mhz)
INIT_INI_ARRAY(&ah->iniAdditional,
- ar9331_1p1_xtal_25M,
- ARRAY_SIZE(ar9331_1p1_xtal_25M), 2);
+ ar9331_1p1_xtal_25M);
else
INIT_INI_ARRAY(&ah->iniAdditional,
- ar9331_1p1_xtal_40M,
- ARRAY_SIZE(ar9331_1p1_xtal_40M), 2);
+ ar9331_1p1_xtal_40M);
} else if (AR_SREV_9330_12(ah)) {
/* mac */
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
- ar9331_1p2_mac_core,
- ARRAY_SIZE(ar9331_1p2_mac_core), 2);
+ ar9331_1p2_mac_core);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
- ar9331_1p2_mac_postamble,
- ARRAY_SIZE(ar9331_1p2_mac_postamble), 5);
+ ar9331_1p2_mac_postamble);
/* bb */
- INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
- ar9331_1p2_baseband_core,
- ARRAY_SIZE(ar9331_1p2_baseband_core), 2);
+ ar9331_1p2_baseband_core);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
- ar9331_1p2_baseband_postamble,
- ARRAY_SIZE(ar9331_1p2_baseband_postamble), 5);
+ ar9331_1p2_baseband_postamble);
/* radio */
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
- ar9331_1p2_radio_core,
- ARRAY_SIZE(ar9331_1p2_radio_core), 2);
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST], NULL, 0, 0);
+ ar9331_1p2_radio_core);
/* soc */
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
- ar9331_1p2_soc_preamble,
- ARRAY_SIZE(ar9331_1p2_soc_preamble), 2);
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ ar9331_1p2_soc_preamble);
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
- ar9331_1p2_soc_postamble,
- ARRAY_SIZE(ar9331_1p2_soc_postamble), 2);
+ ar9331_1p2_soc_postamble);
/* rx/tx gain */
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9331_common_rx_gain_1p2,
- ARRAY_SIZE(ar9331_common_rx_gain_1p2), 2);
+ ar9331_common_rx_gain_1p2);
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_lowest_ob_db_tx_gain_1p2,
- ARRAY_SIZE(ar9331_modes_lowest_ob_db_tx_gain_1p2),
- 5);
+ ar9331_modes_lowest_ob_db_tx_gain_1p2);
/* additional clock settings */
if (ah->is_clk_25mhz)
INIT_INI_ARRAY(&ah->iniAdditional,
- ar9331_1p2_xtal_25M,
- ARRAY_SIZE(ar9331_1p2_xtal_25M), 2);
+ ar9331_1p2_xtal_25M);
else
INIT_INI_ARRAY(&ah->iniAdditional,
- ar9331_1p2_xtal_40M,
- ARRAY_SIZE(ar9331_1p2_xtal_40M), 2);
+ ar9331_1p2_xtal_40M);
} else if (AR_SREV_9340(ah)) {
/* mac */
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
- ar9340_1p0_mac_core,
- ARRAY_SIZE(ar9340_1p0_mac_core), 2);
+ ar9340_1p0_mac_core);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
- ar9340_1p0_mac_postamble,
- ARRAY_SIZE(ar9340_1p0_mac_postamble), 5);
+ ar9340_1p0_mac_postamble);
/* bb */
- INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
- ar9340_1p0_baseband_core,
- ARRAY_SIZE(ar9340_1p0_baseband_core), 2);
+ ar9340_1p0_baseband_core);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
- ar9340_1p0_baseband_postamble,
- ARRAY_SIZE(ar9340_1p0_baseband_postamble), 5);
+ ar9340_1p0_baseband_postamble);
/* radio */
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
- ar9340_1p0_radio_core,
- ARRAY_SIZE(ar9340_1p0_radio_core), 2);
+ ar9340_1p0_radio_core);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
- ar9340_1p0_radio_postamble,
- ARRAY_SIZE(ar9340_1p0_radio_postamble), 5);
+ ar9340_1p0_radio_postamble);
/* soc */
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
- ar9340_1p0_soc_preamble,
- ARRAY_SIZE(ar9340_1p0_soc_preamble), 2);
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ ar9340_1p0_soc_preamble);
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
- ar9340_1p0_soc_postamble,
- ARRAY_SIZE(ar9340_1p0_soc_postamble), 5);
+ ar9340_1p0_soc_postamble);
/* rx/tx gain */
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9340Common_wo_xlna_rx_gain_table_1p0,
- ARRAY_SIZE(ar9340Common_wo_xlna_rx_gain_table_1p0),
- 5);
+ ar9340Common_wo_xlna_rx_gain_table_1p0);
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9340Modes_high_ob_db_tx_gain_table_1p0,
- ARRAY_SIZE(ar9340Modes_high_ob_db_tx_gain_table_1p0),
- 5);
+ ar9340Modes_high_ob_db_tx_gain_table_1p0);
INIT_INI_ARRAY(&ah->iniModesFastClock,
- ar9340Modes_fast_clock_1p0,
- ARRAY_SIZE(ar9340Modes_fast_clock_1p0),
- 3);
+ ar9340Modes_fast_clock_1p0);
if (!ah->is_clk_25mhz)
INIT_INI_ARRAY(&ah->iniAdditional,
- ar9340_1p0_radio_core_40M,
- ARRAY_SIZE(ar9340_1p0_radio_core_40M),
- 2);
+ ar9340_1p0_radio_core_40M);
} else if (AR_SREV_9485_11(ah)) {
/* mac */
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
- ar9485_1_1_mac_core,
- ARRAY_SIZE(ar9485_1_1_mac_core), 2);
+ ar9485_1_1_mac_core);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
- ar9485_1_1_mac_postamble,
- ARRAY_SIZE(ar9485_1_1_mac_postamble), 5);
+ ar9485_1_1_mac_postamble);
/* bb */
- INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], ar9485_1_1,
- ARRAY_SIZE(ar9485_1_1), 2);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], ar9485_1_1);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
- ar9485_1_1_baseband_core,
- ARRAY_SIZE(ar9485_1_1_baseband_core), 2);
+ ar9485_1_1_baseband_core);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
- ar9485_1_1_baseband_postamble,
- ARRAY_SIZE(ar9485_1_1_baseband_postamble), 5);
+ ar9485_1_1_baseband_postamble);
/* radio */
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
- ar9485_1_1_radio_core,
- ARRAY_SIZE(ar9485_1_1_radio_core), 2);
+ ar9485_1_1_radio_core);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
- ar9485_1_1_radio_postamble,
- ARRAY_SIZE(ar9485_1_1_radio_postamble), 2);
+ ar9485_1_1_radio_postamble);
/* soc */
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
- ar9485_1_1_soc_preamble,
- ARRAY_SIZE(ar9485_1_1_soc_preamble), 2);
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST], NULL, 0, 0);
+ ar9485_1_1_soc_preamble);
/* rx/tx gain */
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9485Common_wo_xlna_rx_gain_1_1,
- ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1), 2);
+ ar9485Common_wo_xlna_rx_gain_1_1);
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9485_modes_lowest_ob_db_tx_gain_1_1,
- ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
- 5);
+ ar9485_modes_lowest_ob_db_tx_gain_1_1);
/* Load PCIE SERDES settings from INI */
/* Awake Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9485_1_1_pcie_phy_clkreq_disable_L1,
- ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
- 2);
+ ar9485_1_1_pcie_phy_clkreq_disable_L1);
/* Sleep Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9485_1_1_pcie_phy_clkreq_disable_L1,
- ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
- 2);
+ ar9485_1_1_pcie_phy_clkreq_disable_L1);
} else if (AR_SREV_9462_20(ah)) {
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core,
- ARRAY_SIZE(ar9462_2p0_mac_core), 2);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
- ar9462_2p0_mac_postamble,
- ARRAY_SIZE(ar9462_2p0_mac_postamble), 5);
+ ar9462_2p0_mac_postamble);
- INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
- ar9462_2p0_baseband_core,
- ARRAY_SIZE(ar9462_2p0_baseband_core), 2);
+ ar9462_2p0_baseband_core);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
- ar9462_2p0_baseband_postamble,
- ARRAY_SIZE(ar9462_2p0_baseband_postamble), 5);
+ ar9462_2p0_baseband_postamble);
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
- ar9462_2p0_radio_core,
- ARRAY_SIZE(ar9462_2p0_radio_core), 2);
+ ar9462_2p0_radio_core);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
- ar9462_2p0_radio_postamble,
- ARRAY_SIZE(ar9462_2p0_radio_postamble), 5);
+ ar9462_2p0_radio_postamble);
INIT_INI_ARRAY(&ah->ini_radio_post_sys2ant,
- ar9462_2p0_radio_postamble_sys2ant,
- ARRAY_SIZE(ar9462_2p0_radio_postamble_sys2ant),
- 5);
+ ar9462_2p0_radio_postamble_sys2ant);
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
- ar9462_2p0_soc_preamble,
- ARRAY_SIZE(ar9462_2p0_soc_preamble), 2);
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ ar9462_2p0_soc_preamble);
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
- ar9462_2p0_soc_postamble,
- ARRAY_SIZE(ar9462_2p0_soc_postamble), 5);
+ ar9462_2p0_soc_postamble);
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_rx_gain_table_2p0,
- ARRAY_SIZE(ar9462_common_rx_gain_table_2p0), 2);
+ ar9462_common_rx_gain_table_2p0);
/* Awake -> Sleep Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdes,
- PCIE_PLL_ON_CREQ_DIS_L1_2P0,
- ARRAY_SIZE(PCIE_PLL_ON_CREQ_DIS_L1_2P0),
- 2);
+ PCIE_PLL_ON_CREQ_DIS_L1_2P0);
/* Sleep -> Awake Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- PCIE_PLL_ON_CREQ_DIS_L1_2P0,
- ARRAY_SIZE(PCIE_PLL_ON_CREQ_DIS_L1_2P0),
- 2);
+ PCIE_PLL_ON_CREQ_DIS_L1_2P0);
/* Fast clock modal settings */
INIT_INI_ARRAY(&ah->iniModesFastClock,
- ar9462_modes_fast_clock_2p0,
- ARRAY_SIZE(ar9462_modes_fast_clock_2p0), 3);
+ ar9462_modes_fast_clock_2p0);
INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
- AR9462_BB_CTX_COEFJ(2p0),
- ARRAY_SIZE(AR9462_BB_CTX_COEFJ(2p0)), 2);
+ AR9462_BB_CTX_COEFJ(2p0));
- INIT_INI_ARRAY(&ah->ini_japan2484, AR9462_BBC_TXIFR_COEFFJ,
- ARRAY_SIZE(AR9462_BBC_TXIFR_COEFFJ), 2);
+ INIT_INI_ARRAY(&ah->ini_japan2484, AR9462_BBC_TXIFR_COEFFJ);
+ } else if (AR_SREV_9550(ah)) {
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar955x_1p0_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar955x_1p0_mac_postamble);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar955x_1p0_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar955x_1p0_baseband_postamble);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar955x_1p0_radio_core);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar955x_1p0_radio_postamble);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar955x_1p0_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar955x_1p0_soc_postamble);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar955x_1p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ ar955x_1p0_common_wo_xlna_rx_gain_bounds);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar955x_1p0_modes_xpa_tx_gain_table);
+ /* Fast clock modal settings */
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
+ ar955x_1p0_modes_fast_clock);
} else if (AR_SREV_9580(ah)) {
/* mac */
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
- ar9580_1p0_mac_core,
- ARRAY_SIZE(ar9580_1p0_mac_core), 2);
+ ar9580_1p0_mac_core);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
- ar9580_1p0_mac_postamble,
- ARRAY_SIZE(ar9580_1p0_mac_postamble), 5);
+ ar9580_1p0_mac_postamble);
/* bb */
- INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
- ar9580_1p0_baseband_core,
- ARRAY_SIZE(ar9580_1p0_baseband_core), 2);
+ ar9580_1p0_baseband_core);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
- ar9580_1p0_baseband_postamble,
- ARRAY_SIZE(ar9580_1p0_baseband_postamble), 5);
+ ar9580_1p0_baseband_postamble);
/* radio */
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
- ar9580_1p0_radio_core,
- ARRAY_SIZE(ar9580_1p0_radio_core), 2);
+ ar9580_1p0_radio_core);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
- ar9580_1p0_radio_postamble,
- ARRAY_SIZE(ar9580_1p0_radio_postamble), 5);
+ ar9580_1p0_radio_postamble);
/* soc */
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
- ar9580_1p0_soc_preamble,
- ARRAY_SIZE(ar9580_1p0_soc_preamble), 2);
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ ar9580_1p0_soc_preamble);
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
- ar9580_1p0_soc_postamble,
- ARRAY_SIZE(ar9580_1p0_soc_postamble), 5);
+ ar9580_1p0_soc_postamble);
/* rx/tx gain */
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9580_1p0_rx_gain_table,
- ARRAY_SIZE(ar9580_1p0_rx_gain_table), 2);
+ ar9580_1p0_rx_gain_table);
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9580_1p0_low_ob_db_tx_gain_table,
- ARRAY_SIZE(ar9580_1p0_low_ob_db_tx_gain_table),
- 5);
+ ar9580_1p0_low_ob_db_tx_gain_table);
INIT_INI_ARRAY(&ah->iniModesFastClock,
- ar9580_1p0_modes_fast_clock,
- ARRAY_SIZE(ar9580_1p0_modes_fast_clock),
- 3);
+ ar9580_1p0_modes_fast_clock);
} else {
/* mac */
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
- ar9300_2p2_mac_core,
- ARRAY_SIZE(ar9300_2p2_mac_core), 2);
+ ar9300_2p2_mac_core);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
- ar9300_2p2_mac_postamble,
- ARRAY_SIZE(ar9300_2p2_mac_postamble), 5);
+ ar9300_2p2_mac_postamble);
/* bb */
- INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
- ar9300_2p2_baseband_core,
- ARRAY_SIZE(ar9300_2p2_baseband_core), 2);
+ ar9300_2p2_baseband_core);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
- ar9300_2p2_baseband_postamble,
- ARRAY_SIZE(ar9300_2p2_baseband_postamble), 5);
+ ar9300_2p2_baseband_postamble);
/* radio */
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
- ar9300_2p2_radio_core,
- ARRAY_SIZE(ar9300_2p2_radio_core), 2);
+ ar9300_2p2_radio_core);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
- ar9300_2p2_radio_postamble,
- ARRAY_SIZE(ar9300_2p2_radio_postamble), 5);
+ ar9300_2p2_radio_postamble);
/* soc */
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
- ar9300_2p2_soc_preamble,
- ARRAY_SIZE(ar9300_2p2_soc_preamble), 2);
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ ar9300_2p2_soc_preamble);
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
- ar9300_2p2_soc_postamble,
- ARRAY_SIZE(ar9300_2p2_soc_postamble), 5);
+ ar9300_2p2_soc_postamble);
/* rx/tx gain */
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9300Common_rx_gain_table_2p2,
- ARRAY_SIZE(ar9300Common_rx_gain_table_2p2), 2);
+ ar9300Common_rx_gain_table_2p2);
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
- ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2),
- 5);
+ ar9300Modes_lowest_ob_db_tx_gain_table_2p2);
/* Load PCIE SERDES settings from INI */
/* Awake Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9300PciePhy_pll_on_clkreq_disable_L1_2p2,
- ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p2),
- 2);
+ ar9300PciePhy_pll_on_clkreq_disable_L1_2p2);
/* Sleep Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9300PciePhy_pll_on_clkreq_disable_L1_2p2,
- ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p2),
- 2);
+ ar9300PciePhy_pll_on_clkreq_disable_L1_2p2);
/* Fast clock modal settings */
INIT_INI_ARRAY(&ah->iniModesFastClock,
- ar9300Modes_fast_clock_2p2,
- ARRAY_SIZE(ar9300Modes_fast_clock_2p2),
- 3);
+ ar9300Modes_fast_clock_2p2);
}
}
@@ -452,146 +355,110 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
{
if (AR_SREV_9330_12(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_lowest_ob_db_tx_gain_1p2,
- ARRAY_SIZE(ar9331_modes_lowest_ob_db_tx_gain_1p2),
- 5);
+ ar9331_modes_lowest_ob_db_tx_gain_1p2);
else if (AR_SREV_9330_11(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_lowest_ob_db_tx_gain_1p1,
- ARRAY_SIZE(ar9331_modes_lowest_ob_db_tx_gain_1p1),
- 5);
+ ar9331_modes_lowest_ob_db_tx_gain_1p1);
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
- ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
- 5);
+ ar9340Modes_lowest_ob_db_tx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9485_modes_lowest_ob_db_tx_gain_1_1,
- ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
- 5);
+ ar9485_modes_lowest_ob_db_tx_gain_1_1);
+ else if (AR_SREV_9550(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar955x_1p0_modes_xpa_tx_gain_table);
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9580_1p0_lowest_ob_db_tx_gain_table,
- ARRAY_SIZE(ar9580_1p0_lowest_ob_db_tx_gain_table),
- 5);
+ ar9580_1p0_lowest_ob_db_tx_gain_table);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9462_modes_low_ob_db_tx_gain_table_2p0,
- ARRAY_SIZE(ar9462_modes_low_ob_db_tx_gain_table_2p0),
- 5);
+ ar9462_modes_low_ob_db_tx_gain_table_2p0);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
- ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2),
- 5);
+ ar9300Modes_lowest_ob_db_tx_gain_table_2p2);
}
static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
{
if (AR_SREV_9330_12(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_high_ob_db_tx_gain_1p2,
- ARRAY_SIZE(ar9331_modes_high_ob_db_tx_gain_1p2),
- 5);
+ ar9331_modes_high_ob_db_tx_gain_1p2);
else if (AR_SREV_9330_11(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_high_ob_db_tx_gain_1p1,
- ARRAY_SIZE(ar9331_modes_high_ob_db_tx_gain_1p1),
- 5);
+ ar9331_modes_high_ob_db_tx_gain_1p1);
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
- ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
- 5);
+ ar9340Modes_high_ob_db_tx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9485Modes_high_ob_db_tx_gain_1_1,
- ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_1),
- 5);
+ ar9485Modes_high_ob_db_tx_gain_1_1);
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9580_1p0_high_ob_db_tx_gain_table,
- ARRAY_SIZE(ar9580_1p0_high_ob_db_tx_gain_table),
- 5);
+ ar9580_1p0_high_ob_db_tx_gain_table);
+ else if (AR_SREV_9550(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar955x_1p0_modes_no_xpa_tx_gain_table);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9462_modes_high_ob_db_tx_gain_table_2p0,
- ARRAY_SIZE(ar9462_modes_high_ob_db_tx_gain_table_2p0),
- 5);
+ ar9462_modes_high_ob_db_tx_gain_table_2p0);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_high_ob_db_tx_gain_table_2p2,
- ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p2),
- 5);
+ ar9300Modes_high_ob_db_tx_gain_table_2p2);
}
static void ar9003_tx_gain_table_mode2(struct ath_hw *ah)
{
if (AR_SREV_9330_12(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_low_ob_db_tx_gain_1p2,
- ARRAY_SIZE(ar9331_modes_low_ob_db_tx_gain_1p2),
- 5);
+ ar9331_modes_low_ob_db_tx_gain_1p2);
else if (AR_SREV_9330_11(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_low_ob_db_tx_gain_1p1,
- ARRAY_SIZE(ar9331_modes_low_ob_db_tx_gain_1p1),
- 5);
+ ar9331_modes_low_ob_db_tx_gain_1p1);
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
- ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
- 5);
+ ar9340Modes_low_ob_db_tx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9485Modes_low_ob_db_tx_gain_1_1,
- ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_1),
- 5);
+ ar9485Modes_low_ob_db_tx_gain_1_1);
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9580_1p0_low_ob_db_tx_gain_table,
- ARRAY_SIZE(ar9580_1p0_low_ob_db_tx_gain_table),
- 5);
+ ar9580_1p0_low_ob_db_tx_gain_table);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_low_ob_db_tx_gain_table_2p2,
- ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p2),
- 5);
+ ar9300Modes_low_ob_db_tx_gain_table_2p2);
}
static void ar9003_tx_gain_table_mode3(struct ath_hw *ah)
{
if (AR_SREV_9330_12(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_high_power_tx_gain_1p2,
- ARRAY_SIZE(ar9331_modes_high_power_tx_gain_1p2),
- 5);
+ ar9331_modes_high_power_tx_gain_1p2);
else if (AR_SREV_9330_11(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_high_power_tx_gain_1p1,
- ARRAY_SIZE(ar9331_modes_high_power_tx_gain_1p1),
- 5);
+ ar9331_modes_high_power_tx_gain_1p1);
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
- ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
- 5);
+ ar9340Modes_high_power_tx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9485Modes_high_power_tx_gain_1_1,
- ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_1),
- 5);
+ ar9485Modes_high_power_tx_gain_1_1);
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9580_1p0_high_power_tx_gain_table,
- ARRAY_SIZE(ar9580_1p0_high_power_tx_gain_table),
- 5);
+ ar9580_1p0_high_power_tx_gain_table);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_high_power_tx_gain_table_2p2,
- ARRAY_SIZE(ar9300Modes_high_power_tx_gain_table_2p2),
- 5);
+ ar9300Modes_high_power_tx_gain_table_2p2);
+}
+
+static void ar9003_tx_gain_table_mode4(struct ath_hw *ah)
+{
+ if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9340Modes_mixed_ob_db_tx_gain_table_1p0);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9580_1p0_mixed_ob_db_tx_gain_table);
}
static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
@@ -610,6 +477,9 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
case 3:
ar9003_tx_gain_table_mode3(ah);
break;
+ case 4:
+ ar9003_tx_gain_table_mode4(ah);
+ break;
}
}
@@ -617,86 +487,67 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
{
if (AR_SREV_9330_12(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9331_common_rx_gain_1p2,
- ARRAY_SIZE(ar9331_common_rx_gain_1p2),
- 2);
+ ar9331_common_rx_gain_1p2);
else if (AR_SREV_9330_11(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9331_common_rx_gain_1p1,
- ARRAY_SIZE(ar9331_common_rx_gain_1p1),
- 2);
+ ar9331_common_rx_gain_1p1);
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9340Common_rx_gain_table_1p0,
- ARRAY_SIZE(ar9340Common_rx_gain_table_1p0),
- 2);
+ ar9340Common_rx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9485Common_wo_xlna_rx_gain_1_1,
- ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
- 2);
- else if (AR_SREV_9580(ah))
+ ar9485Common_wo_xlna_rx_gain_1_1);
+ else if (AR_SREV_9550(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar955x_1p0_common_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ ar955x_1p0_common_rx_gain_bounds);
+ } else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9580_1p0_rx_gain_table,
- ARRAY_SIZE(ar9580_1p0_rx_gain_table),
- 2);
+ ar9580_1p0_rx_gain_table);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_rx_gain_table_2p0,
- ARRAY_SIZE(ar9462_common_rx_gain_table_2p0),
- 2);
+ ar9462_common_rx_gain_table_2p0);
else
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9300Common_rx_gain_table_2p2,
- ARRAY_SIZE(ar9300Common_rx_gain_table_2p2),
- 2);
+ ar9300Common_rx_gain_table_2p2);
}
static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
{
if (AR_SREV_9330_12(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9331_common_wo_xlna_rx_gain_1p2,
- ARRAY_SIZE(ar9331_common_wo_xlna_rx_gain_1p2),
- 2);
+ ar9331_common_wo_xlna_rx_gain_1p2);
else if (AR_SREV_9330_11(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9331_common_wo_xlna_rx_gain_1p1,
- ARRAY_SIZE(ar9331_common_wo_xlna_rx_gain_1p1),
- 2);
+ ar9331_common_wo_xlna_rx_gain_1p1);
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9340Common_wo_xlna_rx_gain_table_1p0,
- ARRAY_SIZE(ar9340Common_wo_xlna_rx_gain_table_1p0),
- 2);
+ ar9340Common_wo_xlna_rx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9485Common_wo_xlna_rx_gain_1_1,
- ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
- 2);
+ ar9485Common_wo_xlna_rx_gain_1_1);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_wo_xlna_rx_gain_table_2p0,
- ARRAY_SIZE(ar9462_common_wo_xlna_rx_gain_table_2p0),
- 2);
- else if (AR_SREV_9580(ah))
+ ar9462_common_wo_xlna_rx_gain_table_2p0);
+ else if (AR_SREV_9550(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar955x_1p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ ar955x_1p0_common_wo_xlna_rx_gain_bounds);
+ } else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9580_1p0_wo_xlna_rx_gain_table,
- ARRAY_SIZE(ar9580_1p0_wo_xlna_rx_gain_table),
- 2);
+ ar9580_1p0_wo_xlna_rx_gain_table);
else
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9300Common_wo_xlna_rx_gain_table_2p2,
- ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p2),
- 2);
+ ar9300Common_wo_xlna_rx_gain_table_2p2);
}
static void ar9003_rx_gain_table_mode2(struct ath_hw *ah)
{
if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_mixed_rx_gain_table_2p0,
- ARRAY_SIZE(ar9462_common_mixed_rx_gain_table_2p0), 2);
+ ar9462_common_mixed_rx_gain_table_2p0);
}
static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
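/*
 * Editor's note: every INIT_INI_ARRAY() call in the ar9003_hw.c hunks above
 * loses its explicit ARRAY_SIZE() and column-count arguments, and the
 * NULL/0/0 placeholder calls disappear, which implies the macro now derives
 * both dimensions from the 2-D table it is handed.  The macro definition is
 * not part of this diff, so the sketch below is only a plausible shape for
 * such a helper (generic names, not the ath9k definitions).
 */
#include <stdio.h>

#define ARRAY_ROWS(a) (sizeof(a) / sizeof((a)[0]))
#define ARRAY_COLS(a) (sizeof((a)[0]) / sizeof((a)[0][0]))

struct ini_array {
	const unsigned int *data;
	unsigned int rows;
	unsigned int cols;
};

static void init_ini_array(struct ini_array *ini, const unsigned int *data,
			   unsigned int rows, unsigned int cols)
{
	ini->data = data;
	ini->rows = rows;
	ini->cols = cols;
}

/* Callers pass just the table; the dimensions come from its type. */
#define INIT_INI_ARRAY(ini, tbl) \
	init_ini_array(ini, &(tbl)[0][0], ARRAY_ROWS(tbl), ARRAY_COLS(tbl))

static const unsigned int demo_mac_core[3][2] = {
	{ 0x0008, 0x0 }, { 0x0030, 0x40 }, { 0x0034, 0x0 },
};

int main(void)
{
	struct ini_array ini;

	INIT_INI_ARRAY(&ini, demo_mac_core);
	printf("rows=%u cols=%u\n", ini.rows, ini.cols); /* rows=3 cols=2 */
	return 0;
}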
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index d9e0824..78816b8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -181,11 +181,14 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
u32 mask2 = 0;
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
- u32 sync_cause = 0, async_cause;
+ u32 sync_cause = 0, async_cause, async_mask = AR_INTR_MAC_IRQ;
+
+ if (ath9k_hw_mci_is_enabled(ah))
+ async_mask |= AR_INTR_ASYNC_MASK_MCI;
async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
- if (async_cause & (AR_INTR_MAC_IRQ | AR_INTR_ASYNC_MASK_MCI)) {
+ if (async_cause & async_mask) {
if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
== AR_RTC_STATUS_ON)
isr = REG_READ(ah, AR_ISR);
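/*
 * Editor's note: the ar9003_mac.c hunk above stops hard-coding the MCI bit
 * into the async-cause test and builds the mask at run time instead, so
 * chips without MCI never treat AR_INTR_ASYNC_MASK_MCI as a pending cause.
 * The fragment below restates that idea with stand-in constants; the real
 * register values live in the ath9k headers, not here.
 */
#include <stdbool.h>
#include <stdio.h>

#define INTR_MAC_IRQ        0x00000002u   /* stand-in values */
#define INTR_ASYNC_MASK_MCI 0x00000080u

static unsigned int async_mask(bool mci_enabled)
{
	unsigned int mask = INTR_MAC_IRQ;

	if (mci_enabled)
		mask |= INTR_ASYNC_MASK_MCI;     /* only when MCI exists */
	return mask;
}

int main(void)
{
	unsigned int async_cause = INTR_ASYNC_MASK_MCI;  /* MCI bit raised */

	/* Without MCI the spurious bit is ignored ... */
	printf("handle? %d\n", !!(async_cause & async_mask(false))); /* 0 */
	/* ... with MCI enabled it is serviced. */
	printf("handle? %d\n", !!(async_cause & async_mask(true)));  /* 1 */
	return 0;
}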
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index ffbb180..9a34fca 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -35,31 +35,30 @@ static int ar9003_mci_wait_for_interrupt(struct ath_hw *ah, u32 address,
struct ath_common *common = ath9k_hw_common(ah);
while (time_out) {
- if (REG_READ(ah, address) & bit_position) {
- REG_WRITE(ah, address, bit_position);
-
- if (address == AR_MCI_INTERRUPT_RX_MSG_RAW) {
- if (bit_position &
- AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
- ar9003_mci_reset_req_wakeup(ah);
-
- if (bit_position &
- (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING |
- AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING))
- REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
- AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
-
- REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
- AR_MCI_INTERRUPT_RX_MSG);
- }
- break;
- }
+ if (!(REG_READ(ah, address) & bit_position)) {
+ udelay(10);
+ time_out -= 10;
- udelay(10);
- time_out -= 10;
+ if (time_out < 0)
+ break;
+ else
+ continue;
+ }
+ REG_WRITE(ah, address, bit_position);
- if (time_out < 0)
+ if (address != AR_MCI_INTERRUPT_RX_MSG_RAW)
break;
+
+ if (bit_position & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+ ar9003_mci_reset_req_wakeup(ah);
+
+ if (bit_position & (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING |
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING))
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_RX_MSG);
+ break;
}
if (time_out <= 0) {
@@ -127,14 +126,13 @@ static void ar9003_mci_send_coex_version_query(struct ath_hw *ah,
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 payload[4] = {0, 0, 0, 0};
- if (!mci->bt_version_known &&
- (mci->bt_state != MCI_BT_SLEEP)) {
- MCI_GPM_SET_TYPE_OPCODE(payload,
- MCI_GPM_COEX_AGENT,
- MCI_GPM_COEX_VERSION_QUERY);
- ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
- wait_done, true);
- }
+ if (mci->bt_version_known ||
+ (mci->bt_state == MCI_BT_SLEEP))
+ return;
+
+ MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_VERSION_QUERY);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
}
static void ar9003_mci_send_coex_version_response(struct ath_hw *ah,
@@ -158,15 +156,14 @@ static void ar9003_mci_send_coex_wlan_channels(struct ath_hw *ah,
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 *payload = &mci->wlan_channels[0];
- if ((mci->wlan_channels_update == true) &&
- (mci->bt_state != MCI_BT_SLEEP)) {
- MCI_GPM_SET_TYPE_OPCODE(payload,
- MCI_GPM_COEX_AGENT,
- MCI_GPM_COEX_WLAN_CHANNELS);
- ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
- wait_done, true);
- MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff);
- }
+ if (!mci->wlan_channels_update ||
+ (mci->bt_state == MCI_BT_SLEEP))
+ return;
+
+ MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_WLAN_CHANNELS);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
+ MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff);
}
static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
@@ -174,29 +171,30 @@ static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 payload[4] = {0, 0, 0, 0};
- bool query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO |
- MCI_GPM_COEX_QUERY_BT_TOPOLOGY));
+ bool query_btinfo;
- if (mci->bt_state != MCI_BT_SLEEP) {
+ if (mci->bt_state == MCI_BT_SLEEP)
+ return;
- MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
- MCI_GPM_COEX_STATUS_QUERY);
+ query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO |
+ MCI_GPM_COEX_QUERY_BT_TOPOLOGY));
+ MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_STATUS_QUERY);
- *(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type;
-
- /*
- * If bt_status_query message is not sent successfully,
- * then need_flush_btinfo should be set again.
- */
- if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
- wait_done, true)) {
- if (query_btinfo)
- mci->need_flush_btinfo = true;
- }
+ *(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type;
+ /*
+ * If bt_status_query message is not sent successfully,
+ * then need_flush_btinfo should be set again.
+ */
+ if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ wait_done, true)) {
if (query_btinfo)
- mci->query_bt = false;
+ mci->need_flush_btinfo = true;
}
+
+ if (query_btinfo)
+ mci->query_bt = false;
}
static void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
@@ -241,73 +239,73 @@ static void ar9003_mci_prep_interface(struct ath_hw *ah)
ar9003_mci_remote_reset(ah, true);
ar9003_mci_send_req_wake(ah, true);
- if (ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
- AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500)) {
+ if (!ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500))
+ goto clear_redunt;
- mci->bt_state = MCI_BT_AWAKE;
+ mci->bt_state = MCI_BT_AWAKE;
- /*
- * we don't need to send more remote_reset at this moment.
- * If BT receive first remote_reset, then BT HW will
- * be cleaned up and will be able to receive req_wake
- * and BT HW will respond sys_waking.
- * In this case, WLAN will receive BT's HW sys_waking.
- * Otherwise, if BT SW missed initial remote_reset,
- * that remote_reset will still clean up BT MCI RX,
- * and the req_wake will wake BT up,
- * and BT SW will respond this req_wake with a remote_reset and
- * sys_waking. In this case, WLAN will receive BT's SW
- * sys_waking. In either case, BT's RX is cleaned up. So we
- * don't need to reply BT's remote_reset now, if any.
- * Similarly, if in any case, WLAN can receive BT's sys_waking,
- * that means WLAN's RX is also fine.
- */
- ar9003_mci_send_sys_waking(ah, true);
- udelay(10);
+ /*
+ * we don't need to send more remote_reset at this moment.
+ * If BT receive first remote_reset, then BT HW will
+ * be cleaned up and will be able to receive req_wake
+ * and BT HW will respond sys_waking.
+ * In this case, WLAN will receive BT's HW sys_waking.
+ * Otherwise, if BT SW missed initial remote_reset,
+ * that remote_reset will still clean up BT MCI RX,
+ * and the req_wake will wake BT up,
+ * and BT SW will respond this req_wake with a remote_reset and
+ * sys_waking. In this case, WLAN will receive BT's SW
+ * sys_waking. In either case, BT's RX is cleaned up. So we
+ * don't need to reply BT's remote_reset now, if any.
+ * Similarly, if in any case, WLAN can receive BT's sys_waking,
+ * that means WLAN's RX is also fine.
+ */
+ ar9003_mci_send_sys_waking(ah, true);
+ udelay(10);
- /*
- * Set BT priority interrupt value to be 0xff to
- * avoid having too many BT PRIORITY interrupts.
- */
- REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF);
- REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF);
- REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF);
- REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF);
- REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF);
+ /*
+ * Set BT priority interrupt value to be 0xff to
+ * avoid having too many BT PRIORITY interrupts.
+ */
+ REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF);
- /*
- * A contention reset will be received after send out
- * sys_waking. Also BT priority interrupt bits will be set.
- * Clear those bits before the next step.
- */
+ /*
+ * A contention reset will be received after send out
+ * sys_waking. Also BT priority interrupt bits will be set.
+ * Clear those bits before the next step.
+ */
- REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
- AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
- REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
- AR_MCI_INTERRUPT_BT_PRI);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_BT_PRI);
- if (mci->is_2g) {
- ar9003_mci_send_lna_transfer(ah, true);
- udelay(5);
- }
+ if (mci->is_2g) {
+ ar9003_mci_send_lna_transfer(ah, true);
+ udelay(5);
+ }
- if ((mci->is_2g && !mci->update_2g5g)) {
- if (ar9003_mci_wait_for_interrupt(ah,
- AR_MCI_INTERRUPT_RX_MSG_RAW,
- AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
- mci_timeout))
- ath_dbg(common, MCI,
- "MCI WLAN has control over the LNA & BT obeys it\n");
- else
- ath_dbg(common, MCI,
- "MCI BT didn't respond to LNA_TRANS\n");
- }
+ if ((mci->is_2g && !mci->update_2g5g)) {
+ if (ar9003_mci_wait_for_interrupt(ah,
+ AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
+ mci_timeout))
+ ath_dbg(common, MCI,
+ "MCI WLAN has control over the LNA & BT obeys it\n");
+ else
+ ath_dbg(common, MCI,
+ "MCI BT didn't respond to LNA_TRANS\n");
}
+clear_redunt:
/* Clear the extra redundant SYS_WAKING from BT */
if ((mci->bt_state == MCI_BT_AWAKE) &&
- (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
- AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) &&
+ (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) &&
(REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) {
REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
@@ -323,14 +321,13 @@ void ar9003_mci_set_full_sleep(struct ath_hw *ah)
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
- if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) &&
+ if (ar9003_mci_state(ah, MCI_STATE_ENABLE) &&
(mci->bt_state != MCI_BT_SLEEP) &&
!mci->halted_bt_gpm) {
ar9003_mci_send_coex_halt_bt_gpm(ah, true, true);
}
mci->ready = false;
- REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
}
static void ar9003_mci_disable_interrupt(struct ath_hw *ah)
@@ -487,7 +484,7 @@ static void ar9003_mci_sync_bt_state(struct ath_hw *ah)
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 cur_bt_state;
- cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL);
+ cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP);
if (mci->bt_state != cur_bt_state)
mci->bt_state = cur_bt_state;
@@ -596,8 +593,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
if (!time_out)
break;
- offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
- &more_data);
+ offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
if (offset == MCI_GPM_INVALID)
continue;
@@ -615,9 +611,9 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
}
break;
}
- } else if ((recv_type == gpm_type) && (recv_opcode == gpm_opcode)) {
+ } else if ((recv_type == gpm_type) &&
+ (recv_opcode == gpm_opcode))
break;
- }
/*
* check if it's cal_grant
@@ -661,8 +657,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
time_out = 0;
while (more_data == MCI_GPM_MORE) {
- offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
- &more_data);
+ offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
if (offset == MCI_GPM_INVALID)
break;
@@ -731,38 +726,38 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (!IS_CHAN_2GHZ(chan) || (mci_hw->bt_state != MCI_BT_SLEEP))
goto exit;
- if (ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) ||
- ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)) {
+ if (!ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) &&
+ !ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE))
+ goto exit;
- /*
- * BT is sleeping. Check if BT wakes up during
- * WLAN calibration. If BT wakes up during
- * WLAN calibration, need to go through all
- * message exchanges again and recal.
- */
- REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
- AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
- AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE);
+ /*
+ * BT is sleeping. Check if BT wakes up during
+ * WLAN calibration. If BT wakes up during
+ * WLAN calibration, need to go through all
+ * message exchanges again and recal.
+ */
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ (AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE));
- ar9003_mci_remote_reset(ah, true);
- ar9003_mci_send_sys_waking(ah, true);
- udelay(1);
+ ar9003_mci_remote_reset(ah, true);
+ ar9003_mci_send_sys_waking(ah, true);
+ udelay(1);
- if (IS_CHAN_2GHZ(chan))
- ar9003_mci_send_lna_transfer(ah, true);
+ if (IS_CHAN_2GHZ(chan))
+ ar9003_mci_send_lna_transfer(ah, true);
- mci_hw->bt_state = MCI_BT_AWAKE;
+ mci_hw->bt_state = MCI_BT_AWAKE;
- if (caldata) {
- caldata->done_txiqcal_once = false;
- caldata->done_txclcal_once = false;
- caldata->rtt_done = false;
- }
+ if (caldata) {
+ caldata->done_txiqcal_once = false;
+ caldata->done_txclcal_once = false;
+ caldata->rtt_done = false;
+ }
- if (!ath9k_hw_init_cal(ah, chan))
- return -EIO;
+ if (!ath9k_hw_init_cal(ah, chan))
+ return -EIO;
- }
exit:
ar9003_mci_enable_interrupt(ah);
return 0;
@@ -772,10 +767,6 @@ static void ar9003_mci_mute_bt(struct ath_hw *ah)
{
/* disable all MCI messages */
REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000);
- REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff);
- REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff);
- REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff);
- REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff);
REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
/* wait pending HW messages to flush out */
@@ -798,29 +789,27 @@ static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable)
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 thresh;
- if (enable) {
- REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
- AR_MCI_SCHD_TABLE_2_HW_BASED, 1);
- REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
- AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
-
- if (!(mci->config & ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
- thresh = MS(mci->config, ATH_MCI_CONFIG_AGGR_THRESH);
- REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
- AR_BTCOEX_CTRL_AGGR_THRESH, thresh);
- REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
- AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 1);
- } else {
- REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
- AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 0);
- }
-
- REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
- AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1);
- } else {
+ if (!enable) {
REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+ return;
}
+ REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2, AR_MCI_SCHD_TABLE_2_HW_BASED, 1);
+ REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
+ AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
+
+ if (!(mci->config & ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
+ thresh = MS(mci->config, ATH_MCI_CONFIG_AGGR_THRESH);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_AGGR_THRESH, thresh);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 1);
+ } else
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 0);
+
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1);
}
void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
@@ -898,13 +887,16 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
udelay(100);
}
+ /* Check pending GPM msg before MCI Reset Rx */
+ ar9003_mci_check_gpm_offset(ah);
+
regval |= SM(1, AR_MCI_COMMAND2_RESET_RX);
REG_WRITE(ah, AR_MCI_COMMAND2, regval);
udelay(1);
regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
REG_WRITE(ah, AR_MCI_COMMAND2, regval);
- ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
+ ar9003_mci_get_next_gpm_offset(ah, true, NULL);
REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
(SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
@@ -943,26 +935,27 @@ static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done)
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 new_flags, to_set, to_clear;
- if (mci->update_2g5g && (mci->bt_state != MCI_BT_SLEEP)) {
- if (mci->is_2g) {
- new_flags = MCI_2G_FLAGS;
- to_clear = MCI_2G_FLAGS_CLEAR_MASK;
- to_set = MCI_2G_FLAGS_SET_MASK;
- } else {
- new_flags = MCI_5G_FLAGS;
- to_clear = MCI_5G_FLAGS_CLEAR_MASK;
- to_set = MCI_5G_FLAGS_SET_MASK;
- }
+ if (!mci->update_2g5g || (mci->bt_state == MCI_BT_SLEEP))
+ return;
+
+ if (mci->is_2g) {
+ new_flags = MCI_2G_FLAGS;
+ to_clear = MCI_2G_FLAGS_CLEAR_MASK;
+ to_set = MCI_2G_FLAGS_SET_MASK;
+ } else {
+ new_flags = MCI_5G_FLAGS;
+ to_clear = MCI_5G_FLAGS_CLEAR_MASK;
+ to_set = MCI_5G_FLAGS_SET_MASK;
+ }
- if (to_clear)
- ar9003_mci_send_coex_bt_flags(ah, wait_done,
+ if (to_clear)
+ ar9003_mci_send_coex_bt_flags(ah, wait_done,
MCI_GPM_COEX_BT_FLAGS_CLEAR,
to_clear);
- if (to_set)
- ar9003_mci_send_coex_bt_flags(ah, wait_done,
+ if (to_set)
+ ar9003_mci_send_coex_bt_flags(ah, wait_done,
MCI_GPM_COEX_BT_FLAGS_SET,
to_set);
- }
}
static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
@@ -1014,38 +1007,36 @@ static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
}
}
-void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done)
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force)
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
- if (mci->update_2g5g) {
- if (mci->is_2g) {
- ar9003_mci_send_2g5g_status(ah, true);
- ar9003_mci_send_lna_transfer(ah, true);
- udelay(5);
+ if (!mci->update_2g5g && !force)
+ return;
- REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
- AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
- REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL,
- AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+ if (mci->is_2g) {
+ ar9003_mci_send_2g5g_status(ah, true);
+ ar9003_mci_send_lna_transfer(ah, true);
+ udelay(5);
- if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) {
- REG_SET_BIT(ah, AR_BTCOEX_CTRL,
- AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
- }
- } else {
- ar9003_mci_send_lna_take(ah, true);
- udelay(5);
+ REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+ REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+
+ if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
+ ar9003_mci_osla_setup(ah, true);
+ } else {
+ ar9003_mci_send_lna_take(ah, true);
+ udelay(5);
- REG_SET_BIT(ah, AR_MCI_TX_CTRL,
- AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
- REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
- AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
- REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
- AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+ REG_SET_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+ REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
- ar9003_mci_send_2g5g_status(ah, true);
- }
+ ar9003_mci_osla_setup(ah, false);
+ ar9003_mci_send_2g5g_status(ah, true);
}
}
@@ -1132,7 +1123,7 @@ void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable)
if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000)) {
ath_dbg(common, MCI, "MCI BT_CAL_GRANT received\n");
} else {
- is_reusable = false;
+ *is_reusable = false;
ath_dbg(common, MCI, "MCI BT_CAL_GRANT not received\n");
}
}
@@ -1173,11 +1164,10 @@ void ar9003_mci_cleanup(struct ath_hw *ah)
}
EXPORT_SYMBOL(ar9003_mci_cleanup);
-u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
{
- struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
- u32 value = 0, more_gpm = 0, gpm_ptr;
+ u32 value = 0;
u8 query_type;
switch (state_type) {
@@ -1190,81 +1180,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
}
value &= AR_BTCOEX_CTRL_MCI_MODE_EN;
break;
- case MCI_STATE_INIT_GPM_OFFSET:
- value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
- mci->gpm_idx = value;
- break;
- case MCI_STATE_NEXT_GPM_OFFSET:
- case MCI_STATE_LAST_GPM_OFFSET:
- /*
- * This could be useful to avoid new GPM message interrupt which
- * may lead to spurious interrupt after power sleep, or multiple
- * entry of ath_mci_intr().
- * Adding empty GPM check by returning HAL_MCI_GPM_INVALID can
- * alleviate this effect, but clearing GPM RX interrupt bit is
- * safe, because whether this is called from hw or driver code
- * there must be an interrupt bit set/triggered initially
- */
- REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
- AR_MCI_INTERRUPT_RX_MSG_GPM);
-
- gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
- value = gpm_ptr;
-
- if (value == 0)
- value = mci->gpm_len - 1;
- else if (value >= mci->gpm_len) {
- if (value != 0xFFFF)
- value = 0;
- } else {
- value--;
- }
-
- if (value == 0xFFFF) {
- value = MCI_GPM_INVALID;
- more_gpm = MCI_GPM_NOMORE;
- } else if (state_type == MCI_STATE_NEXT_GPM_OFFSET) {
- if (gpm_ptr == mci->gpm_idx) {
- value = MCI_GPM_INVALID;
- more_gpm = MCI_GPM_NOMORE;
- } else {
- for (;;) {
- u32 temp_index;
-
- /* skip reserved GPM if any */
-
- if (value != mci->gpm_idx)
- more_gpm = MCI_GPM_MORE;
- else
- more_gpm = MCI_GPM_NOMORE;
-
- temp_index = mci->gpm_idx;
- mci->gpm_idx++;
-
- if (mci->gpm_idx >=
- mci->gpm_len)
- mci->gpm_idx = 0;
-
- if (ar9003_mci_is_gpm_valid(ah,
- temp_index)) {
- value = temp_index;
- break;
- }
-
- if (more_gpm == MCI_GPM_NOMORE) {
- value = MCI_GPM_INVALID;
- break;
- }
- }
- }
- if (p_data)
- *p_data = more_gpm;
- }
-
- if (value != MCI_GPM_INVALID)
- value <<= 4;
-
- break;
case MCI_STATE_LAST_SCHD_MSG_OFFSET:
value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
AR_MCI_RX_LAST_SCHD_MSG_INDEX);
@@ -1276,21 +1191,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
AR_MCI_RX_REMOTE_SLEEP) ?
MCI_BT_SLEEP : MCI_BT_AWAKE;
break;
- case MCI_STATE_CONT_RSSI_POWER:
- value = MS(mci->cont_status, AR_MCI_CONT_RSSI_POWER);
- break;
- case MCI_STATE_CONT_PRIORITY:
- value = MS(mci->cont_status, AR_MCI_CONT_RRIORITY);
- break;
- case MCI_STATE_CONT_TXRX:
- value = MS(mci->cont_status, AR_MCI_CONT_TXRX);
- break;
- case MCI_STATE_BT:
- value = mci->bt_state;
- break;
- case MCI_STATE_SET_BT_SLEEP:
- mci->bt_state = MCI_BT_SLEEP;
- break;
case MCI_STATE_SET_BT_AWAKE:
mci->bt_state = MCI_BT_AWAKE;
ar9003_mci_send_coex_version_query(ah, true);
@@ -1299,7 +1199,7 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
if (mci->unhalt_bt_gpm)
ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
- ar9003_mci_2g5g_switch(ah, true);
+ ar9003_mci_2g5g_switch(ah, false);
break;
case MCI_STATE_SET_BT_CAL_START:
mci->bt_state = MCI_BT_CAL_START;
@@ -1323,34 +1223,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
case MCI_STATE_SEND_WLAN_COEX_VERSION:
ar9003_mci_send_coex_version_response(ah, true);
break;
- case MCI_STATE_SET_BT_COEX_VERSION:
- if (!p_data)
- ath_dbg(common, MCI,
- "MCI Set BT Coex version with NULL data!!\n");
- else {
- mci->bt_ver_major = (*p_data >> 8) & 0xff;
- mci->bt_ver_minor = (*p_data) & 0xff;
- mci->bt_version_known = true;
- ath_dbg(common, MCI, "MCI BT version set: %d.%d\n",
- mci->bt_ver_major, mci->bt_ver_minor);
- }
- break;
- case MCI_STATE_SEND_WLAN_CHANNELS:
- if (p_data) {
- if (((mci->wlan_channels[1] & 0xffff0000) ==
- (*(p_data + 1) & 0xffff0000)) &&
- (mci->wlan_channels[2] == *(p_data + 2)) &&
- (mci->wlan_channels[3] == *(p_data + 3)))
- break;
-
- mci->wlan_channels[0] = *p_data++;
- mci->wlan_channels[1] = *p_data++;
- mci->wlan_channels[2] = *p_data++;
- mci->wlan_channels[3] = *p_data++;
- }
- mci->wlan_channels_update = true;
- ar9003_mci_send_coex_wlan_channels(ah, true);
- break;
case MCI_STATE_SEND_VERSION_QUERY:
ar9003_mci_send_coex_version_query(ah, true);
break;
@@ -1358,38 +1230,16 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
query_type = MCI_GPM_COEX_QUERY_BT_TOPOLOGY;
ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
break;
- case MCI_STATE_NEED_FLUSH_BT_INFO:
- /*
- * btcoex_hw.mci.unhalt_bt_gpm means whether it's
- * needed to send UNHALT message. It's set whenever
- * there's a request to send HALT message.
- * mci_halted_bt_gpm means whether HALT message is sent
- * out successfully.
- *
- * Checking (mci_unhalt_bt_gpm == false) instead of
- * checking (ah->mci_halted_bt_gpm == false) will make
- * sure currently is in UNHALT-ed mode and BT can
- * respond to status query.
- */
- value = (!mci->unhalt_bt_gpm &&
- mci->need_flush_btinfo) ? 1 : 0;
- if (p_data)
- mci->need_flush_btinfo =
- (*p_data != 0) ? true : false;
- break;
case MCI_STATE_RECOVER_RX:
ar9003_mci_prep_interface(ah);
mci->query_bt = true;
mci->need_flush_btinfo = true;
ar9003_mci_send_coex_wlan_channels(ah, true);
- ar9003_mci_2g5g_switch(ah, true);
+ ar9003_mci_2g5g_switch(ah, false);
break;
case MCI_STATE_NEED_FTP_STOMP:
value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
break;
- case MCI_STATE_NEED_TUNING:
- value = !(mci->config & ATH_MCI_CONFIG_DISABLE_TUNING);
- break;
default:
break;
}
@@ -1397,3 +1247,173 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
return value;
}
EXPORT_SYMBOL(ar9003_mci_state);
+
+void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ ath_dbg(common, MCI, "Give LNA and SPDT control to BT\n");
+
+ ar9003_mci_send_lna_take(ah, true);
+ udelay(50);
+
+ REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+ mci->is_2g = false;
+ mci->update_2g5g = true;
+ ar9003_mci_send_2g5g_status(ah, true);
+
+	/* Force another 2g5g update on the next scan */
+ mci->update_2g5g = true;
+}
+
+void ar9003_mci_set_power_awake(struct ath_hw *ah)
+{
+ u32 btcoex_ctrl2, diag_sw;
+ int i;
+ u8 lna_ctrl, bt_sleep;
+
+ for (i = 0; i < AH_WAIT_TIMEOUT; i++) {
+ btcoex_ctrl2 = REG_READ(ah, AR_BTCOEX_CTRL2);
+ if (btcoex_ctrl2 != 0xdeadbeef)
+ break;
+ udelay(AH_TIME_QUANTUM);
+ }
+ REG_WRITE(ah, AR_BTCOEX_CTRL2, (btcoex_ctrl2 | BIT(23)));
+
+ for (i = 0; i < AH_WAIT_TIMEOUT; i++) {
+ diag_sw = REG_READ(ah, AR_DIAG_SW);
+ if (diag_sw != 0xdeadbeef)
+ break;
+ udelay(AH_TIME_QUANTUM);
+ }
+ REG_WRITE(ah, AR_DIAG_SW, (diag_sw | BIT(27) | BIT(19) | BIT(18)));
+ lna_ctrl = REG_READ(ah, AR_OBS_BUS_CTRL) & 0x3;
+ bt_sleep = REG_READ(ah, AR_MCI_RX_STATUS) & AR_MCI_RX_REMOTE_SLEEP;
+
+ REG_WRITE(ah, AR_BTCOEX_CTRL2, btcoex_ctrl2);
+ REG_WRITE(ah, AR_DIAG_SW, diag_sw);
+
+ if (bt_sleep && (lna_ctrl == 2)) {
+ REG_SET_BIT(ah, AR_BTCOEX_RC, 0x1);
+ REG_CLR_BIT(ah, AR_BTCOEX_RC, 0x1);
+ udelay(50);
+ }
+}
+
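The two loops in ar9003_mci_set_power_awake() above poll a register until it stops reading back 0xdeadbeef (presumably the value returned while the MAC is still asleep), bounded by AH_WAIT_TIMEOUT iterations of AH_TIME_QUANTUM microseconds, and then proceed with whatever was last read. A minimal self-contained sketch of that bounded-poll idiom follows; read_reg() and sleep_us() are hypothetical stand-ins for REG_READ() and udelay(), stubbed here so the sketch compiles and runs on its own, and unlike the driver it reports the timeout to the caller.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define POLL_SENTINEL 0xdeadbeefu	/* assumed "MAC not responding" read value */
#define POLL_TRIES    1000		/* stands in for AH_WAIT_TIMEOUT */

static int fake_reads_left = 3;		/* pretend the MAC wakes up after 3 reads */

static uint32_t read_reg(uint32_t addr)
{
	(void)addr;
	return fake_reads_left-- > 0 ? POLL_SENTINEL : 0x00400000u;
}

static void sleep_us(unsigned int us)
{
	(void)us;			/* no-op in this host-side model */
}

/* Poll until the register returns something other than the sentinel,
 * or give up after POLL_TRIES attempts. */
static bool poll_reg_awake(uint32_t addr, uint32_t *val)
{
	for (int i = 0; i < POLL_TRIES; i++) {
		*val = read_reg(addr);
		if (*val != POLL_SENTINEL)
			return true;
		sleep_us(10);
	}
	return false;
}

int main(void)
{
	uint32_t v = 0;
	bool ok = poll_reg_awake(0x1800, &v);

	printf("awake=%d value=0x%08x\n", ok, (unsigned)v);
	return 0;
}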
+void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 offset;
+
+ /*
+ * This should only be called before "MAC Warm Reset" or "MCI Reset Rx".
+ */
+ offset = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+ if (mci->gpm_idx == offset)
+ return;
+ ath_dbg(common, MCI, "GPM cached write pointer mismatch %d %d\n",
+ mci->gpm_idx, offset);
+ mci->query_bt = true;
+ mci->need_flush_btinfo = true;
+ mci->gpm_idx = 0;
+}
+
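ar9003_mci_check_gpm_offset() above compares the driver's cached GPM write index (mci->gpm_idx) against the hardware's current write pointer and, on a mismatch, assumes messages were missed: it flags a BT re-query and a BT-info flush and restarts the cached index at zero. A small self-contained model of that consistency check follows; struct gpm_state, hw_write_ptr and the function name are this sketch's own, chosen only to mirror the logic above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the few ath9k_hw_mci fields used above. */
struct gpm_state {
	uint32_t gpm_idx;		/* cached software copy of the write pointer */
	bool	 query_bt;
	bool	 need_flush_btinfo;
};

/* Returns true if the cached pointer was stale and had to be reset.
 * hw_write_ptr stands in for MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR). */
static bool check_gpm_offset(struct gpm_state *s, uint32_t hw_write_ptr)
{
	if (s->gpm_idx == hw_write_ptr)
		return false;		/* cache still matches the hardware */

	/* Mismatch: assume messages were missed, re-query BT, restart from 0. */
	s->query_bt = true;
	s->need_flush_btinfo = true;
	s->gpm_idx = 0;
	return true;
}

int main(void)
{
	struct gpm_state s = { .gpm_idx = 5 };

	printf("stale=%d\n", check_gpm_offset(&s, 9));	/* prints stale=1 */
	return 0;
}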
+u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 offset, more_gpm = 0, gpm_ptr;
+
+ if (first) {
+ gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+ mci->gpm_idx = gpm_ptr;
+ return gpm_ptr;
+ }
+
+	/*
+	 * This could be useful to avoid a new GPM message interrupt, which
+	 * may lead to a spurious interrupt after power sleep, or to
+	 * multiple entries into ath_mci_intr().
+	 * Adding an empty-GPM check by returning HAL_MCI_GPM_INVALID can
+	 * alleviate this effect, but clearing the GPM RX interrupt bit is
+	 * safe, because whether this is called from hw or driver code,
+	 * there must be an interrupt bit set/triggered initially.
+	 */
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_GPM);
+
+ gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+ offset = gpm_ptr;
+
+ if (!offset)
+ offset = mci->gpm_len - 1;
+ else if (offset >= mci->gpm_len) {
+ if (offset != 0xFFFF)
+ offset = 0;
+ } else {
+ offset--;
+ }
+
+ if ((offset == 0xFFFF) || (gpm_ptr == mci->gpm_idx)) {
+ offset = MCI_GPM_INVALID;
+ more_gpm = MCI_GPM_NOMORE;
+ goto out;
+ }
+ for (;;) {
+ u32 temp_index;
+
+ /* skip reserved GPM if any */
+
+ if (offset != mci->gpm_idx)
+ more_gpm = MCI_GPM_MORE;
+ else
+ more_gpm = MCI_GPM_NOMORE;
+
+ temp_index = mci->gpm_idx;
+ mci->gpm_idx++;
+
+ if (mci->gpm_idx >= mci->gpm_len)
+ mci->gpm_idx = 0;
+
+ if (ar9003_mci_is_gpm_valid(ah, temp_index)) {
+ offset = temp_index;
+ break;
+ }
+
+ if (more_gpm == MCI_GPM_NOMORE) {
+ offset = MCI_GPM_INVALID;
+ break;
+ }
+ }
+
+ if (offset != MCI_GPM_INVALID)
+ offset <<= 4;
+out:
+ if (more)
+ *more = more_gpm;
+
+ return offset;
+}
+EXPORT_SYMBOL(ar9003_mci_get_next_gpm_offset);
+
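ar9003_mci_get_next_gpm_offset() above treats the GPM buffer as a ring: the hardware write pointer names the next slot to be written, so the most recently written entry is one behind it (wrapping to gpm_len - 1 when the pointer is 0, with 0xFFFF meaning nothing has been written yet). The cached read index mci->gpm_idx is then advanced past reserved entries until a valid one is found, and the final index is shifted left by 4 to become a byte offset into 16-byte entries. The self-contained sketch below models only the "last written index" step; GPM_INVALID and last_written_index() are this sketch's own names.

#include <stdint.h>
#include <stdio.h>

#define GPM_INVALID 0xffffffffu		/* plays the role MCI_GPM_INVALID does above */

/* Given the hardware write pointer (index of the next slot to be written)
 * and the ring length, return the index of the last entry actually written,
 * or GPM_INVALID when the pointer reads back as 0xFFFF (nothing written). */
static uint32_t last_written_index(uint32_t write_ptr, uint32_t gpm_len)
{
	uint32_t idx = write_ptr;

	if (idx == 0)
		idx = gpm_len - 1;	/* wrapped: last slot of the ring */
	else if (idx >= gpm_len) {
		if (idx != 0xFFFF)
			idx = 0;	/* out of range but not "empty": clamp */
	} else {
		idx--;			/* normal case: one before the write pointer */
	}

	return (idx == 0xFFFF) ? GPM_INVALID : idx;
}

int main(void)
{
	const uint32_t len = 64;

	printf("%u %u %u\n",
	       (unsigned)last_written_index(1, len),		/* 0 */
	       (unsigned)last_written_index(0, len),		/* 63 */
	       (unsigned)last_written_index(0xFFFF, len));	/* GPM_INVALID */
	return 0;
}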
+void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ mci->bt_ver_major = major;
+ mci->bt_ver_minor = minor;
+ mci->bt_version_known = true;
+ ath_dbg(ath9k_hw_common(ah), MCI, "MCI BT version set: %d.%d\n",
+ mci->bt_ver_major, mci->bt_ver_minor);
+}
+EXPORT_SYMBOL(ar9003_mci_set_bt_version);
+
+void ar9003_mci_send_wlan_channels(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ mci->wlan_channels_update = true;
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+}
+EXPORT_SYMBOL(ar9003_mci_send_wlan_channels);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
index 4842f6c..d33b8e1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -189,30 +189,18 @@ enum mci_bt_state {
/* Type of state query */
enum mci_state_type {
MCI_STATE_ENABLE,
- MCI_STATE_INIT_GPM_OFFSET,
- MCI_STATE_NEXT_GPM_OFFSET,
- MCI_STATE_LAST_GPM_OFFSET,
- MCI_STATE_BT,
- MCI_STATE_SET_BT_SLEEP,
MCI_STATE_SET_BT_AWAKE,
MCI_STATE_SET_BT_CAL_START,
MCI_STATE_SET_BT_CAL,
MCI_STATE_LAST_SCHD_MSG_OFFSET,
MCI_STATE_REMOTE_SLEEP,
- MCI_STATE_CONT_RSSI_POWER,
- MCI_STATE_CONT_PRIORITY,
- MCI_STATE_CONT_TXRX,
MCI_STATE_RESET_REQ_WAKE,
MCI_STATE_SEND_WLAN_COEX_VERSION,
- MCI_STATE_SET_BT_COEX_VERSION,
- MCI_STATE_SEND_WLAN_CHANNELS,
MCI_STATE_SEND_VERSION_QUERY,
MCI_STATE_SEND_STATUS_QUERY,
- MCI_STATE_NEED_FLUSH_BT_INFO,
MCI_STATE_SET_CONCUR_TX_PRI,
MCI_STATE_RECOVER_RX,
MCI_STATE_NEED_FTP_STOMP,
- MCI_STATE_NEED_TUNING,
MCI_STATE_DEBUG,
MCI_STATE_MAX
};
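The states removed from mci_state_type above (GPM offset handling, BT state/sleep, BT version, WLAN channels, BT-info flush, tuning) correspond to the dedicated helpers now exported from ar9003_mci.c, and ar9003_mci_state() itself loses its p_data argument. The real call sites live elsewhere in ath9k and are not part of this hunk, so the sketch below is only a generic, self-contained model of the refactoring pattern: a catch-all multiplexer that packs its argument into a u32 is split into a typed helper. All names are invented; only the major/minor packing mirrors the removed MCI_STATE_SET_BT_COEX_VERSION case.

#include <stdint.h>
#include <stdio.h>

struct bt_info {
	uint8_t ver_major, ver_minor;
	int	version_known;
};

/* Old style: one entry point, argument meaning depends on the state code. */
enum state { STATE_SET_BT_VERSION /* , ... */ };

static uint32_t mci_state_old(struct bt_info *bt, enum state s, uint32_t *p_data)
{
	switch (s) {
	case STATE_SET_BT_VERSION:
		if (p_data) {		/* packed: major in bits 15:8, minor in 7:0 */
			bt->ver_major = (*p_data >> 8) & 0xff;
			bt->ver_minor = *p_data & 0xff;
			bt->version_known = 1;
		}
		break;
	}
	return 0;
}

/* New style: a dedicated helper with typed parameters, no packing. */
static void mci_set_bt_version(struct bt_info *bt, uint8_t major, uint8_t minor)
{
	bt->ver_major = major;
	bt->ver_minor = minor;
	bt->version_known = 1;
}

int main(void)
{
	struct bt_info a = {0}, b = {0};
	uint32_t packed = (3u << 8) | 1u;

	mci_state_old(&a, STATE_SET_BT_VERSION, &packed);
	mci_set_bt_version(&b, 3, 1);
	printf("old=%d.%d new=%d.%d\n", a.ver_major, a.ver_minor,
	       b.ver_major, b.ver_minor);
	return 0;
}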
@@ -260,28 +248,26 @@ enum mci_gpm_coex_opcode {
bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
u32 *payload, u8 len, bool wait_done,
bool check_bt);
-u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data);
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type);
void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
u16 len, u32 sched_addr);
void ar9003_mci_cleanup(struct ath_hw *ah);
void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
u32 *rx_msg_intr);
-
+u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more);
+void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor);
+void ar9003_mci_send_wlan_channels(struct ath_hw *ah);
/*
* These functions are used by ath9k_hw.
*/
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
-static inline bool ar9003_mci_is_ready(struct ath_hw *ah)
-{
- return ah->btcoex_hw.mci.ready;
-}
void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep);
void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable);
void ar9003_mci_init_cal_done(struct ath_hw *ah);
void ar9003_mci_set_full_sleep(struct ath_hw *ah);
-void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force);
void ar9003_mci_check_bt(struct ath_hw *ah);
bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan);
int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
@@ -289,13 +275,12 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
bool is_full_sleep);
void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked);
+void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah);
+void ar9003_mci_set_power_awake(struct ath_hw *ah);
+void ar9003_mci_check_gpm_offset(struct ath_hw *ah);
#else
-static inline bool ar9003_mci_is_ready(struct ath_hw *ah)
-{
- return false;
-}
static inline void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep)
{
}
@@ -330,6 +315,15 @@ static inline void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
static inline void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
{
}
+static inline void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah)
+{
+}
+static inline void ar9003_mci_set_power_awake(struct ath_hw *ah)
+{
+}
+static inline void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
+{
+}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 3d400e8..2c9f7d7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -211,7 +211,7 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7);
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1);
- if (AR_SREV_9485(ah) || AR_SREV_9462(ah))
+ if (AR_SREV_9485(ah) || AR_SREV_9462(ah) || AR_SREV_9550(ah))
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP,
-3);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 11abb97..e476f9f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -99,7 +99,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
channelSel = (freq * 4) / 120;
chan_frac = (((freq * 4) % 120) * 0x20000) / 120;
channelSel = (channelSel << 17) | chan_frac;
- } else if (AR_SREV_9340(ah)) {
+ } else if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
if (ah->is_clk_25mhz) {
u32 chan_frac;
@@ -113,11 +113,12 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
/* Set to 2G mode */
bMode = 1;
} else {
- if (AR_SREV_9340(ah) && ah->is_clk_25mhz) {
+ if ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) &&
+ ah->is_clk_25mhz) {
u32 chan_frac;
- channelSel = (freq * 2) / 75;
- chan_frac = (((freq * 2) % 75) * 0x20000) / 75;
+ channelSel = freq / 75;
+ chan_frac = ((freq % 75) * 0x20000) / 75;
channelSel = (channelSel << 17) | chan_frac;
} else {
channelSel = CHANSEL_5G(freq);
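For AR9340/AR9550 parts running from a 25 MHz reference clock, the 5 GHz channel-select word above changes from (freq * 2) / 75 to freq / 75, with the remainder scaled by 0x20000 and packed below the 17-bit integer part. A quick standalone check of both formulas follows; 5180 MHz is just an example channel-36 centre frequency, not a value taken from this patch.

#include <stdint.h>
#include <stdio.h>

/* Pack the integer and fractional synthesizer words the way the code above does. */
static uint32_t chansel_25mhz(uint32_t freq, uint32_t scale)
{
	uint32_t sel  = (freq * scale) / 75;
	uint32_t frac = (((freq * scale) % 75) * 0x20000) / 75;

	return (sel << 17) | frac;
}

int main(void)
{
	uint32_t freq = 5180;	/* MHz, channel 36 (example value only) */

	/* scale=2 reproduces the old AR9340 formula, scale=1 the new one. */
	printf("old=0x%08x new=0x%08x\n",
	       (unsigned)chansel_25mhz(freq, 2),
	       (unsigned)chansel_25mhz(freq, 1));
	return 0;
}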
@@ -173,16 +174,15 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
int cur_bb_spur, negative = 0, cck_spur_freq;
int i;
int range, max_spur_cnts, synth_freq;
- u8 *spur_fbin_ptr = NULL;
+ u8 *spur_fbin_ptr = ar9003_get_spur_chan_ptr(ah, IS_CHAN_2GHZ(chan));
/*
* Need to verify range +/- 10 MHz in control channel, otherwise spur
* is out-of-band and can be ignored.
*/
- if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah)) {
- spur_fbin_ptr = ar9003_get_spur_chan_ptr(ah,
- IS_CHAN_2GHZ(chan));
+ if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
+ AR_SREV_9550(ah)) {
if (spur_fbin_ptr[0] == 0) /* No spur */
return;
max_spur_cnts = 5;
@@ -207,7 +207,8 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
if (AR_SREV_9462(ah) && (i == 0 || i == 3))
continue;
negative = 0;
- if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah))
+ if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
+ AR_SREV_9550(ah))
cur_bb_spur = ath9k_hw_fbin2freq(spur_fbin_ptr[i],
IS_CHAN_2GHZ(chan));
else
@@ -620,6 +621,50 @@ static void ar9003_hw_prog_ini(struct ath_hw *ah,
}
}
+static int ar9550_hw_get_modes_txgain_index(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int ret;
+
+ switch (chan->chanmode) {
+ case CHANNEL_A:
+ case CHANNEL_A_HT20:
+ if (chan->channel <= 5350)
+ ret = 1;
+ else if ((chan->channel > 5350) && (chan->channel <= 5600))
+ ret = 3;
+ else
+ ret = 5;
+ break;
+
+ case CHANNEL_A_HT40PLUS:
+ case CHANNEL_A_HT40MINUS:
+ if (chan->channel <= 5350)
+ ret = 2;
+ else if ((chan->channel > 5350) && (chan->channel <= 5600))
+ ret = 4;
+ else
+ ret = 6;
+ break;
+
+ case CHANNEL_G:
+ case CHANNEL_G_HT20:
+ case CHANNEL_B:
+ ret = 8;
+ break;
+
+ case CHANNEL_G_HT40PLUS:
+ case CHANNEL_G_HT40MINUS:
+ ret = 7;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
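ar9550_hw_get_modes_txgain_index() above selects which column of the iniModesTxGain table to program, keyed on band, HT40 versus HT20/legacy, and three 5 GHz frequency ranges (up to 5350 MHz, 5350-5600 MHz, above 5600 MHz); ar9003_hw_process_ini() later feeds the result to REG_WRITE_ARRAY() and returns -EINVAL for an unknown mode. A condensed standalone model of the mapping with a few example lookups follows; the is_5ghz/is_ht40 booleans are this sketch's simplification of the driver's chanmode values.

#include <stdbool.h>
#include <stdio.h>

/* Condensed model of the tx-gain column selection above. */
static int txgain_column(bool is_5ghz, bool is_ht40, int freq_mhz)
{
	if (!is_5ghz)
		return is_ht40 ? 7 : 8;		/* 2 GHz: HT40 vs HT20/legacy */

	if (freq_mhz <= 5350)
		return is_ht40 ? 2 : 1;
	if (freq_mhz <= 5600)
		return is_ht40 ? 4 : 3;
	return is_ht40 ? 6 : 5;
}

int main(void)
{
	printf("%d %d %d\n",
	       txgain_column(true, false, 5180),	/* 1 */
	       txgain_column(true, true, 5500),		/* 4 */
	       txgain_column(false, false, 2437));	/* 8 */
	return 0;
}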
static int ar9003_hw_process_ini(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -661,7 +706,22 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
}
REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites);
- REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
+ if (AR_SREV_9550(ah))
+ REG_WRITE_ARRAY(&ah->ini_modes_rx_gain_bounds, modesIndex,
+ regWrites);
+
+ if (AR_SREV_9550(ah)) {
+ int modes_txgain_index;
+
+ modes_txgain_index = ar9550_hw_get_modes_txgain_index(ah, chan);
+ if (modes_txgain_index < 0)
+ return -EINVAL;
+
+ REG_WRITE_ARRAY(&ah->iniModesTxGain, modes_txgain_index,
+ regWrites);
+ } else {
+ REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
+ }
/*
* For 5GHz channels requiring Fast Clock, apply
@@ -676,6 +736,10 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
if (chan->channel == 2484)
ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1);
+ if (AR_SREV_9462(ah))
+ REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
+ AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
+
ah->modes_index = modesIndex;
ar9003_hw_override_ini(ah);
ar9003_hw_set_channel_regs(ah, chan);
@@ -821,18 +885,18 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
- if (!on != aniState->ofdmWeakSigDetectOff) {
+ if (on != aniState->ofdmWeakSigDetect) {
ath_dbg(common, ANI,
"** ch %d: ofdm weak signal: %s=>%s\n",
chan->channel,
- !aniState->ofdmWeakSigDetectOff ?
+ aniState->ofdmWeakSigDetect ?
"on" : "off",
on ? "on" : "off");
if (on)
ah->stats.ast_ani_ofdmon++;
else
ah->stats.ast_ani_ofdmoff++;
- aniState->ofdmWeakSigDetectOff = !on;
+ aniState->ofdmWeakSigDetect = on;
}
break;
}
@@ -851,7 +915,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
* from INI file & cap value
*/
value = firstep_table[level] -
- firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
+ firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
aniState->iniDef.firstep;
if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
@@ -866,7 +930,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
* from INI file & cap value
*/
value2 = firstep_table[level] -
- firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
+ firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
aniState->iniDef.firstepLow;
if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
@@ -882,7 +946,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
chan->channel,
aniState->firstepLevel,
level,
- ATH9K_ANI_FIRSTEP_LVL_NEW,
+ ATH9K_ANI_FIRSTEP_LVL,
value,
aniState->iniDef.firstep);
ath_dbg(common, ANI,
@@ -890,7 +954,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
chan->channel,
aniState->firstepLevel,
level,
- ATH9K_ANI_FIRSTEP_LVL_NEW,
+ ATH9K_ANI_FIRSTEP_LVL,
value2,
aniState->iniDef.firstepLow);
if (level > aniState->firstepLevel)
@@ -915,7 +979,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
* from INI file & cap value
*/
value = cycpwrThr1_table[level] -
- cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
+ cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
aniState->iniDef.cycpwrThr1;
if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
@@ -931,7 +995,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
* from INI file & cap value
*/
value2 = cycpwrThr1_table[level] -
- cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
+ cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
aniState->iniDef.cycpwrThr1Ext;
if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
@@ -946,7 +1010,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
chan->channel,
aniState->spurImmunityLevel,
level,
- ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+ ATH9K_ANI_SPUR_IMMUNE_LVL,
value,
aniState->iniDef.cycpwrThr1);
ath_dbg(common, ANI,
@@ -954,7 +1018,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
chan->channel,
aniState->spurImmunityLevel,
level,
- ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+ ATH9K_ANI_SPUR_IMMUNE_LVL,
value2,
aniState->iniDef.cycpwrThr1Ext);
if (level > aniState->spurImmunityLevel)
@@ -975,16 +1039,16 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
AR_PHY_MRC_CCK_ENABLE, is_on);
REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
AR_PHY_MRC_CCK_MUX_REG, is_on);
- if (!is_on != aniState->mrcCCKOff) {
+ if (is_on != aniState->mrcCCK) {
ath_dbg(common, ANI, "** ch %d: MRC CCK: %s=>%s\n",
chan->channel,
- !aniState->mrcCCKOff ? "on" : "off",
+ aniState->mrcCCK ? "on" : "off",
is_on ? "on" : "off");
if (is_on)
ah->stats.ast_ani_ccklow++;
else
ah->stats.ast_ani_cckhigh++;
- aniState->mrcCCKOff = !is_on;
+ aniState->mrcCCK = is_on;
}
break;
}
@@ -998,9 +1062,9 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
ath_dbg(common, ANI,
"ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
aniState->spurImmunityLevel,
- !aniState->ofdmWeakSigDetectOff ? "on" : "off",
+ aniState->ofdmWeakSigDetect ? "on" : "off",
aniState->firstepLevel,
- !aniState->mrcCCKOff ? "on" : "off",
+ aniState->mrcCCK ? "on" : "off",
aniState->listenTime,
aniState->ofdmPhyErrCount,
aniState->cckPhyErrCount);
@@ -1107,10 +1171,10 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
AR_PHY_EXT_CYCPWR_THR1);
/* these levels just got reset to defaults by the INI */
- aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
- aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
- aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG;
- aniState->mrcCCKOff = !ATH9K_ANI_ENABLE_MRC_CCK;
+ aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
+ aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
+ aniState->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
+ aniState->mrcCCK = true;
}
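The ANI changes in this hunk and the ones above replace the negated flags ofdmWeakSigDetectOff and mrcCCKOff with the positively named ofdmWeakSigDetect and mrcCCK, so tests like !on != aniState->ofdmWeakSigDetectOff become the direct on != aniState->ofdmWeakSigDetect. For boolean inputs the two forms are equivalent; the tiny self-contained check below only demonstrates that equivalence (all names are the sketch's own).

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	for (int on = 0; on <= 1; on++) {
		for (int enabled = 0; enabled <= 1; enabled++) {
			bool off = !enabled;
			bool changed_old = (!on != off);	/* test against the negated flag */
			bool changed_new = (on != enabled);	/* test against the positive flag */

			assert(changed_old == changed_new);
		}
	}
	printf("negated-flag and positive-flag change tests agree\n");
	return 0;
}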
static void ar9003_hw_set_radar_params(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 7268a48..7bfbaf0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -633,11 +633,13 @@
#define AR_PHY_65NM_CH0_BIAS2 0x160c4
#define AR_PHY_65NM_CH0_BIAS4 0x160cc
#define AR_PHY_65NM_CH0_RXTX4 0x1610c
+#define AR_PHY_65NM_CH1_RXTX4 0x1650c
+#define AR_PHY_65NM_CH2_RXTX4 0x1690c
#define AR_CH0_TOP (AR_SREV_9300(ah) ? 0x16288 : \
((AR_SREV_9462(ah) ? 0x1628c : 0x16280)))
-#define AR_CH0_TOP_XPABIASLVL (0x300)
-#define AR_CH0_TOP_XPABIASLVL_S (8)
+#define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300)
+#define AR_CH0_TOP_XPABIASLVL_S (AR_SREV_9550(ah) ? 6 : 8)
#define AR_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 : \
((AR_SREV_9485(ah) ? 0x1628c : 0x16294)))
@@ -650,6 +652,8 @@
#define AR_SWITCH_TABLE_COM_ALL_S (0)
#define AR_SWITCH_TABLE_COM_AR9462_ALL (0xffffff)
#define AR_SWITCH_TABLE_COM_AR9462_ALL_S (0)
+#define AR_SWITCH_TABLE_COM_AR9550_ALL (0xffffff)
+#define AR_SWITCH_TABLE_COM_AR9550_ALL_S (0)
#define AR_SWITCH_TABLE_COM_SPDT (0x00f00000)
#define AR_SWITCH_TABLE_COM_SPDT_ALL (0x0000fff0)
#define AR_SWITCH_TABLE_COM_SPDT_ALL_S (4)
@@ -820,18 +824,26 @@
#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
-#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x00000001
-#define AR_PHY_SPECTRAL_SCAN_ENABLE_S 0
-#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002
-#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1
-#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0
-#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
-#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00
-#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
-#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000
-#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
-#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000
-#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24
+
+#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x00000001
+#define AR_PHY_SPECTRAL_SCAN_ENABLE_S 0
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
+#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00
+#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
+#define AR_PHY_SPECTRAL_SCAN_COUNT 0x0FFF0000
+#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x10000000
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 28
+#define AR_PHY_SPECTRAL_SCAN_PRIORITY 0x20000000
+#define AR_PHY_SPECTRAL_SCAN_PRIORITY_S 29
+#define AR_PHY_SPECTRAL_SCAN_USE_ERR5 0x40000000
+#define AR_PHY_SPECTRAL_SCAN_USE_ERR5_S 30
+#define AR_PHY_SPECTRAL_SCAN_COMPRESSED_RPT 0x80000000
+#define AR_PHY_SPECTRAL_SCAN_COMPRESSED_RPT_S 31
+
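The spectral-scan layout above follows the driver's usual convention of MASK/SHIFT macro pairs (FOO plus FOO_S), which the surrounding code reads through helpers such as MS(). The self-contained sketch below defines local equivalents rather than relying on the kernel headers; get_field()/set_field() are this note's own names, and the widened 12-bit SCAN_COUNT field from the hunk is used as the example.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for ath9k's mask-and-shift helpers. */
#define SCAN_COUNT	0x0FFF0000u
#define SCAN_COUNT_S	16

static uint32_t get_field(uint32_t reg, uint32_t mask, int shift)
{
	return (reg & mask) >> shift;
}

static uint32_t set_field(uint32_t reg, uint32_t mask, int shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0;

	reg = set_field(reg, SCAN_COUNT, SCAN_COUNT_S, 128);
	printf("reg=0x%08x count=%u\n", (unsigned)reg,
	       (unsigned)get_field(reg, SCAN_COUNT, SCAN_COUNT_S));
	return 0;
}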
#define AR_PHY_CHANNEL_STATUS_RX_CLEAR 0x00000004
#define AR_PHY_RTT_CTRL_ENA_RADIO_RETENTION 0x00000001
#define AR_PHY_RTT_CTRL_ENA_RADIO_RETENTION_S 0
@@ -866,6 +878,9 @@
#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000
#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28
+#define AR_PHY_65NM_RXTX4_XLNA_BIAS 0xC0000000
+#define AR_PHY_65NM_RXTX4_XLNA_BIAS_S 30
+
/*
* Channel 1 Register Map
*/
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index f11d9b2..6e1756b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2011 Atheros Communications Inc.
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -18,7 +19,7 @@
#define INITVALS_9330_1P1_H
static const u32 ar9331_1p1_baseband_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
{0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
@@ -27,10 +28,10 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
{0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
{0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
{0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4},
- {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+ {0x00009e04, 0x00202020, 0x00202020, 0x00202020, 0x00202020},
{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
{0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e},
- {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e14, 0x31365d5e, 0x3136605e, 0x3136605e, 0x31365d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
@@ -55,7 +56,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
- {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
+ {0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
@@ -63,7 +64,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
};
static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
{0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -155,7 +156,7 @@ static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p1[][5] = {
};
static const u32 ar9331_modes_high_ob_db_tx_gain_1p1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a2dc, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52},
{0x0000a2e0, 0xffb31c84, 0xffb31c84, 0xffb31c84, 0xffb31c84},
@@ -245,7 +246,7 @@ static const u32 ar9331_modes_high_ob_db_tx_gain_1p1[][5] = {
};
static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
{0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -336,12 +337,7 @@ static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
{0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000},
};
-static const u32 ar9331_1p1_baseband_core_txfir_coeff_japan_2484[][2] = {
- /* Addr allmodes */
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x6f7f0301},
- {0x0000a3a0, 0xca9228ee},
-};
+#define ar9331_1p1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
static const u32 ar9331_1p1_xtal_25M[][2] = {
/* Addr allmodes */
@@ -377,14 +373,14 @@ static const u32 ar9331_1p1_radio_core[][2] = {
{0x000160b4, 0x92480040},
{0x000160c0, 0x006db6db},
{0x000160c4, 0x0186db60},
- {0x000160c8, 0x6db6db6c},
+ {0x000160c8, 0x6db4db6c},
{0x000160cc, 0x6de6c300},
{0x000160d0, 0x14500820},
{0x00016100, 0x04cb0001},
{0x00016104, 0xfff80015},
{0x00016108, 0x00080010},
{0x0001610c, 0x00170000},
- {0x00016140, 0x10804000},
+ {0x00016140, 0x10800000},
{0x00016144, 0x01884080},
{0x00016148, 0x000080c0},
{0x00016280, 0x01000015},
@@ -417,7 +413,7 @@ static const u32 ar9331_1p1_radio_core[][2] = {
};
static const u32 ar9331_1p1_soc_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00007010, 0x00000022, 0x00000022, 0x00000022, 0x00000022},
};
@@ -691,7 +687,7 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
};
static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
{0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -782,17 +778,7 @@ static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
{0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000},
};
-static const u32 ar9331_1p1_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
+#define ar9331_1p1_mac_postamble ar9300_2p2_mac_postamble
static const u32 ar9331_1p1_soc_preamble[][2] = {
/* Addr allmodes */
@@ -973,26 +959,27 @@ static const u32 ar9331_1p1_mac_core[][2] = {
static const u32 ar9331_common_rx_gain_1p1[][2] = {
/* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x01910190},
- {0x0000a030, 0x01930192},
- {0x0000a034, 0x01950194},
- {0x0000a038, 0x038a0196},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
+ {0x00009e18, 0x05000000},
+ {0x0000a000, 0x00060005},
+ {0x0000a004, 0x00810080},
+ {0x0000a008, 0x00830082},
+ {0x0000a00c, 0x00850084},
+ {0x0000a010, 0x01820181},
+ {0x0000a014, 0x01840183},
+ {0x0000a018, 0x01880185},
+ {0x0000a01c, 0x018a0189},
+ {0x0000a020, 0x02850284},
+ {0x0000a024, 0x02890288},
+ {0x0000a028, 0x028b028a},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
{0x0000a050, 0x00000000},
{0x0000a054, 0x00000000},
{0x0000a058, 0x00000000},
@@ -1005,15 +992,15 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
{0x0000a074, 0x00000000},
{0x0000a078, 0x00000000},
{0x0000a07c, 0x00000000},
- {0x0000a080, 0x22222229},
- {0x0000a084, 0x1d1d1d1d},
- {0x0000a088, 0x1d1d1d1d},
- {0x0000a08c, 0x1d1d1d1d},
- {0x0000a090, 0x171d1d1d},
- {0x0000a094, 0x11111717},
- {0x0000a098, 0x00030311},
- {0x0000a09c, 0x00000000},
- {0x0000a0a0, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x24242428},
+ {0x0000a098, 0x171e1e1e},
+ {0x0000a09c, 0x02020b0b},
+ {0x0000a0a0, 0x02020202},
{0x0000a0a4, 0x00000000},
{0x0000a0a8, 0x00000000},
{0x0000a0ac, 0x00000000},
@@ -1021,27 +1008,27 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
{0x0000a0b4, 0x00000000},
{0x0000a0b8, 0x00000000},
{0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
+ {0x0000a0c0, 0x22072208},
+ {0x0000a0c4, 0x22052206},
+ {0x0000a0c8, 0x22032204},
+ {0x0000a0cc, 0x22012202},
+ {0x0000a0d0, 0x221f2200},
+ {0x0000a0d4, 0x221d221e},
+ {0x0000a0d8, 0x33023303},
+ {0x0000a0dc, 0x33003301},
+ {0x0000a0e0, 0x331e331f},
+ {0x0000a0e4, 0x4402331d},
+ {0x0000a0e8, 0x44004401},
+ {0x0000a0ec, 0x441e441f},
+ {0x0000a0f0, 0x55025503},
+ {0x0000a0f4, 0x55005501},
+ {0x0000a0f8, 0x551e551f},
+ {0x0000a0fc, 0x6602551d},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
{0x0000a114, 0x00000000},
{0x0000a118, 0x00000000},
{0x0000a11c, 0x00000000},
@@ -1054,26 +1041,26 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
{0x0000a138, 0x00000000},
{0x0000a13c, 0x00000000},
{0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
{0x0000a194, 0x00000000},
{0x0000a198, 0x00000000},
{0x0000a19c, 0x00000000},
@@ -1100,48 +1087,14 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
{0x0000a1f0, 0x00000396},
{0x0000a1f4, 0x00000396},
{0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
+ {0x0000a1fc, 0x00000296},
};
static const u32 ar9331_common_tx_gain_offset1_1[][1] = {
- {0},
- {3},
- {0},
- {0},
-};
-
-static const u32 ar9331_1p1_chansel_xtal_25M[] = {
- 0x0101479e,
- 0x0101d027,
- 0x010258af,
- 0x0102e138,
- 0x010369c0,
- 0x0103f249,
- 0x01047ad1,
- 0x0105035a,
- 0x01058be2,
- 0x0106146b,
- 0x01069cf3,
- 0x0107257c,
- 0x0107ae04,
- 0x0108f5b2,
-};
-
-static const u32 ar9331_1p1_chansel_xtal_40M[] = {
- 0x00a0ccbe,
- 0x00a12213,
- 0x00a17769,
- 0x00a1ccbe,
- 0x00a22213,
- 0x00a27769,
- 0x00a2ccbe,
- 0x00a32213,
- 0x00a37769,
- 0x00a3ccbe,
- 0x00a42213,
- 0x00a47769,
- 0x00a4ccbe,
- 0x00a5998b,
+ {0x00000000},
+ {0x00000003},
+ {0x00000000},
+ {0x00000000},
};
#endif /* INITVALS_9330_1P1_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
index 0e6ca08..57ed8a1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2011 Atheros Communications Inc.
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -17,8 +18,8 @@
#ifndef INITVALS_9330_1P2_H
#define INITVALS_9330_1P2_H
-static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p2[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+static const u32 ar9331_modes_high_ob_db_tx_gain_1p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
{0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
@@ -102,8 +103,14 @@ static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p2[][5] = {
{0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
};
+#define ar9331_modes_high_power_tx_gain_1p2 ar9331_modes_high_ob_db_tx_gain_1p2
+
+#define ar9331_modes_low_ob_db_tx_gain_1p2 ar9331_modes_high_power_tx_gain_1p2
+
+#define ar9331_modes_lowest_ob_db_tx_gain_1p2 ar9331_modes_low_ob_db_tx_gain_1p2
+
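Several of the 1.2 initval tables in this file are reduced above to #define aliases of identical tables (from the 1.1 header or from earlier in this one), so the per-revision symbol names that the initialization code references survive while the duplicated data disappears. A minimal self-contained illustration of that aliasing pattern follows; the array and macro names here are invented, not the ath9k ones.

#include <stdio.h>

/* One real table... */
static const unsigned int chip_rev1_radio_core[][2] = {
	{ 0x16000, 0x36db6db6 },
	{ 0x16004, 0x6db6db40 },
};

/* ...and a later revision that happens to use identical values: alias it
 * instead of duplicating the data. */
#define chip_rev2_radio_core chip_rev1_radio_core

int main(void)
{
	/* Both names refer to the same storage. */
	printf("%p %p\n",
	       (void *)chip_rev1_radio_core,
	       (void *)chip_rev2_radio_core);
	return 0;
}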
static const u32 ar9331_1p2_baseband_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
{0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
@@ -147,191 +154,6 @@ static const u32 ar9331_1p2_baseband_postamble[][5] = {
{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
-static const u32 ar9331_modes_high_ob_db_tx_gain_1p2[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
- {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
- {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
- {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
- {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
- {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
- {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
- {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
- {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
- {0x0000a52c, 0x41023e85, 0x41023e85, 0x3f001620, 0x3f001620},
- {0x0000a530, 0x48023ec6, 0x48023ec6, 0x41001621, 0x41001621},
- {0x0000a534, 0x4d023f01, 0x4d023f01, 0x44001640, 0x44001640},
- {0x0000a538, 0x53023f4b, 0x53023f4b, 0x46001641, 0x46001641},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x48001642, 0x48001642},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4b001644, 0x4b001644},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x4e001a81, 0x4e001a81},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x51001a83, 0x51001a83},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x54001c84, 0x54001c84},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x57001ce3, 0x57001ce3},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5b001ce5, 0x5b001ce5},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5f001ce9, 0x5f001ce9},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001eec, 0x66001eec},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x66001eec, 0x66001eec},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001eec, 0x66001eec},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
- {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
- {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
- {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
- {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
- {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
- {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
- {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
- {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
- {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
- {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
- {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
- {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
- {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
- {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
- {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
- {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
- {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
- {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
- {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
- {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
- {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
- {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
- {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
- {0x0000a618, 0x02008501, 0x02008501, 0x02008501, 0x02008501},
- {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
- {0x0000a620, 0x0300c802, 0x0300c802, 0x0300c802, 0x0300c802},
- {0x0000a624, 0x0300cc03, 0x0300cc03, 0x0300cc03, 0x0300cc03},
- {0x0000a628, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a62c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a630, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a634, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a638, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
-};
-
-static const u32 ar9331_modes_low_ob_db_tx_gain_1p2[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
- {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
- {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
- {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
- {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
- {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
- {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
- {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
- {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
- {0x0000a52c, 0x41023e85, 0x41023e85, 0x3f001620, 0x3f001620},
- {0x0000a530, 0x48023ec6, 0x48023ec6, 0x41001621, 0x41001621},
- {0x0000a534, 0x4d023f01, 0x4d023f01, 0x44001640, 0x44001640},
- {0x0000a538, 0x53023f4b, 0x53023f4b, 0x46001641, 0x46001641},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x48001642, 0x48001642},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4b001644, 0x4b001644},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x4e001a81, 0x4e001a81},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x51001a83, 0x51001a83},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x54001c84, 0x54001c84},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x57001ce3, 0x57001ce3},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5b001ce5, 0x5b001ce5},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5f001ce9, 0x5f001ce9},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001eec, 0x66001eec},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x66001eec, 0x66001eec},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001eec, 0x66001eec},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
- {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
- {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
- {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
- {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
- {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
- {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
- {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
- {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
- {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
- {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
- {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
- {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
- {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
- {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
- {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
- {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
- {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
- {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
- {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
- {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
- {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
- {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
- {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
- {0x0000a618, 0x02008501, 0x02008501, 0x02008501, 0x02008501},
- {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
- {0x0000a620, 0x0300c802, 0x0300c802, 0x0300c802, 0x0300c802},
- {0x0000a624, 0x0300cc03, 0x0300cc03, 0x0300cc03, 0x0300cc03},
- {0x0000a628, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a62c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a630, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a634, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a638, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
-};
-
-static const u32 ar9331_1p2_baseband_core_txfir_coeff_japan_2484[][2] = {
- /* Addr allmodes */
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x6f7f0301},
- {0x0000a3a0, 0xca9228ee},
-};
-
-static const u32 ar9331_1p2_xtal_25M[][2] = {
- /* Addr allmodes */
- {0x00007038, 0x000002f8},
- {0x00008244, 0x0010f3d7},
- {0x0000824c, 0x0001e7ae},
- {0x0001609c, 0x0f508f29},
-};
-
static const u32 ar9331_1p2_radio_core[][2] = {
/* Addr allmodes */
{0x00016000, 0x36db6db6},
@@ -397,684 +219,24 @@ static const u32 ar9331_1p2_radio_core[][2] = {
{0x000163d4, 0x00000000},
};
-static const u32 ar9331_1p2_soc_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00007010, 0x00000022, 0x00000022, 0x00000022, 0x00000022},
-};
+#define ar9331_1p2_baseband_core_txfir_coeff_japan_2484 ar9331_1p1_baseband_core_txfir_coeff_japan_2484
-static const u32 ar9331_common_wo_xlna_rx_gain_1p2[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00060005},
- {0x0000a004, 0x00810080},
- {0x0000a008, 0x00830082},
- {0x0000a00c, 0x00850084},
- {0x0000a010, 0x01820181},
- {0x0000a014, 0x01840183},
- {0x0000a018, 0x01880185},
- {0x0000a01c, 0x018a0189},
- {0x0000a020, 0x02850284},
- {0x0000a024, 0x02890288},
- {0x0000a028, 0x028b028a},
- {0x0000a02c, 0x03850384},
- {0x0000a030, 0x03890388},
- {0x0000a034, 0x038b038a},
- {0x0000a038, 0x038d038c},
- {0x0000a03c, 0x03910390},
- {0x0000a040, 0x03930392},
- {0x0000a044, 0x03950394},
- {0x0000a048, 0x00000396},
- {0x0000a04c, 0x00000000},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x28282828},
- {0x0000a084, 0x28282828},
- {0x0000a088, 0x28282828},
- {0x0000a08c, 0x28282828},
- {0x0000a090, 0x28282828},
- {0x0000a094, 0x24242428},
- {0x0000a098, 0x171e1e1e},
- {0x0000a09c, 0x02020b0b},
- {0x0000a0a0, 0x02020202},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x22072208},
- {0x0000a0c4, 0x22052206},
- {0x0000a0c8, 0x22032204},
- {0x0000a0cc, 0x22012202},
- {0x0000a0d0, 0x221f2200},
- {0x0000a0d4, 0x221d221e},
- {0x0000a0d8, 0x33023303},
- {0x0000a0dc, 0x33003301},
- {0x0000a0e0, 0x331e331f},
- {0x0000a0e4, 0x4402331d},
- {0x0000a0e8, 0x44004401},
- {0x0000a0ec, 0x441e441f},
- {0x0000a0f0, 0x55025503},
- {0x0000a0f4, 0x55005501},
- {0x0000a0f8, 0x551e551f},
- {0x0000a0fc, 0x6602551d},
- {0x0000a100, 0x66006601},
- {0x0000a104, 0x661e661f},
- {0x0000a108, 0x7703661d},
- {0x0000a10c, 0x77017702},
- {0x0000a110, 0x00007700},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x111f1100},
- {0x0000a148, 0x111d111e},
- {0x0000a14c, 0x111b111c},
- {0x0000a150, 0x22032204},
- {0x0000a154, 0x22012202},
- {0x0000a158, 0x221f2200},
- {0x0000a15c, 0x221d221e},
- {0x0000a160, 0x33013302},
- {0x0000a164, 0x331f3300},
- {0x0000a168, 0x4402331e},
- {0x0000a16c, 0x44004401},
- {0x0000a170, 0x441e441f},
- {0x0000a174, 0x55015502},
- {0x0000a178, 0x551f5500},
- {0x0000a17c, 0x6602551e},
- {0x0000a180, 0x66006601},
- {0x0000a184, 0x661e661f},
- {0x0000a188, 0x7703661d},
- {0x0000a18c, 0x77017702},
- {0x0000a190, 0x00007700},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000296},
-};
+#define ar9331_1p2_xtal_25M ar9331_1p1_xtal_25M
-static const u32 ar9331_1p2_baseband_core[][2] = {
- /* Addr allmodes */
- {0x00009800, 0xafe68e30},
- {0x00009804, 0xfd14e000},
- {0x00009808, 0x9c0a8f6b},
- {0x0000980c, 0x04800000},
- {0x00009814, 0x9280c00a},
- {0x00009818, 0x00000000},
- {0x0000981c, 0x00020028},
- {0x00009834, 0x5f3ca3de},
- {0x00009838, 0x0108ecff},
- {0x0000983c, 0x14750600},
- {0x00009880, 0x201fff00},
- {0x00009884, 0x00001042},
- {0x000098a4, 0x00200400},
- {0x000098b0, 0x32840bbe},
- {0x000098d0, 0x004b6a8e},
- {0x000098d4, 0x00000820},
- {0x000098dc, 0x00000000},
- {0x000098f0, 0x00000000},
- {0x000098f4, 0x00000000},
- {0x00009c04, 0x00000000},
- {0x00009c08, 0x03200000},
- {0x00009c0c, 0x00000000},
- {0x00009c10, 0x00000000},
- {0x00009c14, 0x00046384},
- {0x00009c18, 0x05b6b440},
- {0x00009c1c, 0x00b6b440},
- {0x00009d00, 0xc080a333},
- {0x00009d04, 0x40206c10},
- {0x00009d08, 0x009c4060},
- {0x00009d0c, 0x1883800a},
- {0x00009d10, 0x01834061},
- {0x00009d14, 0x00c00400},
- {0x00009d18, 0x00000000},
- {0x00009e08, 0x0038233c},
- {0x00009e24, 0x9927b515},
- {0x00009e28, 0x12ef0200},
- {0x00009e30, 0x06336f77},
- {0x00009e34, 0x6af6532f},
- {0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
- {0x00009e4c, 0x00001004},
- {0x00009e50, 0x00ff03f1},
- {0x00009fc0, 0x803e4788},
- {0x00009fc4, 0x0001efb5},
- {0x00009fcc, 0x40000014},
- {0x0000a20c, 0x00000000},
- {0x0000a220, 0x00000000},
- {0x0000a224, 0x00000000},
- {0x0000a228, 0x10002310},
- {0x0000a23c, 0x00000000},
- {0x0000a244, 0x0c000000},
- {0x0000a2a0, 0x00000001},
- {0x0000a2c0, 0x00000001},
- {0x0000a2c8, 0x00000000},
- {0x0000a2cc, 0x18c43433},
- {0x0000a2d4, 0x00000000},
- {0x0000a2dc, 0x00000000},
- {0x0000a2e0, 0x00000000},
- {0x0000a2e4, 0x00000000},
- {0x0000a2e8, 0x00000000},
- {0x0000a2ec, 0x00000000},
- {0x0000a2f0, 0x00000000},
- {0x0000a2f4, 0x00000000},
- {0x0000a2f8, 0x00000000},
- {0x0000a344, 0x00000000},
- {0x0000a34c, 0x00000000},
- {0x0000a350, 0x0000a000},
- {0x0000a364, 0x00000000},
- {0x0000a370, 0x00000000},
- {0x0000a390, 0x00000001},
- {0x0000a394, 0x00000444},
- {0x0000a398, 0x001f0e0f},
- {0x0000a39c, 0x0075393f},
- {0x0000a3a0, 0xb79f6427},
- {0x0000a3a4, 0x00000000},
- {0x0000a3a8, 0xaaaaaaaa},
- {0x0000a3ac, 0x3c466478},
- {0x0000a3c0, 0x20202020},
- {0x0000a3c4, 0x22222220},
- {0x0000a3c8, 0x20200020},
- {0x0000a3cc, 0x20202020},
- {0x0000a3d0, 0x20202020},
- {0x0000a3d4, 0x20202020},
- {0x0000a3d8, 0x20202020},
- {0x0000a3dc, 0x20202020},
- {0x0000a3e0, 0x20202020},
- {0x0000a3e4, 0x20202020},
- {0x0000a3e8, 0x20202020},
- {0x0000a3ec, 0x20202020},
- {0x0000a3f0, 0x00000000},
- {0x0000a3f4, 0x00000006},
- {0x0000a3f8, 0x0cdbd380},
- {0x0000a3fc, 0x000f0f01},
- {0x0000a400, 0x8fa91f01},
- {0x0000a404, 0x00000000},
- {0x0000a408, 0x0e79e5c6},
- {0x0000a40c, 0x00820820},
- {0x0000a414, 0x1ce739ce},
- {0x0000a418, 0x2d001dce},
- {0x0000a41c, 0x1ce739ce},
- {0x0000a420, 0x000001ce},
- {0x0000a424, 0x1ce739ce},
- {0x0000a428, 0x000001ce},
- {0x0000a42c, 0x1ce739ce},
- {0x0000a430, 0x1ce739ce},
- {0x0000a434, 0x00000000},
- {0x0000a438, 0x00001801},
- {0x0000a43c, 0x00000000},
- {0x0000a440, 0x00000000},
- {0x0000a444, 0x00000000},
- {0x0000a448, 0x04000000},
- {0x0000a44c, 0x00000001},
- {0x0000a450, 0x00010000},
- {0x0000a458, 0x00000000},
- {0x0000a640, 0x00000000},
- {0x0000a644, 0x3fad9d74},
- {0x0000a648, 0x0048060a},
- {0x0000a64c, 0x00003c37},
- {0x0000a670, 0x03020100},
- {0x0000a674, 0x09080504},
- {0x0000a678, 0x0d0c0b0a},
- {0x0000a67c, 0x13121110},
- {0x0000a680, 0x31301514},
- {0x0000a684, 0x35343332},
- {0x0000a688, 0x00000036},
- {0x0000a690, 0x00000838},
- {0x0000a7c0, 0x00000000},
- {0x0000a7c4, 0xfffffffc},
- {0x0000a7c8, 0x00000000},
- {0x0000a7cc, 0x00000000},
- {0x0000a7d0, 0x00000000},
- {0x0000a7d4, 0x00000004},
- {0x0000a7dc, 0x00000001},
-};
+#define ar9331_1p2_xtal_40M ar9331_1p1_xtal_40M
-static const u32 ar9331_modes_high_power_tx_gain_1p2[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
- {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
- {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
- {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
- {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
- {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
- {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
- {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
- {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
- {0x0000a52c, 0x41023e85, 0x41023e85, 0x3f001620, 0x3f001620},
- {0x0000a530, 0x48023ec6, 0x48023ec6, 0x41001621, 0x41001621},
- {0x0000a534, 0x4d023f01, 0x4d023f01, 0x44001640, 0x44001640},
- {0x0000a538, 0x53023f4b, 0x53023f4b, 0x46001641, 0x46001641},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x48001642, 0x48001642},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4b001644, 0x4b001644},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x4e001a81, 0x4e001a81},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x51001a83, 0x51001a83},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x54001c84, 0x54001c84},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x57001ce3, 0x57001ce3},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5b001ce5, 0x5b001ce5},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5f001ce9, 0x5f001ce9},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001eec, 0x66001eec},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x66001eec, 0x66001eec},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001eec, 0x66001eec},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
- {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
- {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
- {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
- {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
- {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
- {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
- {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
- {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
- {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
- {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
- {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
- {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
- {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
- {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
- {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
- {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
- {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
- {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
- {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
- {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
- {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
- {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
- {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
- {0x0000a618, 0x02008501, 0x02008501, 0x02008501, 0x02008501},
- {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
- {0x0000a620, 0x0300c802, 0x0300c802, 0x0300c802, 0x0300c802},
- {0x0000a624, 0x0300cc03, 0x0300cc03, 0x0300cc03, 0x0300cc03},
- {0x0000a628, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a62c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a630, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a634, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a638, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
-};
+#define ar9331_1p2_baseband_core ar9331_1p1_baseband_core
-static const u32 ar9331_1p2_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
+#define ar9331_1p2_soc_postamble ar9331_1p1_soc_postamble
-static const u32 ar9331_1p2_soc_preamble[][2] = {
- /* Addr allmodes */
- {0x00007020, 0x00000000},
- {0x00007034, 0x00000002},
- {0x00007038, 0x000002f8},
-};
+#define ar9331_1p2_mac_postamble ar9331_1p1_mac_postamble
-static const u32 ar9331_1p2_xtal_40M[][2] = {
- /* Addr allmodes */
- {0x00007038, 0x000004c2},
- {0x00008244, 0x0010f400},
- {0x0000824c, 0x0001e800},
- {0x0001609c, 0x0b283f31},
-};
+#define ar9331_1p2_soc_preamble ar9331_1p1_soc_preamble
-static const u32 ar9331_1p2_mac_core[][2] = {
- /* Addr allmodes */
- {0x00000008, 0x00000000},
- {0x00000030, 0x00020085},
- {0x00000034, 0x00000005},
- {0x00000040, 0x00000000},
- {0x00000044, 0x00000000},
- {0x00000048, 0x00000008},
- {0x0000004c, 0x00000010},
- {0x00000050, 0x00000000},
- {0x00001040, 0x002ffc0f},
- {0x00001044, 0x002ffc0f},
- {0x00001048, 0x002ffc0f},
- {0x0000104c, 0x002ffc0f},
- {0x00001050, 0x002ffc0f},
- {0x00001054, 0x002ffc0f},
- {0x00001058, 0x002ffc0f},
- {0x0000105c, 0x002ffc0f},
- {0x00001060, 0x002ffc0f},
- {0x00001064, 0x002ffc0f},
- {0x000010f0, 0x00000100},
- {0x00001270, 0x00000000},
- {0x000012b0, 0x00000000},
- {0x000012f0, 0x00000000},
- {0x0000143c, 0x00000000},
- {0x0000147c, 0x00000000},
- {0x00008000, 0x00000000},
- {0x00008004, 0x00000000},
- {0x00008008, 0x00000000},
- {0x0000800c, 0x00000000},
- {0x00008018, 0x00000000},
- {0x00008020, 0x00000000},
- {0x00008038, 0x00000000},
- {0x0000803c, 0x00000000},
- {0x00008040, 0x00000000},
- {0x00008044, 0x00000000},
- {0x00008048, 0x00000000},
- {0x0000804c, 0xffffffff},
- {0x00008054, 0x00000000},
- {0x00008058, 0x00000000},
- {0x0000805c, 0x000fc78f},
- {0x00008060, 0x0000000f},
- {0x00008064, 0x00000000},
- {0x00008070, 0x00000310},
- {0x00008074, 0x00000020},
- {0x00008078, 0x00000000},
- {0x0000809c, 0x0000000f},
- {0x000080a0, 0x00000000},
- {0x000080a4, 0x02ff0000},
- {0x000080a8, 0x0e070605},
- {0x000080ac, 0x0000000d},
- {0x000080b0, 0x00000000},
- {0x000080b4, 0x00000000},
- {0x000080b8, 0x00000000},
- {0x000080bc, 0x00000000},
- {0x000080c0, 0x2a800000},
- {0x000080c4, 0x06900168},
- {0x000080c8, 0x13881c20},
- {0x000080cc, 0x01f40000},
- {0x000080d0, 0x00252500},
- {0x000080d4, 0x00a00000},
- {0x000080d8, 0x00400000},
- {0x000080dc, 0x00000000},
- {0x000080e0, 0xffffffff},
- {0x000080e4, 0x0000ffff},
- {0x000080e8, 0x3f3f3f3f},
- {0x000080ec, 0x00000000},
- {0x000080f0, 0x00000000},
- {0x000080f4, 0x00000000},
- {0x000080fc, 0x00020000},
- {0x00008100, 0x00000000},
- {0x00008108, 0x00000052},
- {0x0000810c, 0x00000000},
- {0x00008110, 0x00000000},
- {0x00008114, 0x000007ff},
- {0x00008118, 0x000000aa},
- {0x0000811c, 0x00003210},
- {0x00008124, 0x00000000},
- {0x00008128, 0x00000000},
- {0x0000812c, 0x00000000},
- {0x00008130, 0x00000000},
- {0x00008134, 0x00000000},
- {0x00008138, 0x00000000},
- {0x0000813c, 0x0000ffff},
- {0x00008144, 0xffffffff},
- {0x00008168, 0x00000000},
- {0x0000816c, 0x00000000},
- {0x00008170, 0x18486200},
- {0x00008174, 0x33332210},
- {0x00008178, 0x00000000},
- {0x0000817c, 0x00020000},
- {0x000081c0, 0x00000000},
- {0x000081c4, 0x33332210},
- {0x000081c8, 0x00000000},
- {0x000081cc, 0x00000000},
- {0x000081d4, 0x00000000},
- {0x000081ec, 0x00000000},
- {0x000081f0, 0x00000000},
- {0x000081f4, 0x00000000},
- {0x000081f8, 0x00000000},
- {0x000081fc, 0x00000000},
- {0x00008240, 0x00100000},
- {0x00008248, 0x00000800},
- {0x00008250, 0x00000000},
- {0x00008254, 0x00000000},
- {0x00008258, 0x00000000},
- {0x0000825c, 0x40000000},
- {0x00008260, 0x00080922},
- {0x00008264, 0x9d400010},
- {0x00008268, 0xffffffff},
- {0x0000826c, 0x0000ffff},
- {0x00008270, 0x00000000},
- {0x00008274, 0x40000000},
- {0x00008278, 0x003e4180},
- {0x0000827c, 0x00000004},
- {0x00008284, 0x0000002c},
- {0x00008288, 0x0000002c},
- {0x0000828c, 0x000000ff},
- {0x00008294, 0x00000000},
- {0x00008298, 0x00000000},
- {0x0000829c, 0x00000000},
- {0x00008300, 0x00000140},
- {0x00008314, 0x00000000},
- {0x0000831c, 0x0000010d},
- {0x00008328, 0x00000000},
- {0x0000832c, 0x00000007},
- {0x00008330, 0x00000302},
- {0x00008334, 0x00000700},
- {0x00008338, 0x00ff0000},
- {0x0000833c, 0x02400000},
- {0x00008340, 0x000107ff},
- {0x00008344, 0xaa48105b},
- {0x00008348, 0x008f0000},
- {0x0000835c, 0x00000000},
- {0x00008360, 0xffffffff},
- {0x00008364, 0xffffffff},
- {0x00008368, 0x00000000},
- {0x00008370, 0x00000000},
- {0x00008374, 0x000000ff},
- {0x00008378, 0x00000000},
- {0x0000837c, 0x00000000},
- {0x00008380, 0xffffffff},
- {0x00008384, 0xffffffff},
- {0x00008390, 0xffffffff},
- {0x00008394, 0xffffffff},
- {0x00008398, 0x00000000},
- {0x0000839c, 0x00000000},
- {0x000083a0, 0x00000000},
- {0x000083a4, 0x0000fa14},
- {0x000083a8, 0x000f0c00},
- {0x000083ac, 0x33332210},
- {0x000083b0, 0x33332210},
- {0x000083b4, 0x33332210},
- {0x000083b8, 0x33332210},
- {0x000083bc, 0x00000000},
- {0x000083c0, 0x00000000},
- {0x000083c4, 0x00000000},
- {0x000083c8, 0x00000000},
- {0x000083cc, 0x00000200},
- {0x000083d0, 0x000301ff},
-};
+#define ar9331_1p2_mac_core ar9331_1p1_mac_core
-static const u32 ar9331_common_rx_gain_1p2[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x01800082},
- {0x0000a014, 0x01820181},
- {0x0000a018, 0x01840183},
- {0x0000a01c, 0x01880185},
- {0x0000a020, 0x018a0189},
- {0x0000a024, 0x02850284},
- {0x0000a028, 0x02890288},
- {0x0000a02c, 0x03850384},
- {0x0000a030, 0x03890388},
- {0x0000a034, 0x038b038a},
- {0x0000a038, 0x038d038c},
- {0x0000a03c, 0x03910390},
- {0x0000a040, 0x03930392},
- {0x0000a044, 0x03950394},
- {0x0000a048, 0x00000396},
- {0x0000a04c, 0x00000000},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x28282828},
- {0x0000a084, 0x28282828},
- {0x0000a088, 0x28282828},
- {0x0000a08c, 0x28282828},
- {0x0000a090, 0x28282828},
- {0x0000a094, 0x21212128},
- {0x0000a098, 0x171c1c1c},
- {0x0000a09c, 0x02020212},
- {0x0000a0a0, 0x00000202},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x111f1100},
- {0x0000a0c8, 0x111d111e},
- {0x0000a0cc, 0x111b111c},
- {0x0000a0d0, 0x22032204},
- {0x0000a0d4, 0x22012202},
- {0x0000a0d8, 0x221f2200},
- {0x0000a0dc, 0x221d221e},
- {0x0000a0e0, 0x33013302},
- {0x0000a0e4, 0x331f3300},
- {0x0000a0e8, 0x4402331e},
- {0x0000a0ec, 0x44004401},
- {0x0000a0f0, 0x441e441f},
- {0x0000a0f4, 0x55015502},
- {0x0000a0f8, 0x551f5500},
- {0x0000a0fc, 0x6602551e},
- {0x0000a100, 0x66006601},
- {0x0000a104, 0x661e661f},
- {0x0000a108, 0x7703661d},
- {0x0000a10c, 0x77017702},
- {0x0000a110, 0x00007700},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x111f1100},
- {0x0000a148, 0x111d111e},
- {0x0000a14c, 0x111b111c},
- {0x0000a150, 0x22032204},
- {0x0000a154, 0x22012202},
- {0x0000a158, 0x221f2200},
- {0x0000a15c, 0x221d221e},
- {0x0000a160, 0x33013302},
- {0x0000a164, 0x331f3300},
- {0x0000a168, 0x4402331e},
- {0x0000a16c, 0x44004401},
- {0x0000a170, 0x441e441f},
- {0x0000a174, 0x55015502},
- {0x0000a178, 0x551f5500},
- {0x0000a17c, 0x6602551e},
- {0x0000a180, 0x66006601},
- {0x0000a184, 0x661e661f},
- {0x0000a188, 0x7703661d},
- {0x0000a18c, 0x77017702},
- {0x0000a190, 0x00007700},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000296},
-};
+#define ar9331_common_wo_xlna_rx_gain_1p2 ar9331_common_wo_xlna_rx_gain_1p1
+
+#define ar9331_common_rx_gain_1p2 ar9485_common_rx_gain_1_1
#endif /* INITVALS_9330_1P2_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
index 815a8af..1d8235e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2011 Atheros Communications Inc.
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -18,16 +19,16 @@
#define INITVALS_9340_H
static const u32 ar9340_1p0_radio_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000160ac, 0xa4646800, 0xa4646800, 0xa4646800, 0xa4646800},
- {0x0001610c, 0x08000000, 0x08000000, 0x00000000, 0x00000000},
+ {0x0001610c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016140, 0x10804000, 0x10804000, 0x50804000, 0x50804000},
- {0x0001650c, 0x08000000, 0x08000000, 0x00000000, 0x00000000},
+ {0x0001650c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016540, 0x10804000, 0x10804000, 0x50804000, 0x50804000},
};
static const u32 ar9340Modes_lowest_ob_db_tx_gain_table_1p0[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -99,21 +100,10 @@ static const u32 ar9340Modes_lowest_ob_db_tx_gain_table_1p0[][5] = {
{0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
};
-static const u32 ar9340Modes_fast_clock_1p0[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
- {0x00001030, 0x00000268, 0x000004d0},
- {0x00001070, 0x0000018c, 0x00000318},
- {0x000010b0, 0x00000fd0, 0x00001fa0},
- {0x00008014, 0x044c044c, 0x08980898},
- {0x0000801c, 0x148ec02b, 0x148ec057},
- {0x00008318, 0x000044c0, 0x00008980},
- {0x00009e00, 0x03721821, 0x03721821},
- {0x0000a230, 0x0000000b, 0x00000016},
- {0x0000a254, 0x00000898, 0x00001130},
-};
+#define ar9340Modes_fast_clock_1p0 ar9300Modes_fast_clock_2p2
static const u32 ar9340_1p0_radio_core[][2] = {
- /* Addr allmodes */
+ /* Addr allmodes */
{0x00016000, 0x36db6db6},
{0x00016004, 0x6db6db40},
{0x00016008, 0x73f00000},
@@ -146,15 +136,13 @@ static const u32 ar9340_1p0_radio_core[][2] = {
{0x00016100, 0x04cb0001},
{0x00016104, 0xfff80000},
{0x00016108, 0x00080010},
- {0x0001610c, 0x00000000},
{0x00016140, 0x50804008},
{0x00016144, 0x01884080},
{0x00016148, 0x000080c0},
{0x00016280, 0x01000015},
- {0x00016284, 0x05530000},
+ {0x00016284, 0x15530000},
{0x00016288, 0x00318000},
{0x0001628c, 0x50000000},
- {0x00016290, 0x4080294f},
{0x00016380, 0x00000000},
{0x00016384, 0x00000000},
{0x00016388, 0x00800700},
@@ -219,52 +207,43 @@ static const u32 ar9340_1p0_radio_core[][2] = {
};
static const u32 ar9340_1p0_radio_core_40M[][2] = {
+ /* Addr allmodes */
{0x0001609c, 0x02566f3a},
{0x000160ac, 0xa4647c00},
{0x000160b0, 0x01885f5a},
};
-static const u32 ar9340_1p0_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
+#define ar9340_1p0_mac_postamble ar9300_2p2_mac_postamble
-static const u32 ar9340_1p0_soc_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
-};
+#define ar9340_1p0_soc_postamble ar9300_2p2_soc_postamble
static const u32 ar9340_1p0_baseband_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
{0x00009820, 0x206a022e, 0x206a022e, 0x206a022e, 0x206a022e},
{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
{0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
{0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
- {0x00009c00, 0x00000044, 0x000000c4, 0x000000c4, 0x00000044},
- {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
- {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e},
- {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
{0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
- {0x0000a204, 0x00003fc0, 0x00003fc4, 0x00003fc4, 0x00003fc0},
+ {0x0000a204, 0x00003ec0, 0x00003ec4, 0x00003ec4, 0x00003ec0},
{0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
{0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
{0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
{0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
{0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
@@ -277,11 +256,11 @@ static const u32 ar9340_1p0_baseband_postamble[][5] = {
{0x0000a288, 0x00000220, 0x00000220, 0x00000110, 0x00000110},
{0x0000a28c, 0x00011111, 0x00011111, 0x00022222, 0x00022222},
{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
- {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
- {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a2d0, 0x00041983, 0x00041983, 0x00041982, 0x00041982},
+ {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
- {0x0000ae04, 0x00180000, 0x00180000, 0x00180000, 0x00180000},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
{0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
@@ -289,21 +268,21 @@ static const u32 ar9340_1p0_baseband_postamble[][5] = {
};
static const u32 ar9340_1p0_baseband_core[][2] = {
- /* Addr allmodes */
+ /* Addr allmodes */
{0x00009800, 0xafe68e30},
{0x00009804, 0xfd14e000},
{0x00009808, 0x9c0a9f6b},
{0x0000980c, 0x04900000},
- {0x00009814, 0xb280c00a},
+ {0x00009814, 0x3280c00a},
{0x00009818, 0x00000000},
{0x0000981c, 0x00020028},
- {0x00009834, 0x5f3ca3de},
+ {0x00009834, 0x6400a190},
{0x00009838, 0x0108ecff},
- {0x0000983c, 0x14750600},
+ {0x0000983c, 0x14000600},
{0x00009880, 0x201fff00},
{0x00009884, 0x00001042},
{0x000098a4, 0x00200400},
- {0x000098b0, 0x52440bbe},
+ {0x000098b0, 0x32840bbe},
{0x000098d0, 0x004b6a8e},
{0x000098d4, 0x00000820},
{0x000098dc, 0x00000000},
@@ -329,7 +308,6 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e3c, 0xcf946222},
{0x00009e40, 0x0d261820},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
@@ -342,8 +320,6 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x0000a220, 0x00000000},
{0x0000a224, 0x00000000},
{0x0000a228, 0x10002310},
- {0x0000a22c, 0x01036a1e},
- {0x0000a234, 0x10000fff},
{0x0000a23c, 0x00000000},
{0x0000a244, 0x0c000000},
{0x0000a2a0, 0x00000001},
@@ -351,10 +327,6 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x0000a2c8, 0x00000000},
{0x0000a2cc, 0x18c43433},
{0x0000a2d4, 0x00000000},
- {0x0000a2dc, 0x00000000},
- {0x0000a2e0, 0x00000000},
- {0x0000a2e4, 0x00000000},
- {0x0000a2e8, 0x00000000},
{0x0000a2ec, 0x00000000},
{0x0000a2f0, 0x00000000},
{0x0000a2f4, 0x00000000},
@@ -385,7 +357,7 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x0000a3e8, 0x20202020},
{0x0000a3ec, 0x20202020},
{0x0000a3f0, 0x00000000},
- {0x0000a3f4, 0x00000246},
+ {0x0000a3f4, 0x00000000},
{0x0000a3f8, 0x0cdbd380},
{0x0000a3fc, 0x000f0f01},
{0x0000a400, 0x8fa91f01},
@@ -402,33 +374,17 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x0000a430, 0x1ce739ce},
{0x0000a434, 0x00000000},
{0x0000a438, 0x00001801},
- {0x0000a43c, 0x00000000},
+ {0x0000a43c, 0x00100000},
{0x0000a440, 0x00000000},
{0x0000a444, 0x00000000},
- {0x0000a448, 0x04000080},
+ {0x0000a448, 0x05000080},
{0x0000a44c, 0x00000001},
{0x0000a450, 0x00010000},
{0x0000a458, 0x00000000},
- {0x0000a600, 0x00000000},
- {0x0000a604, 0x00000000},
- {0x0000a608, 0x00000000},
- {0x0000a60c, 0x00000000},
- {0x0000a610, 0x00000000},
- {0x0000a614, 0x00000000},
- {0x0000a618, 0x00000000},
- {0x0000a61c, 0x00000000},
- {0x0000a620, 0x00000000},
- {0x0000a624, 0x00000000},
- {0x0000a628, 0x00000000},
- {0x0000a62c, 0x00000000},
- {0x0000a630, 0x00000000},
- {0x0000a634, 0x00000000},
- {0x0000a638, 0x00000000},
- {0x0000a63c, 0x00000000},
{0x0000a640, 0x00000000},
{0x0000a644, 0x3fad9d74},
{0x0000a648, 0x0048060a},
- {0x0000a64c, 0x00000637},
+ {0x0000a64c, 0x00003c37},
{0x0000a670, 0x03020100},
{0x0000a674, 0x09080504},
{0x0000a678, 0x0d0c0b0a},
@@ -451,10 +407,6 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x0000a8f4, 0x00000000},
{0x0000b2d0, 0x00000080},
{0x0000b2d4, 0x00000000},
- {0x0000b2dc, 0x00000000},
- {0x0000b2e0, 0x00000000},
- {0x0000b2e4, 0x00000000},
- {0x0000b2e8, 0x00000000},
{0x0000b2ec, 0x00000000},
{0x0000b2f0, 0x00000000},
{0x0000b2f4, 0x00000000},
@@ -465,80 +417,108 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
};
static const u32 ar9340Modes_high_power_tx_gain_table_1p0[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
- {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
- {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
- {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
- {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
- {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
- {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
- {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
- {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
- {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
- {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
- {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
- {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
- {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
- {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
- {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
- {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
- {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
- {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
- {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
- {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a504, 0x04002222, 0x04002222, 0x02000001, 0x02000001},
+ {0x0000a508, 0x09002421, 0x09002421, 0x05000003, 0x05000003},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0a000005, 0x0a000005},
+ {0x0000a510, 0x13004620, 0x13004620, 0x0e000201, 0x0e000201},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000203, 0x11000203},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x14000401, 0x14000401},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x18000403, 0x18000403},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000602, 0x1b000602},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000802, 0x1f000802},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x21000620, 0x21000620},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x25000820, 0x25000820},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x29000822, 0x29000822},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2d000824, 0x2d000824},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x30000828, 0x30000828},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x3400082a, 0x3400082a},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x38000849, 0x38000849},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b000a2c, 0x3b000a2c},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e000e2b, 0x3e000e2b},
+ {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42000e2d, 0x42000e2d},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x4500124a, 0x4500124a},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x4900124c, 0x4900124c},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c00126c, 0x4c00126c},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x4f00128c, 0x4f00128c},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x52001290, 0x52001290},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
{0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
- {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
- {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
- {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
- {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
- {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
- {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
- {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
- {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
- {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
- {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
- {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
- {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
- {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
- {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
- {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
- {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
- {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
- {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
- {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
- {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
- {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
- {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
- {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
- {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
- {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x00016044, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
+ {0x0000a584, 0x04802222, 0x04802222, 0x02800001, 0x02800001},
+ {0x0000a588, 0x09802421, 0x09802421, 0x05800003, 0x05800003},
+ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0a800005, 0x0a800005},
+ {0x0000a590, 0x13804620, 0x13804620, 0x0e800201, 0x0e800201},
+ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800203, 0x11800203},
+ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x14800401, 0x14800401},
+ {0x0000a59c, 0x21805420, 0x21805420, 0x18800403, 0x18800403},
+ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800602, 0x1b800602},
+ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800802, 0x1f800802},
+ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x21800620, 0x21800620},
+ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x25800820, 0x25800820},
+ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x29800822, 0x29800822},
+ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2d800824, 0x2d800824},
+ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x30800828, 0x30800828},
+ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x3480082a, 0x3480082a},
+ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38800849, 0x38800849},
+ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b800a2c, 0x3b800a2c},
+ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e800e2b, 0x3e800e2b},
+ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42800e2d, 0x42800e2d},
+ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x4580124a, 0x4580124a},
+ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x4980124c, 0x4980124c},
+ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c80126c, 0x4c80126c},
+ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x4f80128c, 0x4f80128c},
+ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x52801290, 0x52801290},
+ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x00016044, 0x056db2db, 0x056db2db, 0x022492db, 0x022492db},
{0x00016048, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
- {0x00016444, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
+ {0x00016444, 0x056db2db, 0x056db2db, 0x022492db, 0x022492db},
{0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
};
static const u32 ar9340Modes_high_ob_db_tx_gain_table_1p0[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@@ -559,7 +539,7 @@ static const u32 ar9340Modes_high_ob_db_tx_gain_table_1p0[][5] = {
{0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
{0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
{0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
{0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
{0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
{0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
@@ -604,13 +584,43 @@ static const u32 ar9340Modes_high_ob_db_tx_gain_table_1p0[][5] = {
{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x00016044, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
- {0x00016048, 0x8e481266, 0x8e481266, 0x8e481266, 0x8e481266},
+ {0x00016048, 0x8e481666, 0x8e481666, 0x8e481266, 0x8e481266},
+ {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
{0x00016444, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
- {0x00016448, 0x8e481266, 0x8e481266, 0x8e481266, 0x8e481266},
+ {0x00016448, 0x8e481666, 0x8e481666, 0x8e481266, 0x8e481266},
};
+
static const u32 ar9340Modes_ub124_tx_gain_table_1p0[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a00ae, 0x206a00ae},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec82d2e, 0x7ec82d2e},
+ {0x0000a2dc, 0xfef5d402, 0xfef5d402, 0xfdab5b52, 0xfdab5b52},
+ {0x0000a2e0, 0xfe896600, 0xfe896600, 0xfd339c84, 0xfd339c84},
+ {0x0000a2e4, 0xff01f800, 0xff01f800, 0xfec3e000, 0xfec3e000},
+ {0x0000a2e8, 0xfffe0000, 0xfffe0000, 0xfffc0000, 0xfffc0000},
{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@@ -676,15 +686,34 @@ static const u32 ar9340Modes_ub124_tx_gain_table_1p0[][5] = {
{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x00016044, 0x036db2db, 0x036db2db, 0x036db2db, 0x036db2db},
- {0x00016048, 0x69b65266, 0x69b65266, 0x69b65266, 0x69b65266},
- {0x00016444, 0x036db2db, 0x036db2db, 0x036db2db, 0x036db2db},
- {0x00016448, 0x69b65266, 0x69b65266, 0x69b65266, 0x69b65266},
+ {0x00016044, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
+ {0x00016048, 0x8e480086, 0x8e480086, 0x8e480086, 0x8e480086},
+ {0x00016444, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
+ {0x00016448, 0x8e480086, 0x8e480086, 0x8e480086, 0x8e480086},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0xfef5d402, 0xfef5d402, 0xfdab5b52, 0xfdab5b52},
+ {0x0000b2e0, 0xfe896600, 0xfe896600, 0xfd339c84, 0xfd339c84},
+ {0x0000b2e4, 0xff01f800, 0xff01f800, 0xfec3e000, 0xfec3e000},
+ {0x0000b2e8, 0xfffe0000, 0xfffe0000, 0xfffc0000, 0xfffc0000},
};
-
static const u32 ar9340Common_rx_gain_table_1p0[][2] = {
- /* Addr allmodes */
+ /* Addr allmodes */
{0x0000a000, 0x00010000},
{0x0000a004, 0x00030002},
{0x0000a008, 0x00050004},
@@ -845,14 +874,14 @@ static const u32 ar9340Common_rx_gain_table_1p0[][2] = {
{0x0000b074, 0x00000000},
{0x0000b078, 0x00000000},
{0x0000b07c, 0x00000000},
- {0x0000b080, 0x32323232},
- {0x0000b084, 0x2f2f3232},
- {0x0000b088, 0x23282a2d},
- {0x0000b08c, 0x1c1e2123},
- {0x0000b090, 0x14171919},
- {0x0000b094, 0x0e0e1214},
- {0x0000b098, 0x03050707},
- {0x0000b09c, 0x00030303},
+ {0x0000b080, 0x23232323},
+ {0x0000b084, 0x21232323},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
{0x0000b0a0, 0x00000000},
{0x0000b0a4, 0x00000000},
{0x0000b0a8, 0x00000000},
@@ -944,7 +973,11 @@ static const u32 ar9340Common_rx_gain_table_1p0[][2] = {
};
static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -952,8 +985,8 @@ static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
{0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
{0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
- {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
- {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+ {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
{0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
{0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
{0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
@@ -965,19 +998,19 @@ static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
{0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
{0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
{0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
- {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
- {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
- {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
- {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
- {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
- {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
- {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
{0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
{0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
{0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
@@ -1010,14 +1043,40 @@ static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
{0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
{0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
{0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x00016044, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
- {0x00016048, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
+ {0x00016048, 0x24925666, 0x24925666, 0x24925266, 0x24925266},
+ {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
+ {0x00016288, 0xf0318000, 0xf0318000, 0xf0318000, 0xf0318000},
{0x00016444, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
- {0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
+ {0x00016448, 0x24925666, 0x24925666, 0x24925266, 0x24925266},
};
static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -1025,8 +1084,8 @@ static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = {
{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
{0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
{0x0000a514, 0x1c000223, 0x1c000223, 0x11000400, 0x11000400},
- {0x0000a518, 0x21020220, 0x21020220, 0x15000402, 0x15000402},
- {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+ {0x0000a518, 0x21002220, 0x21002220, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
{0x0000a520, 0x2b022220, 0x2b022220, 0x1b000603, 0x1b000603},
{0x0000a524, 0x2f022222, 0x2f022222, 0x1f000a02, 0x1f000a02},
{0x0000a528, 0x34022225, 0x34022225, 0x23000a04, 0x23000a04},
@@ -1038,19 +1097,19 @@ static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = {
{0x0000a540, 0x4e02246c, 0x4e02246c, 0x38001660, 0x38001660},
{0x0000a544, 0x5302266c, 0x5302266c, 0x3b001861, 0x3b001861},
{0x0000a548, 0x5702286c, 0x5702286c, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x61042a6c, 0x61042a6c, 0x44001c84, 0x44001c84},
- {0x0000a554, 0x66062a6c, 0x66062a6c, 0x48001ce3, 0x48001ce3},
- {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x4c001ce5, 0x4c001ce5},
- {0x0000a55c, 0x7006308c, 0x7006308c, 0x50001ce9, 0x50001ce9},
- {0x0000a560, 0x730a308a, 0x730a308a, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
- {0x0000a56c, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
- {0x0000a570, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
- {0x0000a574, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
- {0x0000a578, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
- {0x0000a57c, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
{0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
{0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
{0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
@@ -1083,14 +1142,36 @@ static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = {
{0x0000a5f4, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
{0x0000a5f8, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
{0x0000a5fc, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x00016044, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4},
- {0x00016048, 0x24927266, 0x24927266, 0x8e483266, 0x8e483266},
+ {0x00016048, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266},
+ {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
+ {0x00016288, 0x30318000, 0x30318000, 0x00318000, 0x00318000},
{0x00016444, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4},
- {0x00016448, 0x24927266, 0x24927266, 0x8e482266, 0x8e482266},
+ {0x00016448, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266},
};
static const u32 ar9340_1p0_mac_core[][2] = {
- /* Addr allmodes */
+ /* Addr allmodes */
{0x00000008, 0x00000000},
{0x00000030, 0x00020085},
{0x00000034, 0x00000005},
@@ -1119,6 +1200,7 @@ static const u32 ar9340_1p0_mac_core[][2] = {
{0x00008004, 0x00000000},
{0x00008008, 0x00000000},
{0x0000800c, 0x00000000},
+ {0x00008010, 0x00080800},
{0x00008018, 0x00000000},
{0x00008020, 0x00000000},
{0x00008038, 0x00000000},
@@ -1146,7 +1228,7 @@ static const u32 ar9340_1p0_mac_core[][2] = {
{0x000080bc, 0x00000000},
{0x000080c0, 0x2a800000},
{0x000080c4, 0x06900168},
- {0x000080c8, 0x13881c20},
+ {0x000080c8, 0x13881c22},
{0x000080cc, 0x01f40000},
{0x000080d0, 0x00252500},
{0x000080d4, 0x00a00000},
@@ -1250,276 +1332,17 @@ static const u32 ar9340_1p0_mac_core[][2] = {
{0x000083c4, 0x00000000},
{0x000083c8, 0x00000000},
{0x000083cc, 0x00000200},
- {0x000083d0, 0x000301ff},
+ {0x000083d0, 0x000101ff},
};
-static const u32 ar9340Common_wo_xlna_rx_gain_table_1p0[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x03820190},
- {0x0000a030, 0x03840383},
- {0x0000a034, 0x03880385},
- {0x0000a038, 0x038a0389},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x29292929},
- {0x0000a084, 0x29292929},
- {0x0000a088, 0x29292929},
- {0x0000a08c, 0x29292929},
- {0x0000a090, 0x22292929},
- {0x0000a094, 0x1d1d2222},
- {0x0000a098, 0x0c111117},
- {0x0000a09c, 0x00030303},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x32323232},
- {0x0000b084, 0x2f2f3232},
- {0x0000b088, 0x23282a2d},
- {0x0000b08c, 0x1c1e2123},
- {0x0000b090, 0x14171919},
- {0x0000b094, 0x0e0e1214},
- {0x0000b098, 0x03050707},
- {0x0000b09c, 0x00030303},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
+#define ar9340Common_wo_xlna_rx_gain_table_1p0 ar9300Common_wo_xlna_rx_gain_table_2p2
static const u32 ar9340_1p0_soc_preamble[][2] = {
- /* Addr allmodes */
- {0x000040a4, 0x00a0c1c9},
+ /* Addr allmodes */
{0x00007008, 0x00000000},
{0x00007020, 0x00000000},
{0x00007034, 0x00000002},
{0x00007038, 0x000004c2},
};
-#endif
+#endif /* INITVALS_9340_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 1d6658e..4ef7dcc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2010 Atheros Communications Inc.
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -52,7 +53,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
{0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
- {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x33795d5e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x32395d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
@@ -61,7 +62,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
- {0x0000a204, 0x013187c0, 0x013187c4, 0x013187c4, 0x013187c0},
+ {0x0000a204, 0x01318fc0, 0x01318fc4, 0x01318fc4, 0x01318fc0},
{0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
{0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f},
{0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
@@ -958,7 +959,7 @@ static const u32 ar9462_2p0_radio_core[][2] = {
{0x0001604c, 0x2699e04f},
{0x00016050, 0x6db6db6c},
{0x00016058, 0x6c200000},
- {0x00016080, 0x00040000},
+ {0x00016080, 0x000c0000},
{0x00016084, 0x9a68048c},
{0x00016088, 0x54214514},
{0x0001608c, 0x1203040b},
@@ -981,7 +982,7 @@ static const u32 ar9462_2p0_radio_core[][2] = {
{0x00016144, 0x02084080},
{0x00016148, 0x000080c0},
{0x00016280, 0x050a0001},
- {0x00016284, 0x3d841400},
+ {0x00016284, 0x3d841418},
{0x00016288, 0x00000000},
{0x0001628c, 0xe3000000},
{0x00016290, 0xa1005080},
@@ -1007,6 +1008,7 @@ static const u32 ar9462_2p0_radio_core[][2] = {
static const u32 ar9462_2p0_soc_preamble[][2] = {
/* Addr allmodes */
+ {0x000040a4, 0x00a0c1c9},
{0x00007020, 0x00000000},
{0x00007034, 0x00000002},
{0x00007038, 0x000004c2},
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index d16d029..fb4497f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -17,360 +18,151 @@
#ifndef INITVALS_9485_H
#define INITVALS_9485_H
-static const u32 ar9485_1_1_mac_core[][2] = {
- /* Addr allmodes */
- {0x00000008, 0x00000000},
- {0x00000030, 0x00020085},
- {0x00000034, 0x00000005},
- {0x00000040, 0x00000000},
- {0x00000044, 0x00000000},
- {0x00000048, 0x00000008},
- {0x0000004c, 0x00000010},
- {0x00000050, 0x00000000},
- {0x00001040, 0x002ffc0f},
- {0x00001044, 0x002ffc0f},
- {0x00001048, 0x002ffc0f},
- {0x0000104c, 0x002ffc0f},
- {0x00001050, 0x002ffc0f},
- {0x00001054, 0x002ffc0f},
- {0x00001058, 0x002ffc0f},
- {0x0000105c, 0x002ffc0f},
- {0x00001060, 0x002ffc0f},
- {0x00001064, 0x002ffc0f},
- {0x000010f0, 0x00000100},
- {0x00001270, 0x00000000},
- {0x000012b0, 0x00000000},
- {0x000012f0, 0x00000000},
- {0x0000143c, 0x00000000},
- {0x0000147c, 0x00000000},
- {0x00008000, 0x00000000},
- {0x00008004, 0x00000000},
- {0x00008008, 0x00000000},
- {0x0000800c, 0x00000000},
- {0x00008018, 0x00000000},
- {0x00008020, 0x00000000},
- {0x00008038, 0x00000000},
- {0x0000803c, 0x00000000},
- {0x00008040, 0x00000000},
- {0x00008044, 0x00000000},
- {0x00008048, 0x00000000},
- {0x0000804c, 0xffffffff},
- {0x00008054, 0x00000000},
- {0x00008058, 0x00000000},
- {0x0000805c, 0x000fc78f},
- {0x00008060, 0x0000000f},
- {0x00008064, 0x00000000},
- {0x00008070, 0x00000310},
- {0x00008074, 0x00000020},
- {0x00008078, 0x00000000},
- {0x0000809c, 0x0000000f},
- {0x000080a0, 0x00000000},
- {0x000080a4, 0x02ff0000},
- {0x000080a8, 0x0e070605},
- {0x000080ac, 0x0000000d},
- {0x000080b0, 0x00000000},
- {0x000080b4, 0x00000000},
- {0x000080b8, 0x00000000},
- {0x000080bc, 0x00000000},
- {0x000080c0, 0x2a800000},
- {0x000080c4, 0x06900168},
- {0x000080c8, 0x13881c22},
- {0x000080cc, 0x01f40000},
- {0x000080d0, 0x00252500},
- {0x000080d4, 0x00a00000},
- {0x000080d8, 0x00400000},
- {0x000080dc, 0x00000000},
- {0x000080e0, 0xffffffff},
- {0x000080e4, 0x0000ffff},
- {0x000080e8, 0x3f3f3f3f},
- {0x000080ec, 0x00000000},
- {0x000080f0, 0x00000000},
- {0x000080f4, 0x00000000},
- {0x000080fc, 0x00020000},
- {0x00008100, 0x00000000},
- {0x00008108, 0x00000052},
- {0x0000810c, 0x00000000},
- {0x00008110, 0x00000000},
- {0x00008114, 0x000007ff},
- {0x00008118, 0x000000aa},
- {0x0000811c, 0x00003210},
- {0x00008124, 0x00000000},
- {0x00008128, 0x00000000},
- {0x0000812c, 0x00000000},
- {0x00008130, 0x00000000},
- {0x00008134, 0x00000000},
- {0x00008138, 0x00000000},
- {0x0000813c, 0x0000ffff},
- {0x00008144, 0xffffffff},
- {0x00008168, 0x00000000},
- {0x0000816c, 0x00000000},
- {0x00008170, 0x18486200},
- {0x00008174, 0x33332210},
- {0x00008178, 0x00000000},
- {0x0000817c, 0x00020000},
- {0x000081c0, 0x00000000},
- {0x000081c4, 0x33332210},
- {0x000081d4, 0x00000000},
- {0x000081ec, 0x00000000},
- {0x000081f0, 0x00000000},
- {0x000081f4, 0x00000000},
- {0x000081f8, 0x00000000},
- {0x000081fc, 0x00000000},
- {0x00008240, 0x00100000},
- {0x00008244, 0x0010f400},
- {0x00008248, 0x00000800},
- {0x0000824c, 0x0001e800},
- {0x00008250, 0x00000000},
- {0x00008254, 0x00000000},
- {0x00008258, 0x00000000},
- {0x0000825c, 0x40000000},
- {0x00008260, 0x00080922},
- {0x00008264, 0x9ca00010},
- {0x00008268, 0xffffffff},
- {0x0000826c, 0x0000ffff},
- {0x00008270, 0x00000000},
- {0x00008274, 0x40000000},
- {0x00008278, 0x003e4180},
- {0x0000827c, 0x00000004},
- {0x00008284, 0x0000002c},
- {0x00008288, 0x0000002c},
- {0x0000828c, 0x000000ff},
- {0x00008294, 0x00000000},
- {0x00008298, 0x00000000},
- {0x0000829c, 0x00000000},
- {0x00008300, 0x00000140},
- {0x00008314, 0x00000000},
- {0x0000831c, 0x0000010d},
- {0x00008328, 0x00000000},
- {0x0000832c, 0x00000007},
- {0x00008330, 0x00000302},
- {0x00008334, 0x00000700},
- {0x00008338, 0x00ff0000},
- {0x0000833c, 0x02400000},
- {0x00008340, 0x000107ff},
- {0x00008344, 0xa248105b},
- {0x00008348, 0x008f0000},
- {0x0000835c, 0x00000000},
- {0x00008360, 0xffffffff},
- {0x00008364, 0xffffffff},
- {0x00008368, 0x00000000},
- {0x00008370, 0x00000000},
- {0x00008374, 0x000000ff},
- {0x00008378, 0x00000000},
- {0x0000837c, 0x00000000},
- {0x00008380, 0xffffffff},
- {0x00008384, 0xffffffff},
- {0x00008390, 0xffffffff},
- {0x00008394, 0xffffffff},
- {0x00008398, 0x00000000},
- {0x0000839c, 0x00000000},
- {0x000083a0, 0x00000000},
- {0x000083a4, 0x0000fa14},
- {0x000083a8, 0x000f0c00},
- {0x000083ac, 0x33332210},
- {0x000083b0, 0x33332210},
- {0x000083b4, 0x33332210},
- {0x000083b8, 0x33332210},
- {0x000083bc, 0x00000000},
- {0x000083c0, 0x00000000},
- {0x000083c4, 0x00000000},
- {0x000083c8, 0x00000000},
- {0x000083cc, 0x00000200},
- {0x000083d0, 0x000301ff},
-};
+/* AR9485 1.1 */
-static const u32 ar9485_1_1_baseband_core[][2] = {
- /* Addr allmodes */
- {0x00009800, 0xafe68e30},
- {0x00009804, 0xfd14e000},
- {0x00009808, 0x9c0a8f6b},
- {0x0000980c, 0x04800000},
- {0x00009814, 0x9280c00a},
- {0x00009818, 0x00000000},
- {0x0000981c, 0x00020028},
- {0x00009834, 0x5f3ca3de},
- {0x00009838, 0x0108ecff},
- {0x0000983c, 0x14750600},
- {0x00009880, 0x201fff00},
- {0x00009884, 0x00001042},
- {0x000098a4, 0x00200400},
- {0x000098b0, 0x52440bbe},
- {0x000098d0, 0x004b6a8e},
- {0x000098d4, 0x00000820},
- {0x000098dc, 0x00000000},
- {0x000098f0, 0x00000000},
- {0x000098f4, 0x00000000},
- {0x00009c04, 0x00000000},
- {0x00009c08, 0x03200000},
- {0x00009c0c, 0x00000000},
- {0x00009c10, 0x00000000},
- {0x00009c14, 0x00046384},
- {0x00009c18, 0x05b6b440},
- {0x00009c1c, 0x00b6b440},
- {0x00009d00, 0xc080a333},
- {0x00009d04, 0x40206c10},
- {0x00009d08, 0x009c4060},
- {0x00009d0c, 0x1883800a},
- {0x00009d10, 0x01834061},
- {0x00009d14, 0x00c00400},
- {0x00009d18, 0x00000000},
- {0x00009d1c, 0x00000000},
- {0x00009e08, 0x0038233c},
- {0x00009e24, 0x9927b515},
- {0x00009e28, 0x12ef0200},
- {0x00009e30, 0x06336f77},
- {0x00009e34, 0x6af6532f},
- {0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
- {0x00009e4c, 0x00001004},
- {0x00009e50, 0x00ff03f1},
- {0x00009fc0, 0x80be4788},
- {0x00009fc4, 0x0001efb5},
- {0x00009fcc, 0x40000014},
- {0x0000a20c, 0x00000000},
- {0x0000a210, 0x00000000},
- {0x0000a220, 0x00000000},
- {0x0000a224, 0x00000000},
- {0x0000a228, 0x10002310},
- {0x0000a23c, 0x00000000},
- {0x0000a244, 0x0c000000},
- {0x0000a2a0, 0x00000001},
- {0x0000a2c0, 0x00000001},
- {0x0000a2c8, 0x00000000},
- {0x0000a2cc, 0x18c43433},
- {0x0000a2d4, 0x00000000},
- {0x0000a2dc, 0x00000000},
- {0x0000a2e0, 0x00000000},
- {0x0000a2e4, 0x00000000},
- {0x0000a2e8, 0x00000000},
- {0x0000a2ec, 0x00000000},
- {0x0000a2f0, 0x00000000},
- {0x0000a2f4, 0x00000000},
- {0x0000a2f8, 0x00000000},
- {0x0000a344, 0x00000000},
- {0x0000a34c, 0x00000000},
- {0x0000a350, 0x0000a000},
- {0x0000a364, 0x00000000},
- {0x0000a370, 0x00000000},
- {0x0000a390, 0x00000001},
- {0x0000a394, 0x00000444},
- {0x0000a398, 0x001f0e0f},
- {0x0000a39c, 0x0075393f},
- {0x0000a3a0, 0xb79f6427},
- {0x0000a3a4, 0x000000ff},
- {0x0000a3a8, 0x3b3b3b3b},
- {0x0000a3ac, 0x2f2f2f2f},
- {0x0000a3c0, 0x20202020},
- {0x0000a3c4, 0x22222220},
- {0x0000a3c8, 0x20200020},
- {0x0000a3cc, 0x20202020},
- {0x0000a3d0, 0x20202020},
- {0x0000a3d4, 0x20202020},
- {0x0000a3d8, 0x20202020},
- {0x0000a3dc, 0x20202020},
- {0x0000a3e0, 0x20202020},
- {0x0000a3e4, 0x20202020},
- {0x0000a3e8, 0x20202020},
- {0x0000a3ec, 0x20202020},
- {0x0000a3f0, 0x00000000},
- {0x0000a3f4, 0x00000006},
- {0x0000a3f8, 0x0cdbd380},
- {0x0000a3fc, 0x000f0f01},
- {0x0000a400, 0x8fa91f01},
- {0x0000a404, 0x00000000},
- {0x0000a408, 0x0e79e5c6},
- {0x0000a40c, 0x00820820},
- {0x0000a414, 0x1ce739cf},
- {0x0000a418, 0x2d0019ce},
- {0x0000a41c, 0x1ce739ce},
- {0x0000a420, 0x000001ce},
- {0x0000a424, 0x1ce739ce},
- {0x0000a428, 0x000001ce},
- {0x0000a42c, 0x1ce739ce},
- {0x0000a430, 0x1ce739ce},
- {0x0000a434, 0x00000000},
- {0x0000a438, 0x00001801},
- {0x0000a43c, 0x00000000},
- {0x0000a440, 0x00000000},
- {0x0000a444, 0x00000000},
- {0x0000a448, 0x04000000},
- {0x0000a44c, 0x00000001},
- {0x0000a450, 0x00010000},
- {0x0000a5c4, 0xbfad9d74},
- {0x0000a5c8, 0x0048060a},
- {0x0000a5cc, 0x00000637},
- {0x0000a760, 0x03020100},
- {0x0000a764, 0x09080504},
- {0x0000a768, 0x0d0c0b0a},
- {0x0000a76c, 0x13121110},
- {0x0000a770, 0x31301514},
- {0x0000a774, 0x35343332},
- {0x0000a778, 0x00000036},
- {0x0000a780, 0x00000838},
- {0x0000a7c0, 0x00000000},
- {0x0000a7c4, 0xfffffffc},
- {0x0000a7c8, 0x00000000},
- {0x0000a7cc, 0x00000000},
- {0x0000a7d0, 0x00000000},
- {0x0000a7d4, 0x00000004},
- {0x0000a7dc, 0x00000000},
-};
+#define ar9485_1_1_mac_postamble ar9300_2p2_mac_postamble
-static const u32 ar9485Common_1_1[][2] = {
- /* Addr allmodes */
- {0x00007010, 0x00000022},
- {0x00007020, 0x00000000},
- {0x00007034, 0x00000002},
- {0x00007038, 0x000004c2},
+static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x18012e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000080c},
};
-static const u32 ar9485_1_1_baseband_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
- {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
- {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
- {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
- {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
- {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
- {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
- {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
- {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
- {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
- {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
- {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
- {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
- {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
- {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
- {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
- {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
- {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
- {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
- {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
- {0x0000a234, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff},
- {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
- {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
- {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
- {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
- {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
- {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
- {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
- {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
- {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
- {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
- {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
- {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
- {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000be04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
- {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00060005},
+ {0x0000a004, 0x00810080},
+ {0x0000a008, 0x00830082},
+ {0x0000a00c, 0x00850084},
+ {0x0000a010, 0x01820181},
+ {0x0000a014, 0x01840183},
+ {0x0000a018, 0x01880185},
+ {0x0000a01c, 0x018a0189},
+ {0x0000a020, 0x02850284},
+ {0x0000a024, 0x02890288},
+ {0x0000a028, 0x028b028a},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x24242428},
+ {0x0000a098, 0x171e1e1e},
+ {0x0000a09c, 0x02020b0b},
+ {0x0000a0a0, 0x02020202},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x22072208},
+ {0x0000a0c4, 0x22052206},
+ {0x0000a0c8, 0x22032204},
+ {0x0000a0cc, 0x22012202},
+ {0x0000a0d0, 0x221f2200},
+ {0x0000a0d4, 0x221d221e},
+ {0x0000a0d8, 0x33023303},
+ {0x0000a0dc, 0x33003301},
+ {0x0000a0e0, 0x331e331f},
+ {0x0000a0e4, 0x4402331d},
+ {0x0000a0e8, 0x44004401},
+ {0x0000a0ec, 0x441e441f},
+ {0x0000a0f0, 0x55025503},
+ {0x0000a0f4, 0x55005501},
+ {0x0000a0f8, 0x551e551f},
+ {0x0000a0fc, 0x6602551d},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000296},
};
-static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -442,102 +234,34 @@ static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
};
-static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
- {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
- {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
- {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
- {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
- {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
- {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
- {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
- {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
- {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
- {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
- {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
- {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
- {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
-};
+#define ar9485Modes_high_ob_db_tx_gain_1_1 ar9485Modes_high_power_tx_gain_1_1
-static const u32 ar9485_1_1_radio_postamble[][2] = {
- /* Addr allmodes */
- {0x0001609c, 0x0b283f31},
- {0x000160ac, 0x24611800},
- {0x000160b0, 0x03284f3e},
- {0x0001610c, 0x00170000},
- {0x00016140, 0x50804008},
-};
+#define ar9485Modes_low_ob_db_tx_gain_1_1 ar9485Modes_high_ob_db_tx_gain_1_1
-static const u32 ar9485_1_1_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
+
+static const u32 ar9485_1_1[][2] = {
+ /* Addr allmodes */
+ {0x0000a580, 0x00000000},
+ {0x0000a584, 0x00000000},
+ {0x0000a588, 0x00000000},
+ {0x0000a58c, 0x00000000},
+ {0x0000a590, 0x00000000},
+ {0x0000a594, 0x00000000},
+ {0x0000a598, 0x00000000},
+ {0x0000a59c, 0x00000000},
+ {0x0000a5a0, 0x00000000},
+ {0x0000a5a4, 0x00000000},
+ {0x0000a5a8, 0x00000000},
+ {0x0000a5ac, 0x00000000},
+ {0x0000a5b0, 0x00000000},
+ {0x0000a5b4, 0x00000000},
+ {0x0000a5b8, 0x00000000},
+ {0x0000a5bc, 0x00000000},
};
static const u32 ar9485_1_1_radio_core[][2] = {
- /* Addr allmodes */
+ /* Addr allmodes */
{0x00016000, 0x36db6db6},
{0x00016004, 0x6db6db40},
{0x00016008, 0x73800000},
@@ -601,294 +325,145 @@ static const u32 ar9485_1_1_radio_core[][2] = {
{0x00016c44, 0x12000000},
};
-static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
- /* Addr allmodes */
- {0x00018c00, 0x18052e5e},
- {0x00018c04, 0x000801d8},
- {0x00018c08, 0x0000080c},
-};
-
-static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
- {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
- {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
- {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
- {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
- {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
- {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
- {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
- {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
- {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
- {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
- {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
- {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
- {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
-};
-
-static const u32 ar9485_1_1[][2] = {
- /* Addr allmodes */
- {0x0000a580, 0x00000000},
- {0x0000a584, 0x00000000},
- {0x0000a588, 0x00000000},
- {0x0000a58c, 0x00000000},
- {0x0000a590, 0x00000000},
- {0x0000a594, 0x00000000},
- {0x0000a598, 0x00000000},
- {0x0000a59c, 0x00000000},
- {0x0000a5a0, 0x00000000},
- {0x0000a5a4, 0x00000000},
- {0x0000a5a8, 0x00000000},
- {0x0000a5ac, 0x00000000},
- {0x0000a5b0, 0x00000000},
- {0x0000a5b4, 0x00000000},
- {0x0000a5b8, 0x00000000},
- {0x0000a5bc, 0x00000000},
-};
-
-static const u32 ar9485_modes_green_ob_db_tx_gain_1_1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
- {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
- {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
- {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
- {0x0000a508, 0x0c002e00, 0x0c002e00, 0x06000203, 0x06000203},
- {0x0000a50c, 0x11062202, 0x11062202, 0x0a000401, 0x0a000401},
- {0x0000a510, 0x17022e00, 0x17022e00, 0x0e000403, 0x0e000403},
- {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x12000405, 0x12000405},
- {0x0000a518, 0x25020ec0, 0x25020ec0, 0x15000604, 0x15000604},
- {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x18000605, 0x18000605},
- {0x0000a520, 0x2f001f04, 0x2f001f04, 0x1c000a04, 0x1c000a04},
- {0x0000a524, 0x35001fc4, 0x35001fc4, 0x21000a06, 0x21000a06},
- {0x0000a528, 0x3c022f04, 0x3c022f04, 0x29000a24, 0x29000a24},
- {0x0000a52c, 0x41023e85, 0x41023e85, 0x2f000e21, 0x2f000e21},
- {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000e20, 0x31000e20},
- {0x0000a534, 0x4d023f01, 0x4d023f01, 0x33000e20, 0x33000e20},
- {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b50c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b510, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b514, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b518, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b51c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b520, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b524, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b528, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b52c, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a},
- {0x0000b530, 0x0000003a, 0x0000003a, 0x0000003a, 0x0000003a},
- {0x0000b534, 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a},
- {0x0000b538, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b53c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b540, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b544, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b548, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b54c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b550, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b554, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b558, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b55c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b560, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b564, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b568, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b56c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b570, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b574, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b578, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b57c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
- {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
-};
-
-static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
- /* Addr allmodes */
- {0x00018c00, 0x18013e5e},
- {0x00018c04, 0x000801d8},
- {0x00018c08, 0x0000080c},
-};
-
-static const u32 ar9485_1_1_soc_preamble[][2] = {
- /* Addr allmodes */
- {0x00004014, 0xba280400},
- {0x00004090, 0x00aa10aa},
- {0x000040a4, 0x00a0c9c9},
- {0x00007010, 0x00000022},
- {0x00007020, 0x00000000},
- {0x00007034, 0x00000002},
- {0x00007038, 0x000004c2},
- {0x00007048, 0x00000002},
-};
-
-static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
- /* Addr allmodes */
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x6f7f0301},
- {0x0000a3a0, 0xca9228ee},
-};
-
-static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
- {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
- {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
- {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
- {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
- {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
- {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
- {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
- {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
- {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
- {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
- {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
- {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
- {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
-};
-
-static const u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = {
- /* Addr 5G_HT2 5G_HT40 */
- {0x00009e00, 0x03721821, 0x03721821},
- {0x0000a230, 0x0000400b, 0x00004016},
- {0x0000a254, 0x00000898, 0x00001130},
-};
-
-static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
- /* Addr allmodes */
- {0x00018c00, 0x18012e5e},
- {0x00018c04, 0x000801d8},
- {0x00018c08, 0x0000080c},
+static const u32 ar9485_1_1_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a8f6b},
+ {0x0000980c, 0x04800000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x5f3ca3de},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14750600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x52440bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0x00000000},
+ {0x00009c08, 0x03200000},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x1883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c00400},
+ {0x00009d18, 0x00000000},
+ {0x00009d1c, 0x00000000},
+ {0x00009e08, 0x0038233c},
+ {0x00009e24, 0x9927b515},
+ {0x00009e28, 0x12ef0200},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x80be4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x0000a20c, 0x00000000},
+ {0x0000a210, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2dc, 0x00000000},
+ {0x0000a2e0, 0x00000000},
+ {0x0000a2e4, 0x00000000},
+ {0x0000a2e8, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x000000ff},
+ {0x0000a3a8, 0x3b3b3b3b},
+ {0x0000a3ac, 0x2f2f2f2f},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000006},
+ {0x0000a3f8, 0x0cdbd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739cf},
+ {0x0000a418, 0x2d0019ce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00000000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x04000000},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a5c4, 0xbfad9d74},
+ {0x0000a5c8, 0x0048060a},
+ {0x0000a5cc, 0x00000637},
+ {0x0000a760, 0x03020100},
+ {0x0000a764, 0x09080504},
+ {0x0000a768, 0x0d0c0b0a},
+ {0x0000a76c, 0x13121110},
+ {0x0000a770, 0x31301514},
+ {0x0000a774, 0x35343332},
+ {0x0000a778, 0x00000036},
+ {0x0000a780, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000000},
};
static const u32 ar9485_common_rx_gain_1_1[][2] = {
- /* Addr allmodes */
+ /* Addr allmodes */
{0x0000a000, 0x00010000},
{0x0000a004, 0x00030002},
{0x0000a008, 0x00050004},
@@ -1019,143 +594,260 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = {
{0x0000a1fc, 0x00000296},
};
+static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x18052e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000080c},
+};
+
static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = {
- /* Addr allmodes */
+ /* Addr allmodes */
{0x00018c00, 0x18053e5e},
{0x00018c04, 0x000801d8},
{0x00018c08, 0x0000080c},
};
-static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00060005},
- {0x0000a004, 0x00810080},
- {0x0000a008, 0x00830082},
- {0x0000a00c, 0x00850084},
- {0x0000a010, 0x01820181},
- {0x0000a014, 0x01840183},
- {0x0000a018, 0x01880185},
- {0x0000a01c, 0x018a0189},
- {0x0000a020, 0x02850284},
- {0x0000a024, 0x02890288},
- {0x0000a028, 0x028b028a},
- {0x0000a02c, 0x03850384},
- {0x0000a030, 0x03890388},
- {0x0000a034, 0x038b038a},
- {0x0000a038, 0x038d038c},
- {0x0000a03c, 0x03910390},
- {0x0000a040, 0x03930392},
- {0x0000a044, 0x03950394},
- {0x0000a048, 0x00000396},
- {0x0000a04c, 0x00000000},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x28282828},
- {0x0000a084, 0x28282828},
- {0x0000a088, 0x28282828},
- {0x0000a08c, 0x28282828},
- {0x0000a090, 0x28282828},
- {0x0000a094, 0x24242428},
- {0x0000a098, 0x171e1e1e},
- {0x0000a09c, 0x02020b0b},
- {0x0000a0a0, 0x02020202},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x22072208},
- {0x0000a0c4, 0x22052206},
- {0x0000a0c8, 0x22032204},
- {0x0000a0cc, 0x22012202},
- {0x0000a0d0, 0x221f2200},
- {0x0000a0d4, 0x221d221e},
- {0x0000a0d8, 0x33023303},
- {0x0000a0dc, 0x33003301},
- {0x0000a0e0, 0x331e331f},
- {0x0000a0e4, 0x4402331d},
- {0x0000a0e8, 0x44004401},
- {0x0000a0ec, 0x441e441f},
- {0x0000a0f0, 0x55025503},
- {0x0000a0f4, 0x55005501},
- {0x0000a0f8, 0x551e551f},
- {0x0000a0fc, 0x6602551d},
- {0x0000a100, 0x66006601},
- {0x0000a104, 0x661e661f},
- {0x0000a108, 0x7703661d},
- {0x0000a10c, 0x77017702},
- {0x0000a110, 0x00007700},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x111f1100},
- {0x0000a148, 0x111d111e},
- {0x0000a14c, 0x111b111c},
- {0x0000a150, 0x22032204},
- {0x0000a154, 0x22012202},
- {0x0000a158, 0x221f2200},
- {0x0000a15c, 0x221d221e},
- {0x0000a160, 0x33013302},
- {0x0000a164, 0x331f3300},
- {0x0000a168, 0x4402331e},
- {0x0000a16c, 0x44004401},
- {0x0000a170, 0x441e441f},
- {0x0000a174, 0x55015502},
- {0x0000a178, 0x551f5500},
- {0x0000a17c, 0x6602551e},
- {0x0000a180, 0x66006601},
- {0x0000a184, 0x661e661f},
- {0x0000a188, 0x7703661d},
- {0x0000a18c, 0x77017702},
- {0x0000a190, 0x00007700},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000296},
+static const u32 ar9485_1_1_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x00004014, 0xba280400},
+ {0x00004090, 0x00aa10aa},
+ {0x000040a4, 0x00a0c9c9},
+ {0x00007010, 0x00000022},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+ {0x00007048, 0x00000002},
+};
+
+static const u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00009e00, 0x03721821, 0x03721821},
+ {0x0000a230, 0x0000400b, 0x00004016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9485_1_1_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
+ {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+ {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
+ {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
+ {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+ {0x0000a234, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
+ {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000be04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x18013e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000080c},
+};
+
+static const u32 ar9485_1_1_radio_postamble[][2] = {
+ /* Addr allmodes */
+ {0x0001609c, 0x0b283f31},
+ {0x000160ac, 0x24611800},
+ {0x000160b0, 0x03284f3e},
+ {0x0001610c, 0x00170000},
+ {0x00016140, 0x50804008},
+};
+
+static const u32 ar9485_1_1_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c22},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486200},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9ca00010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xa248105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
};
-#endif
+#endif /* INITVALS_9485_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
new file mode 100644
index 0000000..df97f21
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
@@ -0,0 +1,1284 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_955X_1P0_H
+#define INITVALS_955X_1P0_H
+
+/* AR955X 1.0 */
+
+static const u32 ar955x_1p0_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00016098, 0xd2dd5554, 0xd2dd5554, 0xd28b3330, 0xd28b3330},
+ {0x0001609c, 0x0a566f3a, 0x0a566f3a, 0x06345f2a, 0x06345f2a},
+ {0x000160ac, 0xa4647c00, 0xa4647c00, 0xa4646800, 0xa4646800},
+ {0x000160b0, 0x01885f52, 0x01885f52, 0x04accf3a, 0x04accf3a},
+ {0x00016104, 0xb7a00001, 0xb7a00001, 0xb7a00001, 0xb7a00001},
+ {0x0001610c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
+ {0x00016504, 0xb7a00001, 0xb7a00001, 0xb7a00001, 0xb7a00001},
+ {0x0001650c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000},
+ {0x00016540, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
+ {0x00016904, 0xb7a00001, 0xb7a00001, 0xb7a00001, 0xb7a00001},
+ {0x0001690c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000},
+ {0x00016940, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
+};
+
+static const u32 ar955x_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
+
+static const u32 ar955x_1p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcfa10822, 0xcfa10822},
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x005c0ec0, 0x005c0ec4, 0x005c0ec4, 0x005c0ec0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb01018, 0xffb01018, 0xffb01018, 0xffb01018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
+ {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
+ {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+ {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000c284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+};
+
+static const u32 ar955x_1p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x76d005b5},
+ {0x00016050, 0x557cf031},
+ {0x00016054, 0x13449440},
+ {0x00016058, 0x0c51c92c},
+ {0x0001605c, 0x3db7fffc},
+ {0x00016060, 0xfffffffc},
+ {0x00016064, 0x000f0278},
+ {0x00016068, 0x6db6db6c},
+ {0x0001606c, 0x6db60000},
+ {0x00016080, 0x00080000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x14214514},
+ {0x0001608c, 0x119f101e},
+ {0x00016090, 0x24926490},
+ {0x00016094, 0x00000000},
+ {0x000160a0, 0x0a108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480080},
+ {0x000160c0, 0x006db6d0},
+ {0x000160c4, 0x6db6db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x01e6c000},
+ {0x00016100, 0x11999601},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x02084080},
+ {0x00016148, 0x000080c0},
+ {0x00016280, 0x01800804},
+ {0x00016284, 0x00038dc5},
+ {0x00016288, 0x00000000},
+ {0x0001628c, 0x00000040},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00400705},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016400, 0x36db6db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016440, 0x7f80fff8},
+ {0x0001644c, 0x76d005b5},
+ {0x00016450, 0x557cf031},
+ {0x00016454, 0x13449440},
+ {0x00016458, 0x0c51c92c},
+ {0x0001645c, 0x3db7fffc},
+ {0x00016460, 0xfffffffc},
+ {0x00016464, 0x000f0278},
+ {0x00016468, 0x6db6db6c},
+ {0x0001646c, 0x6db60000},
+ {0x00016500, 0x11999601},
+ {0x00016508, 0x00080010},
+ {0x00016544, 0x02084080},
+ {0x00016548, 0x000080c0},
+ {0x00016780, 0x00000000},
+ {0x00016784, 0x00000000},
+ {0x00016788, 0x00400705},
+ {0x0001678c, 0x00800700},
+ {0x00016790, 0x00800700},
+ {0x00016794, 0x00000000},
+ {0x00016798, 0x00000000},
+ {0x0001679c, 0x00000000},
+ {0x000167a0, 0x00000001},
+ {0x000167a4, 0x00000001},
+ {0x000167a8, 0x00000000},
+ {0x000167ac, 0x00000000},
+ {0x000167b0, 0x00000000},
+ {0x000167b4, 0x00000000},
+ {0x000167b8, 0x00000000},
+ {0x000167bc, 0x00000000},
+ {0x000167c0, 0x000000a0},
+ {0x000167c4, 0x000c0000},
+ {0x000167c8, 0x14021402},
+ {0x000167cc, 0x00001402},
+ {0x000167d0, 0x00000000},
+ {0x000167d4, 0x00000000},
+ {0x00016800, 0x36db6db6},
+ {0x00016804, 0x6db6db40},
+ {0x00016808, 0x73f00000},
+ {0x0001680c, 0x00000000},
+ {0x00016840, 0x7f80fff8},
+ {0x0001684c, 0x76d005b5},
+ {0x00016850, 0x557cf031},
+ {0x00016854, 0x13449440},
+ {0x00016858, 0x0c51c92c},
+ {0x0001685c, 0x3db7fffc},
+ {0x00016860, 0xfffffffc},
+ {0x00016864, 0x000f0278},
+ {0x00016868, 0x6db6db6c},
+ {0x0001686c, 0x6db60000},
+ {0x00016900, 0x11999601},
+ {0x00016908, 0x00080010},
+ {0x00016944, 0x02084080},
+ {0x00016948, 0x000080c0},
+ {0x00016b80, 0x00000000},
+ {0x00016b84, 0x00000000},
+ {0x00016b88, 0x00400705},
+ {0x00016b8c, 0x00800700},
+ {0x00016b90, 0x00800700},
+ {0x00016b94, 0x00000000},
+ {0x00016b98, 0x00000000},
+ {0x00016b9c, 0x00000000},
+ {0x00016ba0, 0x00000001},
+ {0x00016ba4, 0x00000001},
+ {0x00016ba8, 0x00000000},
+ {0x00016bac, 0x00000000},
+ {0x00016bb0, 0x00000000},
+ {0x00016bb4, 0x00000000},
+ {0x00016bb8, 0x00000000},
+ {0x00016bbc, 0x00000000},
+ {0x00016bc0, 0x000000a0},
+ {0x00016bc4, 0x000c0000},
+ {0x00016bc8, 0x14021402},
+ {0x00016bcc, 0x00001402},
+ {0x00016bd0, 0x00000000},
+ {0x00016bd4, 0x00000000},
+};
+
+static const u32 ar955x_1p0_modes_xpa_tx_gain_table[][9] = {
+ /* Addr 5G_HT20_L 5G_HT40_L 5G_HT20_M 5G_HT40_M 5G_HT20_H 5G_HT40_H 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xfffd5aaa, 0xfffd5aaa},
+ {0x0000a2e0, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000a2e4, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xffffe0f0, 0xffffe0f0},
+ {0x0000a2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00},
+ {0x0000a410, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050da, 0x000050da},
+ {0x0000a500, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000000, 0x00000000},
+ {0x0000a504, 0x04000005, 0x04000005, 0x04000005, 0x04000005, 0x04000005, 0x04000005, 0x04000002, 0x04000002},
+ {0x0000a508, 0x08000009, 0x08000009, 0x08000009, 0x08000009, 0x08000009, 0x08000009, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c000006, 0x0c000006},
+ {0x0000a510, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x0f00000a, 0x0f00000a},
+ {0x0000a514, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x1300000c, 0x1300000c},
+ {0x0000a518, 0x19004008, 0x19004008, 0x19004008, 0x19004008, 0x18004008, 0x18004008, 0x1700000e, 0x1700000e},
+ {0x0000a51c, 0x1d00400a, 0x1d00400a, 0x1d00400a, 0x1d00400a, 0x1c00400a, 0x1c00400a, 0x1b000064, 0x1b000064},
+ {0x0000a520, 0x230020a2, 0x230020a2, 0x210020a2, 0x210020a2, 0x200020a2, 0x200020a2, 0x1f000242, 0x1f000242},
+ {0x0000a524, 0x2500006e, 0x2500006e, 0x2500006e, 0x2500006e, 0x2400006e, 0x2400006e, 0x23000229, 0x23000229},
+ {0x0000a528, 0x29022221, 0x29022221, 0x28022221, 0x28022221, 0x27022221, 0x27022221, 0x270002a2, 0x270002a2},
+ {0x0000a52c, 0x2d00062a, 0x2d00062a, 0x2c00062a, 0x2c00062a, 0x2a00062a, 0x2a00062a, 0x2c001203, 0x2c001203},
+ {0x0000a530, 0x340220a5, 0x340220a5, 0x320220a5, 0x320220a5, 0x2f0220a5, 0x2f0220a5, 0x30001803, 0x30001803},
+ {0x0000a534, 0x380022c5, 0x380022c5, 0x350022c5, 0x350022c5, 0x320022c5, 0x320022c5, 0x33000881, 0x33000881},
+ {0x0000a538, 0x3b002486, 0x3b002486, 0x39002486, 0x39002486, 0x36002486, 0x36002486, 0x38001809, 0x38001809},
+ {0x0000a53c, 0x3f00248a, 0x3f00248a, 0x3d00248a, 0x3d00248a, 0x3a00248a, 0x3a00248a, 0x3a000814, 0x3a000814},
+ {0x0000a540, 0x4202242c, 0x4202242c, 0x4102242c, 0x4102242c, 0x3f02242c, 0x3f02242c, 0x3f001a0c, 0x3f001a0c},
+ {0x0000a544, 0x490044c6, 0x490044c6, 0x460044c6, 0x460044c6, 0x420044c6, 0x420044c6, 0x43001a0e, 0x43001a0e},
+ {0x0000a548, 0x4d024485, 0x4d024485, 0x4a024485, 0x4a024485, 0x46024485, 0x46024485, 0x46001812, 0x46001812},
+ {0x0000a54c, 0x51044483, 0x51044483, 0x4e044483, 0x4e044483, 0x4a044483, 0x4a044483, 0x49001884, 0x49001884},
+ {0x0000a550, 0x5404a40c, 0x5404a40c, 0x5204a40c, 0x5204a40c, 0x4d04a40c, 0x4d04a40c, 0x4d001e84, 0x4d001e84},
+ {0x0000a554, 0x57024632, 0x57024632, 0x55024632, 0x55024632, 0x52024632, 0x52024632, 0x50001e69, 0x50001e69},
+ {0x0000a558, 0x5c00a634, 0x5c00a634, 0x5900a634, 0x5900a634, 0x5600a634, 0x5600a634, 0x550006f4, 0x550006f4},
+ {0x0000a55c, 0x5f026832, 0x5f026832, 0x5d026832, 0x5d026832, 0x5a026832, 0x5a026832, 0x59000ad3, 0x59000ad3},
+ {0x0000a560, 0x6602b012, 0x6602b012, 0x6202b012, 0x6202b012, 0x5d02b012, 0x5d02b012, 0x5e000ad5, 0x5e000ad5},
+ {0x0000a564, 0x6e02d0e1, 0x6e02d0e1, 0x6802d0e1, 0x6802d0e1, 0x6002d0e1, 0x6002d0e1, 0x61001ced, 0x61001ced},
+ {0x0000a568, 0x7202b4c4, 0x7202b4c4, 0x6c02b4c4, 0x6c02b4c4, 0x6502b4c4, 0x6502b4c4, 0x660018d4, 0x660018d4},
+ {0x0000a56c, 0x75007894, 0x75007894, 0x70007894, 0x70007894, 0x6b007894, 0x6b007894, 0x660018d4, 0x660018d4},
+ {0x0000a570, 0x7b025c74, 0x7b025c74, 0x75025c74, 0x75025c74, 0x70025c74, 0x70025c74, 0x660018d4, 0x660018d4},
+ {0x0000a574, 0x8300bcb5, 0x8300bcb5, 0x7a00bcb5, 0x7a00bcb5, 0x7600bcb5, 0x7600bcb5, 0x660018d4, 0x660018d4},
+ {0x0000a578, 0x8a04dc74, 0x8a04dc74, 0x7f04dc74, 0x7f04dc74, 0x7c04dc74, 0x7c04dc74, 0x660018d4, 0x660018d4},
+ {0x0000a57c, 0x8a04dc74, 0x8a04dc74, 0x7f04dc74, 0x7f04dc74, 0x7c04dc74, 0x7c04dc74, 0x660018d4, 0x660018d4},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x03804000, 0x03804000},
+ {0x0000a610, 0x04c08c01, 0x04c08c01, 0x04808b01, 0x04808b01, 0x04808a01, 0x04808a01, 0x0300ca02, 0x0300ca02},
+ {0x0000a614, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00000e04, 0x00000e04},
+ {0x0000a618, 0x04010c01, 0x04010c01, 0x03c10b01, 0x03c10b01, 0x03810a01, 0x03810a01, 0x03014000, 0x03014000},
+ {0x0000a61c, 0x03814e05, 0x03814e05, 0x03414d05, 0x03414d05, 0x03414d05, 0x03414d05, 0x00000000, 0x00000000},
+ {0x0000a620, 0x04010303, 0x04010303, 0x03c10303, 0x03c10303, 0x03810303, 0x03810303, 0x00000000, 0x00000000},
+ {0x0000a624, 0x03814e05, 0x03814e05, 0x03414d05, 0x03414d05, 0x03414d05, 0x03414d05, 0x03014000, 0x03014000},
+ {0x0000a628, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x03804c05, 0x03804c05},
+ {0x0000a62c, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x0701de06, 0x0701de06},
+ {0x0000a630, 0x03418000, 0x03418000, 0x03018000, 0x03018000, 0x02c18000, 0x02c18000, 0x07819c07, 0x07819c07},
+ {0x0000a634, 0x03815004, 0x03815004, 0x03414f04, 0x03414f04, 0x03414e04, 0x03414e04, 0x0701dc07, 0x0701dc07},
+ {0x0000a638, 0x03005302, 0x03005302, 0x02c05202, 0x02c05202, 0x02805202, 0x02805202, 0x0701dc07, 0x0701dc07},
+ {0x0000a63c, 0x04c09302, 0x04c09302, 0x04809202, 0x04809202, 0x04809202, 0x04809202, 0x0701dc07, 0x0701dc07},
+ {0x0000b2dc, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xfffd5aaa, 0xfffd5aaa},
+ {0x0000b2e0, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000b2e4, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xffffe0f0, 0xffffe0f0},
+ {0x0000b2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00},
+ {0x0000c2dc, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xfffd5aaa, 0xfffd5aaa},
+ {0x0000c2e0, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000c2e4, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xffffe0f0, 0xffffe0f0},
+ {0x0000c2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00},
+ {0x00016044, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4},
+ {0x00016048, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x66482401, 0x66482401},
+ {0x00016280, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01808e84, 0x01808e84},
+ {0x00016444, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4},
+ {0x00016448, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x66482401, 0x66482401},
+ {0x00016844, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4},
+ {0x00016848, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x66482401, 0x66482401},
+};
+
+static const u32 ar955x_1p0_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c22},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008140, 0x000000fe},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9d400010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00001d40},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x0000001f},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0xffff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48107b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x8c7901ff},
+};
+
+static const u32 ar955x_1p0_common_rx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x23232323},
+ {0x0000b084, 0x21232323},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar955x_1p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x0280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x6400a190},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14000600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098bc, 0x00000002},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x813e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x01193b93},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a248, 0x00000140},
+ {0x0000a2a0, 0x00000007},
+ {0x0000a2c0, 0x00000007},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x1f020503},
+ {0x0000a39c, 0x29180c03},
+ {0x0000a3a0, 0x9a8b6844},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000000},
+ {0x0000a3f8, 0x0c9bd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00100000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x05000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a644, 0x3fad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000000},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+ {0x0000b8d0, 0x004b6a8e},
+ {0x0000b8d4, 0x00000820},
+ {0x0000b8dc, 0x00000000},
+ {0x0000b8f0, 0x00000000},
+ {0x0000b8f4, 0x00000000},
+ {0x0000c2d0, 0x00000080},
+ {0x0000c2d4, 0x00000000},
+ {0x0000c2ec, 0x00000000},
+ {0x0000c2f0, 0x00000000},
+ {0x0000c2f4, 0x00000000},
+ {0x0000c2f8, 0x00000000},
+ {0x0000c408, 0x0e79e5c0},
+ {0x0000c40c, 0x00820820},
+ {0x0000c420, 0x00000000},
+};
+
+static const u32 ar955x_1p0_common_wo_xlna_rx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar955x_1p0_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x00007000, 0x00000000},
+ {0x00007004, 0x00000000},
+ {0x00007008, 0x00000000},
+ {0x0000700c, 0x00000000},
+ {0x0000701c, 0x00000000},
+ {0x00007020, 0x00000000},
+ {0x00007024, 0x00000000},
+ {0x00007028, 0x00000000},
+ {0x0000702c, 0x00000000},
+ {0x00007030, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+ {0x00007048, 0x00000000},
+};
+
+static const u32 ar955x_1p0_common_wo_xlna_rx_gain_bounds[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+};
+
+static const u32 ar955x_1p0_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar955x_1p0_common_rx_gain_bounds[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302018, 0x50302018},
+};
+
+static const u32 ar955x_1p0_modes_no_xpa_tx_gain_table[][9] = {
+ /* Addr 5G_HT20_L 5G_HT40_L 5G_HT20_M 5G_HT40_M 5G_HT20_H 5G_HT40_H 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0xfffe5aaa, 0xfffe5aaa},
+ {0x0000a2e0, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0xffffe0f0, 0xffffe0f0},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xffffef00, 0xffffef00},
+ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d8, 0x000050d8, 0x000050d8, 0x000050d8, 0x000050d7, 0x000050d7},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00002220, 0x00002220, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x04002222, 0x04002222, 0x04002222, 0x04002222, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+ {0x0000a508, 0x09002421, 0x09002421, 0x09002421, 0x09002421, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0d002621, 0x0d002621, 0x0d002621, 0x0d002621, 0x0b000006, 0x0b000006},
+ {0x0000a510, 0x13004620, 0x13004620, 0x13004620, 0x13004620, 0x13004620, 0x13004620, 0x0f00000a, 0x0f00000a},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x19004a20, 0x19004a20, 0x19004a20, 0x19004a20, 0x1300000c, 0x1300000c},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x1d004e20, 0x1d004e20, 0x1d004e20, 0x1d004e20, 0x1700000e, 0x1700000e},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x21005420, 0x21005420, 0x21005420, 0x21005420, 0x1b000012, 0x1b000012},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x26005e20, 0x26005e20, 0x26005e20, 0x26005e20, 0x1f00004a, 0x1f00004a},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x2b005e40, 0x2b005e40, 0x2b005e40, 0x2b005e40, 0x23000244, 0x23000244},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x2f005e42, 0x2f005e42, 0x2f005e42, 0x2f005e42, 0x2700022b, 0x2700022b},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x33005e44, 0x33005e44, 0x33005e44, 0x33005e44, 0x2b000625, 0x2b000625},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x38005e65, 0x38005e65, 0x38005e65, 0x38005e65, 0x2f001006, 0x2f001006},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x3c005e69, 0x3c005e69, 0x3c005e69, 0x3c005e69, 0x330008a0, 0x330008a0},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x40005e6b, 0x40005e6b, 0x40005e6b, 0x40005e6b, 0x37000a2a, 0x37000a2a},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x44005e6d, 0x44005e6d, 0x44005e6d, 0x44005e6d, 0x3b001c23, 0x3b001c23},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x49005e72, 0x49005e72, 0x49005e72, 0x49005e72, 0x3f0014a0, 0x3f0014a0},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x4e005eb2, 0x4e005eb2, 0x4e005eb2, 0x4e005eb2, 0x43001882, 0x43001882},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x53005f12, 0x53005f12, 0x53005f12, 0x53005f12, 0x47001ca2, 0x47001ca2},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x59025eb2, 0x59025eb2, 0x59025eb2, 0x59025eb2, 0x4b001ec3, 0x4b001ec3},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x5e025f12, 0x5e025f12, 0x5e025f12, 0x5e025f12, 0x4f00148c, 0x4f00148c},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x61027f12, 0x61027f12, 0x61027f12, 0x61027f12, 0x53001c6e, 0x53001c6e},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x6702bf12, 0x6702bf12, 0x6702bf12, 0x6702bf12, 0x57001c92, 0x57001c92},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x6b02bf14, 0x6b02bf14, 0x6b02bf14, 0x6b02bf14, 0x5c001af6, 0x5c001af6},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00804000, 0x00804000, 0x00804000, 0x00804000, 0x04005001, 0x04005001},
+ {0x0000a614, 0x00804201, 0x00804201, 0x00804201, 0x00804201, 0x00804201, 0x00804201, 0x03808e02, 0x03808e02},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802, 0x0300c000, 0x0300c000},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x0280ca03, 0x0280ca03, 0x0280ca03, 0x0280ca03, 0x03808e02, 0x03808e02},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x04c15104, 0x04c15104, 0x04c15104, 0x04c15104, 0x03410c03, 0x03410c03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04014c03, 0x04014c03},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x05818d04, 0x05818d04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x0801cd04, 0x0801cd04},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x0801e007, 0x0801e007},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x0801e007, 0x0801e007},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x0801e007, 0x0801e007},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x0801e007, 0x0801e007},
+ {0x0000b2dc, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0xfffe5aaa, 0xfffe5aaa},
+ {0x0000b2e0, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0xffffe0f0, 0xffffe0f0},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xffffef00, 0xffffef00},
+ {0x0000c2dc, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0xfffe5aaa, 0xfffe5aaa},
+ {0x0000c2e0, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000c2e4, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0xffffe0f0, 0xffffe0f0},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xffffef00, 0xffffef00},
+ {0x00016044, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x054922d4, 0x054922d4},
+ {0x00016048, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
+ {0x00016444, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x054922d4, 0x054922d4},
+ {0x00016448, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
+ {0x00016844, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x054922d4, 0x054922d4},
+ {0x00016848, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
+};
+
+static const u32 ar955x_1p0_soc_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
+};
+
+static const u32 ar955x_1p0_modes_fast_clock[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00001030, 0x00000268, 0x000004d0},
+ {0x00001070, 0x0000018c, 0x00000318},
+ {0x000010b0, 0x00000fd0, 0x00001fa0},
+ {0x00008014, 0x044c044c, 0x08980898},
+ {0x0000801c, 0x148ec02b, 0x148ec057},
+ {0x00008318, 0x000044c0, 0x00008980},
+ {0x00009e00, 0x0372131c, 0x0372131c},
+ {0x0000a230, 0x0000000b, 0x00000016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+#endif /* INITVALS_955X_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index 06b3f0d..6e1915a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2010 Atheros Communications Inc.
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -19,18 +20,7 @@
/* AR9580 1.0 */
-static const u32 ar9580_1p0_modes_fast_clock[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
- {0x00001030, 0x00000268, 0x000004d0},
- {0x00001070, 0x0000018c, 0x00000318},
- {0x000010b0, 0x00000fd0, 0x00001fa0},
- {0x00008014, 0x044c044c, 0x08980898},
- {0x0000801c, 0x148ec02b, 0x148ec057},
- {0x00008318, 0x000044c0, 0x00008980},
- {0x00009e00, 0x0372131c, 0x0372131c},
- {0x0000a230, 0x0000000b, 0x00000016},
- {0x0000a254, 0x00000898, 0x00001130},
-};
+#define ar9580_1p0_modes_fast_clock ar9300Modes_fast_clock_2p2
static const u32 ar9580_1p0_radio_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
@@ -208,17 +198,7 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
{0x0000c420, 0x00000000},
};
-static const u32 ar9580_1p0_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
+#define ar9580_1p0_mac_postamble ar9300_2p2_mac_postamble
static const u32 ar9580_1p0_low_ob_db_tx_gain_table[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
@@ -326,111 +306,7 @@ static const u32 ar9580_1p0_low_ob_db_tx_gain_table[][5] = {
{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
-static const u32 ar9580_1p0_high_power_tx_gain_table[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
- {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
- {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
- {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
- {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
- {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
- {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
- {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
- {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
- {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
- {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
- {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
- {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
- {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
- {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
- {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
- {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
- {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
- {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
- {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
- {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
- {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
- {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
- {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
- {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
- {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
- {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
- {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
- {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
- {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
- {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
- {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
- {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
- {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
- {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
- {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
- {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
- {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
- {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
- {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
- {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
- {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
- {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
- {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
- {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
- {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
- {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
- {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
- {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
- {0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x47801a83, 0x47801a83},
- {0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x4a801c84, 0x4a801c84},
- {0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x4e801ce3, 0x4e801ce3},
- {0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x52801ce5, 0x52801ce5},
- {0x0000a5dc, 0x7082708c, 0x7082708c, 0x56801ce9, 0x56801ce9},
- {0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x5a801ceb, 0x5a801ceb},
- {0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
- {0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
- {0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
- {0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
- {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
- {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
- {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
- {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
- {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
- {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
- {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
- {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
- {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
- {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
- {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
- {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
- {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
- {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
- {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
- {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
- {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
- {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
-};
+#define ar9580_1p0_high_power_tx_gain_table ar9580_1p0_low_ob_db_tx_gain_table
static const u32 ar9580_1p0_lowest_ob_db_tx_gain_table[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
@@ -538,12 +414,7 @@ static const u32 ar9580_1p0_lowest_ob_db_tx_gain_table[][5] = {
{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
-static const u32 ar9580_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
- /* Addr allmodes */
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x6f7f0301},
- {0x0000a3a0, 0xca9228ee},
-};
+#define ar9580_1p0_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
static const u32 ar9580_1p0_mac_core[][2] = {
/* Addr allmodes */
@@ -808,376 +679,11 @@ static const u32 ar9580_1p0_mixed_ob_db_tx_gain_table[][5] = {
{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
-static const u32 ar9580_1p0_wo_xlna_rx_gain_table[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x03820190},
- {0x0000a030, 0x03840383},
- {0x0000a034, 0x03880385},
- {0x0000a038, 0x038a0389},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x29292929},
- {0x0000a084, 0x29292929},
- {0x0000a088, 0x29292929},
- {0x0000a08c, 0x29292929},
- {0x0000a090, 0x22292929},
- {0x0000a094, 0x1d1d2222},
- {0x0000a098, 0x0c111117},
- {0x0000a09c, 0x00030303},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x32323232},
- {0x0000b084, 0x2f2f3232},
- {0x0000b088, 0x23282a2d},
- {0x0000b08c, 0x1c1e2123},
- {0x0000b090, 0x14171919},
- {0x0000b094, 0x0e0e1214},
- {0x0000b098, 0x03050707},
- {0x0000b09c, 0x00030303},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
+#define ar9580_1p0_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
-static const u32 ar9580_1p0_soc_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
-};
+#define ar9580_1p0_soc_postamble ar9300_2p2_soc_postamble
-static const u32 ar9580_1p0_high_ob_db_tx_gain_table[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
- {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
- {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
- {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
- {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
- {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
- {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
- {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
- {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
- {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
- {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
- {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
- {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
- {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
- {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
- {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
- {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
- {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
- {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
- {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
- {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
- {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
- {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
- {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
- {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
- {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
- {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
- {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
- {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
- {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
- {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
- {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
- {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
- {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
- {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
- {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
- {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
- {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
- {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
- {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
- {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
- {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
- {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
- {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
- {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
- {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
- {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
- {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
- {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
- {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
- {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
- {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
- {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
- {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
- {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
- {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
- {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
- {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000c2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
- {0x0000c2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
- {0x0000c2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
- {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
- {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
- {0x00016444, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
- {0x00016448, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
- {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
- {0x00016844, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
- {0x00016848, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
- {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
-};
+#define ar9580_1p0_high_ob_db_tx_gain_table ar9300Modes_high_ob_db_tx_gain_table_2p2
static const u32 ar9580_1p0_soc_preamble[][2] = {
/* Addr allmodes */
@@ -1189,265 +695,7 @@ static const u32 ar9580_1p0_soc_preamble[][2] = {
{0x00007048, 0x00000008},
};
-static const u32 ar9580_1p0_rx_gain_table[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x01910190},
- {0x0000a030, 0x01930192},
- {0x0000a034, 0x01950194},
- {0x0000a038, 0x038a0196},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x22222229},
- {0x0000a084, 0x1d1d1d1d},
- {0x0000a088, 0x1d1d1d1d},
- {0x0000a08c, 0x1d1d1d1d},
- {0x0000a090, 0x171d1d1d},
- {0x0000a094, 0x11111717},
- {0x0000a098, 0x00030311},
- {0x0000a09c, 0x00000000},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x2a2d2f32},
- {0x0000b084, 0x21232328},
- {0x0000b088, 0x19191c1e},
- {0x0000b08c, 0x12141417},
- {0x0000b090, 0x07070e0e},
- {0x0000b094, 0x03030305},
- {0x0000b098, 0x00000003},
- {0x0000b09c, 0x00000000},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
+#define ar9580_1p0_rx_gain_table ar9462_common_rx_gain_table_2p0
static const u32 ar9580_1p0_radio_core[][2] = {
/* Addr allmodes */
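Most of the ar9580 reductions above use the same pattern: a table that is byte-for-byte identical to an existing ar9300/ar9462 table is dropped and its name is turned into an alias, so every existing reference keeps compiling while only one copy of the data remains in the driver. A small self-contained sketch of the idea, with illustrative names (shared_rx_gain_table, newchip_rx_gain_table) and only the first two rows of the real data:

/* One shared copy of the register data. */
static const u32 shared_rx_gain_table[][2] = {
	/* Addr        allmodes */
	{0x0000a000, 0x00010000},
	{0x0000a004, 0x00030002},
};

/* The chip-specific header simply points its old name at the shared table. */
#define newchip_rx_gain_table shared_rx_gain_table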
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index a277cf6..b09285c 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -214,6 +214,7 @@ struct ath_frame_info {
enum ath9k_key_type keytype;
u8 keyix;
u8 retries;
+ u8 rtscts_rate;
};
struct ath_buf_state {
@@ -296,6 +297,8 @@ struct ath_tx {
struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
struct ath_descdma txdma;
struct ath_txq *txq_map[WME_NUM_AC];
+ u32 txq_max_pending[WME_NUM_AC];
+ u16 max_aggr_framelen[WME_NUM_AC][4][32];
};
struct ath_rx_edma {
@@ -307,6 +310,7 @@ struct ath_rx {
u8 defant;
u8 rxotherant;
u32 *rxlink;
+ u32 num_pkts;
unsigned int rxfilter;
spinlock_t rxbuflock;
struct list_head rxbuf;
@@ -325,6 +329,9 @@ int ath_rx_init(struct ath_softc *sc, int nbufs);
void ath_rx_cleanup(struct ath_softc *sc);
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
+void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq);
+void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq);
+void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq);
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx);
void ath_draintxq(struct ath_softc *sc,
@@ -336,6 +343,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs);
void ath_tx_cleanup(struct ath_softc *sc);
int ath_txq_update(struct ath_softc *sc, int qnum,
struct ath9k_tx_queue_info *q);
+void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop);
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_tx_control *txctl);
void ath_tx_tasklet(struct ath_softc *sc);
@@ -355,7 +363,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
struct ath_vif {
int av_bslot;
- bool is_bslot_active, primary_sta_vif;
+ bool primary_sta_vif;
__le64 tsf_adjust; /* TSF adjustment for staggered beacons */
struct ath_buf *av_bcbuf;
};
@@ -381,6 +389,7 @@ struct ath_beacon_config {
u16 dtim_period;
u16 bmiss_timeout;
u8 dtim_count;
+ bool enable_beacon;
};
struct ath_beacon {
@@ -392,7 +401,6 @@ struct ath_beacon {
u32 beaconq;
u32 bmisscnt;
- u32 ast_be_xmit;
u32 bc_tstamp;
struct ieee80211_vif *bslot[ATH_BCBUF];
int slottime;
@@ -406,17 +414,19 @@ struct ath_beacon {
bool tx_last;
};
-void ath_beacon_tasklet(unsigned long data);
-void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif);
-int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif);
-void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp);
-int ath_beaconq_config(struct ath_softc *sc);
-void ath_set_beacon(struct ath_softc *sc);
+void ath9k_beacon_tasklet(unsigned long data);
+bool ath9k_allow_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif);
+void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
+ u32 changed);
+void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
+void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
+void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif);
+void ath9k_set_beacon(struct ath_softc *sc);
void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
-/*******/
-/* ANI */
-/*******/
+/*******************/
+/* Link Monitoring */
+/*******************/
#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */
#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */
@@ -427,7 +437,9 @@ void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
#define ATH_PAPRD_TIMEOUT 100 /* msecs */
+#define ATH_PLL_WORK_INTERVAL 100
+void ath_tx_complete_poll_work(struct work_struct *work);
void ath_reset_work(struct work_struct *work);
void ath_hw_check(struct work_struct *work);
void ath_hw_pll_work(struct work_struct *work);
@@ -435,23 +447,35 @@ void ath_rx_poll(unsigned long data);
void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon);
void ath_paprd_calibrate(struct work_struct *work);
void ath_ani_calibrate(unsigned long data);
-void ath_start_ani(struct ath_common *common);
+void ath_start_ani(struct ath_softc *sc);
+void ath_stop_ani(struct ath_softc *sc);
+void ath_check_ani(struct ath_softc *sc);
+int ath_update_survey_stats(struct ath_softc *sc);
+void ath_update_survey_nf(struct ath_softc *sc, int channel);
+void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
/**********/
/* BTCOEX */
/**********/
+enum bt_op_flags {
+ BT_OP_PRIORITY_DETECTED,
+ BT_OP_SCAN,
+};
+
struct ath_btcoex {
bool hw_timer_enabled;
spinlock_t btcoex_lock;
struct timer_list period_timer; /* Timer for BT period */
u32 bt_priority_cnt;
unsigned long bt_priority_time;
+ unsigned long op_flags;
int bt_stomp_type; /* Types of BT stomping */
u32 btcoex_no_stomp; /* in usec */
u32 btcoex_period; /* in usec */
u32 btscan_no_stomp; /* in usec */
u32 duty_cycle;
+ u32 bt_wait_time;
struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
struct ath_mci_profile mci;
};
@@ -465,6 +489,7 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc);
void ath9k_btcoex_timer_pause(struct ath_softc *sc);
void ath9k_btcoex_handle_interrupt(struct ath_softc *sc, u32 status);
u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen);
+void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc);
#else
static inline int ath9k_init_btcoex(struct ath_softc *sc)
{
@@ -488,8 +513,17 @@ static inline u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc,
{
return 0;
}
+static inline void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
+{
+}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
+struct ath9k_wow_pattern {
+ u8 pattern_bytes[MAX_PATTERN_SIZE];
+ u8 mask_bytes[MAX_PATTERN_SIZE];
+ u32 pattern_len;
+};
+
/********************/
/* LED Control */
/********************/
@@ -513,8 +547,10 @@ static inline void ath_deinit_leds(struct ath_softc *sc)
}
#endif
-
+/*******************************/
/* Antenna diversity/combining */
+/*******************************/
+
#define ATH_ANT_RX_CURRENT_SHIFT 4
#define ATH_ANT_RX_MAIN_SHIFT 2
#define ATH_ANT_RX_MASK 0x3
@@ -567,6 +603,9 @@ struct ath_ant_comb {
unsigned long scan_start_time;
};
+void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
+void ath_ant_comb_update(struct ath_softc *sc);
+
/********************/
/* Main driver core */
/********************/
@@ -584,15 +623,14 @@ struct ath_ant_comb {
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
#define ATH_RATE_DUMMY_MARKER 0
-#define SC_OP_INVALID BIT(0)
-#define SC_OP_BEACONS BIT(1)
-#define SC_OP_OFFCHANNEL BIT(2)
-#define SC_OP_RXFLUSH BIT(3)
-#define SC_OP_TSF_RESET BIT(4)
-#define SC_OP_BT_PRIORITY_DETECTED BIT(5)
-#define SC_OP_BT_SCAN BIT(6)
-#define SC_OP_ANI_RUN BIT(7)
-#define SC_OP_PRIM_STA_VIF BIT(8)
+enum sc_op_flags {
+ SC_OP_INVALID,
+ SC_OP_BEACONS,
+ SC_OP_RXFLUSH,
+ SC_OP_ANI_RUN,
+ SC_OP_PRIM_STA_VIF,
+ SC_OP_HW_RESET,
+};
/* Powersave flags */
#define PS_WAIT_FOR_BEACON BIT(0)
@@ -638,9 +676,9 @@ struct ath_softc {
struct completion paprd_complete;
unsigned int hw_busy_count;
+ unsigned long sc_flags;
u32 intrstatus;
- u32 sc_flags; /* SC_OP_* */
u16 ps_flags; /* PS_* */
u16 curtxpow;
bool ps_enabled;
@@ -678,6 +716,7 @@ struct ath_softc {
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
struct ath_btcoex btcoex;
struct ath_mci_coex mci_coex;
+ struct work_struct mci_work;
#endif
struct ath_descdma txsdma;
@@ -685,6 +724,13 @@ struct ath_softc {
struct ath_ant_comb ant_comb;
u8 ant_tx, ant_rx;
struct dfs_pattern_detector *dfs_detector;
+ u32 wow_enabled;
+
+#ifdef CONFIG_PM_SLEEP
+ atomic_t wow_got_bmiss_intr;
+ atomic_t wow_sleep_proc_intr; /* in the middle of WoW sleep ? */
+ u32 wow_intr_before_sleep;
+#endif
};
void ath9k_tasklet(unsigned long data);
@@ -700,6 +746,7 @@ extern int ath9k_modparam_nohwcrypt;
extern int led_blink;
extern bool is_ath9k_unloaded;
+u8 ath9k_parse_mpdudensity(u8 mpdudensity);
irqreturn_t ath_isr(int irq, void *dev);
int ath9k_init_device(u16 devid, struct ath_softc *sc,
const struct ath_bus_ops *bus_ops);
@@ -736,5 +783,4 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ath9k_vif_iter_data *iter_data);
-
#endif /* ATH9K_H */
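The ath9k.h hunk above also changes flag handling: the SC_OP_* BIT() masks become plain bit numbers in an enum and sc_flags becomes an unsigned long, so the atomic bitops from <linux/bitops.h> can be used, as the beacon.c changes below do with test_bit() and set_bit(). A trimmed, standalone sketch of the pattern (example_flag_usage() and the file-scope sc_flags variable are illustrative; in the driver the field lives in struct ath_softc):

#include <linux/bitops.h>

enum sc_op_flags {
	SC_OP_INVALID,
	SC_OP_BEACONS,
	SC_OP_HW_RESET,
};

static unsigned long sc_flags;

static void example_flag_usage(void)
{
	set_bit(SC_OP_BEACONS, &sc_flags);	  /* was: sc_flags |= SC_OP_BEACONS  */

	if (test_bit(SC_OP_HW_RESET, &sc_flags))  /* was: sc_flags & SC_OP_HW_RESET  */
		return;

	clear_bit(SC_OP_BEACONS, &sc_flags);	  /* was: sc_flags &= ~SC_OP_BEACONS */
}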
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 11bc55e..76f07d8 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -30,7 +30,7 @@ static void ath9k_reset_beacon_status(struct ath_softc *sc)
* the operating mode of the station (AP or AdHoc). Parameters are AIFS
* settings and channel width min/max
*/
-int ath_beaconq_config(struct ath_softc *sc)
+static void ath9k_beaconq_config(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -38,6 +38,7 @@ int ath_beaconq_config(struct ath_softc *sc)
struct ath_txq *txq;
ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi);
+
if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
/* Always burst out beacon and CAB traffic. */
qi.tqi_aifs = 1;
@@ -48,17 +49,17 @@ int ath_beaconq_config(struct ath_softc *sc)
txq = sc->tx.txq_map[WME_AC_BE];
ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi_be);
qi.tqi_aifs = qi_be.tqi_aifs;
- qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
+ if (ah->slottime == ATH9K_SLOT_TIME_20)
+ qi.tqi_cwmin = 2*qi_be.tqi_cwmin;
+ else
+ qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
qi.tqi_cwmax = qi_be.tqi_cwmax;
}
if (!ath9k_hw_set_txq_props(ah, sc->beacon.beaconq, &qi)) {
- ath_err(common,
- "Unable to update h/w beacon queue parameters\n");
- return 0;
+ ath_err(common, "Unable to update h/w beacon queue parameters\n");
} else {
ath9k_hw_resettxqueue(ah, sc->beacon.beaconq);
- return 1;
}
}
@@ -67,7 +68,7 @@ int ath_beaconq_config(struct ath_softc *sc)
* up rate codes, and channel flags. Beacons are always sent out at the
* lowest rate, and are not retried.
*/
-static void ath_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
+static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
struct ath_buf *bf, int rateidx)
{
struct sk_buff *skb = bf->bf_mpdu;
@@ -78,8 +79,6 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
u8 chainmask = ah->txchainmask;
u8 rate = 0;
- ath9k_reset_beacon_status(sc);
-
sband = &sc->sbands[common->hw->conf.channel->band];
rate = sband->bitrates[rateidx].hw_value;
if (vif->bss_conf.use_short_preamble)
@@ -108,7 +107,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
}
-static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void ath9k_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -125,28 +124,22 @@ static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
}
}
-static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_buf *bf;
- struct ath_vif *avp;
+ struct ath_vif *avp = (void *)vif->drv_priv;
struct sk_buff *skb;
- struct ath_txq *cabq;
+ struct ath_txq *cabq = sc->beacon.cabq;
struct ieee80211_tx_info *info;
+ struct ieee80211_mgmt *mgmt_hdr;
int cabq_depth;
- ath9k_reset_beacon_status(sc);
-
- avp = (void *)vif->drv_priv;
- cabq = sc->beacon.cabq;
-
- if ((avp->av_bcbuf == NULL) || !avp->is_bslot_active)
+ if (avp->av_bcbuf == NULL)
return NULL;
- /* Release the old beacon first */
-
bf = avp->av_bcbuf;
skb = bf->bf_mpdu;
if (skb) {
@@ -156,14 +149,14 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
bf->bf_buf_addr = 0;
}
- /* Get a new beacon from mac80211 */
-
skb = ieee80211_beacon_get(hw, vif);
- bf->bf_mpdu = skb;
if (skb == NULL)
return NULL;
- ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp =
- avp->tsf_adjust;
+
+ bf->bf_mpdu = skb;
+
+ mgmt_hdr = (struct ieee80211_mgmt *)skb->data;
+ mgmt_hdr->u.beacon.timestamp = avp->tsf_adjust;
info = IEEE80211_SKB_CB(skb);
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -209,61 +202,52 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
}
}
- ath_beacon_setup(sc, vif, bf, info->control.rates[0].idx);
+ ath9k_beacon_setup(sc, vif, bf, info->control.rates[0].idx);
while (skb) {
- ath_tx_cabq(hw, skb);
+ ath9k_tx_cabq(hw, skb);
skb = ieee80211_get_buffered_bc(hw, vif);
}
return bf;
}
-int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif)
+void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_vif *avp;
- struct ath_buf *bf;
- struct sk_buff *skb;
- struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
- __le64 tstamp;
-
- avp = (void *)vif->drv_priv;
-
- /* Allocate a beacon descriptor if we haven't done so. */
- if (!avp->av_bcbuf) {
- /* Allocate beacon state for hostap/ibss. We know
- * a buffer is available. */
- avp->av_bcbuf = list_first_entry(&sc->beacon.bbuf,
- struct ath_buf, list);
- list_del(&avp->av_bcbuf->list);
-
- if (ath9k_uses_beacons(vif->type)) {
- int slot;
- /*
- * Assign the vif to a beacon xmit slot. As
- * above, this cannot fail to find one.
- */
- avp->av_bslot = 0;
- for (slot = 0; slot < ATH_BCBUF; slot++)
- if (sc->beacon.bslot[slot] == NULL) {
- avp->av_bslot = slot;
- avp->is_bslot_active = false;
-
- /* NB: keep looking for a double slot */
- if (slot == 0 || !sc->beacon.bslot[slot-1])
- break;
- }
- BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL);
- sc->beacon.bslot[avp->av_bslot] = vif;
- sc->nbcnvifs++;
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ int slot;
+
+ avp->av_bcbuf = list_first_entry(&sc->beacon.bbuf, struct ath_buf, list);
+ list_del(&avp->av_bcbuf->list);
+
+ for (slot = 0; slot < ATH_BCBUF; slot++) {
+ if (sc->beacon.bslot[slot] == NULL) {
+ avp->av_bslot = slot;
+ break;
}
}
- /* release the previous beacon frame, if it already exists. */
- bf = avp->av_bcbuf;
- if (bf->bf_mpdu != NULL) {
- skb = bf->bf_mpdu;
+ sc->beacon.bslot[avp->av_bslot] = vif;
+ sc->nbcnvifs++;
+
+ ath_dbg(common, CONFIG, "Added interface at beacon slot: %d\n",
+ avp->av_bslot);
+}
+
+void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ struct ath_buf *bf = avp->av_bcbuf;
+
+ ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n",
+ avp->av_bslot);
+
+ tasklet_disable(&sc->bcon_tasklet);
+
+ if (bf && bf->bf_mpdu) {
+ struct sk_buff *skb = bf->bf_mpdu;
dma_unmap_single(sc->dev, bf->bf_buf_addr,
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
@@ -271,99 +255,74 @@ int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif)
bf->bf_buf_addr = 0;
}
- /* NB: the beacon data buffer must be 32-bit aligned. */
- skb = ieee80211_beacon_get(sc->hw, vif);
- if (skb == NULL)
- return -ENOMEM;
+ avp->av_bcbuf = NULL;
+ sc->beacon.bslot[avp->av_bslot] = NULL;
+ sc->nbcnvifs--;
+ list_add_tail(&bf->list, &sc->beacon.bbuf);
- tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
- sc->beacon.bc_tstamp = (u32) le64_to_cpu(tstamp);
- /* Calculate a TSF adjustment factor required for staggered beacons. */
- if (avp->av_bslot > 0) {
- u64 tsfadjust;
- int intval;
-
- intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
+ tasklet_enable(&sc->bcon_tasklet);
+}
- /*
- * Calculate the TSF offset for this beacon slot, i.e., the
- * number of usecs that need to be added to the timestamp field
- * in Beacon and Probe Response frames. Beacon slot 0 is
- * processed at the correct offset, so it does not require TSF
- * adjustment. Other slots are adjusted to get the timestamp
- * close to the TBTT for the BSS.
- */
- tsfadjust = TU_TO_USEC(intval * avp->av_bslot) / ATH_BCBUF;
- avp->tsf_adjust = cpu_to_le64(tsfadjust);
+static int ath9k_beacon_choose_slot(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+ u16 intval;
+ u32 tsftu;
+ u64 tsf;
+ int slot;
- ath_dbg(common, BEACON,
- "stagger beacons, bslot %d intval %u tsfadjust %llu\n",
- avp->av_bslot, intval, (unsigned long long)tsfadjust);
+ if (sc->sc_ah->opmode != NL80211_IFTYPE_AP) {
+ ath_dbg(common, BEACON, "slot 0, tsf: %llu\n",
+ ath9k_hw_gettsf64(sc->sc_ah));
+ return 0;
+ }
- ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp =
- avp->tsf_adjust;
- } else
- avp->tsf_adjust = cpu_to_le64(0);
+ intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
+ tsf = ath9k_hw_gettsf64(sc->sc_ah);
+ tsf += TU_TO_USEC(sc->sc_ah->config.sw_beacon_response_time);
+ tsftu = TSF_TO_TU((tsf * ATH_BCBUF) >>32, tsf * ATH_BCBUF);
+ slot = (tsftu % (intval * ATH_BCBUF)) / intval;
- bf->bf_mpdu = skb;
- bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
- skb->len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
- dev_kfree_skb_any(skb);
- bf->bf_mpdu = NULL;
- bf->bf_buf_addr = 0;
- ath_err(common, "dma_mapping_error on beacon alloc\n");
- return -ENOMEM;
- }
- avp->is_bslot_active = true;
+ ath_dbg(common, BEACON, "slot: %d tsf: %llu tsftu: %u\n",
+ slot, tsf, tsftu / ATH_BCBUF);
- return 0;
+ return slot;
}
-void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp)
+void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
{
- if (avp->av_bcbuf != NULL) {
- struct ath_buf *bf;
-
- avp->is_bslot_active = false;
- if (avp->av_bslot != -1) {
- sc->beacon.bslot[avp->av_bslot] = NULL;
- sc->nbcnvifs--;
- avp->av_bslot = -1;
- }
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ u64 tsfadjust;
- bf = avp->av_bcbuf;
- if (bf->bf_mpdu != NULL) {
- struct sk_buff *skb = bf->bf_mpdu;
- dma_unmap_single(sc->dev, bf->bf_buf_addr,
- skb->len, DMA_TO_DEVICE);
- dev_kfree_skb_any(skb);
- bf->bf_mpdu = NULL;
- bf->bf_buf_addr = 0;
- }
- list_add_tail(&bf->list, &sc->beacon.bbuf);
+ if (avp->av_bslot == 0)
+ return;
- avp->av_bcbuf = NULL;
- }
+ tsfadjust = cur_conf->beacon_interval * avp->av_bslot / ATH_BCBUF;
+ avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust));
+
+ ath_dbg(common, CONFIG, "tsfadjust is: %llu for bslot: %d\n",
+ (unsigned long long)tsfadjust, avp->av_bslot);
}
-void ath_beacon_tasklet(unsigned long data)
+void ath9k_beacon_tasklet(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
- struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_buf *bf = NULL;
struct ieee80211_vif *vif;
bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
int slot;
- u32 bfaddr, bc = 0;
- if (work_pending(&sc->hw_reset_work)) {
+ if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) {
ath_dbg(common, RESET,
"reset work is pending, skip beaconing now\n");
return;
}
+
/*
* Check if the previous beacon has gone out. If
* not don't try to post another, skip this period
@@ -387,55 +346,25 @@ void ath_beacon_tasklet(unsigned long data)
} else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
ath_dbg(common, BSTUCK, "beacon is officially stuck\n");
sc->beacon.bmisscnt = 0;
- sc->sc_flags |= SC_OP_TSF_RESET;
- ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
+ ath9k_queue_reset(sc, RESET_TYPE_BEACON_STUCK);
}
return;
}
- /*
- * Generate beacon frames. we are sending frames
- * staggered so calculate the slot for this frame based
- * on the tsf to safeguard against missing an swba.
- */
-
-
- if (ah->opmode == NL80211_IFTYPE_AP) {
- u16 intval;
- u32 tsftu;
- u64 tsf;
-
- intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
- tsf = ath9k_hw_gettsf64(ah);
- tsf += TU_TO_USEC(ah->config.sw_beacon_response_time);
- tsftu = TSF_TO_TU((tsf * ATH_BCBUF) >>32, tsf * ATH_BCBUF);
- slot = (tsftu % (intval * ATH_BCBUF)) / intval;
- vif = sc->beacon.bslot[slot];
-
- ath_dbg(common, BEACON,
- "slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
- slot, tsf, tsftu / ATH_BCBUF, intval, vif);
- } else {
- slot = 0;
- vif = sc->beacon.bslot[slot];
- }
+ slot = ath9k_beacon_choose_slot(sc);
+ vif = sc->beacon.bslot[slot];
+ if (!vif || !vif->bss_conf.enable_beacon)
+ return;
- bfaddr = 0;
- if (vif) {
- bf = ath_beacon_generate(sc->hw, vif);
- if (bf != NULL) {
- bfaddr = bf->bf_daddr;
- bc = 1;
- }
+ bf = ath9k_beacon_generate(sc->hw, vif);
+ WARN_ON(!bf);
- if (sc->beacon.bmisscnt != 0) {
- ath_dbg(common, BSTUCK,
- "resume beacon xmit after %u misses\n",
- sc->beacon.bmisscnt);
- sc->beacon.bmisscnt = 0;
- }
+ if (sc->beacon.bmisscnt != 0) {
+ ath_dbg(common, BSTUCK, "resume beacon xmit after %u misses\n",
+ sc->beacon.bmisscnt);
+ sc->beacon.bmisscnt = 0;
}
/*
@@ -455,39 +384,40 @@ void ath_beacon_tasklet(unsigned long data)
* set to ATH_BCBUF so this check is a noop.
*/
if (sc->beacon.updateslot == UPDATE) {
- sc->beacon.updateslot = COMMIT; /* commit next beacon */
+ sc->beacon.updateslot = COMMIT;
sc->beacon.slotupdate = slot;
- } else if (sc->beacon.updateslot == COMMIT && sc->beacon.slotupdate == slot) {
+ } else if (sc->beacon.updateslot == COMMIT &&
+ sc->beacon.slotupdate == slot) {
ah->slottime = sc->beacon.slottime;
ath9k_hw_init_global_settings(ah);
sc->beacon.updateslot = OK;
}
- if (bfaddr != 0) {
+
+ if (bf) {
+ ath9k_reset_beacon_status(sc);
+
+ ath_dbg(common, BEACON,
+ "Transmitting beacon for slot: %d\n", slot);
+
/* NB: cabq traffic should already be queued and primed */
- ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bfaddr);
+ ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr);
if (!edma)
ath9k_hw_txstart(ah, sc->beacon.beaconq);
-
- sc->beacon.ast_be_xmit += bc; /* XXX per-vif? */
}
}
-static void ath9k_beacon_init(struct ath_softc *sc,
- u32 next_beacon,
- u32 beacon_period)
+static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt, u32 intval)
{
- if (sc->sc_flags & SC_OP_TSF_RESET) {
- ath9k_ps_wakeup(sc);
- ath9k_hw_reset_tsf(sc->sc_ah);
- }
-
- ath9k_hw_beaconinit(sc->sc_ah, next_beacon, beacon_period);
+ struct ath_hw *ah = sc->sc_ah;
- if (sc->sc_flags & SC_OP_TSF_RESET) {
- ath9k_ps_restore(sc);
- sc->sc_flags &= ~SC_OP_TSF_RESET;
- }
+ ath9k_hw_disable_interrupts(ah);
+ ath9k_hw_reset_tsf(ah);
+ ath9k_beaconq_config(sc);
+ ath9k_hw_beaconinit(ah, nexttbtt, intval);
+ sc->beacon.bmisscnt = 0;
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
}
/*
@@ -495,32 +425,27 @@ static void ath9k_beacon_init(struct ath_softc *sc,
* burst together. For the former arrange for the SWBA to be delivered for each
* slot. Slots that are not occupied will generate nothing.
*/
-static void ath_beacon_config_ap(struct ath_softc *sc,
- struct ath_beacon_config *conf)
+static void ath9k_beacon_config_ap(struct ath_softc *sc,
+ struct ath_beacon_config *conf)
{
struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
u32 nexttbtt, intval;
/* NB: the beacon interval is kept internally in TU's */
intval = TU_TO_USEC(conf->beacon_interval);
- intval /= ATH_BCBUF; /* for staggered beacons */
+ intval /= ATH_BCBUF;
nexttbtt = intval;
- /*
- * In AP mode we enable the beacon timers and SWBA interrupts to
- * prepare beacon frames.
- */
- ah->imask |= ATH9K_INT_SWBA;
- ath_beaconq_config(sc);
+ if (conf->enable_beacon)
+ ah->imask |= ATH9K_INT_SWBA;
+ else
+ ah->imask &= ~ATH9K_INT_SWBA;
- /* Set the computed AP beacon timers */
+ ath_dbg(common, BEACON, "AP nexttbtt: %u intval: %u conf_intval: %u\n",
+ nexttbtt, intval, conf->beacon_interval);
- ath9k_hw_disable_interrupts(ah);
- sc->sc_flags |= SC_OP_TSF_RESET;
ath9k_beacon_init(sc, nexttbtt, intval);
- sc->beacon.bmisscnt = 0;
- ath9k_hw_set_interrupts(ah);
- ath9k_hw_enable_interrupts(ah);
}
/*
@@ -531,8 +456,8 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
* we'll receive a BMISS interrupt when we stop seeing beacons from the AP
* we've associated with.
*/
-static void ath_beacon_config_sta(struct ath_softc *sc,
- struct ath_beacon_config *conf)
+static void ath9k_beacon_config_sta(struct ath_softc *sc,
+ struct ath_beacon_config *conf)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -544,7 +469,7 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
int num_beacons, offset, dtim_dec_count, cfp_dec_count;
/* No need to configure beacon if we are not associated */
- if (!common->curaid) {
+ if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
ath_dbg(common, BEACON,
"STA is not yet associated..skipping beacon config\n");
return;
@@ -651,97 +576,65 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
ath9k_hw_enable_interrupts(ah);
}
-static void ath_beacon_config_adhoc(struct ath_softc *sc,
- struct ath_beacon_config *conf)
+static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
+ struct ath_beacon_config *conf)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- u32 tsf, intval, nexttbtt;
+ u32 intval, nexttbtt;
ath9k_reset_beacon_status(sc);
- if (!(sc->sc_flags & SC_OP_BEACONS))
- ath9k_hw_settsf64(ah, sc->beacon.bc_tstamp);
intval = TU_TO_USEC(conf->beacon_interval);
- tsf = roundup(ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE), intval);
- nexttbtt = tsf + intval;
-
- ath_dbg(common, BEACON, "IBSS nexttbtt %u intval %u (%u)\n",
- nexttbtt, intval, conf->beacon_interval);
-
- /*
- * In IBSS mode enable the beacon timers but only enable SWBA interrupts
- * if we need to manually prepare beacon frames. Otherwise we use a
- * self-linked tx descriptor and let the hardware deal with things.
- */
- ah->imask |= ATH9K_INT_SWBA;
+ nexttbtt = intval;
- ath_beaconq_config(sc);
+ if (conf->enable_beacon)
+ ah->imask |= ATH9K_INT_SWBA;
+ else
+ ah->imask &= ~ATH9K_INT_SWBA;
- /* Set the computed ADHOC beacon timers */
+ ath_dbg(common, BEACON, "IBSS nexttbtt: %u intval: %u conf_intval: %u\n",
+ nexttbtt, intval, conf->beacon_interval);
- ath9k_hw_disable_interrupts(ah);
ath9k_beacon_init(sc, nexttbtt, intval);
- sc->beacon.bmisscnt = 0;
-
- ath9k_hw_set_interrupts(ah);
- ath9k_hw_enable_interrupts(ah);
}
-static bool ath9k_allow_beacon_config(struct ath_softc *sc,
- struct ieee80211_vif *vif)
+bool ath9k_allow_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
{
- struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
struct ath_vif *avp = (void *)vif->drv_priv;
- /*
- * Can not have different beacon interval on multiple
- * AP interface case
- */
- if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
- (sc->nbcnvifs > 1) &&
- (vif->type == NL80211_IFTYPE_AP) &&
- (cur_conf->beacon_interval != bss_conf->beacon_int)) {
- ath_dbg(common, CONFIG,
- "Changing beacon interval of multiple AP interfaces !\n");
- return false;
- }
- /*
- * Can not configure station vif's beacon config
- * while on AP opmode
- */
- if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
- (vif->type != NL80211_IFTYPE_AP)) {
- ath_dbg(common, CONFIG,
- "STA vif's beacon not allowed on AP mode\n");
- return false;
+ if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
+ if ((vif->type != NL80211_IFTYPE_AP) ||
+ (sc->nbcnvifs > 1)) {
+ ath_dbg(common, CONFIG,
+ "An AP interface is already present !\n");
+ return false;
+ }
}
- /*
- * Do not allow beacon config if HW was already configured
- * with another STA vif
- */
- if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
- (vif->type == NL80211_IFTYPE_STATION) &&
- (sc->sc_flags & SC_OP_BEACONS) &&
- !avp->primary_sta_vif) {
- ath_dbg(common, CONFIG,
- "Beacon already configured for a station interface\n");
- return false;
+
+ if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
+ if ((vif->type == NL80211_IFTYPE_STATION) &&
+ test_bit(SC_OP_BEACONS, &sc->sc_flags) &&
+ !avp->primary_sta_vif) {
+ ath_dbg(common, CONFIG,
+ "Beacon already configured for a station interface\n");
+ return false;
+ }
}
+
return true;
}
-void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
+static void ath9k_cache_beacon_config(struct ath_softc *sc,
+ struct ieee80211_bss_conf *bss_conf)
{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- if (!ath9k_allow_beacon_config(sc, vif))
- return;
+ ath_dbg(common, BEACON,
+ "Caching beacon data for BSS: %pM\n", bss_conf->bssid);
- /* Setup the beacon configuration parameters */
cur_conf->beacon_interval = bss_conf->beacon_int;
cur_conf->dtim_period = bss_conf->dtim_period;
cur_conf->listen_interval = 1;
@@ -766,73 +659,62 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
if (cur_conf->dtim_period == 0)
cur_conf->dtim_period = 1;
- ath_set_beacon(sc);
}
-static bool ath_has_valid_bslot(struct ath_softc *sc)
+void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
+ u32 changed)
{
- struct ath_vif *avp;
- int slot;
- bool found = false;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
- for (slot = 0; slot < ATH_BCBUF; slot++) {
- if (sc->beacon.bslot[slot]) {
- avp = (void *)sc->beacon.bslot[slot]->drv_priv;
- if (avp->is_bslot_active) {
- found = true;
- break;
+ if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
+ ath9k_cache_beacon_config(sc, bss_conf);
+ ath9k_set_beacon(sc);
+ set_bit(SC_OP_BEACONS, &sc->sc_flags);
+ } else {
+ /*
+ * Take care of multiple interfaces when
+ * enabling/disabling SWBA.
+ */
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ if (!bss_conf->enable_beacon &&
+ (sc->nbcnvifs <= 1)) {
+ cur_conf->enable_beacon = false;
+ } else if (bss_conf->enable_beacon) {
+ cur_conf->enable_beacon = true;
+ ath9k_cache_beacon_config(sc, bss_conf);
}
}
+
+ if (cur_conf->beacon_interval) {
+ ath9k_set_beacon(sc);
+
+ if (cur_conf->enable_beacon)
+ set_bit(SC_OP_BEACONS, &sc->sc_flags);
+ else
+ clear_bit(SC_OP_BEACONS, &sc->sc_flags);
+ }
}
- return found;
}
-
-void ath_set_beacon(struct ath_softc *sc)
+void ath9k_set_beacon(struct ath_softc *sc)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
switch (sc->sc_ah->opmode) {
case NL80211_IFTYPE_AP:
- if (ath_has_valid_bslot(sc))
- ath_beacon_config_ap(sc, cur_conf);
+ ath9k_beacon_config_ap(sc, cur_conf);
break;
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
- ath_beacon_config_adhoc(sc, cur_conf);
+ ath9k_beacon_config_adhoc(sc, cur_conf);
break;
case NL80211_IFTYPE_STATION:
- ath_beacon_config_sta(sc, cur_conf);
+ ath9k_beacon_config_sta(sc, cur_conf);
break;
default:
ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
return;
}
-
- sc->sc_flags |= SC_OP_BEACONS;
-}
-
-void ath9k_set_beaconing_status(struct ath_softc *sc, bool status)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- if (!ath_has_valid_bslot(sc)) {
- sc->sc_flags &= ~SC_OP_BEACONS;
- return;
- }
-
- ath9k_ps_wakeup(sc);
- if (status) {
- /* Re-enable beaconing */
- ah->imask |= ATH9K_INT_SWBA;
- ath9k_hw_set_interrupts(ah);
- } else {
- /* Disable SWBA interrupt */
- ah->imask &= ~ATH9K_INT_SWBA;
- ath9k_hw_set_interrupts(ah);
- tasklet_kill(&sc->bcon_tasklet);
- ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
- }
- ath9k_ps_restore(sc);
}
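
The beacon.c hunks above move sc->sc_flags handling from open-coded |=, &= and mask tests to the kernel's atomic bit helpers on an unsigned long word. A minimal kernel-context sketch of that pattern follows; struct example_softc and SC_OP_EXAMPLE are made-up names used only for illustration, not the driver's own.

    /*
     * Sketch of the flag-handling style used above; the names here
     * (example_softc, SC_OP_EXAMPLE) are illustrative only.
     */
    #include <linux/bitops.h>
    #include <linux/types.h>

    #define SC_OP_EXAMPLE	3	/* hypothetical bit index */

    struct example_softc {
    	unsigned long sc_flags;		/* bit array, not a plain integer */
    };

    static void example_set_ready(struct example_softc *sc, bool ready)
    {
    	if (ready)
    		set_bit(SC_OP_EXAMPLE, &sc->sc_flags);	/* atomic set */
    	else
    		clear_bit(SC_OP_EXAMPLE, &sc->sc_flags);	/* atomic clear */
    }

    static bool example_is_ready(struct example_softc *sc)
    {
    	return test_bit(SC_OP_EXAMPLE, &sc->sc_flags);
    }
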
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 1ca6da80..acd4373 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -336,10 +336,16 @@ static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
enum ath_stomp_type stomp_type)
{
struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
- const u32 *weight = AR_SREV_9462(ah) ? ar9003_wlan_weights[stomp_type] :
- ar9462_wlan_weights[stomp_type];
+ const u32 *weight = ar9003_wlan_weights[stomp_type];
int i;
+ if (AR_SREV_9462(ah)) {
+ if ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
+ btcoex->mci.stomp_ftp)
+ stomp_type = ATH_BTCOEX_STOMP_LOW_FTP;
+ weight = ar9462_wlan_weights[stomp_type];
+ }
+
for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
btcoex->bt_weight[i] = AR9300_BT_WGHT;
btcoex->wlan_weight[i] = weight[i];
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 3a1e1cf..20092f9 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -36,6 +36,9 @@
#define ATH_BT_CNT_THRESHOLD 3
#define ATH_BT_CNT_SCAN_THRESHOLD 15
+#define ATH_BTCOEX_RX_WAIT_TIME 100
+#define ATH_BTCOEX_STOMP_FTP_THRESH 5
+
#define AR9300_NUM_BT_WEIGHTS 4
#define AR9300_NUM_WLAN_WEIGHTS 4
/* Defines the BT AR_BT_COEX_WGHT used */
@@ -80,6 +83,7 @@ struct ath9k_hw_mci {
u8 bt_ver_major;
u8 bt_ver_minor;
u8 bt_state;
+ u8 stomp_ftp;
};
struct ath_btcoex_hw {
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 3b33996d..1060c19 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -30,10 +30,10 @@ struct ar5416IniArray {
u32 ia_columns;
};
-#define INIT_INI_ARRAY(iniarray, array, rows, columns) do { \
+#define INIT_INI_ARRAY(iniarray, array) do { \
(iniarray)->ia_array = (u32 *)(array); \
- (iniarray)->ia_rows = (rows); \
- (iniarray)->ia_columns = (columns); \
+ (iniarray)->ia_rows = ARRAY_SIZE(array); \
+ (iniarray)->ia_columns = ARRAY_SIZE(array[0]); \
} while (0)
#define INI_RA(iniarray, row, column) \
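
With the calib.h change above, INIT_INI_ARRAY() derives the row and column counts from the array's type via ARRAY_SIZE() instead of trusting each caller to pass them. A small sketch of why that works for the two-dimensional initval tables; example_ini_table is a made-up table, not one of the driver's.

    #include <linux/kernel.h>	/* ARRAY_SIZE() */
    #include <linux/types.h>

    /* Hypothetical 3x2 initval table: register address, value. */
    static const u32 example_ini_table[3][2] = {
    	{ 0x1000, 0x1 },
    	{ 0x1004, 0x2 },
    	{ 0x1008, 0x3 },
    };

    /*
     * ARRAY_SIZE(example_ini_table)    == 3  -> ia_rows
     * ARRAY_SIZE(example_ini_table[0]) == 2  -> ia_columns
     * so the dimensions can no longer disagree with the data.
     */
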
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index fde700c..68b643c 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -205,11 +205,10 @@ static ssize_t write_file_disable_ani(struct file *file,
common->disable_ani = !!disable_ani;
if (disable_ani) {
- sc->sc_flags &= ~SC_OP_ANI_RUN;
- del_timer_sync(&common->ani.timer);
+ clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
+ ath_stop_ani(sc);
} else {
- sc->sc_flags |= SC_OP_ANI_RUN;
- ath_start_ani(common);
+ ath_check_ani(sc);
}
return count;
@@ -348,8 +347,6 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
sc->debug.stats.istats.txok++;
if (status & ATH9K_INT_TXURN)
sc->debug.stats.istats.txurn++;
- if (status & ATH9K_INT_MIB)
- sc->debug.stats.istats.mib++;
if (status & ATH9K_INT_RXPHY)
sc->debug.stats.istats.rxphyerr++;
if (status & ATH9K_INT_RXKCM)
@@ -374,6 +371,8 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
sc->debug.stats.istats.dtim++;
if (status & ATH9K_INT_TSFOOR)
sc->debug.stats.istats.tsfoor++;
+ if (status & ATH9K_INT_MCI)
+ sc->debug.stats.istats.mci++;
}
static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
@@ -418,6 +417,7 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
PR_IS("DTIMSYNC", dtimsync);
PR_IS("DTIM", dtim);
PR_IS("TSFOOR", tsfoor);
+ PR_IS("MCI", mci);
PR_IS("TOTAL", total);
len += snprintf(buf + len, mxlen - len,
@@ -1318,7 +1318,7 @@ static int open_file_bb_mac_samps(struct inode *inode, struct file *file)
u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
u8 nread;
- if (sc->sc_flags & SC_OP_INVALID)
+ if (test_bit(SC_OP_INVALID, &sc->sc_flags))
return -EAGAIN;
buf = vmalloc(size);
@@ -1555,6 +1555,14 @@ int ath9k_init_debug(struct ath_hw *ah)
&fops_interrupt);
debugfs_create_file("xmit", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_xmit);
+ debugfs_create_u32("qlen_bk", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
+ &sc->tx.txq_max_pending[WME_AC_BK]);
+ debugfs_create_u32("qlen_be", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
+ &sc->tx.txq_max_pending[WME_AC_BE]);
+ debugfs_create_u32("qlen_vi", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
+ &sc->tx.txq_max_pending[WME_AC_VI]);
+ debugfs_create_u32("qlen_vo", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
+ &sc->tx.txq_max_pending[WME_AC_VO]);
debugfs_create_file("stations", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_stations);
debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy, sc,
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index c34da09..8b9d080 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -32,6 +32,19 @@ struct ath_buf;
#define RESET_STAT_INC(sc, type) do { } while (0)
#endif
+enum ath_reset_type {
+ RESET_TYPE_BB_HANG,
+ RESET_TYPE_BB_WATCHDOG,
+ RESET_TYPE_FATAL_INT,
+ RESET_TYPE_TX_ERROR,
+ RESET_TYPE_TX_HANG,
+ RESET_TYPE_PLL_HANG,
+ RESET_TYPE_MAC_HANG,
+ RESET_TYPE_BEACON_STUCK,
+ RESET_TYPE_MCI,
+ __RESET_TYPE_MAX
+};
+
#ifdef CONFIG_ATH9K_DEBUGFS
/**
@@ -86,6 +99,7 @@ struct ath_interrupt_stats {
u32 dtim;
u32 bb_watchdog;
u32 tsfoor;
+ u32 mci;
/* Sync-cause stats */
u32 sync_cause_all;
@@ -208,17 +222,6 @@ struct ath_rx_stats {
u32 rx_frags;
};
-enum ath_reset_type {
- RESET_TYPE_BB_HANG,
- RESET_TYPE_BB_WATCHDOG,
- RESET_TYPE_FATAL_INT,
- RESET_TYPE_TX_ERROR,
- RESET_TYPE_TX_HANG,
- RESET_TYPE_PLL_HANG,
- RESET_TYPE_MAC_HANG,
- __RESET_TYPE_MAX
-};
-
struct ath_stats {
struct ath_interrupt_stats istats;
struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 33acb92..484b313 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -241,16 +241,12 @@ enum eeprom_param {
EEP_TEMPSENSE_SLOPE,
EEP_TEMPSENSE_SLOPE_PAL_ON,
EEP_PWR_TABLE_OFFSET,
- EEP_DRIVE_STRENGTH,
- EEP_INTERNAL_REGULATOR,
- EEP_SWREG,
EEP_PAPRD,
EEP_MODAL_VER,
EEP_ANT_DIV_CTL1,
EEP_CHAIN_MASK_REDUCE,
EEP_ANTENNA_GAIN_2G,
EEP_ANTENNA_GAIN_5G,
- EEP_QUICK_DROP
};
enum ar5416_rates {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 4322ac8..7d07510 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -135,7 +135,7 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
if (!dump_base_hdr) {
len += snprintf(buf + len, size - len,
"%20s :\n", "2GHz modal Header");
- len += ath9k_dump_4k_modal_eeprom(buf, len, size,
+ len = ath9k_dump_4k_modal_eeprom(buf, len, size,
&eep->modalHeader);
goto out;
}
@@ -188,8 +188,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
{
#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
struct ath_common *common = ath9k_hw_common(ah);
- struct ar5416_eeprom_4k *eep =
- (struct ar5416_eeprom_4k *) &ah->eeprom.map4k;
+ struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
u16 *eepdata, temp, magic, magic2;
u32 sum = 0, el;
bool need_swap = false;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index aa61476..cd742fb 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -132,7 +132,7 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
if (!dump_base_hdr) {
len += snprintf(buf + len, size - len,
"%20s :\n", "2GHz modal Header");
- len += ar9287_dump_modal_eeprom(buf, len, size,
+ len = ar9287_dump_modal_eeprom(buf, len, size,
&eep->modalHeader);
goto out;
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index b5fba8b..a8ac30a 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -211,11 +211,11 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
if (!dump_base_hdr) {
len += snprintf(buf + len, size - len,
"%20s :\n", "2GHz modal Header");
- len += ath9k_def_dump_modal_eeprom(buf, len, size,
+ len = ath9k_def_dump_modal_eeprom(buf, len, size,
&eep->modalHeader[0]);
len += snprintf(buf + len, size - len,
"%20s :\n", "5GHz modal Header");
- len += ath9k_def_dump_modal_eeprom(buf, len, size,
+ len = ath9k_def_dump_modal_eeprom(buf, len, size,
&eep->modalHeader[1]);
goto out;
}
@@ -264,8 +264,7 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
{
- struct ar5416_eeprom_def *eep =
- (struct ar5416_eeprom_def *) &ah->eeprom.def;
+ struct ar5416_eeprom_def *eep = &ah->eeprom.def;
struct ath_common *common = ath9k_hw_common(ah);
u16 *eepdata, temp, magic, magic2;
u32 sum = 0, el;
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 281a9af..bacdb8f 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -132,17 +132,18 @@ static void ath_detect_bt_priority(struct ath_softc *sc)
if (time_after(jiffies, btcoex->bt_priority_time +
msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
- sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
+ clear_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags);
+ clear_bit(BT_OP_SCAN, &btcoex->op_flags);
/* Detect if colocated bt started scanning */
if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX,
"BT scan detected\n");
- sc->sc_flags |= (SC_OP_BT_SCAN |
- SC_OP_BT_PRIORITY_DETECTED);
+ set_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags);
+ set_bit(BT_OP_SCAN, &btcoex->op_flags);
} else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX,
"BT priority traffic detected\n");
- sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
+ set_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags);
}
btcoex->bt_priority_cnt = 0;
@@ -190,13 +191,34 @@ static void ath_btcoex_period_timer(unsigned long data)
struct ath_softc *sc = (struct ath_softc *) data;
struct ath_hw *ah = sc->sc_ah;
struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
u32 timer_period;
bool is_btscan;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ if (sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP) {
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ goto skip_hw_wakeup;
+ }
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
ath9k_ps_wakeup(sc);
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
ath_detect_bt_priority(sc);
- is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
+ is_btscan = test_bit(BT_OP_SCAN, &btcoex->op_flags);
+
+ btcoex->bt_wait_time += btcoex->btcoex_period;
+ if (btcoex->bt_wait_time > ATH_BTCOEX_RX_WAIT_TIME) {
+ if (ar9003_mci_state(ah, MCI_STATE_NEED_FTP_STOMP) &&
+ (mci->num_pan || mci->num_other_acl))
+ ah->btcoex_hw.mci.stomp_ftp =
+ (sc->rx.num_pkts < ATH_BTCOEX_STOMP_FTP_THRESH);
+ else
+ ah->btcoex_hw.mci.stomp_ftp = false;
+ btcoex->bt_wait_time = 0;
+ sc->rx.num_pkts = 0;
+ }
spin_lock_bh(&btcoex->btcoex_lock);
@@ -218,9 +240,9 @@ static void ath_btcoex_period_timer(unsigned long data)
}
ath9k_ps_restore(sc);
- timer_period = btcoex->btcoex_period / 1000;
- mod_timer(&btcoex->period_timer, jiffies +
- msecs_to_jiffies(timer_period));
+skip_hw_wakeup:
+ timer_period = btcoex->btcoex_period;
+ mod_timer(&btcoex->period_timer, jiffies + msecs_to_jiffies(timer_period));
}
/*
@@ -233,14 +255,14 @@ static void ath_btcoex_no_stomp_timer(void *arg)
struct ath_hw *ah = sc->sc_ah;
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_common *common = ath9k_hw_common(ah);
- bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
ath_dbg(common, BTCOEX, "no stomp timer running\n");
ath9k_ps_wakeup(sc);
spin_lock_bh(&btcoex->btcoex_lock);
- if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
+ if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW ||
+ test_bit(BT_OP_SCAN, &btcoex->op_flags))
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
@@ -254,10 +276,10 @@ static int ath_init_btcoex_timer(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
- btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
- btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
+ btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD;
+ btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * 1000 *
btcoex->btcoex_period / 100;
- btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
+ btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) * 1000 *
btcoex->btcoex_period / 100;
setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
@@ -292,7 +314,8 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
btcoex->bt_priority_cnt = 0;
btcoex->bt_priority_time = jiffies;
- sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
+ clear_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags);
+ clear_bit(BT_OP_SCAN, &btcoex->op_flags);
mod_timer(&btcoex->period_timer, jiffies);
}
@@ -314,14 +337,22 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc)
btcoex->hw_timer_enabled = false;
}
+void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+
+ ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
+}
+
u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
{
+ struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_mci_profile *mci = &sc->btcoex.mci;
u16 aggr_limit = 0;
if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
- else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
+ else if (test_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags))
aggr_limit = min((max_4ms_framelen * 3) / 8,
(u32)ATH_AMPDU_LIMIT_MAX);
@@ -362,9 +393,9 @@ void ath9k_stop_btcoex(struct ath_softc *sc)
if (ah->btcoex_hw.enabled &&
ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
- ath9k_hw_btcoex_disable(ah);
if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
ath9k_btcoex_timer_pause(sc);
+ ath9k_hw_btcoex_disable(ah);
if (AR_SREV_9462(ah))
ath_mci_flush_profile(&sc->btcoex.mci);
}
@@ -372,11 +403,13 @@ void ath9k_stop_btcoex(struct ath_softc *sc)
void ath9k_deinit_btcoex(struct ath_softc *sc)
{
+ struct ath_hw *ah = sc->sc_ah;
+
if ((sc->btcoex.no_stomp_timer) &&
ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE)
ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
- if (AR_SREV_9462(sc->sc_ah))
+ if (ath9k_hw_mci_is_enabled(ah))
ath_mci_cleanup(sc);
}
@@ -402,7 +435,7 @@ int ath9k_init_btcoex(struct ath_softc *sc)
txq = sc->tx.txq_map[WME_AC_BE];
ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
- if (AR_SREV_9462(ah)) {
+ if (ath9k_hw_mci_is_enabled(ah)) {
sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
INIT_LIST_HEAD(&sc->btcoex.mci.info);
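
The gpio.c hunks above keep btcoex_period in milliseconds while the two no-stomp windows stay in microseconds, hence the extra factor of 1000 in the duty-cycle arithmetic. A small sketch of that calculation with illustrative numbers (a 45 ms period at a 55% duty cycle; the driver's actual defaults may differ):

    #include <linux/types.h>

    /* Unit convention used above: period in ms, result in us. */
    static u32 example_no_stomp_us(u32 period_ms, u32 duty_cycle_pct)
    {
    	return (100 - duty_cycle_pct) * 1000 * period_ms / 100;
    }

    /* example_no_stomp_us(45, 55) == 20250us of unstomped BT time per period */
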
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 1357952..936e920 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -453,7 +453,6 @@ struct ath9k_htc_priv {
u8 num_sta_assoc_vif;
u8 num_ap_vif;
- u16 op_flags;
u16 curtxpow;
u16 txpowlimit;
u16 nvifs;
@@ -461,6 +460,7 @@ struct ath9k_htc_priv {
bool rearm_ani;
bool reconfig_beacon;
unsigned int rxfilter;
+ unsigned long op_flags;
struct ath9k_hw_cal_data caldata;
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
@@ -572,8 +572,6 @@ bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
-void ath9k_htc_radio_enable(struct ieee80211_hw *hw);
-void ath9k_htc_radio_disable(struct ieee80211_hw *hw);
#ifdef CONFIG_MAC80211_LEDS
void ath9k_init_leds(struct ath9k_htc_priv *priv);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 2eadffb..77d541f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -207,9 +207,9 @@ static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
else
priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
- if (priv->op_flags & OP_TSF_RESET) {
+ if (test_bit(OP_TSF_RESET, &priv->op_flags)) {
ath9k_hw_reset_tsf(priv->ah);
- priv->op_flags &= ~OP_TSF_RESET;
+ clear_bit(OP_TSF_RESET, &priv->op_flags);
} else {
/*
* Pull nexttbtt forward to reflect the current TSF.
@@ -221,7 +221,7 @@ static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
} while (nexttbtt < tsftu);
}
- if (priv->op_flags & OP_ENABLE_BEACON)
+ if (test_bit(OP_ENABLE_BEACON, &priv->op_flags))
imask |= ATH9K_INT_SWBA;
ath_dbg(common, CONFIG,
@@ -269,7 +269,7 @@ static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
else
priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
- if (priv->op_flags & OP_ENABLE_BEACON)
+ if (test_bit(OP_ENABLE_BEACON, &priv->op_flags))
imask |= ATH9K_INT_SWBA;
ath_dbg(common, CONFIG,
@@ -365,7 +365,7 @@ static void ath9k_htc_send_beacon(struct ath9k_htc_priv *priv,
vif = priv->cur_beacon_conf.bslot[slot];
avp = (struct ath9k_htc_vif *)vif->drv_priv;
- if (unlikely(priv->op_flags & OP_SCANNING)) {
+ if (unlikely(test_bit(OP_SCANNING, &priv->op_flags))) {
spin_unlock_bh(&priv->beacon_lock);
return;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 1c10e2e..07df279 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -37,17 +37,18 @@ static void ath_detect_bt_priority(struct ath9k_htc_priv *priv)
if (time_after(jiffies, btcoex->bt_priority_time +
msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
- priv->op_flags &= ~(OP_BT_PRIORITY_DETECTED | OP_BT_SCAN);
+ clear_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags);
+ clear_bit(OP_BT_SCAN, &priv->op_flags);
/* Detect if colocated bt started scanning */
if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
ath_dbg(ath9k_hw_common(ah), BTCOEX,
"BT scan detected\n");
- priv->op_flags |= (OP_BT_SCAN |
- OP_BT_PRIORITY_DETECTED);
+ set_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags);
+ set_bit(OP_BT_SCAN, &priv->op_flags);
} else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
ath_dbg(ath9k_hw_common(ah), BTCOEX,
"BT priority traffic detected\n");
- priv->op_flags |= OP_BT_PRIORITY_DETECTED;
+ set_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags);
}
btcoex->bt_priority_cnt = 0;
@@ -67,26 +68,23 @@ static void ath_btcoex_period_work(struct work_struct *work)
struct ath_btcoex *btcoex = &priv->btcoex;
struct ath_common *common = ath9k_hw_common(priv->ah);
u32 timer_period;
- bool is_btscan;
int ret;
ath_detect_bt_priority(priv);
- is_btscan = !!(priv->op_flags & OP_BT_SCAN);
-
ret = ath9k_htc_update_cap_target(priv,
- !!(priv->op_flags & OP_BT_PRIORITY_DETECTED));
+ test_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags));
if (ret) {
ath_err(common, "Unable to set BTCOEX parameters\n");
return;
}
- ath9k_hw_btcoex_bt_stomp(priv->ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
- btcoex->bt_stomp_type);
+ ath9k_hw_btcoex_bt_stomp(priv->ah, test_bit(OP_BT_SCAN, &priv->op_flags) ?
+ ATH_BTCOEX_STOMP_ALL : btcoex->bt_stomp_type);
ath9k_hw_btcoex_enable(priv->ah);
- timer_period = is_btscan ? btcoex->btscan_no_stomp :
- btcoex->btcoex_no_stomp;
+ timer_period = test_bit(OP_BT_SCAN, &priv->op_flags) ?
+ btcoex->btscan_no_stomp : btcoex->btcoex_no_stomp;
ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work,
msecs_to_jiffies(timer_period));
ieee80211_queue_delayed_work(priv->hw, &priv->coex_period_work,
@@ -104,14 +102,15 @@ static void ath_btcoex_duty_cycle_work(struct work_struct *work)
struct ath_hw *ah = priv->ah;
struct ath_btcoex *btcoex = &priv->btcoex;
struct ath_common *common = ath9k_hw_common(ah);
- bool is_btscan = priv->op_flags & OP_BT_SCAN;
ath_dbg(common, BTCOEX, "time slice work for bt and wlan\n");
- if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
+ if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW ||
+ test_bit(OP_BT_SCAN, &priv->op_flags))
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
+
ath9k_hw_btcoex_enable(priv->ah);
}
@@ -141,7 +140,8 @@ static void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv)
btcoex->bt_priority_cnt = 0;
btcoex->bt_priority_time = jiffies;
- priv->op_flags &= ~(OP_BT_PRIORITY_DETECTED | OP_BT_SCAN);
+ clear_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags);
+ clear_bit(OP_BT_SCAN, &priv->op_flags);
ieee80211_queue_delayed_work(priv->hw, &priv->coex_period_work, 0);
}
@@ -310,95 +310,3 @@ void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv)
if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
wiphy_rfkill_start_polling(priv->hw->wiphy);
}
-
-void ath9k_htc_radio_enable(struct ieee80211_hw *hw)
-{
- struct ath9k_htc_priv *priv = hw->priv;
- struct ath_hw *ah = priv->ah;
- struct ath_common *common = ath9k_hw_common(ah);
- int ret;
- u8 cmd_rsp;
-
- if (!ah->curchan)
- ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
-
- /* Reset the HW */
- ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
- if (ret) {
- ath_err(common,
- "Unable to reset hardware; reset status %d (freq %u MHz)\n",
- ret, ah->curchan->channel);
- }
-
- ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
- &priv->curtxpow);
-
- /* Start RX */
- WMI_CMD(WMI_START_RECV_CMDID);
- ath9k_host_rx_init(priv);
-
- /* Start TX */
- htc_start(priv->htc);
- spin_lock_bh(&priv->tx.tx_lock);
- priv->tx.flags &= ~ATH9K_HTC_OP_TX_QUEUES_STOP;
- spin_unlock_bh(&priv->tx.tx_lock);
- ieee80211_wake_queues(hw);
-
- WMI_CMD(WMI_ENABLE_INTR_CMDID);
-
- /* Enable LED */
- ath9k_hw_cfg_output(ah, ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- ath9k_hw_set_gpio(ah, ah->led_pin, 0);
-}
-
-void ath9k_htc_radio_disable(struct ieee80211_hw *hw)
-{
- struct ath9k_htc_priv *priv = hw->priv;
- struct ath_hw *ah = priv->ah;
- struct ath_common *common = ath9k_hw_common(ah);
- int ret;
- u8 cmd_rsp;
-
- ath9k_htc_ps_wakeup(priv);
-
- /* Disable LED */
- ath9k_hw_set_gpio(ah, ah->led_pin, 1);
- ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
-
- WMI_CMD(WMI_DISABLE_INTR_CMDID);
-
- /* Stop TX */
- ieee80211_stop_queues(hw);
- ath9k_htc_tx_drain(priv);
- WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
-
- /* Stop RX */
- WMI_CMD(WMI_STOP_RECV_CMDID);
-
- /* Clear the WMI event queue */
- ath9k_wmi_event_drain(priv);
-
- /*
- * The MIB counters have to be disabled here,
- * since the target doesn't do it.
- */
- ath9k_hw_disable_mib_counters(ah);
-
- if (!ah->curchan)
- ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
-
- /* Reset the HW */
- ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
- if (ret) {
- ath_err(common,
- "Unable to reset hardware; reset status %d (freq %u MHz)\n",
- ret, ah->curchan->channel);
- }
-
- /* Disable the PHY */
- ath9k_hw_phy_disable(ah);
-
- ath9k_htc_ps_restore(priv);
- ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
-}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 25213d5..a035a38 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -611,7 +611,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
struct ath_common *common;
int i, ret = 0, csz = 0;
- priv->op_flags |= OP_INVALID;
+ set_bit(OP_INVALID, &priv->op_flags);
ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
if (!ah)
@@ -718,7 +718,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
hw->queues = 4;
hw->channel_change_time = 5000;
- hw->max_listen_interval = 10;
+ hw->max_listen_interval = 1;
hw->vif_data_size = sizeof(struct ath9k_htc_vif);
hw->sta_data_size = sizeof(struct ath9k_htc_sta);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 2b8f61c..c785129 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -75,14 +75,19 @@ unlock:
void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv)
{
+ bool reset;
+
mutex_lock(&priv->htc_pm_lock);
if (--priv->ps_usecount != 0)
goto unlock;
- if (priv->ps_idle)
+ if (priv->ps_idle) {
+ ath9k_hw_setrxabort(priv->ah, true);
+ ath9k_hw_stopdmarecv(priv->ah, &reset);
ath9k_hw_setpower(priv->ah, ATH9K_PM_FULL_SLEEP);
- else if (priv->ps_enabled)
+ } else if (priv->ps_enabled) {
ath9k_hw_setpower(priv->ah, ATH9K_PM_NETWORK_SLEEP);
+ }
unlock:
mutex_unlock(&priv->htc_pm_lock);
@@ -250,7 +255,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
u8 cmd_rsp;
int ret;
- if (priv->op_flags & OP_INVALID)
+ if (test_bit(OP_INVALID, &priv->op_flags))
return -EIO;
fastcc = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
@@ -304,7 +309,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
htc_start(priv->htc);
- if (!(priv->op_flags & OP_SCANNING) &&
+ if (!test_bit(OP_SCANNING, &priv->op_flags) &&
!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
ath9k_htc_vif_reconfig(priv);
@@ -750,7 +755,7 @@ void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
common->ani.shortcal_timer = timestamp;
common->ani.checkani_timer = timestamp;
- priv->op_flags |= OP_ANI_RUNNING;
+ set_bit(OP_ANI_RUNNING, &priv->op_flags);
ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
@@ -759,7 +764,7 @@ void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv)
{
cancel_delayed_work_sync(&priv->ani_work);
- priv->op_flags &= ~OP_ANI_RUNNING;
+ clear_bit(OP_ANI_RUNNING, &priv->op_flags);
}
void ath9k_htc_ani_work(struct work_struct *work)
@@ -944,7 +949,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
ath_dbg(common, CONFIG,
"Failed to update capability in target\n");
- priv->op_flags &= ~OP_INVALID;
+ clear_bit(OP_INVALID, &priv->op_flags);
htc_start(priv->htc);
spin_lock_bh(&priv->tx.tx_lock);
@@ -973,7 +978,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
- if (priv->op_flags & OP_INVALID) {
+ if (test_bit(OP_INVALID, &priv->op_flags)) {
ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&priv->mutex);
return;
@@ -1015,7 +1020,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
ath9k_htc_ps_restore(priv);
ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
- priv->op_flags |= OP_INVALID;
+ set_bit(OP_INVALID, &priv->op_flags);
ath_dbg(common, CONFIG, "Driver halt\n");
mutex_unlock(&priv->mutex);
@@ -1105,8 +1110,8 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
ath9k_htc_set_opmode(priv);
if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
- !(priv->op_flags & OP_ANI_RUNNING)) {
- ath9k_hw_set_tsfadjust(priv->ah, 1);
+ !test_bit(OP_ANI_RUNNING, &priv->op_flags)) {
+ ath9k_hw_set_tsfadjust(priv->ah, true);
ath9k_htc_start_ani(priv);
}
@@ -1178,24 +1183,20 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
struct ath9k_htc_priv *priv = hw->priv;
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ieee80211_conf *conf = &hw->conf;
+ bool chip_reset = false;
+ int ret = 0;
mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
- bool enable_radio = false;
- bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
-
mutex_lock(&priv->htc_pm_lock);
- if (!idle && priv->ps_idle)
- enable_radio = true;
- priv->ps_idle = idle;
- mutex_unlock(&priv->htc_pm_lock);
- if (enable_radio) {
- ath_dbg(common, CONFIG, "not-idle: enabling radio\n");
- ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
- ath9k_htc_radio_enable(hw);
- }
+ priv->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
+ if (priv->ps_idle)
+ chip_reset = true;
+
+ mutex_unlock(&priv->htc_pm_lock);
}
/*
@@ -1210,7 +1211,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
ath9k_htc_remove_monitor_interface(priv);
}
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || chip_reset) {
struct ieee80211_channel *curchan = hw->conf.channel;
int pos = curchan->hw_value;
@@ -1223,8 +1224,8 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
ath_err(common, "Unable to set channel\n");
- mutex_unlock(&priv->mutex);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
}
@@ -1246,21 +1247,10 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
priv->txpowlimit, &priv->curtxpow);
}
- if (changed & IEEE80211_CONF_CHANGE_IDLE) {
- mutex_lock(&priv->htc_pm_lock);
- if (!priv->ps_idle) {
- mutex_unlock(&priv->htc_pm_lock);
- goto out;
- }
- mutex_unlock(&priv->htc_pm_lock);
-
- ath_dbg(common, CONFIG, "idle: disabling radio\n");
- ath9k_htc_radio_disable(hw);
- }
-
out:
+ ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
- return 0;
+ return ret;
}
#define SUPPORTED_FILTERS \
@@ -1285,7 +1275,7 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
changed_flags &= SUPPORTED_FILTERS;
*total_flags &= SUPPORTED_FILTERS;
- if (priv->op_flags & OP_INVALID) {
+ if (test_bit(OP_INVALID, &priv->op_flags)) {
ath_dbg(ath9k_hw_common(priv->ah), ANY,
"Unable to configure filter on invalid state\n");
mutex_unlock(&priv->mutex);
@@ -1361,7 +1351,7 @@ static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
qi.tqi_aifs = params->aifs;
qi.tqi_cwmin = params->cw_min;
qi.tqi_cwmax = params->cw_max;
- qi.tqi_burstTime = params->txop;
+ qi.tqi_burstTime = params->txop * 32;
qnum = get_hw_qnum(queue, priv->hwq_map);
@@ -1496,6 +1486,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
priv->num_sta_assoc_vif++ : priv->num_sta_assoc_vif--;
if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
+ ath9k_htc_choose_set_bssid(priv);
if (bss_conf->assoc && (priv->num_sta_assoc_vif == 1))
ath9k_htc_start_ani(priv);
else if (priv->num_sta_assoc_vif == 0)
@@ -1503,13 +1494,11 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
}
}
- if (changed & BSS_CHANGED_BSSID) {
+ if (changed & BSS_CHANGED_IBSS) {
if (priv->ah->opmode == NL80211_IFTYPE_ADHOC) {
common->curaid = bss_conf->aid;
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
ath9k_htc_set_bssid(priv);
- } else if (priv->ah->opmode == NL80211_IFTYPE_STATION) {
- ath9k_htc_choose_set_bssid(priv);
}
}
@@ -1517,7 +1506,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
ath_dbg(common, CONFIG, "Beacon enabled for BSS: %pM\n",
bss_conf->bssid);
ath9k_htc_set_tsfadjust(priv, vif);
- priv->op_flags |= OP_ENABLE_BEACON;
+ set_bit(OP_ENABLE_BEACON, &priv->op_flags);
ath9k_htc_beacon_config(priv, vif);
}
@@ -1530,7 +1519,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
ath_dbg(common, CONFIG,
"Beacon disabled for BSS: %pM\n",
bss_conf->bssid);
- priv->op_flags &= ~OP_ENABLE_BEACON;
+ clear_bit(OP_ENABLE_BEACON, &priv->op_flags);
ath9k_htc_beacon_config(priv, vif);
}
}
@@ -1543,7 +1532,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
(priv->nvifs == 1) &&
(priv->num_ap_vif == 1) &&
(vif->type == NL80211_IFTYPE_AP)) {
- priv->op_flags |= OP_TSF_RESET;
+ set_bit(OP_TSF_RESET, &priv->op_flags);
}
ath_dbg(common, CONFIG,
"Beacon interval changed for BSS: %pM\n",
@@ -1655,7 +1644,7 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
spin_lock_bh(&priv->beacon_lock);
- priv->op_flags |= OP_SCANNING;
+ set_bit(OP_SCANNING, &priv->op_flags);
spin_unlock_bh(&priv->beacon_lock);
cancel_work_sync(&priv->ps_work);
ath9k_htc_stop_ani(priv);
@@ -1668,7 +1657,7 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
spin_lock_bh(&priv->beacon_lock);
- priv->op_flags &= ~OP_SCANNING;
+ clear_bit(OP_SCANNING, &priv->op_flags);
spin_unlock_bh(&priv->beacon_lock);
ath9k_htc_ps_wakeup(priv);
ath9k_htc_vif_reconfig(priv);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 3e40a64..47e61d0 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -916,7 +916,7 @@ void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
{
ath9k_hw_rxena(priv->ah);
ath9k_htc_opmode_init(priv);
- ath9k_hw_startpcureceive(priv->ah, (priv->op_flags & OP_SCANNING));
+ ath9k_hw_startpcureceive(priv->ah, test_bit(OP_SCANNING, &priv->op_flags));
priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index abe05ec..cfa91ab 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -342,6 +342,9 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
val = REG_READ(ah, AR_SREV);
ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
return;
+ case AR9300_DEVID_QCA955X:
+ ah->hw_version.macVersion = AR_SREV_VERSION_9550;
+ return;
}
val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
@@ -390,14 +393,6 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
}
-static void ath9k_hw_aspm_init(struct ath_hw *ah)
-{
- struct ath_common *common = ath9k_hw_common(ah);
-
- if (common->bus_ops->aspm_init)
- common->bus_ops->aspm_init(common);
-}
-
/* This should work for all families including legacy */
static bool ath9k_hw_chip_test(struct ath_hw *ah)
{
@@ -622,7 +617,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
- ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
+ ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
!ah->is_pciexpress)) {
ah->config.serialize_regmode =
SER_REG_MODE_ON;
@@ -654,6 +649,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
case AR_SREV_VERSION_9485:
case AR_SREV_VERSION_9340:
case AR_SREV_VERSION_9462:
+ case AR_SREV_VERSION_9550:
break;
default:
ath_err(common,
@@ -663,7 +659,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
}
if (AR_SREV_9271(ah) || AR_SREV_9100(ah) || AR_SREV_9340(ah) ||
- AR_SREV_9330(ah))
+ AR_SREV_9330(ah) || AR_SREV_9550(ah))
ah->is_pciexpress = false;
ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
@@ -675,10 +671,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (!AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
- /* disable ANI for 9340 */
- if (AR_SREV_9340(ah))
- ah->config.enable_ani = false;
-
ath9k_hw_init_mode_regs(ah);
if (!ah->is_pciexpress)
@@ -693,9 +685,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (r)
return r;
- if (ah->is_pciexpress)
- ath9k_hw_aspm_init(ah);
-
r = ath9k_hw_init_macaddr(ah);
if (r) {
ath_err(common, "Failed to initialize MAC address\n");
@@ -738,6 +727,7 @@ int ath9k_hw_init(struct ath_hw *ah)
case AR9300_DEVID_AR9485_PCIE:
case AR9300_DEVID_AR9330:
case AR9300_DEVID_AR9340:
+ case AR9300_DEVID_QCA955X:
case AR9300_DEVID_AR9580:
case AR9300_DEVID_AR9462:
break;
@@ -784,13 +774,25 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int i = 0;
+
REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
udelay(100);
REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
- while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
+ while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0) {
+
udelay(100);
+ if (WARN_ON_ONCE(i >= 100)) {
+ ath_err(common, "PLL4 meaurement not done\n");
+ break;
+ }
+
+ i++;
+ }
+
return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
}
EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
@@ -864,7 +866,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
/* program BB PLL phase_shift */
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x1);
- } else if (AR_SREV_9340(ah)) {
+ } else if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
u32 regval, pll2_divint, pll2_divfrac, refdiv;
REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
@@ -878,9 +880,15 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
pll2_divfrac = 0x1eb85;
refdiv = 3;
} else {
- pll2_divint = 88;
- pll2_divfrac = 0;
- refdiv = 5;
+ if (AR_SREV_9340(ah)) {
+ pll2_divint = 88;
+ pll2_divfrac = 0;
+ refdiv = 5;
+ } else {
+ pll2_divint = 0x11;
+ pll2_divfrac = 0x26666;
+ refdiv = 1;
+ }
}
regval = REG_READ(ah, AR_PHY_PLL_MODE);
@@ -893,8 +901,12 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
udelay(100);
regval = REG_READ(ah, AR_PHY_PLL_MODE);
- regval = (regval & 0x80071fff) | (0x1 << 30) | (0x1 << 13) |
- (0x4 << 26) | (0x18 << 19);
+ if (AR_SREV_9340(ah))
+ regval = (regval & 0x80071fff) | (0x1 << 30) |
+ (0x1 << 13) | (0x4 << 26) | (0x18 << 19);
+ else
+ regval = (regval & 0x80071fff) | (0x3 << 30) |
+ (0x1 << 13) | (0x4 << 26) | (0x60 << 19);
REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
REG_WRITE(ah, AR_PHY_PLL_MODE,
REG_READ(ah, AR_PHY_PLL_MODE) & 0xfffeffff);
@@ -905,7 +917,8 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
- if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah))
+ if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
+ AR_SREV_9550(ah))
udelay(1000);
/* Switch the core clock for ar9271 to 117Mhz */
@@ -918,7 +931,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
- if (AR_SREV_9340(ah)) {
+ if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
if (ah->is_clk_25mhz) {
REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
@@ -942,7 +955,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
AR_IMR_RXORN |
AR_IMR_BCNMISC;
- if (AR_SREV_9340(ah))
+ if (AR_SREV_9340(ah) || AR_SREV_9550(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
if (AR_SREV_9300_20_OR_LATER(ah)) {
@@ -1359,6 +1372,9 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
}
}
+ if (ath9k_hw_mci_is_enabled(ah))
+ ar9003_mci_check_gpm_offset(ah);
+
REG_WRITE(ah, AR_RTC_RC, rst_flags);
REGWRITE_BUFFER_FLUSH(ah);
@@ -1443,9 +1459,6 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
break;
}
- if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
- REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
-
return ret;
}
@@ -1468,6 +1481,9 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
return false;
ah->chip_fullsleep = false;
+
+ if (AR_SREV_9330(ah))
+ ar9003_hw_internal_regulator_apply(ah);
ath9k_hw_init_pll(ah, chan);
ath9k_hw_set_rfmode(ah, chan);
@@ -1718,8 +1734,8 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
ath9k_hw_loadnf(ah, ah->curchan);
ath9k_hw_start_nfcal(ah, true);
- if ((ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && ar9003_mci_is_ready(ah))
- ar9003_mci_2g5g_switch(ah, true);
+ if (ath9k_hw_mci_is_enabled(ah))
+ ar9003_mci_2g5g_switch(ah, false);
if (AR_SREV_9271(ah))
ar9002_hw_load_ani_reg(ah, chan);
@@ -1739,10 +1755,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u64 tsf = 0;
int i, r;
bool start_mci_reset = false;
- bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
bool save_fullsleep = ah->chip_fullsleep;
- if (mci) {
+ if (ath9k_hw_mci_is_enabled(ah)) {
start_mci_reset = ar9003_mci_start_reset(ah, chan);
if (start_mci_reset)
return 0;
@@ -1771,7 +1786,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
return r;
}
- if (mci)
+ if (ath9k_hw_mci_is_enabled(ah))
ar9003_mci_stop_bt(ah, save_fullsleep);
saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
@@ -1829,7 +1844,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (r)
return r;
- if (mci)
+ if (ath9k_hw_mci_is_enabled(ah))
ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);
/*
@@ -1924,7 +1939,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_set_dma(ah);
- REG_WRITE(ah, AR_OBS, 8);
+ if (!ath9k_hw_mci_is_enabled(ah))
+ REG_WRITE(ah, AR_OBS, 8);
if (ah->config.rx_intr_mitigation) {
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
@@ -1945,10 +1961,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (!ath9k_hw_init_cal(ah, chan))
return -EIO;
- ath9k_hw_loadnf(ah, chan);
- ath9k_hw_start_nfcal(ah, true);
-
- if (mci && ar9003_mci_end_reset(ah, chan, caldata))
+ if (ath9k_hw_mci_is_enabled(ah) && ar9003_mci_end_reset(ah, chan, caldata))
return -EIO;
ENABLE_REGWRITE_BUFFER(ah);
@@ -1983,7 +1996,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
}
#ifdef __BIG_ENDIAN
- else if (AR_SREV_9330(ah) || AR_SREV_9340(ah))
+ else if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
+ AR_SREV_9550(ah))
REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
else
REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
@@ -1993,9 +2007,12 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ath9k_hw_btcoex_is_enabled(ah))
ath9k_hw_btcoex_enable(ah);
- if (mci)
+ if (ath9k_hw_mci_is_enabled(ah))
ar9003_mci_check_bt(ah);
+ ath9k_hw_loadnf(ah, chan);
+ ath9k_hw_start_nfcal(ah, true);
+
if (AR_SREV_9300_20_OR_LATER(ah)) {
ar9003_hw_bb_watchdog_config(ah);
@@ -2016,39 +2033,35 @@ EXPORT_SYMBOL(ath9k_hw_reset);
* Notify Power Mgt is disabled in self-generated frames.
* If requested, force chip to sleep.
*/
-static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
+static void ath9k_set_power_sleep(struct ath_hw *ah)
{
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
- if (setChip) {
- if (AR_SREV_9462(ah)) {
- REG_WRITE(ah, AR_TIMER_MODE,
- REG_READ(ah, AR_TIMER_MODE) & 0xFFFFFF00);
- REG_WRITE(ah, AR_NDP2_TIMER_MODE, REG_READ(ah,
- AR_NDP2_TIMER_MODE) & 0xFFFFFF00);
- REG_WRITE(ah, AR_SLP32_INC,
- REG_READ(ah, AR_SLP32_INC) & 0xFFF00000);
- /* xxx Required for WLAN only case ? */
- REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
- udelay(100);
- }
- /*
- * Clear the RTC force wake bit to allow the
- * mac to go to sleep.
- */
- REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
+ if (AR_SREV_9462(ah)) {
+ REG_CLR_BIT(ah, AR_TIMER_MODE, 0xff);
+ REG_CLR_BIT(ah, AR_NDP2_TIMER_MODE, 0xff);
+ REG_CLR_BIT(ah, AR_SLP32_INC, 0xfffff);
+ /* xxx Required for WLAN only case ? */
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
+ udelay(100);
+ }
- if (AR_SREV_9462(ah))
- udelay(100);
+ /*
+ * Clear the RTC force wake bit to allow the
+ * mac to go to sleep.
+ */
+ REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
- if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
- REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
+ if (ath9k_hw_mci_is_enabled(ah))
+ udelay(100);
- /* Shutdown chip. Active low */
- if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah)) {
- REG_CLR_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN);
- udelay(2);
- }
+ if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
+
+ /* Shutdown chip. Active low */
+ if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah)) {
+ REG_CLR_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN);
+ udelay(2);
}
/* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
@@ -2061,44 +2074,38 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
* frames. If request, set power mode of chip to
* auto/normal. Duration in units of 128us (1/8 TU).
*/
-static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
+static void ath9k_set_power_network_sleep(struct ath_hw *ah)
{
- u32 val;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
- if (setChip) {
- struct ath9k_hw_capabilities *pCap = &ah->caps;
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
- /* Set WakeOnInterrupt bit; clear ForceWake bit */
- REG_WRITE(ah, AR_RTC_FORCE_WAKE,
- AR_RTC_FORCE_WAKE_ON_INT);
- } else {
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ /* Set WakeOnInterrupt bit; clear ForceWake bit */
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE,
+ AR_RTC_FORCE_WAKE_ON_INT);
+ } else {
- /* When chip goes into network sleep, it could be waken
- * up by MCI_INT interrupt caused by BT's HW messages
- * (LNA_xxx, CONT_xxx) which chould be in a very fast
- * rate (~100us). This will cause chip to leave and
- * re-enter network sleep mode frequently, which in
- * consequence will have WLAN MCI HW to generate lots of
- * SYS_WAKING and SYS_SLEEPING messages which will make
- * BT CPU to busy to process.
- */
- if (AR_SREV_9462(ah)) {
- val = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_EN) &
- ~AR_MCI_INTERRUPT_RX_HW_MSG_MASK;
- REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, val);
- }
- /*
- * Clear the RTC force wake bit to allow the
- * mac to go to sleep.
- */
- REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
- AR_RTC_FORCE_WAKE_EN);
-
- if (AR_SREV_9462(ah))
- udelay(30);
- }
+ /* When the chip goes into network sleep, it can be woken
+ * up by an MCI_INT interrupt caused by BT's HW messages
+ * (LNA_xxx, CONT_xxx), which can arrive at a very fast
+ * rate (~100us). This makes the chip leave and
+ * re-enter network sleep mode frequently, which in
+ * consequence makes the WLAN MCI HW generate lots of
+ * SYS_WAKING and SYS_SLEEPING messages, keeping the
+ * BT CPU too busy to process them.
+ */
+ if (ath9k_hw_mci_is_enabled(ah))
+ REG_CLR_BIT(ah, AR_MCI_INTERRUPT_RX_MSG_EN,
+ AR_MCI_INTERRUPT_RX_HW_MSG_MASK);
+ /*
+ * Clear the RTC force wake bit to allow the
+ * mac to go to sleep.
+ */
+ REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
+
+ if (ath9k_hw_mci_is_enabled(ah))
+ udelay(30);
}
/* Clear Bit 14 of AR_WA after putting chip into Net Sleep mode. */
@@ -2106,7 +2113,7 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
}
-static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
+static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
{
u32 val;
int i;
@@ -2117,37 +2124,38 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
udelay(10);
}
- if (setChip) {
- if ((REG_READ(ah, AR_RTC_STATUS) &
- AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) {
- if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
- return false;
- }
- if (!AR_SREV_9300_20_OR_LATER(ah))
- ath9k_hw_init_pll(ah, NULL);
+ if ((REG_READ(ah, AR_RTC_STATUS) &
+ AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) {
+ if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
+ return false;
}
- if (AR_SREV_9100(ah))
- REG_SET_BIT(ah, AR_RTC_RESET,
- AR_RTC_RESET_EN);
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ath9k_hw_init_pll(ah, NULL);
+ }
+ if (AR_SREV_9100(ah))
+ REG_SET_BIT(ah, AR_RTC_RESET,
+ AR_RTC_RESET_EN);
+
+ REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
+ AR_RTC_FORCE_WAKE_EN);
+ udelay(50);
+ if (ath9k_hw_mci_is_enabled(ah))
+ ar9003_mci_set_power_awake(ah);
+
+ for (i = POWER_UP_TIME / 50; i > 0; i--) {
+ val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
+ if (val == AR_RTC_STATUS_ON)
+ break;
+ udelay(50);
REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN);
- udelay(50);
-
- for (i = POWER_UP_TIME / 50; i > 0; i--) {
- val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
- if (val == AR_RTC_STATUS_ON)
- break;
- udelay(50);
- REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
- AR_RTC_FORCE_WAKE_EN);
- }
- if (i == 0) {
- ath_err(ath9k_hw_common(ah),
- "Failed to wakeup in %uus\n",
- POWER_UP_TIME / 20);
- return false;
- }
+ }
+ if (i == 0) {
+ ath_err(ath9k_hw_common(ah),
+ "Failed to wakeup in %uus\n",
+ POWER_UP_TIME / 20);
+ return false;
}
REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
@@ -2158,7 +2166,7 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
{
struct ath_common *common = ath9k_hw_common(ah);
- int status = true, setChip = true;
+ int status = true;
static const char *modes[] = {
"AWAKE",
"FULL-SLEEP",
@@ -2174,25 +2182,17 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
switch (mode) {
case ATH9K_PM_AWAKE:
- status = ath9k_hw_set_power_awake(ah, setChip);
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
- REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
-
+ status = ath9k_hw_set_power_awake(ah);
break;
case ATH9K_PM_FULL_SLEEP:
- if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ if (ath9k_hw_mci_is_enabled(ah))
ar9003_mci_set_full_sleep(ah);
- ath9k_set_power_sleep(ah, setChip);
+ ath9k_set_power_sleep(ah);
ah->chip_fullsleep = true;
break;
case ATH9K_PM_NETWORK_SLEEP:
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
- REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
-
- ath9k_set_power_network_sleep(ah, setChip);
+ ath9k_set_power_network_sleep(ah);
break;
default:
ath_err(common, "Unknown power mode %u\n", mode);
@@ -2585,6 +2585,14 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
}
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ pCap->hw_caps |= ATH9K_HW_WOW_DEVICE_CAPABLE |
+ ATH9K_HW_WOW_PATTERN_MATCH_EXACT;
+
+ if (AR_SREV_9280(ah))
+ pCap->hw_caps |= ATH9K_HW_WOW_PATTERN_MATCH_DWORD;
+ }
+
return 0;
}
@@ -2762,6 +2770,9 @@ EXPORT_SYMBOL(ath9k_hw_setrxfilter);
bool ath9k_hw_phy_disable(struct ath_hw *ah)
{
+ if (ath9k_hw_mci_is_enabled(ah))
+ ar9003_mci_bt_gain_ctrl(ah);
+
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
return false;
@@ -2901,9 +2912,9 @@ void ath9k_hw_reset_tsf(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_reset_tsf);
-void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting)
+void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set)
{
- if (setting)
+ if (set)
ah->misc_mode |= AR_PCU_TX_ADD_TSF;
else
ah->misc_mode &= ~AR_PCU_TX_ADD_TSF;
@@ -3147,6 +3158,7 @@ static struct {
{ AR_SREV_VERSION_9340, "9340" },
{ AR_SREV_VERSION_9485, "9485" },
{ AR_SREV_VERSION_9462, "9462" },
+ { AR_SREV_VERSION_9550, "9550" },
};
/* For devices with external radios */
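
The ar9003_get_pll_sqsum_dvc() hunk above turns an unbounded wait on PLL4_MEAS_DONE into a bounded poll that warns and bails out if the bit never sets. The same shape as a stand-alone sketch; the 100-iteration, 100 us budget mirrors the hunk, and example_poll_meas_done is not a real ath9k function.

    /* Bounded register poll mirroring the hunk above; illustrative only. */
    static bool example_poll_meas_done(struct ath_hw *ah)
    {
    	int i;

    	for (i = 0; i < 100; i++) {
    		if (REG_READ(ah, PLL4) & PLL4_MEAS_DONE)
    			return true;	/* measurement finished */
    		udelay(100);
    	}

    	WARN_ON_ONCE(1);	/* hardware never reported completion */
    	return false;
    }
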
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index b620c55..dd0c146 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -48,6 +48,7 @@
#define AR9300_DEVID_AR9580 0x0033
#define AR9300_DEVID_AR9462 0x0034
#define AR9300_DEVID_AR9330 0x0035
+#define AR9300_DEVID_QCA955X 0x0038
#define AR5416_AR9100_DEVID 0x000b
@@ -179,6 +180,37 @@
#define PAPRD_TABLE_SZ 24
#define PAPRD_IDEAL_AGC2_PWR_RANGE 0xe0
+/*
+ * Wake on Wireless
+ */
+
+/* Keep Alive Frame */
+#define KAL_FRAME_LEN 28
+#define KAL_FRAME_TYPE 0x2 /* data frame */
+#define KAL_FRAME_SUB_TYPE 0x4 /* null data frame */
+#define KAL_DURATION_ID 0x3d
+#define KAL_NUM_DATA_WORDS 6
+#define KAL_NUM_DESC_WORDS 12
+#define KAL_ANTENNA_MODE 1
+#define KAL_TO_DS 1
+#define KAL_DELAY 4 /* delay of 4ms between 2 KAL frames */
+#define KAL_TIMEOUT 900
+
+#define MAX_PATTERN_SIZE 256
+#define MAX_PATTERN_MASK_SIZE 32
+#define MAX_NUM_PATTERN 8
+#define MAX_NUM_USER_PATTERN 6 /* excluding the disassociate and
+ deauthenticate patterns */
+
+/*
+ * WoW trigger mapping to hardware code
+ */
+
+#define AH_WOW_USER_PATTERN_EN BIT(0)
+#define AH_WOW_MAGIC_PATTERN_EN BIT(1)
+#define AH_WOW_LINK_CHANGE BIT(2)
+#define AH_WOW_BEACON_MISS BIT(3)
+
enum ath_hw_txq_subtype {
ATH_TXQ_AC_BE = 0,
ATH_TXQ_AC_BK = 1,
@@ -211,8 +243,22 @@ enum ath9k_hw_caps {
ATH9K_HW_CAP_RTT = BIT(14),
ATH9K_HW_CAP_MCI = BIT(15),
ATH9K_HW_CAP_DFS = BIT(16),
+ ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(17),
+ ATH9K_HW_WOW_PATTERN_MATCH_EXACT = BIT(18),
+ ATH9K_HW_WOW_PATTERN_MATCH_DWORD = BIT(19),
};
+/*
+ * WoW device capabilities
+ * @ATH9K_HW_WOW_DEVICE_CAPABLE: device revision is capable of WoW.
+ * @ATH9K_HW_WOW_PATTERN_MATCH_EXACT: device is capable of matching
+ * an exact user defined pattern or de-authentication/disassoc pattern.
+ * @ATH9K_HW_WOW_PATTERN_MATCH_DWORD: device requires the first four
+ * bytes of the pattern to match for user defined patterns as well as
+ * de-authentication and disassociation patterns, for all possible
+ * frames of those types.
+ */
+
struct ath9k_hw_capabilities {
u32 hw_caps; /* ATH9K_HW_CAP_* from ath9k_hw_caps */
u16 rts_aggr_limit;
@@ -814,17 +860,20 @@ struct ath_hw {
struct ar5416IniArray iniBank7;
struct ar5416IniArray iniAddac;
struct ar5416IniArray iniPcieSerdes;
+#ifdef CONFIG_PM_SLEEP
+ struct ar5416IniArray iniPcieSerdesWow;
+#endif
struct ar5416IniArray iniPcieSerdesLowPower;
struct ar5416IniArray iniModesFastClock;
struct ar5416IniArray iniAdditional;
struct ar5416IniArray iniModesRxGain;
+ struct ar5416IniArray ini_modes_rx_gain_bounds;
struct ar5416IniArray iniModesTxGain;
struct ar5416IniArray iniCckfirNormal;
struct ar5416IniArray iniCckfirJapan2484;
struct ar5416IniArray ini_japan2484;
struct ar5416IniArray iniModes_9271_ANI_reg;
struct ar5416IniArray ini_radio_post_sys2ant;
- struct ar5416IniArray ini_BTCOEX_MAX_TXPWR;
struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT];
struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT];
@@ -862,6 +911,9 @@ struct ath_hw {
/* Enterprise mode cap */
u32 ent_mode;
+#ifdef CONFIG_PM_SLEEP
+ u32 wow_event_mask;
+#endif
bool is_clk_25mhz;
int (*get_mac_revision)(void);
int (*external_reset)(void);
@@ -942,7 +994,7 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah);
u64 ath9k_hw_gettsf64(struct ath_hw *ah);
void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
void ath9k_hw_reset_tsf(struct ath_hw *ah);
-void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
+void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
void ath9k_hw_init_global_settings(struct ath_hw *ah);
u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
void ath9k_hw_set11nmac2040(struct ath_hw *ah);
@@ -1020,16 +1072,8 @@ void ar9002_hw_attach_ops(struct ath_hw *ah);
void ar9003_hw_attach_ops(struct ath_hw *ah);
void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan);
-/*
- * ANI work can be shared between all families but a next
- * generation implementation of ANI will be used only for AR9003 only
- * for now as the other families still need to be tested with the same
- * next generation ANI. Feel free to start testing it though for the
- * older families (AR5008, AR9001, AR9002) by using modparam_force_new_ani.
- */
-extern int modparam_force_new_ani;
+
void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning);
-void ath9k_hw_proc_mib_event(struct ath_hw *ah);
void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan);
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
@@ -1037,6 +1081,12 @@ static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
{
return ah->btcoex_hw.enabled;
}
+static inline bool ath9k_hw_mci_is_enabled(struct ath_hw *ah)
+{
+ return ah->common.btcoex_enabled &&
+ (ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
+}
void ath9k_hw_btcoex_enable(struct ath_hw *ah);
static inline enum ath_btcoex_scheme
ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
@@ -1048,6 +1098,10 @@ static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
{
return false;
}
+static inline bool ath9k_hw_mci_is_enabled(struct ath_hw *ah)
+{
+ return false;
+}
static inline void ath9k_hw_btcoex_enable(struct ath_hw *ah)
{
}
@@ -1058,6 +1112,37 @@ ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
+
+#ifdef CONFIG_PM_SLEEP
+const char *ath9k_hw_wow_event_to_string(u32 wow_event);
+void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
+ u8 *user_mask, int pattern_count,
+ int pattern_len);
+u32 ath9k_hw_wow_wakeup(struct ath_hw *ah);
+void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable);
+#else
+static inline const char *ath9k_hw_wow_event_to_string(u32 wow_event)
+{
+ return NULL;
+}
+static inline void ath9k_hw_wow_apply_pattern(struct ath_hw *ah,
+ u8 *user_pattern,
+ u8 *user_mask,
+ int pattern_count,
+ int pattern_len)
+{
+}
+static inline u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
+{
+ return 0;
+}
+static inline void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
+{
+}
+#endif
+
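
/*
 * Editor's sketch (not part of the patch): with the declarations above, a
 * suspend path could install a single user pattern and then arm the WoW
 * hardware. The pattern contents and the helper name are hypothetical; only
 * ath9k_hw_wow_apply_pattern() and ath9k_hw_wow_enable() come from this
 * header.
 */
static inline void ath9k_wow_example_arm(struct ath_hw *ah)
{
	u8 pattern[MAX_PATTERN_SIZE] = { 0 };
	u8 mask[MAX_PATTERN_MASK_SIZE] = { 0 };

	/* Install pattern slot 0, matching the first 16 bytes of a frame. */
	ath9k_hw_wow_apply_pattern(ah, pattern, mask, 0, 16);

	/* Arm user-pattern and magic-packet wakeups. */
	ath9k_hw_wow_enable(ah, AH_WOW_USER_PATTERN_EN |
				AH_WOW_MAGIC_PATTERN_EN);
}
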
#define ATH9K_CLOCK_RATE_CCK 22
#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index dee9e09..f337121 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -434,6 +434,7 @@ static int ath9k_init_queues(struct ath_softc *sc)
for (i = 0; i < WME_NUM_AC; i++) {
sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
sc->tx.txq_map[i]->mac80211_qnum = i;
+ sc->tx.txq_max_pending[i] = ATH_MAX_QDEPTH;
}
return 0;
}
@@ -489,6 +490,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
+ sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
sc->config.txpowlimit = ATH_TXPOWER_MAX;
memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
sc->beacon.slottime = ATH9K_SLOT_TIME_9;
@@ -557,9 +559,15 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
spin_lock_init(&sc->debug.samp_lock);
#endif
tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
- tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
+ tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
(unsigned long)sc);
+ INIT_WORK(&sc->hw_reset_work, ath_reset_work);
+ INIT_WORK(&sc->hw_check_work, ath_hw_check);
+ INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
+ INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
+ setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);
+
/*
* Cache line size is used to size and align various
* structures used to communicate with the hardware.
@@ -590,6 +598,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ath9k_cmn_init_crypto(sc->sc_ah);
ath9k_init_misc(sc);
+ if (common->bus_ops->aspm_init)
+ common->bus_ops->aspm_init(common);
+
return 0;
err_btcoex:
@@ -703,6 +714,24 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+#ifdef CONFIG_PM_SLEEP
+
+ if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
+ device_can_wakeup(sc->dev)) {
+ hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT;
+ hw->wiphy->wowlan.n_patterns = MAX_NUM_USER_PATTERN;
+ hw->wiphy->wowlan.pattern_min_len = 1;
+ hw->wiphy->wowlan.pattern_max_len = MAX_PATTERN_SIZE;
+ }
+
+ atomic_set(&sc->wow_sleep_proc_intr, -1);
+ atomic_set(&sc->wow_got_bmiss_intr, -1);
+
+#endif
+
hw->queues = 4;
hw->max_rates = 4;
hw->channel_change_time = 5000;
@@ -782,11 +811,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
ARRAY_SIZE(ath9k_tpt_blink));
#endif
- INIT_WORK(&sc->hw_reset_work, ath_reset_work);
- INIT_WORK(&sc->hw_check_work, ath_hw_check);
- INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
- INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
-
/* Register with mac80211 */
error = ieee80211_register_hw(hw);
if (error)
@@ -805,9 +829,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
goto error_world;
}
- setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);
- sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
-
ath_init_leds(sc);
ath_start_rfkill_poll(sc);
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
new file mode 100644
index 0000000..d4549e9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+/*
+ * TX polling - checks if the TX engine is stuck somewhere
+ * and issues a chip reset if so.
+ */
+void ath_tx_complete_poll_work(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc,
+ tx_complete_work.work);
+ struct ath_txq *txq;
+ int i;
+ bool needreset = false;
+#ifdef CONFIG_ATH9K_DEBUGFS
+ sc->tx_complete_poll_work_seen++;
+#endif
+
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i)) {
+ txq = &sc->tx.txq[i];
+ ath_txq_lock(sc, txq);
+ if (txq->axq_depth) {
+ if (txq->axq_tx_inprogress) {
+ needreset = true;
+ ath_txq_unlock(sc, txq);
+ break;
+ } else {
+ txq->axq_tx_inprogress = true;
+ }
+ }
+ ath_txq_unlock_complete(sc, txq);
+ }
+
+ if (needreset) {
+ ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
+ "tx hung, resetting the chip\n");
+ ath9k_queue_reset(sc, RESET_TYPE_TX_HANG);
+ return;
+ }
+
+ ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
+ msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
+}
+
+/*
+ * Checks if the BB/MAC is hung.
+ */
+void ath_hw_check(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ unsigned long flags;
+ int busy;
+ u8 is_alive, nbeacon = 1;
+ enum ath_reset_type type;
+
+ ath9k_ps_wakeup(sc);
+ is_alive = ath9k_hw_check_alive(sc->sc_ah);
+
+ if (is_alive && !AR_SREV_9300(sc->sc_ah))
+ goto out;
+ else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
+ ath_dbg(common, RESET,
+ "DCU stuck is detected. Schedule chip reset\n");
+ type = RESET_TYPE_MAC_HANG;
+ goto sched_reset;
+ }
+
+ spin_lock_irqsave(&common->cc_lock, flags);
+ busy = ath_update_survey_stats(sc);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
+
+ ath_dbg(common, RESET, "Possible baseband hang, busy=%d (try %d)\n",
+ busy, sc->hw_busy_count + 1);
+ if (busy >= 99) {
+ if (++sc->hw_busy_count >= 3) {
+ type = RESET_TYPE_BB_HANG;
+ goto sched_reset;
+ }
+ } else if (busy >= 0) {
+ sc->hw_busy_count = 0;
+ nbeacon = 3;
+ }
+
+ ath_start_rx_poll(sc, nbeacon);
+ goto out;
+
+sched_reset:
+ ath9k_queue_reset(sc, type);
+out:
+ ath9k_ps_restore(sc);
+}
+
+/*
+ * PLL-WAR for AR9485/AR9340
+ */
+static bool ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum)
+{
+ static int count;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ if (pll_sqsum >= 0x40000) {
+ count++;
+ if (count == 3) {
+ ath_dbg(common, RESET, "PLL WAR, resetting the chip\n");
+ ath9k_queue_reset(sc, RESET_TYPE_PLL_HANG);
+ count = 0;
+ return true;
+ }
+ } else {
+ count = 0;
+ }
+
+ return false;
+}
+
+void ath_hw_pll_work(struct work_struct *work)
+{
+ u32 pll_sqsum;
+ struct ath_softc *sc = container_of(work, struct ath_softc,
+ hw_pll_work.work);
+ /*
+ * Ensure that the PLL WAR is executed only
+ * after the STA is associated, or after
+ * beaconing has started on interfaces that
+ * use beacons.
+ */
+ if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
+ return;
+
+ ath9k_ps_wakeup(sc);
+ pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);
+ ath9k_ps_restore(sc);
+ if (ath_hw_pll_rx_hang_check(sc, pll_sqsum))
+ return;
+
+ ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
+ msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
+}
+
+/*
+ * RX Polling - monitors baseband hangs.
+ */
+void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon)
+{
+ if (!AR_SREV_9300(sc->sc_ah))
+ return;
+
+ if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
+ return;
+
+ mod_timer(&sc->rx_poll_timer, jiffies + msecs_to_jiffies
+ (nbeacon * sc->cur_beacon_conf.beacon_interval));
+}
+
+void ath_rx_poll(unsigned long data)
+{
+ struct ath_softc *sc = (struct ath_softc *)data;
+
+ ieee80211_queue_work(sc->hw, &sc->hw_check_work);
+}
+
+/*
+ * PA Pre-distortion.
+ */
+static void ath_paprd_activate(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ int chain;
+
+ if (!caldata || !caldata->paprd_done)
+ return;
+
+ ath9k_ps_wakeup(sc);
+ ar9003_paprd_enable(ah, false);
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->txchainmask & BIT(chain)))
+ continue;
+
+ ar9003_paprd_populate_single_table(ah, caldata, chain);
+ }
+
+ ar9003_paprd_enable(ah, true);
+ ath9k_ps_restore(sc);
+}
+
+static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int chain)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_tx_control txctl;
+ int time_left;
+
+ memset(&txctl, 0, sizeof(txctl));
+ txctl.txq = sc->tx.txq_map[WME_AC_BE];
+
+ memset(tx_info, 0, sizeof(*tx_info));
+ tx_info->band = hw->conf.channel->band;
+ tx_info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ tx_info->control.rates[0].idx = 0;
+ tx_info->control.rates[0].count = 1;
+ tx_info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
+ tx_info->control.rates[1].idx = -1;
+
+ init_completion(&sc->paprd_complete);
+ txctl.paprd = BIT(chain);
+
+ if (ath_tx_start(hw, skb, &txctl) != 0) {
+ ath_dbg(common, CALIBRATE, "PAPRD TX failed\n");
+ dev_kfree_skb_any(skb);
+ return false;
+ }
+
+ time_left = wait_for_completion_timeout(&sc->paprd_complete,
+ msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
+
+ if (!time_left)
+ ath_dbg(common, CALIBRATE,
+ "Timeout waiting for paprd training on TX chain %d\n",
+ chain);
+
+ return !!time_left;
+}
+
+void ath_paprd_calibrate(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc, paprd_work);
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb = NULL;
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int ftype;
+ int chain_ok = 0;
+ int chain;
+ int len = 1800;
+
+ if (!caldata)
+ return;
+
+ ath9k_ps_wakeup(sc);
+
+ if (ar9003_paprd_init_table(ah) < 0)
+ goto fail_paprd;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ goto fail_paprd;
+
+ skb_put(skb, len);
+ memset(skb->data, 0, len);
+ hdr = (struct ieee80211_hdr *)skb->data;
+ ftype = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC;
+ hdr->frame_control = cpu_to_le16(ftype);
+ hdr->duration_id = cpu_to_le16(10);
+ memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
+
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->txchainmask & BIT(chain)))
+ continue;
+
+ chain_ok = 0;
+
+ ath_dbg(common, CALIBRATE,
+ "Sending PAPRD frame for thermal measurement on chain %d\n",
+ chain);
+ if (!ath_paprd_send_frame(sc, skb, chain))
+ goto fail_paprd;
+
+ ar9003_paprd_setup_gain_table(ah, chain);
+
+ ath_dbg(common, CALIBRATE,
+ "Sending PAPRD training frame on chain %d\n", chain);
+ if (!ath_paprd_send_frame(sc, skb, chain))
+ goto fail_paprd;
+
+ if (!ar9003_paprd_is_done(ah)) {
+ ath_dbg(common, CALIBRATE,
+ "PAPRD not yet done on chain %d\n", chain);
+ break;
+ }
+
+ if (ar9003_paprd_create_curve(ah, caldata, chain)) {
+ ath_dbg(common, CALIBRATE,
+ "PAPRD create curve failed on chain %d\n",
+ chain);
+ break;
+ }
+
+ chain_ok = 1;
+ }
+ kfree_skb(skb);
+
+ if (chain_ok) {
+ caldata->paprd_done = true;
+ ath_paprd_activate(sc);
+ }
+
+fail_paprd:
+ ath9k_ps_restore(sc);
+}
+
+/*
+ * This routine performs periodic noise floor calibration,
+ * which is used to adjust and optimize chip performance. It
+ * takes environmental changes (location, temperature) into account.
+ * When the task is complete, it reschedules itself at the
+ * appropriate interval that was calculated.
+ */
+void ath_ani_calibrate(unsigned long data)
+{
+ struct ath_softc *sc = (struct ath_softc *)data;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ bool longcal = false;
+ bool shortcal = false;
+ bool aniflag = false;
+ unsigned int timestamp = jiffies_to_msecs(jiffies);
+ u32 cal_interval, short_cal_interval, long_cal_interval;
+ unsigned long flags;
+
+ if (ah->caldata && ah->caldata->nfcal_interference)
+ long_cal_interval = ATH_LONG_CALINTERVAL_INT;
+ else
+ long_cal_interval = ATH_LONG_CALINTERVAL;
+
+ short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
+ ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
+
+ /* Only calibrate if awake */
+ if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
+ goto set_timer;
+
+ ath9k_ps_wakeup(sc);
+
+ /* Long calibration runs independently of short calibration. */
+ if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
+ longcal = true;
+ common->ani.longcal_timer = timestamp;
+ }
+
+ /* Short calibration applies only while caldone is false */
+ if (!common->ani.caldone) {
+ if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
+ shortcal = true;
+ common->ani.shortcal_timer = timestamp;
+ common->ani.resetcal_timer = timestamp;
+ }
+ } else {
+ if ((timestamp - common->ani.resetcal_timer) >=
+ ATH_RESTART_CALINTERVAL) {
+ common->ani.caldone = ath9k_hw_reset_calvalid(ah);
+ if (common->ani.caldone)
+ common->ani.resetcal_timer = timestamp;
+ }
+ }
+
+ /* Verify whether we must check ANI */
+ if (sc->sc_ah->config.enable_ani
+ && (timestamp - common->ani.checkani_timer) >=
+ ah->config.ani_poll_interval) {
+ aniflag = true;
+ common->ani.checkani_timer = timestamp;
+ }
+
+ /* Call ANI routine if necessary */
+ if (aniflag) {
+ spin_lock_irqsave(&common->cc_lock, flags);
+ ath9k_hw_ani_monitor(ah, ah->curchan);
+ ath_update_survey_stats(sc);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
+ }
+
+ /* Perform calibration if necessary */
+ if (longcal || shortcal) {
+ common->ani.caldone =
+ ath9k_hw_calibrate(ah, ah->curchan,
+ ah->rxchainmask, longcal);
+ }
+
+ ath_dbg(common, ANI,
+ "Calibration @%lu finished: %s %s %s, caldone: %s\n",
+ jiffies,
+ longcal ? "long" : "", shortcal ? "short" : "",
+ aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");
+
+ ath9k_debug_samp_bb_mac(sc);
+ ath9k_ps_restore(sc);
+
+set_timer:
+ /*
+ * Set timer interval based on previous results.
+ * The interval must be the shortest necessary to satisfy ANI,
+ * short calibration and long calibration.
+ */
+ cal_interval = ATH_LONG_CALINTERVAL;
+ if (sc->sc_ah->config.enable_ani)
+ cal_interval = min(cal_interval,
+ (u32)ah->config.ani_poll_interval);
+ if (!common->ani.caldone)
+ cal_interval = min(cal_interval, (u32)short_cal_interval);
+
+ mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
+ if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->caldata) {
+ if (!ah->caldata->paprd_done)
+ ieee80211_queue_work(sc->hw, &sc->paprd_work);
+ else if (!ah->paprd_table_write_done)
+ ath_paprd_activate(sc);
+ }
+}
+
+void ath_start_ani(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ unsigned long timestamp = jiffies_to_msecs(jiffies);
+
+ if (common->disable_ani ||
+ !test_bit(SC_OP_ANI_RUN, &sc->sc_flags) ||
+ (sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+ return;
+
+ common->ani.longcal_timer = timestamp;
+ common->ani.shortcal_timer = timestamp;
+ common->ani.checkani_timer = timestamp;
+
+ ath_dbg(common, ANI, "Starting ANI\n");
+ mod_timer(&common->ani.timer,
+ jiffies + msecs_to_jiffies((u32)ah->config.ani_poll_interval));
+}
+
+void ath_stop_ani(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ ath_dbg(common, ANI, "Stopping ANI\n");
+ del_timer_sync(&common->ani.timer);
+}
+
+void ath_check_ani(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+
+ /*
+ * Check for the various conditions in which ANI has to
+ * be stopped.
+ */
+ if (ah->opmode == NL80211_IFTYPE_ADHOC) {
+ if (!cur_conf->enable_beacon)
+ goto stop_ani;
+ } else if (ah->opmode == NL80211_IFTYPE_AP) {
+ if (!cur_conf->enable_beacon) {
+ /*
+ * Disable ANI only when there are no
+ * associated stations.
+ */
+ if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
+ goto stop_ani;
+ }
+ } else if (ah->opmode == NL80211_IFTYPE_STATION) {
+ if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
+ goto stop_ani;
+ }
+
+ if (!test_bit(SC_OP_ANI_RUN, &sc->sc_flags)) {
+ set_bit(SC_OP_ANI_RUN, &sc->sc_flags);
+ ath_start_ani(sc);
+ }
+
+ return;
+
+stop_ani:
+ clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
+ ath_stop_ani(sc);
+}
+
+void ath_update_survey_nf(struct ath_softc *sc, int channel)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_channel *chan = &ah->channels[channel];
+ struct survey_info *survey = &sc->survey[channel];
+
+ if (chan->noisefloor) {
+ survey->filled |= SURVEY_INFO_NOISE_DBM;
+ survey->noise = ath9k_hw_getchan_noise(ah, chan);
+ }
+}
+
+/*
+ * Updates the survey statistics and returns the busy time since last
+ * update in %, if the measurement duration was long enough for the
+ * result to be useful, -1 otherwise.
+ */
+int ath_update_survey_stats(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int pos = ah->curchan - &ah->channels[0];
+ struct survey_info *survey = &sc->survey[pos];
+ struct ath_cycle_counters *cc = &common->cc_survey;
+ unsigned int div = common->clockrate * 1000;
+ int ret = 0;
+
+ if (!ah->curchan)
+ return -1;
+
+ if (ah->power_mode == ATH9K_PM_AWAKE)
+ ath_hw_cycle_counters_update(common);
+
+ if (cc->cycles > 0) {
+ survey->filled |= SURVEY_INFO_CHANNEL_TIME |
+ SURVEY_INFO_CHANNEL_TIME_BUSY |
+ SURVEY_INFO_CHANNEL_TIME_RX |
+ SURVEY_INFO_CHANNEL_TIME_TX;
+ survey->channel_time += cc->cycles / div;
+ survey->channel_time_busy += cc->rx_busy / div;
+ survey->channel_time_rx += cc->rx_frame / div;
+ survey->channel_time_tx += cc->tx_frame / div;
+ }
+
+ if (cc->cycles < div)
+ return -1;
+
+ if (cc->cycles > 0)
+ ret = cc->rx_busy * 100 / cc->cycles;
+
+ memset(cc, 0, sizeof(*cc));
+
+ ath_update_survey_nf(sc, pos);
+
+ return ret;
+}
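
/*
 * Editor's sketch (not part of the patch): ath_update_survey_stats() above
 * reports channel busy time as rx_busy cycles over total cycles, in percent,
 * and returns -1 unless at least one millisecond worth of clock cycles
 * (clockrate * 1000) has accumulated. A standalone restatement:
 */
static inline int ath9k_example_busy_percent(u32 rx_busy, u32 cycles,
					     u32 clockrate_mhz)
{
	u32 div = clockrate_mhz * 1000;

	if (cycles < div)
		return -1;	/* window too short to be meaningful */

	return rx_busy * 100 / cycles;
}
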
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 04ef775..7990cd5 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -810,7 +810,7 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
return;
}
- if (AR_SREV_9340(ah))
+ if (AR_SREV_9340(ah) || AR_SREV_9550(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
async_mask = AR_INTR_MAC_IRQ;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 21c9556..0eba36d 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -646,6 +646,7 @@ enum ath9k_rx_filter {
ATH9K_RX_FILTER_PHYRADAR = 0x00002000,
ATH9K_RX_FILTER_MCAST_BCAST_ALL = 0x00008000,
ATH9K_RX_FILTER_CONTROL_WRAPPER = 0x00080000,
+ ATH9K_RX_FILTER_4ADDRESS = 0x00100000,
};
#define ATH9K_RATESERIES_RTS_CTS 0x0001
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index dfa78e8..6049d8b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -19,7 +19,10 @@
#include "ath9k.h"
#include "btcoex.h"
-static u8 parse_mpdudensity(u8 mpdudensity)
+static void ath9k_set_assoc_state(struct ath_softc *sc,
+ struct ieee80211_vif *vif);
+
+u8 ath9k_parse_mpdudensity(u8 mpdudensity)
{
/*
* 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
@@ -101,6 +104,7 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
spin_lock(&common->cc_lock);
ath_hw_cycle_counters_update(common);
memset(&common->cc_survey, 0, sizeof(common->cc_survey));
+ memset(&common->cc_ani, 0, sizeof(common->cc_ani));
spin_unlock(&common->cc_lock);
}
@@ -129,6 +133,8 @@ void ath9k_ps_restore(struct ath_softc *sc)
PS_WAIT_FOR_PSPOLL_DATA |
PS_WAIT_FOR_TX_ACK))) {
mode = ATH9K_PM_NETWORK_SLEEP;
+ if (ath9k_hw_btcoex_is_enabled(sc->sc_ah))
+ ath9k_btcoex_stop_gen_timer(sc);
} else {
goto unlock;
}
@@ -143,90 +149,17 @@ void ath9k_ps_restore(struct ath_softc *sc)
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
}
-void ath_start_ani(struct ath_common *common)
-{
- struct ath_hw *ah = common->ah;
- unsigned long timestamp = jiffies_to_msecs(jiffies);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
-
- if (!(sc->sc_flags & SC_OP_ANI_RUN))
- return;
-
- if (sc->sc_flags & SC_OP_OFFCHANNEL)
- return;
-
- common->ani.longcal_timer = timestamp;
- common->ani.shortcal_timer = timestamp;
- common->ani.checkani_timer = timestamp;
-
- mod_timer(&common->ani.timer,
- jiffies +
- msecs_to_jiffies((u32)ah->config.ani_poll_interval));
-}
-
-static void ath_update_survey_nf(struct ath_softc *sc, int channel)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath9k_channel *chan = &ah->channels[channel];
- struct survey_info *survey = &sc->survey[channel];
-
- if (chan->noisefloor) {
- survey->filled |= SURVEY_INFO_NOISE_DBM;
- survey->noise = ath9k_hw_getchan_noise(ah, chan);
- }
-}
-
-/*
- * Updates the survey statistics and returns the busy time since last
- * update in %, if the measurement duration was long enough for the
- * result to be useful, -1 otherwise.
- */
-static int ath_update_survey_stats(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- int pos = ah->curchan - &ah->channels[0];
- struct survey_info *survey = &sc->survey[pos];
- struct ath_cycle_counters *cc = &common->cc_survey;
- unsigned int div = common->clockrate * 1000;
- int ret = 0;
-
- if (!ah->curchan)
- return -1;
-
- if (ah->power_mode == ATH9K_PM_AWAKE)
- ath_hw_cycle_counters_update(common);
-
- if (cc->cycles > 0) {
- survey->filled |= SURVEY_INFO_CHANNEL_TIME |
- SURVEY_INFO_CHANNEL_TIME_BUSY |
- SURVEY_INFO_CHANNEL_TIME_RX |
- SURVEY_INFO_CHANNEL_TIME_TX;
- survey->channel_time += cc->cycles / div;
- survey->channel_time_busy += cc->rx_busy / div;
- survey->channel_time_rx += cc->rx_frame / div;
- survey->channel_time_tx += cc->tx_frame / div;
- }
-
- if (cc->cycles < div)
- return -1;
-
- if (cc->cycles > 0)
- ret = cc->rx_busy * 100 / cc->cycles;
-
- memset(cc, 0, sizeof(*cc));
-
- ath_update_survey_nf(sc, pos);
-
- return ret;
-}
-
static void __ath_cancel_work(struct ath_softc *sc)
{
cancel_work_sync(&sc->paprd_work);
cancel_work_sync(&sc->hw_check_work);
cancel_delayed_work_sync(&sc->tx_complete_work);
cancel_delayed_work_sync(&sc->hw_pll_work);
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ if (ath9k_hw_mci_is_enabled(sc->sc_ah))
+ cancel_work_sync(&sc->mci_work);
+#endif
}
static void ath_cancel_work(struct ath_softc *sc)
@@ -235,26 +168,39 @@ static void ath_cancel_work(struct ath_softc *sc)
cancel_work_sync(&sc->hw_reset_work);
}
+static void ath_restart_work(struct ath_softc *sc)
+{
+ ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
+
+ if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9485(sc->sc_ah) ||
+ AR_SREV_9550(sc->sc_ah))
+ ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
+ msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
+
+ ath_start_rx_poll(sc, 3);
+ ath_start_ani(sc);
+}
+
static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
{
struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- bool ret;
+ bool ret = true;
ieee80211_stop_queues(sc->hw);
sc->hw_busy_count = 0;
- del_timer_sync(&common->ani.timer);
+ ath_stop_ani(sc);
del_timer_sync(&sc->rx_poll_timer);
ath9k_debug_samp_bb_mac(sc);
ath9k_hw_disable_interrupts(ah);
- ret = ath_drain_all_txq(sc, retry_tx);
-
if (!ath_stoprecv(sc))
ret = false;
+ if (!ath_drain_all_txq(sc, retry_tx))
+ ret = false;
+
if (!flush) {
if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
ath_rx_tasklet(sc, 1, true);
@@ -270,6 +216,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
+ unsigned long flags;
if (ath_startrecv(sc) != 0) {
ath_err(common, "Unable to restart recv logic\n");
@@ -278,36 +225,30 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
ath9k_cmn_update_txpow(ah, sc->curtxpow,
sc->config.txpowlimit, &sc->curtxpow);
+
+ clear_bit(SC_OP_HW_RESET, &sc->sc_flags);
ath9k_hw_set_interrupts(ah);
ath9k_hw_enable_interrupts(ah);
- if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) && start) {
- if (sc->sc_flags & SC_OP_BEACONS)
- ath_set_beacon(sc);
-
- ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
- ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
- ath_start_rx_poll(sc, 3);
- if (!common->disable_ani)
- ath_start_ani(common);
- }
-
- if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3) {
- struct ath_hw_antcomb_conf div_ant_conf;
- u8 lna_conf;
+ if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) && start) {
+ if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
+ goto work;
- ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
+ ath9k_set_beacon(sc);
- if (sc->ant_rx == 1)
- lna_conf = ATH_ANT_DIV_COMB_LNA1;
- else
- lna_conf = ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.main_lna_conf = lna_conf;
- div_ant_conf.alt_lna_conf = lna_conf;
-
- ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf);
+ if (ah->opmode == NL80211_IFTYPE_STATION &&
+ test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ }
+ work:
+ ath_restart_work(sc);
}
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3)
+ ath_ant_comb_update(sc);
+
ieee80211_wake_queues(sc->hw);
return true;
@@ -327,7 +268,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
spin_lock_bh(&sc->sc_pcu_lock);
- if (!(sc->sc_flags & SC_OP_OFFCHANNEL)) {
+ if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
fastcc = false;
caldata = &sc->caldata;
}
@@ -370,7 +311,7 @@ static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
{
int r;
- if (sc->sc_flags & SC_OP_INVALID)
+ if (test_bit(SC_OP_INVALID, &sc->sc_flags))
return -EIO;
r = ath_reset_internal(sc, hchan, false);
@@ -378,262 +319,11 @@ static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
return r;
}
-static void ath_paprd_activate(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath9k_hw_cal_data *caldata = ah->caldata;
- int chain;
-
- if (!caldata || !caldata->paprd_done)
- return;
-
- ath9k_ps_wakeup(sc);
- ar9003_paprd_enable(ah, false);
- for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
- if (!(ah->txchainmask & BIT(chain)))
- continue;
-
- ar9003_paprd_populate_single_table(ah, caldata, chain);
- }
-
- ar9003_paprd_enable(ah, true);
- ath9k_ps_restore(sc);
-}
-
-static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int chain)
-{
- struct ieee80211_hw *hw = sc->hw;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_tx_control txctl;
- int time_left;
-
- memset(&txctl, 0, sizeof(txctl));
- txctl.txq = sc->tx.txq_map[WME_AC_BE];
-
- memset(tx_info, 0, sizeof(*tx_info));
- tx_info->band = hw->conf.channel->band;
- tx_info->flags |= IEEE80211_TX_CTL_NO_ACK;
- tx_info->control.rates[0].idx = 0;
- tx_info->control.rates[0].count = 1;
- tx_info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
- tx_info->control.rates[1].idx = -1;
-
- init_completion(&sc->paprd_complete);
- txctl.paprd = BIT(chain);
-
- if (ath_tx_start(hw, skb, &txctl) != 0) {
- ath_dbg(common, CALIBRATE, "PAPRD TX failed\n");
- dev_kfree_skb_any(skb);
- return false;
- }
-
- time_left = wait_for_completion_timeout(&sc->paprd_complete,
- msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
-
- if (!time_left)
- ath_dbg(common, CALIBRATE,
- "Timeout waiting for paprd training on TX chain %d\n",
- chain);
-
- return !!time_left;
-}
-
-void ath_paprd_calibrate(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc, paprd_work);
- struct ieee80211_hw *hw = sc->hw;
- struct ath_hw *ah = sc->sc_ah;
- struct ieee80211_hdr *hdr;
- struct sk_buff *skb = NULL;
- struct ath9k_hw_cal_data *caldata = ah->caldata;
- struct ath_common *common = ath9k_hw_common(ah);
- int ftype;
- int chain_ok = 0;
- int chain;
- int len = 1800;
-
- if (!caldata)
- return;
-
- ath9k_ps_wakeup(sc);
-
- if (ar9003_paprd_init_table(ah) < 0)
- goto fail_paprd;
-
- skb = alloc_skb(len, GFP_KERNEL);
- if (!skb)
- goto fail_paprd;
-
- skb_put(skb, len);
- memset(skb->data, 0, len);
- hdr = (struct ieee80211_hdr *)skb->data;
- ftype = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC;
- hdr->frame_control = cpu_to_le16(ftype);
- hdr->duration_id = cpu_to_le16(10);
- memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
- memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
- memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
-
- for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
- if (!(ah->txchainmask & BIT(chain)))
- continue;
-
- chain_ok = 0;
-
- ath_dbg(common, CALIBRATE,
- "Sending PAPRD frame for thermal measurement on chain %d\n",
- chain);
- if (!ath_paprd_send_frame(sc, skb, chain))
- goto fail_paprd;
-
- ar9003_paprd_setup_gain_table(ah, chain);
-
- ath_dbg(common, CALIBRATE,
- "Sending PAPRD training frame on chain %d\n", chain);
- if (!ath_paprd_send_frame(sc, skb, chain))
- goto fail_paprd;
-
- if (!ar9003_paprd_is_done(ah)) {
- ath_dbg(common, CALIBRATE,
- "PAPRD not yet done on chain %d\n", chain);
- break;
- }
-
- if (ar9003_paprd_create_curve(ah, caldata, chain)) {
- ath_dbg(common, CALIBRATE,
- "PAPRD create curve failed on chain %d\n",
- chain);
- break;
- }
-
- chain_ok = 1;
- }
- kfree_skb(skb);
-
- if (chain_ok) {
- caldata->paprd_done = true;
- ath_paprd_activate(sc);
- }
-
-fail_paprd:
- ath9k_ps_restore(sc);
-}
-
-/*
- * This routine performs the periodic noise floor calibration function
- * that is used to adjust and optimize the chip performance. This
- * takes environmental changes (location, temperature) into account.
- * When the task is complete, it reschedules itself depending on the
- * appropriate interval that was calculated.
- */
-void ath_ani_calibrate(unsigned long data)
-{
- struct ath_softc *sc = (struct ath_softc *)data;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- bool longcal = false;
- bool shortcal = false;
- bool aniflag = false;
- unsigned int timestamp = jiffies_to_msecs(jiffies);
- u32 cal_interval, short_cal_interval, long_cal_interval;
- unsigned long flags;
-
- if (ah->caldata && ah->caldata->nfcal_interference)
- long_cal_interval = ATH_LONG_CALINTERVAL_INT;
- else
- long_cal_interval = ATH_LONG_CALINTERVAL;
-
- short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
- ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
-
- /* Only calibrate if awake */
- if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
- goto set_timer;
-
- ath9k_ps_wakeup(sc);
-
- /* Long calibration runs independently of short calibration. */
- if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
- longcal = true;
- common->ani.longcal_timer = timestamp;
- }
-
- /* Short calibration applies only while caldone is false */
- if (!common->ani.caldone) {
- if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
- shortcal = true;
- common->ani.shortcal_timer = timestamp;
- common->ani.resetcal_timer = timestamp;
- }
- } else {
- if ((timestamp - common->ani.resetcal_timer) >=
- ATH_RESTART_CALINTERVAL) {
- common->ani.caldone = ath9k_hw_reset_calvalid(ah);
- if (common->ani.caldone)
- common->ani.resetcal_timer = timestamp;
- }
- }
-
- /* Verify whether we must check ANI */
- if (sc->sc_ah->config.enable_ani
- && (timestamp - common->ani.checkani_timer) >=
- ah->config.ani_poll_interval) {
- aniflag = true;
- common->ani.checkani_timer = timestamp;
- }
-
- /* Call ANI routine if necessary */
- if (aniflag) {
- spin_lock_irqsave(&common->cc_lock, flags);
- ath9k_hw_ani_monitor(ah, ah->curchan);
- ath_update_survey_stats(sc);
- spin_unlock_irqrestore(&common->cc_lock, flags);
- }
-
- /* Perform calibration if necessary */
- if (longcal || shortcal) {
- common->ani.caldone =
- ath9k_hw_calibrate(ah, ah->curchan,
- ah->rxchainmask, longcal);
- }
-
- ath_dbg(common, ANI,
- "Calibration @%lu finished: %s %s %s, caldone: %s\n",
- jiffies,
- longcal ? "long" : "", shortcal ? "short" : "",
- aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");
-
- ath9k_ps_restore(sc);
-
-set_timer:
- /*
- * Set timer interval based on previous results.
- * The interval must be the shortest necessary to satisfy ANI,
- * short calibration and long calibration.
- */
- ath9k_debug_samp_bb_mac(sc);
- cal_interval = ATH_LONG_CALINTERVAL;
- if (sc->sc_ah->config.enable_ani)
- cal_interval = min(cal_interval,
- (u32)ah->config.ani_poll_interval);
- if (!common->ani.caldone)
- cal_interval = min(cal_interval, (u32)short_cal_interval);
-
- mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
- if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->caldata) {
- if (!ah->caldata->paprd_done)
- ieee80211_queue_work(sc->hw, &sc->paprd_work);
- else if (!ah->paprd_table_write_done)
- ath_paprd_activate(sc);
- }
-}
-
static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
struct ieee80211_vif *vif)
{
struct ath_node *an;
+ u8 density;
an = (struct ath_node *)sta->drv_priv;
#ifdef CONFIG_ATH9K_DEBUGFS
@@ -648,7 +338,8 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
ath_tx_node_init(sc, an);
an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
sta->ht_cap.ampdu_factor);
- an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
+ density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
+ an->mpdudensity = density;
}
}
@@ -667,13 +358,13 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
ath_tx_node_cleanup(sc, an);
}
-
void ath9k_tasklet(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
-
+ enum ath_reset_type type;
+ unsigned long flags;
u32 status = sc->intrstatus;
u32 rxmask;
@@ -682,20 +373,17 @@ void ath9k_tasklet(unsigned long data)
if ((status & ATH9K_INT_FATAL) ||
(status & ATH9K_INT_BB_WATCHDOG)) {
-#ifdef CONFIG_ATH9K_DEBUGFS
- enum ath_reset_type type;
if (status & ATH9K_INT_FATAL)
type = RESET_TYPE_FATAL_INT;
else
type = RESET_TYPE_BB_WATCHDOG;
- RESET_STAT_INC(sc, type);
-#endif
- ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
+ ath9k_queue_reset(sc, type);
goto out;
}
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
/*
* TSF sync does not look correct; remain awake to sync with
@@ -704,6 +392,7 @@ void ath9k_tasklet(unsigned long data)
ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n");
sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
}
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
@@ -765,15 +454,17 @@ irqreturn_t ath_isr(int irq, void *dev)
* touch anything. Note this can happen early
* on if the IRQ is shared.
*/
- if (sc->sc_flags & SC_OP_INVALID)
+ if (test_bit(SC_OP_INVALID, &sc->sc_flags))
return IRQ_NONE;
-
/* shared irq, not for us */
if (!ath9k_hw_intrpend(ah))
return IRQ_NONE;
+ if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
+ return IRQ_HANDLED;
+
/*
* Figure out the reason(s) for the interrupt. Note
* that the hal returns a pseudo-ISR that may include
@@ -796,6 +487,17 @@ irqreturn_t ath_isr(int irq, void *dev)
if (status & SCHED_INTR)
sched = true;
+#ifdef CONFIG_PM_SLEEP
+ if (status & ATH9K_INT_BMISS) {
+ if (atomic_read(&sc->wow_sleep_proc_intr) == 0) {
+ ath_dbg(common, ANY, "during WoW we got a BMISS\n");
+ atomic_inc(&sc->wow_got_bmiss_intr);
+ atomic_dec(&sc->wow_sleep_proc_intr);
+ }
+ ath_dbg(common, INTERRUPT, "beacon miss interrupt\n");
+ }
+#endif
+
/*
* If a FATAL or RXORN interrupt is received, we have to reset the
* chip immediately.
@@ -826,24 +528,6 @@ irqreturn_t ath_isr(int irq, void *dev)
ath9k_hw_set_interrupts(ah);
}
- if (status & ATH9K_INT_MIB) {
- /*
- * Disable interrupts until we service the MIB
- * interrupt; otherwise it will continue to
- * fire.
- */
- ath9k_hw_disable_interrupts(ah);
- /*
- * Let the hal handle the event. We assume
- * it will clear whatever condition caused
- * the interrupt.
- */
- spin_lock(&common->cc_lock);
- ath9k_hw_proc_mib_event(ah);
- spin_unlock(&common->cc_lock);
- ath9k_hw_enable_interrupts(ah);
- }
-
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
if (status & ATH9K_INT_TIM_TIMER) {
if (ATH_DBG_WARN_ON_ONCE(sc->ps_idle))
@@ -851,8 +535,10 @@ irqreturn_t ath_isr(int irq, void *dev)
/* Clear RxAbort bit so that we can
* receive frames */
ath9k_setpower(sc, ATH9K_PM_AWAKE);
+ spin_lock(&sc->sc_pm_lock);
ath9k_hw_setrxabort(sc->sc_ah, 0);
sc->ps_flags |= PS_WAIT_FOR_BEACON;
+ spin_unlock(&sc->sc_pm_lock);
}
chip_reset:
@@ -894,92 +580,20 @@ static int ath_reset(struct ath_softc *sc, bool retry_tx)
return r;
}
-void ath_reset_work(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work);
-
- ath_reset(sc, true);
-}
-
-void ath_hw_check(struct work_struct *work)
+void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type)
{
- struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- unsigned long flags;
- int busy;
- u8 is_alive, nbeacon = 1;
-
- ath9k_ps_wakeup(sc);
- is_alive = ath9k_hw_check_alive(sc->sc_ah);
-
- if (is_alive && !AR_SREV_9300(sc->sc_ah))
- goto out;
- else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
- ath_dbg(common, RESET,
- "DCU stuck is detected. Schedule chip reset\n");
- RESET_STAT_INC(sc, RESET_TYPE_MAC_HANG);
- goto sched_reset;
- }
-
- spin_lock_irqsave(&common->cc_lock, flags);
- busy = ath_update_survey_stats(sc);
- spin_unlock_irqrestore(&common->cc_lock, flags);
-
- ath_dbg(common, RESET, "Possible baseband hang, busy=%d (try %d)\n",
- busy, sc->hw_busy_count + 1);
- if (busy >= 99) {
- if (++sc->hw_busy_count >= 3) {
- RESET_STAT_INC(sc, RESET_TYPE_BB_HANG);
- goto sched_reset;
- }
- } else if (busy >= 0) {
- sc->hw_busy_count = 0;
- nbeacon = 3;
- }
-
- ath_start_rx_poll(sc, nbeacon);
- goto out;
-
-sched_reset:
+#ifdef CONFIG_ATH9K_DEBUGFS
+ RESET_STAT_INC(sc, type);
+#endif
+ set_bit(SC_OP_HW_RESET, &sc->sc_flags);
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
-out:
- ath9k_ps_restore(sc);
-}
-
-static void ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum)
-{
- static int count;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-
- if (pll_sqsum >= 0x40000) {
- count++;
- if (count == 3) {
- /* Rx is hung for more than 500ms. Reset it */
- ath_dbg(common, RESET, "Possible RX hang, resetting\n");
- RESET_STAT_INC(sc, RESET_TYPE_PLL_HANG);
- ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
- count = 0;
- }
- } else
- count = 0;
}
-void ath_hw_pll_work(struct work_struct *work)
+void ath_reset_work(struct work_struct *work)
{
- struct ath_softc *sc = container_of(work, struct ath_softc,
- hw_pll_work.work);
- u32 pll_sqsum;
-
- if (AR_SREV_9485(sc->sc_ah)) {
-
- ath9k_ps_wakeup(sc);
- pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);
- ath9k_ps_restore(sc);
-
- ath_hw_pll_rx_hang_check(sc, pll_sqsum);
+ struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work);
- ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
- }
+ ath_reset(sc, true);
}
/**********************/
@@ -1044,10 +658,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
ah->imask |= ATH9K_INT_CST;
- if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
- ah->imask |= ATH9K_INT_MCI;
+ ath_mci_enable(sc);
- sc->sc_flags &= ~SC_OP_INVALID;
+ clear_bit(SC_OP_INVALID, &sc->sc_flags);
sc->sc_ah->is_monitoring = false;
if (!ath_complete_reset(sc, false)) {
@@ -1070,8 +683,6 @@ static int ath9k_start(struct ieee80211_hw *hw)
spin_unlock_bh(&sc->sc_pcu_lock);
- ath9k_start_btcoex(sc);
-
if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
common->bus_ops->extn_synch_en(common);
@@ -1089,6 +700,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_tx_control txctl;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ unsigned long flags;
if (sc->ps_enabled) {
/*
@@ -1111,6 +723,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* completed and if needed, also for RX of buffered frames.
*/
ath9k_ps_wakeup(sc);
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
ath9k_hw_setrxabort(sc->sc_ah, 0);
if (ieee80211_is_pspoll(hdr->frame_control)) {
@@ -1126,6 +739,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* the ps_flags bit is cleared. We are just dropping
* the ps_usecount here.
*/
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
ath9k_ps_restore(sc);
}
@@ -1166,7 +780,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath_cancel_work(sc);
del_timer_sync(&sc->rx_poll_timer);
- if (sc->sc_flags & SC_OP_INVALID) {
+ if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
return;
@@ -1175,8 +789,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
/* Ensure HW is awake when we try to shut it down. */
ath9k_ps_wakeup(sc);
- ath9k_stop_btcoex(sc);
-
spin_lock_bh(&sc->sc_pcu_lock);
/* prevent tasklets to enable interrupts once we disable them */
@@ -1223,7 +835,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath9k_ps_restore(sc);
- sc->sc_flags |= SC_OP_INVALID;
+ set_bit(SC_OP_INVALID, &sc->sc_flags);
sc->ps_idle = prev_idle;
mutex_unlock(&sc->mutex);
@@ -1243,16 +855,6 @@ bool ath9k_uses_beacons(int type)
}
}
-static void ath9k_reclaim_beacon(struct ath_softc *sc,
- struct ieee80211_vif *vif)
-{
- struct ath_vif *avp = (void *)vif->drv_priv;
-
- ath9k_set_beaconing_status(sc, false);
- ath_beacon_return(sc, avp);
- ath9k_set_beaconing_status(sc, true);
-}
-
static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
struct ath9k_vif_iter_data *iter_data = data;
@@ -1284,6 +886,18 @@ static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
}
}
+static void ath9k_sta_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = data;
+ struct ath_vif *avp = (void *)vif->drv_priv;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (avp->primary_sta_vif)
+ ath9k_set_assoc_state(sc, vif);
+}
+
/* Called with sc->mutex held. */
void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -1317,21 +931,18 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_vif_iter_data iter_data;
+ enum nl80211_iftype old_opmode = ah->opmode;
ath9k_calculate_iter_data(hw, vif, &iter_data);
- /* Set BSSID mask. */
memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
ath_hw_setbssidmask(common);
- /* Set op-mode & TSF */
if (iter_data.naps > 0) {
- ath9k_hw_set_tsfadjust(ah, 1);
- sc->sc_flags |= SC_OP_TSF_RESET;
+ ath9k_hw_set_tsfadjust(ah, true);
ah->opmode = NL80211_IFTYPE_AP;
} else {
- ath9k_hw_set_tsfadjust(ah, 0);
- sc->sc_flags &= ~SC_OP_TSF_RESET;
+ ath9k_hw_set_tsfadjust(ah, false);
if (iter_data.nmeshes)
ah->opmode = NL80211_IFTYPE_MESH_POINT;
@@ -1343,70 +954,27 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
ah->opmode = NL80211_IFTYPE_STATION;
}
- /*
- * Enable MIB interrupts when there are hardware phy counters.
- */
- if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0) {
- if (ah->config.enable_ani)
- ah->imask |= ATH9K_INT_MIB;
+ ath9k_hw_setopmode(ah);
+
+ if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0)
ah->imask |= ATH9K_INT_TSFOOR;
- } else {
- ah->imask &= ~ATH9K_INT_MIB;
+ else
ah->imask &= ~ATH9K_INT_TSFOOR;
- }
ath9k_hw_set_interrupts(ah);
- /* Set up ANI */
- if (iter_data.naps > 0) {
- sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
-
- if (!common->disable_ani) {
- sc->sc_flags |= SC_OP_ANI_RUN;
- ath_start_ani(common);
- }
-
- } else {
- sc->sc_flags &= ~SC_OP_ANI_RUN;
- del_timer_sync(&common->ani.timer);
- }
-}
-
-/* Called with sc->mutex held, vif counts set up properly. */
-static void ath9k_do_vif_add_setup(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct ath_softc *sc = hw->priv;
-
- ath9k_calculate_summary_state(hw, vif);
-
- if (ath9k_uses_beacons(vif->type)) {
- /* Reserve a beacon slot for the vif */
- ath9k_set_beaconing_status(sc, false);
- ath_beacon_alloc(sc, vif);
- ath9k_set_beaconing_status(sc, true);
+ /*
+ * If we are changing the opmode to STATION,
+ * a beacon sync needs to be done.
+ */
+ if (ah->opmode == NL80211_IFTYPE_STATION &&
+ old_opmode == NL80211_IFTYPE_AP &&
+ test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
+ ieee80211_iterate_active_interfaces_atomic(sc->hw,
+ ath9k_sta_vif_iter, sc);
}
}
-void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon)
-{
- if (!AR_SREV_9300(sc->sc_ah))
- return;
-
- if (!(sc->sc_flags & SC_OP_PRIM_STA_VIF))
- return;
-
- mod_timer(&sc->rx_poll_timer, jiffies + msecs_to_jiffies
- (nbeacon * sc->cur_beacon_conf.beacon_interval));
-}
-
-void ath_rx_poll(unsigned long data)
-{
- struct ath_softc *sc = (struct ath_softc *)data;
-
- ieee80211_queue_work(sc->hw, &sc->hw_check_work);
-}
-
static int ath9k_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -1442,20 +1010,14 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
}
}
- if ((ah->opmode == NL80211_IFTYPE_ADHOC) ||
- ((vif->type == NL80211_IFTYPE_ADHOC) &&
- sc->nvifs > 0)) {
- ath_err(common, "Cannot create ADHOC interface when other"
- " interfaces already exist.\n");
- ret = -EINVAL;
- goto out;
- }
-
ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
sc->nvifs++;
- ath9k_do_vif_add_setup(hw, vif);
+ ath9k_calculate_summary_state(hw, vif);
+ if (ath9k_uses_beacons(vif->type))
+ ath9k_beacon_assign_slot(sc, vif);
+
out:
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
@@ -1472,18 +1034,10 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
int ret = 0;
ath_dbg(common, CONFIG, "Change Interface\n");
+
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
- /* See if new interface type is valid. */
- if ((new_type == NL80211_IFTYPE_ADHOC) &&
- (sc->nvifs > 1)) {
- ath_err(common, "When using ADHOC, it must be the only"
- " interface.\n");
- ret = -EINVAL;
- goto out;
- }
-
if (ath9k_uses_beacons(new_type) &&
!ath9k_uses_beacons(vif->type)) {
if (sc->nbcnvifs >= ATH_BCBUF) {
@@ -1493,15 +1047,16 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
}
}
- /* Clean up old vif stuff */
if (ath9k_uses_beacons(vif->type))
- ath9k_reclaim_beacon(sc, vif);
+ ath9k_beacon_remove_slot(sc, vif);
- /* Add new settings */
vif->type = new_type;
vif->p2p = p2p;
- ath9k_do_vif_add_setup(hw, vif);
+ ath9k_calculate_summary_state(hw, vif);
+ if (ath9k_uses_beacons(vif->type))
+ ath9k_beacon_assign_slot(sc, vif);
+
out:
ath9k_ps_restore(sc);
mutex_unlock(&sc->mutex);
@@ -1521,9 +1076,8 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
sc->nvifs--;
- /* Reclaim beacon resources */
if (ath9k_uses_beacons(vif->type))
- ath9k_reclaim_beacon(sc, vif);
+ ath9k_beacon_remove_slot(sc, vif);
ath9k_calculate_summary_state(hw, NULL);
@@ -1581,14 +1135,17 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
- if (sc->ps_idle)
+ if (sc->ps_idle) {
ath_cancel_work(sc);
- else
+ ath9k_stop_btcoex(sc);
+ } else {
+ ath9k_start_btcoex(sc);
/*
* The chip needs a reset to properly wake up from
* full sleep
*/
reset_channel = ah->chip_fullsleep;
+ }
}
/*
@@ -1626,11 +1183,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (ah->curchan)
old_pos = ah->curchan - &ah->channels[0];
- if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
- sc->sc_flags |= SC_OP_OFFCHANNEL;
- else
- sc->sc_flags &= ~SC_OP_OFFCHANNEL;
-
ath_dbg(common, CONFIG, "Set channel: %d MHz type: %d\n",
curchan->center_freq, conf->channel_type);
@@ -1672,6 +1224,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
ath_err(common, "Unable to set channel\n");
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
return -EINVAL;
}
@@ -1821,21 +1374,18 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw,
qi.tqi_aifs = params->aifs;
qi.tqi_cwmin = params->cw_min;
qi.tqi_cwmax = params->cw_max;
- qi.tqi_burstTime = params->txop;
+ qi.tqi_burstTime = params->txop * 32;
ath_dbg(common, CONFIG,
"Configure tx [queue/halq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
queue, txq->axq_qnum, params->aifs, params->cw_min,
params->cw_max, params->txop);
+ ath_update_max_aggr_framelen(sc, queue, qi.tqi_burstTime);
ret = ath_txq_update(sc, txq->axq_qnum, &qi);
if (ret)
ath_err(common, "TXQ Update failed\n");
- if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)
- if (queue == WME_AC_BE && !ret)
- ath_beaconq_config(sc);
-
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
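
/*
 * Editor's sketch (not part of the patch): the "* 32" added to tqi_burstTime
 * above reflects that mac80211 reports txop in units of 32 microseconds, so
 * the hardware queue burst time ends up in microseconds. A standalone
 * restatement of the assumed conversion, with a hypothetical helper name:
 */
static inline u32 ath9k_example_txop_to_usec(u16 txop)
{
	return (u32)txop * 32;	/* 802.11 TXOP units of 32 us -> microseconds */
}
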
@@ -1904,83 +1454,53 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
return ret;
}
-static void ath9k_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+
+static void ath9k_set_assoc_state(struct ath_softc *sc,
+ struct ieee80211_vif *vif)
{
- struct ath_softc *sc = data;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
struct ath_vif *avp = (void *)vif->drv_priv;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ unsigned long flags;
+
+ set_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags);
+ avp->primary_sta_vif = true;
/*
- * Skip iteration if primary station vif's bss info
- * was not changed
+ * Set the AID, BSSID and do beacon-sync only when
+ * the HW opmode is STATION.
+ *
+ * But the primary bit is set above in any case.
*/
- if (sc->sc_flags & SC_OP_PRIM_STA_VIF)
+ if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
return;
- if (bss_conf->assoc) {
- sc->sc_flags |= SC_OP_PRIM_STA_VIF;
- avp->primary_sta_vif = true;
- memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
- common->curaid = bss_conf->aid;
- ath9k_hw_write_associd(sc->sc_ah);
- ath_dbg(common, CONFIG, "Bss Info ASSOC %d, bssid: %pM\n",
- bss_conf->aid, common->curbssid);
- ath_beacon_config(sc, vif);
- /*
- * Request a re-configuration of Beacon related timers
- * on the receipt of the first Beacon frame (i.e.,
- * after time sync with the AP).
- */
- sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
- /* Reset rssi stats */
- sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
- sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
+ memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+ common->curaid = bss_conf->aid;
+ ath9k_hw_write_associd(sc->sc_ah);
- ath_start_rx_poll(sc, 3);
+ sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
+ sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
- if (!common->disable_ani) {
- sc->sc_flags |= SC_OP_ANI_RUN;
- ath_start_ani(common);
- }
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
- }
+ ath_dbg(common, CONFIG,
+ "Primary Station interface: %pM, BSSID: %pM\n",
+ vif->addr, common->curbssid);
}
-static void ath9k_config_bss(struct ath_softc *sc, struct ieee80211_vif *vif)
+static void ath9k_bss_assoc_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_softc *sc = data;
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- struct ath_vif *avp = (void *)vif->drv_priv;
- if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
+ if (test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
return;
- /* Reconfigure bss info */
- if (avp->primary_sta_vif && !bss_conf->assoc) {
- ath_dbg(common, CONFIG, "Bss Info DISASSOC %d, bssid %pM\n",
- common->curaid, common->curbssid);
- sc->sc_flags &= ~(SC_OP_PRIM_STA_VIF | SC_OP_BEACONS);
- avp->primary_sta_vif = false;
- memset(common->curbssid, 0, ETH_ALEN);
- common->curaid = 0;
- }
-
- ieee80211_iterate_active_interfaces_atomic(
- sc->hw, ath9k_bss_iter, sc);
-
- /*
- * None of station vifs are associated.
- * Clear bssid & aid
- */
- if (!(sc->sc_flags & SC_OP_PRIM_STA_VIF)) {
- ath9k_hw_write_associd(sc->sc_ah);
- /* Stop ANI */
- sc->sc_flags &= ~SC_OP_ANI_RUN;
- del_timer_sync(&common->ani.timer);
- del_timer_sync(&sc->rx_poll_timer);
- memset(&sc->caldata, 0, sizeof(sc->caldata));
- }
+ if (bss_conf->assoc)
+ ath9k_set_assoc_state(sc, vif);
}
static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
@@ -1988,6 +1508,11 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
+#define CHECK_ANI \
+ (BSS_CHANGED_ASSOC | \
+ BSS_CHANGED_IBSS | \
+ BSS_CHANGED_BEACON_ENABLED)
+
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -1998,53 +1523,41 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
if (changed & BSS_CHANGED_ASSOC) {
- ath9k_config_bss(sc, vif);
+ ath_dbg(common, CONFIG, "BSSID %pM Changed ASSOC %d\n",
+ bss_conf->bssid, bss_conf->assoc);
+
+ if (avp->primary_sta_vif && !bss_conf->assoc) {
+ clear_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags);
+ avp->primary_sta_vif = false;
- ath_dbg(common, CONFIG, "BSSID: %pM aid: 0x%x\n",
- common->curbssid, common->curaid);
+ if (ah->opmode == NL80211_IFTYPE_STATION)
+ clear_bit(SC_OP_BEACONS, &sc->sc_flags);
+ }
+
+ ieee80211_iterate_active_interfaces_atomic(sc->hw,
+ ath9k_bss_assoc_iter, sc);
+
+ if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags) &&
+ ah->opmode == NL80211_IFTYPE_STATION) {
+ memset(common->curbssid, 0, ETH_ALEN);
+ common->curaid = 0;
+ ath9k_hw_write_associd(sc->sc_ah);
+ }
}
if (changed & BSS_CHANGED_IBSS) {
- /* There can be only one vif available */
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
common->curaid = bss_conf->aid;
ath9k_hw_write_associd(sc->sc_ah);
-
- if (bss_conf->ibss_joined) {
- sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
-
- if (!common->disable_ani) {
- sc->sc_flags |= SC_OP_ANI_RUN;
- ath_start_ani(common);
- }
-
- } else {
- sc->sc_flags &= ~SC_OP_ANI_RUN;
- del_timer_sync(&common->ani.timer);
- del_timer_sync(&sc->rx_poll_timer);
- }
}
- /*
- * In case of AP mode, the HW TSF has to be reset
- * when the beacon interval changes.
- */
- if ((changed & BSS_CHANGED_BEACON_INT) &&
- (vif->type == NL80211_IFTYPE_AP))
- sc->sc_flags |= SC_OP_TSF_RESET;
-
- /* Configure beaconing (AP, IBSS, MESH) */
- if (ath9k_uses_beacons(vif->type) &&
- ((changed & BSS_CHANGED_BEACON) ||
- (changed & BSS_CHANGED_BEACON_ENABLED) ||
- (changed & BSS_CHANGED_BEACON_INT))) {
- ath9k_set_beaconing_status(sc, false);
- if (bss_conf->enable_beacon)
- ath_beacon_alloc(sc, vif);
- else
- avp->is_bslot_active = false;
- ath_beacon_config(sc, vif);
- ath9k_set_beaconing_status(sc, true);
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) ||
+ (changed & BSS_CHANGED_BEACON_INT)) {
+ if (ah->opmode == NL80211_IFTYPE_AP &&
+ bss_conf->enable_beacon)
+ ath9k_set_tsfadjust(sc, vif);
+ if (ath9k_allow_beacon_config(sc, vif))
+ ath9k_beacon_config(sc, vif, changed);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -2066,8 +1579,13 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
}
+ if (changed & CHECK_ANI)
+ ath_check_ani(sc);
+
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
+
+#undef CHECK_ANI
}
static u64 ath9k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
@@ -2223,7 +1741,7 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
return;
}
- if (sc->sc_flags & SC_OP_INVALID) {
+ if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
return;
@@ -2296,10 +1814,11 @@ static int ath9k_tx_last_beacon(struct ieee80211_hw *hw)
if (!vif)
return 0;
- avp = (void *)vif->drv_priv;
- if (!avp->is_bslot_active)
+ if (!vif->bss_conf.enable_beacon)
return 0;
+ avp = (void *)vif->drv_priv;
+
if (!sc->beacon.tx_processed && !edma) {
tasklet_disable(&sc->bcon_tasklet);
@@ -2353,12 +1872,29 @@ static u32 fill_chainmask(u32 cap, u32 new)
return filled;
}
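+/*
+ * Accept only chain 0, chains 0+1 or all three chains as an RX antenna
+ * mask; chain 1 alone is allowed only on single-chain hardware.
+ */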
+static bool validate_antenna_mask(struct ath_hw *ah, u32 val)
+{
+ switch (val & 0x7) {
+ case 0x1:
+ case 0x3:
+ case 0x7:
+ return true;
+ case 0x2:
+ return (ah->caps.rx_chainmask == 1);
+ default:
+ return false;
+ }
+}
+
static int ath9k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
- if (!rx_ant || !tx_ant)
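+ /* Fold the TX antennas into the RX mask on multi-chain hardware */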
+ if (ah->caps.rx_chainmask != 1)
+ rx_ant |= tx_ant;
+
+ if (!validate_antenna_mask(ah, rx_ant) || !tx_ant)
return -EINVAL;
sc->ant_rx = rx_ant;
@@ -2388,6 +1924,490 @@ static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
return 0;
}
+#ifdef CONFIG_ATH9K_DEBUGFS
+
+/* Ethtool support for get-stats */
+
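+/* Expand a stat name into the four per-AC (BE/BK/VI/VO) string labels */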
+#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
+static const char ath9k_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "tx_pkts_nic",
+ "tx_bytes_nic",
+ "rx_pkts_nic",
+ "rx_bytes_nic",
+ AMKSTR(d_tx_pkts),
+ AMKSTR(d_tx_bytes),
+ AMKSTR(d_tx_mpdus_queued),
+ AMKSTR(d_tx_mpdus_completed),
+ AMKSTR(d_tx_mpdu_xretries),
+ AMKSTR(d_tx_aggregates),
+ AMKSTR(d_tx_ampdus_queued_hw),
+ AMKSTR(d_tx_ampdus_queued_sw),
+ AMKSTR(d_tx_ampdus_completed),
+ AMKSTR(d_tx_ampdu_retries),
+ AMKSTR(d_tx_ampdu_xretries),
+ AMKSTR(d_tx_fifo_underrun),
+ AMKSTR(d_tx_op_exceeded),
+ AMKSTR(d_tx_timer_expiry),
+ AMKSTR(d_tx_desc_cfg_err),
+ AMKSTR(d_tx_data_underrun),
+ AMKSTR(d_tx_delim_underrun),
+
+ "d_rx_decrypt_crc_err",
+ "d_rx_phy_err",
+ "d_rx_mic_err",
+ "d_rx_pre_delim_crc_err",
+ "d_rx_post_delim_crc_err",
+ "d_rx_decrypt_busy_err",
+
+ "d_rx_phyerr_radar",
+ "d_rx_phyerr_ofdm_timing",
+ "d_rx_phyerr_cck_timing",
+
+};
+#define ATH9K_SSTATS_LEN ARRAY_SIZE(ath9k_gstrings_stats)
+
+static void ath9k_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+{
+ if (sset == ETH_SS_STATS)
+ memcpy(data, *ath9k_gstrings_stats,
+ sizeof(ath9k_gstrings_stats));
+}
+
+static int ath9k_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ATH9K_SSTATS_LEN;
+ return 0;
+}
+
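+/*
+ * Helpers for ath9k_get_et_stats(): AWDATA fills one value per WME
+ * access class (BE/BK/VI/VO) from the per-queue TX statistics,
+ * AWDATA_RX fills a single value from the RX statistics.
+ */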
+#define PR_QNUM(_n) (sc->tx.txq_map[_n]->axq_qnum)
+#define AWDATA(elem) \
+ do { \
+ data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].elem; \
+ data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].elem; \
+ data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].elem; \
+ data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].elem; \
+ } while (0)
+
+#define AWDATA_RX(elem) \
+ do { \
+ data[i++] = sc->debug.stats.rxstats.elem; \
+ } while (0)
+
+static void ath9k_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ath_softc *sc = hw->priv;
+ int i = 0;
+
+ data[i++] = (sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].tx_pkts_all +
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].tx_pkts_all +
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].tx_pkts_all +
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].tx_pkts_all);
+ data[i++] = (sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].tx_bytes_all +
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].tx_bytes_all +
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].tx_bytes_all +
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].tx_bytes_all);
+ AWDATA_RX(rx_pkts_all);
+ AWDATA_RX(rx_bytes_all);
+
+ AWDATA(tx_pkts_all);
+ AWDATA(tx_bytes_all);
+ AWDATA(queued);
+ AWDATA(completed);
+ AWDATA(xretries);
+ AWDATA(a_aggr);
+ AWDATA(a_queued_hw);
+ AWDATA(a_queued_sw);
+ AWDATA(a_completed);
+ AWDATA(a_retries);
+ AWDATA(a_xretries);
+ AWDATA(fifo_underrun);
+ AWDATA(xtxop);
+ AWDATA(timer_exp);
+ AWDATA(desc_cfg_err);
+ AWDATA(data_underrun);
+ AWDATA(delim_underrun);
+
+ AWDATA_RX(decrypt_crc_err);
+ AWDATA_RX(phy_err);
+ AWDATA_RX(mic_err);
+ AWDATA_RX(pre_delim_crc_err);
+ AWDATA_RX(post_delim_crc_err);
+ AWDATA_RX(decrypt_busy_err);
+
+ AWDATA_RX(phy_err_stats[ATH9K_PHYERR_RADAR]);
+ AWDATA_RX(phy_err_stats[ATH9K_PHYERR_OFDM_TIMING]);
+ AWDATA_RX(phy_err_stats[ATH9K_PHYERR_CCK_TIMING]);
+
+ WARN_ON(i != ATH9K_SSTATS_LEN);
+}
+
+/* End of ethtool get-stats functions */
+
+#endif
+
+
+#ifdef CONFIG_PM_SLEEP
+
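+/*
+ * Map the cfg80211 WoW configuration to the AH_WOW_* trigger bits
+ * that are later passed to ath9k_hw_wow_enable().
+ */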
+static void ath9k_wow_map_triggers(struct ath_softc *sc,
+ struct cfg80211_wowlan *wowlan,
+ u32 *wow_triggers)
+{
+ if (wowlan->disconnect)
+ *wow_triggers |= AH_WOW_LINK_CHANGE |
+ AH_WOW_BEACON_MISS;
+ if (wowlan->magic_pkt)
+ *wow_triggers |= AH_WOW_MAGIC_PATTERN_EN;
+
+ if (wowlan->n_patterns)
+ *wow_triggers |= AH_WOW_USER_PATTERN_EN;
+
+ sc->wow_enabled = *wow_triggers;
+
+}
+
+static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_capabilities *pcaps = &ah->caps;
+ int pattern_count = 0;
+ int i, byte_cnt;
+ u8 dis_deauth_pattern[MAX_PATTERN_SIZE];
+ u8 dis_deauth_mask[MAX_PATTERN_SIZE];
+
+ memset(dis_deauth_pattern, 0, MAX_PATTERN_SIZE);
+ memset(dis_deauth_mask, 0, MAX_PATTERN_SIZE);
+
+ /*
+ * Create Disassociate / Deauthenticate packet filter
+ *
+ * 2 bytes 2 byte 6 bytes 6 bytes 6 bytes
+ * +--------------+----------+---------+--------+--------+----
+ * + Frame Control+ Duration + DA + SA + BSSID +
+ * +--------------+----------+---------+--------+--------+----
+ *
+ * The above is the management frame format for disassociate/
+ * deauthenticate pattern; from this we need to match the first byte
+ * of 'Frame Control' and the DA, SA and BSSID fields
+ * (skipping the 2nd byte of FC and the Duration field).
+ *
+ * Disassociate pattern
+ * --------------------
+ * Frame control = 00 00 1010
+ * DA, SA, BSSID = x:x:x:x:x:x
+ * Pattern will be A0000000 | x:x:x:x:x:x | x:x:x:x:x:x
+ * | x:x:x:x:x:x -- 22 bytes
+ *
+ * Deauthenticate pattern
+ * ----------------------
+ * Frame control = 00 00 1100
+ * DA, SA, BSSID = x:x:x:x:x:x
+ * Pattern will be C0000000 | x:x:x:x:x:x | x:x:x:x:x:x
+ * | x:x:x:x:x:x -- 22 bytes
+ */
+
+ /* Create Disassociate Pattern first */
+
+ byte_cnt = 0;
+
+ /* Fill out the mask with all FF's */
+
+ for (i = 0; i < MAX_PATTERN_MASK_SIZE; i++)
+ dis_deauth_mask[i] = 0xff;
+
+ /* copy the first byte of frame control field */
+ dis_deauth_pattern[byte_cnt] = 0xa0;
+ byte_cnt++;
+
+ /* skip 2nd byte of frame control and Duration field */
+ byte_cnt += 3;
+
+ /*
+ * The destination MAC address need not be matched; it can be a
+ * broadcast address or a unicast address directed to this station.
+ */
+ byte_cnt += 6;
+
+ /* copy the source mac address */
+ memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
+
+ byte_cnt += 6;
+
+ /* copy the bssid, it's the same as the source mac address */
+
+ memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
+
+ /* Create Disassociate pattern mask */
+
+ if (pcaps->hw_caps & ATH9K_HW_WOW_PATTERN_MATCH_EXACT) {
+
+ if (pcaps->hw_caps & ATH9K_HW_WOW_PATTERN_MATCH_DWORD) {
+ /*
+ * For AR9280, because of a hardware limitation, the
+ * first 4 bytes have to be matched for all patterns.
+ * The mask for disassociation and de-auth pattern
+ * matching needs to enable the first 4 bytes.
+ * The duration field also needs to be filled.
+ */
+ dis_deauth_mask[0] = 0xf0;
+
+ /*
+ * fill in duration field
+ * FIXME: what is the exact value?
+ */
+ dis_deauth_pattern[2] = 0xff;
+ dis_deauth_pattern[3] = 0xff;
+ } else {
+ dis_deauth_mask[0] = 0xfe;
+ }
+
+ dis_deauth_mask[1] = 0x03;
+ dis_deauth_mask[2] = 0xc0;
+ } else {
+ dis_deauth_mask[0] = 0xef;
+ dis_deauth_mask[1] = 0x3f;
+ dis_deauth_mask[2] = 0x00;
+ dis_deauth_mask[3] = 0xfc;
+ }
+
+ ath_dbg(common, WOW, "Adding disassoc/deauth patterns for WoW\n");
+
+ ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
+ pattern_count, byte_cnt);
+
+ pattern_count++;
+ /*
+ * For the de-authenticate pattern, only the first byte of the
+ * frame control field changes from 0xA0 to 0xC0.
+ */
+ dis_deauth_pattern[0] = 0xC0;
+
+ ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
+ pattern_count, byte_cnt);
+
+}
+
+static void ath9k_wow_add_pattern(struct ath_softc *sc,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_wow_pattern *wow_pattern = NULL;
+ struct cfg80211_wowlan_trig_pkt_pattern *patterns = wowlan->patterns;
+ int mask_len;
+ s8 i = 0;
+
+ if (!wowlan->n_patterns)
+ return;
+
+ /*
+ * Add the new user configured patterns
+ */
+ for (i = 0; i < wowlan->n_patterns; i++) {
+
+ wow_pattern = kzalloc(sizeof(*wow_pattern), GFP_KERNEL);
+
+ if (!wow_pattern)
+ return;
+
+ /*
+ * TODO: convert the generic user space pattern to
+ * appropriate chip specific/802.11 pattern.
+ */
+
+ mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+ memset(wow_pattern->pattern_bytes, 0, MAX_PATTERN_SIZE);
+ memset(wow_pattern->mask_bytes, 0, MAX_PATTERN_SIZE);
+ memcpy(wow_pattern->pattern_bytes, patterns[i].pattern,
+ patterns[i].pattern_len);
+ memcpy(wow_pattern->mask_bytes, patterns[i].mask, mask_len);
+ wow_pattern->pattern_len = patterns[i].pattern_len;
+
+ /*
+ * We just need to take care of the deauth and disassoc patterns;
+ * make sure we don't overwrite them.
+ */
+
+ ath9k_hw_wow_apply_pattern(ah, wow_pattern->pattern_bytes,
+ wow_pattern->mask_bytes,
+ i + 2,
+ wow_pattern->pattern_len);
+ kfree(wow_pattern);
+
+ }
+
+}
+
+static int ath9k_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 wow_triggers_enabled = 0;
+ int ret = 0;
+
+ mutex_lock(&sc->mutex);
+
+ ath_cancel_work(sc);
+ del_timer_sync(&common->ani.timer);
+ del_timer_sync(&sc->rx_poll_timer);
+
+ if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+ ath_dbg(common, ANY, "Device not present\n");
+ ret = -EINVAL;
+ goto fail_wow;
+ }
+
+ if (WARN_ON(!wowlan)) {
+ ath_dbg(common, WOW, "None of the WoW triggers enabled\n");
+ ret = -EINVAL;
+ goto fail_wow;
+ }
+
+ if (!device_can_wakeup(sc->dev)) {
+ ath_dbg(common, WOW, "device_can_wakeup failed, WoW is not enabled\n");
+ ret = 1;
+ goto fail_wow;
+ }
+
+ /*
+ * None of the STA vifs are associated, and multi-vif cases are not
+ * handled yet; for instance, a 'keep alive frame' would have to be
+ * configured separately for each STA.
+ */
+
+ if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
+ ath_dbg(common, WOW, "None of the STA vifs are associated\n");
+ ret = 1;
+ goto fail_wow;
+ }
+
+ if (sc->nvifs > 1) {
+ ath_dbg(common, WOW, "WoW for multivif is not yet supported\n");
+ ret = 1;
+ goto fail_wow;
+ }
+
+ ath9k_wow_map_triggers(sc, wowlan, &wow_triggers_enabled);
+
+ ath_dbg(common, WOW, "WoW triggers enabled 0x%x\n",
+ wow_triggers_enabled);
+
+ ath9k_ps_wakeup(sc);
+
+ ath9k_stop_btcoex(sc);
+
+ /*
+ * Enable wake-up on receiving a disassoc/deauth
+ * frame by default.
+ */
+ ath9k_wow_add_disassoc_deauth_pattern(sc);
+
+ if (wow_triggers_enabled & AH_WOW_USER_PATTERN_EN)
+ ath9k_wow_add_pattern(sc, wowlan);
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+ /*
+ * To avoid a false wake, enable the beacon-miss interrupt only
+ * when we go to sleep. Save the current interrupt mask so that
+ * it can be restored after the system wakes up.
+ */
+ sc->wow_intr_before_sleep = ah->imask;
+ ah->imask &= ~ATH9K_INT_GLOBAL;
+ ath9k_hw_disable_interrupts(ah);
+ ah->imask = ATH9K_INT_BMISS | ATH9K_INT_GLOBAL;
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
+
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
+ /*
+ * We can now sync the IRQ and kill any running tasklets, since we
+ * have already disabled interrupts and are not holding a spinlock.
+ */
+ synchronize_irq(sc->irq);
+ tasklet_kill(&sc->intr_tq);
+
+ ath9k_hw_wow_enable(ah, wow_triggers_enabled);
+
+ ath9k_ps_restore(sc);
+ ath_dbg(common, ANY, "WoW enabled in ath9k\n");
+ atomic_inc(&sc->wow_sleep_proc_intr);
+
+fail_wow:
+ mutex_unlock(&sc->mutex);
+ return ret;
+}
+
+static int ath9k_resume(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 wow_status;
+
+ mutex_lock(&sc->mutex);
+
+ ath9k_ps_wakeup(sc);
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+
+ ath9k_hw_disable_interrupts(ah);
+ ah->imask = sc->wow_intr_before_sleep;
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
+
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
+ wow_status = ath9k_hw_wow_wakeup(ah);
+
+ if (atomic_read(&sc->wow_got_bmiss_intr) == 0) {
+ /*
+ * Some devices may not report beacon miss as the reason
+ * they woke up, so add it here to cover that shortcoming.
+ */
+ wow_status |= AH_WOW_BEACON_MISS;
+ atomic_dec(&sc->wow_got_bmiss_intr);
+ ath_dbg(common, ANY, "Beacon miss interrupt picked up during WoW sleep\n");
+ }
+
+ atomic_dec(&sc->wow_sleep_proc_intr);
+
+ if (wow_status) {
+ ath_dbg(common, ANY, "Waking up due to WoW triggers %s with WoW status = %x\n",
+ ath9k_hw_wow_event_to_string(wow_status), wow_status);
+ }
+
+ ath_restart_work(sc);
+ ath9k_start_btcoex(sc);
+
+ ath9k_ps_restore(sc);
+ mutex_unlock(&sc->mutex);
+
+ return 0;
+}
+
+static void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct ath_softc *sc = hw->priv;
+
+ mutex_lock(&sc->mutex);
+ device_init_wakeup(sc->dev, 1);
+ device_set_wakeup_enable(sc->dev, enabled);
+ mutex_unlock(&sc->mutex);
+}
+
+#endif
+
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
.start = ath9k_start,
@@ -2416,4 +2436,16 @@ struct ieee80211_ops ath9k_ops = {
.get_stats = ath9k_get_stats,
.set_antenna = ath9k_set_antenna,
.get_antenna = ath9k_get_antenna,
+
+#ifdef CONFIG_PM_SLEEP
+ .suspend = ath9k_suspend,
+ .resume = ath9k_resume,
+ .set_wakeup = ath9k_set_wakeup,
+#endif
+
+#ifdef CONFIG_ATH9K_DEBUGFS
+ .get_et_sset_count = ath9k_get_et_sset_count,
+ .get_et_stats = ath9k_get_et_stats,
+ .get_et_strings = ath9k_get_et_strings,
+#endif
};
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 29fe52d..fb536e7 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -20,7 +20,7 @@
#include "ath9k.h"
#include "mci.h"
-static const u8 ath_mci_duty_cycle[] = { 0, 50, 60, 70, 80, 85, 90, 95, 98 };
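+/* BT duty cycle values, indexed by the number of active BT profiles */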
+static const u8 ath_mci_duty_cycle[] = { 55, 50, 60, 70, 80, 85, 90, 95, 98 };
static struct ath_mci_profile_info*
ath_mci_find_profile(struct ath_mci_profile *mci,
@@ -28,11 +28,14 @@ ath_mci_find_profile(struct ath_mci_profile *mci,
{
struct ath_mci_profile_info *entry;
+ if (list_empty(&mci->info))
+ return NULL;
+
list_for_each_entry(entry, &mci->info, list) {
if (entry->conn_handle == info->conn_handle)
- break;
+ return entry;
}
- return entry;
+ return NULL;
}
static bool ath_mci_add_profile(struct ath_common *common,
@@ -49,31 +52,21 @@ static bool ath_mci_add_profile(struct ath_common *common,
(info->type != MCI_GPM_COEX_PROFILE_VOICE))
return false;
- entry = ath_mci_find_profile(mci, info);
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return false;
- if (entry) {
- memcpy(entry, info, 10);
- } else {
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return false;
-
- memcpy(entry, info, 10);
- INC_PROF(mci, info);
- list_add_tail(&info->list, &mci->info);
- }
+ memcpy(entry, info, 10);
+ INC_PROF(mci, info);
+ list_add_tail(&entry->list, &mci->info);
return true;
}
static void ath_mci_del_profile(struct ath_common *common,
struct ath_mci_profile *mci,
- struct ath_mci_profile_info *info)
+ struct ath_mci_profile_info *entry)
{
- struct ath_mci_profile_info *entry;
-
- entry = ath_mci_find_profile(mci, info);
-
if (!entry)
return;
@@ -86,12 +79,16 @@ void ath_mci_flush_profile(struct ath_mci_profile *mci)
{
struct ath_mci_profile_info *info, *tinfo;
+ mci->aggr_limit = 0;
+
+ if (list_empty(&mci->info))
+ return;
+
list_for_each_entry_safe(info, tinfo, &mci->info, list) {
list_del(&info->list);
DEC_PROF(mci, info);
kfree(info);
}
- mci->aggr_limit = 0;
}
static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex)
@@ -116,42 +113,60 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_mci_profile *mci = &btcoex->mci;
+ struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
struct ath_mci_profile_info *info;
u32 num_profile = NUM_PROF(mci);
+ if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING)
+ goto skip_tuning;
+
+ btcoex->duty_cycle = ath_mci_duty_cycle[num_profile];
+
if (num_profile == 1) {
info = list_first_entry(&mci->info,
struct ath_mci_profile_info,
list);
- if (mci->num_sco && info->T == 12) {
- mci->aggr_limit = 8;
+ if (mci->num_sco) {
+ if (info->T == 12)
+ mci->aggr_limit = 8;
+ else if (info->T == 6) {
+ mci->aggr_limit = 6;
+ btcoex->duty_cycle = 30;
+ }
ath_dbg(common, MCI,
- "Single SCO, aggregation limit 2 ms\n");
- } else if ((info->type == MCI_GPM_COEX_PROFILE_BNEP) &&
- !info->master) {
- btcoex->btcoex_period = 60;
+ "Single SCO, aggregation limit %d 1/4 ms\n",
+ mci->aggr_limit);
+ } else if (mci->num_pan || mci->num_other_acl) {
+ /*
+ * For a single PAN/FTP profile, allocate 35% for BT
+ * to improve WLAN throughput.
+ */
+ btcoex->duty_cycle = 35;
+ btcoex->btcoex_period = 53;
ath_dbg(common, MCI,
- "Single slave PAN/FTP, bt period 60 ms\n");
- } else if ((info->type == MCI_GPM_COEX_PROFILE_HID) &&
- (info->T > 0 && info->T < 50) &&
- (info->A > 1 || info->W > 1)) {
+ "Single PAN/FTP bt period %d ms dutycycle %d\n",
+ btcoex->duty_cycle, btcoex->btcoex_period);
+ } else if (mci->num_hid) {
btcoex->duty_cycle = 30;
- mci->aggr_limit = 8;
+ mci->aggr_limit = 6;
ath_dbg(common, MCI,
"Multiple attempt/timeout single HID "
- "aggregation limit 2 ms dutycycle 30%%\n");
+ "aggregation limit 1.5 ms dutycycle 30%%\n");
}
- } else if ((num_profile == 2) && (mci->num_hid == 2)) {
- btcoex->duty_cycle = 30;
- mci->aggr_limit = 8;
- ath_dbg(common, MCI,
- "Two HIDs aggregation limit 2 ms dutycycle 30%%\n");
- } else if (num_profile > 3) {
+ } else if (num_profile == 2) {
+ if (mci->num_hid == 2)
+ btcoex->duty_cycle = 30;
mci->aggr_limit = 6;
ath_dbg(common, MCI,
- "Three or more profiles aggregation limit 1.5 ms\n");
+ "Two BT profiles aggr limit 1.5 ms dutycycle %d%%\n",
+ btcoex->duty_cycle);
+ } else if (num_profile >= 3) {
+ mci->aggr_limit = 4;
+ ath_dbg(common, MCI,
+ "Three or more profiles aggregation limit 1 ms\n");
}
+skip_tuning:
if (IS_CHAN_2GHZ(sc->sc_ah->curchan)) {
if (IS_CHAN_HT(sc->sc_ah->curchan))
ath_mci_adjust_aggr_limit(btcoex);
@@ -159,18 +174,17 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
btcoex->btcoex_period >>= 1;
}
- ath9k_hw_btcoex_disable(sc->sc_ah);
ath9k_btcoex_timer_pause(sc);
+ ath9k_hw_btcoex_disable(sc->sc_ah);
if (IS_CHAN_5GHZ(sc->sc_ah->curchan))
return;
- btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_MAX_DUTY_CYCLE : 0);
+ btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_BDR_DUTY_CYCLE : 0);
if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;
- btcoex->btcoex_period *= 1000;
- btcoex->btcoex_no_stomp = btcoex->btcoex_period *
+ btcoex->btcoex_no_stomp = btcoex->btcoex_period * 1000 *
(100 - btcoex->duty_cycle) / 100;
ath9k_hw_btcoex_enable(sc->sc_ah);
@@ -181,20 +195,16 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
u32 payload[4] = {0, 0, 0, 0};
switch (opcode) {
case MCI_GPM_BT_CAL_REQ:
- if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
- ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START, NULL);
- ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
- } else {
- ath_dbg(common, MCI, "MCI State mismatch: %d\n",
- ar9003_mci_state(ah, MCI_STATE_BT, NULL));
+ if (mci_hw->bt_state == MCI_BT_AWAKE) {
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START);
+ ath9k_queue_reset(sc, RESET_TYPE_MCI);
}
- break;
- case MCI_GPM_BT_CAL_DONE:
- ar9003_mci_state(ah, MCI_STATE_BT, NULL);
+ ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state);
break;
case MCI_GPM_BT_CAL_GRANT:
MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
@@ -207,32 +217,55 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
}
}
+static void ath9k_mci_work(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc, mci_work);
+
+ ath_mci_update_scheme(sc);
+}
+
static void ath_mci_process_profile(struct ath_softc *sc,
struct ath_mci_profile_info *info)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_mci_profile *mci = &btcoex->mci;
+ struct ath_mci_profile_info *entry = NULL;
+
+ entry = ath_mci_find_profile(mci, info);
+ if (entry) {
+ /*
+ * Two MCI interrupts are generated while connecting to
+ * headset and A2DP profiles, but only one MCI interrupt
+ * is generated (with the last added profile type) while
+ * disconnecting both profiles.
+ * So when adding the second profile type, decrement
+ * the first one.
+ */
+ if (entry->type != info->type) {
+ DEC_PROF(mci, entry);
+ INC_PROF(mci, info);
+ }
+ memcpy(entry, info, 10);
+ }
if (info->start) {
- if (!ath_mci_add_profile(common, mci, info))
+ if (!entry && !ath_mci_add_profile(common, mci, info))
return;
} else
- ath_mci_del_profile(common, mci, info);
+ ath_mci_del_profile(common, mci, entry);
btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
mci->aggr_limit = mci->num_sco ? 6 : 0;
- if (NUM_PROF(mci)) {
+ btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
+ if (NUM_PROF(mci))
btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
- btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
- } else {
+ else
btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
ATH_BTCOEX_STOMP_LOW;
- btcoex->duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
- }
- ath_mci_update_scheme(sc);
+ ieee80211_queue_work(sc->hw, &sc->mci_work);
}
static void ath_mci_process_status(struct ath_softc *sc,
@@ -247,8 +280,6 @@ static void ath_mci_process_status(struct ath_softc *sc,
if (status->is_link)
return;
- memset(&info, 0, sizeof(struct ath_mci_profile_info));
-
info.conn_handle = status->conn_handle;
if (ath_mci_find_profile(mci, &info))
return;
@@ -268,7 +299,7 @@ static void ath_mci_process_status(struct ath_softc *sc,
} while (++i < ATH_MCI_MAX_PROFILE);
if (old_num_mgmt != mci->num_mgmt)
- ath_mci_update_scheme(sc);
+ ieee80211_queue_work(sc->hw, &sc->mci_work);
}
static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
@@ -277,25 +308,20 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
struct ath_mci_profile_info profile_info;
struct ath_mci_profile_status profile_status;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- u32 version;
- u8 major;
- u8 minor;
+ u8 major, minor;
u32 seq_num;
switch (opcode) {
case MCI_GPM_COEX_VERSION_QUERY:
- version = ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION,
- NULL);
+ ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION);
break;
case MCI_GPM_COEX_VERSION_RESPONSE:
major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
- version = (major << 8) + minor;
- version = ar9003_mci_state(ah, MCI_STATE_SET_BT_COEX_VERSION,
- &version);
+ ar9003_mci_set_bt_version(ah, major, minor);
break;
case MCI_GPM_COEX_STATUS_QUERY:
- ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_CHANNELS, NULL);
+ ar9003_mci_send_wlan_channels(ah);
break;
case MCI_GPM_COEX_BT_PROFILE_INFO:
memcpy(&profile_info,
@@ -322,7 +348,7 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
seq_num = *((u32 *)(rx_payload + 12));
ath_dbg(common, MCI,
- "BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%d\n",
+ "BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%u\n",
profile_status.is_link, profile_status.conn_handle,
profile_status.is_critical, seq_num);
@@ -362,6 +388,7 @@ int ath_mci_setup(struct ath_softc *sc)
mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
mci->sched_buf.bf_paddr);
+ INIT_WORK(&sc->mci_work, ath9k_mci_work);
ath_dbg(common, MCI, "MCI Initialized\n");
return 0;
@@ -389,6 +416,7 @@ void ath_mci_intr(struct ath_softc *sc)
struct ath_mci_coex *mci = &sc->mci_coex;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
u32 mci_int, mci_int_rxmsg;
u32 offset, subtype, opcode;
u32 *pgpm;
@@ -397,8 +425,8 @@ void ath_mci_intr(struct ath_softc *sc)
ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
- if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) == 0) {
- ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
+ if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) {
+ ar9003_mci_get_next_gpm_offset(ah, true, NULL);
return;
}
@@ -417,46 +445,41 @@ void ath_mci_intr(struct ath_softc *sc)
NULL, 0, true, false);
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
- ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE, NULL);
+ ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE);
/*
* always do this for recovery and 2G/5G toggling and LNA_TRANS
*/
- ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, NULL);
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;
- if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_SLEEP) {
- if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) !=
- MCI_BT_SLEEP)
- ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE,
- NULL);
- }
+ if ((mci_hw->bt_state == MCI_BT_SLEEP) &&
+ (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
+ MCI_BT_SLEEP))
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;
- if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
- if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) !=
- MCI_BT_AWAKE)
- ar9003_mci_state(ah, MCI_STATE_SET_BT_SLEEP,
- NULL);
- }
+ if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
+ (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
+ MCI_BT_AWAKE))
+ mci_hw->bt_state = MCI_BT_SLEEP;
}
if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
(mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
- ar9003_mci_state(ah, MCI_STATE_RECOVER_RX, NULL);
+ ar9003_mci_state(ah, MCI_STATE_RECOVER_RX);
skip_gpm = true;
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
- offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET,
- NULL);
+ offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET);
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
@@ -465,8 +488,8 @@ void ath_mci_intr(struct ath_softc *sc)
while (more_data == MCI_GPM_MORE) {
pgpm = mci->gpm_buf.bf_addr;
- offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
- &more_data);
+ offset = ar9003_mci_get_next_gpm_offset(ah, false,
+ &more_data);
if (offset == MCI_GPM_INVALID)
break;
@@ -507,23 +530,17 @@ void ath_mci_intr(struct ath_softc *sc)
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
- int value_dbm = ar9003_mci_state(ah,
- MCI_STATE_CONT_RSSI_POWER, NULL);
+ int value_dbm = MS(mci_hw->cont_status,
+ AR_MCI_CONT_RSSI_POWER);
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;
- if (ar9003_mci_state(ah, MCI_STATE_CONT_TXRX, NULL))
- ath_dbg(common, MCI,
- "MCI CONT_INFO: (tx) pri = %d, pwr = %d dBm\n",
- ar9003_mci_state(ah,
- MCI_STATE_CONT_PRIORITY, NULL),
- value_dbm);
- else
- ath_dbg(common, MCI,
- "MCI CONT_INFO: (rx) pri = %d,pwr = %d dBm\n",
- ar9003_mci_state(ah,
- MCI_STATE_CONT_PRIORITY, NULL),
- value_dbm);
+ ath_dbg(common, MCI,
+ "MCI CONT_INFO: (%s) pri = %d pwr = %d dBm\n",
+ MS(mci_hw->cont_status, AR_MCI_CONT_TXRX) ?
+ "tx" : "rx",
+ MS(mci_hw->cont_status, AR_MCI_CONT_PRIORITY),
+ value_dbm);
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK)
@@ -538,3 +555,14 @@ void ath_mci_intr(struct ath_softc *sc)
mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
}
+
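+/*
+ * Add the MCI interrupt to the interrupt mask when BT coex is enabled
+ * and the hardware supports MCI.
+ */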
+void ath_mci_enable(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ if (!common->btcoex_enabled)
+ return;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ sc->sc_ah->imask |= ATH9K_INT_MCI;
+}
diff --git a/drivers/net/wireless/ath/ath9k/mci.h b/drivers/net/wireless/ath/ath9k/mci.h
index c841444..fc14eea 100644
--- a/drivers/net/wireless/ath/ath9k/mci.h
+++ b/drivers/net/wireless/ath/ath9k/mci.h
@@ -130,4 +130,13 @@ void ath_mci_flush_profile(struct ath_mci_profile *mci);
int ath_mci_setup(struct ath_softc *sc);
void ath_mci_cleanup(struct ath_softc *sc);
void ath_mci_intr(struct ath_softc *sc);
-#endif
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+void ath_mci_enable(struct ath_softc *sc);
+#else
+static inline void ath_mci_enable(struct ath_softc *sc)
+{
+}
+#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
+
+#endif /* MCI_H*/
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index a856b51..87b89d5 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -115,6 +115,9 @@ static void ath_pci_aspm_init(struct ath_common *common)
int pos;
u8 aspm;
+ if (!ah->is_pciexpress)
+ return;
+
pos = pci_pcie_cap(pdev);
if (!pos)
return;
@@ -138,6 +141,7 @@ static void ath_pci_aspm_init(struct ath_common *common)
aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
pci_write_config_byte(parent, pos + PCI_EXP_LNKCTL, aspm);
+ ath_info(common, "Disabling ASPM since BTCOEX is enabled\n");
return;
}
@@ -147,6 +151,7 @@ static void ath_pci_aspm_init(struct ath_common *common)
ah->aspm_enabled = true;
/* Initialize PCIe PM and SERDES registers. */
ath9k_hw_configpcipowersave(ah, false);
+ ath_info(common, "ASPM enabled: 0x%x\n", aspm);
}
}
@@ -246,7 +251,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sc->mem = mem;
/* Will be cleared in ath9k_start() */
- sc->sc_flags |= SC_OP_INVALID;
+ set_bit(SC_OP_INVALID, &sc->sc_flags);
ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
@@ -308,6 +313,9 @@ static int ath_pci_suspend(struct device *device)
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_softc *sc = hw->priv;
+ if (sc->wow_enabled)
+ return 0;
+
/* The device has to be moved to FULLSLEEP forcibly.
* Otherwise the chip never moved to full sleep,
* when no interface is up.
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 92a6c0a..e034add 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -770,7 +770,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
struct ieee80211_tx_rate *rates = tx_info->control.rates;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
__le16 fc = hdr->frame_control;
- u8 try_per_rate, i = 0, rix, high_rix;
+ u8 try_per_rate, i = 0, rix;
int is_probe = 0;
if (rate_control_send_low(sta, priv_sta, txrc))
@@ -791,7 +791,6 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
rate_table = ath_rc_priv->rate_table;
rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
&is_probe, false);
- high_rix = rix;
/*
* If we're in HT mode and both us and our peer supports LDPC.
@@ -839,16 +838,16 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
try_per_rate = 8;
/*
- * Use a legacy rate as last retry to ensure that the frame
- * is tried in both MCS and legacy rates.
+ * If the last rate in the rate series is MCS and has
+ * more than 80% of per thresh, then use a legacy rate
+ * as last retry to ensure that the frame is tried in both
+ * MCS and legacy rate.
*/
- if ((rates[2].flags & IEEE80211_TX_RC_MCS) &&
- (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU) ||
- (ath_rc_priv->per[high_rix] > 45)))
+ ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
+ if (WLAN_RC_PHY_HT(rate_table->info[rix].phy) &&
+ (ath_rc_priv->per[rix] > 45))
rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
&is_probe, true);
- else
- ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
/* All other rates in the series have RTS enabled */
ath_rc_rate_set_series(rate_table, &rates[i], txrc,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index e1fcc68..12aca02 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -20,43 +20,6 @@
#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))
-static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
- int mindelta, int main_rssi_avg,
- int alt_rssi_avg, int pkt_count)
-{
- return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
- (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
- (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
-}
-
-static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
- int curr_main_set, int curr_alt_set,
- int alt_rssi_avg, int main_rssi_avg)
-{
- bool result = false;
- switch (div_group) {
- case 0:
- if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
- result = true;
- break;
- case 1:
- case 2:
- if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
- (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
- (alt_rssi_avg >= (main_rssi_avg - 5))) ||
- ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
- (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
- (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
- (alt_rssi_avg >= 4))
- result = true;
- else
- result = false;
- break;
- }
-
- return result;
-}
-
static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
return sc->ps_enabled &&
@@ -303,7 +266,7 @@ static void ath_edma_start_recv(struct ath_softc *sc)
ath_opmode_init(sc);
- ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
+ ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
spin_unlock_bh(&sc->rx.rxbuflock);
}
@@ -322,8 +285,8 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
int error = 0;
spin_lock_init(&sc->sc_pcu_lock);
- sc->sc_flags &= ~SC_OP_RXFLUSH;
spin_lock_init(&sc->rx.rxbuflock);
+ clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
sc->sc_ah->caps.rx_status_len;
@@ -467,6 +430,9 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
}
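+ /* AR9550: also accept 4-address frames */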
+ if (AR_SREV_9550(sc->sc_ah))
+ rfilt |= ATH9K_RX_FILTER_4ADDRESS;
+
return rfilt;
}
@@ -500,7 +466,7 @@ int ath_startrecv(struct ath_softc *sc)
start_recv:
ath_opmode_init(sc);
- ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
+ ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
spin_unlock_bh(&sc->rx.rxbuflock);
@@ -535,11 +501,11 @@ bool ath_stoprecv(struct ath_softc *sc)
void ath_flushrecv(struct ath_softc *sc)
{
- sc->sc_flags |= SC_OP_RXFLUSH;
+ set_bit(SC_OP_RXFLUSH, &sc->sc_flags);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
ath_rx_tasklet(sc, 1, true);
ath_rx_tasklet(sc, 1, false);
- sc->sc_flags &= ~SC_OP_RXFLUSH;
+ clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
}
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
@@ -587,7 +553,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
sc->ps_flags &= ~PS_BEACON_SYNC;
ath_dbg(common, PS,
"Reconfigure Beacon timers based on timestamp from the AP\n");
- ath_set_beacon(sc);
+ ath9k_set_beacon(sc);
}
if (ath_beacon_dtim_pending_cab(skb)) {
@@ -624,13 +590,13 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
/* Process Beacon and CAB receive in PS state */
if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
- && mybeacon)
+ && mybeacon) {
ath_rx_ps_beacon(sc, skb);
- else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
- (ieee80211_is_data(hdr->frame_control) ||
- ieee80211_is_action(hdr->frame_control)) &&
- is_multicast_ether_addr(hdr->addr1) &&
- !ieee80211_has_moredata(hdr->frame_control)) {
+ } else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
+ (ieee80211_is_data(hdr->frame_control) ||
+ ieee80211_is_action(hdr->frame_control)) &&
+ is_multicast_ether_addr(hdr->addr1) &&
+ !ieee80211_has_moredata(hdr->frame_control)) {
/*
* No more broadcast/multicast frames to be received at this
* point.
@@ -695,9 +661,9 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
__skb_unlink(skb, &rx_edma->rx_fifo);
list_add_tail(&bf->list, &sc->rx.rxbuf);
ath_rx_edma_buf_link(sc, qtype);
- } else {
- bf = NULL;
}
+
+ bf = NULL;
}
*dest = bf;
@@ -822,7 +788,8 @@ static bool ath9k_rx_accept(struct ath_common *common,
* descriptor does contain a valid key index. This has been observed
* mostly with CCMP encryption.
*/
- if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
+ if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
+ !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
if (!rx_stats->rs_datalen) {
@@ -1067,709 +1034,6 @@ static void ath9k_rx_skb_postprocess(struct ath_common *common,
rxs->flag &= ~RX_FLAG_DECRYPTED;
}
-static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
- struct ath_hw_antcomb_conf ant_conf,
- int main_rssi_avg)
-{
- antcomb->quick_scan_cnt = 0;
-
- if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
- antcomb->rssi_lna2 = main_rssi_avg;
- else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
- antcomb->rssi_lna1 = main_rssi_avg;
-
- switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
- case 0x10: /* LNA2 A-B */
- antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- antcomb->first_quick_scan_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
- break;
- case 0x20: /* LNA1 A-B */
- antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- antcomb->first_quick_scan_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
- break;
- case 0x21: /* LNA1 LNA2 */
- antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
- antcomb->first_quick_scan_conf =
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- antcomb->second_quick_scan_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- break;
- case 0x12: /* LNA2 LNA1 */
- antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
- antcomb->first_quick_scan_conf =
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- antcomb->second_quick_scan_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- break;
- case 0x13: /* LNA2 A+B */
- antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- antcomb->first_quick_scan_conf =
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
- break;
- case 0x23: /* LNA1 A+B */
- antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- antcomb->first_quick_scan_conf =
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
- break;
- default:
- break;
- }
-}
-
-static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
- struct ath_hw_antcomb_conf *div_ant_conf,
- int main_rssi_avg, int alt_rssi_avg,
- int alt_ratio)
-{
- /* alt_good */
- switch (antcomb->quick_scan_cnt) {
- case 0:
- /* set alt to main, and alt to first conf */
- div_ant_conf->main_lna_conf = antcomb->main_conf;
- div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
- break;
- case 1:
- /* set alt to main, and alt to first conf */
- div_ant_conf->main_lna_conf = antcomb->main_conf;
- div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
- antcomb->rssi_first = main_rssi_avg;
- antcomb->rssi_second = alt_rssi_avg;
-
- if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
- /* main is LNA1 */
- if (ath_is_alt_ant_ratio_better(alt_ratio,
- ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
- ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
- main_rssi_avg, alt_rssi_avg,
- antcomb->total_pkt_count))
- antcomb->first_ratio = true;
- else
- antcomb->first_ratio = false;
- } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
- if (ath_is_alt_ant_ratio_better(alt_ratio,
- ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
- ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
- main_rssi_avg, alt_rssi_avg,
- antcomb->total_pkt_count))
- antcomb->first_ratio = true;
- else
- antcomb->first_ratio = false;
- } else {
- if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
- (alt_rssi_avg > main_rssi_avg +
- ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
- (alt_rssi_avg > main_rssi_avg)) &&
- (antcomb->total_pkt_count > 50))
- antcomb->first_ratio = true;
- else
- antcomb->first_ratio = false;
- }
- break;
- case 2:
- antcomb->alt_good = false;
- antcomb->scan_not_start = false;
- antcomb->scan = false;
- antcomb->rssi_first = main_rssi_avg;
- antcomb->rssi_third = alt_rssi_avg;
-
- if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
- antcomb->rssi_lna1 = alt_rssi_avg;
- else if (antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- antcomb->rssi_lna2 = alt_rssi_avg;
- else if (antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
- if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
- antcomb->rssi_lna2 = main_rssi_avg;
- else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
- antcomb->rssi_lna1 = main_rssi_avg;
- }
-
- if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
- ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
- div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
- else
- div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
-
- if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
- if (ath_is_alt_ant_ratio_better(alt_ratio,
- ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
- ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
- main_rssi_avg, alt_rssi_avg,
- antcomb->total_pkt_count))
- antcomb->second_ratio = true;
- else
- antcomb->second_ratio = false;
- } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
- if (ath_is_alt_ant_ratio_better(alt_ratio,
- ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
- ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
- main_rssi_avg, alt_rssi_avg,
- antcomb->total_pkt_count))
- antcomb->second_ratio = true;
- else
- antcomb->second_ratio = false;
- } else {
- if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
- (alt_rssi_avg > main_rssi_avg +
- ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
- (alt_rssi_avg > main_rssi_avg)) &&
- (antcomb->total_pkt_count > 50))
- antcomb->second_ratio = true;
- else
- antcomb->second_ratio = false;
- }
-
- /* set alt to the conf with maximun ratio */
- if (antcomb->first_ratio && antcomb->second_ratio) {
- if (antcomb->rssi_second > antcomb->rssi_third) {
- /* first alt*/
- if ((antcomb->first_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA1) ||
- (antcomb->first_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA2))
- /* Set alt LNA1 or LNA2*/
- if (div_ant_conf->main_lna_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- else
- /* Set alt to A+B or A-B */
- div_ant_conf->alt_lna_conf =
- antcomb->first_quick_scan_conf;
- } else if ((antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA1) ||
- (antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA2)) {
- /* Set alt LNA1 or LNA2 */
- if (div_ant_conf->main_lna_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- } else {
- /* Set alt to A+B or A-B */
- div_ant_conf->alt_lna_conf =
- antcomb->second_quick_scan_conf;
- }
- } else if (antcomb->first_ratio) {
- /* first alt */
- if ((antcomb->first_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA1) ||
- (antcomb->first_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA2))
- /* Set alt LNA1 or LNA2 */
- if (div_ant_conf->main_lna_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- else
- /* Set alt to A+B or A-B */
- div_ant_conf->alt_lna_conf =
- antcomb->first_quick_scan_conf;
- } else if (antcomb->second_ratio) {
- /* second alt */
- if ((antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA1) ||
- (antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA2))
- /* Set alt LNA1 or LNA2 */
- if (div_ant_conf->main_lna_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- else
- /* Set alt to A+B or A-B */
- div_ant_conf->alt_lna_conf =
- antcomb->second_quick_scan_conf;
- } else {
- /* main is largest */
- if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
- (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
- /* Set alt LNA1 or LNA2 */
- if (div_ant_conf->main_lna_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- else
- /* Set alt to A+B or A-B */
- div_ant_conf->alt_lna_conf = antcomb->main_conf;
- }
- break;
- default:
- break;
- }
-}
-
-static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
- struct ath_ant_comb *antcomb, int alt_ratio)
-{
- if (ant_conf->div_group == 0) {
- /* Adjust the fast_div_bias based on main and alt lna conf */
- switch ((ant_conf->main_lna_conf << 4) |
- ant_conf->alt_lna_conf) {
- case 0x01: /* A-B LNA2 */
- ant_conf->fast_div_bias = 0x3b;
- break;
- case 0x02: /* A-B LNA1 */
- ant_conf->fast_div_bias = 0x3d;
- break;
- case 0x03: /* A-B A+B */
- ant_conf->fast_div_bias = 0x1;
- break;
- case 0x10: /* LNA2 A-B */
- ant_conf->fast_div_bias = 0x7;
- break;
- case 0x12: /* LNA2 LNA1 */
- ant_conf->fast_div_bias = 0x2;
- break;
- case 0x13: /* LNA2 A+B */
- ant_conf->fast_div_bias = 0x7;
- break;
- case 0x20: /* LNA1 A-B */
- ant_conf->fast_div_bias = 0x6;
- break;
- case 0x21: /* LNA1 LNA2 */
- ant_conf->fast_div_bias = 0x0;
- break;
- case 0x23: /* LNA1 A+B */
- ant_conf->fast_div_bias = 0x6;
- break;
- case 0x30: /* A+B A-B */
- ant_conf->fast_div_bias = 0x1;
- break;
- case 0x31: /* A+B LNA2 */
- ant_conf->fast_div_bias = 0x3b;
- break;
- case 0x32: /* A+B LNA1 */
- ant_conf->fast_div_bias = 0x3d;
- break;
- default:
- break;
- }
- } else if (ant_conf->div_group == 1) {
- /* Adjust the fast_div_bias based on main and alt_lna_conf */
- switch ((ant_conf->main_lna_conf << 4) |
- ant_conf->alt_lna_conf) {
- case 0x01: /* A-B LNA2 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x02: /* A-B LNA1 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x03: /* A-B A+B */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x10: /* LNA2 A-B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
- ant_conf->fast_div_bias = 0x3f;
- else
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x12: /* LNA2 LNA1 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x13: /* LNA2 A+B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
- ant_conf->fast_div_bias = 0x3f;
- else
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x20: /* LNA1 A-B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
- ant_conf->fast_div_bias = 0x3f;
- else
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x21: /* LNA1 LNA2 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x23: /* LNA1 A+B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
- ant_conf->fast_div_bias = 0x3f;
- else
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x30: /* A+B A-B */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x31: /* A+B LNA2 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x32: /* A+B LNA1 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- default:
- break;
- }
- } else if (ant_conf->div_group == 2) {
- /* Adjust the fast_div_bias based on main and alt_lna_conf */
- switch ((ant_conf->main_lna_conf << 4) |
- ant_conf->alt_lna_conf) {
- case 0x01: /* A-B LNA2 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x02: /* A-B LNA1 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x03: /* A-B A+B */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x10: /* LNA2 A-B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
- ant_conf->fast_div_bias = 0x1;
- else
- ant_conf->fast_div_bias = 0x2;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x12: /* LNA2 LNA1 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x13: /* LNA2 A+B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
- ant_conf->fast_div_bias = 0x1;
- else
- ant_conf->fast_div_bias = 0x2;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x20: /* LNA1 A-B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
- ant_conf->fast_div_bias = 0x1;
- else
- ant_conf->fast_div_bias = 0x2;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x21: /* LNA1 LNA2 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x23: /* LNA1 A+B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
- ant_conf->fast_div_bias = 0x1;
- else
- ant_conf->fast_div_bias = 0x2;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x30: /* A+B A-B */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x31: /* A+B LNA2 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x32: /* A+B LNA1 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- default:
- break;
- }
- }
-}
-
-/* Antenna diversity and combining */
-static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
-{
- struct ath_hw_antcomb_conf div_ant_conf;
- struct ath_ant_comb *antcomb = &sc->ant_comb;
- int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
- int curr_main_set;
- int main_rssi = rs->rs_rssi_ctl0;
- int alt_rssi = rs->rs_rssi_ctl1;
- int rx_ant_conf, main_ant_conf;
- bool short_scan = false;
-
- rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
- ATH_ANT_RX_MASK;
- main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
- ATH_ANT_RX_MASK;
-
- /* Record packet only when both main_rssi and alt_rssi is positive */
- if (main_rssi > 0 && alt_rssi > 0) {
- antcomb->total_pkt_count++;
- antcomb->main_total_rssi += main_rssi;
- antcomb->alt_total_rssi += alt_rssi;
- if (main_ant_conf == rx_ant_conf)
- antcomb->main_recv_cnt++;
- else
- antcomb->alt_recv_cnt++;
- }
-
- /* Short scan check */
- if (antcomb->scan && antcomb->alt_good) {
- if (time_after(jiffies, antcomb->scan_start_time +
- msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
- short_scan = true;
- else
- if (antcomb->total_pkt_count ==
- ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
- alt_ratio = ((antcomb->alt_recv_cnt * 100) /
- antcomb->total_pkt_count);
- if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
- short_scan = true;
- }
- }
-
- if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
- rs->rs_moreaggr) && !short_scan)
- return;
-
- if (antcomb->total_pkt_count) {
- alt_ratio = ((antcomb->alt_recv_cnt * 100) /
- antcomb->total_pkt_count);
- main_rssi_avg = (antcomb->main_total_rssi /
- antcomb->total_pkt_count);
- alt_rssi_avg = (antcomb->alt_total_rssi /
- antcomb->total_pkt_count);
- }
-
-
- ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
- curr_alt_set = div_ant_conf.alt_lna_conf;
- curr_main_set = div_ant_conf.main_lna_conf;
-
- antcomb->count++;
-
- if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
- if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
- ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
- main_rssi_avg);
- antcomb->alt_good = true;
- } else {
- antcomb->alt_good = false;
- }
-
- antcomb->count = 0;
- antcomb->scan = true;
- antcomb->scan_not_start = true;
- }
-
- if (!antcomb->scan) {
- if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
- alt_ratio, curr_main_set, curr_alt_set,
- alt_rssi_avg, main_rssi_avg)) {
- if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
- /* Switch main and alt LNA */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- }
-
- goto div_comb_done;
- } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
- (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
- /* Set alt to another LNA */
- if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
-
- goto div_comb_done;
- }
-
- if ((alt_rssi_avg < (main_rssi_avg +
- div_ant_conf.lna1_lna2_delta)))
- goto div_comb_done;
- }
-
- if (!antcomb->scan_not_start) {
- switch (curr_alt_set) {
- case ATH_ANT_DIV_COMB_LNA2:
- antcomb->rssi_lna2 = alt_rssi_avg;
- antcomb->rssi_lna1 = main_rssi_avg;
- antcomb->scan = true;
- /* set to A+B */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- break;
- case ATH_ANT_DIV_COMB_LNA1:
- antcomb->rssi_lna1 = alt_rssi_avg;
- antcomb->rssi_lna2 = main_rssi_avg;
- antcomb->scan = true;
- /* set to A+B */
- div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- break;
- case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
- antcomb->rssi_add = alt_rssi_avg;
- antcomb->scan = true;
- /* set to A-B */
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- break;
- case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
- antcomb->rssi_sub = alt_rssi_avg;
- antcomb->scan = false;
- if (antcomb->rssi_lna2 >
- (antcomb->rssi_lna1 +
- ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
- /* use LNA2 as main LNA */
- if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
- (antcomb->rssi_add > antcomb->rssi_sub)) {
- /* set to A+B */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- } else if (antcomb->rssi_sub >
- antcomb->rssi_lna1) {
- /* set to A-B */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- } else {
- /* set to LNA1 */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- }
- } else {
- /* use LNA1 as main LNA */
- if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
- (antcomb->rssi_add > antcomb->rssi_sub)) {
- /* set to A+B */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- } else if (antcomb->rssi_sub >
- antcomb->rssi_lna1) {
- /* set to A-B */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- } else {
- /* set to LNA2 */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- }
- }
- break;
- default:
- break;
- }
- } else {
- if (!antcomb->alt_good) {
- antcomb->scan_not_start = false;
- /* Set alt to another LNA */
- if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- } else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- }
- goto div_comb_done;
- }
- }
-
- ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
- main_rssi_avg, alt_rssi_avg,
- alt_ratio);
-
- antcomb->quick_scan_cnt++;
-
-div_comb_done:
- ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
- ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
-
- antcomb->scan_start_time = jiffies;
- antcomb->total_pkt_count = 0;
- antcomb->main_total_rssi = 0;
- antcomb->alt_total_rssi = 0;
- antcomb->main_recv_cnt = 0;
- antcomb->alt_recv_cnt = 0;
-}
-
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
struct ath_buf *bf;
@@ -1803,7 +1067,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
do {
/* If handling rx interrupt and flush is in progress => exit */
- if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
+ if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0))
break;
memset(&rs, 0, sizeof(rs));
@@ -1841,13 +1105,14 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
else
rs.is_mybeacon = false;
+ sc->rx.num_pkts++;
ath_debug_stat_rx(sc, &rs);
/*
* If we're asked to flush receive queue, directly
* chain it back at the queue without processing it.
*/
- if (sc->sc_flags & SC_OP_RXFLUSH) {
+ if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags)) {
RX_STAT_INC(rx_drop_rxflush);
goto requeue_drop_frag;
}
@@ -1968,7 +1233,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
skb_trim(skb, skb->len - 8);
spin_lock_irqsave(&sc->sc_pm_lock, flags);
-
if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
PS_WAIT_FOR_PSPOLL_DATA)) ||
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 458f81b..87cac8e 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -696,9 +696,12 @@
#define AR_WA_BIT7 (1 << 7)
#define AR_WA_BIT23 (1 << 23)
#define AR_WA_D3_L1_DISABLE (1 << 14)
+#define AR_WA_UNTIE_RESET_EN (1 << 15) /* Enable PCI Reset
+ to POR (power-on-reset) */
#define AR_WA_D3_TO_L1_DISABLE_REAL (1 << 16)
#define AR_WA_ASPM_TIMER_BASED_DISABLE (1 << 17)
-#define AR_WA_RESET_EN (1 << 18) /* Sw Control to enable PCI-Reset to POR (bit 15) */
+#define AR_WA_RESET_EN (1 << 18) /* Enable PCI-Reset to
+ POR (bit 15) */
#define AR_WA_ANALOG_SHIFT (1 << 20)
#define AR_WA_POR_SHORT (1 << 21) /* PCI-E Phy reset control */
#define AR_WA_BIT22 (1 << 22)
@@ -798,6 +801,7 @@
#define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */
#define AR_SREV_VERSION_9462 0x280
#define AR_SREV_REVISION_9462_20 2
+#define AR_SREV_VERSION_9550 0x400
#define AR_SREV_5416(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -905,6 +909,9 @@
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
+#define AR_SREV_9550(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9550))
+
#define AR_SREV_9580(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9580) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9580_10))
@@ -1028,6 +1035,8 @@ enum {
#define AR_PCIE_PM_CTRL (AR_SREV_9340(ah) ? 0x4004 : 0x4014)
#define AR_PCIE_PM_CTRL_ENA 0x00080000
+#define AR_PCIE_PHY_REG3 0x18c08
+
#define AR_NUM_GPIO 14
#define AR928X_NUM_GPIO 10
#define AR9285_NUM_GPIO 12
@@ -1231,6 +1240,8 @@ enum {
#define AR_RTC_PLL_CLKSEL 0x00000300
#define AR_RTC_PLL_CLKSEL_S 8
#define AR_RTC_PLL_BYPASS 0x00010000
+#define AR_RTC_PLL_NOPWD 0x00040000
+#define AR_RTC_PLL_NOPWD_S 18
#define PLL3 0x16188
#define PLL3_DO_MEAS_MASK 0x40000000
@@ -1643,11 +1654,11 @@ enum {
#define AR_TPC 0x80e8
#define AR_TPC_ACK 0x0000003f
-#define AR_TPC_ACK_S 0x00
+#define AR_TPC_ACK_S 0
#define AR_TPC_CTS 0x00003f00
-#define AR_TPC_CTS_S 0x08
+#define AR_TPC_CTS_S 8
#define AR_TPC_CHIRP 0x003f0000
-#define AR_TPC_CHIRP_S 0x16
+#define AR_TPC_CHIRP_S 16
#define AR_QUIET1 0x80fc
#define AR_QUIET1_NEXT_QUIET_S 0
@@ -1883,6 +1894,8 @@ enum {
#define AR_PCU_MISC_MODE2_HWWAR2 0x02000000
#define AR_PCU_MISC_MODE2_RESERVED2 0xFFFE0000
+#define AR_PCU_MISC_MODE3 0x83d0
+
#define AR_MAC_PCU_ASYNC_FIFO_REG3 0x8358
#define AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL 0x00000400
#define AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET 0x80000000
@@ -1905,6 +1918,140 @@ enum {
#define AR_RATE_DURATION_32 0x8780
#define AR_RATE_DURATION(_n) (AR_RATE_DURATION_0 + ((_n)<<2))
+/* WoW - Wake On Wireless */
+
+#define AR_PMCTRL_AUX_PWR_DET 0x10000000 /* Puts Chip in L2 state */
+#define AR_PMCTRL_D3COLD_VAUX 0x00800000
+#define AR_PMCTRL_HOST_PME_EN 0x00400000 /* Send OOB WAKE_L on WoW
+ event */
+#define AR_PMCTRL_WOW_PME_CLR 0x00200000 /* Clear WoW event */
+#define AR_PMCTRL_PWR_STATE_MASK 0x0f000000 /* Power State Mask */
+#define AR_PMCTRL_PWR_STATE_D1D3 0x0f000000 /* Activate D1 and D3 */
+#define AR_PMCTRL_PWR_STATE_D1D3_REAL 0x0f000000 /* Activate D1 and D3 */
+#define AR_PMCTRL_PWR_STATE_D0 0x08000000 /* Activate D0 */
+#define AR_PMCTRL_PWR_PM_CTRL_ENA 0x00008000 /* Enable power mgmt */
+
+#define AR_WOW_BEACON_TIMO_MAX 0xffffffff
+
+/*
+ * MAC WoW Registers
+ */
+
+#define AR_WOW_PATTERN 0x825C
+#define AR_WOW_COUNT 0x8260
+#define AR_WOW_BCN_EN 0x8270
+#define AR_WOW_BCN_TIMO 0x8274
+#define AR_WOW_KEEP_ALIVE_TIMO 0x8278
+#define AR_WOW_KEEP_ALIVE 0x827c
+#define AR_WOW_US_SCALAR 0x8284
+#define AR_WOW_KEEP_ALIVE_DELAY 0x8288
+#define AR_WOW_PATTERN_MATCH 0x828c
+#define AR_WOW_PATTERN_OFF1 0x8290 /* pattern bytes 0 -> 3 */
+#define AR_WOW_PATTERN_OFF2 0x8294 /* pattern bytes 4 -> 7 */
+
+/* for AR9285 and later chips */
+#define AR_WOW_EXACT 0x829c
+#define AR_WOW_LENGTH1 0x8360
+#define AR_WOW_LENGTH2			0x8364
+/* register to enable pattern match for packets shorter than 256 bytes */
+#define AR_WOW_PATTERN_MATCH_LT_256B 0x8368
+
+#define AR_SW_WOW_CONTROL 0x20018
+#define AR_SW_WOW_ENABLE 0x1
+#define AR_SWITCH_TO_REFCLK 0x2
+#define AR_RESET_CONTROL 0x4
+#define AR_RESET_VALUE_MASK 0x8
+#define AR_HW_WOW_DISABLE 0x10
+#define AR_CLR_MAC_INTERRUPT 0x20
+#define AR_CLR_KA_INTERRUPT 0x40
+
+/* AR_WOW_PATTERN register values */
+#define AR_WOW_BACK_OFF_SHIFT(x) ((x & 0xf) << 28) /* in usecs */
+#define AR_WOW_MAC_INTR_EN 0x00040000
+#define AR_WOW_MAGIC_EN 0x00010000
+#define AR_WOW_PATTERN_EN(x) (x & 0xff)
+#define AR_WOW_PAT_FOUND_SHIFT 8
+#define AR_WOW_PATTERN_FOUND(x) (x & (0xff << AR_WOW_PAT_FOUND_SHIFT))
+#define AR_WOW_PATTERN_FOUND_MASK ((0xff) << AR_WOW_PAT_FOUND_SHIFT)
+#define AR_WOW_MAGIC_PAT_FOUND 0x00020000
+#define AR_WOW_MAC_INTR 0x00080000
+#define AR_WOW_KEEP_ALIVE_FAIL 0x00100000
+#define AR_WOW_BEACON_FAIL 0x00200000
+
+#define AR_WOW_STATUS(x) (x & (AR_WOW_PATTERN_FOUND_MASK | \
+ AR_WOW_MAGIC_PAT_FOUND | \
+ AR_WOW_KEEP_ALIVE_FAIL | \
+ AR_WOW_BEACON_FAIL))
+#define AR_WOW_CLEAR_EVENTS(x) (x & ~(AR_WOW_PATTERN_EN(0xff) | \
+ AR_WOW_MAGIC_EN | \
+ AR_WOW_MAC_INTR_EN | \
+ AR_WOW_BEACON_FAIL | \
+ AR_WOW_KEEP_ALIVE_FAIL))
+
+/* AR_WOW_COUNT register values */
+#define AR_WOW_AIFS_CNT(x) (x & 0xff)
+#define AR_WOW_SLOT_CNT(x) ((x & 0xff) << 8)
+#define AR_WOW_KEEP_ALIVE_CNT(x) ((x & 0xff) << 16)
+
+/* AR_WOW_BCN_EN register */
+#define AR_WOW_BEACON_FAIL_EN 0x00000001
+
+/* AR_WOW_BCN_TIMO register */
+#define AR_WOW_BEACON_TIMO 0x40000000 /* valid if BCN_EN is set */
+
+/* AR_WOW_KEEP_ALIVE_TIMO register */
+#define AR_WOW_KEEP_ALIVE_TIMO_VALUE
+#define AR_WOW_KEEP_ALIVE_NEVER 0xffffffff
+
+/* AR_WOW_KEEP_ALIVE register */
+#define AR_WOW_KEEP_ALIVE_AUTO_DIS 0x00000001
+#define AR_WOW_KEEP_ALIVE_FAIL_DIS 0x00000002
+
+/* AR_WOW_KEEP_ALIVE_DELAY register */
+#define AR_WOW_KEEP_ALIVE_DELAY_VALUE 0x000003e8 /* 1 msec */
+
+
+/*
+ * keep it long for beacon workaround - ensure no false alarm
+ */
+#define AR_WOW_BMISSTHRESHOLD 0x20
+
+/* AR_WOW_PATTERN_MATCH register */
+#define AR_WOW_PAT_END_OF_PKT(x) (x & 0xf)
+#define AR_WOW_PAT_OFF_MATCH(x) ((x & 0xf) << 8)
+
+/*
+ * default values for Wow Configuration for backoff, aifs, slot, keep-alive
+ * to be programmed into various registers.
+ */
+#define AR_WOW_PAT_BACKOFF 0x00000004 /* AR_WOW_PATTERN_REG */
+#define AR_WOW_CNT_AIFS_CNT 0x00000022 /* AR_WOW_COUNT_REG */
+#define AR_WOW_CNT_SLOT_CNT 0x00000009 /* AR_WOW_COUNT_REG */
+/*
+ * Keepalive count applicable for AR9280 2.0 and above.
+ */
+#define AR_WOW_CNT_KA_CNT 0x00000008 /* AR_WOW_COUNT register */
+
+/* WoW - Transmit buffer for keep alive frames */
+#define AR_WOW_TRANSMIT_BUFFER 0xe000 /* E000 - EFFC */
+
+#define AR_WOW_TXBUF(i) (AR_WOW_TRANSMIT_BUFFER + ((i) << 2))
+
+#define AR_WOW_KA_DESC_WORD2 0xe000
+
+#define AR_WOW_KA_DATA_WORD0 0xe030
+
+/* WoW Transmit Buffer for patterns */
+#define AR_WOW_TB_PATTERN(i) (0xe100 + (i << 8))
+#define AR_WOW_TB_MASK(i) (0xec00 + (i << 5))
+
+/* Currently patterns 0-7 are supported, so bits 0-7 are set */
+#define AR_WOW_PATTERN_SUPPORTED 0xff
+#define AR_WOW_LENGTH_MAX 0xff
+#define AR_WOW_LEN1_SHIFT(_i) ((0x3 - ((_i) & 0x3)) << 0x3)
+#define AR_WOW_LENGTH1_MASK(_i) (AR_WOW_LENGTH_MAX << AR_WOW_LEN1_SHIFT(_i))
+#define AR_WOW_LEN2_SHIFT(_i) ((0x7 - ((_i) & 0x7)) << 0x3)
+#define AR_WOW_LENGTH2_MASK(_i) (AR_WOW_LENGTH_MAX << AR_WOW_LEN2_SHIFT(_i))
#define AR9271_CORE_CLOCK 117 /* clock to 117Mhz */
#define AR9271_TARGET_BAUD_RATE 19200 /* 115200 */
@@ -2077,12 +2224,6 @@ enum {
AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \
AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING | \
AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \
- AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
- AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \
- AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
- AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
- AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
- AR_MCI_INTERRUPT_RX_MSG_CONT_RST | \
AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
#define AR_MCI_CPU_INT 0x1840
@@ -2098,8 +2239,8 @@ enum {
#define AR_MCI_CONT_STATUS 0x1848
#define AR_MCI_CONT_RSSI_POWER 0x000000FF
#define AR_MCI_CONT_RSSI_POWER_S 0
-#define AR_MCI_CONT_RRIORITY 0x0000FF00
-#define AR_MCI_CONT_RRIORITY_S 8
+#define AR_MCI_CONT_PRIORITY 0x0000FF00
+#define AR_MCI_CONT_PRIORITY_S 8
#define AR_MCI_CONT_TXRX 0x00010000
#define AR_MCI_CONT_TXRX_S 16
@@ -2162,10 +2303,6 @@ enum {
#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000
#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31
-#define AR_BTCOEX_WL_WEIGHTS0 0x18b0
-#define AR_BTCOEX_WL_WEIGHTS1 0x18b4
-#define AR_BTCOEX_WL_WEIGHTS2 0x18b8
-#define AR_BTCOEX_WL_WEIGHTS3 0x18bc
#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2))
#define AR_BTCOEX_WL_LNA 0x1940
#define AR_BTCOEX_RFGAIN_CTRL 0x1944
@@ -2211,5 +2348,7 @@ enum {
#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff
#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0
+#define AR_GLB_SWREG_DISCONT_MODE 0x2002c
+#define AR_GLB_SWREG_DISCONT_EN_BT_WLAN 0x3
#endif
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
new file mode 100644
index 0000000..44a08eb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -0,0 +1,532 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "ath9k.h"
+#include "reg.h"
+#include "hw-ops.h"
+
+const char *ath9k_hw_wow_event_to_string(u32 wow_event)
+{
+ if (wow_event & AH_WOW_MAGIC_PATTERN_EN)
+ return "Magic pattern";
+ if (wow_event & AH_WOW_USER_PATTERN_EN)
+ return "User pattern";
+ if (wow_event & AH_WOW_LINK_CHANGE)
+ return "Link change";
+ if (wow_event & AH_WOW_BEACON_MISS)
+ return "Beacon miss";
+
+ return "unknown reason";
+}
+EXPORT_SYMBOL(ath9k_hw_wow_event_to_string);
+
+static void ath9k_hw_config_serdes_wow_sleep(struct ath_hw *ah)
+{
+ int i;
+
+ for (i = 0; i < ah->iniPcieSerdesWow.ia_rows; i++)
+ REG_WRITE(ah, INI_RA(&ah->iniPcieSerdesWow, i, 0),
+ INI_RA(&ah->iniPcieSerdesWow, i, 1));
+
+ usleep_range(1000, 1500);
+}
+
+static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+
+ /* set rx disable bit */
+ REG_WRITE(ah, AR_CR, AR_CR_RXD);
+
+ if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0, AH_WAIT_TIMEOUT)) {
+ ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
+ REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
+ return;
+ } else {
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_RXDP, 0x0);
+ }
+
+ /* AR9280 WoW has sleep issue, do not set it to sleep */
+ if (AR_SREV_9280_20(ah))
+ return;
+
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
+}
+
+static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u8 sta_mac_addr[ETH_ALEN], ap_mac_addr[ETH_ALEN];
+ u32 ctl[13] = {0};
+ u32 data_word[KAL_NUM_DATA_WORDS];
+ u8 i;
+ u32 wow_ka_data_word0;
+
+ memcpy(sta_mac_addr, common->macaddr, ETH_ALEN);
+ memcpy(ap_mac_addr, common->curbssid, ETH_ALEN);
+
+ /* set the transmit buffer */
+ ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16));
+
+ if (!(AR_SREV_9300_20_OR_LATER(ah)))
+ ctl[0] += (KAL_ANTENNA_MODE << 25);
+
+ ctl[1] = 0;
+ ctl[3] = 0xb; /* OFDM_6M hardware value for this rate */
+ ctl[4] = 0;
+ ctl[7] = (ah->txchainmask) << 2;
+
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ctl[2] = 0xf << 16; /* tx_tries 0 */
+ else
+ ctl[2] = 0x7 << 16; /* tx_tries 0 */
+
+
+ for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
+ REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
+
+ /* the AR9300 family uses 13 descriptor words; write the extra word */
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
+
+ data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
+ (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
+ data_word[1] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
+ (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
+ data_word[2] = (sta_mac_addr[1] << 24) | (sta_mac_addr[0] << 16) |
+ (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
+ data_word[3] = (sta_mac_addr[5] << 24) | (sta_mac_addr[4] << 16) |
+ (sta_mac_addr[3] << 8) | (sta_mac_addr[2]);
+ data_word[4] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
+ (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
+ data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
+
+ if (AR_SREV_9462_20_OR_LATER(ah)) {
+ /* AR9462 2.0 has an extra descriptor word (time based
+ * discard) compared to other chips */
+ REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
+ wow_ka_data_word0 = AR_WOW_TXBUF(13);
+ } else {
+ wow_ka_data_word0 = AR_WOW_TXBUF(12);
+ }
+
+ for (i = 0; i < KAL_NUM_DATA_WORDS; i++)
+ REG_WRITE(ah, (wow_ka_data_word0 + i*4), data_word[i]);
+
+}
+
+void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
+ u8 *user_mask, int pattern_count,
+ int pattern_len)
+{
+ int i;
+ u32 pattern_val, mask_val;
+ u32 set, clr;
+
+ /* FIXME: should check count by querying the hardware capability */
+ if (pattern_count >= MAX_NUM_PATTERN)
+ return;
+
+ REG_SET_BIT(ah, AR_WOW_PATTERN, BIT(pattern_count));
+
+ /* set the registers for pattern */
+ for (i = 0; i < MAX_PATTERN_SIZE; i += 4) {
+ memcpy(&pattern_val, user_pattern, 4);
+ REG_WRITE(ah, (AR_WOW_TB_PATTERN(pattern_count) + i),
+ pattern_val);
+ user_pattern += 4;
+ }
+
+ /* set the registers for mask */
+ for (i = 0; i < MAX_PATTERN_MASK_SIZE; i += 4) {
+ memcpy(&mask_val, user_mask, 4);
+ REG_WRITE(ah, (AR_WOW_TB_MASK(pattern_count) + i), mask_val);
+ user_mask += 4;
+ }
+
+ /* set the pattern length to be matched
+ *
+ * AR_WOW_LENGTH1_REG1
+ * bit 31:24 pattern 0 length
+ * bit 23:16 pattern 1 length
+ * bit 15:8 pattern 2 length
+ * bit 7:0 pattern 3 length
+ *
+ * AR_WOW_LENGTH1_REG2
+ * bit 31:24 pattern 4 length
+ * bit 23:16 pattern 5 length
+ * bit 15:8 pattern 6 length
+ * bit 7:0 pattern 7 length
+ *
+ * the below logic writes out the new
+ * pattern length for the corresponding
+ * pattern_count, while masking out the
+ * other fields
+ */
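+ /*
+ * Worked example (values chosen for illustration): with
+ * pattern_count = 2 and pattern_len = 0x28, AR_WOW_LEN1_SHIFT(2)
+ * evaluates to (0x3 - 2) << 0x3 = 8, so the read-modify-write
+ * below clears bits 15:8 of AR_WOW_LENGTH1 (the pattern 2 slot)
+ * and writes 0x28 there, leaving the other pattern lengths
+ * untouched.
+ */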
+
+ ah->wow_event_mask |= BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);
+
+ if (!AR_SREV_9285_12_OR_LATER(ah))
+ return;
+
+ if (pattern_count < 4) {
+ /* Pattern 0-3 uses AR_WOW_LENGTH1 register */
+ set = (pattern_len & AR_WOW_LENGTH_MAX) <<
+ AR_WOW_LEN1_SHIFT(pattern_count);
+ clr = AR_WOW_LENGTH1_MASK(pattern_count);
+ REG_RMW(ah, AR_WOW_LENGTH1, set, clr);
+ } else {
+ /* Pattern 4-7 uses AR_WOW_LENGTH2 register */
+ set = (pattern_len & AR_WOW_LENGTH_MAX) <<
+ AR_WOW_LEN2_SHIFT(pattern_count);
+ clr = AR_WOW_LENGTH2_MASK(pattern_count);
+ REG_RMW(ah, AR_WOW_LENGTH2, set, clr);
+ }
+
+}
+EXPORT_SYMBOL(ath9k_hw_wow_apply_pattern);
+
+u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
+{
+ u32 wow_status = 0;
+ u32 val = 0, rval;
+ /*
+ * read the WoW status register to know
+ * the wakeup reason
+ */
+ rval = REG_READ(ah, AR_WOW_PATTERN);
+ val = AR_WOW_STATUS(rval);
+
+ /*
+ * mask only the WoW events that we have enabled. Sometimes
+ * we have spurious WoW events from the AR_WOW_PATTERN
+ * register. This mask will clean it up.
+ */
+
+ val &= ah->wow_event_mask;
+
+ if (val) {
+
+ if (val & AR_WOW_MAGIC_PAT_FOUND)
+ wow_status |= AH_WOW_MAGIC_PATTERN_EN;
+
+ if (AR_WOW_PATTERN_FOUND(val))
+ wow_status |= AH_WOW_USER_PATTERN_EN;
+
+ if (val & AR_WOW_KEEP_ALIVE_FAIL)
+ wow_status |= AH_WOW_LINK_CHANGE;
+
+ if (val & AR_WOW_BEACON_FAIL)
+ wow_status |= AH_WOW_BEACON_MISS;
+
+ }
+
+ /*
+ * set and clear WOW_PME_CLEAR registers for the chip to
+ * generate next wow signal.
+ * disable D3 before accessing other registers ?
+ */
+
+ /* do we need to check the bit value 0x01000000 (7-10) ?? */
+ REG_RMW(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR,
+ AR_PMCTRL_PWR_STATE_D1D3);
+
+ /*
+ * clear all events
+ */
+ REG_WRITE(ah, AR_WOW_PATTERN,
+ AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));
+
+ /*
+ * tie reset register for AR9002 family of chipsets
+ * NB: not tying it back might have some repercussions.
+ */
+
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_SET_BIT(ah, AR_WA, AR_WA_UNTIE_RESET_EN |
+ AR_WA_POR_SHORT | AR_WA_RESET_EN);
+ }
+
+
+ /*
+ * restore the beacon threshold to init value
+ */
+ REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
+
+ /*
+ * Restore the way the PCI-E reset, Power-On-Reset and external
+ * PCIE_POR_SHORT pins are tied to their original values.
+ * Just before WoW sleep we untied the PCI-E reset from our
+ * chip's Power-On-Reset so that any PCI-E reset from the bus
+ * would not reset our chip.
+ */
+
+ if (AR_SREV_9280_20_OR_LATER(ah) && ah->is_pciexpress)
+ ath9k_hw_configpcipowersave(ah, false);
+
+ ah->wow_event_mask = 0;
+
+ return wow_status;
+}
+EXPORT_SYMBOL(ath9k_hw_wow_wakeup);
+
+void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
+{
+ u32 wow_event_mask;
+ u32 set, clr;
+
+ /*
+ * wow_event_mask is a mask to the AR_WOW_PATTERN register to
+ * indicate which WoW events we have enabled. The WoW events
+ * are from the 'pattern_enable' in this function and
+ * 'pattern_count' of ath9k_hw_wow_apply_pattern()
+ */
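+ /*
+ * For example, enabling magic-packet wake adds
+ * AR_WOW_MAGIC_PAT_FOUND to this mask further down, and each
+ * programmed pattern N adds BIT(N + AR_WOW_PAT_FOUND_SHIFT) in
+ * ath9k_hw_wow_apply_pattern().
+ */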
+
+ wow_event_mask = ah->wow_event_mask;
+
+ /*
+ * Untie Power-on-Reset from the PCI-E reset. While we are in
+ * WoW sleep, we do not want a reset from the PCI-E bus to
+ * disturb our hw state
+ */
+
+ if (ah->is_pciexpress) {
+
+ /*
+ * we need to untie the internal POR (power-on-reset)
+ * from the external PCI-E reset. We also need to tie
+ * the PCI-E Phy reset to the PCI-E reset.
+ */
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ set = AR_WA_RESET_EN | AR_WA_POR_SHORT;
+ clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE;
+ REG_RMW(ah, AR_WA, set, clr);
+ } else {
+ if (AR_SREV_9285(ah) || AR_SREV_9287(ah))
+ set = AR9285_WA_DEFAULT;
+ else
+ set = AR9280_WA_DEFAULT;
+
+ /*
+ * In AR9280 and AR9285, bit 14 in WA register
+ * (disable L1) should only be set when device
+ * enters D3 state and be cleared when device
+ * comes back to D0
+ */
+
+ if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
+ set |= AR_WA_D3_L1_DISABLE;
+
+ clr = AR_WA_UNTIE_RESET_EN;
+ set |= AR_WA_RESET_EN | AR_WA_POR_SHORT;
+ REG_RMW(ah, AR_WA, set, clr);
+
+ /*
+ * for WoW sleep, we reprogram the SerDes so that the
+ * PLL and CLK REQ are both enabled. This uses more
+ * power but otherwise WoW sleep is unstable and the
+ * chip may disappear.
+ */
+
+ if (AR_SREV_9285_12_OR_LATER(ah))
+ ath9k_hw_config_serdes_wow_sleep(ah);
+
+ }
+ }
+
+ /*
+ * set the power states appropriately and enable PME
+ */
+ set = AR_PMCTRL_HOST_PME_EN | AR_PMCTRL_PWR_PM_CTRL_ENA |
+ AR_PMCTRL_AUX_PWR_DET | AR_PMCTRL_WOW_PME_CLR;
+
+ /*
+ * set and clear WOW_PME_CLEAR registers for the chip
+ * to generate next wow signal.
+ */
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
+ clr = AR_PMCTRL_WOW_PME_CLR;
+ REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
+
+ /*
+ * Setup for:
+ * - beacon misses
+ * - magic pattern
+ * - keep alive timeout
+ * - pattern matching
+ */
+
+ /*
+ * Program default values for pattern backoff, aifs/slot/KAL count,
+ * beacon miss timeout, KAL timeout, etc.
+ */
+
+ set = AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF);
+ REG_SET_BIT(ah, AR_WOW_PATTERN, set);
+
+ set = AR_WOW_AIFS_CNT(AR_WOW_CNT_AIFS_CNT) |
+ AR_WOW_SLOT_CNT(AR_WOW_CNT_SLOT_CNT) |
+ AR_WOW_KEEP_ALIVE_CNT(AR_WOW_CNT_KA_CNT);
+ REG_SET_BIT(ah, AR_WOW_COUNT, set);
+
+ if (pattern_enable & AH_WOW_BEACON_MISS)
+ set = AR_WOW_BEACON_TIMO;
+ /* We are not using beacon miss, program a large value */
+ else
+ set = AR_WOW_BEACON_TIMO_MAX;
+
+ REG_WRITE(ah, AR_WOW_BCN_TIMO, set);
+
+ /*
+ * Keep alive timo in ms except AR9280
+ */
+ if (!pattern_enable || AR_SREV_9280(ah))
+ set = AR_WOW_KEEP_ALIVE_NEVER;
+ else
+ set = KAL_TIMEOUT * 32;
+
+ REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, set);
+
+ /*
+ * Keep alive delay, based on the 'power on clock' and therefore in usec
+ */
+ set = KAL_DELAY * 1000;
+ REG_WRITE(ah, AR_WOW_KEEP_ALIVE_DELAY, set);
+
+ /*
+ * Create keep alive pattern to respond to beacons
+ */
+ ath9k_wow_create_keep_alive_pattern(ah);
+
+ /*
+ * Configure MAC WoW Registers
+ */
+
+ set = 0;
+ /* Send keep alive timeouts anyway */
+ clr = AR_WOW_KEEP_ALIVE_AUTO_DIS;
+
+ if (pattern_enable & AH_WOW_LINK_CHANGE)
+ wow_event_mask |= AR_WOW_KEEP_ALIVE_FAIL;
+ else
+ set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
+
+ /*
+ * FIXME: For now disable keep alive frame
+ * failure. This seems to sometimes trigger
+ * unnecessary wake up with AR9485 chipsets.
+ */
+ set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
+
+ REG_RMW(ah, AR_WOW_KEEP_ALIVE, set, clr);
+
+
+ /*
+ * we are relying on a bmiss failure. ensure we have
+ * enough threshold to prevent false positives
+ */
+ REG_RMW_FIELD(ah, AR_RSSI_THR, AR_RSSI_THR_BM_THR,
+ AR_WOW_BMISSTHRESHOLD);
+
+ set = 0;
+ clr = 0;
+
+ if (pattern_enable & AH_WOW_BEACON_MISS) {
+ set = AR_WOW_BEACON_FAIL_EN;
+ wow_event_mask |= AR_WOW_BEACON_FAIL;
+ } else {
+ clr = AR_WOW_BEACON_FAIL_EN;
+ }
+
+ REG_RMW(ah, AR_WOW_BCN_EN, set, clr);
+
+ set = 0;
+ clr = 0;
+ /*
+ * Enable the magic packet registers
+ */
+ if (pattern_enable & AH_WOW_MAGIC_PATTERN_EN) {
+ set = AR_WOW_MAGIC_EN;
+ wow_event_mask |= AR_WOW_MAGIC_PAT_FOUND;
+ } else {
+ clr = AR_WOW_MAGIC_EN;
+ }
+ set |= AR_WOW_MAC_INTR_EN;
+ REG_RMW(ah, AR_WOW_PATTERN, set, clr);
+
+ /*
+ * For AR9285 and later chipsets, enable WoW pattern
+ * match for packets shorter than 256 bytes for all
+ * patterns
+ */
+ if (AR_SREV_9285_12_OR_LATER(ah))
+ REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
+ AR_WOW_PATTERN_SUPPORTED);
+
+ /*
+ * Set the power states appropriately and enable PME
+ */
+ clr = 0;
+ set = AR_PMCTRL_PWR_STATE_D1D3 | AR_PMCTRL_HOST_PME_EN |
+ AR_PMCTRL_PWR_PM_CTRL_ENA;
+ /*
+ * This is needed for AR9300 chipsets to wake-up
+ * the host.
+ */
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ clr = AR_PCIE_PM_CTRL_ENA;
+
+ REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr);
+
+ if (AR_SREV_9462(ah)) {
+ /*
+ * this is needed to prevent the chip from waking up
+ * the host within 3-4 seconds on certain platforms/BIOSes.
+ * The fix is to enable D1 & D3 to match the original
+ * definition and also match the OTP value. Anyway, this
+ * is more related to SW WoW.
+ */
+ clr = AR_PMCTRL_PWR_STATE_D1D3;
+ REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
+
+ set = AR_PMCTRL_PWR_STATE_D1D3_REAL;
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
+ }
+
+
+
+ REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ /* to bring down WOW power low margin */
+ set = BIT(13);
+ REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set);
+ /* HW WoW */
+ clr = BIT(5);
+ REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr);
+ }
+
+ ath9k_hw_set_powermode_wow_sleep(ah);
+ ah->wow_event_mask = wow_event_mask;
+}
+EXPORT_SYMBOL(ath9k_hw_wow_enable);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 23eaa1b..2c9da6b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -29,6 +29,8 @@
#define HT_LTF(_ns) (4 * (_ns))
#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
+#define TIME_SYMBOLS(t) ((t) >> 2)
+#define TIME_SYMBOLS_HALFGI(t) (((t) * 5 - 4) / 18)
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
@@ -64,7 +66,8 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_atx_tid *tid,
- struct sk_buff *skb);
+ struct sk_buff *skb,
+ bool dequeue);
enum {
MCS_HT20,
@@ -73,50 +76,23 @@ enum {
MCS_HT40_SGI,
};
-static int ath_max_4ms_framelen[4][32] = {
- [MCS_HT20] = {
- 3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
- 6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
- 9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
- 12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
- },
- [MCS_HT20_SGI] = {
- 3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
- 7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
- 10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
- 14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
- },
- [MCS_HT40] = {
- 6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
- 13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
- 20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
- 26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
- },
- [MCS_HT40_SGI] = {
- 7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
- 14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
- 22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
- 29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
- }
-};
-
/*********************/
/* Aggregation logic */
/*********************/
-static void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
+void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
__acquires(&txq->axq_lock)
{
spin_lock_bh(&txq->axq_lock);
}
-static void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
+void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
__releases(&txq->axq_lock)
{
spin_unlock_bh(&txq->axq_lock);
}
-static void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
+void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
__releases(&txq->axq_lock)
{
struct sk_buff_head q;
@@ -613,10 +589,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
rcu_read_unlock();
- if (needreset) {
- RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
- ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
- }
+ if (needreset)
+ ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
}
static bool ath_lookup_legacy(struct ath_buf *bf)
@@ -649,6 +623,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
struct ieee80211_tx_rate *rates;
u32 max_4ms_framelen, frmlen;
u16 aggr_limit, bt_aggr_limit, legacy = 0;
+ int q = tid->ac->txq->mac80211_qnum;
int i;
skb = bf->bf_mpdu;
@@ -657,8 +632,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
/*
* Find the lowest frame length among the rate series that will have a
- * 4ms transmit duration.
- * TODO - TXOP limit needs to be considered.
+ * 4ms (or TXOP limited) transmit duration.
*/
max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
@@ -681,7 +655,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
modeidx++;
- frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
+ frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx];
max_4ms_framelen = min(max_4ms_framelen, frmlen);
}
@@ -811,7 +785,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
fi = get_frame_info(skb);
bf = fi->bf;
if (!fi->bf)
- bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+ bf = ath_tx_setup_buffer(sc, txq, tid, skb, true);
if (!bf)
continue;
@@ -928,6 +902,44 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
return duration;
}
+static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi)
+{
+ int streams = HT_RC_2_STREAMS(mcs);
+ int symbols, bits;
+ int bytes = 0;
+
+ symbols = sgi ? TIME_SYMBOLS_HALFGI(usec) : TIME_SYMBOLS(usec);
+ bits = symbols * bits_per_symbol[mcs % 8][ht40] * streams;
+ bits -= OFDM_PLCP_BITS;
+ bytes = bits / 8;
+ bytes -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
+ if (bytes > 65532)
+ bytes = 65532;
+
+ return bytes;
+}
+
+void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
+{
+ u16 *cur_ht20, *cur_ht20_sgi, *cur_ht40, *cur_ht40_sgi;
+ int mcs;
+
+ /* 4ms is the default (and maximum) duration */
+ if (!txop || txop > 4096)
+ txop = 4096;
+
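+	/*
+	 * At the default 4096 usec TXOP this corresponds to
+	 * TIME_SYMBOLS(4096) = 1024 OFDM symbols (4 usec each), or 1137
+	 * symbols with the short guard interval, which ath_max_framelen()
+	 * turns into a per-MCS byte limit below.
+	 */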
+ cur_ht20 = sc->tx.max_aggr_framelen[queue][MCS_HT20];
+ cur_ht20_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI];
+ cur_ht40 = sc->tx.max_aggr_framelen[queue][MCS_HT40];
+ cur_ht40_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI];
+ for (mcs = 0; mcs < 32; mcs++) {
+ cur_ht20[mcs] = ath_max_framelen(txop, mcs, false, false);
+ cur_ht20_sgi[mcs] = ath_max_framelen(txop, mcs, false, true);
+ cur_ht40[mcs] = ath_max_framelen(txop, mcs, true, false);
+ cur_ht40_sgi[mcs] = ath_max_framelen(txop, mcs, true, true);
+ }
+}
+
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_info *info, int len)
{
@@ -937,6 +949,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
struct ieee80211_tx_rate *rates;
const struct ieee80211_rate *rate;
struct ieee80211_hdr *hdr;
+ struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
int i;
u8 rix = 0;
@@ -947,18 +960,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
/* set dur_update_en for l-sig computation except for PS-Poll frames */
info->dur_update = !ieee80211_is_pspoll(hdr->frame_control);
-
- /*
- * We check if Short Preamble is needed for the CTS rate by
- * checking the BSS's global flag.
- * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
- */
- rate = ieee80211_get_rts_cts_rate(sc->hw, tx_info);
- info->rtscts_rate = rate->hw_value;
-
- if (tx_info->control.vif &&
- tx_info->control.vif->bss_conf.use_short_preamble)
- info->rtscts_rate |= rate->hw_value_short;
+ info->rtscts_rate = fi->rtscts_rate;
for (i = 0; i < 4; i++) {
bool is_40, is_sgi, is_sp;
@@ -1000,13 +1002,13 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
}
/* legacy rates */
+ rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
!(rate->flags & IEEE80211_RATE_ERP_G))
phy = WLAN_RC_PHY_CCK;
else
phy = WLAN_RC_PHY_OFDM;
- rate = &sc->sbands[tx_info->band].bitrates[rates[i].idx];
info->rates[i].Rate = rate->hw_value;
if (rate->hw_value_short) {
if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
@@ -1174,6 +1176,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
{
struct ath_atx_tid *txtid;
struct ath_node *an;
+ u8 density;
an = (struct ath_node *)sta->drv_priv;
txtid = ATH_AN_2_TID(an, tid);
@@ -1181,6 +1184,17 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
return -EAGAIN;
+ /* update ampdu factor/density, they may have changed. This may happen
+ * in HT IBSS when a beacon with HT-info is received after the station
+ * has already been added.
+ */
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ sta->ht_cap.ampdu_factor);
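+ /*
+ * e.g. an ampdu_factor of 3 gives
+ * 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + 3) = 64 KB,
+ * matching the largest A-MPDU the peer advertises.
+ */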
+ density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
+ an->mpdudensity = density;
+ }
+
txtid->state |= AGGR_ADDBA_PROGRESS;
txtid->paused = true;
*ssn = txtid->seq_start = txtid->seq_next;
@@ -1400,16 +1414,6 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
int error = 0;
struct ath9k_tx_queue_info qi;
- if (qnum == sc->beacon.beaconq) {
- /*
- * XXX: for beacon queue, we just save the parameter.
- * It will be picked up by ath_beaconq_config when
- * it's necessary.
- */
- sc->beacon.beacon_qi = *qinfo;
- return 0;
- }
-
BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
ath9k_hw_get_txq_props(ah, qnum, &qi);
@@ -1535,7 +1539,7 @@ bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
int i;
u32 npend = 0;
- if (sc->sc_flags & SC_OP_INVALID)
+ if (test_bit(SC_OP_INVALID, &sc->sc_flags))
return true;
ath9k_hw_abort_tx_dma(ah);
@@ -1583,7 +1587,8 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
struct ath_atx_ac *ac, *ac_tmp, *last_ac;
struct ath_atx_tid *tid, *last_tid;
- if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
+ if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) ||
+ list_empty(&txq->axq_acq) ||
txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
return;
@@ -1726,7 +1731,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
return;
}
- bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+ bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
if (!bf)
return;
@@ -1753,7 +1758,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
bf = fi->bf;
if (!bf)
- bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+ bf = ath_tx_setup_buffer(sc, txq, tid, skb, false);
if (!bf)
return;
@@ -1775,10 +1780,22 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ieee80211_sta *sta = tx_info->control.sta;
struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ const struct ieee80211_rate *rate;
struct ath_frame_info *fi = get_frame_info(skb);
struct ath_node *an = NULL;
enum ath9k_key_type keytype;
+ bool short_preamble = false;
+ /*
+ * We check if Short Preamble is needed for the CTS rate by
+ * checking the BSS's global flag.
+ * But for the rate series, IEEE80211_TX_RC_USE_SHORT_PREAMBLE is used.
+ */
+ if (tx_info->control.vif &&
+ tx_info->control.vif->bss_conf.use_short_preamble)
+ short_preamble = true;
+
+ rate = ieee80211_get_rts_cts_rate(hw, tx_info);
keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
if (sta)
@@ -1793,6 +1810,9 @@ static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
fi->keyix = ATH9K_TXKEYIX_INVALID;
fi->keytype = keytype;
fi->framelen = framelen;
+ fi->rtscts_rate = rate->hw_value;
+ if (short_preamble)
+ fi->rtscts_rate |= rate->hw_value_short;
}
u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
@@ -1814,7 +1834,8 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
struct ath_txq *txq,
struct ath_atx_tid *tid,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ bool dequeue)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_frame_info *fi = get_frame_info(skb);
@@ -1863,6 +1884,8 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
return bf;
error:
+ if (dequeue)
+ __skb_unlink(skb, &tid->buf_q);
dev_kfree_skb_any(skb);
return NULL;
}
@@ -1893,7 +1916,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
*/
ath_tx_send_ampdu(sc, tid, skb, txctl);
} else {
- bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+ bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
if (!bf)
return;
@@ -1967,7 +1990,8 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
ath_txq_lock(sc, txq);
if (txq == sc->tx.txq_map[q] &&
- ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
+ ++txq->pending_frames > sc->tx.txq_max_pending[q] &&
+ !txq->stopped) {
ieee80211_stop_queue(sc->hw, q);
txq->stopped = true;
}
@@ -1990,6 +2014,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
int q, padpos, padsize;
+ unsigned long flags;
ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
@@ -2008,6 +2033,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
skb_pull(skb, padsize);
}
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
ath_dbg(common, PS,
@@ -2017,13 +2043,15 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
PS_WAIT_FOR_PSPOLL_DATA |
PS_WAIT_FOR_TX_ACK));
}
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
q = skb_get_queue_mapping(skb);
if (txq == sc->tx.txq_map[q]) {
if (WARN_ON(--txq->pending_frames < 0))
txq->pending_frames = 0;
- if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
+ if (txq->stopped &&
+ txq->pending_frames < sc->tx.txq_max_pending[q]) {
ieee80211_wake_queue(sc->hw, q);
txq->stopped = false;
}
@@ -2167,7 +2195,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
ath_txq_lock(sc, txq);
for (;;) {
- if (work_pending(&sc->hw_reset_work))
+ if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
break;
if (list_empty(&txq->axq_q)) {
@@ -2227,46 +2255,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
ath_txq_unlock_complete(sc, txq);
}
-static void ath_tx_complete_poll_work(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc,
- tx_complete_work.work);
- struct ath_txq *txq;
- int i;
- bool needreset = false;
-#ifdef CONFIG_ATH9K_DEBUGFS
- sc->tx_complete_poll_work_seen++;
-#endif
-
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
- if (ATH_TXQ_SETUP(sc, i)) {
- txq = &sc->tx.txq[i];
- ath_txq_lock(sc, txq);
- if (txq->axq_depth) {
- if (txq->axq_tx_inprogress) {
- needreset = true;
- ath_txq_unlock(sc, txq);
- break;
- } else {
- txq->axq_tx_inprogress = true;
- }
- }
- ath_txq_unlock_complete(sc, txq);
- }
-
- if (needreset) {
- ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
- "tx hung, resetting the chip\n");
- RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
- ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
- }
-
- ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
- msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
-}
-
-
-
void ath_tx_tasklet(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
@@ -2290,7 +2278,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
int status;
for (;;) {
- if (work_pending(&sc->hw_reset_work))
+ if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
break;
status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 0cea20e..376be11 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -289,6 +289,7 @@ struct ar9170 {
unsigned int mem_block_size;
unsigned int rx_size;
unsigned int tx_seq_table;
+ bool ba_filter;
} fw;
/* interface configuration combinations */
@@ -425,6 +426,10 @@ struct ar9170 {
struct sk_buff *rx_failover;
int rx_failover_missing;
+ /* FIFO for collecting outstanding BlockAckRequest */
+ struct list_head bar_list[__AR9170_NUM_TXQ];
+ spinlock_t bar_list_lock[__AR9170_NUM_TXQ];
+
#ifdef CONFIG_CARL9170_WPC
struct {
bool pbc_state;
@@ -468,6 +473,12 @@ enum carl9170_ps_off_override_reasons {
PS_OFF_BCN = BIT(1),
};
+struct carl9170_bar_list_entry {
+ struct list_head list;
+ struct rcu_head head;
+ struct sk_buff *skb;
+};
+
struct carl9170_ba_stats {
u8 ampdu_len;
u8 ampdu_ack_len;
diff --git a/drivers/net/wireless/ath/carl9170/cmd.c b/drivers/net/wireless/ath/carl9170/cmd.c
index 195dc65..39a6387 100644
--- a/drivers/net/wireless/ath/carl9170/cmd.c
+++ b/drivers/net/wireless/ath/carl9170/cmd.c
@@ -138,7 +138,7 @@ int carl9170_reboot(struct ar9170 *ar)
if (!cmd)
return -ENOMEM;
- err = __carl9170_exec_cmd(ar, (struct carl9170_cmd *)cmd, true);
+ err = __carl9170_exec_cmd(ar, cmd, true);
return err;
}
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 5c73c03..c5ca6f1 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -307,6 +307,9 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
if (SUPP(CARL9170FW_WOL))
device_set_wakeup_enable(&ar->udev->dev, true);
+ if (SUPP(CARL9170FW_RX_BA_FILTER))
+ ar->fw.ba_filter = true;
+
if_comb_types = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_P2P_CLIENT);
diff --git a/drivers/net/wireless/ath/carl9170/fwdesc.h b/drivers/net/wireless/ath/carl9170/fwdesc.h
index 6d9c089..66848d4 100644
--- a/drivers/net/wireless/ath/carl9170/fwdesc.h
+++ b/drivers/net/wireless/ath/carl9170/fwdesc.h
@@ -78,6 +78,9 @@ enum carl9170fw_feature_list {
/* HW (ANI, CCA, MIB) tally counters */
CARL9170FW_HW_COUNTERS,
+ /* Firmware will pass BA when BARs are queued */
+ CARL9170FW_RX_BA_FILTER,
+
/* KEEP LAST */
__CARL9170FW_FEATURE_NUM
};
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 8d2523b..858e58d 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -949,6 +949,9 @@ static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
u32 rx_filter = 0;
+ if (!ar->fw.ba_filter)
+ rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
+
if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
rx_filter |= CARL9170_RX_FILTER_BAD;
@@ -1753,6 +1756,9 @@ void *carl9170_alloc(size_t priv_size)
for (i = 0; i < ar->hw->queues; i++) {
skb_queue_head_init(&ar->tx_status[i]);
skb_queue_head_init(&ar->tx_pending[i]);
+
+ INIT_LIST_HEAD(&ar->bar_list[i]);
+ spin_lock_init(&ar->bar_list_lock[i]);
}
INIT_WORK(&ar->ps_work, carl9170_ps_work);
INIT_WORK(&ar->ping_work, carl9170_ping_work);
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 84b22ee..6f6a341 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -161,7 +161,7 @@ static void carl9170_cmd_callback(struct ar9170 *ar, u32 len, void *buffer)
void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
{
- struct carl9170_rsp *cmd = (void *) buf;
+ struct carl9170_rsp *cmd = buf;
struct ieee80211_vif *vif;
if (carl9170_check_sequence(ar, cmd->hdr.seq))
@@ -520,7 +520,7 @@ static u8 *carl9170_find_ie(u8 *data, unsigned int len, u8 ie)
*/
static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
{
- struct ieee80211_hdr *hdr = (void *) data;
+ struct ieee80211_hdr *hdr = data;
struct ieee80211_tim_ie *tim_ie;
u8 *tim;
u8 tim_len;
@@ -576,6 +576,53 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
}
}
+static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
+{
+ struct ieee80211_bar *bar = (void *) data;
+ struct carl9170_bar_list_entry *entry;
+ unsigned int queue;
+
+ if (likely(!ieee80211_is_back(bar->frame_control)))
+ return;
+
+ if (len <= sizeof(*bar) + FCS_LEN)
+ return;
+
+ queue = TID_TO_WME_AC(((le16_to_cpu(bar->control) &
+ IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
+ IEEE80211_BAR_CTRL_TID_INFO_SHIFT) & 7);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
+ struct sk_buff *entry_skb = entry->skb;
+ struct _carl9170_tx_superframe *super = (void *)entry_skb->data;
+ struct ieee80211_bar *entry_bar = (void *)super->frame_data;
+
+#define TID_CHECK(a, b) ( \
+ ((a) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK)) == \
+ ((b) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK))) \
+
+ if (bar->start_seq_num == entry_bar->start_seq_num &&
+ TID_CHECK(bar->control, entry_bar->control) &&
+ compare_ether_addr(bar->ra, entry_bar->ta) == 0 &&
+ compare_ether_addr(bar->ta, entry_bar->ra) == 0) {
+ struct ieee80211_tx_info *tx_info;
+
+ tx_info = IEEE80211_SKB_CB(entry_skb);
+ tx_info->flags |= IEEE80211_TX_STAT_ACK;
+
+ spin_lock_bh(&ar->bar_list_lock[queue]);
+ list_del_rcu(&entry->list);
+ spin_unlock_bh(&ar->bar_list_lock[queue]);
+ kfree_rcu(entry, head);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+#undef TID_CHECK
+}
+
static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms)
{
__le16 fc;
@@ -738,6 +785,8 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
carl9170_ps_beacon(ar, buf, mpdu_len);
+ carl9170_ba_check(ar, buf, mpdu_len);
+
skb = carl9170_rx_copy_data(buf, mpdu_len);
if (!skb)
goto drop;
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index aed3051..6a86814 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -277,11 +277,11 @@ static void carl9170_tx_release(struct kref *ref)
return;
BUILD_BUG_ON(
- offsetof(struct ieee80211_tx_info, status.ampdu_ack_len) != 23);
+ offsetof(struct ieee80211_tx_info, status.ack_signal) != 20);
- memset(&txinfo->status.ampdu_ack_len, 0,
+ memset(&txinfo->status.ack_signal, 0,
sizeof(struct ieee80211_tx_info) -
- offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));
+ offsetof(struct ieee80211_tx_info, status.ack_signal));
if (atomic_read(&ar->tx_total_queued))
ar->tx_schedule = true;
@@ -436,6 +436,45 @@ out_rcu:
rcu_read_unlock();
}
+static void carl9170_tx_bar_status(struct ar9170 *ar, struct sk_buff *skb,
+ struct ieee80211_tx_info *tx_info)
+{
+ struct _carl9170_tx_superframe *super = (void *) skb->data;
+ struct ieee80211_bar *bar = (void *) super->frame_data;
+
+ /*
+ * Unlike all other frames, the status report for BARs does
+ * not directly come from the hardware as it is incapable of
+ * matching a BA to a previously sent BAR.
+ * Instead the RX-path will scan for incoming BAs and set the
+ * IEEE80211_TX_STAT_ACK if it sees one that was likely
+ * caused by a BAR from us.
+ */
+
+ if (unlikely(ieee80211_is_back_req(bar->frame_control)) &&
+ !(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
+ struct carl9170_bar_list_entry *entry;
+ int queue = skb_get_queue_mapping(skb);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
+ if (entry->skb == skb) {
+ spin_lock_bh(&ar->bar_list_lock[queue]);
+ list_del_rcu(&entry->list);
+ spin_unlock_bh(&ar->bar_list_lock[queue]);
+ kfree_rcu(entry, head);
+ goto out;
+ }
+ }
+
+ WARN(1, "bar not found in %d - ra:%pM ta:%pM c:%x ssn:%x\n",
+ queue, bar->ra, bar->ta, bar->control,
+ bar->start_seq_num);
+out:
+ rcu_read_unlock();
+ }
+}
+
void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
const bool success)
{
@@ -445,6 +484,8 @@ void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
txinfo = IEEE80211_SKB_CB(skb);
+ carl9170_tx_bar_status(ar, skb, txinfo);
+
if (success)
txinfo->flags |= IEEE80211_TX_STAT_ACK;
else
@@ -1265,6 +1306,26 @@ out_rcu:
return false;
}
+static void carl9170_bar_check(struct ar9170 *ar, struct sk_buff *skb)
+{
+ struct _carl9170_tx_superframe *super = (void *) skb->data;
+ struct ieee80211_bar *bar = (void *) super->frame_data;
+
+ if (unlikely(ieee80211_is_back_req(bar->frame_control)) &&
+ skb->len >= sizeof(struct ieee80211_bar)) {
+ struct carl9170_bar_list_entry *entry;
+ unsigned int queue = skb_get_queue_mapping(skb);
+
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!WARN_ON_ONCE(!entry)) {
+ entry->skb = skb;
+ spin_lock_bh(&ar->bar_list_lock[queue]);
+ list_add_tail_rcu(&entry->list, &ar->bar_list[queue]);
+ spin_unlock_bh(&ar->bar_list_lock[queue]);
+ }
+ }
+}
+
static void carl9170_tx(struct ar9170 *ar)
{
struct sk_buff *skb;
@@ -1287,6 +1348,8 @@ static void carl9170_tx(struct ar9170 *ar)
if (unlikely(carl9170_tx_ps_drop(ar, skb)))
continue;
+ carl9170_bar_check(ar, skb);
+
atomic_inc(&ar->tx_total_pending);
q = __carl9170_get_queue(ar, i);
diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h
index e651db8..2ec3e91 100644
--- a/drivers/net/wireless/ath/carl9170/version.h
+++ b/drivers/net/wireless/ath/carl9170/version.h
@@ -1,7 +1,7 @@
#ifndef __CARL9170_SHARED_VERSION_H
#define __CARL9170_SHARED_VERSION_H
-#define CARL9170FW_VERSION_YEAR 11
-#define CARL9170FW_VERSION_MONTH 8
-#define CARL9170FW_VERSION_DAY 15
-#define CARL9170FW_VERSION_GIT "1.9.4"
+#define CARL9170FW_VERSION_YEAR 12
+#define CARL9170FW_VERSION_MONTH 7
+#define CARL9170FW_VERSION_DAY 7
+#define CARL9170FW_VERSION_GIT "1.9.6"
#endif /* __CARL9170_SHARED_VERSION_H */
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 0e81904..5c54aa4 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -556,6 +556,9 @@ int ath_key_config(struct ath_common *common,
return -EIO;
set_bit(idx, common->keymap);
+ if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
+ set_bit(idx, common->ccmp_keymap);
+
if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
set_bit(idx + 64, common->keymap);
set_bit(idx, common->tkip_keymap);
@@ -582,6 +585,7 @@ void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
return;
clear_bit(key->hw_key_idx, common->keymap);
+ clear_bit(key->hw_key_idx, common->ccmp_keymap);
if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
return;
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index d07c030..4a4e98f 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -2952,10 +2952,10 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
/* current AP address - only in reassoc frame */
if (is_reassoc) {
memcpy(body.ap, priv->CurrentBSSID, 6);
- ssid_el_p = (u8 *)&body.ssid_el_id;
+ ssid_el_p = &body.ssid_el_id;
bodysize = 18 + priv->SSID_size;
} else {
- ssid_el_p = (u8 *)&body.ap[0];
+ ssid_el_p = &body.ap[0];
bodysize = 12 + priv->SSID_size;
}
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 67c13af..7c899fc 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -870,12 +870,9 @@ struct b43_wl {
* handler, only. This basically is just the IRQ mask register. */
spinlock_t hardirq_lock;
- /* The number of queues that were registered with the mac80211 subsystem
- * initially. This is a backup copy of hw->queues in case hw->queues has
- * to be dynamically lowered at runtime (Firmware does not support QoS).
- * hw->queues has to be restored to the original value before unregistering
- * from the mac80211 subsystem. */
- u16 mac80211_initially_registered_queues;
+ /* Set this when we call ieee80211_register_hw() and check it
+ * before calling ieee80211_unregister_hw(). */
+ bool hw_registred;
/* We can only have one operating interface (802.11 core)
* at a time. General information about this interface follows.
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 5a39b22..b80352b 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2359,6 +2359,8 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
if (err)
goto err_load;
+ fw->opensource = (ctx->req_type == B43_FWTYPE_OPENSOURCE);
+
return 0;
err_no_ucode:
@@ -2434,9 +2436,14 @@ static void b43_request_firmware(struct work_struct *work)
goto out;
start_ieee80211:
+ wl->hw->queues = B43_QOS_QUEUE_NUM;
+ if (!modparam_qos || dev->fw.opensource)
+ wl->hw->queues = 1;
+
err = ieee80211_register_hw(wl->hw);
if (err)
goto err_one_core_detach;
+ wl->hw_registred = true;
b43_leds_register(wl->current_dev);
goto out;
@@ -2536,11 +2543,9 @@ static int b43_upload_microcode(struct b43_wldev *dev)
dev->fw.hdr_format = B43_FW_HDR_410;
else
dev->fw.hdr_format = B43_FW_HDR_351;
- dev->fw.opensource = (fwdate == 0xFFFF);
+ WARN_ON(dev->fw.opensource != (fwdate == 0xFFFF));
- /* Default to use-all-queues. */
- dev->wl->hw->queues = dev->wl->mac80211_initially_registered_queues;
- dev->qos_enabled = !!modparam_qos;
+ dev->qos_enabled = dev->wl->hw->queues > 1;
/* Default to firmware/hardware crypto acceleration. */
dev->hwcrypto_enabled = true;
@@ -2558,14 +2563,8 @@ static int b43_upload_microcode(struct b43_wldev *dev)
/* Disable hardware crypto and fall back to software crypto. */
dev->hwcrypto_enabled = false;
}
- if (!(fwcapa & B43_FWCAPA_QOS)) {
- b43info(dev->wl, "QoS not supported by firmware\n");
- /* Disable QoS. Tweak hw->queues to 1. It will be restored before
- * ieee80211_unregister to make sure the networking core can
- * properly free possible resources. */
- dev->wl->hw->queues = 1;
- dev->qos_enabled = false;
- }
+ /* adding QoS support should use an offline discovery mechanism */
+ WARN(fwcapa & B43_FWCAPA_QOS, "QoS in OpenFW not supported\n");
} else {
b43info(dev->wl, "Loading firmware version %u.%u "
"(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n",
@@ -3766,7 +3765,7 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
if (prev_status >= B43_STAT_STARTED) {
err = b43_wireless_core_start(up_dev);
if (err) {
- b43err(wl, "Fatal: Coult not start device for "
+ b43err(wl, "Fatal: Could not start device for "
"selected %s-GHz band\n",
band_to_string(chan->band));
b43_wireless_core_exit(up_dev);
@@ -5297,8 +5296,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
- hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1;
- wl->mac80211_initially_registered_queues = hw->queues;
+ wl->hw_registred = false;
hw->max_rates = 2;
SET_IEEE80211_DEV(hw, dev->dev);
if (is_valid_ether_addr(sprom->et1mac))
@@ -5370,12 +5368,11 @@ static void b43_bcma_remove(struct bcma_device *core)
* as the ieee80211 unreg will destroy the workqueue. */
cancel_work_sync(&wldev->restart_work);
- /* Restore the queues count before unregistering, because firmware detect
- * might have modified it. Restoring is important, so the networking
- * stack can properly free resources. */
- wl->hw->queues = wl->mac80211_initially_registered_queues;
- b43_leds_stop(wldev);
- ieee80211_unregister_hw(wl->hw);
+ B43_WARN_ON(!wl);
+ if (wl->current_dev == wldev && wl->hw_registred) {
+ b43_leds_stop(wldev);
+ ieee80211_unregister_hw(wl->hw);
+ }
b43_one_core_detach(wldev->dev);
@@ -5446,11 +5443,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
cancel_work_sync(&wldev->restart_work);
B43_WARN_ON(!wl);
- if (wl->current_dev == wldev) {
- /* Restore the queues count before unregistering, because firmware detect
- * might have modified it. Restoring is important, so the networking
- * stack can properly free resources. */
- wl->hw->queues = wl->mac80211_initially_registered_queues;
+ if (wl->current_dev == wldev && wl->hw_registred) {
b43_leds_stop(wldev);
ieee80211_unregister_hw(wl->hw);
}
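
A minimal user-space sketch of the pattern the b43 hunks above move to: remember whether ieee80211_register_hw() succeeded and only unregister on teardown when it did, instead of saving and restoring hw->queues. register_hw()/unregister_hw() and struct wl_state are stand-ins, not driver code.

/* Guard-flag pattern: unregister only if registration actually happened. */
#include <stdbool.h>
#include <stdio.h>

struct wl_state {
	bool hw_registred;		/* spelled as in the patch */
};

static int register_hw(void)    { puts("register");   return 0; }
static void unregister_hw(void) { puts("unregister"); }

static int wl_start(struct wl_state *wl)
{
	int err = register_hw();

	if (err)
		return err;
	wl->hw_registred = true;	/* set only on success */
	return 0;
}

static void wl_remove(struct wl_state *wl)
{
	/* Safe even if probe failed before registration completed. */
	if (wl->hw_registred)
		unregister_hw();
}

int main(void)
{
	struct wl_state wl = { .hw_registred = false };

	wl_remove(&wl);			/* no-op: never registered */
	if (wl_start(&wl) == 0)
		wl_remove(&wl);		/* balanced unregister */
	return 0;
}

The same guard keeps b43_bcma_remove() and b43_ssb_remove() from calling ieee80211_unregister_hw() on a device whose firmware load, and hence registration, never completed.
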
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 1081188..b92bb9c 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -1369,7 +1369,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
i << 2);
b43_nphy_poll_rssi(dev, 2, results[i], 8);
}
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < 4; i += 2) {
s32 curr;
s32 mind = 40;
s32 minpoll = 249;
@@ -1415,14 +1415,15 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 1, i);
b43_nphy_poll_rssi(dev, i, poll_results, 8);
for (j = 0; j < 4; j++) {
- if (j / 2 == core)
+ if (j / 2 == core) {
offset[j] = 232 - poll_results[j];
- if (offset[j] < 0)
- offset[j] = -(abs(offset[j] + 4) / 8);
- else
- offset[j] = (offset[j] + 4) / 8;
- b43_nphy_scale_offset_rssi(dev, 0,
- offset[2 * core], core + 1, j % 2, i);
+ if (offset[j] < 0)
+ offset[j] = -(abs(offset[j] + 4) / 8);
+ else
+ offset[j] = (offset[j] + 4) / 8;
+ b43_nphy_scale_offset_rssi(dev, 0,
+ offset[2 * core], core + 1, j % 2, i);
+ }
}
}
}
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index b31ccc0..136510e 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -663,7 +663,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
u32 uninitialized_var(macstat);
u16 chanid;
u16 phytype;
- int padding;
+ int padding, rate_idx;
memset(&status, 0, sizeof(status));
@@ -766,16 +766,17 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
}
if (phystat0 & B43_RX_PHYST0_OFDM)
- status.rate_idx = b43_plcp_get_bitrate_idx_ofdm(plcp,
+ rate_idx = b43_plcp_get_bitrate_idx_ofdm(plcp,
phytype == B43_PHYTYPE_A);
else
- status.rate_idx = b43_plcp_get_bitrate_idx_cck(plcp);
- if (unlikely(status.rate_idx == -1)) {
+ rate_idx = b43_plcp_get_bitrate_idx_cck(plcp);
+ if (unlikely(rate_idx == -1)) {
/* PLCP seems to be corrupted.
* Drop the frame, if we are not interested in corrupted frames. */
if (!(dev->wl->filter_flags & FIF_PLCPFAIL))
goto drop;
}
+ status.rate_idx = rate_idx;
status.antenna = !!(phystat0 & B43_RX_PHYST0_ANT);
/*
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index f1f8bd0..2d3c664 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -52,7 +52,7 @@ struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
desc = ring->descbase;
desc = &(desc[slot]);
- return (struct b43legacy_dmadesc32 *)desc;
+ return desc;
}
static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
@@ -1072,7 +1072,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
/* create a bounce buffer in zone_dma on mapping failure. */
if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
- bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+ bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index cd9c9bc..8156135 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1508,7 +1508,7 @@ static void b43legacy_release_firmware(struct b43legacy_wldev *dev)
static void b43legacy_print_fw_helptext(struct b43legacy_wl *wl)
{
- b43legacyerr(wl, "You must go to http://linuxwireless.org/en/users/"
+ b43legacyerr(wl, "You must go to http://wireless.kernel.org/en/users/"
"Drivers/b43#devicefirmware "
"and download the correct firmware (version 3).\n");
}
@@ -2633,7 +2633,7 @@ static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
if (prev_status >= B43legacy_STAT_STARTED) {
err = b43legacy_wireless_core_start(up_dev);
if (err) {
- b43legacyerr(wl, "Fatal: Coult not start device for "
+ b43legacyerr(wl, "Fatal: Could not start device for "
"newly selected %s-PHY mode\n",
phymode_to_string(new_mode));
b43legacy_wireless_core_exit(up_dev);
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index a8012f2..b8ffea6 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -269,8 +269,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
(&txhdr->plcp), plcp_fragment_len,
rate);
- b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
- (&txhdr->plcp_fb), plcp_fragment_len,
+ b43legacy_generate_plcp_hdr(&txhdr->plcp_fb, plcp_fragment_len,
rate_fb->hw_value);
/* PHY TX Control word */
@@ -340,8 +339,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
(&txhdr->rts_plcp),
len, rts_rate);
- b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
- (&txhdr->rts_plcp_fb),
+ b43legacy_generate_plcp_hdr(&txhdr->rts_plcp_fb,
len, rts_rate_fb);
hdr = (struct ieee80211_hdr *)(&txhdr->rts_frame);
txhdr->rts_dur_fb = hdr->duration_id;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index abb48032..9d5170b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -34,3 +34,5 @@ brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
sdio_chip.o
brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
usb.o
+brcmfmac-$(CONFIG_BRCMDBG) += \
+ dhd_dbg.o
\ No newline at end of file
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index e2480d1..8e7e692 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -89,9 +89,9 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
- /* redirect, configure ane enable io for interrupt signal */
+ /* redirect, configure and enable io for interrupt signal */
data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
- if (sdiodev->irq_flags | IRQF_TRIGGER_HIGH)
+ if (sdiodev->irq_flags & IRQF_TRIGGER_HIGH)
data |= SDIO_SEPINT_ACT_HI;
brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
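
The one-character fix above is easy to gloss over: OR-ing IRQF_TRIGGER_HIGH into the test makes the condition unconditionally true, so SDIO_SEPINT_ACT_HI was set regardless of the requested polarity. A stand-alone snippet showing the difference; the flag values are illustrative only.

#include <stdio.h>

#define IRQF_TRIGGER_HIGH 0x04		/* placeholder value for the demo */

int main(void)
{
	unsigned long irq_flags = 0x02;	/* TRIGGER_HIGH not requested */

	if (irq_flags | IRQF_TRIGGER_HIGH)	/* old test: always true */
		printf("'|' test fires even without the flag\n");

	if (irq_flags & IRQF_TRIGGER_HIGH)	/* fixed test */
		printf("'&' test fires\n");
	else
		printf("'&' test correctly stays quiet\n");
	return 0;
}
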
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 82f51db..49765d3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -44,6 +44,7 @@
#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
+#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
#define SDIO_FUNC1_BLOCKSIZE 64
#define SDIO_FUNC2_BLOCKSIZE 512
@@ -52,6 +53,7 @@
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 9f63701..a11fe54 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -613,6 +613,9 @@ struct brcmf_pub {
struct work_struct multicast_work;
u8 macvalue[ETH_ALEN];
atomic_t pend_8021x_cnt;
+#ifdef DEBUG
+ struct dentry *dbgfs_dir;
+#endif
};
struct brcmf_if_event {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 3669164..537f499 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -36,6 +36,13 @@ struct dngl_stats {
unsigned long multicast; /* multicast packets received */
};
+struct brcmf_bus_dcmd {
+ char *name;
+ char *param;
+ int param_len;
+ struct list_head list;
+};
+
/* interface structure between common and bus layer */
struct brcmf_bus {
u8 type; /* bus type */
@@ -50,6 +57,7 @@ struct brcmf_bus {
unsigned long tx_realloc; /* Tx packets realloced for headroom */
struct dngl_stats dstats; /* Stats for dongle-based data */
u8 align; /* bus alignment requirement */
+ struct list_head dcmd_list;
/* interface functions pointers */
/* Stop bus module: clear pending frames, disable data flow */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 236cb9f..2621dd3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -800,13 +800,13 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; /* Room for
"event_msgs" + '\0' + bitvec */
char buf[128], *ptr;
- u32 dongle_align = drvr->bus_if->align;
- u32 glom = 0;
u32 roaming = 1;
uint bcn_timeout = 3;
int scan_assoc_time = 40;
int scan_unassoc_time = 40;
int i;
+ struct brcmf_bus_dcmd *cmdlst;
+ struct list_head *cur, *q;
mutex_lock(&drvr->proto_block);
@@ -827,17 +827,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
/* Print fw version info */
brcmf_dbg(ERROR, "Firmware version = %s\n", buf);
- /* Match Host and Dongle rx alignment */
- brcmf_c_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf,
- sizeof(iovbuf));
- brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
- sizeof(iovbuf));
-
- /* disable glom option per default */
- brcmf_c_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
- brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
- sizeof(iovbuf));
-
/* Setup timeout if Beacons are lost and roam is off to report
link down */
brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf,
@@ -874,6 +863,20 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
0, true);
}
+ /* set bus specific command if there is any */
+ list_for_each_safe(cur, q, &drvr->bus_if->dcmd_list) {
+ cmdlst = list_entry(cur, struct brcmf_bus_dcmd, list);
+ if (cmdlst->name && cmdlst->param && cmdlst->param_len) {
+ brcmf_c_mkiovar(cmdlst->name, cmdlst->param,
+ cmdlst->param_len, iovbuf,
+ sizeof(iovbuf));
+ brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
+ iovbuf, sizeof(iovbuf));
+ }
+ list_del(cur);
+ kfree(cmdlst);
+ }
+
mutex_unlock(&drvr->proto_block);
return 0;
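
The new dcmd_list plumbing lets a bus back-end queue bus-specific iovars before the dongle is initialised, and brcmf_c_preinit_dcmds() then drains the list, sends each entry and frees it. A user-space sketch of that queue-then-drain pattern, with a plain singly linked list standing in for <linux/list.h> and send_iovar() standing in for brcmf_proto_cdc_set_dcmd().

#include <stdio.h>
#include <stdlib.h>

struct bus_dcmd {
	const char *name;
	unsigned int param;
	struct bus_dcmd *next;
};

static struct bus_dcmd *dcmd_list;

static void queue_dcmd(const char *name, unsigned int param)
{
	struct bus_dcmd *d = malloc(sizeof(*d));

	if (!d)
		return;
	d->name = name;
	d->param = param;
	d->next = dcmd_list;
	dcmd_list = d;
}

static void send_iovar(const char *name, unsigned int param)
{
	printf("iovar %s = %u\n", name, param);
}

static void drain_dcmds(void)
{
	struct bus_dcmd *d, *next;

	for (d = dcmd_list; d; d = next) {
		next = d->next;		/* safe against freeing d */
		send_iovar(d->name, d->param);
		free(d);
	}
	dcmd_list = NULL;
}

int main(void)
{
	queue_dcmd("bus:txglomalign", 4);	/* what the SDIO probe queues */
	drain_dcmds();				/* what preinit_dcmds does */
	return 0;
}
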
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
new file mode 100644
index 0000000..7f89540
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/debugfs.h>
+#include <linux/if_ether.h>
+#include <linux/if.h>
+#include <linux/ieee80211.h>
+#include <linux/module.h>
+
+#include <defs.h>
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include "dhd.h"
+#include "dhd_bus.h"
+#include "dhd_dbg.h"
+
+static struct dentry *root_folder;
+
+void brcmf_debugfs_init(void)
+{
+ root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (IS_ERR(root_folder))
+ root_folder = NULL;
+}
+
+void brcmf_debugfs_exit(void)
+{
+ if (!root_folder)
+ return;
+
+ debugfs_remove_recursive(root_folder);
+ root_folder = NULL;
+}
+
+int brcmf_debugfs_attach(struct brcmf_pub *drvr)
+{
+ if (!root_folder)
+ return -ENODEV;
+
+ drvr->dbgfs_dir = debugfs_create_dir(dev_name(drvr->dev), root_folder);
+ return PTR_RET(drvr->dbgfs_dir);
+}
+
+void brcmf_debugfs_detach(struct brcmf_pub *drvr)
+{
+ if (!IS_ERR_OR_NULL(drvr->dbgfs_dir))
+ debugfs_remove_recursive(drvr->dbgfs_dir);
+}
+
+struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr)
+{
+ return drvr->dbgfs_dir;
+}
+
+static
+ssize_t brcmf_debugfs_sdio_counter_read(struct file *f, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct brcmf_sdio_count *sdcnt = f->private_data;
+ char buf[750];
+ int res;
+
+ /* only allow read from start */
+ if (*ppos > 0)
+ return 0;
+
+ res = scnprintf(buf, sizeof(buf),
+ "intrcount: %u\nlastintrs: %u\n"
+ "pollcnt: %u\nregfails: %u\n"
+ "tx_sderrs: %u\nfcqueued: %u\n"
+ "rxrtx: %u\nrx_toolong: %u\n"
+ "rxc_errors: %u\nrx_hdrfail: %u\n"
+ "rx_badhdr: %u\nrx_badseq: %u\n"
+ "fc_rcvd: %u\nfc_xoff: %u\n"
+ "fc_xon: %u\nrxglomfail: %u\n"
+ "rxglomframes: %u\nrxglompkts: %u\n"
+ "f2rxhdrs: %u\nf2rxdata: %u\n"
+ "f2txdata: %u\nf1regdata: %u\n"
+ "tickcnt: %u\ntx_ctlerrs: %lu\n"
+ "tx_ctlpkts: %lu\nrx_ctlerrs: %lu\n"
+ "rx_ctlpkts: %lu\nrx_readahead: %lu\n",
+ sdcnt->intrcount, sdcnt->lastintrs,
+ sdcnt->pollcnt, sdcnt->regfails,
+ sdcnt->tx_sderrs, sdcnt->fcqueued,
+ sdcnt->rxrtx, sdcnt->rx_toolong,
+ sdcnt->rxc_errors, sdcnt->rx_hdrfail,
+ sdcnt->rx_badhdr, sdcnt->rx_badseq,
+ sdcnt->fc_rcvd, sdcnt->fc_xoff,
+ sdcnt->fc_xon, sdcnt->rxglomfail,
+ sdcnt->rxglomframes, sdcnt->rxglompkts,
+ sdcnt->f2rxhdrs, sdcnt->f2rxdata,
+ sdcnt->f2txdata, sdcnt->f1regdata,
+ sdcnt->tickcnt, sdcnt->tx_ctlerrs,
+ sdcnt->tx_ctlpkts, sdcnt->rx_ctlerrs,
+ sdcnt->rx_ctlpkts, sdcnt->rx_readahead_cnt);
+
+ return simple_read_from_buffer(data, count, ppos, buf, res);
+}
+
+static const struct file_operations brcmf_debugfs_sdio_counter_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = brcmf_debugfs_sdio_counter_read
+};
+
+void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr,
+ struct brcmf_sdio_count *sdcnt)
+{
+ struct dentry *dentry = drvr->dbgfs_dir;
+
+ if (!IS_ERR_OR_NULL(dentry))
+ debugfs_create_file("counters", S_IRUGO, dentry,
+ sdcnt, &brcmf_debugfs_sdio_counter_ops);
+}
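
dhd_dbg.c above follows the usual debugfs read-handler shape: snapshot the counters into a stack buffer with scnprintf() and let simple_read_from_buffer() do the offset and short-read bookkeeping. A condensed kernel-style sketch of that shape with hypothetical names (my_stats, my_debugfs_add); it is not part of the driver.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/stat.h>

struct my_stats {
	unsigned int rx;
	unsigned int tx;
};

static ssize_t my_stats_read(struct file *f, char __user *data,
			     size_t count, loff_t *ppos)
{
	struct my_stats *st = f->private_data;
	char buf[64];
	int len;

	/* format a snapshot, then let the helper handle *ppos and count */
	len = scnprintf(buf, sizeof(buf), "rx: %u\ntx: %u\n", st->rx, st->tx);
	return simple_read_from_buffer(data, count, ppos, buf, len);
}

static const struct file_operations my_stats_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = my_stats_read,
};

static void my_debugfs_add(struct dentry *dir, struct my_stats *st)
{
	debugfs_create_file("counters", S_IRUGO, dir, st, &my_stats_ops);
}
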
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index a2c4576..b784920 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -76,4 +76,63 @@ do { \
extern int brcmf_msg_level;
+/*
+ * hold counter variables used in brcmfmac sdio driver.
+ */
+struct brcmf_sdio_count {
+ uint intrcount; /* Count of device interrupt callbacks */
+ uint lastintrs; /* Count as of last watchdog timer */
+ uint pollcnt; /* Count of active polls */
+ uint regfails; /* Count of R_REG failures */
+ uint tx_sderrs; /* Count of tx attempts with sd errors */
+ uint fcqueued; /* Tx packets that got queued */
+ uint rxrtx; /* Count of rtx requests (NAK to dongle) */
+ uint rx_toolong; /* Receive frames too long to receive */
+ uint rxc_errors; /* SDIO errors when reading control frames */
+ uint rx_hdrfail; /* SDIO errors on header reads */
+ uint rx_badhdr; /* Bad received headers (roosync?) */
+ uint rx_badseq; /* Mismatched rx sequence number */
+ uint fc_rcvd; /* Number of flow-control events received */
+ uint fc_xoff; /* Number which turned on flow-control */
+ uint fc_xon; /* Number which turned off flow-control */
+ uint rxglomfail; /* Failed deglom attempts */
+ uint rxglomframes; /* Number of glom frames (superframes) */
+ uint rxglompkts; /* Number of packets from glom frames */
+ uint f2rxhdrs; /* Number of header reads */
+ uint f2rxdata; /* Number of frame data reads */
+ uint f2txdata; /* Number of f2 frame writes */
+ uint f1regdata; /* Number of f1 register accesses */
+ uint tickcnt; /* Number of watchdog been schedule */
+ ulong tx_ctlerrs; /* Err of sending ctrl frames */
+ ulong tx_ctlpkts; /* Ctrl frames sent to dongle */
+ ulong rx_ctlerrs; /* Err of processing rx ctrl frames */
+ ulong rx_ctlpkts; /* Ctrl frames processed from dongle */
+ ulong rx_readahead_cnt; /* packets where header read-ahead was used */
+};
+
+struct brcmf_pub;
+#ifdef DEBUG
+void brcmf_debugfs_init(void);
+void brcmf_debugfs_exit(void);
+int brcmf_debugfs_attach(struct brcmf_pub *drvr);
+void brcmf_debugfs_detach(struct brcmf_pub *drvr);
+struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
+void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr,
+ struct brcmf_sdio_count *sdcnt);
+#else
+static inline void brcmf_debugfs_init(void)
+{
+}
+static inline void brcmf_debugfs_exit(void)
+{
+}
+static inline int brcmf_debugfs_attach(struct brcmf_pub *drvr)
+{
+ return 0;
+}
+static inline void brcmf_debugfs_detach(struct brcmf_pub *drvr)
+{
+}
+#endif
+
#endif /* _BRCMF_DBG_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 8933f9b..57bf1d7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -1007,6 +1007,9 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
drvr->bus_if->drvr = drvr;
drvr->dev = dev;
+ /* create device debugfs folder */
+ brcmf_debugfs_attach(drvr);
+
/* Attach and link in the protocol */
ret = brcmf_proto_attach(drvr);
if (ret != 0) {
@@ -1017,6 +1020,8 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
INIT_WORK(&drvr->setmacaddr_work, _brcmf_set_mac_address);
INIT_WORK(&drvr->multicast_work, _brcmf_set_multicast_list);
+ INIT_LIST_HEAD(&drvr->bus_if->dcmd_list);
+
return ret;
fail:
@@ -1123,6 +1128,7 @@ void brcmf_detach(struct device *dev)
brcmf_proto_detach(drvr);
}
+ brcmf_debugfs_detach(drvr);
bus_if->drvr = NULL;
kfree(drvr);
}
@@ -1192,6 +1198,8 @@ exit:
static void brcmf_driver_init(struct work_struct *work)
{
+ brcmf_debugfs_init();
+
#ifdef CONFIG_BRCMFMAC_SDIO
brcmf_sdio_init();
#endif
@@ -1219,6 +1227,7 @@ static void __exit brcmfmac_module_exit(void)
#ifdef CONFIG_BRCMFMAC_USB
brcmf_usb_exit();
#endif
+ brcmf_debugfs_exit();
}
module_init(brcmfmac_module_init);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 1dbf2be..472f2ef 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -31,6 +31,8 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/bcma/bcma.h>
+#include <linux/debugfs.h>
+#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <defs.h>
#include <brcmu_wifi.h>
@@ -48,6 +50,9 @@
#define CBUF_LEN (128)
+/* Device console log buffer state */
+#define CONSOLE_BUFFER_MAX 2024
+
struct rte_log_le {
__le32 buf; /* Can't be pointer on (64-bit) hosts */
__le32 buf_size;
@@ -281,7 +286,7 @@ struct rte_console {
* Shared structure between dongle and the host.
* The structure contains pointers to trap or assert information.
*/
-#define SDPCM_SHARED_VERSION 0x0002
+#define SDPCM_SHARED_VERSION 0x0003
#define SDPCM_SHARED_VERSION_MASK 0x00FF
#define SDPCM_SHARED_ASSERT_BUILT 0x0100
#define SDPCM_SHARED_ASSERT 0x0200
@@ -428,6 +433,29 @@ struct brcmf_console {
u8 *buf; /* Log buffer (host copy) */
uint last; /* Last buffer read index */
};
+
+struct brcmf_trap_info {
+ __le32 type;
+ __le32 epc;
+ __le32 cpsr;
+ __le32 spsr;
+ __le32 r0; /* a1 */
+ __le32 r1; /* a2 */
+ __le32 r2; /* a3 */
+ __le32 r3; /* a4 */
+ __le32 r4; /* v1 */
+ __le32 r5; /* v2 */
+ __le32 r6; /* v3 */
+ __le32 r7; /* v4 */
+ __le32 r8; /* v5 */
+ __le32 r9; /* sb/v6 */
+ __le32 r10; /* sl/v7 */
+ __le32 r11; /* fp/v8 */
+ __le32 r12; /* ip */
+ __le32 r13; /* sp */
+ __le32 r14; /* lr */
+ __le32 pc; /* r15 */
+};
#endif /* DEBUG */
struct sdpcm_shared {
@@ -439,6 +467,7 @@ struct sdpcm_shared {
u32 console_addr; /* Address of struct rte_console */
u32 msgtrace_addr;
u8 tag[32];
+ u32 brpt_addr;
};
struct sdpcm_shared_le {
@@ -450,6 +479,7 @@ struct sdpcm_shared_le {
__le32 console_addr; /* Address of struct rte_console */
__le32 msgtrace_addr;
u8 tag[32];
+ __le32 brpt_addr;
};
@@ -502,12 +532,9 @@ struct brcmf_sdio {
bool intr; /* Use interrupts */
bool poll; /* Use polling */
bool ipend; /* Device interrupt is pending */
- uint intrcount; /* Count of device interrupt callbacks */
- uint lastintrs; /* Count as of last watchdog timer */
uint spurious; /* Count of spurious interrupts */
uint pollrate; /* Ticks between device polls */
uint polltick; /* Tick counter */
- uint pollcnt; /* Count of active polls */
#ifdef DEBUG
uint console_interval;
@@ -515,8 +542,6 @@ struct brcmf_sdio {
uint console_addr; /* Console address from shared struct */
#endif /* DEBUG */
- uint regfails; /* Count of R_REG failures */
-
uint clkstate; /* State of sd and backplane clock(s) */
bool activity; /* Activity flag for clock down */
s32 idletime; /* Control for activity timeout */
@@ -531,33 +556,6 @@ struct brcmf_sdio {
/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
bool usebufpool;
- /* Some additional counters */
- uint tx_sderrs; /* Count of tx attempts with sd errors */
- uint fcqueued; /* Tx packets that got queued */
- uint rxrtx; /* Count of rtx requests (NAK to dongle) */
- uint rx_toolong; /* Receive frames too long to receive */
- uint rxc_errors; /* SDIO errors when reading control frames */
- uint rx_hdrfail; /* SDIO errors on header reads */
- uint rx_badhdr; /* Bad received headers (roosync?) */
- uint rx_badseq; /* Mismatched rx sequence number */
- uint fc_rcvd; /* Number of flow-control events received */
- uint fc_xoff; /* Number which turned on flow-control */
- uint fc_xon; /* Number which turned off flow-control */
- uint rxglomfail; /* Failed deglom attempts */
- uint rxglomframes; /* Number of glom frames (superframes) */
- uint rxglompkts; /* Number of packets from glom frames */
- uint f2rxhdrs; /* Number of header reads */
- uint f2rxdata; /* Number of frame data reads */
- uint f2txdata; /* Number of f2 frame writes */
- uint f1regdata; /* Number of f1 register accesses */
- uint tickcnt; /* Number of watchdog been schedule */
- unsigned long tx_ctlerrs; /* Err of sending ctrl frames */
- unsigned long tx_ctlpkts; /* Ctrl frames sent to dongle */
- unsigned long rx_ctlerrs; /* Err of processing rx ctrl frames */
- unsigned long rx_ctlpkts; /* Ctrl frames processed from dongle */
- unsigned long rx_readahead_cnt; /* Number of packets where header
- * read-ahead was used. */
-
u8 *ctrl_frame_buf;
u32 ctrl_frame_len;
bool ctrl_frame_stat;
@@ -583,6 +581,7 @@ struct brcmf_sdio {
u32 fw_ptr;
bool txoff; /* Transmit flow-controlled */
+ struct brcmf_sdio_count sdcnt;
};
/* clkstate */
@@ -945,7 +944,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
if (ret == 0)
w_sdreg32(bus, SMB_INT_ACK,
offsetof(struct sdpcmd_regs, tosbmailbox));
- bus->f1regdata += 2;
+ bus->sdcnt.f1regdata += 2;
/* Dongle recomposed rx frames, accept them again */
if (hmb_data & HMB_DATA_NAKHANDLED) {
@@ -984,12 +983,12 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
HMB_DATA_FCDATA_SHIFT;
if (fcbits & ~bus->flowcontrol)
- bus->fc_xoff++;
+ bus->sdcnt.fc_xoff++;
if (bus->flowcontrol & ~fcbits)
- bus->fc_xon++;
+ bus->sdcnt.fc_xon++;
- bus->fc_rcvd++;
+ bus->sdcnt.fc_rcvd++;
bus->flowcontrol = fcbits;
}
@@ -1021,7 +1020,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
SFC_RF_TERM, &err);
- bus->f1regdata++;
+ bus->sdcnt.f1regdata++;
/* Wait until the packet has been flushed (device/FIFO stable) */
for (lastrbc = retries = 0xffff; retries > 0; retries--) {
@@ -1029,7 +1028,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
SBSDIO_FUNC1_RFRAMEBCHI, &err);
lo = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_FUNC1_RFRAMEBCLO, &err);
- bus->f1regdata += 2;
+ bus->sdcnt.f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
@@ -1047,11 +1046,11 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
brcmf_dbg(INFO, "flush took %d iterations\n", 0xffff - retries);
if (rtx) {
- bus->rxrtx++;
+ bus->sdcnt.rxrtx++;
err = w_sdreg32(bus, SMB_NAK,
offsetof(struct sdpcmd_regs, tosbmailbox));
- bus->f1regdata++;
+ bus->sdcnt.f1regdata++;
if (err == 0)
bus->rxskip = true;
}
@@ -1243,7 +1242,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
dlen);
errcode = -1;
}
- bus->f2rxdata++;
+ bus->sdcnt.f2rxdata++;
/* On failure, kill the superframe, allow a couple retries */
if (errcode < 0) {
@@ -1256,7 +1255,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
} else {
bus->glomerr = 0;
brcmf_sdbrcm_rxfail(bus, true, false);
- bus->rxglomfail++;
+ bus->sdcnt.rxglomfail++;
brcmf_sdbrcm_free_glom(bus);
}
return 0;
@@ -1312,7 +1311,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
if (rxseq != seq) {
brcmf_dbg(INFO, "(superframe) rx_seq %d, expected %d\n",
seq, rxseq);
- bus->rx_badseq++;
+ bus->sdcnt.rx_badseq++;
rxseq = seq;
}
@@ -1376,7 +1375,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
} else {
bus->glomerr = 0;
brcmf_sdbrcm_rxfail(bus, true, false);
- bus->rxglomfail++;
+ bus->sdcnt.rxglomfail++;
brcmf_sdbrcm_free_glom(bus);
}
bus->nextlen = 0;
@@ -1402,7 +1401,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
if (rxseq != seq) {
brcmf_dbg(GLOM, "rx_seq %d, expected %d\n",
seq, rxseq);
- bus->rx_badseq++;
+ bus->sdcnt.rx_badseq++;
rxseq = seq;
}
rxseq++;
@@ -1441,8 +1440,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
down(&bus->sdsem);
}
- bus->rxglomframes++;
- bus->rxglompkts += bus->glom.qlen;
+ bus->sdcnt.rxglomframes++;
+ bus->sdcnt.rxglompkts += bus->glom.qlen;
}
return num;
}
@@ -1526,7 +1525,7 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
len, len - doff, bus->sdiodev->bus_if->maxctl);
bus->sdiodev->bus_if->dstats.rx_errors++;
- bus->rx_toolong++;
+ bus->sdcnt.rx_toolong++;
brcmf_sdbrcm_rxfail(bus, false, false);
goto done;
}
@@ -1536,13 +1535,13 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
bus->sdiodev->sbwad,
SDIO_FUNC_2,
F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen);
- bus->f2rxdata++;
+ bus->sdcnt.f2rxdata++;
/* Control frame failures need retransmission */
if (sdret < 0) {
brcmf_dbg(ERROR, "read %d control bytes failed: %d\n",
rdlen, sdret);
- bus->rxc_errors++;
+ bus->sdcnt.rxc_errors++;
brcmf_sdbrcm_rxfail(bus, true, true);
goto done;
}
@@ -1589,7 +1588,7 @@ brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen,
/* Read the entire frame */
sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, *pkt);
- bus->f2rxdata++;
+ bus->sdcnt.f2rxdata++;
if (sdret < 0) {
brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n",
@@ -1630,7 +1629,7 @@ brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf,
if ((u16)~(*len ^ check)) {
brcmf_dbg(ERROR, "(nextlen): HW hdr error: nextlen/len/check 0x%04x/0x%04x/0x%04x\n",
nextlen, *len, check);
- bus->rx_badhdr++;
+ bus->sdcnt.rx_badhdr++;
brcmf_sdbrcm_rxfail(bus, false, false);
goto fail;
}
@@ -1746,7 +1745,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
bus->nextlen = 0;
}
- bus->rx_readahead_cnt++;
+ bus->sdcnt.rx_readahead_cnt++;
/* Handle Flow Control */
fcbits = SDPCM_FCMASK_VALUE(
@@ -1754,12 +1753,12 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
if (bus->flowcontrol != fcbits) {
if (~bus->flowcontrol & fcbits)
- bus->fc_xoff++;
+ bus->sdcnt.fc_xoff++;
if (bus->flowcontrol & ~fcbits)
- bus->fc_xon++;
+ bus->sdcnt.fc_xon++;
- bus->fc_rcvd++;
+ bus->sdcnt.fc_rcvd++;
bus->flowcontrol = fcbits;
}
@@ -1767,7 +1766,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
if (rxseq != seq) {
brcmf_dbg(INFO, "(nextlen): rx_seq %d, expected %d\n",
seq, rxseq);
- bus->rx_badseq++;
+ bus->sdcnt.rx_badseq++;
rxseq = seq;
}
@@ -1814,11 +1813,11 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, bus->rxhdr,
BRCMF_FIRSTREAD);
- bus->f2rxhdrs++;
+ bus->sdcnt.f2rxhdrs++;
if (sdret < 0) {
brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n", sdret);
- bus->rx_hdrfail++;
+ bus->sdcnt.rx_hdrfail++;
brcmf_sdbrcm_rxfail(bus, true, true);
continue;
}
@@ -1840,7 +1839,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
if ((u16) ~(len ^ check)) {
brcmf_dbg(ERROR, "HW hdr err: len/check 0x%04x/0x%04x\n",
len, check);
- bus->rx_badhdr++;
+ bus->sdcnt.rx_badhdr++;
brcmf_sdbrcm_rxfail(bus, false, false);
continue;
}
@@ -1861,7 +1860,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
if ((doff < SDPCM_HDRLEN) || (doff > len)) {
brcmf_dbg(ERROR, "Bad data offset %d: HW len %d, min %d seq %d\n",
doff, len, SDPCM_HDRLEN, seq);
- bus->rx_badhdr++;
+ bus->sdcnt.rx_badhdr++;
brcmf_sdbrcm_rxfail(bus, false, false);
continue;
}
@@ -1880,19 +1879,19 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
if (bus->flowcontrol != fcbits) {
if (~bus->flowcontrol & fcbits)
- bus->fc_xoff++;
+ bus->sdcnt.fc_xoff++;
if (bus->flowcontrol & ~fcbits)
- bus->fc_xon++;
+ bus->sdcnt.fc_xon++;
- bus->fc_rcvd++;
+ bus->sdcnt.fc_rcvd++;
bus->flowcontrol = fcbits;
}
/* Check and update sequence number */
if (rxseq != seq) {
brcmf_dbg(INFO, "rx_seq %d, expected %d\n", seq, rxseq);
- bus->rx_badseq++;
+ bus->sdcnt.rx_badseq++;
rxseq = seq;
}
@@ -1937,7 +1936,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
brcmf_dbg(ERROR, "too long: len %d rdlen %d\n",
len, rdlen);
bus->sdiodev->bus_if->dstats.rx_errors++;
- bus->rx_toolong++;
+ bus->sdcnt.rx_toolong++;
brcmf_sdbrcm_rxfail(bus, false, false);
continue;
}
@@ -1960,7 +1959,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
/* Read the remaining frame data */
sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, pkt);
- bus->f2rxdata++;
+ bus->sdcnt.f2rxdata++;
if (sdret < 0) {
brcmf_dbg(ERROR, "read %d %s bytes failed: %d\n", rdlen,
@@ -2147,18 +2146,18 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, pkt);
- bus->f2txdata++;
+ bus->sdcnt.f2txdata++;
if (ret < 0) {
/* On failure, abort the command and terminate the frame */
brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
ret);
- bus->tx_sderrs++;
+ bus->sdcnt.tx_sderrs++;
brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
SFC_WF_TERM, NULL);
- bus->f1regdata++;
+ bus->sdcnt.f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
@@ -2166,7 +2165,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
SBSDIO_FUNC1_WFRAMEBCHI, NULL);
lo = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_FUNC1_WFRAMEBCLO, NULL);
- bus->f1regdata += 2;
+ bus->sdcnt.f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
}
@@ -2224,7 +2223,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
ret = r_sdreg32(bus, &intstatus,
offsetof(struct sdpcmd_regs,
intstatus));
- bus->f2txdata++;
+ bus->sdcnt.f2txdata++;
if (ret != 0)
break;
if (intstatus & bus->hostintmask)
@@ -2417,7 +2416,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
bus->ipend = false;
err = r_sdreg32(bus, &newstatus,
offsetof(struct sdpcmd_regs, intstatus));
- bus->f1regdata++;
+ bus->sdcnt.f1regdata++;
if (err != 0)
newstatus = 0;
newstatus &= bus->hostintmask;
@@ -2426,7 +2425,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
err = w_sdreg32(bus, newstatus,
offsetof(struct sdpcmd_regs,
intstatus));
- bus->f1regdata++;
+ bus->sdcnt.f1regdata++;
}
}
@@ -2445,7 +2444,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
err = r_sdreg32(bus, &newstatus,
offsetof(struct sdpcmd_regs, intstatus));
- bus->f1regdata += 2;
+ bus->sdcnt.f1regdata += 2;
bus->fcstate =
!!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
intstatus |= (newstatus & bus->hostintmask);
@@ -2502,7 +2501,7 @@ clkwait:
int ret, i;
ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf,
+ SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
(u32) bus->ctrl_frame_len);
if (ret < 0) {
@@ -2510,13 +2509,13 @@ clkwait:
terminate the frame */
brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
ret);
- bus->tx_sderrs++;
+ bus->sdcnt.tx_sderrs++;
brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
SFC_WF_TERM, &err);
- bus->f1regdata++;
+ bus->sdcnt.f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
@@ -2526,7 +2525,7 @@ clkwait:
lo = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_FUNC1_WFRAMEBCLO,
&err);
- bus->f1regdata += 2;
+ bus->sdcnt.f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
}
@@ -2657,7 +2656,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
/* Check for existing queue, current flow-control,
pending event, or pending clock */
brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
- bus->fcqueued++;
+ bus->sdcnt.fcqueued++;
/* Priority based enq */
spin_lock_bh(&bus->txqlock);
@@ -2845,13 +2844,13 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
/* On failure, abort the command and terminate the frame */
brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
ret);
- bus->tx_sderrs++;
+ bus->sdcnt.tx_sderrs++;
brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
SFC_WF_TERM, NULL);
- bus->f1regdata++;
+ bus->sdcnt.f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
@@ -2859,7 +2858,7 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
SBSDIO_FUNC1_WFRAMEBCHI, NULL);
lo = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_FUNC1_WFRAMEBCLO, NULL);
- bus->f1regdata += 2;
+ bus->sdcnt.f1regdata += 2;
if (hi == 0 && lo == 0)
break;
}
@@ -2976,13 +2975,324 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
up(&bus->sdsem);
if (ret)
- bus->tx_ctlerrs++;
+ bus->sdcnt.tx_ctlerrs++;
else
- bus->tx_ctlpkts++;
+ bus->sdcnt.tx_ctlpkts++;
return ret ? -EIO : 0;
}
+#ifdef DEBUG
+static inline bool brcmf_sdio_valid_shared_address(u32 addr)
+{
+ return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
+}
+
+static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
+ struct sdpcm_shared *sh)
+{
+ u32 addr;
+ int rv;
+ u32 shaddr = 0;
+ struct sdpcm_shared_le sh_le;
+ __le32 addr_le;
+
+ shaddr = bus->ramsize - 4;
+
+ /*
+ * Read last word in socram to determine
+ * address of sdpcm_shared structure
+ */
+ rv = brcmf_sdbrcm_membytes(bus, false, shaddr,
+ (u8 *)&addr_le, 4);
+ if (rv < 0)
+ return rv;
+
+ addr = le32_to_cpu(addr_le);
+
+ brcmf_dbg(INFO, "sdpcm_shared address 0x%08X\n", addr);
+
+ /*
+ * Check if addr is valid.
+ * NVRAM length at the end of memory should have been overwritten.
+ */
+ if (!brcmf_sdio_valid_shared_address(addr)) {
+ brcmf_dbg(ERROR, "invalid sdpcm_shared address 0x%08X\n",
+ addr);
+ return -EINVAL;
+ }
+
+ /* Read hndrte_shared structure */
+ rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&sh_le,
+ sizeof(struct sdpcm_shared_le));
+ if (rv < 0)
+ return rv;
+
+ /* Endianness */
+ sh->flags = le32_to_cpu(sh_le.flags);
+ sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
+ sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
+ sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
+ sh->assert_line = le32_to_cpu(sh_le.assert_line);
+ sh->console_addr = le32_to_cpu(sh_le.console_addr);
+ sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
+
+ if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
+ brcmf_dbg(ERROR,
+ "sdpcm_shared version mismatch: dhd %d dongle %d\n",
+ SDPCM_SHARED_VERSION,
+ sh->flags & SDPCM_SHARED_VERSION_MASK);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
+ struct sdpcm_shared *sh, char __user *data,
+ size_t count)
+{
+ u32 addr, console_ptr, console_size, console_index;
+ char *conbuf = NULL;
+ __le32 sh_val;
+ int rv;
+ loff_t pos = 0;
+ int nbytes = 0;
+
+ /* obtain console information from device memory */
+ addr = sh->console_addr + offsetof(struct rte_console, log_le);
+ rv = brcmf_sdbrcm_membytes(bus, false, addr,
+ (u8 *)&sh_val, sizeof(u32));
+ if (rv < 0)
+ return rv;
+ console_ptr = le32_to_cpu(sh_val);
+
+ addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
+ rv = brcmf_sdbrcm_membytes(bus, false, addr,
+ (u8 *)&sh_val, sizeof(u32));
+ if (rv < 0)
+ return rv;
+ console_size = le32_to_cpu(sh_val);
+
+ addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
+ rv = brcmf_sdbrcm_membytes(bus, false, addr,
+ (u8 *)&sh_val, sizeof(u32));
+ if (rv < 0)
+ return rv;
+ console_index = le32_to_cpu(sh_val);
+
+ /* allocate buffer for console data */
+ if (console_size <= CONSOLE_BUFFER_MAX)
+ conbuf = vzalloc(console_size+1);
+
+ if (!conbuf)
+ return -ENOMEM;
+
+ /* obtain the console data from device */
+ conbuf[console_size] = '\0';
+ rv = brcmf_sdbrcm_membytes(bus, false, console_ptr, (u8 *)conbuf,
+ console_size);
+ if (rv < 0)
+ goto done;
+
+ rv = simple_read_from_buffer(data, count, &pos,
+ conbuf + console_index,
+ console_size - console_index);
+ if (rv < 0)
+ goto done;
+
+ nbytes = rv;
+ if (console_index > 0) {
+ pos = 0;
+ rv = simple_read_from_buffer(data+nbytes, count, &pos,
+ conbuf, console_index - 1);
+ if (rv < 0)
+ goto done;
+ rv += nbytes;
+ }
+done:
+ vfree(conbuf);
+ return rv;
+}
+
+static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
+ char __user *data, size_t count)
+{
+ int error, res;
+ char buf[350];
+ struct brcmf_trap_info tr;
+ int nbytes;
+ loff_t pos = 0;
+
+ if ((sh->flags & SDPCM_SHARED_TRAP) == 0)
+ return 0;
+
+ error = brcmf_sdbrcm_membytes(bus, false, sh->trap_addr, (u8 *)&tr,
+ sizeof(struct brcmf_trap_info));
+ if (error < 0)
+ return error;
+
+ nbytes = brcmf_sdio_dump_console(bus, sh, data, count);
+ if (nbytes < 0)
+ return nbytes;
+
+ res = scnprintf(buf, sizeof(buf),
+ "dongle trap info: type 0x%x @ epc 0x%08x\n"
+ " cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
+ " lr 0x%08x pc 0x%08x offset 0x%x\n"
+ " r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n"
+ " r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n",
+ le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
+ le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
+ le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
+ le32_to_cpu(tr.pc), sh->trap_addr,
+ le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
+ le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
+ le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
+ le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
+
+ error = simple_read_from_buffer(data+nbytes, count, &pos, buf, res);
+ if (error < 0)
+ return error;
+
+ nbytes += error;
+ return nbytes;
+}
+
+static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
+ struct sdpcm_shared *sh, char __user *data,
+ size_t count)
+{
+ int error = 0;
+ char buf[200];
+ char file[80] = "?";
+ char expr[80] = "<???>";
+ int res;
+ loff_t pos = 0;
+
+ if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
+ brcmf_dbg(INFO, "firmware not built with -assert\n");
+ return 0;
+ } else if ((sh->flags & SDPCM_SHARED_ASSERT) == 0) {
+ brcmf_dbg(INFO, "no assert in dongle\n");
+ return 0;
+ }
+
+ if (sh->assert_file_addr != 0) {
+ error = brcmf_sdbrcm_membytes(bus, false, sh->assert_file_addr,
+ (u8 *)file, 80);
+ if (error < 0)
+ return error;
+ }
+ if (sh->assert_exp_addr != 0) {
+ error = brcmf_sdbrcm_membytes(bus, false, sh->assert_exp_addr,
+ (u8 *)expr, 80);
+ if (error < 0)
+ return error;
+ }
+
+ res = scnprintf(buf, sizeof(buf),
+ "dongle assert: %s:%d: assert(%s)\n",
+ file, sh->assert_line, expr);
+ return simple_read_from_buffer(data, count, &pos, buf, res);
+}
+
+static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
+{
+ int error;
+ struct sdpcm_shared sh;
+
+ down(&bus->sdsem);
+ error = brcmf_sdio_readshared(bus, &sh);
+ up(&bus->sdsem);
+
+ if (error < 0)
+ return error;
+
+ if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0)
+ brcmf_dbg(INFO, "firmware not built with -assert\n");
+ else if (sh.flags & SDPCM_SHARED_ASSERT)
+ brcmf_dbg(ERROR, "assertion in dongle\n");
+
+ if (sh.flags & SDPCM_SHARED_TRAP)
+ brcmf_dbg(ERROR, "firmware trap in dongle\n");
+
+ return 0;
+}
+
+static int brcmf_sdbrcm_died_dump(struct brcmf_sdio *bus, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ int error = 0;
+ struct sdpcm_shared sh;
+ int nbytes = 0;
+ loff_t pos = *ppos;
+
+ if (pos != 0)
+ return 0;
+
+ down(&bus->sdsem);
+ error = brcmf_sdio_readshared(bus, &sh);
+ if (error < 0)
+ goto done;
+
+ error = brcmf_sdio_assert_info(bus, &sh, data, count);
+ if (error < 0)
+ goto done;
+
+ nbytes = error;
+ error = brcmf_sdio_trap_info(bus, &sh, data, count);
+ if (error < 0)
+ goto done;
+
+ error += nbytes;
+ *ppos += error;
+done:
+ up(&bus->sdsem);
+ return error;
+}
+
+static ssize_t brcmf_sdio_forensic_read(struct file *f, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct brcmf_sdio *bus = f->private_data;
+ int res;
+
+ res = brcmf_sdbrcm_died_dump(bus, data, count, ppos);
+ if (res > 0)
+ *ppos += res;
+ return (ssize_t)res;
+}
+
+static const struct file_operations brcmf_sdio_forensic_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = brcmf_sdio_forensic_read
+};
+
+static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
+{
+ struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr;
+ struct dentry *dentry = brcmf_debugfs_get_devdir(drvr);
+
+ if (IS_ERR_OR_NULL(dentry))
+ return;
+
+ debugfs_create_file("forensics", S_IRUGO, dentry, bus,
+ &brcmf_sdio_forensic_ops);
+ brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
+}
+#else
+static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
+{
+ return 0;
+}
+
+static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
+{
+}
+#endif /* DEBUG */
+
static int
brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
{
@@ -3009,60 +3319,27 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
rxlen, msglen);
} else if (timeleft == 0) {
brcmf_dbg(ERROR, "resumed on timeout\n");
+ brcmf_sdbrcm_checkdied(bus);
} else if (pending) {
brcmf_dbg(CTL, "cancelled\n");
return -ERESTARTSYS;
} else {
brcmf_dbg(CTL, "resumed for unknown reason?\n");
+ brcmf_sdbrcm_checkdied(bus);
}
if (rxlen)
- bus->rx_ctlpkts++;
+ bus->sdcnt.rx_ctlpkts++;
else
- bus->rx_ctlerrs++;
+ bus->sdcnt.rx_ctlerrs++;
return rxlen ? (int)rxlen : -ETIMEDOUT;
}
-static int brcmf_sdbrcm_downloadvars(struct brcmf_sdio *bus, void *arg, int len)
-{
- int bcmerror = 0;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- /* Basic sanity checks */
- if (bus->sdiodev->bus_if->drvr_up) {
- bcmerror = -EISCONN;
- goto err;
- }
- if (!len) {
- bcmerror = -EOVERFLOW;
- goto err;
- }
-
- /* Free the old ones and replace with passed variables */
- kfree(bus->vars);
-
- bus->vars = kmalloc(len, GFP_ATOMIC);
- bus->varsz = bus->vars ? len : 0;
- if (bus->vars == NULL) {
- bcmerror = -ENOMEM;
- goto err;
- }
-
- /* Copy the passed variables, which should include the
- terminating double-null */
- memcpy(bus->vars, arg, bus->varsz);
-err:
- return bcmerror;
-}
-
static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
{
int bcmerror = 0;
- u32 varsize;
u32 varaddr;
- u8 *vbuffer;
u32 varsizew;
__le32 varsizew_le;
#ifdef DEBUG
@@ -3071,56 +3348,44 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
/* Even if there are no vars are to be written, we still
need to set the ramsize. */
- varsize = bus->varsz ? roundup(bus->varsz, 4) : 0;
- varaddr = (bus->ramsize - 4) - varsize;
+ varaddr = (bus->ramsize - 4) - bus->varsz;
if (bus->vars) {
- vbuffer = kzalloc(varsize, GFP_ATOMIC);
- if (!vbuffer)
- return -ENOMEM;
-
- memcpy(vbuffer, bus->vars, bus->varsz);
-
/* Write the vars list */
- bcmerror =
- brcmf_sdbrcm_membytes(bus, true, varaddr, vbuffer, varsize);
+ bcmerror = brcmf_sdbrcm_membytes(bus, true, varaddr,
+ bus->vars, bus->varsz);
#ifdef DEBUG
/* Verify NVRAM bytes */
- brcmf_dbg(INFO, "Compare NVRAM dl & ul; varsize=%d\n", varsize);
- nvram_ularray = kmalloc(varsize, GFP_ATOMIC);
- if (!nvram_ularray) {
- kfree(vbuffer);
+ brcmf_dbg(INFO, "Compare NVRAM dl & ul; varsize=%d\n",
+ bus->varsz);
+ nvram_ularray = kmalloc(bus->varsz, GFP_ATOMIC);
+ if (!nvram_ularray)
return -ENOMEM;
- }
/* Upload image to verify downloaded contents. */
- memset(nvram_ularray, 0xaa, varsize);
+ memset(nvram_ularray, 0xaa, bus->varsz);
/* Read the vars list to temp buffer for comparison */
- bcmerror =
- brcmf_sdbrcm_membytes(bus, false, varaddr, nvram_ularray,
- varsize);
+ bcmerror = brcmf_sdbrcm_membytes(bus, false, varaddr,
+ nvram_ularray, bus->varsz);
if (bcmerror) {
brcmf_dbg(ERROR, "error %d on reading %d nvram bytes at 0x%08x\n",
- bcmerror, varsize, varaddr);
+ bcmerror, bus->varsz, varaddr);
}
/* Compare the org NVRAM with the one read from RAM */
- if (memcmp(vbuffer, nvram_ularray, varsize))
+ if (memcmp(bus->vars, nvram_ularray, bus->varsz))
brcmf_dbg(ERROR, "Downloaded NVRAM image is corrupted\n");
else
brcmf_dbg(ERROR, "Download/Upload/Compare of NVRAM ok\n");
kfree(nvram_ularray);
#endif /* DEBUG */
-
- kfree(vbuffer);
}
/* adjust to the user specified RAM */
brcmf_dbg(INFO, "Physical memory size: %d\n", bus->ramsize);
brcmf_dbg(INFO, "Vars are at %d, orig varsize is %d\n",
- varaddr, varsize);
- varsize = ((bus->ramsize - 4) - varaddr);
+ varaddr, bus->varsz);
/*
* Determine the length token:
@@ -3131,13 +3396,13 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
varsizew = 0;
varsizew_le = cpu_to_le32(0);
} else {
- varsizew = varsize / 4;
+ varsizew = bus->varsz / 4;
varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
varsizew_le = cpu_to_le32(varsizew);
}
brcmf_dbg(INFO, "New varsize is %d, length token=0x%08x\n",
- varsize, varsizew);
+ bus->varsz, varsizew);
/* Write the length token to the last word */
bcmerror = brcmf_sdbrcm_membytes(bus, true, (bus->ramsize - 4),
@@ -3261,13 +3526,21 @@ err:
* by two NULs.
*/
-static uint brcmf_process_nvram_vars(char *varbuf, uint len)
+static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
{
+ char *varbuf;
char *dp;
bool findNewline;
int column;
- uint buf_len, n;
+ int ret = 0;
+ uint buf_len, n, len;
+ len = bus->firmware->size;
+ varbuf = vmalloc(len);
+ if (!varbuf)
+ return -ENOMEM;
+
+ memcpy(varbuf, bus->firmware->data, len);
dp = varbuf;
findNewline = false;
@@ -3296,56 +3569,44 @@ static uint brcmf_process_nvram_vars(char *varbuf, uint len)
column++;
}
buf_len = dp - varbuf;
-
while (dp < varbuf + n)
*dp++ = 0;
- return buf_len;
+ kfree(bus->vars);
+ /* roundup needed for download to device */
+ bus->varsz = roundup(buf_len + 1, 4);
+ bus->vars = kmalloc(bus->varsz, GFP_KERNEL);
+ if (bus->vars == NULL) {
+ bus->varsz = 0;
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* copy the processed variables and add null termination */
+ memcpy(bus->vars, varbuf, buf_len);
+ bus->vars[buf_len] = 0;
+err:
+ vfree(varbuf);
+ return ret;
}
static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
{
- uint len;
- char *memblock = NULL;
- char *bufp;
int ret;
+ if (bus->sdiodev->bus_if->drvr_up)
+ return -EISCONN;
+
ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret);
return ret;
}
- bus->fw_ptr = 0;
-
- memblock = kmalloc(MEMBLOCK, GFP_ATOMIC);
- if (memblock == NULL) {
- ret = -ENOMEM;
- goto err;
- }
-
- len = brcmf_sdbrcm_get_image(memblock, MEMBLOCK, bus);
-
- if (len > 0 && len < MEMBLOCK) {
- bufp = (char *)memblock;
- bufp[len] = 0;
- len = brcmf_process_nvram_vars(bufp, len);
- bufp += len;
- *bufp++ = 0;
- if (len)
- ret = brcmf_sdbrcm_downloadvars(bus, memblock, len + 1);
- if (ret)
- brcmf_dbg(ERROR, "error downloading vars: %d\n", ret);
- } else {
- brcmf_dbg(ERROR, "error reading nvram file: %d\n", len);
- ret = -EIO;
- }
-err:
- kfree(memblock);
+ ret = brcmf_process_nvram_vars(bus);
release_firmware(bus->firmware);
- bus->fw_ptr = 0;
return ret;
}
@@ -3419,7 +3680,7 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
return 0;
/* Start the watchdog timer */
- bus->tickcnt = 0;
+ bus->sdcnt.tickcnt = 0;
brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
down(&bus->sdsem);
@@ -3512,7 +3773,7 @@ void brcmf_sdbrcm_isr(void *arg)
return;
}
/* Count the interrupt call */
- bus->intrcount++;
+ bus->sdcnt.intrcount++;
bus->ipend = true;
/* Shouldn't get this interrupt if we're sleeping? */
@@ -3554,7 +3815,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
bus->polltick = 0;
/* Check device if no interrupts */
- if (!bus->intr || (bus->intrcount == bus->lastintrs)) {
+ if (!bus->intr ||
+ (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
if (!bus->dpc_sched) {
u8 devpend;
@@ -3569,7 +3831,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
/* If there is something, make like the ISR and
schedule the DPC */
if (intstatus) {
- bus->pollcnt++;
+ bus->sdcnt.pollcnt++;
bus->ipend = true;
bus->dpc_sched = true;
@@ -3581,7 +3843,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
}
/* Update interrupt tracking */
- bus->lastintrs = bus->intrcount;
+ bus->sdcnt.lastintrs = bus->sdcnt.intrcount;
}
#ifdef DEBUG
/* Poll for console output periodically */
@@ -3623,6 +3885,8 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid)
return true;
if (chipid == BCM4330_CHIP_ID)
return true;
+ if (chipid == BCM4334_CHIP_ID)
+ return true;
return false;
}
@@ -3793,7 +4057,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
brcmf_sdbrcm_bus_watchdog(bus);
/* Count the tick for reference */
- bus->tickcnt++;
+ bus->sdcnt.tickcnt++;
} else
break;
}
@@ -3856,6 +4120,10 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
{
int ret;
struct brcmf_sdio *bus;
+ struct brcmf_bus_dcmd *dlst;
+ u32 dngl_txglom;
+ u32 dngl_txglomalign;
+ u8 idx;
brcmf_dbg(TRACE, "Enter\n");
@@ -3938,8 +4206,29 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
goto fail;
}
+ brcmf_sdio_debugfs_create(bus);
brcmf_dbg(INFO, "completed!!\n");
+ /* sdio bus core specific dcmd */
+ idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+ dlst = kzalloc(sizeof(struct brcmf_bus_dcmd), GFP_KERNEL);
+ if (dlst) {
+ if (bus->ci->c_inf[idx].rev < 12) {
+ /* for sdio core rev < 12, disable txgloming */
+ dngl_txglom = 0;
+ dlst->name = "bus:txglom";
+ dlst->param = (char *)&dngl_txglom;
+ dlst->param_len = sizeof(u32);
+ } else {
+ /* otherwise, set txglomalign */
+ dngl_txglomalign = bus->sdiodev->bus_if->align;
+ dlst->name = "bus:txglomalign";
+ dlst->param = (char *)&dngl_txglomalign;
+ dlst->param_len = sizeof(u32);
+ }
+ list_add(&dlst->list, &bus->sdiodev->bus_if->dcmd_list);
+ }
+
/* if firmware path present try to download and bring up bus */
ret = brcmf_bus_start(bus->sdiodev->dev);
if (ret != 0) {
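
Two of the dhd_sdio.c hunks above meet at the last word of socram: brcmf_sdbrcm_write_vars() stores the NVRAM length token there as (~words << 16 | words), and after the firmware boots the same word is expected to hold the sdpcm_shared pointer, which brcmf_sdio_valid_shared_address() accepts only if it does not look like a leftover length token. A small stand-alone check of both encodings; the sizes and addresses are made up.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t nvram_len_token(uint32_t varsz_bytes)
{
	uint32_t words = varsz_bytes / 4;

	/* same length-token encoding as brcmf_sdbrcm_write_vars() */
	return (~words << 16) | (words & 0x0000ffff);
}

static bool valid_shared_address(uint32_t addr)
{
	/* same predicate as brcmf_sdio_valid_shared_address() */
	return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
}

int main(void)
{
	uint32_t token = nvram_len_token(0x400);	/* 1 KiB of vars */
	uint32_t shared = 0x0007fd80;			/* made-up pointer */

	printf("token   0x%08" PRIx32 " valid=%d\n",
	       token, valid_shared_address(token));
	printf("pointer 0x%08" PRIx32 " valid=%d\n",
	       shared, valid_shared_address(shared));
	return 0;
}
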
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index f8e1f1c..58155e2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -403,6 +403,23 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
ci->c_inf[3].cib = 0x03004211;
ci->ramsize = 0x48000;
break;
+ case BCM4334_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x29004211;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18002000;
+ ci->c_inf[1].wrapbase = 0x18102000;
+ ci->c_inf[1].cib = 0x0d004211;
+ ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+ ci->c_inf[2].base = 0x18004000;
+ ci->c_inf[2].wrapbase = 0x18104000;
+ ci->c_inf[2].cib = 0x13080401;
+ ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+ ci->c_inf[3].base = 0x18003000;
+ ci->c_inf[3].wrapbase = 0x18103000;
+ ci->c_inf[3].cib = 0x07004211;
+ ci->ramsize = 0x80000;
+ break;
default:
brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
return -ENODEV;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index c5a34ff..a299d42 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -28,6 +28,7 @@
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <linux/usb.h>
+#include <linux/vmalloc.h>
#include <net/cfg80211.h>
#include <defs.h>
@@ -1239,7 +1240,7 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
return -EINVAL;
}
- devinfo->image = kmalloc(fw->size, GFP_ATOMIC); /* plus nvram */
+ devinfo->image = vmalloc(fw->size); /* plus nvram */
if (!devinfo->image)
return -ENOMEM;
@@ -1603,7 +1604,7 @@ static struct usb_driver brcmf_usbdrvr = {
void brcmf_usb_exit(void)
{
usb_deregister(&brcmf_usbdrvr);
- kfree(g_image.data);
+ vfree(g_image.data);
g_image.data = NULL;
g_image.len = 0;
}
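
The usb.c change swaps kmalloc() for vmalloc() because a firmware image of several hundred kilobytes may not find enough physically contiguous memory, while vmalloc() only needs virtually contiguous pages and must then be paired with vfree(). A kernel-style sketch of that allocation pattern; copy_fw_image() is a hypothetical helper, not the driver's code.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

static int copy_fw_image(struct device *dev, const char *name,
			 u8 **image, size_t *len)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, name, dev);
	if (err)
		return err;

	*image = vmalloc(fw->size);	/* large: avoid kmalloc order failures */
	if (!*image) {
		release_firmware(fw);
		return -ENOMEM;
	}

	memcpy(*image, fw->data, fw->size);
	*len = fw->size;
	release_firmware(fw);
	return 0;
}

/* ... and on teardown the buffer is released with vfree(), not kfree(). */
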
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index d13ae9c..28c5fbb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -691,9 +691,10 @@ scan_out:
}
static s32
-brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+brcmf_cfg80211_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *request)
{
+ struct net_device *ndev = request->wdev->netdev;
s32 err = 0;
WL_TRACE("Enter\n");
@@ -919,9 +920,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
set_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
if (params->bssid)
- WL_CONN("BSSID: %02X %02X %02X %02X %02X %02X\n",
- params->bssid[0], params->bssid[1], params->bssid[2],
- params->bssid[3], params->bssid[4], params->bssid[5]);
+ WL_CONN("BSSID: %pM\n", params->bssid);
else
WL_CONN("No BSSID specified\n");
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index 6d8b721..8c9345d 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -318,10 +318,6 @@
#define IS_SIM(chippkg) \
((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
-#define PCIE(sih) (ai_get_buscoretype(sih) == PCIE_CORE_ID)
-
-#define PCI_FORCEHT(sih) (PCIE(sih) && (ai_get_chip_id(sih) == BCM4716_CHIP_ID))
-
#ifdef DEBUG
#define SI_MSG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
#else
@@ -473,9 +469,6 @@ ai_buscore_setup(struct si_info *sii, struct bcma_device *cc)
sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
}
- /* figure out buscore */
- sii->buscore = ai_findcore(&sii->pub, PCIE_CORE_ID, 0);
-
return true;
}
@@ -483,11 +476,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
struct bcma_bus *pbus)
{
struct si_pub *sih = &sii->pub;
- u32 w, savewin;
struct bcma_device *cc;
- struct ssb_sprom *sprom = &pbus->sprom;
-
- savewin = 0;
sii->icbus = pbus;
sii->pcibus = pbus->host_pci;
@@ -510,47 +499,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
/* PMU specific initializations */
if (ai_get_cccaps(sih) & CC_CAP_PMU) {
- si_pmu_init(sih);
(void)si_pmu_measure_alpclk(sih);
- si_pmu_res_init(sih);
- }
-
- /* setup the GPIO based LED powersave register */
- w = (sprom->leddc_on_time << BCMA_CC_GPIOTIMER_ONTIME_SHIFT) |
- (sprom->leddc_off_time << BCMA_CC_GPIOTIMER_OFFTIME_SHIFT);
- if (w == 0)
- w = DEFAULT_GPIOTIMERVAL;
- ai_cc_reg(sih, offsetof(struct chipcregs, gpiotimerval),
- ~0, w);
-
- if (ai_get_chip_id(sih) == BCM43224_CHIP_ID) {
- /*
- * enable 12 mA drive strenth for 43224 and
- * set chipControl register bit 15
- */
- if (ai_get_chiprev(sih) == 0) {
- SI_MSG("Applying 43224A0 WARs\n");
- ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol),
- CCTRL43224_GPIO_TOGGLE,
- CCTRL43224_GPIO_TOGGLE);
- si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE,
- CCTRL_43224A0_12MA_LED_DRIVE);
- }
- if (ai_get_chiprev(sih) >= 1) {
- SI_MSG("Applying 43224B0+ WARs\n");
- si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
- CCTRL_43224B0_12MA_LED_DRIVE);
- }
- }
-
- if (ai_get_chip_id(sih) == BCM4313_CHIP_ID) {
- /*
- * enable 12 mA drive strenth for 4313 and
- * set chipControl register bit 1
- */
- SI_MSG("Applying 4313 WARs\n");
- si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
- CCTRL_4313_12MA_LED_DRIVE);
}
return sii;
@@ -589,7 +538,7 @@ void ai_detach(struct si_pub *sih)
struct si_pub *si_local = NULL;
memcpy(&si_local, &sih, sizeof(struct si_pub **));
- sii = (struct si_info *)sih;
+ sii = container_of(sih, struct si_info, pub);
if (sii == NULL)
return;
@@ -597,27 +546,6 @@ void ai_detach(struct si_pub *sih)
kfree(sii);
}
-/* return index of coreid or BADIDX if not found */
-struct bcma_device *ai_findcore(struct si_pub *sih, u16 coreid, u16 coreunit)
-{
- struct bcma_device *core;
- struct si_info *sii;
- uint found;
-
- sii = (struct si_info *)sih;
-
- found = 0;
-
- list_for_each_entry(core, &sii->icbus->cores, list)
- if (core->id.id == coreid) {
- if (found == coreunit)
- return core;
- found++;
- }
-
- return NULL;
-}
-
/*
* read/modify chipcommon core register.
*/
@@ -627,13 +555,12 @@ uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val)
u32 w;
struct si_info *sii;
- sii = (struct si_info *)sih;
+ sii = container_of(sih, struct si_info, pub);
cc = sii->icbus->drv_cc.core;
/* mask and set */
- if (mask || val) {
+ if (mask || val)
bcma_maskset32(cc, regoff, ~mask, val);
- }
/* readback */
w = bcma_read32(cc, regoff);
@@ -694,12 +621,13 @@ ai_clkctl_setdelay(struct si_pub *sih, struct bcma_device *cc)
/* initialize power control delay registers */
void ai_clkctl_init(struct si_pub *sih)
{
+ struct si_info *sii = container_of(sih, struct si_info, pub);
struct bcma_device *cc;
if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL))
return;
- cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+ cc = sii->icbus->drv_cc.core;
if (cc == NULL)
return;
@@ -721,7 +649,7 @@ u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
uint slowminfreq;
u16 fpdelay;
- sii = (struct si_info *)sih;
+ sii = container_of(sih, struct si_info, pub);
if (ai_get_cccaps(sih) & CC_CAP_PMU) {
fpdelay = si_pmu_fast_pwrup_delay(sih);
return fpdelay;
@@ -731,7 +659,7 @@ u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
return 0;
fpdelay = 0;
- cc = ai_findcore(sih, CC_CORE_ID, 0);
+ cc = sii->icbus->drv_cc.core;
if (cc) {
slowminfreq = ai_slowclk_freq(sih, false, cc);
fpdelay = (((bcma_read32(cc, CHIPCREGOFFS(pll_on_delay)) + 2)
@@ -753,12 +681,9 @@ bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode)
struct si_info *sii;
struct bcma_device *cc;
- sii = (struct si_info *)sih;
-
- if (PCI_FORCEHT(sih))
- return mode == BCMA_CLKMODE_FAST;
+ sii = container_of(sih, struct si_info, pub);
- cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
+ cc = sii->icbus->drv_cc.core;
bcma_core_set_clockmode(cc, mode);
return mode == BCMA_CLKMODE_FAST;
}
@@ -766,16 +691,10 @@ bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode)
void ai_pci_up(struct si_pub *sih)
{
struct si_info *sii;
- struct bcma_device *cc;
- sii = (struct si_info *)sih;
+ sii = container_of(sih, struct si_info, pub);
- if (PCI_FORCEHT(sih)) {
- cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
- bcma_core_set_clockmode(cc, BCMA_CLKMODE_FAST);
- }
-
- if (PCIE(sih))
+ if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, true);
}
@@ -783,26 +702,20 @@ void ai_pci_up(struct si_pub *sih)
void ai_pci_down(struct si_pub *sih)
{
struct si_info *sii;
- struct bcma_device *cc;
- sii = (struct si_info *)sih;
+ sii = container_of(sih, struct si_info, pub);
- /* release FORCEHT since chip is going to "down" state */
- if (PCI_FORCEHT(sih)) {
- cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
- bcma_core_set_clockmode(cc, BCMA_CLKMODE_DYNAMIC);
- }
-
- if (PCIE(sih))
+ if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, false);
}
/* Enable BT-COEX & Ex-PA for 4313 */
void ai_epa_4313war(struct si_pub *sih)
{
+ struct si_info *sii = container_of(sih, struct si_info, pub);
struct bcma_device *cc;
- cc = ai_findcore(sih, CC_CORE_ID, 0);
+ cc = sii->icbus->drv_cc.core;
/* EPA Fix */
bcma_set32(cc, CHIPCREGOFFS(gpiocontrol), GPIO_CTRL_EPA_EN_MASK);
@@ -814,7 +727,7 @@ bool ai_deviceremoved(struct si_pub *sih)
u32 w;
struct si_info *sii;
- sii = (struct si_info *)sih;
+ sii = container_of(sih, struct si_info, pub);
if (sii->icbus->hosttype != BCMA_HOSTTYPE_PCI)
return false;
@@ -825,15 +738,3 @@ bool ai_deviceremoved(struct si_pub *sih)
return false;
}
-
-uint ai_get_buscoretype(struct si_pub *sih)
-{
- struct si_info *sii = (struct si_info *)sih;
- return sii->buscore->id.id;
-}
-
-uint ai_get_buscorerev(struct si_pub *sih)
-{
- struct si_info *sii = (struct si_info *)sih;
- return sii->buscore->id.rev;
-}
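
Editor's note on the aiutils.c changes above: the open-coded cast from struct si_pub * back to struct si_info * is replaced by container_of(), which derives the enclosing structure from a pointer to one of its members and no longer depends on pub being the first field. A standalone sketch of the idiom; the pub_state/priv_state types are illustrative, not the driver's:

	#include <linux/kernel.h>

	struct pub_state {
		int caps;
	};

	struct priv_state {
		void *bus;
		struct pub_state pub;	/* need not be the first member */
	};

	static struct priv_state *to_priv(struct pub_state *pub)
	{
		/* container_of(ptr, type, member) subtracts offsetof(type, member)
		 * from ptr to recover the enclosing structure. */
		return container_of(pub, struct priv_state, pub);
	}
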
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index d9f04a6..89562c1 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -88,16 +88,6 @@
#define CLKD_OTP 0x000f0000
#define CLKD_OTP_SHIFT 16
-/* Package IDs */
-#define BCM4717_PKG_ID 9 /* 4717 package id */
-#define BCM4718_PKG_ID 10 /* 4718 package id */
-#define BCM43224_FAB_SMIC 0xa /* the chip is manufactured by SMIC */
-
-/* these are router chips */
-#define BCM4716_CHIP_ID 0x4716 /* 4716 chipcommon chipid */
-#define BCM47162_CHIP_ID 47162 /* 47162 chipcommon chipid */
-#define BCM4748_CHIP_ID 0x4748 /* 4716 chipcommon chipid (OTP, RBBU) */
-
/* dynamic clock control defines */
#define LPOMINFREQ 25000 /* low power oscillator min */
#define LPOMAXFREQ 43000 /* low power oscillator max */
@@ -168,7 +158,6 @@ struct si_info {
struct si_pub pub; /* back plane public state (must be first) */
struct bcma_bus *icbus; /* handle to soc interconnect bus */
struct pci_dev *pcibus; /* handle to pci bus */
- struct bcma_device *buscore;
u32 chipst; /* chip status */
};
@@ -183,8 +172,6 @@ struct si_info {
/* AMBA Interconnect exported externs */
-extern struct bcma_device *ai_findcore(struct si_pub *sih,
- u16 coreid, u16 coreunit);
extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
/* === exported functions === */
@@ -193,7 +180,7 @@ extern void ai_detach(struct si_pub *sih);
extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
extern void ai_clkctl_init(struct si_pub *sih);
extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
-extern bool ai_clkctl_cc(struct si_pub *sih, uint mode);
+extern bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
extern bool ai_deviceremoved(struct si_pub *sih);
extern void ai_pci_down(struct si_pub *sih);
@@ -202,9 +189,6 @@ extern void ai_pci_up(struct si_pub *sih);
/* Enable Ex-PA for 4313 */
extern void ai_epa_4313war(struct si_pub *sih);
-extern uint ai_get_buscoretype(struct si_pub *sih);
-extern uint ai_get_buscorerev(struct si_pub *sih);
-
static inline u32 ai_get_cccaps(struct si_pub *sih)
{
return sih->cccaps;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index 95b5902..be5bcfb 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -663,9 +663,6 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
/* patch the first MPDU */
if (count == 1) {
u8 plcp0, plcp3, is40, sgi;
- struct ieee80211_sta *sta;
-
- sta = tx_info->control.sta;
if (rr) {
plcp0 = plcp[0];
@@ -735,10 +732,8 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
* a candidate for aggregation
*/
p = pktq_ppeek(&qi->q, prec);
- /* tx_info must be checked with current p */
- tx_info = IEEE80211_SKB_CB(p);
-
if (p) {
+ tx_info = IEEE80211_SKB_CB(p);
if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
((u8) (p->priority) == tid)) {
plen = p->len + AMPDU_MAX_MPDU_OVERHEAD;
@@ -759,6 +754,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
p = NULL;
continue;
}
+ /* next packet fits the aggregation criteria, so dequeue it */
p = brcmu_pktq_pdeq(&qi->q, prec);
} else {
p = NULL;
@@ -1196,8 +1192,8 @@ static bool cb_del_ampdu_pkt(struct sk_buff *mpdu, void *arg_a)
bool rc;
rc = tx_info->flags & IEEE80211_TX_CTL_AMPDU ? true : false;
- rc = rc && (tx_info->control.sta == NULL || ampdu_pars->sta == NULL ||
- tx_info->control.sta == ampdu_pars->sta);
+ rc = rc && (tx_info->rate_driver_data[0] == NULL || ampdu_pars->sta == NULL ||
+ tx_info->rate_driver_data[0] == ampdu_pars->sta);
rc = rc && ((u8)(mpdu->priority) == ampdu_pars->tid);
return rc;
}
@@ -1211,8 +1207,8 @@ static void dma_cb_fn_ampdu(void *txi, void *arg_a)
struct ieee80211_tx_info *tx_info = (struct ieee80211_tx_info *)txi;
if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
- (tx_info->control.sta == sta || sta == NULL))
- tx_info->control.sta = NULL;
+ (tx_info->rate_driver_data[0] == sta || sta == NULL))
+ tx_info->rate_driver_data[0] = NULL;
}
/*
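
Editor's note on the ampdu.c hunks above: the driver stops reading tx_info->control.sta (which mac80211 was withdrawing from the tx path) and instead compares against tx_info->rate_driver_data[0], where the station pointer is stashed at transmit time — see the brcms_ops_tx() hunk further down in mac80211_if.c. A minimal sketch of the stash-and-retrieve pattern, assuming the driver owns rate_driver_data for frames it has queued:

	#include <net/mac80211.h>

	/* At enqueue time: remember which station this frame belongs to. */
	static void stash_sta(struct sk_buff *skb, struct ieee80211_sta *sta)
	{
		struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

		tx_info->rate_driver_data[0] = sta;
	}

	/* Later, e.g. when flushing a TID: recover the stashed pointer. */
	static struct ieee80211_sta *stashed_sta(struct sk_buff *skb)
	{
		struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);

		return tx_info->rate_driver_data[0];
	}
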
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index eb77ac3..9a4c63f 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -15,7 +15,9 @@
*/
#include <linux/types.h>
+#include <net/cfg80211.h>
#include <net/mac80211.h>
+#include <net/regulatory.h>
#include <defs.h>
#include "pub.h"
@@ -23,73 +25,17 @@
#include "main.h"
#include "stf.h"
#include "channel.h"
+#include "mac80211_if.h"
/* QDB() macro takes a dB value and converts to a quarter dB value */
#define QDB(n) ((n) * BRCMS_TXPWR_DB_FACTOR)
-#define LOCALE_CHAN_01_11 (1<<0)
-#define LOCALE_CHAN_12_13 (1<<1)
-#define LOCALE_CHAN_14 (1<<2)
-#define LOCALE_SET_5G_LOW_JP1 (1<<3) /* 34-48, step 2 */
-#define LOCALE_SET_5G_LOW_JP2 (1<<4) /* 34-46, step 4 */
-#define LOCALE_SET_5G_LOW1 (1<<5) /* 36-48, step 4 */
-#define LOCALE_SET_5G_LOW2 (1<<6) /* 52 */
-#define LOCALE_SET_5G_LOW3 (1<<7) /* 56-64, step 4 */
-#define LOCALE_SET_5G_MID1 (1<<8) /* 100-116, step 4 */
-#define LOCALE_SET_5G_MID2 (1<<9) /* 120-124, step 4 */
-#define LOCALE_SET_5G_MID3 (1<<10) /* 128 */
-#define LOCALE_SET_5G_HIGH1 (1<<11) /* 132-140, step 4 */
-#define LOCALE_SET_5G_HIGH2 (1<<12) /* 149-161, step 4 */
-#define LOCALE_SET_5G_HIGH3 (1<<13) /* 165 */
-#define LOCALE_CHAN_52_140_ALL (1<<14)
-#define LOCALE_SET_5G_HIGH4 (1<<15) /* 184-216 */
-
-#define LOCALE_CHAN_36_64 (LOCALE_SET_5G_LOW1 | \
- LOCALE_SET_5G_LOW2 | \
- LOCALE_SET_5G_LOW3)
-#define LOCALE_CHAN_52_64 (LOCALE_SET_5G_LOW2 | LOCALE_SET_5G_LOW3)
-#define LOCALE_CHAN_100_124 (LOCALE_SET_5G_MID1 | LOCALE_SET_5G_MID2)
-#define LOCALE_CHAN_100_140 (LOCALE_SET_5G_MID1 | LOCALE_SET_5G_MID2 | \
- LOCALE_SET_5G_MID3 | LOCALE_SET_5G_HIGH1)
-#define LOCALE_CHAN_149_165 (LOCALE_SET_5G_HIGH2 | LOCALE_SET_5G_HIGH3)
-#define LOCALE_CHAN_184_216 LOCALE_SET_5G_HIGH4
-
-#define LOCALE_CHAN_01_14 (LOCALE_CHAN_01_11 | \
- LOCALE_CHAN_12_13 | \
- LOCALE_CHAN_14)
-
-#define LOCALE_RADAR_SET_NONE 0
-#define LOCALE_RADAR_SET_1 1
-
-#define LOCALE_RESTRICTED_NONE 0
-#define LOCALE_RESTRICTED_SET_2G_SHORT 1
-#define LOCALE_RESTRICTED_CHAN_165 2
-#define LOCALE_CHAN_ALL_5G 3
-#define LOCALE_RESTRICTED_JAPAN_LEGACY 4
-#define LOCALE_RESTRICTED_11D_2G 5
-#define LOCALE_RESTRICTED_11D_5G 6
-#define LOCALE_RESTRICTED_LOW_HI 7
-#define LOCALE_RESTRICTED_12_13_14 8
-
-#define LOCALE_2G_IDX_i 0
-#define LOCALE_5G_IDX_11 0
#define LOCALE_MIMO_IDX_bn 0
#define LOCALE_MIMO_IDX_11n 0
-/* max of BAND_5G_PWR_LVLS and 6 for 2.4 GHz */
-#define BRCMS_MAXPWR_TBL_SIZE 6
/* max of BAND_5G_PWR_LVLS and 14 for 2.4 GHz */
#define BRCMS_MAXPWR_MIMO_TBL_SIZE 14
-/* power level in group of 2.4GHz band channels:
- * maxpwr[0] - CCK channels [1]
- * maxpwr[1] - CCK channels [2-10]
- * maxpwr[2] - CCK channels [11-14]
- * maxpwr[3] - OFDM channels [1]
- * maxpwr[4] - OFDM channels [2-10]
- * maxpwr[5] - OFDM channels [11-14]
- */
-
/* maxpwr mapping to 5GHz band channels:
* maxpwr[0] - channels [34-48]
* maxpwr[1] - channels [52-60]
@@ -101,16 +47,8 @@
#define LC(id) LOCALE_MIMO_IDX_ ## id
-#define LC_2G(id) LOCALE_2G_IDX_ ## id
-
-#define LC_5G(id) LOCALE_5G_IDX_ ## id
-
-#define LOCALES(band2, band5, mimo2, mimo5) \
- {LC_2G(band2), LC_5G(band5), LC(mimo2), LC(mimo5)}
-
-/* macro to get 2.4 GHz channel group index for tx power */
-#define CHANNEL_POWER_IDX_2G_CCK(c) (((c) < 2) ? 0 : (((c) < 11) ? 1 : 2))
-#define CHANNEL_POWER_IDX_2G_OFDM(c) (((c) < 2) ? 3 : (((c) < 11) ? 4 : 5))
+#define LOCALES(mimo2, mimo5) \
+ {LC(mimo2), LC(mimo5)}
/* macro to get 5 GHz channel group index for tx power */
#define CHANNEL_POWER_IDX_5G(c) (((c) < 52) ? 0 : \
@@ -118,18 +56,37 @@
(((c) < 100) ? 2 : \
(((c) < 149) ? 3 : 4))))
-#define ISDFS_EU(fl) (((fl) & BRCMS_DFS_EU) == BRCMS_DFS_EU)
-
-struct brcms_cm_band {
- /* struct locale_info flags */
- u8 locale_flags;
- /* List of valid channels in the country */
- struct brcms_chanvec valid_channels;
- /* List of restricted use channels */
- const struct brcms_chanvec *restricted_channels;
- /* List of radar sensitive channels */
- const struct brcms_chanvec *radar_channels;
- u8 PAD[8];
+#define BRCM_2GHZ_2412_2462 REG_RULE(2412-10, 2462+10, 40, 0, 19, 0)
+#define BRCM_2GHZ_2467_2472 REG_RULE(2467-10, 2472+10, 20, 0, 19, \
+ NL80211_RRF_PASSIVE_SCAN | \
+ NL80211_RRF_NO_IBSS)
+
+#define BRCM_5GHZ_5180_5240 REG_RULE(5180-10, 5240+10, 40, 0, 21, \
+ NL80211_RRF_PASSIVE_SCAN | \
+ NL80211_RRF_NO_IBSS)
+#define BRCM_5GHZ_5260_5320 REG_RULE(5260-10, 5320+10, 40, 0, 21, \
+ NL80211_RRF_PASSIVE_SCAN | \
+ NL80211_RRF_DFS | \
+ NL80211_RRF_NO_IBSS)
+#define BRCM_5GHZ_5500_5700 REG_RULE(5500-10, 5700+10, 40, 0, 21, \
+ NL80211_RRF_PASSIVE_SCAN | \
+ NL80211_RRF_DFS | \
+ NL80211_RRF_NO_IBSS)
+#define BRCM_5GHZ_5745_5825 REG_RULE(5745-10, 5825+10, 40, 0, 21, \
+ NL80211_RRF_PASSIVE_SCAN | \
+ NL80211_RRF_NO_IBSS)
+
+static const struct ieee80211_regdomain brcms_regdom_x2 = {
+ .n_reg_rules = 7,
+ .alpha2 = "X2",
+ .reg_rules = {
+ BRCM_2GHZ_2412_2462,
+ BRCM_2GHZ_2467_2472,
+ BRCM_5GHZ_5180_5240,
+ BRCM_5GHZ_5260_5320,
+ BRCM_5GHZ_5500_5700,
+ BRCM_5GHZ_5745_5825,
+ }
};
/* locale per-channel tx power limits for MIMO frames
@@ -141,337 +98,23 @@ struct locale_mimo_info {
s8 maxpwr20[BRCMS_MAXPWR_MIMO_TBL_SIZE];
/* tx 40 MHz power limits, qdBm units */
s8 maxpwr40[BRCMS_MAXPWR_MIMO_TBL_SIZE];
- u8 flags;
};
/* Country names and abbreviations with locale defined from ISO 3166 */
struct country_info {
- const u8 locale_2G; /* 2.4G band locale */
- const u8 locale_5G; /* 5G band locale */
const u8 locale_mimo_2G; /* 2.4G mimo info */
const u8 locale_mimo_5G; /* 5G mimo info */
};
+struct brcms_regd {
+ struct country_info country;
+ const struct ieee80211_regdomain *regdomain;
+};
+
struct brcms_cm_info {
struct brcms_pub *pub;
struct brcms_c_info *wlc;
- char srom_ccode[BRCM_CNTRY_BUF_SZ]; /* Country Code in SROM */
- uint srom_regrev; /* Regulatory Rev for the SROM ccode */
- const struct country_info *country; /* current country def */
- char ccode[BRCM_CNTRY_BUF_SZ]; /* current internal Country Code */
- uint regrev; /* current Regulatory Revision */
- char country_abbrev[BRCM_CNTRY_BUF_SZ]; /* current advertised ccode */
- /* per-band state (one per phy/radio) */
- struct brcms_cm_band bandstate[MAXBANDS];
- /* quiet channels currently for radar sensitivity or 11h support */
- /* channels on which we cannot transmit */
- struct brcms_chanvec quiet_channels;
-};
-
-/* locale channel and power info. */
-struct locale_info {
- u32 valid_channels;
- /* List of radar sensitive channels */
- u8 radar_channels;
- /* List of channels used only if APs are detected */
- u8 restricted_channels;
- /* Max tx pwr in qdBm for each sub-band */
- s8 maxpwr[BRCMS_MAXPWR_TBL_SIZE];
- /* Country IE advertised max tx pwr in dBm per sub-band */
- s8 pub_maxpwr[BAND_5G_PWR_LVLS];
- u8 flags;
-};
-
-/* Regulatory Matrix Spreadsheet (CLM) MIMO v3.7.9 */
-
-/*
- * Some common channel sets
- */
-
-/* No channels */
-static const struct brcms_chanvec chanvec_none = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* All 2.4 GHz HW channels */
-static const struct brcms_chanvec chanvec_all_2G = {
- {0xfe, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* All 5 GHz HW channels */
-static const struct brcms_chanvec chanvec_all_5G = {
- {0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0x11, 0x11,
- 0x01, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x11,
- 0x11, 0x11, 0x20, 0x22, 0x22, 0x00, 0x00, 0x11,
- 0x11, 0x11, 0x11, 0x01}
-};
-
-/*
- * Radar channel sets
- */
-
-/* Channels 52 - 64, 100 - 140 */
-static const struct brcms_chanvec radar_set1 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, /* 52 - 60 */
- 0x01, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x11, /* 64, 100 - 124 */
- 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 128 - 140 */
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/*
- * Restricted channel sets
- */
-
-/* Channels 34, 38, 42, 46 */
-static const struct brcms_chanvec restricted_set_japan_legacy = {
- {0x00, 0x00, 0x00, 0x00, 0x44, 0x44, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* Channels 12, 13 */
-static const struct brcms_chanvec restricted_set_2g_short = {
- {0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* Channel 165 */
-static const struct brcms_chanvec restricted_chan_165 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* Channels 36 - 48 & 149 - 165 */
-static const struct brcms_chanvec restricted_low_hi = {
- {0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x01, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x20, 0x22, 0x22, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* Channels 12 - 14 */
-static const struct brcms_chanvec restricted_set_12_13_14 = {
- {0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* global memory to provide working buffer for expanded locale */
-
-static const struct brcms_chanvec *g_table_radar_set[] = {
- &chanvec_none,
- &radar_set1
-};
-
-static const struct brcms_chanvec *g_table_restricted_chan[] = {
- &chanvec_none, /* restricted_set_none */
- &restricted_set_2g_short,
- &restricted_chan_165,
- &chanvec_all_5G,
- &restricted_set_japan_legacy,
- &chanvec_all_2G, /* restricted_set_11d_2G */
- &chanvec_all_5G, /* restricted_set_11d_5G */
- &restricted_low_hi,
- &restricted_set_12_13_14
-};
-
-static const struct brcms_chanvec locale_2g_01_11 = {
- {0xfe, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_2g_12_13 = {
- {0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_2g_14 = {
- {0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_LOW_JP1 = {
- {0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0x01, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_LOW_JP2 = {
- {0x00, 0x00, 0x00, 0x00, 0x44, 0x44, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_LOW1 = {
- {0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x01, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_LOW2 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_LOW3 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_MID1 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_MID2 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_MID3 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_HIGH1 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x10, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_HIGH2 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x20, 0x22, 0x02, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_HIGH3 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_52_140_ALL = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11,
- 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
- 0x11, 0x11, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_HIGH4 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
- 0x11, 0x11, 0x11, 0x11}
-};
-
-static const struct brcms_chanvec *g_table_locale_base[] = {
- &locale_2g_01_11,
- &locale_2g_12_13,
- &locale_2g_14,
- &locale_5g_LOW_JP1,
- &locale_5g_LOW_JP2,
- &locale_5g_LOW1,
- &locale_5g_LOW2,
- &locale_5g_LOW3,
- &locale_5g_MID1,
- &locale_5g_MID2,
- &locale_5g_MID3,
- &locale_5g_HIGH1,
- &locale_5g_HIGH2,
- &locale_5g_HIGH3,
- &locale_5g_52_140_ALL,
- &locale_5g_HIGH4
-};
-
-static void brcms_c_locale_add_channels(struct brcms_chanvec *target,
- const struct brcms_chanvec *channels)
-{
- u8 i;
- for (i = 0; i < sizeof(struct brcms_chanvec); i++)
- target->vec[i] |= channels->vec[i];
-}
-
-static void brcms_c_locale_get_channels(const struct locale_info *locale,
- struct brcms_chanvec *channels)
-{
- u8 i;
-
- memset(channels, 0, sizeof(struct brcms_chanvec));
-
- for (i = 0; i < ARRAY_SIZE(g_table_locale_base); i++) {
- if (locale->valid_channels & (1 << i))
- brcms_c_locale_add_channels(channels,
- g_table_locale_base[i]);
- }
-}
-
-/*
- * Locale Definitions - 2.4 GHz
- */
-static const struct locale_info locale_i = { /* locale i. channel 1 - 13 */
- LOCALE_CHAN_01_11 | LOCALE_CHAN_12_13,
- LOCALE_RADAR_SET_NONE,
- LOCALE_RESTRICTED_SET_2G_SHORT,
- {QDB(19), QDB(19), QDB(19),
- QDB(19), QDB(19), QDB(19)},
- {20, 20, 20, 0},
- BRCMS_EIRP
-};
-
-/*
- * Locale Definitions - 5 GHz
- */
-static const struct locale_info locale_11 = {
- /* locale 11. channel 36 - 48, 52 - 64, 100 - 140, 149 - 165 */
- LOCALE_CHAN_36_64 | LOCALE_CHAN_100_140 | LOCALE_CHAN_149_165,
- LOCALE_RADAR_SET_1,
- LOCALE_RESTRICTED_NONE,
- {QDB(21), QDB(21), QDB(21), QDB(21), QDB(21)},
- {23, 23, 23, 30, 30},
- BRCMS_EIRP | BRCMS_DFS_EU
-};
-
-static const struct locale_info *g_locale_2g_table[] = {
- &locale_i
-};
-
-static const struct locale_info *g_locale_5g_table[] = {
- &locale_11
+ const struct brcms_regd *world_regd;
};
/*
@@ -484,7 +127,6 @@ static const struct locale_mimo_info locale_bn = {
{0, 0, QDB(13), QDB(13), QDB(13),
QDB(13), QDB(13), QDB(13), QDB(13), QDB(13),
QDB(13), 0, 0},
- 0
};
static const struct locale_mimo_info *g_mimo_2g_table[] = {
@@ -497,114 +139,20 @@ static const struct locale_mimo_info *g_mimo_2g_table[] = {
static const struct locale_mimo_info locale_11n = {
{ /* 12.5 dBm */ 50, 50, 50, QDB(15), QDB(15)},
{QDB(14), QDB(15), QDB(15), QDB(15), QDB(15)},
- 0
};
static const struct locale_mimo_info *g_mimo_5g_table[] = {
&locale_11n
};
-static const struct {
- char abbrev[BRCM_CNTRY_BUF_SZ]; /* country abbreviation */
- struct country_info country;
-} cntry_locales[] = {
+static const struct brcms_regd cntry_locales[] = {
+ /* Worldwide RoW 2, must always be at index 0 */
{
- "X2", LOCALES(i, 11, bn, 11n)}, /* Worldwide RoW 2 */
-};
-
-#ifdef SUPPORT_40MHZ
-/* 20MHz channel info for 40MHz pairing support */
-struct chan20_info {
- u8 sb;
- u8 adj_sbs;
+ .country = LOCALES(bn, 11n),
+ .regdomain = &brcms_regdom_x2,
+ },
};
-/* indicates adjacent channels that are allowed for a 40 Mhz channel and
- * those that permitted by the HT
- */
-struct chan20_info chan20_info[] = {
- /* 11b/11g */
-/* 0 */ {1, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 1 */ {2, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 2 */ {3, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 3 */ {4, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 4 */ {5, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
-/* 5 */ {6, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
-/* 6 */ {7, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
-/* 7 */ {8, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
-/* 8 */ {9, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
-/* 9 */ {10, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 10 */ {11, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 11 */ {12, (CH_LOWER_SB)},
-/* 12 */ {13, (CH_LOWER_SB)},
-/* 13 */ {14, (CH_LOWER_SB)},
-
-/* 11a japan high */
-/* 14 */ {34, (CH_UPPER_SB)},
-/* 15 */ {38, (CH_LOWER_SB)},
-/* 16 */ {42, (CH_LOWER_SB)},
-/* 17 */ {46, (CH_LOWER_SB)},
-
-/* 11a usa low */
-/* 18 */ {36, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 19 */ {40, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 20 */ {44, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 21 */ {48, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 22 */ {52, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 23 */ {56, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 24 */ {60, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 25 */ {64, (CH_LOWER_SB | CH_EWA_VALID)},
-
-/* 11a Europe */
-/* 26 */ {100, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 27 */ {104, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 28 */ {108, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 29 */ {112, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 30 */ {116, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 31 */ {120, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 32 */ {124, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 33 */ {128, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 34 */ {132, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 35 */ {136, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 36 */ {140, (CH_LOWER_SB)},
-
-/* 11a usa high, ref5 only */
-/* The 0x80 bit in pdiv means these are REF5, other entries are REF20 */
-/* 37 */ {149, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 38 */ {153, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 39 */ {157, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 40 */ {161, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 41 */ {165, (CH_LOWER_SB)},
-
-/* 11a japan */
-/* 42 */ {184, (CH_UPPER_SB)},
-/* 43 */ {188, (CH_LOWER_SB)},
-/* 44 */ {192, (CH_UPPER_SB)},
-/* 45 */ {196, (CH_LOWER_SB)},
-/* 46 */ {200, (CH_UPPER_SB)},
-/* 47 */ {204, (CH_LOWER_SB)},
-/* 48 */ {208, (CH_UPPER_SB)},
-/* 49 */ {212, (CH_LOWER_SB)},
-/* 50 */ {216, (CH_LOWER_SB)}
-};
-#endif /* SUPPORT_40MHZ */
-
-static const struct locale_info *brcms_c_get_locale_2g(u8 locale_idx)
-{
- if (locale_idx >= ARRAY_SIZE(g_locale_2g_table))
- return NULL; /* error condition */
-
- return g_locale_2g_table[locale_idx];
-}
-
-static const struct locale_info *brcms_c_get_locale_5g(u8 locale_idx)
-{
- if (locale_idx >= ARRAY_SIZE(g_locale_5g_table))
- return NULL; /* error condition */
-
- return g_locale_5g_table[locale_idx];
-}
-
static const struct locale_mimo_info *brcms_c_get_mimo_2g(u8 locale_idx)
{
if (locale_idx >= ARRAY_SIZE(g_mimo_2g_table))
@@ -621,13 +169,6 @@ static const struct locale_mimo_info *brcms_c_get_mimo_5g(u8 locale_idx)
return g_mimo_5g_table[locale_idx];
}
-static int
-brcms_c_country_aggregate_map(struct brcms_cm_info *wlc_cm, const char *ccode,
- char *mapped_ccode, uint *mapped_regrev)
-{
- return false;
-}
-
/*
* Indicates whether the country provided is valid to pass
* to cfg80211 or not.
@@ -662,155 +203,24 @@ static bool brcms_c_country_valid(const char *ccode)
return true;
}
-/* Lookup a country info structure from a null terminated country
- * abbreviation and regrev directly with no translation.
- */
-static const struct country_info *
-brcms_c_country_lookup_direct(const char *ccode, uint regrev)
+static const struct brcms_regd *brcms_world_regd(const char *regdom, int len)
{
- uint size, i;
-
- /* Should just return 0 for single locale driver. */
- /* Keep it this way in case we add more locales. (for now anyway) */
-
- /*
- * all other country def arrays are for regrev == 0, so if
- * regrev is non-zero, fail
- */
- if (regrev > 0)
- return NULL;
-
- /* find matched table entry from country code */
- size = ARRAY_SIZE(cntry_locales);
- for (i = 0; i < size; i++) {
- if (strcmp(ccode, cntry_locales[i].abbrev) == 0)
- return &cntry_locales[i].country;
- }
- return NULL;
-}
-
-static const struct country_info *
-brcms_c_countrycode_map(struct brcms_cm_info *wlc_cm, const char *ccode,
- char *mapped_ccode, uint *mapped_regrev)
-{
- struct brcms_c_info *wlc = wlc_cm->wlc;
- const struct country_info *country;
- uint srom_regrev = wlc_cm->srom_regrev;
- const char *srom_ccode = wlc_cm->srom_ccode;
- int mapped;
-
- /* check for currently supported ccode size */
- if (strlen(ccode) > (BRCM_CNTRY_BUF_SZ - 1)) {
- wiphy_err(wlc->wiphy, "wl%d: %s: ccode \"%s\" too long for "
- "match\n", wlc->pub->unit, __func__, ccode);
- return NULL;
- }
-
- /* default mapping is the given ccode and regrev 0 */
- strncpy(mapped_ccode, ccode, BRCM_CNTRY_BUF_SZ);
- *mapped_regrev = 0;
-
- /* If the desired country code matches the srom country code,
- * then the mapped country is the srom regulatory rev.
- * Otherwise look for an aggregate mapping.
- */
- if (!strcmp(srom_ccode, ccode)) {
- *mapped_regrev = srom_regrev;
- mapped = 0;
- wiphy_err(wlc->wiphy, "srom_code == ccode %s\n", __func__);
- } else {
- mapped =
- brcms_c_country_aggregate_map(wlc_cm, ccode, mapped_ccode,
- mapped_regrev);
- }
-
- /* find the matching built-in country definition */
- country = brcms_c_country_lookup_direct(mapped_ccode, *mapped_regrev);
-
- /* if there is not an exact rev match, default to rev zero */
- if (country == NULL && *mapped_regrev != 0) {
- *mapped_regrev = 0;
- country =
- brcms_c_country_lookup_direct(mapped_ccode, *mapped_regrev);
- }
-
- return country;
-}
-
-/* Lookup a country info structure from a null terminated country code
- * The lookup is case sensitive.
- */
-static const struct country_info *
-brcms_c_country_lookup(struct brcms_c_info *wlc, const char *ccode)
-{
- const struct country_info *country;
- char mapped_ccode[BRCM_CNTRY_BUF_SZ];
- uint mapped_regrev;
-
- /*
- * map the country code to a built-in country code, regrev, and
- * country_info struct
- */
- country = brcms_c_countrycode_map(wlc->cmi, ccode, mapped_ccode,
- &mapped_regrev);
-
- return country;
-}
-
-/*
- * reset the quiet channels vector to the union
- * of the restricted and radar channel sets
- */
-static void brcms_c_quiet_channels_reset(struct brcms_cm_info *wlc_cm)
-{
- struct brcms_c_info *wlc = wlc_cm->wlc;
- uint i, j;
- struct brcms_band *band;
- const struct brcms_chanvec *chanvec;
-
- memset(&wlc_cm->quiet_channels, 0, sizeof(struct brcms_chanvec));
-
- band = wlc->band;
- for (i = 0; i < wlc->pub->_nbands;
- i++, band = wlc->bandstate[OTHERBANDUNIT(wlc)]) {
-
- /* initialize quiet channels for restricted channels */
- chanvec = wlc_cm->bandstate[band->bandunit].restricted_channels;
- for (j = 0; j < sizeof(struct brcms_chanvec); j++)
- wlc_cm->quiet_channels.vec[j] |= chanvec->vec[j];
+ const struct brcms_regd *regd = NULL;
+ int i;
+ for (i = 0; i < ARRAY_SIZE(cntry_locales); i++) {
+ if (!strncmp(regdom, cntry_locales[i].regdomain->alpha2, len)) {
+ regd = &cntry_locales[i];
+ break;
+ }
}
-}
-
-/* Is the channel valid for the current locale and current band? */
-static bool brcms_c_valid_channel20(struct brcms_cm_info *wlc_cm, uint val)
-{
- struct brcms_c_info *wlc = wlc_cm->wlc;
- return ((val < MAXCHANNEL) &&
- isset(wlc_cm->bandstate[wlc->band->bandunit].valid_channels.vec,
- val));
+ return regd;
}
-/* Is the channel valid for the current locale and specified band? */
-static bool brcms_c_valid_channel20_in_band(struct brcms_cm_info *wlc_cm,
- uint bandunit, uint val)
-{
- return ((val < MAXCHANNEL)
- && isset(wlc_cm->bandstate[bandunit].valid_channels.vec, val));
-}
-
-/* Is the channel valid for the current locale? (but don't consider channels not
- * available due to bandlocking)
- */
-static bool brcms_c_valid_channel20_db(struct brcms_cm_info *wlc_cm, uint val)
+static const struct brcms_regd *brcms_default_world_regd(void)
{
- struct brcms_c_info *wlc = wlc_cm->wlc;
-
- return brcms_c_valid_channel20(wlc->cmi, val) ||
- (!wlc->bandlocked
- && brcms_c_valid_channel20_in_band(wlc->cmi,
- OTHERBANDUNIT(wlc), val));
+ return &cntry_locales[0];
}
/* JP, J1 - J10 are Japan ccodes */
@@ -820,12 +230,6 @@ static bool brcms_c_japan_ccode(const char *ccode)
(ccode[1] == 'P' || (ccode[1] >= '1' && ccode[1] <= '9')));
}
-/* Returns true if currently set country is Japan or variant */
-static bool brcms_c_japan(struct brcms_c_info *wlc)
-{
- return brcms_c_japan_ccode(wlc->cmi->country_abbrev);
-}
-
static void
brcms_c_channel_min_txpower_limits_with_local_constraint(
struct brcms_cm_info *wlc_cm, struct txpwr_limits *txpwr,
@@ -901,140 +305,16 @@ brcms_c_channel_min_txpower_limits_with_local_constraint(
}
-/* Update the radio state (enable/disable) and tx power targets
- * based on a new set of channel/regulatory information
- */
-static void brcms_c_channels_commit(struct brcms_cm_info *wlc_cm)
-{
- struct brcms_c_info *wlc = wlc_cm->wlc;
- uint chan;
- struct txpwr_limits txpwr;
-
- /* search for the existence of any valid channel */
- for (chan = 0; chan < MAXCHANNEL; chan++) {
- if (brcms_c_valid_channel20_db(wlc->cmi, chan))
- break;
- }
- if (chan == MAXCHANNEL)
- chan = INVCHANNEL;
-
- /*
- * based on the channel search above, set or
- * clear WL_RADIO_COUNTRY_DISABLE.
- */
- if (chan == INVCHANNEL) {
- /*
- * country/locale with no valid channels, set
- * the radio disable bit
- */
- mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
- wiphy_err(wlc->wiphy, "wl%d: %s: no valid channel for \"%s\" "
- "nbands %d bandlocked %d\n", wlc->pub->unit,
- __func__, wlc_cm->country_abbrev, wlc->pub->_nbands,
- wlc->bandlocked);
- } else if (mboolisset(wlc->pub->radio_disabled,
- WL_RADIO_COUNTRY_DISABLE)) {
- /*
- * country/locale with valid channel, clear
- * the radio disable bit
- */
- mboolclr(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
- }
-
- /*
- * Now that the country abbreviation is set, if the radio supports 2G,
- * then set channel 14 restrictions based on the new locale.
- */
- if (wlc->pub->_nbands > 1 || wlc->band->bandtype == BRCM_BAND_2G)
- wlc_phy_chanspec_ch14_widefilter_set(wlc->band->pi,
- brcms_c_japan(wlc) ? true :
- false);
-
- if (wlc->pub->up && chan != INVCHANNEL) {
- brcms_c_channel_reg_limits(wlc_cm, wlc->chanspec, &txpwr);
- brcms_c_channel_min_txpower_limits_with_local_constraint(wlc_cm,
- &txpwr, BRCMS_TXPWR_MAX);
- wlc_phy_txpower_limit_set(wlc->band->pi, &txpwr, wlc->chanspec);
- }
-}
-
-static int
-brcms_c_channels_init(struct brcms_cm_info *wlc_cm,
- const struct country_info *country)
-{
- struct brcms_c_info *wlc = wlc_cm->wlc;
- uint i, j;
- struct brcms_band *band;
- const struct locale_info *li;
- struct brcms_chanvec sup_chan;
- const struct locale_mimo_info *li_mimo;
-
- band = wlc->band;
- for (i = 0; i < wlc->pub->_nbands;
- i++, band = wlc->bandstate[OTHERBANDUNIT(wlc)]) {
-
- li = (band->bandtype == BRCM_BAND_5G) ?
- brcms_c_get_locale_5g(country->locale_5G) :
- brcms_c_get_locale_2g(country->locale_2G);
- wlc_cm->bandstate[band->bandunit].locale_flags = li->flags;
- li_mimo = (band->bandtype == BRCM_BAND_5G) ?
- brcms_c_get_mimo_5g(country->locale_mimo_5G) :
- brcms_c_get_mimo_2g(country->locale_mimo_2G);
-
- /* merge the mimo non-mimo locale flags */
- wlc_cm->bandstate[band->bandunit].locale_flags |=
- li_mimo->flags;
-
- wlc_cm->bandstate[band->bandunit].restricted_channels =
- g_table_restricted_chan[li->restricted_channels];
- wlc_cm->bandstate[band->bandunit].radar_channels =
- g_table_radar_set[li->radar_channels];
-
- /*
- * set the channel availability, masking out the channels
- * that may not be supported on this phy.
- */
- wlc_phy_chanspec_band_validch(band->pi, band->bandtype,
- &sup_chan);
- brcms_c_locale_get_channels(li,
- &wlc_cm->bandstate[band->bandunit].
- valid_channels);
- for (j = 0; j < sizeof(struct brcms_chanvec); j++)
- wlc_cm->bandstate[band->bandunit].valid_channels.
- vec[j] &= sup_chan.vec[j];
- }
-
- brcms_c_quiet_channels_reset(wlc_cm);
- brcms_c_channels_commit(wlc_cm);
-
- return 0;
-}
-
/*
* set the driver's current country and regulatory information
* using a country code as the source. Look up built in country
* information found with the country code.
*/
static void
-brcms_c_set_country_common(struct brcms_cm_info *wlc_cm,
- const char *country_abbrev,
- const char *ccode, uint regrev,
- const struct country_info *country)
+brcms_c_set_country(struct brcms_cm_info *wlc_cm,
+ const struct brcms_regd *regd)
{
- const struct locale_info *locale;
struct brcms_c_info *wlc = wlc_cm->wlc;
- char prev_country_abbrev[BRCM_CNTRY_BUF_SZ];
-
- /* save current country state */
- wlc_cm->country = country;
-
- memset(&prev_country_abbrev, 0, BRCM_CNTRY_BUF_SZ);
- strncpy(prev_country_abbrev, wlc_cm->country_abbrev,
- BRCM_CNTRY_BUF_SZ - 1);
-
- strncpy(wlc_cm->country_abbrev, country_abbrev, BRCM_CNTRY_BUF_SZ - 1);
- strncpy(wlc_cm->ccode, ccode, BRCM_CNTRY_BUF_SZ - 1);
- wlc_cm->regrev = regrev;
if ((wlc->pub->_n_enab & SUPPORT_11N) !=
wlc->protection->nmode_user)
@@ -1042,75 +322,19 @@ brcms_c_set_country_common(struct brcms_cm_info *wlc_cm,
brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]);
brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]);
- /* set or restore gmode as required by regulatory */
- locale = brcms_c_get_locale_2g(country->locale_2G);
- if (locale && (locale->flags & BRCMS_NO_OFDM))
- brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false);
- else
- brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
- brcms_c_channels_init(wlc_cm, country);
+ brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
return;
}
-static int
-brcms_c_set_countrycode_rev(struct brcms_cm_info *wlc_cm,
- const char *country_abbrev,
- const char *ccode, int regrev)
-{
- const struct country_info *country;
- char mapped_ccode[BRCM_CNTRY_BUF_SZ];
- uint mapped_regrev;
-
- /* if regrev is -1, lookup the mapped country code,
- * otherwise use the ccode and regrev directly
- */
- if (regrev == -1) {
- /*
- * map the country code to a built-in country
- * code, regrev, and country_info
- */
- country =
- brcms_c_countrycode_map(wlc_cm, ccode, mapped_ccode,
- &mapped_regrev);
- } else {
- /* find the matching built-in country definition */
- country = brcms_c_country_lookup_direct(ccode, regrev);
- strncpy(mapped_ccode, ccode, BRCM_CNTRY_BUF_SZ);
- mapped_regrev = regrev;
- }
-
- if (country == NULL)
- return -EINVAL;
-
- /* set the driver state for the country */
- brcms_c_set_country_common(wlc_cm, country_abbrev, mapped_ccode,
- mapped_regrev, country);
-
- return 0;
-}
-
-/*
- * set the driver's current country and regulatory information using
- * a country code as the source. Lookup built in country information
- * found with the country code.
- */
-static int
-brcms_c_set_countrycode(struct brcms_cm_info *wlc_cm, const char *ccode)
-{
- char country_abbrev[BRCM_CNTRY_BUF_SZ];
- strncpy(country_abbrev, ccode, BRCM_CNTRY_BUF_SZ);
- return brcms_c_set_countrycode_rev(wlc_cm, country_abbrev, ccode, -1);
-}
-
struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
{
struct brcms_cm_info *wlc_cm;
- char country_abbrev[BRCM_CNTRY_BUF_SZ];
- const struct country_info *country;
struct brcms_pub *pub = wlc->pub;
struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom;
+ const char *ccode = sprom->alpha2;
+ int ccode_len = sizeof(sprom->alpha2);
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
@@ -1122,24 +346,27 @@ struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
wlc->cmi = wlc_cm;
/* store the country code for passing up as a regulatory hint */
- if (sprom->alpha2 && brcms_c_country_valid(sprom->alpha2))
- strncpy(wlc->pub->srom_ccode, sprom->alpha2, sizeof(sprom->alpha2));
+ wlc_cm->world_regd = brcms_world_regd(ccode, ccode_len);
+ if (brcms_c_country_valid(ccode))
+ strncpy(wlc->pub->srom_ccode, ccode, ccode_len);
/*
- * internal country information which must match
- * regulatory constraints in firmware
+ * If no custom world domain is found in the SROM, use the
+ * default "X2" domain.
*/
- memset(country_abbrev, 0, BRCM_CNTRY_BUF_SZ);
- strncpy(country_abbrev, "X2", sizeof(country_abbrev) - 1);
- country = brcms_c_country_lookup(wlc, country_abbrev);
+ if (!wlc_cm->world_regd) {
+ wlc_cm->world_regd = brcms_default_world_regd();
+ ccode = wlc_cm->world_regd->regdomain->alpha2;
+ ccode_len = BRCM_CNTRY_BUF_SZ - 1;
+ }
/* save default country for exiting 11d regulatory mode */
- strncpy(wlc->country_default, country_abbrev, BRCM_CNTRY_BUF_SZ - 1);
+ strncpy(wlc->country_default, ccode, ccode_len);
/* initialize autocountry_default to driver default */
- strncpy(wlc->autocountry_default, "X2", BRCM_CNTRY_BUF_SZ - 1);
+ strncpy(wlc->autocountry_default, ccode, ccode_len);
- brcms_c_set_countrycode(wlc_cm, country_abbrev);
+ brcms_c_set_country(wlc_cm, wlc_cm->world_regd);
return wlc_cm;
}
@@ -1149,31 +376,15 @@ void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm)
kfree(wlc_cm);
}
-u8
-brcms_c_channel_locale_flags_in_band(struct brcms_cm_info *wlc_cm,
- uint bandunit)
-{
- return wlc_cm->bandstate[bandunit].locale_flags;
-}
-
-static bool
-brcms_c_quiet_chanspec(struct brcms_cm_info *wlc_cm, u16 chspec)
-{
- return (wlc_cm->wlc->pub->_n_enab & SUPPORT_11N) &&
- CHSPEC_IS40(chspec) ?
- (isset(wlc_cm->quiet_channels.vec,
- lower_20_sb(CHSPEC_CHANNEL(chspec))) ||
- isset(wlc_cm->quiet_channels.vec,
- upper_20_sb(CHSPEC_CHANNEL(chspec)))) :
- isset(wlc_cm->quiet_channels.vec, CHSPEC_CHANNEL(chspec));
-}
-
void
brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
u8 local_constraint_qdbm)
{
struct brcms_c_info *wlc = wlc_cm->wlc;
+ struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
+ const struct ieee80211_reg_rule *reg_rule;
struct txpwr_limits txpwr;
+ int ret;
brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr);
@@ -1181,8 +392,15 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
wlc_cm, &txpwr, local_constraint_qdbm
);
+ /* set or restore gmode as required by regulatory */
+ ret = freq_reg_info(wlc->wiphy, ch->center_freq, 0, &reg_rule);
+ if (!ret && (reg_rule->flags & NL80211_RRF_NO_OFDM))
+ brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false);
+ else
+ brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
+
brcms_b_set_chanspec(wlc->hw, chanspec,
- (brcms_c_quiet_chanspec(wlc_cm, chanspec) != 0),
+ !!(ch->flags & IEEE80211_CHAN_PASSIVE_SCAN),
&txpwr);
}
@@ -1191,15 +409,14 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
struct txpwr_limits *txpwr)
{
struct brcms_c_info *wlc = wlc_cm->wlc;
+ struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
uint i;
uint chan;
int maxpwr;
int delta;
const struct country_info *country;
struct brcms_band *band;
- const struct locale_info *li;
int conducted_max = BRCMS_TXPWR_MAX;
- int conducted_ofdm_max = BRCMS_TXPWR_MAX;
const struct locale_mimo_info *li_mimo;
int maxpwr20, maxpwr40;
int maxpwr_idx;
@@ -1207,67 +424,35 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
memset(txpwr, 0, sizeof(struct txpwr_limits));
- if (!brcms_c_valid_chanspec_db(wlc_cm, chanspec)) {
- country = brcms_c_country_lookup(wlc, wlc->autocountry_default);
- if (country == NULL)
- return;
- } else {
- country = wlc_cm->country;
- }
+ if (WARN_ON(!ch))
+ return;
+
+ country = &wlc_cm->world_regd->country;
chan = CHSPEC_CHANNEL(chanspec);
band = wlc->bandstate[chspec_bandunit(chanspec)];
- li = (band->bandtype == BRCM_BAND_5G) ?
- brcms_c_get_locale_5g(country->locale_5G) :
- brcms_c_get_locale_2g(country->locale_2G);
-
li_mimo = (band->bandtype == BRCM_BAND_5G) ?
brcms_c_get_mimo_5g(country->locale_mimo_5G) :
brcms_c_get_mimo_2g(country->locale_mimo_2G);
- if (li->flags & BRCMS_EIRP) {
- delta = band->antgain;
- } else {
- delta = 0;
- if (band->antgain > QDB(6))
- delta = band->antgain - QDB(6); /* Excess over 6 dB */
- }
+ delta = band->antgain;
- if (li == &locale_i) {
+ if (band->bandtype == BRCM_BAND_2G)
conducted_max = QDB(22);
- conducted_ofdm_max = QDB(22);
- }
+
+ maxpwr = QDB(ch->max_power) - delta;
+ maxpwr = max(maxpwr, 0);
+ maxpwr = min(maxpwr, conducted_max);
/* CCK txpwr limits for 2.4G band */
if (band->bandtype == BRCM_BAND_2G) {
- maxpwr = li->maxpwr[CHANNEL_POWER_IDX_2G_CCK(chan)];
-
- maxpwr = maxpwr - delta;
- maxpwr = max(maxpwr, 0);
- maxpwr = min(maxpwr, conducted_max);
-
for (i = 0; i < BRCMS_NUM_RATES_CCK; i++)
txpwr->cck[i] = (u8) maxpwr;
}
- /* OFDM txpwr limits for 2.4G or 5G bands */
- if (band->bandtype == BRCM_BAND_2G)
- maxpwr = li->maxpwr[CHANNEL_POWER_IDX_2G_OFDM(chan)];
- else
- maxpwr = li->maxpwr[CHANNEL_POWER_IDX_5G(chan)];
-
- maxpwr = maxpwr - delta;
- maxpwr = max(maxpwr, 0);
- maxpwr = min(maxpwr, conducted_ofdm_max);
-
- /* Keep OFDM lmit below CCK limit */
- if (band->bandtype == BRCM_BAND_2G)
- maxpwr = min_t(int, maxpwr, txpwr->cck[0]);
-
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
+ for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
txpwr->ofdm[i] = (u8) maxpwr;
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
/*
* OFDM 40 MHz SISO has the same power as the corresponding
* MCS0-7 rate unless overriden by the locale specific code.
@@ -1282,14 +467,9 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
txpwr->ofdm_40_cdd[i] = 0;
}
- /* MIMO/HT specific limits */
- if (li_mimo->flags & BRCMS_EIRP) {
- delta = band->antgain;
- } else {
- delta = 0;
- if (band->antgain > QDB(6))
- delta = band->antgain - QDB(6); /* Excess over 6 dB */
- }
+ delta = 0;
+ if (band->antgain > QDB(6))
+ delta = band->antgain - QDB(6); /* Excess over 6 dB */
if (band->bandtype == BRCM_BAND_2G)
maxpwr_idx = (chan - 1);
@@ -1431,8 +611,7 @@ static bool brcms_c_chspec_malformed(u16 chanspec)
* and they are also a legal HT combination
*/
static bool
-brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec,
- bool dualband)
+brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec)
{
struct brcms_c_info *wlc = wlc_cm->wlc;
u8 channel = CHSPEC_CHANNEL(chspec);
@@ -1448,59 +627,163 @@ brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec,
chspec_bandunit(chspec))
return false;
- /* Check a 20Mhz channel */
- if (CHSPEC_IS20(chspec)) {
- if (dualband)
- return brcms_c_valid_channel20_db(wlc_cm->wlc->cmi,
- channel);
- else
- return brcms_c_valid_channel20(wlc_cm->wlc->cmi,
- channel);
+ return true;
+}
+
+bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec)
+{
+ return brcms_c_valid_chanspec_ext(wlc_cm, chspec);
+}
+
+static bool brcms_is_radar_freq(u16 center_freq)
+{
+ return center_freq >= 5260 && center_freq <= 5700;
+}
+
+static void brcms_reg_apply_radar_flags(struct wiphy *wiphy)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ int i;
+
+ sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+ if (!sband)
+ return;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+
+ if (!brcms_is_radar_freq(ch->center_freq))
+ continue;
+
+ /*
+ * All channels in this range should be passive and have
+ * DFS enabled.
+ */
+ if (!(ch->flags & IEEE80211_CHAN_DISABLED))
+ ch->flags |= IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN;
}
-#ifdef SUPPORT_40MHZ
- /*
- * We know we are now checking a 40MHZ channel, so we should
- * only be here for NPHYS
- */
- if (BRCMS_ISNPHY(wlc->band) || BRCMS_ISSSLPNPHY(wlc->band)) {
- u8 upper_sideband = 0, idx;
- u8 num_ch20_entries =
- sizeof(chan20_info) / sizeof(struct chan20_info);
-
- if (!VALID_40CHANSPEC_IN_BAND(wlc, chspec_bandunit(chspec)))
- return false;
-
- if (dualband) {
- if (!brcms_c_valid_channel20_db(wlc->cmi,
- lower_20_sb(channel)) ||
- !brcms_c_valid_channel20_db(wlc->cmi,
- upper_20_sb(channel)))
- return false;
- } else {
- if (!brcms_c_valid_channel20(wlc->cmi,
- lower_20_sb(channel)) ||
- !brcms_c_valid_channel20(wlc->cmi,
- upper_20_sb(channel)))
- return false;
+}
+
+static void
+brcms_reg_apply_beaconing_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ const struct ieee80211_reg_rule *rule;
+ int band, i, ret;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ sband = wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+
+ if (ch->flags &
+ (IEEE80211_CHAN_DISABLED | IEEE80211_CHAN_RADAR))
+ continue;
+
+ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
+ ret = freq_reg_info(wiphy, ch->center_freq,
+ 0, &rule);
+ if (ret)
+ continue;
+
+ if (!(rule->flags & NL80211_RRF_NO_IBSS))
+ ch->flags &= ~IEEE80211_CHAN_NO_IBSS;
+ if (!(rule->flags & NL80211_RRF_PASSIVE_SCAN))
+ ch->flags &=
+ ~IEEE80211_CHAN_PASSIVE_SCAN;
+ } else if (ch->beacon_found) {
+ ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN);
+ }
}
+ }
+}
- /* find the lower sideband info in the sideband array */
- for (idx = 0; idx < num_ch20_entries; idx++) {
- if (chan20_info[idx].sb == lower_20_sb(channel))
- upper_sideband = chan20_info[idx].adj_sbs;
+static int brcms_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct brcms_info *wl = hw->priv;
+ struct brcms_c_info *wlc = wl->wlc;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ int band, i;
+ bool ch_found = false;
+
+ brcms_reg_apply_radar_flags(wiphy);
+
+ if (request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)
+ brcms_reg_apply_beaconing_flags(wiphy, request->initiator);
+
+ /* Disable radio if all channels disallowed by regulatory */
+ for (band = 0; !ch_found && band < IEEE80211_NUM_BANDS; band++) {
+ sband = wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ for (i = 0; !ch_found && i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+
+ if (!(ch->flags & IEEE80211_CHAN_DISABLED))
+ ch_found = true;
}
- /* check that the lower sideband allows an upper sideband */
- if ((upper_sideband & (CH_UPPER_SB | CH_EWA_VALID)) ==
- (CH_UPPER_SB | CH_EWA_VALID))
- return true;
- return false;
}
-#endif /* 40 MHZ */
- return false;
+ if (ch_found) {
+ mboolclr(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
+ } else {
+ mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
+ wiphy_err(wlc->wiphy, "wl%d: %s: no valid channel for \"%s\"\n",
+ wlc->pub->unit, __func__, request->alpha2);
+ }
+
+ if (wlc->pub->_nbands > 1 || wlc->band->bandtype == BRCM_BAND_2G)
+ wlc_phy_chanspec_ch14_widefilter_set(wlc->band->pi,
+ brcms_c_japan_ccode(request->alpha2));
+
+ return 0;
}
-bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec)
+void brcms_c_regd_init(struct brcms_c_info *wlc)
{
- return brcms_c_valid_chanspec_ext(wlc_cm, chspec, true);
+ struct wiphy *wiphy = wlc->wiphy;
+ const struct brcms_regd *regd = wlc->cmi->world_regd;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ struct brcms_chanvec sup_chan;
+ struct brcms_band *band;
+ int band_idx, i;
+
+ /* Disable any channels not supported by the phy */
+ for (band_idx = 0; band_idx < wlc->pub->_nbands; band_idx++) {
+ band = wlc->bandstate[band_idx];
+
+ wlc_phy_chanspec_band_validch(band->pi, band->bandtype,
+ &sup_chan);
+
+ if (band_idx == BAND_2G_INDEX)
+ sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+ else
+ sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+ if (!isset(sup_chan.vec, ch->hw_value))
+ ch->flags |= IEEE80211_CHAN_DISABLED;
+ }
+ }
+
+ wlc->wiphy->reg_notifier = brcms_reg_notifier;
+ wlc->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
+ WIPHY_FLAG_STRICT_REGULATORY;
+ wiphy_apply_custom_regulatory(wlc->wiphy, regd->regdomain);
+ brcms_reg_apply_beaconing_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER);
}
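
Editor's note on the channel.c rewrite above: the driver-private locale tables give way to a single worldwide ieee80211_regdomain built from REG_RULE() entries, applied with wiphy_apply_custom_regulatory() and kept current through a wiphy reg_notifier. A minimal sketch of that registration flow using the cfg80211 API of this kernel generation (reg_notifier still returns int here); the rule values and the example_* names are illustrative only:

	#include <net/cfg80211.h>
	#include <net/regulatory.h>

	static const struct ieee80211_regdomain example_regdom = {
		.n_reg_rules = 1,
		.alpha2 = "99",		/* custom, built-in domain */
		.reg_rules = {
			/* REG_RULE(start MHz, end MHz, max BW MHz,
			 * max antenna gain dBi, max EIRP dBm, flags) */
			REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0),
		},
	};

	static int example_reg_notifier(struct wiphy *wiphy,
					struct regulatory_request *request)
	{
		/* Re-check channel flags after the core applies the new domain. */
		return 0;
	}

	static void example_regd_init(struct wiphy *wiphy)
	{
		wiphy->reg_notifier = example_reg_notifier;
		wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
		wiphy_apply_custom_regulatory(wiphy, &example_regdom);
	}
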
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.h b/drivers/net/wireless/brcm80211/brcmsmac/channel.h
index 808cb4f..006483a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.h
@@ -37,9 +37,6 @@ brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
extern void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
-extern u8 brcms_c_channel_locale_flags_in_band(struct brcms_cm_info *wlc_cm,
- uint bandunit);
-
extern bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm,
u16 chspec);
@@ -49,5 +46,6 @@ extern void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm,
extern void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm,
u16 chanspec,
u8 local_constraint_qdbm);
+extern void brcms_c_regd_init(struct brcms_c_info *wlc);
#endif /* _WLC_CHANNEL_H */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index 11054ae..5e53305 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -573,6 +573,7 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
struct dma_info *di;
u8 rev = core->id.rev;
uint size;
+ struct si_info *sii = container_of(sih, struct si_info, pub);
/* allocate private info structure */
di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
@@ -633,16 +634,20 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
*/
di->ddoffsetlow = 0;
di->dataoffsetlow = 0;
- /* add offset for pcie with DMA64 bus */
- di->ddoffsetlow = 0;
- di->ddoffsethigh = SI_PCIE_DMA_H32;
+ /* for pci bus, add offset */
+ if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) {
+ /* add offset for pcie with DMA64 bus */
+ di->ddoffsetlow = 0;
+ di->ddoffsethigh = SI_PCIE_DMA_H32;
+ }
di->dataoffsetlow = di->ddoffsetlow;
di->dataoffsethigh = di->ddoffsethigh;
+
/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
- if ((core->id.id == SDIOD_CORE_ID)
+ if ((core->id.id == BCMA_CORE_SDIO_DEV)
&& ((rev > 0) && (rev <= 2)))
di->addrext = false;
- else if ((core->id.id == I2S_CORE_ID) &&
+ else if ((core->id.id == BCMA_CORE_I2S) &&
((rev == 0) || (rev == 1)))
di->addrext = false;
else
@@ -1433,7 +1438,7 @@ void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
struct ieee80211_tx_info *tx_info;
while (i != end) {
- skb = (struct sk_buff *)di->txp[i];
+ skb = di->txp[i];
if (skb != NULL) {
tx_info = (struct ieee80211_tx_info *)skb->cb;
(callback_fnc)(tx_info, arg_a);
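
Editor's note: the dma.c change above recovers the enclosing struct si_info from the embedded struct si_pub via container_of() so it can check the bcma host type and only apply the PCIe high-address descriptor offset on PCI hosts. Below is a self-contained illustration of that pattern, assuming simplified stand-in types and an invented offset constant rather than the driver's real SI_PCIE_DMA_H32.

/* Stand-alone container_of() illustration; types are stand-ins. */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

enum hosttype { HOST_PCI, HOST_SOC };

struct si_pub { int chip; };
struct si_info { enum hosttype host; struct si_pub pub; };

static void dma_attach_sketch(struct si_pub *sih)
{
	struct si_info *sii = container_of(sih, struct si_info, pub);
	unsigned ddoffsethigh = 0;

	if (sii->host == HOST_PCI)
		ddoffsethigh = 0x80000000u;	/* stand-in for SI_PCIE_DMA_H32 */

	printf("descriptor high offset: 0x%x\n", ddoffsethigh);
}

int main(void)
{
	struct si_info pci = { .host = HOST_PCI };
	struct si_info soc = { .host = HOST_SOC };

	dma_attach_sketch(&pci.pub);	/* prints the PCIe offset */
	dma_attach_sketch(&soc.pub);	/* prints 0 for SoC hosts */
	return 0;
}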
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 50f92a0..9e79d47 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -267,6 +267,7 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br)
static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct brcms_info *wl = hw->priv;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
spin_lock_bh(&wl->lock);
if (!wl->pub->up) {
@@ -275,6 +276,7 @@ static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
goto done;
}
brcms_c_sendpkt_mac80211(wl->wlc, skb, hw);
+ tx_info->rate_driver_data[0] = tx_info->control.sta;
done:
spin_unlock_bh(&wl->lock);
}
@@ -319,8 +321,7 @@ static void brcms_ops_stop(struct ieee80211_hw *hw)
return;
spin_lock_bh(&wl->lock);
- status = brcms_c_chipmatch(wl->wlc->hw->vendorid,
- wl->wlc->hw->deviceid);
+ status = brcms_c_chipmatch(wl->wlc->hw->d11core);
spin_unlock_bh(&wl->lock);
if (!status) {
wiphy_err(wl->wiphy,
@@ -721,14 +722,6 @@ static const struct ieee80211_ops brcms_ops = {
.flush = brcms_ops_flush,
};
-/*
- * is called in brcms_bcma_probe() context, therefore no locking required.
- */
-static int brcms_set_hint(struct brcms_info *wl, char *abbrev)
-{
- return regulatory_hint(wl->pub->ieee_hw->wiphy, abbrev);
-}
-
void brcms_dpc(unsigned long data)
{
struct brcms_info *wl;
@@ -1058,6 +1051,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
goto fail;
}
+ brcms_c_regd_init(wl->wlc);
+
memcpy(perm, &wl->pub->cur_etheraddr, ETH_ALEN);
if (WARN_ON(!is_valid_ether_addr(perm)))
goto fail;
@@ -1068,9 +1063,9 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
wiphy_err(wl->wiphy, "%s: ieee80211_register_hw failed, status"
"%d\n", __func__, err);
- if (wl->pub->srom_ccode[0] && brcms_set_hint(wl, wl->pub->srom_ccode))
- wiphy_err(wl->wiphy, "%s: regulatory_hint failed, status %d\n",
- __func__, err);
+ if (wl->pub->srom_ccode[0] &&
+ regulatory_hint(wl->wiphy, wl->pub->srom_ccode))
+ wiphy_err(wl->wiphy, "%s: regulatory hint failed\n", __func__);
n_adapters_found++;
return wl;
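
Editor's note: brcms_ops_tx() above stashes the station pointer in tx_info->rate_driver_data[0] at transmit time because the control fields of the per-skb info are not preserved for the status path; brcms_c_dotxstatus() later reads the stashed pointer instead of control.sta. A rough user-space sketch of that stash-and-read-back pattern, with simplified stand-in types rather than the real mac80211 structures:

/* Stash a TX-path-only pointer in driver-owned scratch space. */
#include <stdio.h>

struct sta { int id; };

struct tx_info {
	struct sta *control_sta;	/* valid only on the TX path */
	void *rate_driver_data[4];	/* driver-owned scratch space */
};

static void tx_path(struct tx_info *ti)
{
	ti->rate_driver_data[0] = ti->control_sta;	/* stash for later */
	ti->control_sta = NULL;		/* pretend the control union is reused */
}

static void tx_status_path(const struct tx_info *ti)
{
	struct sta *sta = ti->rate_driver_data[0];

	printf("completed frame for sta %d\n", sta ? sta->id : -1);
}

int main(void)
{
	struct sta s = { .id = 7 };
	struct tx_info ti = { .control_sta = &s };

	tx_path(&ti);
	tx_status_path(&ti);
	return 0;
}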
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 19db405..03ca653 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -18,6 +18,7 @@
#include <linux/pci_ids.h>
#include <linux/if_ether.h>
+#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <brcm_hw_ids.h>
#include <aiutils.h>
@@ -268,7 +269,7 @@ struct brcms_c_bit_desc {
*/
/* Starting corerev for the fifo size table */
-#define XMTFIFOTBL_STARTREV 20
+#define XMTFIFOTBL_STARTREV 17
struct d11init {
__le16 addr;
@@ -332,6 +333,12 @@ const u8 wlc_prio2prec_map[] = {
};
static const u16 xmtfifo_sz[][NFIFO] = {
+ /* corerev 17: 5120, 49152, 49152, 5376, 4352, 1280 */
+ {20, 192, 192, 21, 17, 5},
+ /* corerev 18: */
+ {0, 0, 0, 0, 0, 0},
+ /* corerev 19: */
+ {0, 0, 0, 0, 0, 0},
/* corerev 20: 5120, 49152, 49152, 5376, 4352, 1280 */
{20, 192, 192, 21, 17, 5},
/* corerev 21: 2304, 14848, 5632, 3584, 3584, 1280 */
@@ -342,6 +349,14 @@ static const u16 xmtfifo_sz[][NFIFO] = {
{20, 192, 192, 21, 17, 5},
/* corerev 24: 2304, 14848, 5632, 3584, 3584, 1280 */
{9, 58, 22, 14, 14, 5},
+ /* corerev 25: */
+ {0, 0, 0, 0, 0, 0},
+ /* corerev 26: */
+ {0, 0, 0, 0, 0, 0},
+ /* corerev 27: */
+ {0, 0, 0, 0, 0, 0},
+ /* corerev 28: 2304, 14848, 5632, 3584, 3584, 1280 */
+ {9, 58, 22, 14, 14, 5},
};
#ifdef DEBUG
@@ -878,7 +893,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
tx_info = IEEE80211_SKB_CB(p);
h = (struct ieee80211_hdr *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);
- if (tx_info->control.sta)
+ if (tx_info->rate_driver_data[0])
scb = &wlc->pri_scb;
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
@@ -1941,7 +1956,8 @@ static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
* accesses phyreg throughput mac. This can be skipped since
* only mac reg is accessed below
*/
- flags |= SICF_PCLKE;
+ if (D11REV_GE(wlc_hw->corerev, 18))
+ flags |= SICF_PCLKE;
/*
* TODO: test suspend/resume
@@ -2022,7 +2038,8 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
* phyreg throughput mac, AND phy_reset is skipped at early stage when
* band->pi is invalid. need to enable PHY CLK
*/
- flags |= SICF_PCLKE;
+ if (D11REV_GE(wlc_hw->corerev, 18))
+ flags |= SICF_PCLKE;
/*
* reset the core
@@ -2125,8 +2142,8 @@ void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
{
struct bcma_device *core = wlc_hw->d11core;
- if ((ai_get_chip_id(wlc_hw->sih) == BCM43224_CHIP_ID) ||
- (ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID)) {
+ if ((ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM43224) ||
+ (ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM43225)) {
if (spurmode == WL_SPURAVOID_ON2) { /* 126Mhz */
bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x2082);
bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
@@ -2790,7 +2807,7 @@ void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
tmp = 0;
if (on) {
- if ((ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
+ if ((ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM4313)) {
bcma_set32(core, D11REGOFFS(clk_ctl_st),
CCS_ERSRC_REQ_HT |
CCS_ERSRC_REQ_D11PLL |
@@ -3139,20 +3156,6 @@ void brcms_c_reset(struct brcms_c_info *wlc)
brcms_b_reset(wlc->hw);
}
-/* Return the channel the driver should initialize during brcms_c_init.
- * the channel may have to be changed from the currently configured channel
- * if other configurations are in conflict (bandlocked, 11n mode disabled,
- * invalid channel for current country, etc.)
- */
-static u16 brcms_c_init_chanspec(struct brcms_c_info *wlc)
-{
- u16 chanspec =
- 1 | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE |
- WL_CHANSPEC_BAND_2G;
-
- return chanspec;
-}
-
void brcms_c_init_scb(struct scb *scb)
{
int i;
@@ -4231,9 +4234,8 @@ static void brcms_c_radio_timer(void *arg)
}
/* common low-level watchdog code */
-static void brcms_b_watchdog(void *arg)
+static void brcms_b_watchdog(struct brcms_c_info *wlc)
{
- struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
struct brcms_hardware *wlc_hw = wlc->hw;
BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
@@ -4254,10 +4256,8 @@ static void brcms_b_watchdog(void *arg)
}
/* common watchdog code */
-static void brcms_c_watchdog(void *arg)
+static void brcms_c_watchdog(struct brcms_c_info *wlc)
{
- struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
-
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
if (!wlc->pub->up)
@@ -4297,7 +4297,9 @@ static void brcms_c_watchdog(void *arg)
static void brcms_c_watchdog_by_timer(void *arg)
{
- brcms_c_watchdog(arg);
+ struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
+
+ brcms_c_watchdog(wlc);
}
static bool brcms_c_timers_init(struct brcms_c_info *wlc, int unit)
@@ -4467,11 +4469,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
}
/* verify again the device is supported */
- if (core->bus->hosttype == BCMA_HOSTTYPE_PCI &&
- !brcms_c_chipmatch(pcidev->vendor, pcidev->device)) {
- wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported "
- "vendor/device (0x%x/0x%x)\n",
- unit, pcidev->vendor, pcidev->device);
+ if (!brcms_c_chipmatch(core)) {
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported device\n",
+ unit);
err = 12;
goto fail;
}
@@ -4541,7 +4541,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
else
wlc_hw->_nbands = 1;
- if ((ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID))
+ if ((ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM43225))
wlc_hw->_nbands = 1;
/* BMAC_NOTE: remove init of pub values when brcms_c_attach()
@@ -4608,8 +4608,12 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
wlc_hw->machwcap_backup = wlc_hw->machwcap;
/* init tx fifo size */
+ WARN_ON((wlc_hw->corerev - XMTFIFOTBL_STARTREV) < 0 ||
+ (wlc_hw->corerev - XMTFIFOTBL_STARTREV) >
+ ARRAY_SIZE(xmtfifo_sz));
wlc_hw->xmtfifo_sz =
xmtfifo_sz[(wlc_hw->corerev - XMTFIFOTBL_STARTREV)];
+ WARN_ON(!wlc_hw->xmtfifo_sz[0]);
/* Get a phy for this band */
wlc_hw->band->pi =
@@ -5049,7 +5053,7 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
wlc_hw->wlc->pub->hw_up = true;
if ((wlc_hw->boardflags & BFL_FEM)
- && (ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
+ && (ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM4313)) {
if (!
(wlc_hw->boardrev >= 0x1250
&& (wlc_hw->boardflags & BFL_FEM_BT)))
@@ -5129,6 +5133,8 @@ static void brcms_c_wme_retries_write(struct brcms_c_info *wlc)
/* make interface operational */
int brcms_c_up(struct brcms_c_info *wlc)
{
+ struct ieee80211_channel *ch;
+
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
/* HW is turned off so don't try to access it */
@@ -5141,7 +5147,7 @@ int brcms_c_up(struct brcms_c_info *wlc)
}
if ((wlc->pub->boardflags & BFL_FEM)
- && (ai_get_chip_id(wlc->hw->sih) == BCM4313_CHIP_ID)) {
+ && (ai_get_chip_id(wlc->hw->sih) == BCMA_CHIP_ID_BCM4313)) {
if (wlc->pub->boardrev >= 0x1250
&& (wlc->pub->boardflags & BFL_FEM_BT))
brcms_b_mhf(wlc->hw, MHF5, MHF5_4313_GPIOCTRL,
@@ -5195,8 +5201,9 @@ int brcms_c_up(struct brcms_c_info *wlc)
wlc->pub->up = true;
if (wlc->bandinit_pending) {
+ ch = wlc->pub->ieee_hw->conf.channel;
brcms_c_suspend_mac_and_wait(wlc);
- brcms_c_set_chanspec(wlc, wlc->default_bss->chanspec);
+ brcms_c_set_chanspec(wlc, ch20mhz_chspec(ch->hw_value));
wlc->bandinit_pending = false;
brcms_c_enable_mac(wlc);
}
@@ -5397,11 +5404,6 @@ int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config)
else
return -EINVAL;
- /* Legacy or bust when no OFDM is supported by regulatory */
- if ((brcms_c_channel_locale_flags_in_band(wlc->cmi, band->bandunit) &
- BRCMS_NO_OFDM) && (gmode != GMODE_LEGACY_B))
- return -EINVAL;
-
/* update configuration value */
if (config)
brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER, gmode);
@@ -5782,8 +5784,12 @@ void brcms_c_print_txstatus(struct tx_status *txs)
(txs->ackphyrxsh & PRXS1_SQ_MASK) >> PRXS1_SQ_SHIFT);
}
-bool brcms_c_chipmatch(u16 vendor, u16 device)
+static bool brcms_c_chipmatch_pci(struct bcma_device *core)
{
+ struct pci_dev *pcidev = core->bus->host_pci;
+ u16 vendor = pcidev->vendor;
+ u16 device = pcidev->device;
+
if (vendor != PCI_VENDOR_ID_BROADCOM) {
pr_err("unknown vendor id %04x\n", vendor);
return false;
@@ -5802,6 +5808,30 @@ bool brcms_c_chipmatch(u16 vendor, u16 device)
return false;
}
+static bool brcms_c_chipmatch_soc(struct bcma_device *core)
+{
+ struct bcma_chipinfo *chipinfo = &core->bus->chipinfo;
+
+ if (chipinfo->id == BCMA_CHIP_ID_BCM4716)
+ return true;
+
+ pr_err("unknown chip id %04x\n", chipinfo->id);
+ return false;
+}
+
+bool brcms_c_chipmatch(struct bcma_device *core)
+{
+ switch (core->bus->hosttype) {
+ case BCMA_HOSTTYPE_PCI:
+ return brcms_c_chipmatch_pci(core);
+ case BCMA_HOSTTYPE_SOC:
+ return brcms_c_chipmatch_soc(core);
+ default:
+ pr_err("unknown host type: %i\n", core->bus->hosttype);
+ return false;
+ }
+}
+
#if defined(DEBUG)
void brcms_c_print_txdesc(struct d11txh *txh)
{
@@ -8201,19 +8231,12 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
{
struct bcma_device *core = wlc->hw->d11core;
+ struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
u16 chanspec;
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
- /*
- * This will happen if a big-hammer was executed. In
- * that case, we want to go back to the channel that
- * we were on and not new channel
- */
- if (wlc->pub->associated)
- chanspec = wlc->home_chanspec;
- else
- chanspec = brcms_c_init_chanspec(wlc);
+ chanspec = ch20mhz_chspec(ch->hw_value);
brcms_b_init(wlc->hw, chanspec);
@@ -8318,7 +8341,7 @@ brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
struct brcms_pub *pub;
/* allocate struct brcms_c_info state and its substructures */
- wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, 0);
+ wlc = brcms_c_attach_malloc(unit, &err, 0);
if (wlc == NULL)
goto fail;
wlc->wiphy = wl->wiphy;
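
Editor's note: the main.c hunks above reshape brcms_c_chipmatch() from a PCI vendor/device check into a dispatcher keyed on the bcma host type, with PCI- and SoC-specific helpers behind it. The sketch below mirrors that shape; the enum values and IDs are illustrative stand-ins, not the real bcma or PCI definitions.

/* Dispatch device matching by host type; IDs below are stand-ins. */
#include <stdio.h>
#include <stdbool.h>

enum hosttype { HOST_PCI, HOST_SOC, HOST_SDIO };

struct device { enum hosttype host; unsigned short vendor, device, chip_id; };

static bool chipmatch_pci(const struct device *d)
{
	return d->vendor == 0x14e4;	/* stand-in for PCI_VENDOR_ID_BROADCOM */
}

static bool chipmatch_soc(const struct device *d)
{
	return d->chip_id == 0x4716;	/* stand-in for BCMA_CHIP_ID_BCM4716 */
}

static bool chipmatch(const struct device *d)
{
	switch (d->host) {
	case HOST_PCI:
		return chipmatch_pci(d);
	case HOST_SOC:
		return chipmatch_soc(d);
	default:
		fprintf(stderr, "unknown host type %d\n", d->host);
		return false;
	}
}

int main(void)
{
	struct device pci = { HOST_PCI, 0x14e4, 0x4353, 0 };
	struct device soc = { HOST_SOC, 0, 0, 0x4716 };

	printf("pci match: %d, soc match: %d\n", chipmatch(&pci), chipmatch(&soc));
	return 0;
}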
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
index 264f8c4..91937c5 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -198,6 +198,8 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
+ struct si_info *sii = container_of(pi->sh->sih, struct si_info, pub);
+
if ((D11REV_GE(pi->sh->corerev, 24)) ||
(D11REV_IS(pi->sh->corerev, 22)
&& (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
@@ -209,7 +211,8 @@ void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
bcma_write16(pi->d11core, D11REGOFFS(phy4wdatalo), val);
}
- if (++pi->phy_wreg >= pi->phy_wreg_limit) {
+ if ((sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) &&
+ (++pi->phy_wreg >= pi->phy_wreg_limit)) {
(void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
pi->phy_wreg = 0;
}
@@ -292,10 +295,13 @@ void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
bcma_write16(pi->d11core, D11REGOFFS(phyregdata), val);
if (addr == 0x72)
- (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
+ (void)bcma_read16(pi->d11core, D11REGOFFS(phyregdata));
#else
+ struct si_info *sii = container_of(pi->sh->sih, struct si_info, pub);
+
bcma_write32(pi->d11core, D11REGOFFS(phyregaddr), addr | (val << 16));
- if (++pi->phy_wreg >= pi->phy_wreg_limit) {
+ if ((sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) &&
+ (++pi->phy_wreg >= pi->phy_wreg_limit)) {
pi->phy_wreg = 0;
(void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
}
@@ -837,7 +843,7 @@ wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id, uint tbl_offset,
pi->tbl_data_hi = tblDataHi;
pi->tbl_data_lo = tblDataLo;
- if (pi->sh->chip == BCM43224_CHIP_ID &&
+ if (pi->sh->chip == BCMA_CHIP_ID_BCM43224 &&
pi->sh->chiprev == 1) {
pi->tbl_addr = tblAddr;
pi->tbl_save_id = tbl_id;
@@ -847,7 +853,7 @@ wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id, uint tbl_offset,
void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val)
{
- if ((pi->sh->chip == BCM43224_CHIP_ID) &&
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) &&
(pi->sh->chiprev == 1) &&
(pi->tbl_save_id == NPHY_TBL_ID_ANTSWCTRLLUT)) {
read_phy_reg(pi, pi->tbl_data_lo);
@@ -881,7 +887,7 @@ wlc_phy_write_table(struct brcms_phy *pi, const struct phytbl_info *ptbl_info,
for (idx = 0; idx < ptbl_info->tbl_len; idx++) {
- if ((pi->sh->chip == BCM43224_CHIP_ID) &&
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) &&
(pi->sh->chiprev == 1) &&
(tbl_id == NPHY_TBL_ID_ANTSWCTRLLUT)) {
read_phy_reg(pi, tblDataLo);
@@ -918,7 +924,7 @@ wlc_phy_read_table(struct brcms_phy *pi, const struct phytbl_info *ptbl_info,
for (idx = 0; idx < ptbl_info->tbl_len; idx++) {
- if ((pi->sh->chip == BCM43224_CHIP_ID) &&
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) &&
(pi->sh->chiprev == 1)) {
(void)read_phy_reg(pi, tblDataLo);
@@ -2894,7 +2900,7 @@ const u8 *wlc_phy_get_ofdm_rate_lookup(void)
void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode)
{
- if ((pi->sh->chip == BCM4313_CHIP_ID) &&
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM4313) &&
(pi->sh->boardflags & BFL_FEM)) {
if (mode) {
u16 txant = 0;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
index 13b2615..65db9b7 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
@@ -14358,7 +14358,7 @@ void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs)
wlc_phy_write_txmacreg_nphy(pi, holdoff, delay);
- if (pi && pi->sh && (pi->sh->_rifs_phy != rifs))
+ if (pi->sh && (pi->sh->_rifs_phy != rifs))
pi->sh->_rifs_phy = rifs;
}
@@ -17893,6 +17893,8 @@ static u32 *wlc_phy_get_ipa_gaintbl_nphy(struct brcms_phy *pi)
nphy_tpc_txgain_ipa_2g_2057rev7;
} else if (NREV_IS(pi->pubpi.phy_rev, 6)) {
tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev6;
+ if (pi->sh->chip == BCMA_CHIP_ID_BCM47162)
+ tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev5;
} else if (NREV_IS(pi->pubpi.phy_rev, 5)) {
tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev5;
} else {
@@ -19254,8 +19256,14 @@ static void wlc_phy_spurwar_nphy(struct brcms_phy *pi)
case 38:
case 102:
case 118:
- nphy_adj_tone_id_buf[0] = 0;
- nphy_adj_noise_var_buf[0] = 0x0;
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) &&
+ (pi->sh->chippkg == BCMA_PKG_ID_BCM4717)) {
+ nphy_adj_tone_id_buf[0] = 32;
+ nphy_adj_noise_var_buf[0] = 0x21f;
+ } else {
+ nphy_adj_tone_id_buf[0] = 0;
+ nphy_adj_noise_var_buf[0] = 0x0;
+ }
break;
case 134:
nphy_adj_tone_id_buf[0] = 32;
@@ -19309,8 +19317,8 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
pi->measure_hold |= PHY_HOLD_FOR_NOT_ASSOC;
if ((ISNPHY(pi)) && (NREV_GE(pi->pubpi.phy_rev, 5)) &&
- ((pi->sh->chippkg == BCM4717_PKG_ID) ||
- (pi->sh->chippkg == BCM4718_PKG_ID))) {
+ ((pi->sh->chippkg == BCMA_PKG_ID_BCM4717) ||
+ (pi->sh->chippkg == BCMA_PKG_ID_BCM4718))) {
if ((pi->sh->boardflags & BFL_EXTLNA) &&
(CHSPEC_IS2G(pi->radio_chanspec)))
ai_cc_reg(pi->sh->sih,
@@ -19318,6 +19326,10 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
0x40, 0x40);
}
+ if ((!PHY_IPA(pi)) && (pi->sh->chip == BCMA_CHIP_ID_BCM5357))
+ si_pmu_chipcontrol(pi->sh->sih, 1, CCTRL5357_EXTPA,
+ CCTRL5357_EXTPA);
+
if ((pi->nphy_gband_spurwar2_en) && CHSPEC_IS2G(pi->radio_chanspec) &&
CHSPEC_IS40(pi->radio_chanspec)) {
@@ -20695,12 +20707,22 @@ wlc_phy_chanspec_radio2056_setup(struct brcms_phy *pi,
write_radio_reg(pi, RADIO_2056_SYN_PLL_LOOPFILTER2 |
RADIO_2056_SYN, 0x1f);
- write_radio_reg(pi,
- RADIO_2056_SYN_PLL_LOOPFILTER4 |
- RADIO_2056_SYN, 0xb);
- write_radio_reg(pi,
- RADIO_2056_SYN_PLL_CP2 |
- RADIO_2056_SYN, 0x14);
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) ||
+ (pi->sh->chip == BCMA_CHIP_ID_BCM47162)) {
+ write_radio_reg(pi,
+ RADIO_2056_SYN_PLL_LOOPFILTER4 |
+ RADIO_2056_SYN, 0x14);
+ write_radio_reg(pi,
+ RADIO_2056_SYN_PLL_CP2 |
+ RADIO_2056_SYN, 0x00);
+ } else {
+ write_radio_reg(pi,
+ RADIO_2056_SYN_PLL_LOOPFILTER4 |
+ RADIO_2056_SYN, 0xb);
+ write_radio_reg(pi,
+ RADIO_2056_SYN_PLL_CP2 |
+ RADIO_2056_SYN, 0x14);
+ }
}
}
@@ -20747,24 +20769,30 @@ wlc_phy_chanspec_radio2056_setup(struct brcms_phy *pi,
WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
PADG_IDAC, 0xcc);
- bias = 0x25;
- cascbias = 0x20;
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) ||
+ (pi->sh->chip == BCMA_CHIP_ID_BCM47162)) {
+ bias = 0x40;
+ cascbias = 0x45;
+ pag_boost_tune = 0x5;
+ pgag_boost_tune = 0x33;
+ padg_boost_tune = 0x77;
+ mixg_boost_tune = 0x55;
+ } else {
+ bias = 0x25;
+ cascbias = 0x20;
- if ((pi->sh->chip ==
- BCM43224_CHIP_ID)
- || (pi->sh->chip ==
- BCM43225_CHIP_ID)) {
- if (pi->sh->chippkg ==
- BCM43224_FAB_SMIC) {
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224 ||
+ pi->sh->chip == BCMA_CHIP_ID_BCM43225) &&
+ pi->sh->chippkg == BCMA_PKG_ID_BCM43224_FAB_SMIC) {
bias = 0x2a;
cascbias = 0x38;
}
- }
- pag_boost_tune = 0x4;
- pgag_boost_tune = 0x03;
- padg_boost_tune = 0x77;
- mixg_boost_tune = 0x65;
+ pag_boost_tune = 0x4;
+ pgag_boost_tune = 0x03;
+ padg_boost_tune = 0x77;
+ mixg_boost_tune = 0x65;
+ }
WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
INTPAG_IMAIN_STAT, bias);
@@ -20863,11 +20891,10 @@ wlc_phy_chanspec_radio2056_setup(struct brcms_phy *pi,
cascbias = 0x30;
- if ((pi->sh->chip == BCM43224_CHIP_ID) ||
- (pi->sh->chip == BCM43225_CHIP_ID)) {
- if (pi->sh->chippkg == BCM43224_FAB_SMIC)
- cascbias = 0x35;
- }
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224 ||
+ pi->sh->chip == BCMA_CHIP_ID_BCM43225) &&
+ pi->sh->chippkg == BCMA_PKG_ID_BCM43224_FAB_SMIC)
+ cascbias = 0x35;
pabias = (pi->phy_pabias == 0) ? 0x30 : pi->phy_pabias;
@@ -21106,6 +21133,7 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
const struct nphy_sfo_cfg *ci)
{
u16 val;
+ struct si_info *sii = container_of(pi->sh->sih, struct si_info, pub);
val = read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand;
if (CHSPEC_IS5G(chanspec) && !val) {
@@ -21178,22 +21206,32 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
} else if (NREV_GE(pi->pubpi.phy_rev, 7)) {
if (val == 54)
spuravoid = 1;
- } else {
- if (pi->nphy_aband_spurwar_en &&
- ((val == 38) || (val == 102)
- || (val == 118)))
+ } else if (pi->nphy_aband_spurwar_en &&
+ ((val == 38) || (val == 102) || (val == 118))) {
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716)
+ && (pi->sh->chippkg == BCMA_PKG_ID_BCM4717)) {
+ spuravoid = 0;
+ } else {
spuravoid = 1;
+ }
}
if (pi->phy_spuravoid == SPURAVOID_FORCEON)
spuravoid = 1;
- wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false);
- si_pmu_spuravoid_pllupdate(pi->sh->sih, spuravoid);
- wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true);
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) ||
+ (pi->sh->chip == BCMA_CHIP_ID_BCM43225)) {
+ bcma_pmu_spuravoid_pllupdate(&sii->icbus->drv_cc,
+ spuravoid);
+ } else {
+ wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false);
+ bcma_pmu_spuravoid_pllupdate(&sii->icbus->drv_cc,
+ spuravoid);
+ wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true);
+ }
- if ((pi->sh->chip == BCM43224_CHIP_ID) ||
- (pi->sh->chip == BCM43225_CHIP_ID)) {
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) ||
+ (pi->sh->chip == BCMA_CHIP_ID_BCM43225)) {
if (spuravoid == 1) {
bcma_write16(pi->d11core,
D11REGOFFS(tsf_clk_frac_l),
@@ -21209,7 +21247,9 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
}
}
- wlapi_bmac_core_phypll_reset(pi->sh->physhim);
+ if (!((pi->sh->chip == BCMA_CHIP_ID_BCM4716) ||
+ (pi->sh->chip == BCMA_CHIP_ID_BCM47162)))
+ wlapi_bmac_core_phypll_reset(pi->sh->physhim);
mod_phy_reg(pi, 0x01, (0x1 << 15),
((spuravoid > 0) ? (0x1 << 15) : 0));
@@ -22171,9 +22211,15 @@ s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi)
wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x03, 16,
&auxADC_rssi_ctrlH_save);
- radio_temp[0] = (179 * (radio_temp[1] + radio_temp2[1])
- + 82 * (auxADC_Vl) - 28861 +
- 128) / 256;
+ if (pi->sh->chip == BCMA_CHIP_ID_BCM5357) {
+ radio_temp[0] = (193 * (radio_temp[1] + radio_temp2[1])
+ + 88 * (auxADC_Vl) - 27111 +
+ 128) / 256;
+ } else {
+ radio_temp[0] = (179 * (radio_temp[1] + radio_temp2[1])
+ + 82 * (auxADC_Vl) - 28861 +
+ 128) / 256;
+ }
offset = (s16) pi->phy_tempsense_offset;
@@ -24923,14 +24969,16 @@ wlc_phy_a2_nphy(struct brcms_phy *pi, struct nphy_ipa_txcalgains *txgains,
if (txgains->useindex) {
phy_a4 = 15 - ((txgains->index) >> 3);
if (CHSPEC_IS2G(pi->radio_chanspec)) {
- if (NREV_GE(pi->pubpi.phy_rev, 6))
+ if (NREV_GE(pi->pubpi.phy_rev, 6) &&
+ pi->sh->chip == BCMA_CHIP_ID_BCM47162) {
+ phy_a5 = 0x10f7 | (phy_a4 << 8);
+ } else if (NREV_GE(pi->pubpi.phy_rev, 6)) {
phy_a5 = 0x00f7 | (phy_a4 << 8);
-
- else
- if (NREV_IS(pi->pubpi.phy_rev, 5))
+ } else if (NREV_IS(pi->pubpi.phy_rev, 5)) {
phy_a5 = 0x10f7 | (phy_a4 << 8);
- else
+ } else {
phy_a5 = 0x50f7 | (phy_a4 << 8);
+ }
} else {
phy_a5 = 0x70f7 | (phy_a4 << 8);
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
index 4931d29..7e9df56 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
@@ -74,16 +74,6 @@
* PMU<rev>_PLL<num>_XX where <rev> is PMU corerev and <num> is an arbitrary
* number to differentiate different PLLs controlled by the same PMU rev.
*/
-/* pllcontrol registers:
- * ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>,
- * p1div, p2div, _bypass_sdmod
- */
-#define PMU1_PLL0_PLLCTL0 0
-#define PMU1_PLL0_PLLCTL1 1
-#define PMU1_PLL0_PLLCTL2 2
-#define PMU1_PLL0_PLLCTL3 3
-#define PMU1_PLL0_PLLCTL4 4
-#define PMU1_PLL0_PLLCTL5 5
/* pmu XtalFreqRatio */
#define PMU_XTALFREQ_REG_ILPCTR_MASK 0x00001FFF
@@ -108,118 +98,14 @@
#define RES4313_HT_AVAIL_RSRC 14
#define RES4313_MACPHY_CLK_AVAIL_RSRC 15
-/* Determine min/max rsrc masks. Value 0 leaves hardware at default. */
-static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax)
-{
- u32 min_mask = 0, max_mask = 0;
- uint rsrcs;
-
- /* # resources */
- rsrcs = (ai_get_pmucaps(sih) & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
-
- /* determine min/max rsrc masks */
- switch (ai_get_chip_id(sih)) {
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- /* ??? */
- break;
-
- case BCM4313_CHIP_ID:
- min_mask = PMURES_BIT(RES4313_BB_PU_RSRC) |
- PMURES_BIT(RES4313_XTAL_PU_RSRC) |
- PMURES_BIT(RES4313_ALP_AVAIL_RSRC) |
- PMURES_BIT(RES4313_BB_PLL_PWRSW_RSRC);
- max_mask = 0xffff;
- break;
- default:
- break;
- }
-
- *pmin = min_mask;
- *pmax = max_mask;
-}
-
-void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid)
-{
- u32 tmp = 0;
- struct bcma_device *core;
-
- /* switch to chipc */
- core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
-
- switch (ai_get_chip_id(sih)) {
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- if (spuravoid == 1) {
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL0);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x11500010);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL1);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x000C0C06);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL2);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x0F600a08);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL3);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x00000000);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL4);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x2001E920);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL5);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x88888815);
- } else {
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL0);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x11100010);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL1);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x000c0c06);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL2);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x03000a08);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL3);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x00000000);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL4);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x200005c0);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL5);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x88888815);
- }
- tmp = 1 << 10;
- break;
-
- default:
- /* bail out */
- return;
- }
-
- bcma_set32(core, CHIPCREGOFFS(pmucontrol), tmp);
-}
-
u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
{
uint delay = PMU_MAX_TRANSITION_DLY;
switch (ai_get_chip_id(sih)) {
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- case BCM4313_CHIP_ID:
+ case BCMA_CHIP_ID_BCM43224:
+ case BCMA_CHIP_ID_BCM43225:
+ case BCMA_CHIP_ID_BCM4313:
delay = 3700;
break;
default:
@@ -270,9 +156,9 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
return clock;
switch (ai_get_chip_id(sih)) {
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- case BCM4313_CHIP_ID:
+ case BCMA_CHIP_ID_BCM43224:
+ case BCMA_CHIP_ID_BCM43225:
+ case BCMA_CHIP_ID_BCM4313:
/* always 20Mhz */
clock = 20000 * 1000;
break;
@@ -283,51 +169,9 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
return clock;
}
-/* initialize PMU */
-void si_pmu_init(struct si_pub *sih)
-{
- struct bcma_device *core;
-
- /* select chipc */
- core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
-
- if (ai_get_pmurev(sih) == 1)
- bcma_mask32(core, CHIPCREGOFFS(pmucontrol),
- ~PCTL_NOILP_ON_WAIT);
- else if (ai_get_pmurev(sih) >= 2)
- bcma_set32(core, CHIPCREGOFFS(pmucontrol), PCTL_NOILP_ON_WAIT);
-}
-
-/* initialize PMU resources */
-void si_pmu_res_init(struct si_pub *sih)
-{
- struct bcma_device *core;
- u32 min_mask = 0, max_mask = 0;
-
- /* select to chipc */
- core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
-
- /* Determine min/max rsrc masks */
- si_pmu_res_masks(sih, &min_mask, &max_mask);
-
- /* It is required to program max_mask first and then min_mask */
-
- /* Program max resource mask */
-
- if (max_mask)
- bcma_write32(core, CHIPCREGOFFS(max_res_mask), max_mask);
-
- /* Program min resource mask */
-
- if (min_mask)
- bcma_write32(core, CHIPCREGOFFS(min_res_mask), min_mask);
-
- /* Add some delay; allow resources to come up and settle. */
- mdelay(2);
-}
-
u32 si_pmu_measure_alpclk(struct si_pub *sih)
{
+ struct si_info *sii = container_of(sih, struct si_info, pub);
struct bcma_device *core;
u32 alp_khz;
@@ -335,7 +179,7 @@ u32 si_pmu_measure_alpclk(struct si_pub *sih)
return 0;
/* Remember original core before switch to chipc */
- core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+ core = sii->icbus->drv_cc.core;
if (bcma_read32(core, CHIPCREGOFFS(pmustatus)) & PST_EXTLPOAVAIL) {
u32 ilp_ctr, alp_hz;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
index 3e39c5e..f7cff87 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
@@ -26,10 +26,7 @@ extern u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
extern u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
extern u32 si_pmu_alp_clock(struct si_pub *sih);
extern void si_pmu_pllupd(struct si_pub *sih);
-extern void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid);
extern u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
-extern void si_pmu_init(struct si_pub *sih);
-extern void si_pmu_res_init(struct si_pub *sih);
extern u32 si_pmu_measure_alpclk(struct si_pub *sih);
#endif /* _BRCM_PMU_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index aa5d67f..5855f4f 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -311,7 +311,7 @@ extern uint brcms_c_detach(struct brcms_c_info *wlc);
extern int brcms_c_up(struct brcms_c_info *wlc);
extern uint brcms_c_down(struct brcms_c_info *wlc);
-extern bool brcms_c_chipmatch(u16 vendor, u16 device);
+extern bool brcms_c_chipmatch(struct bcma_device *core);
extern void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
extern void brcms_c_reset(struct brcms_c_info *wlc);
diff --git a/drivers/net/wireless/brcm80211/brcmutil/utils.c b/drivers/net/wireless/brcm80211/brcmutil/utils.c
index b45ab34..3e6405e 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/utils.c
+++ b/drivers/net/wireless/brcm80211/brcmutil/utils.c
@@ -43,6 +43,8 @@ EXPORT_SYMBOL(brcmu_pkt_buf_get_skb);
/* Free the driver packet. Free the tag if present */
void brcmu_pkt_buf_free_skb(struct sk_buff *skb)
{
+ if (!skb)
+ return;
WARN_ON(skb->next);
if (skb->destructor)
/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
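
Editor's note: the guard added to brcmu_pkt_buf_free_skb() makes it a no-op on NULL, matching the convention of kfree() and dev_kfree_skb() so callers can free unconditionally. A tiny user-space analogue of that convention, not the real skb API:

/* NULL-tolerant free helper; callers need no "if (ptr)" of their own. */
#include <stdio.h>
#include <stdlib.h>

struct pkt { char *data; };

static void pkt_free(struct pkt *p)
{
	if (!p)
		return;		/* tolerate NULL like kfree()/dev_kfree_skb() */
	free(p->data);
	free(p);
}

int main(void)
{
	struct pkt *p = calloc(1, sizeof(*p));

	pkt_free(p);
	pkt_free(NULL);		/* safe no-op */
	puts("done");
	return 0;
}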
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 333193f..bcc79b4 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -37,5 +37,6 @@
#define BCM4329_CHIP_ID 0x4329
#define BCM4330_CHIP_ID 0x4330
#define BCM4331_CHIP_ID 0x4331
+#define BCM4334_CHIP_ID 0x4334
#endif /* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/soc.h b/drivers/net/wireless/brcm80211/include/soc.h
index 4e9b7e4..123cfa8 100644
--- a/drivers/net/wireless/brcm80211/include/soc.h
+++ b/drivers/net/wireless/brcm80211/include/soc.h
@@ -19,68 +19,6 @@
#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */
-/* core codes */
-#define NODEV_CORE_ID 0x700 /* Invalid coreid */
-#define CC_CORE_ID 0x800 /* chipcommon core */
-#define ILINE20_CORE_ID 0x801 /* iline20 core */
-#define SRAM_CORE_ID 0x802 /* sram core */
-#define SDRAM_CORE_ID 0x803 /* sdram core */
-#define PCI_CORE_ID 0x804 /* pci core */
-#define MIPS_CORE_ID 0x805 /* mips core */
-#define ENET_CORE_ID 0x806 /* enet mac core */
-#define CODEC_CORE_ID 0x807 /* v90 codec core */
-#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */
-#define ADSL_CORE_ID 0x809 /* ADSL core */
-#define ILINE100_CORE_ID 0x80a /* iline100 core */
-#define IPSEC_CORE_ID 0x80b /* ipsec core */
-#define UTOPIA_CORE_ID 0x80c /* utopia core */
-#define PCMCIA_CORE_ID 0x80d /* pcmcia core */
-#define SOCRAM_CORE_ID 0x80e /* internal memory core */
-#define MEMC_CORE_ID 0x80f /* memc sdram core */
-#define OFDM_CORE_ID 0x810 /* OFDM phy core */
-#define EXTIF_CORE_ID 0x811 /* external interface core */
-#define D11_CORE_ID 0x812 /* 802.11 MAC core */
-#define APHY_CORE_ID 0x813 /* 802.11a phy core */
-#define BPHY_CORE_ID 0x814 /* 802.11b phy core */
-#define GPHY_CORE_ID 0x815 /* 802.11g phy core */
-#define MIPS33_CORE_ID 0x816 /* mips3302 core */
-#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */
-#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */
-#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */
-#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */
-#define SDIOH_CORE_ID 0x81b /* sdio host core */
-#define ROBO_CORE_ID 0x81c /* roboswitch core */
-#define ATA100_CORE_ID 0x81d /* parallel ATA core */
-#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */
-#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */
-#define PCIE_CORE_ID 0x820 /* pci express core */
-#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */
-#define SRAMC_CORE_ID 0x822 /* SRAM controller core */
-#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */
-#define ARM11_CORE_ID 0x824 /* ARM 1176 core */
-#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */
-#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */
-#define PMU_CORE_ID 0x827 /* PMU core */
-#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */
-#define SDIOD_CORE_ID 0x829 /* SDIO device core */
-#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */
-#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */
-#define MIPS74K_CORE_ID 0x82c /* mips 74k core */
-#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */
-#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */
-#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */
-#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */
-#define SC_CORE_ID 0x831 /* shared common core */
-#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */
-#define SPIH_CORE_ID 0x833 /* SPI host core */
-#define I2S_CORE_ID 0x834 /* I2S core */
-#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */
-#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */
-#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */
-#define DEF_AI_COMP 0xfff /* Default component, in ai chips it
- * maps all unused address ranges
- */
-
/* Common core control flags */
#define SICF_BIST_EN 0x8000
#define SICF_PME_EN 0x4000
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
index 75ef8f0..dc447c1 100644
--- a/drivers/net/wireless/hostap/hostap_proc.c
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -58,8 +58,7 @@ static int prism2_stats_proc_read(char *page, char **start, off_t off,
{
char *p = page;
local_info_t *local = (local_info_t *) data;
- struct comm_tallies_sums *sums = (struct comm_tallies_sums *)
- &local->comm_tallies;
+ struct comm_tallies_sums *sums = &local->comm_tallies;
if (off != 0) {
*eof = 1;
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 9cfae0c..95aa8e1 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1903,14 +1903,6 @@ static void ipw2100_down(struct ipw2100_priv *priv)
netif_stop_queue(priv->net_dev);
}
-/* Called by register_netdev() */
-static int ipw2100_net_init(struct net_device *dev)
-{
- struct ipw2100_priv *priv = libipw_priv(dev);
-
- return ipw2100_up(priv, 1);
-}
-
static int ipw2100_wdev_init(struct net_device *dev)
{
struct ipw2100_priv *priv = libipw_priv(dev);
@@ -6087,7 +6079,6 @@ static const struct net_device_ops ipw2100_netdev_ops = {
.ndo_stop = ipw2100_close,
.ndo_start_xmit = libipw_xmit,
.ndo_change_mtu = libipw_change_mtu,
- .ndo_init = ipw2100_net_init,
.ndo_tx_timeout = ipw2100_tx_timeout,
.ndo_set_mac_address = ipw2100_set_address,
.ndo_validate_addr = eth_validate_addr,
@@ -6329,6 +6320,10 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
printk(KERN_INFO DRV_NAME
": Detected Intel PRO/Wireless 2100 Network Connection\n");
+ err = ipw2100_up(priv, 1);
+ if (err)
+ goto fail;
+
err = ipw2100_wdev_init(dev);
if (err)
goto fail;
@@ -6338,12 +6333,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
* network device we would call ipw2100_up. This introduced a race
* condition with newer hotplug configurations (network was coming
* up and making calls before the device was initialized).
- *
- * If we called ipw2100_up before we registered the device, then the
- * device name wasn't registered. So, we instead use the net_dev->init
- * member to call a function that then just turns and calls ipw2100_up.
- * net_dev->init is called after name allocation but before the
- * notifier chain is called */
+ */
err = register_netdev(dev);
if (err) {
printk(KERN_WARNING DRV_NAME
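
Editor's note: the ipw2100 change drops the .ndo_init hook and instead brings the device up directly in the PCI probe path before register_netdev(), so the netdev only becomes visible once the hardware is initialized. A minimal ordering sketch under that assumption; the function names are placeholders, not the real driver entry points.

/* Probe ordering: bring the device up, then register the netdev. */
#include <stdio.h>

static int device_up(void)        { puts("device up");      return 0; }
static int wdev_init(void)        { puts("wiphy ready");     return 0; }
static int register_netdev_(void) { puts("netdev visible");  return 0; }

static int probe(void)
{
	int err;

	err = device_up();		/* previously deferred to .ndo_init */
	if (err)
		return err;
	err = wdev_init();
	if (err)
		return err;
	return register_netdev_();	/* last: device is fully initialized */
}

int main(void)
{
	return probe();
}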
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 0036737..0df4591 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -2701,6 +2701,20 @@ static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
}
+static void ipw_read_eeprom(struct ipw_priv *priv)
+{
+ int i;
+ __le16 *eeprom = (__le16 *) priv->eeprom;
+
+ IPW_DEBUG_TRACE(">>\n");
+
+ /* read entire contents of eeprom into private buffer */
+ for (i = 0; i < 128; i++)
+ eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
+
+ IPW_DEBUG_TRACE("<<\n");
+}
+
/*
* Either the device driver (i.e. the host) or the firmware can
* load eeprom data into the designated region in SRAM. If neither
@@ -2712,14 +2726,9 @@ static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
static void ipw_eeprom_init_sram(struct ipw_priv *priv)
{
int i;
- __le16 *eeprom = (__le16 *) priv->eeprom;
IPW_DEBUG_TRACE(">>\n");
- /* read entire contents of eeprom into private buffer */
- for (i = 0; i < 128; i++)
- eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
-
/*
If the data looks correct, then copy it to our private
copy. Otherwise let the firmware know to perform the operation
@@ -3643,8 +3652,10 @@ static int ipw_load(struct ipw_priv *priv)
/* ack fw init done interrupt */
ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
- /* read eeprom data and initialize the eeprom region of sram */
+ /* read eeprom data */
priv->eeprom_delay = 1;
+ ipw_read_eeprom(priv);
+ /* initialize the eeprom region of sram */
ipw_eeprom_init_sram(priv);
/* enable interrupts */
@@ -7069,9 +7080,7 @@ static int ipw_qos_activate(struct ipw_priv *priv,
}
IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
- err = ipw_send_qos_params_command(priv,
- (struct libipw_qos_parameters *)
- &(qos_parameters[0]));
+ err = ipw_send_qos_params_command(priv, &qos_parameters[0]);
if (err)
IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index 4b10157..d4fd29a 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -946,7 +946,7 @@ il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
case IEEE80211_BAND_5GHZ:
rs_sta->expected_tpt = il3945_expected_tpt_a;
break;
- case IEEE80211_NUM_BANDS:
+ default:
BUG();
break;
}
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 509301a..34f61a0 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -3405,7 +3405,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
return 0;
}
- if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+ if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
key_flags);
spin_unlock_irqrestore(&il->sta_lock, flags);
@@ -3420,7 +3420,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
il->stations[sta_id].sta.key.key_flags =
STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
- il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+ il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
@@ -5724,7 +5724,8 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
hw->wiphy->flags |=
- WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS;
+ WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
+ WIPHY_FLAG_IBSS_RSN;
/*
* For now, disable PS by default because it affects
@@ -5873,6 +5874,16 @@ il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
+ /*
+ * To support IBSS RSN, don't program group keys in IBSS, the
+ * hardware will then not attempt to decrypt the frames.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ D_MAC80211("leave - ad-hoc group key\n");
+ return -EOPNOTSUPP;
+ }
+
sta_id = il_sta_id_or_broadcast(il, sta);
if (sta_id == IL_INVALID_STATION)
return -EINVAL;
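
Editor's note: the il4965_mac_set_key() hunk above enables IBSS RSN by refusing to program group keys in ad-hoc mode, so the hardware never tries to decrypt multicast frames and mac80211 falls back to software crypto for them. A short sketch of that filter, with simplified stand-in types and flags:

/* Reject group keys in ad-hoc mode; program only pairwise keys in HW. */
#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

enum iftype { IF_STATION, IF_ADHOC };

struct key { bool pairwise; };

static int set_key_sketch(enum iftype type, const struct key *key)
{
	if (type == IF_ADHOC && !key->pairwise)
		return -EOPNOTSUPP;	/* leave group keys to software */
	return 0;			/* program pairwise key in hardware */
}

int main(void)
{
	struct key group = { .pairwise = false };
	struct key pair  = { .pairwise = true };

	printf("adhoc group key: %d\n", set_key_sketch(IF_ADHOC, &group));
	printf("adhoc pair key : %d\n", set_key_sketch(IF_ADHOC, &pair));
	return 0;
}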
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index cbf2dc1..0370403 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4717,10 +4717,11 @@ il_check_stuck_queue(struct il_priv *il, int cnt)
struct il_tx_queue *txq = &il->txq[cnt];
struct il_queue *q = &txq->q;
unsigned long timeout;
+ unsigned long now = jiffies;
int ret;
if (q->read_ptr == q->write_ptr) {
- txq->time_stamp = jiffies;
+ txq->time_stamp = now;
return 0;
}
@@ -4728,9 +4729,9 @@ il_check_stuck_queue(struct il_priv *il, int cnt)
txq->time_stamp +
msecs_to_jiffies(il->cfg->wd_timeout);
- if (time_after(jiffies, timeout)) {
+ if (time_after(now, timeout)) {
IL_ERR("Queue %d stuck for %u ms.\n", q->id,
- il->cfg->wd_timeout);
+ jiffies_to_msecs(now - txq->time_stamp));
ret = il_force_reset(il, false);
return (ret == -EAGAIN) ? 0 : 1;
}
@@ -4767,14 +4768,12 @@ il_bg_watchdog(unsigned long data)
return;
/* monitor and check for other stuck queues */
- if (il_is_any_associated(il)) {
- for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
- /* skip as we already checked the command queue */
- if (cnt == il->cmd_queue)
- continue;
- if (il_check_stuck_queue(il, cnt))
- return;
- }
+ for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+ /* skip as we already checked the command queue */
+ if (cnt == il->cmd_queue)
+ continue;
+ if (il_check_stuck_queue(il, cnt))
+ return;
}
mod_timer(&il->watchdog,
@@ -5360,7 +5359,7 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changes & BSS_CHANGED_ASSOC) {
D_MAC80211("ASSOC %d\n", bss_conf->assoc);
if (bss_conf->assoc) {
- il->timestamp = bss_conf->last_tsf;
+ il->timestamp = bss_conf->sync_tsf;
if (!il_is_rfkill(il))
il->ops->post_associate(il);
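
Editor's note: il_check_stuck_queue() above now samples jiffies once and reports how long the queue has actually been stalled rather than just the configured timeout. The sketch below models that logic with a plain counter; time_after() is reimplemented here with the usual wrap-safe comparison, and the tick values are made up for the example.

/* Stuck-queue check: one timestamp, wrap-safe compare, report stall time. */
#include <stdio.h>

#define time_after(a, b) ((long)((b) - (a)) < 0)

struct txq { unsigned long time_stamp; };

static int check_stuck_queue(struct txq *q, unsigned long now,
			     unsigned long wd_timeout)
{
	unsigned long timeout = q->time_stamp + wd_timeout;

	if (time_after(now, timeout)) {
		printf("queue stuck for %lu ticks\n", now - q->time_stamp);
		return 1;	/* the caller would force a reset */
	}
	return 0;
}

int main(void)
{
	struct txq q = { .time_stamp = 1000 };

	printf("ok:    %d\n", check_stuck_queue(&q, 1500, 1000));
	printf("stuck: %d\n", check_stuck_queue(&q, 2500, 1000));
	return 0;
}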
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index db6c6e5..727fbb5 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -6,6 +6,7 @@ config IWLWIFI
select LEDS_CLASS
select LEDS_TRIGGERS
select MAC80211_LEDS
+ select IWLDVM
---help---
Select to build the driver supporting the:
@@ -41,6 +42,10 @@ config IWLWIFI
say M here and read <file:Documentation/kbuild/modules.txt>. The
module will be called iwlwifi.
+config IWLDVM
+ tristate "Intel Wireless WiFi"
+ depends on IWLWIFI
+
menu "Debugging Options"
depends on IWLWIFI
@@ -137,11 +142,3 @@ config IWLWIFI_EXPERIMENTAL_MFP
even if the microcode doesn't advertise it.
Say Y only if you want to experiment with MFP.
-
-config IWLWIFI_UCODE16
- bool "support uCode 16.0"
- depends on IWLWIFI
- help
- This option enables support for uCode version 16.0.
-
- Say Y if you want to use 16.0 microcode.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 406f297..170ec33 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,28 +1,19 @@
-# WIFI
+# common
obj-$(CONFIG_IWLWIFI) += iwlwifi.o
-iwlwifi-objs := iwl-agn.o iwl-agn-rs.o iwl-mac80211.o
-iwlwifi-objs += iwl-ucode.o iwl-agn-tx.o iwl-debug.o
-iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
-iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o
-
-iwlwifi-objs += iwl-eeprom.o iwl-power.o
-iwlwifi-objs += iwl-scan.o iwl-led.o
-iwlwifi-objs += iwl-agn-rxon.o iwl-agn-devices.o
-iwlwifi-objs += iwl-5000.o
-iwlwifi-objs += iwl-6000.o
-iwlwifi-objs += iwl-1000.o
-iwlwifi-objs += iwl-2000.o
-iwlwifi-objs += iwl-pci.o
+iwlwifi-objs += iwl-io.o
iwlwifi-objs += iwl-drv.o
+iwlwifi-objs += iwl-debug.o
iwlwifi-objs += iwl-notif-wait.o
-iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
-
+iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
+iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
+iwlwifi-objs += pcie/1000.o pcie/2000.o pcie/5000.o pcie/6000.o
-iwlwifi-$(CONFIG_IWLWIFI_UCODE16) += iwl-phy-db.o
-iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
-iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
+iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o
-CFLAGS_iwl-devtrace.o := -I$(src)
+ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
-ccflags-y += -D__CHECK_ENDIAN__
+
+obj-$(CONFIG_IWLDVM) += dvm/
+
+CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/iwlwifi/dvm/Makefile b/drivers/net/wireless/iwlwifi/dvm/Makefile
new file mode 100644
index 0000000..5ff76b2
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/dvm/Makefile
@@ -0,0 +1,13 @@
+# DVM
+obj-$(CONFIG_IWLDVM) += iwldvm.o
+iwldvm-objs += main.o rs.o mac80211.o ucode.o tx.o
+iwldvm-objs += lib.o calib.o tt.o sta.o rx.o
+
+iwldvm-objs += power.o
+iwldvm-objs += scan.o led.o
+iwldvm-objs += rxon.o devices.o
+
+iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
+iwldvm-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += testmode.o
+
+ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 79c0fe0..9bb16bd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -63,9 +63,10 @@
#ifndef __iwl_agn_h__
#define __iwl_agn_h__
-#include "iwl-dev.h"
#include "iwl-config.h"
+#include "dev.h"
+
/* The first 11 queues (0-10) are used otherwise */
#define IWLAGN_FIRST_AMPDU_QUEUE 11
@@ -91,7 +92,6 @@ extern struct iwl_lib_ops iwl6030_lib;
#define STATUS_CT_KILL 1
#define STATUS_ALIVE 2
#define STATUS_READY 3
-#define STATUS_GEO_CONFIGURED 4
#define STATUS_EXIT_PENDING 5
#define STATUS_STATISTICS 6
#define STATUS_SCANNING 7
@@ -101,6 +101,7 @@ extern struct iwl_lib_ops iwl6030_lib;
#define STATUS_CHANNEL_SWITCH_PENDING 11
#define STATUS_SCAN_COMPLETE 12
#define STATUS_POWER_PMI 13
+#define STATUS_SCAN_ROC_EXPIRED 14
struct iwl_ucode_capabilities;
@@ -255,6 +256,10 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
enum iwl_scan_type scan_type,
enum ieee80211_band band);
+void iwl_scan_roc_expired(struct iwl_priv *priv);
+void iwl_scan_offchannel_skb(struct iwl_priv *priv);
+void iwl_scan_offchannel_skb_status(struct iwl_priv *priv);
+
/* For faster active scanning, scan will move to the next channel if fewer than
* PLCP_QUIET_THRESH packets are heard on this channel within
* ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
@@ -264,7 +269,7 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
-#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
+#define IWL_SCAN_CHECK_WATCHDOG (HZ * 15)
/* bt coex */
@@ -390,8 +395,10 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
}
extern int iwl_alive_start(struct iwl_priv *priv);
-/* svtool */
+
+/* testmode support */
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
+
extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data,
int len);
extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
@@ -399,13 +406,16 @@ extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
struct netlink_callback *cb,
void *data, int len);
extern void iwl_testmode_init(struct iwl_priv *priv);
-extern void iwl_testmode_cleanup(struct iwl_priv *priv);
+extern void iwl_testmode_free(struct iwl_priv *priv);
+
#else
+
static inline
int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
{
return -ENOSYS;
}
+
static inline
int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
struct netlink_callback *cb,
@@ -413,12 +423,12 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
{
return -ENOSYS;
}
-static inline
-void iwl_testmode_init(struct iwl_priv *priv)
+
+static inline void iwl_testmode_init(struct iwl_priv *priv)
{
}
-static inline
-void iwl_testmode_cleanup(struct iwl_priv *priv)
+
+static inline void iwl_testmode_free(struct iwl_priv *priv)
{
}
#endif
@@ -437,10 +447,8 @@ static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv,
static inline int iwl_is_ready(struct iwl_priv *priv)
{
- /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
- * set but EXIT_PENDING is not */
+ /* The adapter is 'ready' if READY is set and EXIT_PENDING is not */
return test_bit(STATUS_READY, &priv->status) &&
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
!test_bit(STATUS_EXIT_PENDING, &priv->status);
}
@@ -518,85 +526,4 @@ static inline const char *iwl_dvm_get_cmd_string(u8 cmd)
return s;
return "UNKNOWN";
}
-
-/* API method exported for mvm hybrid state */
-void iwl_setup_deferred_work(struct iwl_priv *priv);
-int iwl_send_wimax_coex(struct iwl_priv *priv);
-int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
-void iwl_option_config(struct iwl_priv *priv);
-void iwl_set_hw_params(struct iwl_priv *priv);
-void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags);
-int iwl_init_drv(struct iwl_priv *priv);
-void iwl_uninit_drv(struct iwl_priv *priv);
-void iwl_send_bt_config(struct iwl_priv *priv);
-void iwl_rf_kill_ct_config(struct iwl_priv *priv);
-int iwl_setup_interface(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-void iwl_teardown_interface(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- bool mode_change);
-int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-void iwlagn_check_needed_chains(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_bss_conf *bss_conf);
-void iwlagn_chain_noise_reset(struct iwl_priv *priv);
-int iwlagn_update_beacon(struct iwl_priv *priv,
- struct ieee80211_vif *vif);
-void iwl_tt_handler(struct iwl_priv *priv);
-void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode);
-void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue);
-void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state);
-void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb);
-void iwl_nic_error(struct iwl_op_mode *op_mode);
-void iwl_cmd_queue_full(struct iwl_op_mode *op_mode);
-void iwl_nic_config(struct iwl_op_mode *op_mode);
-int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, bool set);
-void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
- enum ieee80211_rssi_event rssi_event);
-int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw);
-int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw);
-void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop);
-void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue);
-void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_channel_switch *ch_switch);
-int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- enum ieee80211_sta_state old_state,
- enum ieee80211_sta_state new_state);
-int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size);
-int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req);
-void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd,
- struct ieee80211_sta *sta);
-void iwlagn_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast);
-int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params);
-void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_gtk_rekey_data *data);
-void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta,
- u32 iv32, u16 *phase1key);
-int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key);
-void iwlagn_mac_stop(struct ieee80211_hw *hw);
-void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
-int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index 95f27f1..f2dd671 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -63,10 +63,11 @@
#include <linux/slab.h>
#include <net/mac80211.h>
-#include "iwl-dev.h"
-#include "iwl-agn-calib.h"
#include "iwl-trans.h"
-#include "iwl-agn.h"
+
+#include "dev.h"
+#include "calib.h"
+#include "agn.h"
/*****************************************************************************
* INIT calibrations framework
@@ -832,14 +833,14 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
* To be safe, simply mask out any chains that we know
* are not on the device.
*/
- active_chains &= priv->hw_params.valid_rx_ant;
+ active_chains &= priv->eeprom_data->valid_rx_ant;
num_tx_chains = 0;
for (i = 0; i < NUM_RX_CHAINS; i++) {
/* loops on all the bits of
* priv->hw_setting.valid_tx_ant */
u8 ant_msk = (1 << i);
- if (!(priv->hw_params.valid_tx_ant & ant_msk))
+ if (!(priv->eeprom_data->valid_tx_ant & ant_msk))
continue;
num_tx_chains++;
@@ -853,7 +854,7 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
* connect the first valid tx chain
*/
first_chain =
- find_first_chain(priv->hw_params.valid_tx_ant);
+ find_first_chain(priv->eeprom_data->valid_tx_ant);
data->disconn_array[first_chain] = 0;
active_chains |= BIT(first_chain);
IWL_DEBUG_CALIB(priv,
@@ -863,13 +864,13 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
}
}
- if (active_chains != priv->hw_params.valid_rx_ant &&
+ if (active_chains != priv->eeprom_data->valid_rx_ant &&
active_chains != priv->chain_noise_data.active_chains)
IWL_DEBUG_CALIB(priv,
"Detected that not all antennas are connected! "
"Connected: %#x, valid: %#x.\n",
active_chains,
- priv->hw_params.valid_rx_ant);
+ priv->eeprom_data->valid_rx_ant);
/* Save for use within RXON, TX, SCAN commands, etc. */
data->active_chains = active_chains;
@@ -1054,7 +1055,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
priv->cfg->bt_params->advanced_bt_coexist) {
/* Disable disconnected antenna algorithm for advanced
bt coex, assuming valid antennas are connected */
- data->active_chains = priv->hw_params.valid_rx_ant;
+ data->active_chains = priv->eeprom_data->valid_rx_ant;
for (i = 0; i < NUM_RX_CHAINS; i++)
if (!(data->active_chains & (1<<i)))
data->disconn_array[i] = 1;
@@ -1083,8 +1084,9 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
min_average_noise, min_average_noise_antenna_i);
- iwlagn_gain_computation(priv, average_noise,
- find_first_chain(priv->hw_params.valid_rx_ant));
+ iwlagn_gain_computation(
+ priv, average_noise,
+ find_first_chain(priv->eeprom_data->valid_rx_ant));
/* Some power changes may have been made during the calibration.
* Update and commit the RXON
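The calibration changes above replace priv->hw_params.valid_rx_ant/valid_tx_ant with the same antenna masks kept in priv->eeprom_data. As a minimal, self-contained sketch of how that per-antenna bitmask bookkeeping works (userspace C with illustrative ANT_* values, not the driver's headers):

#include <stdio.h>

/* Illustrative antenna bits; the driver keeps equivalent masks in
 * priv->eeprom_data->valid_{rx,tx}_ant. */
#define ANT_A (1 << 0)
#define ANT_B (1 << 1)
#define ANT_C (1 << 2)
#define NUM_RX_CHAINS 3

/* Index of the lowest set bit, i.e. the first connected chain. */
static int find_first_chain(unsigned mask)
{
	int i;

	for (i = 0; i < NUM_RX_CHAINS; i++)
		if (mask & (1u << i))
			return i;
	return 0;
}

int main(void)
{
	unsigned valid_rx_ant = ANT_A | ANT_B;          /* from EEPROM data */
	unsigned active_chains = ANT_A | ANT_B | ANT_C; /* from noise measurement */

	/* Mask out chains the EEPROM says are not fitted, as the calib code does. */
	active_chains &= valid_rx_ant;

	/* If nothing is left, reconnect the first valid chain as a fallback. */
	if (!active_chains)
		active_chains = 1u << find_first_chain(valid_rx_ant);

	printf("active chains: %#x (first valid chain: %d)\n",
	       active_chains, find_first_chain(valid_rx_ant));
	return 0;
}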
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/drivers/net/wireless/iwlwifi/dvm/calib.h
index dbe1378..2349f39 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.h
@@ -62,8 +62,8 @@
#ifndef __iwl_calib_h__
#define __iwl_calib_h__
-#include "iwl-dev.h"
-#include "iwl-commands.h"
+#include "dev.h"
+#include "commands.h"
void iwl_chain_noise_calibration(struct iwl_priv *priv);
void iwl_sensitivity_calibration(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 9af6a23..4a361c5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -61,9 +61,9 @@
*
*****************************************************************************/
/*
- * Please use this file (iwl-commands.h) only for uCode API definitions.
+ * Please use this file (commands.h) only for uCode API definitions.
* Please use iwl-xxxx-hw.h for hardware-related definitions.
- * Please use iwl-dev.h for driver implementation definitions.
+ * Please use dev.h for driver implementation definitions.
*/
#ifndef __iwl_commands_h__
@@ -190,6 +190,44 @@ enum {
REPLY_MAX = 0xff
};
+/*
+ * Minimum number of queues. MAX_NUM is defined in hw specific files.
+ * Set the minimum to accommodate
+ * - 4 standard TX queues
+ * - the command queue
+ * - 4 PAN TX queues
+ * - the PAN multicast queue, and
+ * - the AUX (TX during scan dwell) queue.
+ */
+#define IWL_MIN_NUM_QUEUES 11
+
+/*
+ * Command queue depends on iPAN support.
+ */
+#define IWL_DEFAULT_CMD_QUEUE_NUM 4
+#define IWL_IPAN_CMD_QUEUE_NUM 9
+
+#define IWL_TX_FIFO_BK 0 /* shared */
+#define IWL_TX_FIFO_BE 1
+#define IWL_TX_FIFO_VI 2 /* shared */
+#define IWL_TX_FIFO_VO 3
+#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK
+#define IWL_TX_FIFO_BE_IPAN 4
+#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI
+#define IWL_TX_FIFO_VO_IPAN 5
+/* re-uses the VO FIFO, uCode will properly flush/schedule */
+#define IWL_TX_FIFO_AUX 5
+#define IWL_TX_FIFO_UNUSED 255
+
+#define IWLAGN_CMD_FIFO_NUM 7
+
+/*
+ * This queue number is required for proper operation
+ * because the ucode will stop/start the scheduler as
+ * required.
+ */
+#define IWL_IPAN_MCAST_QUEUE 8
+
/******************************************************************************
* (0)
* Commonly used structures and definitions:
@@ -197,9 +235,6 @@ enum {
*
*****************************************************************************/
-/* iwl_cmd_header flags value */
-#define IWL_CMD_FAILED_MSK 0x40
-
/**
* iwlagn rate_n_flags bit fields
*
@@ -758,8 +793,6 @@ struct iwl_qosparam_cmd {
#define IWLAGN_BROADCAST_ID 15
#define IWLAGN_STATION_COUNT 16
-#define IWL_INVALID_STATION 255
-#define IWL_MAX_TID_COUNT 8
#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT
#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
@@ -1872,6 +1905,7 @@ struct iwl_bt_cmd {
#define IWLAGN_BT_PRIO_BOOST_MAX 0xFF
#define IWLAGN_BT_PRIO_BOOST_MIN 0x00
#define IWLAGN_BT_PRIO_BOOST_DEFAULT 0xF0
+#define IWLAGN_BT_PRIO_BOOST_DEFAULT32 0xF0F0F0F0
#define IWLAGN_BT_MAX_KILL_DEFAULT 5
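The IWL_MIN_NUM_QUEUES comment added above enumerates how the minimum of 11 is reached. A tiny compile-time check of that accounting (standalone C11, not driver code):

#include <assert.h>

/* The accounting behind IWL_MIN_NUM_QUEUES, as spelled out in the comment:
 * 4 standard TX queues + command queue + 4 PAN TX queues
 * + PAN multicast queue + AUX queue. */
#define IWL_MIN_NUM_QUEUES 11

int main(void)
{
	static_assert(4 + 1 + 4 + 1 + 1 == IWL_MIN_NUM_QUEUES,
		      "queue accounting must add up to the minimum");
	return 0;
}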
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index e7c157e..46782f1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -30,16 +30,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/debugfs.h>
-
#include <linux/ieee80211.h>
#include <net/mac80211.h>
-
-
-#include "iwl-dev.h"
#include "iwl-debug.h"
#include "iwl-io.h"
-#include "iwl-agn.h"
-#include "iwl-modparams.h"
+#include "dev.h"
+#include "agn.h"
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
@@ -87,7 +83,7 @@ static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
#define DEBUGFS_READ_FILE_OPS(name) \
DEBUGFS_READ_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
- .read = iwl_dbgfs_##name##_read, \
+ .read = iwl_dbgfs_##name##_read, \
.open = simple_open, \
.llseek = generic_file_llseek, \
};
@@ -307,13 +303,13 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
const u8 *ptr;
char *buf;
u16 eeprom_ver;
- size_t eeprom_len = priv->cfg->base_params->eeprom_size;
+ size_t eeprom_len = priv->eeprom_blob_size;
buf_size = 4 * eeprom_len + 256;
if (eeprom_len % 16)
return -ENODATA;
- ptr = priv->eeprom;
+ ptr = priv->eeprom_blob;
if (!ptr)
return -ENOMEM;
@@ -322,11 +318,9 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
if (!buf)
return -ENOMEM;
- eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
- pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, "
- "version: 0x%x\n",
- (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
- ? "OTP" : "EEPROM", eeprom_ver);
+ eeprom_ver = priv->eeprom_data->eeprom_version;
+ pos += scnprintf(buf + pos, buf_size - pos,
+ "NVM version: 0x%x\n", eeprom_ver);
for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
@@ -351,9 +345,6 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
char *buf;
ssize_t ret;
- if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
- return -EAGAIN;
-
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -426,8 +417,6 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
test_bit(STATUS_ALIVE, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
test_bit(STATUS_READY, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
- test_bit(STATUS_GEO_CONFIGURED, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
test_bit(STATUS_EXIT_PENDING, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
@@ -1341,17 +1330,17 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
pos += scnprintf(buf + pos, bufsz - pos,
"tx power: (1/2 dB step)\n");
- if ((priv->hw_params.valid_tx_ant & ANT_A) &&
+ if ((priv->eeprom_data->valid_tx_ant & ANT_A) &&
tx->tx_power.ant_a)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna A:",
tx->tx_power.ant_a);
- if ((priv->hw_params.valid_tx_ant & ANT_B) &&
+ if ((priv->eeprom_data->valid_tx_ant & ANT_B) &&
tx->tx_power.ant_b)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna B:",
tx->tx_power.ant_b);
- if ((priv->hw_params.valid_tx_ant & ANT_C) &&
+ if ((priv->eeprom_data->valid_tx_ant & ANT_C) &&
tx->tx_power.ant_c)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna C:",
@@ -2239,6 +2228,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
return count;
}
+#ifdef CONFIG_IWLWIFI_DEBUG
static ssize_t iwl_dbgfs_log_event_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -2265,6 +2255,10 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
char buf[8];
int buf_size;
+ /* check that the interface is up */
+ if (!iwl_is_ready(priv))
+ return -EAGAIN;
+
memset(buf, 0, sizeof(buf));
buf_size = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, buf_size))
@@ -2276,6 +2270,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
return count;
}
+#endif
static ssize_t iwl_dbgfs_calib_disabled_read(struct file *file,
char __user *user_buf,
@@ -2345,7 +2340,9 @@ DEBUGFS_READ_FILE_OPS(bt_traffic);
DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
DEBUGFS_READ_FILE_OPS(reply_tx_error);
DEBUGFS_WRITE_FILE_OPS(echo_test);
+#ifdef CONFIG_IWLWIFI_DEBUG
DEBUGFS_READ_WRITE_FILE_OPS(log_event);
+#endif
DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
/*
@@ -2405,7 +2402,9 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR);
+#ifdef CONFIG_IWLWIFI_DEBUG
DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR);
+#endif
if (iwl_advanced_bt_coexist(priv))
DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
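The reworked nvm debugfs read above dumps priv->eeprom_blob in 16-byte rows. Roughly equivalent formatting, sketched as standalone C (simplified; the driver itself uses hex_dump_to_buffer()):

#include <stdio.h>
#include <stddef.h>

/* Roughly the row format produced by the nvm debugfs entry: an offset
 * followed by up to 16 bytes of the EEPROM blob in hex. */
static void dump_blob(const unsigned char *blob, size_t len)
{
	size_t ofs, i;

	for (ofs = 0; ofs < len; ofs += 16) {
		printf("0x%04zx ", ofs);
		for (i = 0; i < 16 && ofs + i < len; i++)
			printf("%02x ", blob[ofs + i]);
		putchar('\n');
	}
}

int main(void)
{
	unsigned char blob[32];
	size_t i;

	for (i = 0; i < sizeof(blob); i++)
		blob[i] = (unsigned char)i;

	dump_blob(blob, sizeof(blob));
	return 0;
}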
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 7006237..054f728 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -24,8 +24,8 @@
*
*****************************************************************************/
/*
- * Please use this file (iwl-dev.h) for driver implementation definitions.
- * Please use iwl-commands.h for uCode API definitions.
+ * Please use this file (dev.h) for driver implementation definitions.
+ * Please use commands.h for uCode API definitions.
*/
#ifndef __iwl_dev_h__
@@ -39,17 +39,20 @@
#include <linux/mutex.h>
#include "iwl-fw.h"
-#include "iwl-eeprom.h"
+#include "iwl-eeprom-parse.h"
#include "iwl-csr.h"
#include "iwl-debug.h"
#include "iwl-agn-hw.h"
-#include "iwl-led.h"
-#include "iwl-power.h"
-#include "iwl-agn-rs.h"
-#include "iwl-agn-tt.h"
-#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-notif-wait.h"
+#include "iwl-trans.h"
+
+#include "led.h"
+#include "power.h"
+#include "rs.h"
+#include "tt.h"
+
+#include "iwl-test.h"
/* CT-KILL constants */
#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
@@ -87,49 +90,6 @@
#define IWL_NUM_SCAN_RATES (2)
-/*
- * One for each channel, holds all channel setup data
- * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
- * with one another!
- */
-struct iwl_channel_info {
- struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
- struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
- * HT40 channel */
-
- u8 channel; /* channel number */
- u8 flags; /* flags copied from EEPROM */
- s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
- s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
- s8 min_power; /* always 0 */
- s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
-
- u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
- u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
- enum ieee80211_band band;
-
- /* HT40 channel info */
- s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
- u8 ht40_flags; /* flags copied from EEPROM */
- u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
-};
-
-/*
- * Minimum number of queues. MAX_NUM is defined in hw specific files.
- * Set the minimum to accommodate
- * - 4 standard TX queues
- * - the command queue
- * - 4 PAN TX queues
- * - the PAN multicast queue, and
- * - the AUX (TX during scan dwell) queue.
- */
-#define IWL_MIN_NUM_QUEUES 11
-
-/*
- * Command queue depends on iPAN support.
- */
-#define IWL_DEFAULT_CMD_QUEUE_NUM 4
-#define IWL_IPAN_CMD_QUEUE_NUM 9
#define IEEE80211_DATA_LEN 2304
#define IEEE80211_4ADDR_LEN 30
@@ -153,29 +113,6 @@ union iwl_ht_rate_supp {
};
};
-#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
-#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
-#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
-#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
-#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
-#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
-#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
-
-/*
- * Maximal MPDU density for TX aggregation
- * 4 - 2us density
- * 5 - 4us density
- * 6 - 8us density
- * 7 - 16us density
- */
-#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
-#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
-#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
-#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
-#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
-#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
-#define CFG_HT_MPDU_DENSITY_MIN (0x1)
-
struct iwl_ht_config {
bool single_chain_sufficient;
enum ieee80211_smps_mode smps; /* current smps mode */
@@ -445,23 +382,6 @@ enum {
MEASUREMENT_ACTIVE = (1 << 1),
};
-enum iwl_nvm_type {
- NVM_DEVICE_TYPE_EEPROM = 0,
- NVM_DEVICE_TYPE_OTP,
-};
-
-/*
- * Two types of OTP memory access modes
- * IWL_OTP_ACCESS_ABSOLUTE - absolute address mode,
- * based on physical memory addressing
- * IWL_OTP_ACCESS_RELATIVE - relative address mode,
- * based on logical memory addressing
- */
-enum iwl_access_mode {
- IWL_OTP_ACCESS_ABSOLUTE,
- IWL_OTP_ACCESS_RELATIVE,
-};
-
/* reply_tx_statistics (for _agn devices) */
struct reply_tx_error_statistics {
u32 pp_delay;
@@ -632,10 +552,6 @@ enum iwl_scan_type {
*
* @tx_chains_num: Number of TX chains
* @rx_chains_num: Number of RX chains
- * @valid_tx_ant: usable antennas for TX
- * @valid_rx_ant: usable antennas for RX
- * @ht40_channel: is 40MHz width possible: BIT(IEEE80211_BAND_XXX)
- * @sku: sku read from EEPROM
* @ct_kill_threshold: temperature threshold - in hw dependent unit
* @ct_kill_exit_threshold: when to re-enable the device - in hw dependent unit
* relevant for 1000, 6000 and up
@@ -645,11 +561,7 @@ enum iwl_scan_type {
struct iwl_hw_params {
u8 tx_chains_num;
u8 rx_chains_num;
- u8 valid_tx_ant;
- u8 valid_rx_ant;
- u8 ht40_channel;
bool use_rts_for_aggregation;
- u16 sku;
u32 ct_kill_threshold;
u32 ct_kill_exit_threshold;
@@ -664,31 +576,10 @@ struct iwl_lib_ops {
/* device specific configuration */
void (*nic_config)(struct iwl_priv *priv);
- /* eeprom operations (as defined in iwl-eeprom.h) */
- struct iwl_eeprom_ops eeprom_ops;
-
/* temperature */
void (*temperature)(struct iwl_priv *priv);
};
-#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
-struct iwl_testmode_trace {
- u32 buff_size;
- u32 total_size;
- u32 num_chunks;
- u8 *cpu_addr;
- u8 *trace_addr;
- dma_addr_t dma_addr;
- bool trace_enabled;
-};
-struct iwl_testmode_mem {
- u32 buff_size;
- u32 num_chunks;
- u8 *buff_addr;
- bool read_in_progress;
-};
-#endif
-
struct iwl_wipan_noa_data {
struct rcu_head rcu_head;
u32 length;
@@ -735,8 +626,6 @@ struct iwl_priv {
/* ieee device used by generic ieee processing code */
struct ieee80211_hw *hw;
- struct ieee80211_channel *ieee_channels;
- struct ieee80211_rate *ieee_rates;
struct list_head calib_results;
@@ -747,16 +636,12 @@ struct iwl_priv {
enum ieee80211_band band;
u8 valid_contexts;
- void (*pre_rx_handler)(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb);
int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
struct iwl_notif_wait_data notif_wait;
- struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
-
/* spectrum measurement report caching */
struct iwl_spectrum_notification measure_report;
u8 measurement_status;
@@ -787,11 +672,6 @@ struct iwl_priv {
bool ucode_loaded;
bool init_ucode_run; /* Don't run init uCode again */
- /* we allocate array of iwl_channel_info for NIC's valid channels.
- * Access via channel # using indirect index array */
- struct iwl_channel_info *channel_info; /* channel info array */
- u8 channel_count; /* # of channels */
-
u8 plcp_delta_threshold;
/* thermal calibration */
@@ -846,6 +726,7 @@ struct iwl_priv {
struct iwl_station_entry stations[IWLAGN_STATION_COUNT];
unsigned long ucode_key_table;
struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
+ atomic_t num_aux_in_flight;
u8 mac80211_registered;
@@ -950,10 +831,8 @@ struct iwl_priv {
struct delayed_work scan_check;
- /* TX Power */
+ /* TX Power settings */
s8 tx_power_user_lmt;
- s8 tx_power_device_lmt;
- s8 tx_power_lmt_in_half_dbm; /* max tx power in half-dBm format */
s8 tx_power_next;
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -964,9 +843,10 @@ struct iwl_priv {
void *wowlan_sram;
#endif /* CONFIG_IWLWIFI_DEBUGFS */
- /* eeprom -- this is in the card's little endian byte order */
- u8 *eeprom;
- enum iwl_nvm_type nvm_device_type;
+ struct iwl_eeprom_data *eeprom_data;
+ /* eeprom blob for debugfs/testmode */
+ u8 *eeprom_blob;
+ size_t eeprom_blob_size;
struct work_struct txpower_work;
u32 calib_disabled;
@@ -979,9 +859,9 @@ struct iwl_priv {
struct led_classdev led;
unsigned long blink_on, blink_off;
bool led_registered;
+
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
- struct iwl_testmode_trace testmode_trace;
- struct iwl_testmode_mem testmode_mem;
+ struct iwl_test tst;
u32 tm_fixed_rate;
#endif
@@ -1001,8 +881,6 @@ struct iwl_priv {
enum iwl_ucode_type cur_ucode;
}; /*iwl_priv */
-extern struct kmem_cache *iwl_tx_cmd_pool;
-
static inline struct iwl_rxon_context *
iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
{
@@ -1036,36 +914,4 @@ static inline int iwl_is_any_associated(struct iwl_priv *priv)
return false;
}
-static inline int is_channel_valid(const struct iwl_channel_info *ch_info)
-{
- if (ch_info == NULL)
- return 0;
- return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
-}
-
-static inline int is_channel_radar(const struct iwl_channel_info *ch_info)
-{
- return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
-}
-
-static inline u8 is_channel_a_band(const struct iwl_channel_info *ch_info)
-{
- return ch_info->band == IEEE80211_BAND_5GHZ;
-}
-
-static inline u8 is_channel_bg_band(const struct iwl_channel_info *ch_info)
-{
- return ch_info->band == IEEE80211_BAND_2GHZ;
-}
-
-static inline int is_channel_passive(const struct iwl_channel_info *ch)
-{
- return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
-}
-
-static inline int is_channel_ibss(const struct iwl_channel_info *ch)
-{
- return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
-}
-
#endif /* __iwl_dev_h__ */
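The CFG_HT_RX_AMPDU_FACTOR_* definitions dropped from dev.h above map to the 802.11n maximum A-MPDU length exponent, where the limit is 2^(13 + factor) - 1 octets. A short standalone illustration:

#include <stdio.h>

/* 802.11n maximum A-MPDU length: 2^(13 + factor) - 1 octets, which is what
 * the 8K/16K/32K/64K names in the removed defines refer to. */
int main(void)
{
	int factor;

	for (factor = 0; factor <= 3; factor++)
		printf("factor %d -> max A-MPDU length %u octets\n",
		       factor, (1u << (13 + factor)) - 1);
	return 0;
}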
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index 48533b3..349c205 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -27,11 +27,14 @@
/*
* DVM device-specific data & functions
*/
-#include "iwl-agn.h"
-#include "iwl-dev.h"
-#include "iwl-commands.h"
#include "iwl-io.h"
#include "iwl-prph.h"
+#include "iwl-eeprom-parse.h"
+
+#include "agn.h"
+#include "dev.h"
+#include "commands.h"
+
/*
* 1000 series
@@ -58,11 +61,6 @@ static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
/* NIC configuration for 1000 series */
static void iwl1000_nic_config(struct iwl_priv *priv)
{
- /* set CSR_HW_CONFIG_REG for uCode use */
- iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
- CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
-
/* Setting digital SVR for 1000 card to 1.32V */
/* locking is acquired in iwl_set_bits_mask_prph() function */
iwl_set_bits_mask_prph(priv->trans, APMG_DIGITAL_SVR_REG,
@@ -170,16 +168,6 @@ static const struct iwl_sensitivity_ranges iwl1000_sensitivity = {
static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
{
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ);
-
- priv->hw_params.tx_chains_num =
- num_of_ant(priv->hw_params.valid_tx_ant);
- if (priv->cfg->rx_with_siso_diversity)
- priv->hw_params.rx_chains_num = 1;
- else
- priv->hw_params.rx_chains_num =
- num_of_ant(priv->hw_params.valid_rx_ant);
-
iwl1000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
@@ -189,17 +177,6 @@ static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
struct iwl_lib_ops iwl1000_lib = {
.set_hw_params = iwl1000_hw_set_hw_params,
.nic_config = iwl1000_nic_config,
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REG_BAND_1_CHANNELS,
- EEPROM_REG_BAND_2_CHANNELS,
- EEPROM_REG_BAND_3_CHANNELS,
- EEPROM_REG_BAND_4_CHANNELS,
- EEPROM_REG_BAND_5_CHANNELS,
- EEPROM_REG_BAND_24_HT40_CHANNELS,
- EEPROM_REGULATORY_BAND_NO_HT40,
- },
- },
.temperature = iwlagn_temperature,
};
@@ -219,8 +196,6 @@ static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
/* NIC configuration for 2000 series */
static void iwl2000_nic_config(struct iwl_priv *priv)
{
- iwl_rf_config(priv);
-
iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
}
@@ -251,16 +226,6 @@ static const struct iwl_sensitivity_ranges iwl2000_sensitivity = {
static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
{
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ);
-
- priv->hw_params.tx_chains_num =
- num_of_ant(priv->hw_params.valid_tx_ant);
- if (priv->cfg->rx_with_siso_diversity)
- priv->hw_params.rx_chains_num = 1;
- else
- priv->hw_params.rx_chains_num =
- num_of_ant(priv->hw_params.valid_rx_ant);
-
iwl2000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
@@ -270,36 +235,12 @@ static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
struct iwl_lib_ops iwl2000_lib = {
.set_hw_params = iwl2000_hw_set_hw_params,
.nic_config = iwl2000_nic_config,
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REG_BAND_1_CHANNELS,
- EEPROM_REG_BAND_2_CHANNELS,
- EEPROM_REG_BAND_3_CHANNELS,
- EEPROM_REG_BAND_4_CHANNELS,
- EEPROM_REG_BAND_5_CHANNELS,
- EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_REGULATORY_BAND_NO_HT40,
- },
- .enhanced_txpower = true,
- },
.temperature = iwlagn_temperature,
};
struct iwl_lib_ops iwl2030_lib = {
.set_hw_params = iwl2000_hw_set_hw_params,
.nic_config = iwl2000_nic_config,
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REG_BAND_1_CHANNELS,
- EEPROM_REG_BAND_2_CHANNELS,
- EEPROM_REG_BAND_3_CHANNELS,
- EEPROM_REG_BAND_4_CHANNELS,
- EEPROM_REG_BAND_5_CHANNELS,
- EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_REGULATORY_BAND_NO_HT40,
- },
- .enhanced_txpower = true,
- },
.temperature = iwlagn_temperature,
};
@@ -309,19 +250,6 @@ struct iwl_lib_ops iwl2030_lib = {
*/
/* NIC configuration for 5000 series */
-static void iwl5000_nic_config(struct iwl_priv *priv)
-{
- iwl_rf_config(priv);
-
- /* W/A : NIC is stuck in a reset state after Early PCIe power off
- * (PCIe power is lost before PERST# is asserted),
- * causing ME FW to lose ownership and not being able to obtain it back.
- */
- iwl_set_bits_mask_prph(priv->trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
- ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
-}
-
static const struct iwl_sensitivity_ranges iwl5000_sensitivity = {
.min_nrg_cck = 100,
.auto_corr_min_ofdm = 90,
@@ -376,11 +304,9 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
{
u16 temperature, voltage;
- __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(priv,
- EEPROM_KELVIN_TEMPERATURE);
- temperature = le16_to_cpu(temp_calib[0]);
- voltage = le16_to_cpu(temp_calib[1]);
+ temperature = le16_to_cpu(priv->eeprom_data->kelvin_temperature);
+ voltage = le16_to_cpu(priv->eeprom_data->kelvin_voltage);
/* offset = temp - volt / coeff */
return (s32)(temperature -
@@ -404,14 +330,6 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
static void iwl5000_hw_set_hw_params(struct iwl_priv *priv)
{
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
- BIT(IEEE80211_BAND_5GHZ);
-
- priv->hw_params.tx_chains_num =
- num_of_ant(priv->hw_params.valid_tx_ant);
- priv->hw_params.rx_chains_num =
- num_of_ant(priv->hw_params.valid_rx_ant);
-
iwl5000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
@@ -420,14 +338,6 @@ static void iwl5000_hw_set_hw_params(struct iwl_priv *priv)
static void iwl5150_hw_set_hw_params(struct iwl_priv *priv)
{
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
- BIT(IEEE80211_BAND_5GHZ);
-
- priv->hw_params.tx_chains_num =
- num_of_ant(priv->hw_params.valid_tx_ant);
- priv->hw_params.rx_chains_num =
- num_of_ant(priv->hw_params.valid_rx_ant);
-
iwl5150_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
@@ -455,7 +365,6 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl5000_channel_switch_cmd cmd;
- const struct iwl_channel_info *ch_info;
u32 switch_time_in_usec, ucode_switch_time;
u16 ch;
u32 tsf_low;
@@ -505,14 +414,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
}
IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
cmd.switch_time);
- ch_info = iwl_get_channel_info(priv, priv->band, ch);
- if (ch_info)
- cmd.expect_beacon = is_channel_radar(ch_info);
- else {
- IWL_ERR(priv, "invalid channel switch from %u to %u\n",
- ctx->active.channel, ch);
- return -EFAULT;
- }
+ cmd.expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR;
return iwl_dvm_send_cmd(priv, &hcmd);
}
@@ -520,36 +422,12 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
struct iwl_lib_ops iwl5000_lib = {
.set_hw_params = iwl5000_hw_set_hw_params,
.set_channel_switch = iwl5000_hw_channel_switch,
- .nic_config = iwl5000_nic_config,
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REG_BAND_1_CHANNELS,
- EEPROM_REG_BAND_2_CHANNELS,
- EEPROM_REG_BAND_3_CHANNELS,
- EEPROM_REG_BAND_4_CHANNELS,
- EEPROM_REG_BAND_5_CHANNELS,
- EEPROM_REG_BAND_24_HT40_CHANNELS,
- EEPROM_REG_BAND_52_HT40_CHANNELS
- },
- },
.temperature = iwlagn_temperature,
};
struct iwl_lib_ops iwl5150_lib = {
.set_hw_params = iwl5150_hw_set_hw_params,
.set_channel_switch = iwl5000_hw_channel_switch,
- .nic_config = iwl5000_nic_config,
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REG_BAND_1_CHANNELS,
- EEPROM_REG_BAND_2_CHANNELS,
- EEPROM_REG_BAND_3_CHANNELS,
- EEPROM_REG_BAND_4_CHANNELS,
- EEPROM_REG_BAND_5_CHANNELS,
- EEPROM_REG_BAND_24_HT40_CHANNELS,
- EEPROM_REG_BAND_52_HT40_CHANNELS
- },
- },
.temperature = iwl5150_temperature,
};
@@ -570,8 +448,6 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
/* NIC configuration for 6000 series */
static void iwl6000_nic_config(struct iwl_priv *priv)
{
- iwl_rf_config(priv);
-
switch (priv->cfg->device_family) {
case IWL_DEVICE_FAMILY_6005:
case IWL_DEVICE_FAMILY_6030:
@@ -584,13 +460,13 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
break;
case IWL_DEVICE_FAMILY_6050:
/* Indicate calibration version to uCode. */
- if (iwl_eeprom_calib_version(priv) >= 6)
+ if (priv->eeprom_data->calib_version >= 6)
iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
break;
case IWL_DEVICE_FAMILY_6150:
/* Indicate calibration version to uCode. */
- if (iwl_eeprom_calib_version(priv) >= 6)
+ if (priv->eeprom_data->calib_version >= 6)
iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
@@ -627,17 +503,6 @@ static const struct iwl_sensitivity_ranges iwl6000_sensitivity = {
static void iwl6000_hw_set_hw_params(struct iwl_priv *priv)
{
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
- BIT(IEEE80211_BAND_5GHZ);
-
- priv->hw_params.tx_chains_num =
- num_of_ant(priv->hw_params.valid_tx_ant);
- if (priv->cfg->rx_with_siso_diversity)
- priv->hw_params.rx_chains_num = 1;
- else
- priv->hw_params.rx_chains_num =
- num_of_ant(priv->hw_params.valid_rx_ant);
-
iwl6000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
@@ -654,7 +519,6 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl6000_channel_switch_cmd cmd;
- const struct iwl_channel_info *ch_info;
u32 switch_time_in_usec, ucode_switch_time;
u16 ch;
u32 tsf_low;
@@ -704,14 +568,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
}
IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
cmd.switch_time);
- ch_info = iwl_get_channel_info(priv, priv->band, ch);
- if (ch_info)
- cmd.expect_beacon = is_channel_radar(ch_info);
- else {
- IWL_ERR(priv, "invalid channel switch from %u to %u\n",
- ctx->active.channel, ch);
- return -EFAULT;
- }
+ cmd.expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR;
return iwl_dvm_send_cmd(priv, &hcmd);
}
@@ -720,18 +577,6 @@ struct iwl_lib_ops iwl6000_lib = {
.set_hw_params = iwl6000_hw_set_hw_params,
.set_channel_switch = iwl6000_hw_channel_switch,
.nic_config = iwl6000_nic_config,
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REG_BAND_1_CHANNELS,
- EEPROM_REG_BAND_2_CHANNELS,
- EEPROM_REG_BAND_3_CHANNELS,
- EEPROM_REG_BAND_4_CHANNELS,
- EEPROM_REG_BAND_5_CHANNELS,
- EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_REG_BAND_52_HT40_CHANNELS
- },
- .enhanced_txpower = true,
- },
.temperature = iwlagn_temperature,
};
@@ -739,17 +584,5 @@ struct iwl_lib_ops iwl6030_lib = {
.set_hw_params = iwl6000_hw_set_hw_params,
.set_channel_switch = iwl6000_hw_channel_switch,
.nic_config = iwl6000_nic_config,
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REG_BAND_1_CHANNELS,
- EEPROM_REG_BAND_2_CHANNELS,
- EEPROM_REG_BAND_3_CHANNELS,
- EEPROM_REG_BAND_4_CHANNELS,
- EEPROM_REG_BAND_5_CHANNELS,
- EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_REG_BAND_52_HT40_CHANNELS
- },
- .enhanced_txpower = true,
- },
.temperature = iwlagn_temperature,
};
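For the 5150 temperature-offset change above, the calibration follows the in-code comment "offset = temp - volt / coeff". A worked numeric example (standalone C; the coefficient and raw calibration words are placeholders for illustration only):

#include <stdio.h>

/* Worked example of "offset = temp - volt / coeff".  The coefficient and the
 * raw calibration words below are placeholders; the real values come from the
 * device's EEPROM calibration data. */
#define EXAMPLE_VOLT_TO_TEMP_COEFF (-5)

int main(void)
{
	int temperature = 290;	/* raw Kelvin-temperature calibration word */
	int voltage = 25;	/* raw voltage calibration word */
	int offset = temperature - voltage / EXAMPLE_VOLT_TO_TEMP_COEFF;

	printf("temperature offset = %d\n", offset);	/* 290 - (-5) = 295 */
	return 0;
}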
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/dvm/led.c
index 4700041..bf479f7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/dvm/led.c
@@ -34,12 +34,11 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
-
-#include "iwl-dev.h"
-#include "iwl-agn.h"
#include "iwl-io.h"
#include "iwl-trans.h"
#include "iwl-modparams.h"
+#include "dev.h"
+#include "agn.h"
/* Throughput OFF time(ms) ON time (ms)
* >300 25 25
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/dvm/led.h
index b02a853..b02a853 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/dvm/led.h
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index e55ec6c..bef88c1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -33,13 +33,14 @@
#include <linux/sched.h>
#include <net/mac80211.h>
-#include "iwl-dev.h"
#include "iwl-io.h"
#include "iwl-agn-hw.h"
-#include "iwl-agn.h"
#include "iwl-trans.h"
#include "iwl-modparams.h"
+#include "dev.h"
+#include "agn.h"
+
int iwlagn_hw_valid_rtc_data_addr(u32 addr)
{
return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
@@ -58,8 +59,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
/* half dBm need to multiply */
tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
- if (priv->tx_power_lmt_in_half_dbm &&
- priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
+ if (tx_power_cmd.global_lmt > priv->eeprom_data->max_tx_pwr_half_dbm) {
/*
* For the newer devices which use the enhanced/extended tx power
* table in EEPROM, the format is in half dBm. The driver needs to
@@ -71,7 +71,8 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
* "tx_power_user_lmt" is higher than EEPROM value (in
* half-dBm format), lower the tx power based on EEPROM
*/
- tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
+ tx_power_cmd.global_lmt =
+ priv->eeprom_data->max_tx_pwr_half_dbm;
}
tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;
@@ -159,7 +160,7 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
IWL_PAN_SCD_MULTICAST_MSK;
- if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE)
flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
@@ -264,6 +265,8 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
bt_cmd_v2.tx_prio_boost = 0;
bt_cmd_v2.rx_prio_boost = 0;
} else {
+ /* older version only has 8 bits */
+ WARN_ON(priv->cfg->bt_params->bt_prio_boost & ~0xFF);
bt_cmd_v1.prio_boost =
priv->cfg->bt_params->bt_prio_boost;
bt_cmd_v1.tx_prio_boost = 0;
@@ -617,6 +620,11 @@ static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
int ave_rssi;
+ if (!ctx->vif || (ctx->vif->type != NL80211_IFTYPE_STATION)) {
+ IWL_DEBUG_INFO(priv, "BSS ctx not active or not in sta mode\n");
+ return false;
+ }
+
ave_rssi = ieee80211_ave_rssi(ctx->vif);
if (!ave_rssi) {
/* no rssi data, no changes to reduce tx power */
@@ -818,7 +826,7 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
if (priv->chain_noise_data.active_chains)
active_chains = priv->chain_noise_data.active_chains;
else
- active_chains = priv->hw_params.valid_rx_ant;
+ active_chains = priv->eeprom_data->valid_rx_ant;
if (priv->cfg->bt_params &&
priv->cfg->bt_params->advanced_bt_coexist &&
@@ -1259,7 +1267,7 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
* the mutex, this ensures we don't try to send two
* (or more) synchronous commands at a time.
*/
- if (cmd->flags & CMD_SYNC)
+ if (!(cmd->flags & CMD_ASYNC))
lockdep_assert_held(&priv->mutex);
if (priv->ucode_owner == IWL_OWNERSHIP_TM &&
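The tx-power change above clamps the user's request against the EEPROM limit, both expressed in half-dBm once the user value is doubled. A worked example of that conversion and clamp (standalone C; the numbers are made up):

#include <stdio.h>

/* Worked example of the half-dBm clamp in iwlagn_send_tx_power(): the user
 * limit is in dBm, the EEPROM limit in half-dBm steps. */
int main(void)
{
	int tx_power_user_lmt = 16;	/* dBm requested by the user */
	int max_tx_pwr_half_dbm = 30;	/* EEPROM limit, i.e. 15 dBm */
	int global_lmt = 2 * tx_power_user_lmt;	/* 32 half-dBm */

	/* Requests above the EEPROM limit are lowered to that limit. */
	if (global_lmt > max_tx_pwr_half_dbm)
		global_lmt = max_tx_pwr_half_dbm;

	printf("limit sent to uCode: %d half-dBm (%d dBm)\n",
	       global_lmt, global_lmt / 2);
	return 0;
}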
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index ab2f4d7..a5f7bce 100644
--- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -38,19 +38,20 @@
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
+#include <net/ieee80211_radiotap.h>
#include <net/mac80211.h>
#include <asm/div64.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
#include "iwl-io.h"
-#include "iwl-agn-calib.h"
-#include "iwl-agn.h"
#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-modparams.h"
+#include "dev.h"
+#include "calib.h"
+#include "agn.h"
+
/*****************************************************************************
*
* mac80211 entry point functions
@@ -154,6 +155,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
IEEE80211_HW_SCAN_WHILE_IDLE;
hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
+ hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FMT;
/*
* Including the following line will crash some AP's. This
@@ -162,7 +164,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
*/
- if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE)
hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
IEEE80211_HW_SUPPORTS_STATIC_SMPS;
@@ -199,6 +201,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
WIPHY_FLAG_DISABLE_BEACON_HINTS |
WIPHY_FLAG_IBSS_RSN;
+#ifdef CONFIG_PM_SLEEP
if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
priv->trans->ops->wowlan_suspend &&
device_can_wakeup(priv->trans->dev)) {
@@ -217,6 +220,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
hw->wiphy->wowlan.pattern_max_len =
IWLAGN_WOWLAN_MAX_PATTERN_LEN;
}
+#endif
if (iwlwifi_mod_params.power_save)
hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -235,12 +239,12 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
- if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
+ if (priv->eeprom_data->bands[IEEE80211_BAND_2GHZ].n_channels)
priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &priv->bands[IEEE80211_BAND_2GHZ];
- if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
+ &priv->eeprom_data->bands[IEEE80211_BAND_2GHZ];
+ if (priv->eeprom_data->bands[IEEE80211_BAND_5GHZ].n_channels)
priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &priv->bands[IEEE80211_BAND_5GHZ];
+ &priv->eeprom_data->bands[IEEE80211_BAND_5GHZ];
hw->wiphy->hw_version = priv->trans->hw_id;
@@ -249,6 +253,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
ret = ieee80211_register_hw(priv->hw);
if (ret) {
IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
+ iwl_leds_exit(priv);
return ret;
}
priv->mac80211_registered = 1;
@@ -338,7 +343,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw)
return 0;
}
-void iwlagn_mac_stop(struct ieee80211_hw *hw)
+static void iwlagn_mac_stop(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -366,9 +371,9 @@ void iwlagn_mac_stop(struct ieee80211_hw *hw)
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_gtk_rekey_data *data)
+static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -394,7 +399,8 @@ void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
#ifdef CONFIG_PM_SLEEP
-int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
@@ -417,8 +423,6 @@ int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
if (ret)
goto error;
- device_set_wakeup_enable(priv->trans->dev, true);
-
iwl_trans_wowlan_suspend(priv->trans);
goto out;
@@ -472,7 +476,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
}
if (priv->wowlan_sram)
- _iwl_read_targ_mem_words(
+ _iwl_read_targ_mem_dwords(
priv->trans, 0x800000,
priv->wowlan_sram,
img->sec[IWL_UCODE_SECTION_DATA].len / 4);
@@ -485,8 +489,6 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
priv->wowlan = false;
- device_set_wakeup_enable(priv->trans->dev, false);
-
iwlagn_prepare_restart(priv);
memset((void *)&ctx->active, 0, sizeof(ctx->active));
@@ -501,9 +503,15 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
return 1;
}
+static void iwlagn_mac_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+
+ device_set_wakeup_enable(priv->trans->dev, enabled);
+}
#endif
-void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -514,21 +522,21 @@ void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
dev_kfree_skb_any(skb);
}
-void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta,
- u32 iv32, u16 *phase1key)
+static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
}
-int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
+static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -628,11 +636,11 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return ret;
}
-int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size)
+static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int ret = -EINVAL;
@@ -641,7 +649,7 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
sta->addr, tid);
- if (!(priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE))
+ if (!(priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE))
return -EACCES;
IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -659,7 +667,7 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
ret = iwl_sta_rx_agg_stop(priv, sta, tid);
break;
case IEEE80211_AMPDU_TX_START:
- if (!priv->trans->ops->tx_agg_setup)
+ if (!priv->trans->ops->txq_enable)
break;
if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
break;
@@ -754,11 +762,11 @@ static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
return ret;
}
-int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- enum ieee80211_sta_state old_state,
- enum ieee80211_sta_state new_state)
+static int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -793,6 +801,18 @@ int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
switch (op) {
case ADD:
ret = iwlagn_mac_sta_add(hw, vif, sta);
+ if (ret)
+ break;
+ /*
+ * Clear the in-progress flag, the AP station entry was added
+ * but we'll initialize LQ only when we've associated (which
+ * would also clear the in-progress flag). This is necessary
+ * in case we never initialize LQ because association fails.
+ */
+ spin_lock_bh(&priv->sta_lock);
+ priv->stations[iwl_sta_id(sta)].used &=
+ ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_bh(&priv->sta_lock);
break;
case REMOVE:
ret = iwlagn_mac_sta_remove(hw, vif, sta);
@@ -837,11 +857,10 @@ int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
return ret;
}
-void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_channel_switch *ch_switch)
+static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- const struct iwl_channel_info *ch_info;
struct ieee80211_conf *conf = &hw->conf;
struct ieee80211_channel *channel = ch_switch->channel;
struct iwl_ht_config *ht_conf = &priv->current_ht_config;
@@ -878,12 +897,6 @@ void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
if (le16_to_cpu(ctx->active.channel) == ch)
goto out;
- ch_info = iwl_get_channel_info(priv, channel->band, ch);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "invalid channel\n");
- goto out;
- }
-
priv->current_ht_config.smps = conf->smps_mode;
/* Configure HT40 channels */
@@ -932,10 +945,10 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
ieee80211_chswitch_done(ctx->vif, is_success);
}
-void iwlagn_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
+static void iwlagn_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
__le32 filter_or = 0, filter_nand = 0;
@@ -982,7 +995,7 @@ void iwlagn_configure_filter(struct ieee80211_hw *hw,
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
-void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
+static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -1035,8 +1048,18 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
if (test_bit(STATUS_SCAN_HW, &priv->status)) {
- err = -EBUSY;
- goto out;
+ /* mac80211 should not scan while ROC or ROC while scanning */
+ if (WARN_ON_ONCE(priv->scan_type != IWL_SCAN_RADIO_RESET)) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ iwl_scan_cancel_timeout(priv, 100);
+
+ if (test_bit(STATUS_SCAN_HW, &priv->status)) {
+ err = -EBUSY;
+ goto out;
+ }
}
priv->hw_roc_channel = channel;
@@ -1109,7 +1132,7 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
return err;
}
-int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
+static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -1126,8 +1149,8 @@ int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
return 0;
}
-void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
- enum ieee80211_rssi_event rssi_event)
+static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
+ enum ieee80211_rssi_event rssi_event)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -1151,8 +1174,8 @@ void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, bool set)
+static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, bool set)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -1161,9 +1184,9 @@ int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
return 0;
}
-int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params)
+static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -1205,7 +1228,7 @@ int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
return 0;
}
-int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
+static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -1221,7 +1244,8 @@ static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
return iwlagn_commit_rxon(priv, ctx);
}
-int iwl_setup_interface(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+static int iwl_setup_interface(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
{
struct ieee80211_vif *vif = ctx->vif;
int err, ac;
@@ -1341,9 +1365,9 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
return err;
}
-void iwl_teardown_interface(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- bool mode_change)
+static void iwl_teardown_interface(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ bool mode_change)
{
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
@@ -1399,13 +1423,11 @@ static void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
}
static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum nl80211_iftype newtype, bool newp2p)
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
- struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl_rxon_context *tmp;
+ struct iwl_rxon_context *ctx, *tmp;
enum nl80211_iftype newviftype = newtype;
u32 interface_modes;
int err;
@@ -1416,6 +1438,18 @@ static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
+ ctx = iwl_rxon_ctx_from_vif(vif);
+
+ /*
+ * To simplify this code, only support changes on the
+ * BSS context. The PAN context is usually reassigned
+ * by creating/removing P2P interfaces anyway.
+ */
+ if (ctx->ctxid != IWL_RXON_CTX_BSS) {
+ err = -EBUSY;
+ goto out;
+ }
+
if (!ctx->vif || !iwl_is_ready_rf(priv)) {
/*
* Huh? But wait ... this can maybe happen when
@@ -1425,32 +1459,19 @@ static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
goto out;
}
+ /* Check if the switch is supported in the same context */
interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
-
if (!(interface_modes & BIT(newtype))) {
err = -EBUSY;
goto out;
}
- /*
- * Refuse a change that should be done by moving from the PAN
- * context to the BSS context instead, if the BSS context is
- * available and can support the new interface type.
- */
- if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif &&
- (bss_ctx->interface_modes & BIT(newtype) ||
- bss_ctx->exclusive_interface_modes & BIT(newtype))) {
- BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
- err = -EBUSY;
- goto out;
- }
-
if (ctx->exclusive_interface_modes & BIT(newtype)) {
for_each_context(priv, tmp) {
if (ctx == tmp)
continue;
- if (!tmp->vif)
+ if (!tmp->is_active)
continue;
/*
@@ -1484,9 +1505,9 @@ static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
return err;
}
-int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req)
+static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int ret;
@@ -1541,10 +1562,10 @@ static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
iwl_send_add_sta(priv, &cmd, CMD_ASYNC);
}
-void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd,
- struct ieee80211_sta *sta)
+static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
@@ -1581,6 +1602,7 @@ struct ieee80211_ops iwlagn_hw_ops = {
#ifdef CONFIG_PM_SLEEP
.suspend = iwlagn_mac_suspend,
.resume = iwlagn_mac_resume,
+ .set_wakeup = iwlagn_mac_set_wakeup,
#endif
.add_interface = iwlagn_mac_add_interface,
.remove_interface = iwlagn_mac_remove_interface,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index ec36e2b..84d3db5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -44,15 +44,19 @@
#include <asm/div64.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
+#include "iwl-eeprom-read.h"
+#include "iwl-eeprom-parse.h"
#include "iwl-io.h"
-#include "iwl-agn-calib.h"
-#include "iwl-agn.h"
#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
#include "iwl-modparams.h"
+#include "iwl-prph.h"
+
+#include "dev.h"
+#include "calib.h"
+#include "agn.h"
+
/******************************************************************************
*
@@ -78,7 +82,8 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
-MODULE_ALIAS("iwlagn");
+
+static const struct iwl_op_mode_ops iwl_dvm_ops;
void iwl_update_chain_flags(struct iwl_priv *priv)
{
@@ -180,7 +185,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
rate = info->control.rates[0].idx;
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- priv->hw_params.valid_tx_ant);
+ priv->eeprom_data->valid_tx_ant);
rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
/* In mac80211, rates for 5 GHz start at 0 */
@@ -403,7 +408,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
base = priv->device_pointers.log_event_table;
if (iwlagn_hw_valid_rtc_data_addr(base)) {
- iwl_read_targ_mem_words(priv->trans, base, &read, sizeof(read));
+ iwl_read_targ_mem_bytes(priv->trans, base, &read, sizeof(read));
capacity = read.capacity;
mode = read.mode;
num_wraps = read.wrap_counter;
@@ -513,49 +518,6 @@ static void iwl_bg_tx_flush(struct work_struct *work)
* queue/FIFO/AC mapping definitions
*/
-#define IWL_TX_FIFO_BK 0 /* shared */
-#define IWL_TX_FIFO_BE 1
-#define IWL_TX_FIFO_VI 2 /* shared */
-#define IWL_TX_FIFO_VO 3
-#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK
-#define IWL_TX_FIFO_BE_IPAN 4
-#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI
-#define IWL_TX_FIFO_VO_IPAN 5
-/* re-uses the VO FIFO, uCode will properly flush/schedule */
-#define IWL_TX_FIFO_AUX 5
-#define IWL_TX_FIFO_UNUSED -1
-
-#define IWLAGN_CMD_FIFO_NUM 7
-
-/*
- * This queue number is required for proper operation
- * because the ucode will stop/start the scheduler as
- * required.
- */
-#define IWL_IPAN_MCAST_QUEUE 8
-
-static const u8 iwlagn_default_queue_to_tx_fifo[] = {
- IWL_TX_FIFO_VO,
- IWL_TX_FIFO_VI,
- IWL_TX_FIFO_BE,
- IWL_TX_FIFO_BK,
- IWLAGN_CMD_FIFO_NUM,
-};
-
-static const u8 iwlagn_ipan_queue_to_tx_fifo[] = {
- IWL_TX_FIFO_VO,
- IWL_TX_FIFO_VI,
- IWL_TX_FIFO_BE,
- IWL_TX_FIFO_BK,
- IWL_TX_FIFO_BK_IPAN,
- IWL_TX_FIFO_BE_IPAN,
- IWL_TX_FIFO_VI_IPAN,
- IWL_TX_FIFO_VO_IPAN,
- IWL_TX_FIFO_BE_IPAN,
- IWLAGN_CMD_FIFO_NUM,
- IWL_TX_FIFO_AUX,
-};
-
static const u8 iwlagn_bss_ac_to_fifo[] = {
IWL_TX_FIFO_VO,
IWL_TX_FIFO_VI,
@@ -578,7 +540,7 @@ static const u8 iwlagn_pan_ac_to_queue[] = {
7, 6, 5, 4,
};
-void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
+static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
{
int i;
@@ -645,7 +607,7 @@ void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
}
-void iwl_rf_kill_ct_config(struct iwl_priv *priv)
+static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
{
struct iwl_ct_kill_config cmd;
struct iwl_ct_kill_throttling_config adv_cmd;
@@ -726,7 +688,7 @@ static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
}
}
-void iwl_send_bt_config(struct iwl_priv *priv)
+static void iwl_send_bt_config(struct iwl_priv *priv)
{
struct iwl_bt_cmd bt_cmd = {
.lead_time = BT_LEAD_TIME_DEF,
@@ -814,7 +776,7 @@ int iwl_alive_start(struct iwl_priv *priv)
ieee80211_wake_queues(priv->hw);
/* Configure Tx antenna selection based on H/W config */
- iwlagn_send_tx_ant_config(priv, priv->hw_params.valid_tx_ant);
+ iwlagn_send_tx_ant_config(priv, priv->eeprom_data->valid_tx_ant);
if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
struct iwl_rxon_cmd *active_rxon =
@@ -932,11 +894,12 @@ void iwl_down(struct iwl_priv *priv)
priv->ucode_loaded = false;
iwl_trans_stop_device(priv->trans);
+ /* Clearing num_aux_in_flight must be done after the transport is stopped */
+ atomic_set(&priv->num_aux_in_flight, 0);
+
/* Clear out all status bits but a few that are stable across reset */
priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
- STATUS_GEO_CONFIGURED |
test_bit(STATUS_FW_ERROR, &priv->status) <<
STATUS_FW_ERROR |
test_bit(STATUS_EXIT_PENDING, &priv->status) <<
@@ -1078,7 +1041,7 @@ static void iwlagn_disable_roc_work(struct work_struct *work)
*
*****************************************************************************/
-void iwl_setup_deferred_work(struct iwl_priv *priv)
+static void iwl_setup_deferred_work(struct iwl_priv *priv)
{
priv->workqueue = create_singlethread_workqueue(DRV_NAME);
@@ -1123,224 +1086,14 @@ void iwl_cancel_deferred_work(struct iwl_priv *priv)
del_timer_sync(&priv->ucode_trace);
}
-static void iwl_init_hw_rates(struct ieee80211_rate *rates)
-{
- int i;
-
- for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
- rates[i].bitrate = iwl_rates[i].ieee * 5;
- rates[i].hw_value = i; /* Rate scaling will work on indexes */
- rates[i].hw_value_short = i;
- rates[i].flags = 0;
- if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
- /*
- * If CCK != 1M then set short preamble rate flag.
- */
- rates[i].flags |=
- (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
- 0 : IEEE80211_RATE_SHORT_PREAMBLE;
- }
- }
-}
-
-#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
-#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
-static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
- struct ieee80211_sta_ht_cap *ht_info,
- enum ieee80211_band band)
-{
- u16 max_bit_rate = 0;
- u8 rx_chains_num = priv->hw_params.rx_chains_num;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
-
- ht_info->cap = 0;
- memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
-
- ht_info->ht_supported = true;
-
- if (priv->cfg->ht_params &&
- priv->cfg->ht_params->ht_greenfield_support)
- ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
- ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
- max_bit_rate = MAX_BIT_RATE_20_MHZ;
- if (priv->hw_params.ht40_channel & BIT(band)) {
- ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
- ht_info->mcs.rx_mask[4] = 0x01;
- max_bit_rate = MAX_BIT_RATE_40_MHZ;
- }
-
- if (iwlwifi_mod_params.amsdu_size_8K)
- ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
-
- ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
- ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
-
- ht_info->mcs.rx_mask[0] = 0xFF;
- if (rx_chains_num >= 2)
- ht_info->mcs.rx_mask[1] = 0xFF;
- if (rx_chains_num >= 3)
- ht_info->mcs.rx_mask[2] = 0xFF;
-
- /* Highest supported Rx data rate */
- max_bit_rate *= rx_chains_num;
- WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
- ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
-
- /* Tx MCS capabilities */
- ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
- if (tx_chains_num != rx_chains_num) {
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
- ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
- }
-}
-
-/**
- * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom
- */
-static int iwl_init_geos(struct iwl_priv *priv)
-{
- struct iwl_channel_info *ch;
- struct ieee80211_supported_band *sband;
- struct ieee80211_channel *channels;
- struct ieee80211_channel *geo_ch;
- struct ieee80211_rate *rates;
- int i = 0;
- s8 max_tx_power = IWLAGN_TX_POWER_TARGET_POWER_MIN;
-
- if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
- priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
- IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
- set_bit(STATUS_GEO_CONFIGURED, &priv->status);
- return 0;
- }
-
- channels = kcalloc(priv->channel_count,
- sizeof(struct ieee80211_channel), GFP_KERNEL);
- if (!channels)
- return -ENOMEM;
-
- rates = kcalloc(IWL_RATE_COUNT_LEGACY, sizeof(struct ieee80211_rate),
- GFP_KERNEL);
- if (!rates) {
- kfree(channels);
- return -ENOMEM;
- }
-
- /* 5.2GHz channels start after the 2.4GHz channels */
- sband = &priv->bands[IEEE80211_BAND_5GHZ];
- sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
- /* just OFDM */
- sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
- sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
-
- if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
- iwl_init_ht_hw_capab(priv, &sband->ht_cap,
- IEEE80211_BAND_5GHZ);
-
- sband = &priv->bands[IEEE80211_BAND_2GHZ];
- sband->channels = channels;
- /* OFDM & CCK */
- sband->bitrates = rates;
- sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
-
- if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
- iwl_init_ht_hw_capab(priv, &sband->ht_cap,
- IEEE80211_BAND_2GHZ);
-
- priv->ieee_channels = channels;
- priv->ieee_rates = rates;
-
- for (i = 0; i < priv->channel_count; i++) {
- ch = &priv->channel_info[i];
-
- /* FIXME: might be removed if scan is OK */
- if (!is_channel_valid(ch))
- continue;
-
- sband = &priv->bands[ch->band];
-
- geo_ch = &sband->channels[sband->n_channels++];
-
- geo_ch->center_freq =
- ieee80211_channel_to_frequency(ch->channel, ch->band);
- geo_ch->max_power = ch->max_power_avg;
- geo_ch->max_antenna_gain = 0xff;
- geo_ch->hw_value = ch->channel;
-
- if (is_channel_valid(ch)) {
- if (!(ch->flags & EEPROM_CHANNEL_IBSS))
- geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
-
- if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
- geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
-
- if (ch->flags & EEPROM_CHANNEL_RADAR)
- geo_ch->flags |= IEEE80211_CHAN_RADAR;
-
- geo_ch->flags |= ch->ht40_extension_channel;
-
- if (ch->max_power_avg > max_tx_power)
- max_tx_power = ch->max_power_avg;
- } else {
- geo_ch->flags |= IEEE80211_CHAN_DISABLED;
- }
-
- IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
- ch->channel, geo_ch->center_freq,
- is_channel_a_band(ch) ? "5.2" : "2.4",
- geo_ch->flags & IEEE80211_CHAN_DISABLED ?
- "restricted" : "valid",
- geo_ch->flags);
- }
-
- priv->tx_power_device_lmt = max_tx_power;
- priv->tx_power_user_lmt = max_tx_power;
- priv->tx_power_next = max_tx_power;
-
- if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
- priv->hw_params.sku & EEPROM_SKU_CAP_BAND_52GHZ) {
- IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
- "Please send your %s to maintainer.\n",
- priv->trans->hw_id_str);
- priv->hw_params.sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
- }
-
- if (iwlwifi_mod_params.disable_5ghz)
- priv->bands[IEEE80211_BAND_5GHZ].n_channels = 0;
-
- IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
- priv->bands[IEEE80211_BAND_2GHZ].n_channels,
- priv->bands[IEEE80211_BAND_5GHZ].n_channels);
-
- set_bit(STATUS_GEO_CONFIGURED, &priv->status);
-
- return 0;
-}
-
-/*
- * iwl_free_geos - undo allocations in iwl_init_geos
- */
-static void iwl_free_geos(struct iwl_priv *priv)
-{
- kfree(priv->ieee_channels);
- kfree(priv->ieee_rates);
- clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
-}
-
-int iwl_init_drv(struct iwl_priv *priv)
+static int iwl_init_drv(struct iwl_priv *priv)
{
- int ret;
-
spin_lock_init(&priv->sta_lock);
mutex_init(&priv->mutex);
INIT_LIST_HEAD(&priv->calib_results);
- priv->ieee_channels = NULL;
- priv->ieee_rates = NULL;
priv->band = IEEE80211_BAND_2GHZ;
priv->plcp_delta_threshold =
@@ -1371,31 +1124,11 @@ int iwl_init_drv(struct iwl_priv *priv)
priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
}
- ret = iwl_init_channel_map(priv);
- if (ret) {
- IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
- goto err;
- }
-
- ret = iwl_init_geos(priv);
- if (ret) {
- IWL_ERR(priv, "initializing geos failed: %d\n", ret);
- goto err_free_channel_map;
- }
- iwl_init_hw_rates(priv->ieee_rates);
-
return 0;
-
-err_free_channel_map:
- iwl_free_channel_map(priv);
-err:
- return ret;
}
-void iwl_uninit_drv(struct iwl_priv *priv)
+static void iwl_uninit_drv(struct iwl_priv *priv)
{
- iwl_free_geos(priv);
- iwl_free_channel_map(priv);
kfree(priv->scan_cmd);
kfree(priv->beacon_cmd);
kfree(rcu_dereference_raw(priv->noa_data));
@@ -1405,15 +1138,12 @@ void iwl_uninit_drv(struct iwl_priv *priv)
#endif
}
-void iwl_set_hw_params(struct iwl_priv *priv)
+static void iwl_set_hw_params(struct iwl_priv *priv)
{
if (priv->cfg->ht_params)
priv->hw_params.use_rts_for_aggregation =
priv->cfg->ht_params->use_rts_for_aggregation;
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
- priv->hw_params.sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
-
/* Device-specific setup */
priv->lib->set_hw_params(priv);
}
@@ -1421,7 +1151,7 @@ void iwl_set_hw_params(struct iwl_priv *priv)
/* show what optional capabilities we have */
-void iwl_option_config(struct iwl_priv *priv)
+static void iwl_option_config(struct iwl_priv *priv)
{
#ifdef CONFIG_IWLWIFI_DEBUG
IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG enabled\n");
@@ -1454,6 +1184,42 @@ void iwl_option_config(struct iwl_priv *priv)
#endif
}
+static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
+{
+ u16 radio_cfg;
+
+ if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE &&
+ !priv->cfg->ht_params) {
+ IWL_ERR(priv, "Invalid 11n configuration\n");
+ return -EINVAL;
+ }
+
+ if (!priv->eeprom_data->sku) {
+ IWL_ERR(priv, "Invalid device sku\n");
+ return -EINVAL;
+ }
+
+ IWL_INFO(priv, "Device SKU: 0x%X\n", priv->eeprom_data->sku);
+
+ radio_cfg = priv->eeprom_data->radio_cfg;
+
+ priv->hw_params.tx_chains_num =
+ num_of_ant(priv->eeprom_data->valid_tx_ant);
+ if (priv->cfg->rx_with_siso_diversity)
+ priv->hw_params.rx_chains_num = 1;
+ else
+ priv->hw_params.rx_chains_num =
+ num_of_ant(priv->eeprom_data->valid_rx_ant);
+
+ IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
+ priv->eeprom_data->valid_tx_ant,
+ priv->eeprom_data->valid_rx_ant);
+
+ return 0;
+}
+
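
Note: num_of_ant() used for the chain counts above is just a population
count over the driver's ANT_A/ANT_B/ANT_C mask bits. A minimal sketch for
reference (the ex_ prefix marks it as illustrative; the driver's own inline
helper is authoritative):

	static inline u8 ex_num_of_ant(u8 mask)
	{
		return !!(mask & 0x1) +		/* ANT_A */
		       !!(mask & 0x2) +		/* ANT_B */
		       !!(mask & 0x4);		/* ANT_C */
	}
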
static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
const struct iwl_cfg *cfg,
const struct iwl_fw *fw)
@@ -1466,7 +1232,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
struct iwl_trans_config trans_cfg;
static const u8 no_reclaim_cmds[] = {
REPLY_RX_PHY_CMD,
- REPLY_RX,
REPLY_RX_MPDU_CMD,
REPLY_COMPRESSED_BA,
STATISTICS_NOTIFICATION,
@@ -1539,8 +1304,12 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
trans_cfg.queue_watchdog_timeout =
priv->cfg->base_params->wd_timeout;
else
- trans_cfg.queue_watchdog_timeout = IWL_WATCHHDOG_DISABLED;
+ trans_cfg.queue_watchdog_timeout = IWL_WATCHDOG_DISABLED;
trans_cfg.command_names = iwl_dvm_cmd_strings;
+ trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM;
+
+ WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE <
+ priv->cfg->base_params->num_of_queues);
ucode_flags = fw->ucode_capa.flags;
@@ -1551,15 +1320,9 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
- trans_cfg.queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
- trans_cfg.n_queue_to_fifo =
- ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
} else {
priv->sta_key_max_num = STA_KEY_MAX_NUM;
trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
- trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
- trans_cfg.n_queue_to_fifo =
- ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
}
/* Configure transport layer */
@@ -1599,25 +1362,33 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
goto out_free_hw;
/* Read the EEPROM */
- if (iwl_eeprom_init(priv, priv->trans->hw_rev)) {
+ if (iwl_read_eeprom(priv->trans, &priv->eeprom_blob,
+ &priv->eeprom_blob_size)) {
IWL_ERR(priv, "Unable to init EEPROM\n");
goto out_free_hw;
}
+
/* Reset chip to save power until we load uCode during "up". */
iwl_trans_stop_hw(priv->trans, false);
- if (iwl_eeprom_check_version(priv))
+ priv->eeprom_data = iwl_parse_eeprom_data(priv->trans->dev, priv->cfg,
+ priv->eeprom_blob,
+ priv->eeprom_blob_size);
+ if (!priv->eeprom_data)
+ goto out_free_eeprom_blob;
+
+ if (iwl_eeprom_check_version(priv->eeprom_data, priv->trans))
goto out_free_eeprom;
if (iwl_eeprom_init_hw_params(priv))
goto out_free_eeprom;
/* extract MAC Address */
- iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
+ memcpy(priv->addresses[0].addr, priv->eeprom_data->hw_addr, ETH_ALEN);
IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
priv->hw->wiphy->addresses = priv->addresses;
priv->hw->wiphy->n_addresses = 1;
- num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS);
+ num_mac = priv->eeprom_data->n_hw_addrs;
if (num_mac > 1) {
memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
ETH_ALEN);
@@ -1630,7 +1401,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
************************/
iwl_set_hw_params(priv);
- if (!(priv->hw_params.sku & EEPROM_SKU_CAP_IPAN_ENABLE)) {
+ if (!(priv->eeprom_data->sku & EEPROM_SKU_CAP_IPAN_ENABLE)) {
IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN");
ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
/*
@@ -1640,9 +1411,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
priv->sta_key_max_num = STA_KEY_MAX_NUM;
trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
- trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
- trans_cfg.n_queue_to_fifo =
- ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
/* Configure transport layer again*/
iwl_trans_configure(priv->trans, &trans_cfg);
@@ -1660,9 +1428,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
atomic_set(&priv->queue_stop_count[i], 0);
}
- WARN_ON(trans_cfg.queue_to_fifo[trans_cfg.cmd_queue] !=
- IWLAGN_CMD_FIFO_NUM);
-
if (iwl_init_drv(priv))
goto out_free_eeprom;
@@ -1711,8 +1476,10 @@ out_destroy_workqueue:
destroy_workqueue(priv->workqueue);
priv->workqueue = NULL;
iwl_uninit_drv(priv);
+out_free_eeprom_blob:
+ kfree(priv->eeprom_blob);
out_free_eeprom:
- iwl_eeprom_free(priv);
+ iwl_free_eeprom_data(priv->eeprom_data);
out_free_hw:
ieee80211_free_hw(priv->hw);
out:
@@ -1720,7 +1487,7 @@ out:
return op_mode;
}
-void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
+static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
@@ -1728,7 +1495,7 @@ void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
iwl_dbgfs_unregister(priv);
- iwl_testmode_cleanup(priv);
+ iwl_testmode_free(priv);
iwlagn_mac_unregister(priv);
iwl_tt_exit(priv);
@@ -1737,7 +1504,8 @@ void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
priv->ucode_loaded = false;
iwl_trans_stop_device(priv->trans);
- iwl_eeprom_free(priv);
+ kfree(priv->eeprom_blob);
+ iwl_free_eeprom_data(priv->eeprom_data);
/*netif_stop_queue(dev); */
flush_workqueue(priv->workqueue);
@@ -1850,7 +1618,7 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
}
/*TODO: Update dbgfs with ISR error stats obtained below */
- iwl_read_targ_mem_words(trans, base, &table, sizeof(table));
+ iwl_read_targ_mem_bytes(trans, base, &table, sizeof(table));
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
@@ -2185,7 +1953,7 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
}
}
-void iwl_nic_error(struct iwl_op_mode *op_mode)
+static void iwl_nic_error(struct iwl_op_mode *op_mode)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
@@ -2198,7 +1966,7 @@ void iwl_nic_error(struct iwl_op_mode *op_mode)
iwlagn_fw_error(priv, false);
}
-void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
+static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
@@ -2208,11 +1976,60 @@ void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
}
}
-void iwl_nic_config(struct iwl_op_mode *op_mode)
+#define EEPROM_RF_CONFIG_TYPE_MAX 0x3
+
+static void iwl_nic_config(struct iwl_op_mode *op_mode)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+ u16 radio_cfg = priv->eeprom_data->radio_cfg;
+
+ /* SKU Control */
+ iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
+ CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP,
+ (CSR_HW_REV_STEP(priv->trans->hw_rev) <<
+ CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) |
+ (CSR_HW_REV_DASH(priv->trans->hw_rev) <<
+ CSR_HW_IF_CONFIG_REG_POS_MAC_DASH));
+
+ /* write radio config values to register */
+ if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
+ u32 reg_val =
+ EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <<
+ CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE |
+ EEPROM_RF_CFG_STEP_MSK(radio_cfg) <<
+ CSR_HW_IF_CONFIG_REG_POS_PHY_STEP |
+ EEPROM_RF_CFG_DASH_MSK(radio_cfg) <<
+ CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
+
+ iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH, reg_val);
+
+ IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n",
+ EEPROM_RF_CFG_TYPE_MSK(radio_cfg),
+ EEPROM_RF_CFG_STEP_MSK(radio_cfg),
+ EEPROM_RF_CFG_DASH_MSK(radio_cfg));
+ } else {
+ WARN_ON(1);
+ }
- priv->lib->nic_config(priv);
+ /* set CSR_HW_CONFIG_REG for uCode use */
+ iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+ CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+
+ /* W/A : NIC is stuck in a reset state after Early PCIe power off
+ * (PCIe power is lost before PERST# is asserted),
+	 * causing the ME FW to lose ownership and be unable to obtain it back.
+ */
+ iwl_set_bits_mask_prph(priv->trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
+ ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
+
+ if (priv->lib->nic_config)
+ priv->lib->nic_config(priv);
}
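
For reference, the radio_cfg word consumed here is a packed EEPROM field;
the EEPROM_RF_CFG_TYPE/STEP/DASH_MSK() accessors amount to roughly the
following (EX_ names are illustrative only; the eeprom header definitions
are authoritative):

	#define EX_RF_CFG_TYPE(x)	((x) & 0x3)		/* PHY type */
	#define EX_RF_CFG_STEP(x)	(((x) >> 2) & 0x3)	/* PHY step */
	#define EX_RF_CFG_DASH(x)	(((x) >> 4) & 0x3)	/* PHY dash */
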
static void iwl_wimax_active(struct iwl_op_mode *op_mode)
@@ -2223,7 +2040,7 @@ static void iwl_wimax_active(struct iwl_op_mode *op_mode)
IWL_ERR(priv, "RF is used by WiMAX\n");
}
-void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
+static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
int mq = priv->queue_to_mac80211[queue];
@@ -2242,7 +2059,7 @@ void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
ieee80211_stop_queue(priv->hw, mq);
}
-void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
+static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
int mq = priv->queue_to_mac80211[queue];
@@ -2282,16 +2099,17 @@ void iwlagn_lift_passive_no_rx(struct iwl_priv *priv)
priv->passive_no_rx = false;
}
-void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
+static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
struct ieee80211_tx_info *info;
info = IEEE80211_SKB_CB(skb);
- kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1]));
+ iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
dev_kfree_skb_any(skb);
}
-void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
+static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
@@ -2303,7 +2121,7 @@ void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
}
-const struct iwl_op_mode_ops iwl_dvm_ops = {
+static const struct iwl_op_mode_ops iwl_dvm_ops = {
.start = iwl_op_mode_dvm_start,
.stop = iwl_op_mode_dvm_stop,
.rx = iwl_rx_dispatch,
@@ -2322,9 +2140,6 @@ const struct iwl_op_mode_ops iwl_dvm_ops = {
* driver and module entry point
*
*****************************************************************************/
-
-struct kmem_cache *iwl_tx_cmd_pool;
-
static int __init iwl_init(void)
{
@@ -2332,36 +2147,25 @@ static int __init iwl_init(void)
pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
pr_info(DRV_COPYRIGHT "\n");
- iwl_tx_cmd_pool = kmem_cache_create("iwl_dev_cmd",
- sizeof(struct iwl_device_cmd),
- sizeof(void *), 0, NULL);
- if (!iwl_tx_cmd_pool)
- return -ENOMEM;
-
ret = iwlagn_rate_control_register();
if (ret) {
pr_err("Unable to register rate control algorithm: %d\n", ret);
- goto error_rc_register;
+ return ret;
}
- ret = iwl_pci_register_driver();
- if (ret)
- goto error_pci_register;
- return ret;
+ ret = iwl_opmode_register("iwldvm", &iwl_dvm_ops);
+ if (ret) {
+ pr_err("Unable to register op_mode: %d\n", ret);
+ iwlagn_rate_control_unregister();
+ }
-error_pci_register:
- iwlagn_rate_control_unregister();
-error_rc_register:
- kmem_cache_destroy(iwl_tx_cmd_pool);
return ret;
}
+module_init(iwl_init);
static void __exit iwl_exit(void)
{
- iwl_pci_unregister_driver();
+ iwl_opmode_deregister("iwldvm");
iwlagn_rate_control_unregister();
- kmem_cache_destroy(iwl_tx_cmd_pool);
}
-
module_exit(iwl_exit);
-module_init(iwl_init);
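
Net effect of the module init/exit rework: the DVM code no longer registers
a PCI driver or owns the iwl_tx_cmd_pool kmem cache; it registers its ops
as the "iwldvm" op_mode with the shared iwlwifi core, and Tx command
allocations are now handed back through iwl_trans_free_tx_cmd() above.
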
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/dvm/power.c
index 544ddf1..518cf37 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/dvm/power.c
@@ -31,18 +31,15 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
-
#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-agn.h"
#include "iwl-io.h"
-#include "iwl-commands.h"
#include "iwl-debug.h"
-#include "iwl-power.h"
#include "iwl-trans.h"
#include "iwl-modparams.h"
+#include "dev.h"
+#include "agn.h"
+#include "commands.h"
+#include "power.h"
/*
* Setting power level allows the card to go to sleep when not busy.
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/dvm/power.h
index 21afc92..a2cee7f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/dvm/power.h
@@ -28,7 +28,7 @@
#ifndef __iwl_power_setting_h__
#define __iwl_power_setting_h__
-#include "iwl-commands.h"
+#include "commands.h"
struct iwl_power_mgr {
struct iwl_powertable_cmd sleep_cmd;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 51e1a69..6fddd27 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -35,10 +35,8 @@
#include <linux/workqueue.h>
-#include "iwl-dev.h"
-#include "iwl-agn.h"
-#include "iwl-op-mode.h"
-#include "iwl-modparams.h"
+#include "dev.h"
+#include "agn.h"
#define RS_NAME "iwl-agn-rs"
@@ -819,7 +817,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
if (num_of_ant(tbl->ant_type) > 1)
tbl->ant_type =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(priv->eeprom_data->valid_tx_ant);
tbl->is_ht40 = 0;
tbl->is_SGI = 0;
@@ -884,6 +882,7 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
(priv->bt_full_concurrent != full_concurrent)) {
priv->bt_full_concurrent = full_concurrent;
+ priv->last_bt_traffic_load = priv->bt_traffic_load;
/* Update uCode's rate table. */
tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
@@ -1446,7 +1445,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+ u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
u8 tx_chains_num = priv->hw_params.tx_chains_num;
int ret = 0;
u8 update_search_tbl_counter = 0;
@@ -1464,7 +1463,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
valid_tx_ant =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(priv->eeprom_data->valid_tx_ant);
if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
tbl->action != IWL_LEGACY_SWITCH_SISO)
tbl->action = IWL_LEGACY_SWITCH_SISO;
@@ -1488,7 +1487,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_LEGACY_SWITCH_SISO;
valid_tx_ant =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(priv->eeprom_data->valid_tx_ant);
}
start_action = tbl->action;
@@ -1622,7 +1621,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+ u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
u8 tx_chains_num = priv->hw_params.tx_chains_num;
u8 update_search_tbl_counter = 0;
int ret;
@@ -1640,7 +1639,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
valid_tx_ant =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(priv->eeprom_data->valid_tx_ant);
if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
break;
@@ -1658,7 +1657,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
/* configure as 1x1 if bt full concurrency */
if (priv->bt_full_concurrent) {
valid_tx_ant =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(priv->eeprom_data->valid_tx_ant);
if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
}
@@ -1794,7 +1793,7 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+ u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
u8 tx_chains_num = priv->hw_params.tx_chains_num;
u8 update_search_tbl_counter = 0;
int ret;
@@ -1964,7 +1963,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+ u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
u8 tx_chains_num = priv->hw_params.tx_chains_num;
int ret;
u8 update_search_tbl_counter = 0;
@@ -2698,7 +2697,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
i = lq_sta->last_txrate_idx;
- valid_tx_ant = priv->hw_params.valid_tx_ant;
+ valid_tx_ant = priv->eeprom_data->valid_tx_ant;
if (!lq_sta->search_better_tbl)
active_tbl = lq_sta->active_tbl;
@@ -2892,15 +2891,15 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
/* These values will be overridden later */
lq_sta->lq.general_params.single_stream_ant_msk =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(priv->eeprom_data->valid_tx_ant);
lq_sta->lq.general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant &
- ~first_antenna(priv->hw_params.valid_tx_ant);
+ priv->eeprom_data->valid_tx_ant &
+ ~first_antenna(priv->eeprom_data->valid_tx_ant);
if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
- } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+ } else if (num_of_ant(priv->eeprom_data->valid_tx_ant) == 2) {
lq_sta->lq.general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant;
+ priv->eeprom_data->valid_tx_ant;
}
/* as default allow aggregation for all tids */
@@ -2946,7 +2945,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
if (priv && priv->bt_full_concurrent) {
/* 1x1 only */
tbl_type.ant_type =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(priv->eeprom_data->valid_tx_ant);
}
/* How many times should we repeat the initial rate? */
@@ -2978,7 +2977,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
if (priv->bt_full_concurrent)
valid_tx_ant = ANT_A;
else
- valid_tx_ant = priv->hw_params.valid_tx_ant;
+ valid_tx_ant = priv->eeprom_data->valid_tx_ant;
}
/* Fill rest of rate table */
@@ -3012,7 +3011,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
if (priv && priv->bt_full_concurrent) {
/* 1x1 only */
tbl_type.ant_type =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(priv->eeprom_data->valid_tx_ant);
}
/* Indicate to uCode which entries might be MIMO.
@@ -3099,7 +3098,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
u8 ant_sel_tx;
priv = lq_sta->drv;
- valid_tx_ant = priv->hw_params.valid_tx_ant;
+ valid_tx_ant = priv->eeprom_data->valid_tx_ant;
if (lq_sta->dbg_fixed_rate) {
ant_sel_tx =
((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
@@ -3170,9 +3169,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
desc += sprintf(buff+desc, "fixed rate 0x%X\n",
lq_sta->dbg_fixed_rate);
desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
- (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
- (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
- (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
+ (priv->eeprom_data->valid_tx_ant & ANT_A) ? "ANT_A," : "",
+ (priv->eeprom_data->valid_tx_ant & ANT_B) ? "ANT_B," : "",
+ (priv->eeprom_data->valid_tx_ant & ANT_C) ? "ANT_C" : "");
desc += sprintf(buff+desc, "lq type %s\n",
(is_legacy(tbl->lq_type)) ? "legacy" : "HT");
if (is_Ht(tbl->lq_type)) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
index 82d02e1..ad3aea8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.h
@@ -29,9 +29,10 @@
#include <net/mac80211.h>
-#include "iwl-commands.h"
#include "iwl-config.h"
+#include "commands.h"
+
struct iwl_rate_info {
u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index 403de96..fee5cff 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -32,12 +32,10 @@
#include <linux/sched.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
#include "iwl-io.h"
-#include "iwl-agn-calib.h"
-#include "iwl-agn.h"
-#include "iwl-modparams.h"
+#include "dev.h"
+#include "calib.h"
+#include "agn.h"
#define IWL_CMD_ENTRY(x) [x] = #x
@@ -90,7 +88,6 @@ const char *iwl_dvm_cmd_strings[REPLY_MAX] = {
IWL_CMD_ENTRY(REPLY_PHY_CALIBRATION_CMD),
IWL_CMD_ENTRY(REPLY_RX_PHY_CMD),
IWL_CMD_ENTRY(REPLY_RX_MPDU_CMD),
- IWL_CMD_ENTRY(REPLY_RX),
IWL_CMD_ENTRY(REPLY_COMPRESSED_BA),
IWL_CMD_ENTRY(CALIBRATION_CFG_CMD),
IWL_CMD_ENTRY(CALIBRATION_RES_NOTIFICATION),
@@ -897,8 +894,7 @@ static int iwlagn_calc_rssi(struct iwl_priv *priv,
return max_rssi - agc - IWLAGN_RSSI_OFFSET;
}
-/* Called for REPLY_RX (legacy ABG frames), or
- * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
+/* Called for REPLY_RX_MPDU_CMD */
static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
@@ -913,37 +909,17 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
u32 ampdu_status;
u32 rate_n_flags;
- /**
- * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
- * REPLY_RX: physical layer info is in this buffer
- * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
- * command and cached in priv->last_phy_res
- *
- * Here we set up local variables depending on which command is
- * received.
- */
- if (pkt->hdr.cmd == REPLY_RX) {
- phy_res = (struct iwl_rx_phy_res *)pkt->data;
- header = (struct ieee80211_hdr *)(pkt->data + sizeof(*phy_res)
- + phy_res->cfg_phy_cnt);
-
- len = le16_to_cpu(phy_res->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->data + sizeof(*phy_res) +
- phy_res->cfg_phy_cnt + len);
- ampdu_status = le32_to_cpu(rx_pkt_status);
- } else {
- if (!priv->last_phy_res_valid) {
- IWL_ERR(priv, "MPDU frame without cached PHY data\n");
- return 0;
- }
- phy_res = &priv->last_phy_res;
- amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data;
- header = (struct ieee80211_hdr *)(pkt->data + sizeof(*amsdu));
- len = le16_to_cpu(amsdu->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->data + sizeof(*amsdu) + len);
- ampdu_status = iwlagn_translate_rx_status(priv,
- le32_to_cpu(rx_pkt_status));
+ if (!priv->last_phy_res_valid) {
+ IWL_ERR(priv, "MPDU frame without cached PHY data\n");
+ return 0;
}
+ phy_res = &priv->last_phy_res;
+ amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data;
+ header = (struct ieee80211_hdr *)(pkt->data + sizeof(*amsdu));
+ len = le16_to_cpu(amsdu->byte_count);
+ rx_pkt_status = *(__le32 *)(pkt->data + sizeof(*amsdu) + len);
+ ampdu_status = iwlagn_translate_rx_status(priv,
+ le32_to_cpu(rx_pkt_status));
if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
@@ -1012,6 +988,8 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
rx_status.flag |= RX_FLAG_40MHZ;
if (rate_n_flags & RATE_MCS_SGI_MSK)
rx_status.flag |= RX_FLAG_SHORT_GI;
+ if (rate_n_flags & RATE_MCS_GF_MSK)
+ rx_status.flag |= RX_FLAG_HT_GF;
iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
rxb, &rx_status);
@@ -1124,8 +1102,6 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
- void (*pre_rx_handler)(struct iwl_priv *,
- struct iwl_rx_cmd_buffer *);
int err = 0;
/*
@@ -1135,19 +1111,19 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
*/
iwl_notification_wait_notify(&priv->notif_wait, pkt);
- /* RX data may be forwarded to userspace (using pre_rx_handler) in one
- * of two cases: the first, that the user owns the uCode through
- * testmode - in such case the pre_rx_handler is set and no further
- * processing takes place. The other case is when the user want to
- * monitor the rx w/o affecting the regular flow - the pre_rx_handler
- * will be set but the ownership flag != IWL_OWNERSHIP_TM and the flow
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
+ /*
+	 * RX data may be forwarded to userspace in one of two cases: when
+	 * the user owns the fw through testmode, or when the user requested
+	 * to monitor the rx w/o affecting the regular flow.
+ * In these cases the iwl_test object will handle forwarding the rx
+ * data to user space.
+ * Note that if the ownership flag != IWL_OWNERSHIP_TM the flow
* continues.
- * We need to use ACCESS_ONCE to prevent a case where the handler
- * changes between the check and the call.
*/
- pre_rx_handler = ACCESS_ONCE(priv->pre_rx_handler);
- if (pre_rx_handler)
- pre_rx_handler(priv, rxb);
+ iwl_test_rx(&priv->tst, rxb);
+#endif
+
if (priv->ucode_owner != IWL_OWNERSHIP_TM) {
/* Based on type of command response or notification,
* handle those that need handling via function in
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 0a3aa7c..1089639 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -25,11 +25,11 @@
*****************************************************************************/
#include <linux/etherdevice.h>
-#include "iwl-dev.h"
-#include "iwl-agn.h"
-#include "iwl-agn-calib.h"
#include "iwl-trans.h"
#include "iwl-modparams.h"
+#include "dev.h"
+#include "agn.h"
+#include "calib.h"
/*
* initialize rxon structure with default values from eeprom
@@ -37,8 +37,6 @@
void iwl_connection_init_rx_config(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
- const struct iwl_channel_info *ch_info;
-
memset(&ctx->staging, 0, sizeof(ctx->staging));
if (!ctx->vif) {
@@ -80,14 +78,8 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv,
ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif
- ch_info = iwl_get_channel_info(priv, priv->band,
- le16_to_cpu(ctx->active.channel));
-
- if (!ch_info)
- ch_info = &priv->channel_info[0];
-
- ctx->staging.channel = cpu_to_le16(ch_info->channel);
- priv->band = ch_info->band;
+ ctx->staging.channel = cpu_to_le16(priv->hw->conf.channel->hw_value);
+ priv->band = priv->hw->conf.channel->band;
iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
@@ -175,7 +167,8 @@ static int iwlagn_disconn_pan(struct iwl_priv *priv,
return ret;
}
-void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+static void iwlagn_update_qos(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
{
int ret;
@@ -202,8 +195,8 @@ void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
}
-int iwlagn_update_beacon(struct iwl_priv *priv,
- struct ieee80211_vif *vif)
+static int iwlagn_update_beacon(struct iwl_priv *priv,
+ struct ieee80211_vif *vif)
{
lockdep_assert_held(&priv->mutex);
@@ -215,7 +208,7 @@ int iwlagn_update_beacon(struct iwl_priv *priv,
}
static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
+ struct iwl_rxon_context *ctx)
{
int ret = 0;
struct iwl_rxon_assoc_cmd rxon_assoc;
@@ -427,10 +420,10 @@ static int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
return -EINVAL;
}
- if (tx_power > priv->tx_power_device_lmt) {
+ if (tx_power > DIV_ROUND_UP(priv->eeprom_data->max_tx_pwr_half_dbm, 2)) {
IWL_WARN(priv,
"Requested user TXPOWER %d above upper limit %d.\n",
- tx_power, priv->tx_power_device_lmt);
+ tx_power, priv->eeprom_data->max_tx_pwr_half_dbm);
return -EINVAL;
}
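
Worked example (illustrative value): the EEPROM stores the limit in
half-dBm steps, so max_tx_pwr_half_dbm = 31 means 15.5 dBm and the check
uses DIV_ROUND_UP(31, 2) = 16 dBm as the ceiling; note that the warning
text still prints the raw half-dBm value.
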
@@ -863,8 +856,8 @@ static int iwl_check_rxon_cmd(struct iwl_priv *priv,
* or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
* a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
*/
-int iwl_full_rxon_required(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
+static int iwl_full_rxon_required(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
{
const struct iwl_rxon_cmd *staging = &ctx->staging;
const struct iwl_rxon_cmd *active = &ctx->active;
@@ -1189,7 +1182,6 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
struct iwl_rxon_context *ctx;
struct ieee80211_conf *conf = &hw->conf;
struct ieee80211_channel *channel = conf->channel;
- const struct iwl_channel_info *ch_info;
int ret = 0;
IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed);
@@ -1223,14 +1215,6 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ch_info = iwl_get_channel_info(priv, channel->band,
- channel->hw_value);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
- ret = -EINVAL;
- goto out;
- }
-
for_each_context(priv, ctx) {
/* Configure HT40 channels */
if (ctx->ht.enabled != conf_is_ht(conf))
@@ -1294,9 +1278,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
return ret;
}
-void iwlagn_check_needed_chains(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_bss_conf *bss_conf)
+static void iwlagn_check_needed_chains(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_bss_conf *bss_conf)
{
struct ieee80211_vif *vif = ctx->vif;
struct iwl_rxon_context *tmp;
@@ -1388,7 +1372,7 @@ void iwlagn_check_needed_chains(struct iwl_priv *priv,
ht_conf->single_chain_sufficient = !need_multiple;
}
-void iwlagn_chain_noise_reset(struct iwl_priv *priv)
+static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
{
struct iwl_chain_noise_data *data = &priv->chain_noise_data;
int ret;
@@ -1463,7 +1447,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
if (changes & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
- priv->timestamp = bss_conf->last_tsf;
+ priv->timestamp = bss_conf->sync_tsf;
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
} else {
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index 031d8e2..e3467fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -30,11 +30,8 @@
#include <linux/etherdevice.h>
#include <net/mac80211.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-io.h"
-#include "iwl-agn.h"
-#include "iwl-trans.h"
+#include "dev.h"
+#include "agn.h"
/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
* sending probe req. This should be set long enough to hear probe responses
@@ -54,6 +51,9 @@
#define IWL_CHANNEL_TUNE_TIME 5
#define MAX_SCAN_CHANNEL 50
+/* For a radio reset scan, only a minimal dwell time is needed */
+#define IWL_RADIO_RESET_DWELL_TIME 5
+
static int iwl_send_scan_abort(struct iwl_priv *priv)
{
int ret;
@@ -67,7 +67,6 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
* to receive scan abort command or it does not perform
* hardware scan currently */
if (!test_bit(STATUS_READY, &priv->status) ||
- !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
!test_bit(STATUS_SCAN_HW, &priv->status) ||
test_bit(STATUS_FW_ERROR, &priv->status))
return -EIO;
@@ -101,11 +100,8 @@ static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
ieee80211_scan_completed(priv->hw, aborted);
}
- if (priv->scan_type == IWL_SCAN_ROC) {
- ieee80211_remain_on_channel_expired(priv->hw);
- priv->hw_roc_channel = NULL;
- schedule_delayed_work(&priv->hw_roc_disable_work, 10 * HZ);
- }
+ if (priv->scan_type == IWL_SCAN_ROC)
+ iwl_scan_roc_expired(priv);
priv->scan_type = IWL_SCAN_NORMAL;
priv->scan_vif = NULL;
@@ -134,11 +130,8 @@ static void iwl_process_scan_complete(struct iwl_priv *priv)
goto out_settings;
}
- if (priv->scan_type == IWL_SCAN_ROC) {
- ieee80211_remain_on_channel_expired(priv->hw);
- priv->hw_roc_channel = NULL;
- schedule_delayed_work(&priv->hw_roc_disable_work, 10 * HZ);
- }
+ if (priv->scan_type == IWL_SCAN_ROC)
+ iwl_scan_roc_expired(priv);
if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
int err;
@@ -403,15 +396,21 @@ static u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
{
struct iwl_rxon_context *ctx;
+ int limits[NUM_IWL_RXON_CTX] = {};
+ int n_active = 0;
+ u16 limit;
+
+ BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
/*
* If we're associated, we clamp the dwell time 98%
- * of the smallest beacon interval (minus 2 * channel
- * tune time)
+ * of the beacon interval (minus 2 * channel tune time)
+ * If both contexts are active, we have to restrict to
+ * 1/2 of the minimum of them, because they might be in
+	 * lock-step, with the time in between only half of
+	 * what we'd have in each of them.
*/
for_each_context(priv, ctx) {
- u16 value;
-
switch (ctx->staging.dev_type) {
case RXON_DEV_TYPE_P2P:
/* no timing constraints */
@@ -431,14 +430,25 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
break;
}
- value = ctx->beacon_int;
- if (!value)
- value = IWL_PASSIVE_DWELL_BASE;
- value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
- dwell_time = min(value, dwell_time);
+ limits[n_active++] = ctx->beacon_int ?: IWL_PASSIVE_DWELL_BASE;
}
- return dwell_time;
+ switch (n_active) {
+ case 0:
+ return dwell_time;
+ case 2:
+ limit = (limits[1] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
+ limit /= 2;
+ dwell_time = min(limit, dwell_time);
+ /* fall through to limit further */
+ case 1:
+ limit = (limits[0] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
+ limit /= n_active;
+ return min(limit, dwell_time);
+ default:
+ WARN_ON_ONCE(1);
+ return dwell_time;
+ }
}
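
Worked example (illustrative numbers): a single associated context
beaconing every 100 TU clamps the passive dwell to
100 * 98 / 100 - 2 * IWL_CHANNEL_TUNE_TIME = 98 - 10 = 88 TU; with two
such contexts active the limit is halved again, giving 44 TU.
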
static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
@@ -453,27 +463,17 @@ static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
/* Return valid, unused, channel for a passive scan to reset the RF */
static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
- enum ieee80211_band band)
+ enum ieee80211_band band)
{
- const struct iwl_channel_info *ch_info;
- int i;
- u8 channel = 0;
- u8 min, max;
+ struct ieee80211_supported_band *sband = priv->hw->wiphy->bands[band];
struct iwl_rxon_context *ctx;
+ int i;
- if (band == IEEE80211_BAND_5GHZ) {
- min = 14;
- max = priv->channel_count;
- } else {
- min = 0;
- max = 14;
- }
-
- for (i = min; i < max; i++) {
+ for (i = 0; i < sband->n_channels; i++) {
bool busy = false;
for_each_context(priv, ctx) {
- busy = priv->channel_info[i].channel ==
+ busy = sband->channels[i].hw_value ==
le16_to_cpu(ctx->staging.channel);
if (busy)
break;
@@ -482,54 +482,46 @@ static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
if (busy)
continue;
- channel = priv->channel_info[i].channel;
- ch_info = iwl_get_channel_info(priv, band, channel);
- if (is_channel_valid(ch_info))
- break;
+ if (!(sband->channels[i].flags & IEEE80211_CHAN_DISABLED))
+ return sband->channels[i].hw_value;
}
- return channel;
+ return 0;
}
-static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- enum ieee80211_band band,
- struct iwl_scan_channel *scan_ch)
+static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ struct iwl_scan_channel *scan_ch)
{
const struct ieee80211_supported_band *sband;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int added = 0;
- u16 channel = 0;
+ u16 channel;
sband = iwl_get_hw_mode(priv, band);
if (!sband) {
IWL_ERR(priv, "invalid band\n");
- return added;
+ return 0;
}
- active_dwell = iwl_get_active_dwell_time(priv, band, 0);
- passive_dwell = iwl_get_passive_dwell_time(priv, band);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
channel = iwl_get_single_channel_number(priv, band);
if (channel) {
scan_ch->channel = cpu_to_le16(channel);
scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+ scan_ch->active_dwell =
+ cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
+ scan_ch->passive_dwell =
+ cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
/* Set txpower levels to defaults */
scan_ch->dsp_atten = 110;
if (band == IEEE80211_BAND_5GHZ)
scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
else
scan_ch->tx_gain = ((1 << 5) | (5 << 3));
- added++;
- } else
- IWL_ERR(priv, "no valid channel found\n");
- return added;
+ return 1;
+ }
+
+ IWL_ERR(priv, "no valid channel found\n");
+ return 0;
}
static int iwl_get_channels_for_scan(struct iwl_priv *priv,
@@ -540,7 +532,6 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
{
struct ieee80211_channel *chan;
const struct ieee80211_supported_band *sband;
- const struct iwl_channel_info *ch_info;
u16 passive_dwell = 0;
u16 active_dwell = 0;
int added, i;
@@ -565,16 +556,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
channel = chan->hw_value;
scan_ch->channel = cpu_to_le16(channel);
- ch_info = iwl_get_channel_info(priv, band, channel);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_SCAN(priv,
- "Channel %d is INVALID for this band.\n",
- channel);
- continue;
- }
-
- if (!is_active || is_channel_passive(ch_info) ||
- (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ if (!is_active || (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
else
scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
@@ -678,12 +660,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
u16 rx_chain = 0;
enum ieee80211_band band;
u8 n_probes = 0;
- u8 rx_ant = priv->hw_params.valid_rx_ant;
+ u8 rx_ant = priv->eeprom_data->valid_rx_ant;
u8 rate;
bool is_active = false;
int chan_mod;
u8 active_chains;
- u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
+ u8 scan_tx_antennas = priv->eeprom_data->valid_tx_ant;
int ret;
int scan_cmd_size = sizeof(struct iwl_scan_cmd) +
MAX_SCAN_CHANNEL * sizeof(struct iwl_scan_channel) +
@@ -755,6 +737,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
switch (priv->scan_type) {
case IWL_SCAN_RADIO_RESET:
IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
+ /*
+ * Override quiet time as firmware checks that active
+			 * dwell is >= quiet; since we use a passive scan it
+			 * won't actually be used.
+ */
+ scan->quiet_time = cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
break;
case IWL_SCAN_NORMAL:
if (priv->scan_request->n_ssids) {
@@ -893,7 +881,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
/* MIMO is not used here, but value is required */
rx_chain |=
- priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+ priv->eeprom_data->valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
@@ -928,7 +916,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
switch (priv->scan_type) {
case IWL_SCAN_RADIO_RESET:
scan->channel_count =
- iwl_get_single_channel_for_scan(priv, vif, band,
+ iwl_get_channel_for_reset_scan(priv, vif, band,
(void *)&scan->data[cmd_len]);
break;
case IWL_SCAN_NORMAL:
@@ -994,8 +982,10 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
set_bit(STATUS_SCAN_HW, &priv->status);
ret = iwlagn_set_pan_params(priv);
- if (ret)
+ if (ret) {
+ clear_bit(STATUS_SCAN_HW, &priv->status);
return ret;
+ }
ret = iwl_dvm_send_cmd(priv, &cmd);
if (ret) {
@@ -1008,7 +998,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
void iwl_init_scan_params(struct iwl_priv *priv)
{
- u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
+ u8 ant_idx = fls(priv->eeprom_data->valid_tx_ant) - 1;
if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
@@ -1158,3 +1148,40 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
mutex_unlock(&priv->mutex);
}
}
+
+void iwl_scan_roc_expired(struct iwl_priv *priv)
+{
+ /*
+	 * Set the status bit before reading the counter: otherwise, if
+	 * atomic_read() sees a non-zero count here while the final
+	 * iwl_scan_offchannel_skb_status() checks the bit before we set it,
+	 * the remain-on-channel would never be completed.
+	 */
+ set_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status);
+
+ if (atomic_read(&priv->num_aux_in_flight) == 0) {
+ ieee80211_remain_on_channel_expired(priv->hw);
+ priv->hw_roc_channel = NULL;
+ schedule_delayed_work(&priv->hw_roc_disable_work,
+ 10 * HZ);
+
+ clear_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status);
+ } else {
+ IWL_DEBUG_SCAN(priv, "ROC done with %d frames in aux\n",
+ atomic_read(&priv->num_aux_in_flight));
+ }
+}
+
+void iwl_scan_offchannel_skb(struct iwl_priv *priv)
+{
+ WARN_ON(!priv->hw_roc_start_notified);
+ atomic_inc(&priv->num_aux_in_flight);
+}
+
+void iwl_scan_offchannel_skb_status(struct iwl_priv *priv)
+{
+ if (atomic_dec_return(&priv->num_aux_in_flight) == 0 &&
+ test_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "0 aux frames. Calling ROC expired\n");
+ iwl_scan_roc_expired(priv);
+ }
+}
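
Taken together, these three helpers form a small reference count around
off-channel (ROC) traffic: the Tx path is expected to call
iwl_scan_offchannel_skb() when it queues a frame on the aux queue and
iwl_scan_offchannel_skb_status() when its Tx status returns, so the
remain-on-channel is only reported to mac80211 as expired once the last
in-flight aux frame has completed - whichever of the ROC timeout and the
final status happens last triggers it.
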
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index b31584e..b29b798 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -28,10 +28,9 @@
*****************************************************************************/
#include <linux/etherdevice.h>
#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-agn.h"
#include "iwl-trans.h"
+#include "dev.h"
+#include "agn.h"
const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
@@ -171,26 +170,6 @@ int iwl_send_add_sta(struct iwl_priv *priv,
return cmd.handler_status;
}
-static bool iwl_is_channel_extension(struct iwl_priv *priv,
- enum ieee80211_band band,
- u16 channel, u8 extension_chan_offset)
-{
- const struct iwl_channel_info *ch_info;
-
- ch_info = iwl_get_channel_info(priv, band, channel);
- if (!is_channel_valid(ch_info))
- return false;
-
- if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
- return !(ch_info->ht40_extension_channel &
- IEEE80211_CHAN_NO_HT40PLUS);
- else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
- return !(ch_info->ht40_extension_channel &
- IEEE80211_CHAN_NO_HT40MINUS);
-
- return false;
-}
-
bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_sta_ht_cap *ht_cap)
@@ -198,21 +177,25 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
return false;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (priv->disable_ht40)
+ return false;
+#endif
+
/*
- * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
- * the bit will not set if it is pure 40MHz case
+ * Remainder of this function checks ht_cap, but if it's
+ * NULL then we can do HT40 (special case for RXON)
*/
- if (ht_cap && !ht_cap->ht_supported)
+ if (!ht_cap)
+ return true;
+
+ if (!ht_cap->ht_supported)
return false;
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (priv->disable_ht40)
+ if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
return false;
-#endif
- return iwl_is_channel_extension(priv, priv->band,
- le16_to_cpu(ctx->staging.channel),
- ctx->ht.extension_chan_offset);
+ return true;
}
static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
@@ -236,6 +219,7 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n",
+ sta->addr,
(mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
"static" :
(mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
@@ -649,23 +633,23 @@ static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
rate_flags |= RATE_MCS_CCK_MSK;
- rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
+ rate_flags |= first_antenna(priv->eeprom_data->valid_tx_ant) <<
RATE_MCS_ANT_POS;
rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
link_cmd->general_params.single_stream_ant_msk =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(priv->eeprom_data->valid_tx_ant);
link_cmd->general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant &
- ~first_antenna(priv->hw_params.valid_tx_ant);
+ priv->eeprom_data->valid_tx_ant &
+ ~first_antenna(priv->eeprom_data->valid_tx_ant);
if (!link_cmd->general_params.dual_stream_ant_msk) {
link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
- } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+ } else if (num_of_ant(priv->eeprom_data->valid_tx_ant) == 2) {
link_cmd->general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant;
+ priv->eeprom_data->valid_tx_ant;
}
link_cmd->agg_params.agg_dis_start_th =
@@ -772,7 +756,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
~IWL_STA_DRIVER_ACTIVE;
priv->stations[i].used &=
~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_bh(&priv->sta_lock);
+ continue;
}
/*
* Rate scaling has already been initialized, send
@@ -1267,7 +1251,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
key_flags |= STA_KEY_MULTICAST_MSK;
sta_cmd.key.key_flags = key_flags;
- sta_cmd.key.key_offset = WEP_INVALID_OFFSET;
+ sta_cmd.key.key_offset = keyconf->hw_key_idx;
sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
diff --git a/drivers/net/wireless/iwlwifi/dvm/testmode.c b/drivers/net/wireless/iwlwifi/dvm/testmode.c
new file mode 100644
index 0000000..57b918c
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/dvm/testmode.c
@@ -0,0 +1,471 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <net/net_namespace.h>
+#include <linux/netdevice.h>
+#include <net/cfg80211.h>
+#include <net/mac80211.h>
+#include <net/netlink.h>
+
+#include "iwl-debug.h"
+#include "iwl-trans.h"
+#include "dev.h"
+#include "agn.h"
+#include "iwl-test.h"
+#include "iwl-testmode.h"
+
+static int iwl_testmode_send_cmd(struct iwl_op_mode *op_mode,
+ struct iwl_host_cmd *cmd)
+{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+ return iwl_dvm_send_cmd(priv, cmd);
+}
+
+static bool iwl_testmode_valid_hw_addr(u32 addr)
+{
+ if (iwlagn_hw_valid_rtc_data_addr(addr))
+ return true;
+
+ if (IWLAGN_RTC_INST_LOWER_BOUND <= addr &&
+ addr < IWLAGN_RTC_INST_UPPER_BOUND)
+ return true;
+
+ return false;
+}
+
+static u32 iwl_testmode_get_fw_ver(struct iwl_op_mode *op_mode)
+{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+ return priv->fw->ucode_ver;
+}
+
+static struct sk_buff*
+iwl_testmode_alloc_reply(struct iwl_op_mode *op_mode, int len)
+{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+ return cfg80211_testmode_alloc_reply_skb(priv->hw->wiphy, len);
+}
+
+static int iwl_testmode_reply(struct iwl_op_mode *op_mode, struct sk_buff *skb)
+{
+ return cfg80211_testmode_reply(skb);
+}
+
+static struct sk_buff *iwl_testmode_alloc_event(struct iwl_op_mode *op_mode,
+ int len)
+{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+ return cfg80211_testmode_alloc_event_skb(priv->hw->wiphy, len,
+ GFP_ATOMIC);
+}
+
+static void iwl_testmode_event(struct iwl_op_mode *op_mode, struct sk_buff *skb)
+{
+ return cfg80211_testmode_event(skb, GFP_ATOMIC);
+}
+
+static struct iwl_test_ops tst_ops = {
+ .send_cmd = iwl_testmode_send_cmd,
+ .valid_hw_addr = iwl_testmode_valid_hw_addr,
+ .get_fw_ver = iwl_testmode_get_fw_ver,
+ .alloc_reply = iwl_testmode_alloc_reply,
+ .reply = iwl_testmode_reply,
+ .alloc_event = iwl_testmode_alloc_event,
+ .event = iwl_testmode_event,
+};
+
+void iwl_testmode_init(struct iwl_priv *priv)
+{
+ iwl_test_init(&priv->tst, priv->trans, &tst_ops);
+}
+
+void iwl_testmode_free(struct iwl_priv *priv)
+{
+ iwl_test_free(&priv->tst);
+}
+
+static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
+{
+ struct iwl_notification_wait calib_wait;
+ static const u8 calib_complete[] = {
+ CALIBRATION_COMPLETE_NOTIFICATION
+ };
+ int ret;
+
+ iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
+ calib_complete, ARRAY_SIZE(calib_complete),
+ NULL, NULL);
+ ret = iwl_init_alive_start(priv);
+ if (ret) {
+ IWL_ERR(priv, "Fail init calibration: %d\n", ret);
+ goto cfg_init_calib_error;
+ }
+
+ ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, 2 * HZ);
+ if (ret)
+ IWL_ERR(priv, "Error detecting"
+ " CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
+ return ret;
+
+cfg_init_calib_error:
+ iwl_remove_notification(&priv->notif_wait, &calib_wait);
+ return ret;
+}
+
+/*
+ * This function handles the user application commands for the driver.
+ *
+ * It retrieves the command ID carried in IWL_TM_ATTR_COMMAND and dispatches
+ * it to the corresponding handler.
+ *
+ * For an unknown command ID, -ENOSYS is returned; otherwise, the return
+ * value of the actual command execution is replied to the user application.
+ *
+ * If there is any message to return to user space, IWL_TM_ATTR_SYNC_RSP
+ * carries the message and IWL_TM_ATTR_COMMAND must be set to
+ * IWL_TM_CMD_DEV2APP_SYNC_RSP.
+ *
+ * @hw: ieee80211_hw object that represents the device
+ * @tb: gnl message fields from the user space
+ */
+static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
+{
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+ struct iwl_trans *trans = priv->trans;
+ struct sk_buff *skb;
+ unsigned char *rsp_data_ptr = NULL;
+ int status = 0, rsp_data_len = 0;
+ u32 inst_size = 0, data_size = 0;
+ const struct fw_img *img;
+
+ switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
+ case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
+ rsp_data_ptr = (unsigned char *)priv->cfg->name;
+ rsp_data_len = strlen(priv->cfg->name);
+ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
+ rsp_data_len + 20);
+ if (!skb) {
+ IWL_ERR(priv, "Memory allocation fail\n");
+ return -ENOMEM;
+ }
+ if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
+ IWL_TM_CMD_DEV2APP_SYNC_RSP) ||
+ nla_put(skb, IWL_TM_ATTR_SYNC_RSP,
+ rsp_data_len, rsp_data_ptr))
+ goto nla_put_failure;
+ status = cfg80211_testmode_reply(skb);
+ if (status < 0)
+ IWL_ERR(priv, "Error sending msg : %d\n", status);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
+ status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
+ if (status)
+ IWL_ERR(priv, "Error loading init ucode: %d\n", status);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
+ iwl_testmode_cfg_init_calib(priv);
+ priv->ucode_loaded = false;
+ iwl_trans_stop_device(trans);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
+ status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
+ if (status) {
+ IWL_ERR(priv,
+ "Error loading runtime ucode: %d\n", status);
+ break;
+ }
+ status = iwl_alive_start(priv);
+ if (status)
+ IWL_ERR(priv,
+ "Error starting the device: %d\n", status);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
+ iwl_scan_cancel_timeout(priv, 200);
+ priv->ucode_loaded = false;
+ iwl_trans_stop_device(trans);
+ status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
+ if (status) {
+ IWL_ERR(priv,
+ "Error loading WOWLAN ucode: %d\n", status);
+ break;
+ }
+ status = iwl_alive_start(priv);
+ if (status)
+ IWL_ERR(priv,
+ "Error starting the device: %d\n", status);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_GET_EEPROM:
+ if (priv->eeprom_blob) {
+ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
+ priv->eeprom_blob_size + 20);
+ if (!skb) {
+ IWL_ERR(priv, "Memory allocation fail\n");
+ return -ENOMEM;
+ }
+ if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
+ IWL_TM_CMD_DEV2APP_EEPROM_RSP) ||
+ nla_put(skb, IWL_TM_ATTR_EEPROM,
+ priv->eeprom_blob_size,
+ priv->eeprom_blob))
+ goto nla_put_failure;
+ status = cfg80211_testmode_reply(skb);
+ if (status < 0)
+ IWL_ERR(priv, "Error sending msg : %d\n",
+ status);
+ } else
+ return -ENODATA;
+ break;
+
+ case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
+ if (!tb[IWL_TM_ATTR_FIXRATE]) {
+ IWL_ERR(priv, "Missing fixrate setting\n");
+ return -ENOMSG;
+ }
+ priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
+ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + 8);
+ if (!skb) {
+ IWL_ERR(priv, "Memory allocation fail\n");
+ return -ENOMEM;
+ }
+ if (!priv->ucode_loaded) {
+ IWL_ERR(priv, "No uCode has not been loaded\n");
+ return -EINVAL;
+ } else {
+ img = &priv->fw->img[priv->cur_ucode];
+ inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
+ data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
+ }
+ if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) ||
+ nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
+ nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
+ goto nla_put_failure;
+ status = cfg80211_testmode_reply(skb);
+ if (status < 0)
+ IWL_ERR(priv, "Error sending msg : %d\n", status);
+ break;
+
+ default:
+ IWL_ERR(priv, "Unknown testmode driver command ID\n");
+ return -ENOSYS;
+ }
+ return status;
+
+nla_put_failure:
+ kfree_skb(skb);
+ return -EMSGSIZE;
+}
+
+/*
+ * This function handles user application requests to switch uCode ownership.
+ *
+ * It retrieves the mandatory field IWL_TM_ATTR_UCODE_OWNER and
+ * decides who the current owner of the uCode is.
+ *
+ * If the current owner is IWL_OWNERSHIP_TM, only host commands coming
+ * from testmode are delivered to the uCode; all other host commands
+ * are dropped.
+ *
+ * By default, the driver owns the uCode in normal operational mode.
+ *
+ * @hw: ieee80211_hw object that represents the device
+ * @tb: gnl message fields from the user space
+ */
+static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
+{
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+ u8 owner;
+
+ if (!tb[IWL_TM_ATTR_UCODE_OWNER]) {
+ IWL_ERR(priv, "Missing ucode owner\n");
+ return -ENOMSG;
+ }
+
+ owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
+ if (owner == IWL_OWNERSHIP_DRIVER) {
+ priv->ucode_owner = owner;
+ iwl_test_enable_notifications(&priv->tst, false);
+ } else if (owner == IWL_OWNERSHIP_TM) {
+ priv->ucode_owner = owner;
+ iwl_test_enable_notifications(&priv->tst, true);
+ } else {
+ IWL_ERR(priv, "Invalid owner\n");
+ return -EINVAL;
+ }
+ return 0;
+}
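As a side note on the ownership switch above: while IWL_OWNERSHIP_TM is set, only testmode-originated host commands are expected to reach the uCode. A minimal sketch of such a gate is shown below; the helper name and its from_testmode argument are hypothetical and only illustrate the check, which in practice lives in the dvm host-command send path, not in this file.

/* Hypothetical sketch of the ownership gate; not part of this patch. */
static bool iwl_cmd_allowed(struct iwl_priv *priv, bool from_testmode)
{
	/* While testmode owns the uCode, drop every non-testmode command. */
	if (priv->ucode_owner == IWL_OWNERSHIP_TM && !from_testmode)
		return false;
	return true;
}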
+
+/* The testmode gnl message handler that takes the gnl message from user
+ * space, parses it per the policy iwl_testmode_gnl_msg_policy, and then
+ * invokes the corresponding handler.
+ *
+ * This function is invoked when a user space application sends a gnl
+ * message through the testmode tunnel NL80211_CMD_TESTMODE provided
+ * by nl80211.
+ *
+ * It retrieves the mandatory field, IWL_TM_ATTR_COMMAND, before
+ * dispatching it to the corresponding handler.
+ *
+ * If IWL_TM_ATTR_COMMAND is missing, -ENOMSG is replied to the user
+ * application; if the command is unknown, -ENOSYS is replied;
+ * otherwise, the command is dispatched to the respective handler.
+ *
+ * @hw: ieee80211_hw object that represents the device
+ * @data: pointer to user space message
+ * @len: length in byte of @data
+ */
+int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
+{
+ struct nlattr *tb[IWL_TM_ATTR_MAX];
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+ int result;
+
+ result = iwl_test_parse(&priv->tst, tb, data, len);
+ if (result)
+ return result;
+
+ /* in case multiple accesses to the device happen */
+ mutex_lock(&priv->mutex);
+ switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
+ case IWL_TM_CMD_APP2DEV_UCODE:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
+ case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
+ case IWL_TM_CMD_APP2DEV_END_TRACE:
+ case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
+ case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
+ case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
+ case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
+ case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
+ result = iwl_test_handle_cmd(&priv->tst, tb);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
+ case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
+ case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
+ case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
+ case IWL_TM_CMD_APP2DEV_GET_EEPROM:
+ case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
+ case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
+ case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
+ IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
+ result = iwl_testmode_driver(hw, tb);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_OWNERSHIP:
+ IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n");
+ result = iwl_testmode_ownership(hw, tb);
+ break;
+
+ default:
+ IWL_ERR(priv, "Unknown testmode command\n");
+ result = -ENOSYS;
+ break;
+ }
+ mutex_unlock(&priv->mutex);
+
+ if (result)
+ IWL_ERR(priv, "Test cmd failed result=%d\n", result);
+ return result;
+}
+
+int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ void *data, int len)
+{
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+ int result;
+ u32 cmd;
+
+ if (cb->args[3]) {
+ /* offset by 1 since commands start at 0 */
+ cmd = cb->args[3] - 1;
+ } else {
+ struct nlattr *tb[IWL_TM_ATTR_MAX];
+
+ result = iwl_test_parse(&priv->tst, tb, data, len);
+ if (result)
+ return result;
+
+ cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
+ cb->args[3] = cmd + 1;
+ }
+
+ /* in case multiple accesses to the device happen */
+ mutex_lock(&priv->mutex);
+ result = iwl_test_dump(&priv->tst, cmd, skb, cb);
+ mutex_unlock(&priv->mutex);
+ return result;
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c
index a5cfe0a..eb86443 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.c
@@ -31,17 +31,14 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
-
#include <net/mac80211.h>
-
-#include "iwl-agn.h"
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
#include "iwl-io.h"
-#include "iwl-commands.h"
-#include "iwl-debug.h"
-#include "iwl-agn-tt.h"
#include "iwl-modparams.h"
+#include "iwl-debug.h"
+#include "agn.h"
+#include "dev.h"
+#include "commands.h"
+#include "tt.h"
/* default Thermal Throttling transaction table
* Current state | Throttling Down | Throttling Up
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.h b/drivers/net/wireless/iwlwifi/dvm/tt.h
index 86bbf47..44c7c8f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.h
@@ -28,7 +28,7 @@
#ifndef __iwl_tt_setting_h__
#define __iwl_tt_setting_h__
-#include "iwl-commands.h"
+#include "commands.h"
#define IWL_ABSOLUTE_ZERO 0
#define IWL_ABSOLUTE_MAX 0xFFFFFFFF
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 3366e2e..5971a23 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -32,12 +32,11 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/ieee80211.h>
-
-#include "iwl-dev.h"
#include "iwl-io.h"
-#include "iwl-agn-hw.h"
-#include "iwl-agn.h"
#include "iwl-trans.h"
+#include "iwl-agn-hw.h"
+#include "dev.h"
+#include "agn.h"
static const u8 tid_to_ac[] = {
IEEE80211_AC_BE,
@@ -187,7 +186,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
rate_idx = info->control.rates[0].idx;
if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
- rate_idx = rate_lowest_index(&priv->bands[info->band],
+ rate_idx = rate_lowest_index(
+ &priv->eeprom_data->bands[info->band],
info->control.sta);
/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
if (info->band == IEEE80211_BAND_5GHZ)
@@ -207,10 +207,11 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
priv->bt_full_concurrent) {
/* operated as 1x1 in full concurrency mode */
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- first_antenna(priv->hw_params.valid_tx_ant));
+ first_antenna(priv->eeprom_data->valid_tx_ant));
} else
- priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- priv->hw_params.valid_tx_ant);
+ priv->mgmt_tx_ant = iwl_toggle_tx_ant(
+ priv, priv->mgmt_tx_ant,
+ priv->eeprom_data->valid_tx_ant);
rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
/* Set the rate in the TX cmd */
@@ -296,7 +297,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_station_priv *sta_priv = NULL;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl_device_cmd *dev_cmd = NULL;
+ struct iwl_device_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd;
__le16 fc;
u8 hdr_len;
@@ -378,7 +379,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (info->flags & IEEE80211_TX_CTL_AMPDU)
is_agg = true;
- dev_cmd = kmem_cache_alloc(iwl_tx_cmd_pool, GFP_ATOMIC);
+ dev_cmd = iwl_trans_alloc_tx_cmd(priv->trans);
if (unlikely(!dev_cmd))
goto drop_unlock_priv;
@@ -402,6 +403,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
info->driver_data[0] = ctx;
info->driver_data[1] = dev_cmd;
+ /* From now on, we cannot access info->control */
spin_lock(&priv->sta_lock);
@@ -486,11 +488,14 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (sta_priv && sta_priv->client && !is_agg)
atomic_inc(&sta_priv->pending_frames);
+ if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
+ iwl_scan_offchannel_skb(priv);
+
return 0;
drop_unlock_sta:
if (dev_cmd)
- kmem_cache_free(iwl_tx_cmd_pool, dev_cmd);
+ iwl_trans_free_tx_cmd(priv->trans, dev_cmd);
spin_unlock(&priv->sta_lock);
drop_unlock_priv:
return -1;
@@ -597,7 +602,7 @@ turn_off:
* time, or we hadn't time to drain the AC queues.
*/
if (agg_state == IWL_AGG_ON)
- iwl_trans_tx_agg_disable(priv->trans, txq_id);
+ iwl_trans_txq_disable(priv->trans, txq_id);
else
IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
agg_state);
@@ -686,9 +691,8 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
fifo = ctx->ac_to_fifo[tid_to_ac[tid]];
- iwl_trans_tx_agg_setup(priv->trans, q, fifo,
- sta_priv->sta_id, tid,
- buf_size, ssn);
+ iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
+ buf_size, ssn);
/*
* If the limit is 0, then it wasn't initialised yet,
@@ -753,8 +757,8 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
IWL_DEBUG_TX_QUEUES(priv,
"Can continue DELBA flow ssn = next_recl ="
" %d", tid_data->next_reclaimed);
- iwl_trans_tx_agg_disable(priv->trans,
- tid_data->agg.txq_id);
+ iwl_trans_txq_disable(priv->trans,
+ tid_data->agg.txq_id);
iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
tid_data->agg.state = IWL_AGG_OFF;
ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
@@ -1136,6 +1140,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct sk_buff *skb;
struct iwl_rxon_context *ctx;
bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
+ bool is_offchannel_skb;
tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
IWLAGN_TX_RES_TID_POS;
@@ -1149,6 +1154,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
__skb_queue_head_init(&skbs);
+ is_offchannel_skb = false;
+
if (tx_resp->frame_count == 1) {
u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10);
@@ -1176,7 +1183,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
}
/*we can free until ssn % q.n_bd not inclusive */
- WARN_ON(iwl_reclaim(priv, sta_id, tid, txq_id, ssn, &skbs));
+ WARN_ON_ONCE(iwl_reclaim(priv, sta_id, tid,
+ txq_id, ssn, &skbs));
iwlagn_check_ratid_empty(priv, sta_id, tid);
freed = 0;
@@ -1189,8 +1197,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
info = IEEE80211_SKB_CB(skb);
ctx = info->driver_data[0];
- kmem_cache_free(iwl_tx_cmd_pool,
- (info->driver_data[1]));
+ iwl_trans_free_tx_cmd(priv->trans,
+ info->driver_data[1]);
memset(&info->status, 0, sizeof(info->status));
@@ -1225,10 +1233,19 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
if (!is_agg)
iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
+ is_offchannel_skb =
+ (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
freed++;
}
WARN_ON(!is_agg && freed != 1);
+
+ /*
+ * An offchannel frame can be sent only on the AUX queue, where
+ * there is no aggregation (and reordering), so only a single
+ * skb is expected to be processed.
+ */
+ WARN_ON(is_offchannel_skb && freed != 1);
}
iwl_check_abort_status(priv, tx_resp->frame_count, status);
@@ -1239,6 +1256,9 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
ieee80211_tx_status(priv->hw, skb);
}
+ if (is_offchannel_skb)
+ iwl_scan_offchannel_skb_status(priv);
+
return 0;
}
@@ -1341,7 +1361,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
WARN_ON_ONCE(1);
info = IEEE80211_SKB_CB(skb);
- kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1]));
+ iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
if (freed == 1) {
/* this is the first skb we deliver in this batch */
diff --git a/drivers/net/wireless/iwlwifi/iwl-ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index bc40dc6..6d8d6dd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -30,15 +30,16 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include "iwl-dev.h"
#include "iwl-io.h"
#include "iwl-agn-hw.h"
-#include "iwl-agn.h"
-#include "iwl-agn-calib.h"
#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-op-mode.h"
+#include "dev.h"
+#include "agn.h"
+#include "calib.h"
+
/******************************************************************************
*
* uCode download functions
@@ -60,8 +61,7 @@ iwl_get_ucode_image(struct iwl_priv *priv, enum iwl_ucode_type ucode_type)
static int iwl_set_Xtal_calib(struct iwl_priv *priv)
{
struct iwl_calib_xtal_freq_cmd cmd;
- __le16 *xtal_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
+ __le16 *xtal_calib = priv->eeprom_data->xtal_calib;
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
@@ -72,12 +72,10 @@ static int iwl_set_Xtal_calib(struct iwl_priv *priv)
static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
{
struct iwl_calib_temperature_offset_cmd cmd;
- __le16 *offset_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
- memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib));
+ cmd.radio_sensor_offset = priv->eeprom_data->raw_temperature;
if (!(cmd.radio_sensor_offset))
cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
@@ -89,27 +87,17 @@ static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv)
{
struct iwl_calib_temperature_offset_v2_cmd cmd;
- __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv,
- EEPROM_KELVIN_TEMPERATURE);
- __le16 *offset_calib_low =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
- struct iwl_eeprom_calib_hdr *hdr;
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
- hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
- EEPROM_CALIB_ALL);
- memcpy(&cmd.radio_sensor_offset_high, offset_calib_high,
- sizeof(*offset_calib_high));
- memcpy(&cmd.radio_sensor_offset_low, offset_calib_low,
- sizeof(*offset_calib_low));
- if (!(cmd.radio_sensor_offset_low)) {
+ cmd.radio_sensor_offset_high = priv->eeprom_data->kelvin_temperature;
+ cmd.radio_sensor_offset_low = priv->eeprom_data->raw_temperature;
+ if (!cmd.radio_sensor_offset_low) {
IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n");
cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
}
- memcpy(&cmd.burntVoltageRef, &hdr->voltage,
- sizeof(hdr->voltage));
+ cmd.burntVoltageRef = priv->eeprom_data->calib_voltage;
IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n",
le16_to_cpu(cmd.radio_sensor_offset_high));
@@ -177,7 +165,7 @@ int iwl_init_alive_start(struct iwl_priv *priv)
return 0;
}
-int iwl_send_wimax_coex(struct iwl_priv *priv)
+static int iwl_send_wimax_coex(struct iwl_priv *priv)
{
struct iwl_wimax_coex_cmd coex_cmd;
@@ -238,13 +226,50 @@ int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
return ret;
}
+static const u8 iwlagn_default_queue_to_tx_fifo[] = {
+ IWL_TX_FIFO_VO,
+ IWL_TX_FIFO_VI,
+ IWL_TX_FIFO_BE,
+ IWL_TX_FIFO_BK,
+};
+
+static const u8 iwlagn_ipan_queue_to_tx_fifo[] = {
+ IWL_TX_FIFO_VO,
+ IWL_TX_FIFO_VI,
+ IWL_TX_FIFO_BE,
+ IWL_TX_FIFO_BK,
+ IWL_TX_FIFO_BK_IPAN,
+ IWL_TX_FIFO_BE_IPAN,
+ IWL_TX_FIFO_VI_IPAN,
+ IWL_TX_FIFO_VO_IPAN,
+ IWL_TX_FIFO_BE_IPAN,
+ IWL_TX_FIFO_UNUSED,
+ IWL_TX_FIFO_AUX,
+};
static int iwl_alive_notify(struct iwl_priv *priv)
{
+ const u8 *queue_to_txf;
+ u8 n_queues;
int ret;
+ int i;
iwl_trans_fw_alive(priv->trans);
+ if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN &&
+ priv->eeprom_data->sku & EEPROM_SKU_CAP_IPAN_ENABLE) {
+ n_queues = ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
+ queue_to_txf = iwlagn_ipan_queue_to_tx_fifo;
+ } else {
+ n_queues = ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
+ queue_to_txf = iwlagn_default_queue_to_tx_fifo;
+ }
+
+ for (i = 0; i < n_queues; i++)
+ if (queue_to_txf[i] != IWL_TX_FIFO_UNUSED)
+ iwl_trans_ac_txq_enable(priv->trans, i,
+ queue_to_txf[i]);
+
priv->passive_no_rx = false;
priv->transport_queue_stop = 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 67b28aa..87f465a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -113,7 +113,7 @@ enum iwl_led_mode {
#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE 0
/* TX queue watchdog timeouts in mSecs */
-#define IWL_WATCHHDOG_DISABLED 0
+#define IWL_WATCHDOG_DISABLED 0
#define IWL_DEF_WD_TIMEOUT 2000
#define IWL_LONG_WD_TIMEOUT 10000
#define IWL_MAX_WD_TIMEOUT 120000
@@ -143,7 +143,7 @@ enum iwl_led_mode {
* @chain_noise_scale: default chain noise scale used for gain computation
* @wd_timeout: TX queues watchdog timeout
* @max_event_log_size: size of event log buffer size for ucode event logging
- * @shadow_reg_enable: HW shadhow register bit
+ * @shadow_reg_enable: HW shadow register support
* @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
* @no_idle_support: do not support idle mode
*/
@@ -177,18 +177,39 @@ struct iwl_base_params {
struct iwl_bt_params {
bool advanced_bt_coexist;
u8 bt_init_traffic_load;
- u8 bt_prio_boost;
+ u32 bt_prio_boost;
u16 agg_time_limit;
bool bt_sco_disable;
bool bt_session_2;
};
+
/*
* @use_rts_for_aggregation: use rts/cts protection for HT traffic
+ * @ht40_bands: bitmap of bands (using %IEEE80211_BAND_*) that support HT40
*/
struct iwl_ht_params {
+ enum ieee80211_smps_mode smps_mode;
const bool ht_greenfield_support; /* if used set to true */
bool use_rts_for_aggregation;
- enum ieee80211_smps_mode smps_mode;
+ u8 ht40_bands;
+};
+
+/*
+ * information on how to parse the EEPROM
+ */
+#define EEPROM_REG_BAND_1_CHANNELS 0x08
+#define EEPROM_REG_BAND_2_CHANNELS 0x26
+#define EEPROM_REG_BAND_3_CHANNELS 0x42
+#define EEPROM_REG_BAND_4_CHANNELS 0x5C
+#define EEPROM_REG_BAND_5_CHANNELS 0x74
+#define EEPROM_REG_BAND_24_HT40_CHANNELS 0x82
+#define EEPROM_REG_BAND_52_HT40_CHANNELS 0x92
+#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS 0x80
+#define EEPROM_REGULATORY_BAND_NO_HT40 0
+
+struct iwl_eeprom_params {
+ const u8 regulatory_bands[7];
+ bool enhanced_txpower;
};
/**
@@ -243,6 +264,7 @@ struct iwl_cfg {
/* params likely to change within a device family */
const struct iwl_ht_params *ht_params;
const struct iwl_bt_params *bt_params;
+ const struct iwl_eeprom_params *eeprom_params;
const bool need_temp_offset_calib; /* if used set to true */
const bool no_xtal_calib;
enum iwl_led_mode led_mode;
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 5975054..34a5287 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -97,13 +97,10 @@
/*
* Hardware revision info
* Bit fields:
- * 31-8: Reserved
- * 7-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions
+ * 31-16: Reserved
+ * 15-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions
* 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
* 1-0: "Dash" (-) value, as in A-1, etc.
- *
- * NOTE: Revision step affects calculation of CCK txpower for 4965.
- * NOTE: See also CSR_HW_REV_WA_REG (work-around for bug in 4965).
*/
#define CSR_HW_REV (CSR_BASE+0x028)
@@ -155,9 +152,21 @@
#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250)
/* Bits for CSR_HW_IF_CONFIG_REG */
-#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00)
-#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
+#define CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH (0x00000003)
+#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP (0x0000000C)
+#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0)
+#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
+#define CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE (0x00000C00)
+#define CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH (0x00003000)
+#define CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP (0x0000C000)
+
+#define CSR_HW_IF_CONFIG_REG_POS_MAC_DASH (0)
+#define CSR_HW_IF_CONFIG_REG_POS_MAC_STEP (2)
+#define CSR_HW_IF_CONFIG_REG_POS_BOARD_VER (6)
+#define CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE (10)
+#define CSR_HW_IF_CONFIG_REG_POS_PHY_DASH (12)
+#define CSR_HW_IF_CONFIG_REG_POS_PHY_STEP (14)
#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
@@ -270,7 +279,10 @@
/* HW REV */
-#define CSR_HW_REV_TYPE_MSK (0x00001F0)
+#define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0)
+#define CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2)
+
+#define CSR_HW_REV_TYPE_MSK (0x000FFF0)
#define CSR_HW_REV_TYPE_5300 (0x0000020)
#define CSR_HW_REV_TYPE_5350 (0x0000030)
#define CSR_HW_REV_TYPE_5100 (0x0000050)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.c b/drivers/net/wireless/iwlwifi/iwl-debug.c
index 2d1b428..87535a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.c
@@ -61,7 +61,11 @@
*
*****************************************************************************/
+#define DEBUG
+
+#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/export.h>
#include "iwl-debug.h"
#include "iwl-devtrace.h"
@@ -81,8 +85,11 @@ void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \
}
__iwl_fn(warn)
+EXPORT_SYMBOL_GPL(__iwl_warn);
__iwl_fn(info)
+EXPORT_SYMBOL_GPL(__iwl_info);
__iwl_fn(crit)
+EXPORT_SYMBOL_GPL(__iwl_crit);
void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only,
const char *fmt, ...)
@@ -103,6 +110,7 @@ void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only,
trace_iwlwifi_err(&vaf);
va_end(args);
}
+EXPORT_SYMBOL_GPL(__iwl_err);
#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
void __iwl_dbg(struct device *dev,
@@ -119,10 +127,11 @@ void __iwl_dbg(struct device *dev,
#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_have_debug_level(level) &&
(!limit || net_ratelimit()))
- dev_err(dev, "%c %s %pV", in_interrupt() ? 'I' : 'U',
+ dev_dbg(dev, "%c %s %pV", in_interrupt() ? 'I' : 'U',
function, &vaf);
#endif
trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf);
va_end(args);
}
+EXPORT_SYMBOL_GPL(__iwl_dbg);
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 8376b84..42b20b0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -38,13 +38,14 @@ static inline bool iwl_have_debug_level(u32 level)
}
void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace,
- const char *fmt, ...);
-void __iwl_warn(struct device *dev, const char *fmt, ...);
-void __iwl_info(struct device *dev, const char *fmt, ...);
-void __iwl_crit(struct device *dev, const char *fmt, ...);
+ const char *fmt, ...) __printf(4, 5);
+void __iwl_warn(struct device *dev, const char *fmt, ...) __printf(2, 3);
+void __iwl_info(struct device *dev, const char *fmt, ...) __printf(2, 3);
+void __iwl_crit(struct device *dev, const char *fmt, ...) __printf(2, 3);
/* No matter what is m (priv, bus, trans), this will work */
#define IWL_ERR(m, f, a...) __iwl_err((m)->dev, false, false, f, ## a)
+#define IWL_ERR_DEV(d, f, a...) __iwl_err((d), false, false, f, ## a)
#define IWL_WARN(m, f, a...) __iwl_warn((m)->dev, f, ## a)
#define IWL_INFO(m, f, a...) __iwl_info((m)->dev, f, ## a)
#define IWL_CRIT(m, f, a...) __iwl_crit((m)->dev, f, ## a)
@@ -52,9 +53,9 @@ void __iwl_crit(struct device *dev, const char *fmt, ...);
#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
void __iwl_dbg(struct device *dev,
u32 level, bool limit, const char *function,
- const char *fmt, ...);
+ const char *fmt, ...) __printf(5, 6);
#else
-static inline void
+__printf(5, 6) static inline void
__iwl_dbg(struct device *dev,
u32 level, bool limit, const char *function,
const char *fmt, ...)
@@ -69,6 +70,8 @@ do { \
#define IWL_DEBUG(m, level, fmt, args...) \
__iwl_dbg((m)->dev, level, false, __func__, fmt, ##args)
+#define IWL_DEBUG_DEV(dev, level, fmt, args...) \
+ __iwl_dbg((dev), level, false, __func__, fmt, ##args)
#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
__iwl_dbg((m)->dev, level, true, __func__, fmt, ##args)
@@ -153,7 +156,7 @@ do { \
#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
-#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
+#define IWL_DEBUG_EEPROM(d, f, a...) IWL_DEBUG_DEV(d, IWL_DL_EEPROM, f, ## a)
#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 91f45e7..70191dd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -42,4 +42,9 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_info);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_warn);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_crit);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_err);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dbg);
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 06203d6..06ca505 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -28,6 +28,7 @@
#define __IWLWIFI_DEVICE_TRACE
#include <linux/tracepoint.h>
+#include <linux/device.h>
#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
@@ -175,7 +176,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_msg
-#define MAX_MSG_LEN 100
+#define MAX_MSG_LEN 110
DECLARE_EVENT_CLASS(iwlwifi_msg_event,
TP_PROTO(struct va_format *vaf),
@@ -188,7 +189,7 @@ DECLARE_EVENT_CLASS(iwlwifi_msg_event,
MAX_MSG_LEN, vaf->fmt,
*vaf->va) >= MAX_MSG_LEN);
),
- TP_printk("%s", (char *)__get_dynamic_array(msg))
+ TP_printk("%s", __get_str(msg))
);
DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_err,
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 3c72bad..cc41cfa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -77,8 +77,33 @@
/* private includes */
#include "iwl-fw-file.h"
+/******************************************************************************
+ *
+ * module boiler plate
+ *
+ ******************************************************************************/
+
+/*
+ * module name, copyright, version, etc.
+ */
+#define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux"
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+#define DRV_VERSION IWLWIFI_VERSION VD
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
/**
* struct iwl_drv - drv common data
+ * @list: list of drv structures using this opmode
* @fw: the iwl_fw structure
* @op_mode: the running op_mode
* @trans: transport layer
@@ -89,6 +114,7 @@
* @request_firmware_complete: the firmware has been obtained from user space
*/
struct iwl_drv {
+ struct list_head list;
struct iwl_fw fw;
struct iwl_op_mode *op_mode;
@@ -102,7 +128,19 @@ struct iwl_drv {
struct completion request_firmware_complete;
};
-
+#define DVM_OP_MODE 0
+#define MVM_OP_MODE 1
+
+/* Protects the table contents, i.e. the ops pointer & drv list */
+static struct mutex iwlwifi_opmode_table_mtx;
+static struct iwlwifi_opmode_table {
+ const char *name; /* name: iwldvm, iwlmvm, etc */
+ const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */
+ struct list_head drv; /* list of devices using this op_mode */
+} iwlwifi_opmode_table[] = { /* ops set when driver is initialized */
+ { .name = "iwldvm", .ops = NULL },
+ { .name = "iwlmvm", .ops = NULL },
+};
/*
* struct fw_sec: Just for the image parsing proccess.
@@ -657,17 +695,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -EINVAL;
}
-static int alloc_pci_desc(struct iwl_drv *drv,
- struct iwl_firmware_pieces *pieces,
- enum iwl_ucode_type type)
+static int iwl_alloc_ucode(struct iwl_drv *drv,
+ struct iwl_firmware_pieces *pieces,
+ enum iwl_ucode_type type)
{
int i;
for (i = 0;
i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i);
i++)
if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]),
- get_sec(pieces, type, i)))
- return -1;
+ get_sec(pieces, type, i)))
+ return -ENOMEM;
return 0;
}
@@ -721,7 +759,6 @@ static int validate_sec_sizes(struct iwl_drv *drv,
return 0;
}
-
/**
* iwl_ucode_callback - callback when firmware was loaded
*
@@ -733,6 +770,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
struct iwl_drv *drv = context;
struct iwl_fw *fw = &drv->fw;
struct iwl_ucode_header *ucode;
+ struct iwlwifi_opmode_table *op;
int err;
struct iwl_firmware_pieces pieces;
const unsigned int api_max = drv->cfg->ucode_api_max;
@@ -740,6 +778,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
const unsigned int api_min = drv->cfg->ucode_api_min;
u32 api_ver;
int i;
+ bool load_module = false;
fw->ucode_capa.max_probe_length = 200;
fw->ucode_capa.standard_phy_calibration_size =
@@ -825,8 +864,8 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
* 1) unmodified from disk
* 2) backup cache for save/restore during power-downs */
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
- if (alloc_pci_desc(drv, &pieces, i))
- goto err_pci_alloc;
+ if (iwl_alloc_ucode(drv, &pieces, i))
+ goto out_free_fw;
/* Now that we can no longer fail, copy information */
@@ -861,13 +900,40 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* We have our copies now, allow OS release its copies */
release_firmware(ucode_raw);
- complete(&drv->request_firmware_complete);
- drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw);
+ mutex_lock(&iwlwifi_opmode_table_mtx);
+ op = &iwlwifi_opmode_table[DVM_OP_MODE];
- if (!drv->op_mode)
- goto out_unbind;
+ /* add this device to the list of devices using this op_mode */
+ list_add_tail(&drv->list, &op->drv);
+
+ if (op->ops) {
+ const struct iwl_op_mode_ops *ops = op->ops;
+ drv->op_mode = ops->start(drv->trans, drv->cfg, &drv->fw);
+
+ if (!drv->op_mode) {
+ mutex_unlock(&iwlwifi_opmode_table_mtx);
+ goto out_unbind;
+ }
+ } else {
+ load_module = true;
+ }
+ mutex_unlock(&iwlwifi_opmode_table_mtx);
+ /*
+ * Complete the firmware request last so that
+ * a driver unbind (stop) doesn't run while we
+ * are doing the start() above.
+ */
+ complete(&drv->request_firmware_complete);
+
+ /*
+ * Load the module last so we don't block anything
+ * else from proceeding if the module fails to load
+ * or hangs loading.
+ */
+ if (load_module)
+ request_module("%s", op->name);
return;
try_again:
@@ -877,7 +943,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
goto out_unbind;
return;
- err_pci_alloc:
+ out_free_fw:
IWL_ERR(drv, "failed to allocate pci memory\n");
iwl_dealloc_ucode(drv);
release_firmware(ucode_raw);
@@ -901,6 +967,7 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
drv->cfg = cfg;
init_completion(&drv->request_firmware_complete);
+ INIT_LIST_HEAD(&drv->list);
ret = iwl_request_firmware(drv, true);
@@ -923,6 +990,16 @@ void iwl_drv_stop(struct iwl_drv *drv)
iwl_dealloc_ucode(drv);
+ mutex_lock(&iwlwifi_opmode_table_mtx);
+ /*
+ * List is empty (this item wasn't added)
+ * when firmware loading failed -- in that
+ * case we can't remove it from any list.
+ */
+ if (!list_empty(&drv->list))
+ list_del(&drv->list);
+ mutex_unlock(&iwlwifi_opmode_table_mtx);
+
kfree(drv);
}
@@ -936,8 +1013,78 @@ struct iwl_mod_params iwlwifi_mod_params = {
.power_level = IWL_POWER_INDEX_1,
.bt_ch_announce = true,
.auto_agg = true,
+ .wd_disable = true,
/* the rest are 0 by default */
};
+EXPORT_SYMBOL_GPL(iwlwifi_mod_params);
+
+int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
+{
+ int i;
+ struct iwl_drv *drv;
+
+ mutex_lock(&iwlwifi_opmode_table_mtx);
+ for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
+ if (strcmp(iwlwifi_opmode_table[i].name, name))
+ continue;
+ iwlwifi_opmode_table[i].ops = ops;
+ list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
+ drv->op_mode = ops->start(drv->trans, drv->cfg,
+ &drv->fw);
+ mutex_unlock(&iwlwifi_opmode_table_mtx);
+ return 0;
+ }
+ mutex_unlock(&iwlwifi_opmode_table_mtx);
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(iwl_opmode_register);
+
+void iwl_opmode_deregister(const char *name)
+{
+ int i;
+ struct iwl_drv *drv;
+
+ mutex_lock(&iwlwifi_opmode_table_mtx);
+ for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
+ if (strcmp(iwlwifi_opmode_table[i].name, name))
+ continue;
+ iwlwifi_opmode_table[i].ops = NULL;
+
+ /* call the stop routine for all devices */
+ list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) {
+ if (drv->op_mode) {
+ iwl_op_mode_stop(drv->op_mode);
+ drv->op_mode = NULL;
+ }
+ }
+ mutex_unlock(&iwlwifi_opmode_table_mtx);
+ return;
+ }
+ mutex_unlock(&iwlwifi_opmode_table_mtx);
+}
+EXPORT_SYMBOL_GPL(iwl_opmode_deregister);
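For context, an op_mode module is expected to hook into this registry from its own module init/exit, roughly as sketched below (a minimal sketch assuming the dvm module exports an iwl_dvm_ops structure, as the direct call removed above suggests; it is not part of this hunk).

/* Sketch only: how an op_mode module might register itself. */
static int __init iwl_dvm_init(void)
{
	return iwl_opmode_register("iwldvm", &iwl_dvm_ops);
}
module_init(iwl_dvm_init);

static void __exit iwl_dvm_exit(void)
{
	iwl_opmode_deregister("iwldvm");
}
module_exit(iwl_dvm_exit);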
+
+static int __init iwl_drv_init(void)
+{
+ int i;
+
+ mutex_init(&iwlwifi_opmode_table_mtx);
+
+ for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++)
+ INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv);
+
+ pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+ pr_info(DRV_COPYRIGHT "\n");
+
+ return iwl_pci_register_driver();
+}
+module_init(iwl_drv_init);
+
+static void __exit iwl_drv_exit(void)
+{
+ iwl_pci_unregister_driver();
+}
+module_exit(iwl_drv_exit);
#ifdef CONFIG_IWLWIFI_DEBUG
module_param_named(debug, iwlwifi_mod_params.debug_level, uint,
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
new file mode 100644
index 0000000..f10170f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -0,0 +1,903 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include "iwl-modparams.h"
+#include "iwl-eeprom-parse.h"
+
+/* EEPROM offset definitions */
+
+/* indirect access definitions */
+#define ADDRESS_MSK 0x0000FFFF
+#define INDIRECT_TYPE_MSK 0x000F0000
+#define INDIRECT_HOST 0x00010000
+#define INDIRECT_GENERAL 0x00020000
+#define INDIRECT_REGULATORY 0x00030000
+#define INDIRECT_CALIBRATION 0x00040000
+#define INDIRECT_PROCESS_ADJST 0x00050000
+#define INDIRECT_OTHERS 0x00060000
+#define INDIRECT_TXP_LIMIT 0x00070000
+#define INDIRECT_TXP_LIMIT_SIZE 0x00080000
+#define INDIRECT_ADDRESS 0x00100000
+
+/* corresponding link offsets in EEPROM */
+#define EEPROM_LINK_HOST (2*0x64)
+#define EEPROM_LINK_GENERAL (2*0x65)
+#define EEPROM_LINK_REGULATORY (2*0x66)
+#define EEPROM_LINK_CALIBRATION (2*0x67)
+#define EEPROM_LINK_PROCESS_ADJST (2*0x68)
+#define EEPROM_LINK_OTHERS (2*0x69)
+#define EEPROM_LINK_TXP_LIMIT (2*0x6a)
+#define EEPROM_LINK_TXP_LIMIT_SIZE (2*0x6b)
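A standalone sketch of how the encoding above resolves: the low 16 bits of an indirect address are a byte offset within a section, and the section base is read (as a word offset) from the matching EEPROM_LINK_* location. The helper below only illustrates the calibration case and mirrors eeprom_indirect_address() further down; it is not part of the patch.

/* Sketch: resolve a calibration-type indirect address to a byte offset. */
static u32 example_resolve_calib_addr(const u8 *eeprom, u32 address)
{
	/* Section base, stored in the EEPROM as a word offset. */
	u16 base_words =
		le16_to_cpup((const __le16 *)(eeprom + EEPROM_LINK_CALIBRATION));

	/* Words to bytes, plus the byte offset carried in the low 16 bits. */
	return (address & ADDRESS_MSK) + (base_words << 1);
}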
+
+/* General */
+#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
+#define EEPROM_SUBSYSTEM_ID (2*0x0A) /* 2 bytes */
+#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
+#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
+#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
+#define EEPROM_VERSION (2*0x44) /* 2 bytes */
+#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
+#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
+#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
+#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
+
+/* calibration */
+struct iwl_eeprom_calib_hdr {
+ u8 version;
+ u8 pa_type;
+ __le16 voltage;
+} __packed;
+
+#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
+#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL)
+
+/* temperature */
+#define EEPROM_KELVIN_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
+#define EEPROM_RAW_TEMPERATURE ((2*0x12B) | EEPROM_CALIB_ALL)
+
+/*
+ * EEPROM bands
+ * These are the channel numbers from each band in the order
+ * that they are stored in the EEPROM band information. Note
+ * that EEPROM bands aren't the same as mac80211 bands, and
+ * there are even special "ht40 bands" in the EEPROM.
+ */
+static const u8 iwl_eeprom_band_1[14] = { /* 2.4 GHz */
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+};
+
+static const u8 iwl_eeprom_band_2[] = { /* 4915-5080MHz */
+ 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
+};
+
+static const u8 iwl_eeprom_band_3[] = { /* 5170-5320MHz */
+ 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+};
+
+static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */
+ 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+};
+
+static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */
+ 145, 149, 153, 157, 161, 165
+};
+
+static const u8 iwl_eeprom_band_6[] = { /* 2.4 ht40 channel */
+ 1, 2, 3, 4, 5, 6, 7
+};
+
+static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
+ 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
+};
+
+#define IWL_NUM_CHANNELS (ARRAY_SIZE(iwl_eeprom_band_1) + \
+ ARRAY_SIZE(iwl_eeprom_band_2) + \
+ ARRAY_SIZE(iwl_eeprom_band_3) + \
+ ARRAY_SIZE(iwl_eeprom_band_4) + \
+ ARRAY_SIZE(iwl_eeprom_band_5))
+
+/* rate data (static) */
+static struct ieee80211_rate iwl_cfg80211_rates[] = {
+ { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
+ { .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+ { .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+ { .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+ { .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, },
+ { .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, },
+ { .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, },
+ { .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, },
+ { .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, },
+ { .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, },
+ { .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, },
+ { .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, },
+};
+#define RATES_24_OFFS 0
+#define N_RATES_24 ARRAY_SIZE(iwl_cfg80211_rates)
+#define RATES_52_OFFS 4
+#define N_RATES_52 (N_RATES_24 - RATES_52_OFFS)
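The offsets above slice the single rate table into per-band views; a minimal sketch of how a band-setup routine might consume them (the function itself is illustrative, while the sband fields are standard mac80211).

/* Sketch: point the mac80211 band structures at slices of the rate table. */
static void example_init_rates(struct ieee80211_supported_band *sband_2ghz,
			       struct ieee80211_supported_band *sband_5ghz)
{
	/* 2.4 GHz exposes all rates, including the CCK ones at the start. */
	sband_2ghz->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
	sband_2ghz->n_bitrates = N_RATES_24;

	/* 5 GHz skips the four CCK rates and starts at the OFDM rates. */
	sband_5ghz->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
	sband_5ghz->n_bitrates = N_RATES_52;
}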
+
+/* EEPROM reading functions */
+
+static u16 iwl_eeprom_query16(const u8 *eeprom, size_t eeprom_size, int offset)
+{
+ if (WARN_ON(offset + sizeof(u16) > eeprom_size))
+ return 0;
+ return le16_to_cpup((__le16 *)(eeprom + offset));
+}
+
+static u32 eeprom_indirect_address(const u8 *eeprom, size_t eeprom_size,
+ u32 address)
+{
+ u16 offset = 0;
+
+ if ((address & INDIRECT_ADDRESS) == 0)
+ return address;
+
+ switch (address & INDIRECT_TYPE_MSK) {
+ case INDIRECT_HOST:
+ offset = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_LINK_HOST);
+ break;
+ case INDIRECT_GENERAL:
+ offset = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_LINK_GENERAL);
+ break;
+ case INDIRECT_REGULATORY:
+ offset = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_LINK_REGULATORY);
+ break;
+ case INDIRECT_TXP_LIMIT:
+ offset = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_LINK_TXP_LIMIT);
+ break;
+ case INDIRECT_TXP_LIMIT_SIZE:
+ offset = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_LINK_TXP_LIMIT_SIZE);
+ break;
+ case INDIRECT_CALIBRATION:
+ offset = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_LINK_CALIBRATION);
+ break;
+ case INDIRECT_PROCESS_ADJST:
+ offset = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_LINK_PROCESS_ADJST);
+ break;
+ case INDIRECT_OTHERS:
+ offset = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_LINK_OTHERS);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ /* translate the offset from words to bytes */
+ return (address & ADDRESS_MSK) + (offset << 1);
+}
+
+static const u8 *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size,
+ u32 offset)
+{
+ u32 address = eeprom_indirect_address(eeprom, eeprom_size, offset);
+
+ if (WARN_ON(address >= eeprom_size))
+ return NULL;
+
+ return &eeprom[address];
+}
+
+static int iwl_eeprom_read_calib(const u8 *eeprom, size_t eeprom_size,
+ struct iwl_eeprom_data *data)
+{
+ struct iwl_eeprom_calib_hdr *hdr;
+
+ hdr = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
+ EEPROM_CALIB_ALL);
+ if (!hdr)
+ return -ENODATA;
+ data->calib_version = hdr->version;
+ data->calib_voltage = hdr->voltage;
+
+ return 0;
+}
+
+/**
+ * enum iwl_eeprom_channel_flags - channel flags in EEPROM
+ * @EEPROM_CHANNEL_VALID: channel is usable for this SKU/geo
+ * @EEPROM_CHANNEL_IBSS: usable as an IBSS channel
+ * @EEPROM_CHANNEL_ACTIVE: active scanning allowed
+ * @EEPROM_CHANNEL_RADAR: radar detection required
+ * @EEPROM_CHANNEL_WIDE: 20 MHz channel okay (?)
+ * @EEPROM_CHANNEL_DFS: dynamic freq selection candidate
+ */
+enum iwl_eeprom_channel_flags {
+ EEPROM_CHANNEL_VALID = BIT(0),
+ EEPROM_CHANNEL_IBSS = BIT(1),
+ EEPROM_CHANNEL_ACTIVE = BIT(3),
+ EEPROM_CHANNEL_RADAR = BIT(4),
+ EEPROM_CHANNEL_WIDE = BIT(5),
+ EEPROM_CHANNEL_DFS = BIT(7),
+};
+
+/**
+ * struct iwl_eeprom_channel - EEPROM channel data
+ * @flags: %EEPROM_CHANNEL_* flags
+ * @max_power_avg: max power (in dBm) on this channel, at most 31 dBm
+ */
+struct iwl_eeprom_channel {
+ u8 flags;
+ s8 max_power_avg;
+} __packed;
+
+
+enum iwl_eeprom_enhanced_txpwr_flags {
+ IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0),
+ IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1),
+ IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2),
+ IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3),
+ IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4),
+ IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5),
+ IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6),
+ IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7),
+};
+
+/**
+ * iwl_eeprom_enhanced_txpwr structure
+ * @flags: entry flags
+ * @channel: channel number
+ * @chain_a_max: chain a max power in 1/2 dBm
+ * @chain_b_max: chain b max power in 1/2 dBm
+ * @chain_c_max: chain c max power in 1/2 dBm
+ * @delta_20_in_40: 20-in-40 deltas (hi/lo)
+ * @mimo2_max: mimo2 max power in 1/2 dBm
+ * @mimo3_max: mimo3 max power in 1/2 dBm
+ *
+ * This structure presents the enhanced regulatory tx power limit layout
+ * in an EEPROM image.
+ */
+struct iwl_eeprom_enhanced_txpwr {
+ u8 flags;
+ u8 channel;
+ s8 chain_a_max;
+ s8 chain_b_max;
+ s8 chain_c_max;
+ u8 delta_20_in_40;
+ s8 mimo2_max;
+ s8 mimo3_max;
+} __packed;
+
+static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_eeprom_data *data,
+ struct iwl_eeprom_enhanced_txpwr *txp)
+{
+ s8 result = 0; /* (.5 dBm) */
+
+ /* Take the highest tx power from any valid chains */
+ if (data->valid_tx_ant & ANT_A && txp->chain_a_max > result)
+ result = txp->chain_a_max;
+
+ if (data->valid_tx_ant & ANT_B && txp->chain_b_max > result)
+ result = txp->chain_b_max;
+
+ if (data->valid_tx_ant & ANT_C && txp->chain_c_max > result)
+ result = txp->chain_c_max;
+
+ if ((data->valid_tx_ant == ANT_AB ||
+ data->valid_tx_ant == ANT_BC ||
+ data->valid_tx_ant == ANT_AC) && txp->mimo2_max > result)
+ result = txp->mimo2_max;
+
+ if (data->valid_tx_ant == ANT_ABC && txp->mimo3_max > result)
+ result = txp->mimo3_max;
+
+ return result;
+}
+
+#define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
+#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
+#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)
+
+#define TXP_CHECK_AND_PRINT(x) \
+ ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) ? # x " " : "")
+
+static void
+iwl_eeprom_enh_txp_read_element(struct iwl_eeprom_data *data,
+ struct iwl_eeprom_enhanced_txpwr *txp,
+ int n_channels, s8 max_txpower_avg)
+{
+ int ch_idx;
+ enum ieee80211_band band;
+
+ band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
+ IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+
+ for (ch_idx = 0; ch_idx < n_channels; ch_idx++) {
+ struct ieee80211_channel *chan = &data->channels[ch_idx];
+
+ /* update the matching channel only, or all channels for common (channel == 0) entries */
+ if (txp->channel != 0 && chan->hw_value != txp->channel)
+ continue;
+
+ /* update matching band only */
+ if (band != chan->band)
+ continue;
+
+ if (chan->max_power < max_txpower_avg &&
+ !(txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ))
+ chan->max_power = max_txpower_avg;
+ }
+}
+
+static void iwl_eeprom_enhanced_txpower(struct device *dev,
+ struct iwl_eeprom_data *data,
+ const u8 *eeprom, size_t eeprom_size,
+ int n_channels)
+{
+ struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
+ int idx, entries;
+ __le16 *txp_len;
+ s8 max_txp_avg_halfdbm;
+
+ BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
+
+ /* the length is in 16-bit words, but we want entries */
+ txp_len = (__le16 *)iwl_eeprom_query_addr(eeprom, eeprom_size,
+ EEPROM_TXP_SZ_OFFS);
+ entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
+
+ txp_array = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
+ EEPROM_TXP_OFFS);
+
+ for (idx = 0; idx < entries; idx++) {
+ txp = &txp_array[idx];
+ /* skip invalid entries */
+ if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
+ continue;
+
+ IWL_DEBUG_EEPROM(dev, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
+ (txp->channel && (txp->flags &
+ IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ?
+ "Common " : (txp->channel) ?
+ "Channel" : "Common",
+ (txp->channel),
+ TXP_CHECK_AND_PRINT(VALID),
+ TXP_CHECK_AND_PRINT(BAND_52G),
+ TXP_CHECK_AND_PRINT(OFDM),
+ TXP_CHECK_AND_PRINT(40MHZ),
+ TXP_CHECK_AND_PRINT(HT_AP),
+ TXP_CHECK_AND_PRINT(RES1),
+ TXP_CHECK_AND_PRINT(RES2),
+ TXP_CHECK_AND_PRINT(COMMON_TYPE),
+ txp->flags);
+ IWL_DEBUG_EEPROM(dev,
+ "\t\t chain_A: 0x%02x chain_B: 0X%02x chain_C: 0X%02x\n",
+ txp->chain_a_max, txp->chain_b_max,
+ txp->chain_c_max);
+ IWL_DEBUG_EEPROM(dev,
+ "\t\t MIMO2: 0x%02x MIMO3: 0x%02x High 20_on_40: 0x%02x Low 20_on_40: 0x%02x\n",
+ txp->mimo2_max, txp->mimo3_max,
+ ((txp->delta_20_in_40 & 0xf0) >> 4),
+ (txp->delta_20_in_40 & 0x0f));
+
+ max_txp_avg_halfdbm = iwl_get_max_txpwr_half_dbm(data, txp);
+
+ iwl_eeprom_enh_txp_read_element(data, txp, n_channels,
+ DIV_ROUND_UP(max_txp_avg_halfdbm, 2));
+
+ if (max_txp_avg_halfdbm > data->max_tx_pwr_half_dbm)
+ data->max_tx_pwr_half_dbm = max_txp_avg_halfdbm;
+ }
+}
+
+static void iwl_init_band_reference(const struct iwl_cfg *cfg,
+ const u8 *eeprom, size_t eeprom_size,
+ int eeprom_band, int *eeprom_ch_count,
+ const struct iwl_eeprom_channel **ch_info,
+ const u8 **eeprom_ch_array)
+{
+ u32 offset = cfg->eeprom_params->regulatory_bands[eeprom_band - 1];
+
+ offset |= INDIRECT_ADDRESS | INDIRECT_REGULATORY;
+
+ *ch_info = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, offset);
+
+ switch (eeprom_band) {
+ case 1: /* 2.4GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
+ *eeprom_ch_array = iwl_eeprom_band_1;
+ break;
+ case 2: /* 4.9GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
+ *eeprom_ch_array = iwl_eeprom_band_2;
+ break;
+ case 3: /* 5.2GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
+ *eeprom_ch_array = iwl_eeprom_band_3;
+ break;
+ case 4: /* 5.5GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
+ *eeprom_ch_array = iwl_eeprom_band_4;
+ break;
+ case 5: /* 5.7GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
+ *eeprom_ch_array = iwl_eeprom_band_5;
+ break;
+ case 6: /* 2.4GHz ht40 channels */
+ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
+ *eeprom_ch_array = iwl_eeprom_band_6;
+ break;
+ case 7: /* 5 GHz ht40 channels */
+ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
+ *eeprom_ch_array = iwl_eeprom_band_7;
+ break;
+ default:
+ *eeprom_ch_count = 0;
+ *eeprom_ch_array = NULL;
+ WARN_ON(1);
+ }
+}
+
+#define CHECK_AND_PRINT(x) \
+ ((eeprom_ch->flags & EEPROM_CHANNEL_##x) ? # x " " : "")
+
+static void iwl_mod_ht40_chan_info(struct device *dev,
+ struct iwl_eeprom_data *data, int n_channels,
+ enum ieee80211_band band, u16 channel,
+ const struct iwl_eeprom_channel *eeprom_ch,
+ u8 clear_ht40_extension_channel)
+{
+ struct ieee80211_channel *chan = NULL;
+ int i;
+
+ for (i = 0; i < n_channels; i++) {
+ if (data->channels[i].band != band)
+ continue;
+ if (data->channels[i].hw_value != channel)
+ continue;
+ chan = &data->channels[i];
+ break;
+ }
+
+ if (!chan)
+ return;
+
+ IWL_DEBUG_EEPROM(dev,
+ "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
+ channel,
+ band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4",
+ CHECK_AND_PRINT(IBSS),
+ CHECK_AND_PRINT(ACTIVE),
+ CHECK_AND_PRINT(RADAR),
+ CHECK_AND_PRINT(WIDE),
+ CHECK_AND_PRINT(DFS),
+ eeprom_ch->flags,
+ eeprom_ch->max_power_avg,
+ ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
+ !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? ""
+ : "not ");
+
+ if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
+ chan->flags &= ~clear_ht40_extension_channel;
+}
+
+#define CHECK_AND_PRINT_I(x) \
+ ((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_##x) ? # x " " : "")
+
+static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+ struct iwl_eeprom_data *data,
+ const u8 *eeprom, size_t eeprom_size)
+{
+ int band, ch_idx;
+ const struct iwl_eeprom_channel *eeprom_ch_info;
+ const u8 *eeprom_ch_array;
+ int eeprom_ch_count;
+ int n_channels = 0;
+
+ /*
+ * Loop through the 5 EEPROM bands and add them to the parse list
+ */
+ for (band = 1; band <= 5; band++) {
+ struct ieee80211_channel *channel;
+
+ iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
+ &eeprom_ch_count, &eeprom_ch_info,
+ &eeprom_ch_array);
+
+ /* Loop through each band adding each of the channels */
+ for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
+ const struct iwl_eeprom_channel *eeprom_ch;
+
+ eeprom_ch = &eeprom_ch_info[ch_idx];
+
+ if (!(eeprom_ch->flags & EEPROM_CHANNEL_VALID)) {
+ IWL_DEBUG_EEPROM(dev,
+ "Ch. %d Flags %x [%sGHz] - No traffic\n",
+ eeprom_ch_array[ch_idx],
+ eeprom_ch_info[ch_idx].flags,
+ (band != 1) ? "5.2" : "2.4");
+ continue;
+ }
+
+ channel = &data->channels[n_channels];
+ n_channels++;
+
+ channel->hw_value = eeprom_ch_array[ch_idx];
+ channel->band = (band == 1) ? IEEE80211_BAND_2GHZ
+ : IEEE80211_BAND_5GHZ;
+ channel->center_freq =
+ ieee80211_channel_to_frequency(
+ channel->hw_value, channel->band);
+
+ /* set no-HT40, will enable as appropriate later */
+ channel->flags = IEEE80211_CHAN_NO_HT40;
+
+ if (!(eeprom_ch->flags & EEPROM_CHANNEL_IBSS))
+ channel->flags |= IEEE80211_CHAN_NO_IBSS;
+
+ if (!(eeprom_ch->flags & EEPROM_CHANNEL_ACTIVE))
+ channel->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+ if (eeprom_ch->flags & EEPROM_CHANNEL_RADAR)
+ channel->flags |= IEEE80211_CHAN_RADAR;
+
+ /* Initialize regulatory-based run-time data */
+ channel->max_power =
+ eeprom_ch_info[ch_idx].max_power_avg;
+ IWL_DEBUG_EEPROM(dev,
+ "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
+ channel->hw_value,
+ (band != 1) ? "5.2" : "2.4",
+ CHECK_AND_PRINT_I(VALID),
+ CHECK_AND_PRINT_I(IBSS),
+ CHECK_AND_PRINT_I(ACTIVE),
+ CHECK_AND_PRINT_I(RADAR),
+ CHECK_AND_PRINT_I(WIDE),
+ CHECK_AND_PRINT_I(DFS),
+ eeprom_ch_info[ch_idx].flags,
+ eeprom_ch_info[ch_idx].max_power_avg,
+ ((eeprom_ch_info[ch_idx].flags &
+ EEPROM_CHANNEL_IBSS) &&
+ !(eeprom_ch_info[ch_idx].flags &
+ EEPROM_CHANNEL_RADAR))
+ ? "" : "not ");
+ }
+ }
+
+ if (cfg->eeprom_params->enhanced_txpower) {
+ /*
+		 * For newer devices (6000 series and up) the EEPROM
+		 * contains enhanced tx power information; the driver
+		 * needs to process this additional information to
+		 * determine the max channel tx power limits.
+ */
+ iwl_eeprom_enhanced_txpower(dev, data, eeprom, eeprom_size,
+ n_channels);
+ } else {
+ /* All others use data from channel map */
+ int i;
+
+ data->max_tx_pwr_half_dbm = -128;
+
+ for (i = 0; i < n_channels; i++)
+ data->max_tx_pwr_half_dbm =
+ max_t(s8, data->max_tx_pwr_half_dbm,
+ data->channels[i].max_power * 2);
+ }
+
+ /* Check if we do have HT40 channels */
+ if (cfg->eeprom_params->regulatory_bands[5] ==
+ EEPROM_REGULATORY_BAND_NO_HT40 &&
+ cfg->eeprom_params->regulatory_bands[6] ==
+ EEPROM_REGULATORY_BAND_NO_HT40)
+ return n_channels;
+
+ /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
+ for (band = 6; band <= 7; band++) {
+ enum ieee80211_band ieeeband;
+
+ iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
+ &eeprom_ch_count, &eeprom_ch_info,
+ &eeprom_ch_array);
+
+ /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
+ ieeeband = (band == 6) ? IEEE80211_BAND_2GHZ
+ : IEEE80211_BAND_5GHZ;
+
+ /* Loop through each band adding each of the channels */
+ for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
+ /* Set up driver's info for lower half */
+ iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband,
+ eeprom_ch_array[ch_idx],
+ &eeprom_ch_info[ch_idx],
+ IEEE80211_CHAN_NO_HT40PLUS);
+
+ /* Set up driver's info for upper half */
+ iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband,
+ eeprom_ch_array[ch_idx] + 4,
+ &eeprom_ch_info[ch_idx],
+ IEEE80211_CHAN_NO_HT40MINUS);
+ }
+ }
+
+ return n_channels;
+}
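To make the HT40 pass above concrete: every channel is first created with IEEE80211_CHAN_NO_HT40 set, and for each control channel listed in the two HT40 band tables the loop clears IEEE80211_CHAN_NO_HT40PLUS on that channel and IEEE80211_CHAN_NO_HT40MINUS on the channel four above it, provided the EEPROM entry is marked valid. Assuming band 7 lists channel 36, for example, channel 36 ends up usable as the lower (HT40+) control channel and channel 40 as the upper (HT40-) one of the 36/40 pair, while channels not reachable from any listed entry keep both restrictions.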
+
+static int iwl_init_sband_channels(struct iwl_eeprom_data *data,
+ struct ieee80211_supported_band *sband,
+ int n_channels, enum ieee80211_band band)
+{
+ struct ieee80211_channel *chan = &data->channels[0];
+ int n = 0, idx = 0;
+
+ while (chan->band != band && idx < n_channels)
+ chan = &data->channels[++idx];
+
+ sband->channels = &data->channels[idx];
+
+ while (chan->band == band && idx < n_channels) {
+ chan = &data->channels[++idx];
+ n++;
+ }
+
+ sband->n_channels = n;
+
+ return n;
+}
+
+#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
+#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
+
+static void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
+ struct iwl_eeprom_data *data,
+ struct ieee80211_sta_ht_cap *ht_info,
+ enum ieee80211_band band)
+{
+ int max_bit_rate = 0;
+ u8 rx_chains;
+ u8 tx_chains;
+
+ tx_chains = hweight8(data->valid_tx_ant);
+ if (cfg->rx_with_siso_diversity)
+ rx_chains = 1;
+ else
+ rx_chains = hweight8(data->valid_rx_ant);
+
+ if (!(data->sku & EEPROM_SKU_CAP_11N_ENABLE) || !cfg->ht_params) {
+ ht_info->ht_supported = false;
+ return;
+ }
+
+ ht_info->ht_supported = true;
+ ht_info->cap = 0;
+
+ if (iwlwifi_mod_params.amsdu_size_8K)
+ ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
+
+ ht_info->mcs.rx_mask[0] = 0xFF;
+ if (rx_chains >= 2)
+ ht_info->mcs.rx_mask[1] = 0xFF;
+ if (rx_chains >= 3)
+ ht_info->mcs.rx_mask[2] = 0xFF;
+
+ if (cfg->ht_params->ht_greenfield_support)
+ ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+
+ max_bit_rate = MAX_BIT_RATE_20_MHZ;
+
+ if (cfg->ht_params->ht40_bands & BIT(band)) {
+ ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
+ ht_info->mcs.rx_mask[4] = 0x01;
+ max_bit_rate = MAX_BIT_RATE_40_MHZ;
+ }
+
+ /* Highest supported Rx data rate */
+ max_bit_rate *= rx_chains;
+ WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
+ ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
+
+ /* Tx MCS capabilities */
+ ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ if (tx_chains != rx_chains) {
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ ht_info->mcs.tx_params |= ((tx_chains - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ }
+}
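A quick sanity check on the rate math above: with two valid RX chains and HT40 allowed on the band, max_bit_rate becomes 150 * 2 = 300 Mbps, which fits under IEEE80211_HT_MCS_RX_HIGHEST_MASK and is what ends up in ht_info->mcs.rx_highest; a device forced to SISO diversity (rx_chains = 1) advertises at most 150 Mbps on the same band, and an HT20-only band tops out at 72 * rx_chains.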
+
+static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+ struct iwl_eeprom_data *data,
+ const u8 *eeprom, size_t eeprom_size)
+{
+ int n_channels = iwl_init_channel_map(dev, cfg, data,
+ eeprom, eeprom_size);
+ int n_used = 0;
+ struct ieee80211_supported_band *sband;
+
+ sband = &data->bands[IEEE80211_BAND_2GHZ];
+ sband->band = IEEE80211_BAND_2GHZ;
+ sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
+ sband->n_bitrates = N_RATES_24;
+ n_used += iwl_init_sband_channels(data, sband, n_channels,
+ IEEE80211_BAND_2GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+
+ sband = &data->bands[IEEE80211_BAND_5GHZ];
+ sband->band = IEEE80211_BAND_5GHZ;
+ sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
+ sband->n_bitrates = N_RATES_52;
+ n_used += iwl_init_sband_channels(data, sband, n_channels,
+ IEEE80211_BAND_5GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ);
+
+ if (n_channels != n_used)
+ IWL_ERR_DEV(dev, "EEPROM: used only %d of %d channels\n",
+ n_used, n_channels);
+}
+
+/* EEPROM data functions */
+
+struct iwl_eeprom_data *
+iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
+ const u8 *eeprom, size_t eeprom_size)
+{
+ struct iwl_eeprom_data *data;
+ const void *tmp;
+
+ if (WARN_ON(!cfg || !cfg->eeprom_params))
+ return NULL;
+
+ data = kzalloc(sizeof(*data) +
+ sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
+ GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ /* get MAC address(es) */
+ tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_MAC_ADDRESS);
+ if (!tmp)
+ goto err_free;
+ memcpy(data->hw_addr, tmp, ETH_ALEN);
+ data->n_hw_addrs = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_NUM_MAC_ADDRESS);
+
+ if (iwl_eeprom_read_calib(eeprom, eeprom_size, data))
+ goto err_free;
+
+ tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_XTAL);
+ if (!tmp)
+ goto err_free;
+ memcpy(data->xtal_calib, tmp, sizeof(data->xtal_calib));
+
+ tmp = iwl_eeprom_query_addr(eeprom, eeprom_size,
+ EEPROM_RAW_TEMPERATURE);
+ if (!tmp)
+ goto err_free;
+ data->raw_temperature = *(__le16 *)tmp;
+
+ tmp = iwl_eeprom_query_addr(eeprom, eeprom_size,
+ EEPROM_KELVIN_TEMPERATURE);
+ if (!tmp)
+ goto err_free;
+ data->kelvin_temperature = *(__le16 *)tmp;
+ data->kelvin_voltage = *((__le16 *)tmp + 1);
+
+ data->radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_RADIO_CONFIG);
+ data->sku = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_SKU_CAP);
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
+ data->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
+
+ data->eeprom_version = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_VERSION);
+
+ data->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(data->radio_cfg);
+ data->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(data->radio_cfg);
+
+ /* check overrides (some devices have wrong EEPROM) */
+ if (cfg->valid_tx_ant)
+ data->valid_tx_ant = cfg->valid_tx_ant;
+ if (cfg->valid_rx_ant)
+ data->valid_rx_ant = cfg->valid_rx_ant;
+
+ if (!data->valid_tx_ant || !data->valid_rx_ant) {
+ IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n",
+ data->valid_tx_ant, data->valid_rx_ant);
+ goto err_free;
+ }
+
+ iwl_init_sbands(dev, cfg, data, eeprom, eeprom_size);
+
+ return data;
+ err_free:
+ kfree(data);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(iwl_parse_eeprom_data);
+
+/* helper functions */
+int iwl_eeprom_check_version(struct iwl_eeprom_data *data,
+ struct iwl_trans *trans)
+{
+ if (data->eeprom_version >= trans->cfg->eeprom_ver ||
+ data->calib_version >= trans->cfg->eeprom_calib_ver) {
+ IWL_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
+ data->eeprom_version, data->calib_version);
+ return 0;
+ }
+
+ IWL_ERR(trans,
+ "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
+ data->eeprom_version, trans->cfg->eeprom_ver,
+ data->calib_version, trans->cfg->eeprom_calib_ver);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(iwl_eeprom_check_version);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
new file mode 100644
index 0000000..9c07c67
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -0,0 +1,138 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#ifndef __iwl_eeprom_parse_h__
+#define __iwl_eeprom_parse_h__
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include "iwl-trans.h"
+
+/* SKU Capabilities (actual values from EEPROM definition) */
+#define EEPROM_SKU_CAP_BAND_24GHZ (1 << 4)
+#define EEPROM_SKU_CAP_BAND_52GHZ (1 << 5)
+#define EEPROM_SKU_CAP_11N_ENABLE (1 << 6)
+#define EEPROM_SKU_CAP_AMT_ENABLE (1 << 7)
+#define EEPROM_SKU_CAP_IPAN_ENABLE (1 << 8)
+
+/* radio config bits (actual values from EEPROM definition) */
+#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
+#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
+#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
+#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
+#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
+#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
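The radio-config and SKU words are plain bit fields, so applying the masks above is mechanical. The helper below is a hypothetical debug sketch, not part of this patch (it assumes <linux/device.h> for dev_info()), showing the intended decoding; as a worked value, a radio_cfg of 0x3102 decodes to RF type 2, TX antenna mask 0x1 and RX antenna mask 0x3.

static void iwl_example_print_radio_cfg(struct device *dev,
					u16 radio_cfg, u16 sku)
{
	/* bits 0-7: RF type/step/dash/pnum, 8-11: TX chains, 12-15: RX chains */
	dev_info(dev, "RF type %d step %d dash %d pnum %d\n",
		 EEPROM_RF_CFG_TYPE_MSK(radio_cfg),
		 EEPROM_RF_CFG_STEP_MSK(radio_cfg),
		 EEPROM_RF_CFG_DASH_MSK(radio_cfg),
		 EEPROM_RF_CFG_PNUM_MSK(radio_cfg));
	dev_info(dev, "TX ant mask 0x%x, RX ant mask 0x%x, 11n %s\n",
		 EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg),
		 EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg),
		 (sku & EEPROM_SKU_CAP_11N_ENABLE) ? "enabled" : "disabled");
}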
+
+struct iwl_eeprom_data {
+ int n_hw_addrs;
+ u8 hw_addr[ETH_ALEN];
+
+ u16 radio_config;
+
+ u8 calib_version;
+ __le16 calib_voltage;
+
+ __le16 raw_temperature;
+ __le16 kelvin_temperature;
+ __le16 kelvin_voltage;
+ __le16 xtal_calib[2];
+
+ u16 sku;
+ u16 radio_cfg;
+ u16 eeprom_version;
+ s8 max_tx_pwr_half_dbm;
+
+ u8 valid_tx_ant, valid_rx_ant;
+
+ struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+ struct ieee80211_channel channels[];
+};
+
+/**
+ * iwl_parse_eeprom_data - parse EEPROM data and return values
+ *
+ * @dev: device pointer we're parsing for, for debug only
+ * @cfg: device configuration for parsing and overrides
+ * @eeprom: the EEPROM data
+ * @eeprom_size: length of the EEPROM data
+ *
+ * This function parses all EEPROM values we need and then
+ * returns a (newly allocated) struct containing all the
+ * relevant values for driver use. The struct must be freed
+ * later with iwl_free_eeprom_data().
+ */
+struct iwl_eeprom_data *
+iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
+ const u8 *eeprom, size_t eeprom_size);
+
+/**
+ * iwl_free_eeprom_data - free EEPROM data
+ * @data: the data to free
+ */
+static inline void iwl_free_eeprom_data(struct iwl_eeprom_data *data)
+{
+ kfree(data);
+}
+
+int iwl_eeprom_check_version(struct iwl_eeprom_data *data,
+ struct iwl_trans *trans);
+
+#endif /* __iwl_eeprom_parse_h__ */
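Taken together with iwl_read_eeprom() from iwl-eeprom-read.h (added later in this patch), the API above is meant to be used roughly as follows. This is a minimal illustrative sketch with abbreviated error handling, not code from the patch; it assumes an already initialized struct iwl_trans and <linux/slab.h> for kfree():

static int iwl_example_load_nvm(struct iwl_trans *trans)
{
	struct iwl_eeprom_data *data;
	u8 *eeprom;
	size_t eeprom_size;
	int ret;

	/* read the raw EEPROM/OTP image from the device */
	ret = iwl_read_eeprom(trans, &eeprom, &eeprom_size);
	if (ret)
		return ret;

	/* parse it into the device-independent structure */
	data = iwl_parse_eeprom_data(trans->dev, trans->cfg,
				     eeprom, eeprom_size);
	kfree(eeprom);		/* the parsed copy is self-contained */
	if (!data)
		return -EINVAL;

	ret = iwl_eeprom_check_version(data, trans);
	if (ret) {
		iwl_free_eeprom_data(data);
		return ret;
	}

	/* ... use data->hw_addr, data->bands, etc. ... */
	iwl_free_eeprom_data(data);
	return 0;
}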
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
new file mode 100644
index 0000000..27c7da3
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
@@ -0,0 +1,463 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include "iwl-debug.h"
+#include "iwl-eeprom-read.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "iwl-csr.h"
+
+/*
+ * EEPROM access time values:
+ *
+ * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
+ * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
+ * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
+ * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
+ */
+#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
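The access sequence the comment above describes reduces to a write/poll/read triple. The helper below is a hypothetical illustration only (the patch itself open-codes the same pattern inside iwl_read_eeprom() and iwl_read_otp_word()); the CSR constants and iwl_* accessors it uses are the real ones from this file:

static int iwl_example_read_word(struct iwl_trans *trans, u16 addr, u16 *word)
{
	int ret;

	/* write the byte address (addr << 1) to start the read */
	iwl_write32(trans, CSR_EEPROM_REG,
		    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));

	/* poll for the read-valid bit, 10 uSec per loop, 5000 uSec max */
	ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
			   CSR_EEPROM_REG_READ_VALID_MSK,
			   CSR_EEPROM_REG_READ_VALID_MSK,
			   IWL_EEPROM_ACCESS_TIMEOUT);
	if (ret < 0)
		return ret;

	/* the 16-bit value sits in bits 31-16 of CSR_EEPROM_REG */
	*word = iwl_read32(trans, CSR_EEPROM_REG) >> 16;
	return 0;
}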
+
+#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
+#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
+
+
+/*
+ * The device's EEPROM semaphore prevents conflicts between driver and uCode
+ * when accessing the EEPROM; each access is a series of pulses to/from the
+ * EEPROM chip, not a single event, so even reads could conflict if they
+ * weren't arbitrated by the semaphore.
+ */
+
+#define EEPROM_SEM_TIMEOUT 10		/* microseconds */
+#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
+
+static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
+{
+ u16 count;
+ int ret;
+
+ for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
+ /* Request semaphore */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+ /* See if we got it */
+ ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ EEPROM_SEM_TIMEOUT);
+ if (ret >= 0) {
+ IWL_DEBUG_EEPROM(trans->dev,
+ "Acquired semaphore after %d tries.\n",
+ count+1);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
+{
+ iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+}
+
+static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp)
+{
+ u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+
+ IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp);
+
+ switch (gp) {
+ case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
+ if (!nvm_is_otp) {
+ IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n",
+ gp);
+ return -ENOENT;
+ }
+ return 0;
+ case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
+ case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
+ if (nvm_is_otp) {
+ IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
+ return -ENOENT;
+ }
+ return 0;
+ case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
+ default:
+ IWL_ERR(trans,
+ "bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n",
+ nvm_is_otp ? "OTP" : "EEPROM", gp);
+ return -ENOENT;
+ }
+}
+
+/******************************************************************************
+ *
+ * OTP related functions
+ *
+******************************************************************************/
+
+static void iwl_set_otp_access_absolute(struct iwl_trans *trans)
+{
+ iwl_read32(trans, CSR_OTP_GP_REG);
+
+ iwl_clear_bit(trans, CSR_OTP_GP_REG,
+ CSR_OTP_GP_REG_OTP_ACCESS_MODE);
+}
+
+static int iwl_nvm_is_otp(struct iwl_trans *trans)
+{
+ u32 otpgp;
+
+ /* OTP only valid for CP/PP and after */
+ switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) {
+ case CSR_HW_REV_TYPE_NONE:
+ IWL_ERR(trans, "Unknown hardware type\n");
+ return -EIO;
+ case CSR_HW_REV_TYPE_5300:
+ case CSR_HW_REV_TYPE_5350:
+ case CSR_HW_REV_TYPE_5100:
+ case CSR_HW_REV_TYPE_5150:
+ return 0;
+ default:
+ otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
+ if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
+ return 1;
+ return 0;
+ }
+}
+
+static int iwl_init_otp_access(struct iwl_trans *trans)
+{
+ int ret;
+
+ /* Enable 40MHz radio clock */
+ iwl_write32(trans, CSR_GP_CNTRL,
+ iwl_read32(trans, CSR_GP_CNTRL) |
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /* wait for clock to be ready */
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ 25000);
+ if (ret < 0) {
+ IWL_ERR(trans, "Time out access OTP\n");
+ } else {
+ iwl_set_bits_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_RESET_REQ);
+ udelay(5);
+ iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_RESET_REQ);
+
+ /*
+ * CSR auto clock gate disable bit -
+ * this is only applicable for HW with OTP shadow RAM
+ */
+ if (trans->cfg->base_params->shadow_ram_support)
+ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ }
+ return ret;
+}
+
+static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
+ __le16 *eeprom_data)
+{
+ int ret = 0;
+ u32 r;
+ u32 otpgp;
+
+ iwl_write32(trans, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+ ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ IWL_EEPROM_ACCESS_TIMEOUT);
+ if (ret < 0) {
+ IWL_ERR(trans, "Time out reading OTP[%d]\n", addr);
+ return ret;
+ }
+ r = iwl_read32(trans, CSR_EEPROM_REG);
+ /* check for ECC errors: */
+ otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
+ if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
+ /* stop in this case */
+ /* set the uncorrectable OTP ECC bit for acknowledgement */
+ iwl_set_bit(trans, CSR_OTP_GP_REG,
+ CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
+ IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
+ return -EINVAL;
+ }
+ if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
+ /* continue in this case */
+ /* set the correctable OTP ECC bit for acknowledgement */
+ iwl_set_bit(trans, CSR_OTP_GP_REG,
+ CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
+ IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
+ }
+ *eeprom_data = cpu_to_le16(r >> 16);
+ return 0;
+}
+
+/*
+ * iwl_is_otp_empty: check for empty OTP
+ */
+static bool iwl_is_otp_empty(struct iwl_trans *trans)
+{
+ u16 next_link_addr = 0;
+ __le16 link_value;
+ bool is_empty = false;
+
+ /* locate the beginning of OTP link list */
+ if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) {
+ if (!link_value) {
+ IWL_ERR(trans, "OTP is empty\n");
+ is_empty = true;
+ }
+ } else {
+ IWL_ERR(trans, "Unable to read first block of OTP list.\n");
+ is_empty = true;
+ }
+
+ return is_empty;
+}
+
+
+/*
+ * iwl_find_otp_image: find EEPROM image in OTP
+ * Find the OTP block that contains the EEPROM image.
+ * The last valid block on the link list (the block _before_ the last block)
+ * is the block we should read and use to configure the device.
+ * If all the available OTP blocks are full, the last block is the one we
+ * should read and use to configure the device.
+ * Only perform this operation if shadow RAM is disabled.
+ */
+static int iwl_find_otp_image(struct iwl_trans *trans,
+ u16 *validblockaddr)
+{
+ u16 next_link_addr = 0, valid_addr;
+ __le16 link_value = 0;
+ int usedblocks = 0;
+
+ /* set addressing mode to absolute to traverse the link list */
+ iwl_set_otp_access_absolute(trans);
+
+ /* checking for empty OTP or error */
+ if (iwl_is_otp_empty(trans))
+ return -EINVAL;
+
+ /*
+	 * Start traversing the link list until we reach the max number of
+	 * OTP blocks; different devices have a different number of OTP blocks.
+ */
+ do {
+		/* save the current valid block address and
+		 * check for more blocks on the link list
+ */
+ valid_addr = next_link_addr;
+ next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
+ IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n",
+ usedblocks, next_link_addr);
+ if (iwl_read_otp_word(trans, next_link_addr, &link_value))
+ return -EINVAL;
+ if (!link_value) {
+ /*
+			 * reached the end of the link list; return success
+			 * and set the address to point at the starting
+			 * address of the image
+ */
+ *validblockaddr = valid_addr;
+ /* skip first 2 bytes (link list pointer) */
+ *validblockaddr += 2;
+ return 0;
+ }
+ /* more in the link list, continue */
+ usedblocks++;
+ } while (usedblocks <= trans->cfg->base_params->max_ll_items);
+
+ /* OTP has no valid blocks */
+ IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n");
+ return -EINVAL;
+}
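To make the traversal concrete: with two chained blocks, where the link word at address 0 points at block 1 and block 1's own link word is 0, the loop exits on its second read and reports block 0's address plus 2 (skipping its link word) as the start of the image; only if the chain runs past max_ll_items blocks without hitting a zero link word does the function give up with -EINVAL.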
+
+/**
+ * iwl_read_eeprom - read EEPROM contents
+ *
+ * Load the EEPROM contents from the adapter and return the
+ * image together with its size.
+ *
+ * NOTE: This routine uses the non-debug IO access functions.
+ */
+int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
+{
+ __le16 *e;
+ u32 gp = iwl_read32(trans, CSR_EEPROM_GP);
+ int sz;
+ int ret;
+ u16 addr;
+ u16 validblockaddr = 0;
+ u16 cache_addr = 0;
+ int nvm_is_otp;
+
+ if (!eeprom || !eeprom_size)
+ return -EINVAL;
+
+ nvm_is_otp = iwl_nvm_is_otp(trans);
+ if (nvm_is_otp < 0)
+ return nvm_is_otp;
+
+ sz = trans->cfg->base_params->eeprom_size;
+ IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz);
+
+ e = kmalloc(sz, GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ ret = iwl_eeprom_verify_signature(trans, nvm_is_otp);
+ if (ret < 0) {
+ IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
+ goto err_free;
+ }
+
+ /* Make sure driver (instead of uCode) is allowed to read EEPROM */
+ ret = iwl_eeprom_acquire_semaphore(trans);
+ if (ret < 0) {
+ IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n");
+ goto err_free;
+ }
+
+ if (nvm_is_otp) {
+ ret = iwl_init_otp_access(trans);
+ if (ret) {
+ IWL_ERR(trans, "Failed to initialize OTP access.\n");
+ goto err_unlock;
+ }
+
+ iwl_write32(trans, CSR_EEPROM_GP,
+ iwl_read32(trans, CSR_EEPROM_GP) &
+ ~CSR_EEPROM_GP_IF_OWNER_MSK);
+
+ iwl_set_bit(trans, CSR_OTP_GP_REG,
+ CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
+ CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
+		/* traverse the linked list if shadow RAM is not supported */
+ if (!trans->cfg->base_params->shadow_ram_support) {
+ ret = iwl_find_otp_image(trans, &validblockaddr);
+ if (ret)
+ goto err_unlock;
+ }
+ for (addr = validblockaddr; addr < validblockaddr + sz;
+ addr += sizeof(u16)) {
+ __le16 eeprom_data;
+
+ ret = iwl_read_otp_word(trans, addr, &eeprom_data);
+ if (ret)
+ goto err_unlock;
+ e[cache_addr / 2] = eeprom_data;
+ cache_addr += sizeof(u16);
+ }
+ } else {
+ /* eeprom is an array of 16bit values */
+ for (addr = 0; addr < sz; addr += sizeof(u16)) {
+ u32 r;
+
+ iwl_write32(trans, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+
+ ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ IWL_EEPROM_ACCESS_TIMEOUT);
+ if (ret < 0) {
+ IWL_ERR(trans,
+ "Time out reading EEPROM[%d]\n", addr);
+ goto err_unlock;
+ }
+ r = iwl_read32(trans, CSR_EEPROM_REG);
+ e[addr / 2] = cpu_to_le16(r >> 16);
+ }
+ }
+
+ IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n",
+ nvm_is_otp ? "OTP" : "EEPROM");
+
+ iwl_eeprom_release_semaphore(trans);
+
+ *eeprom_size = sz;
+ *eeprom = (u8 *)e;
+ return 0;
+
+ err_unlock:
+ iwl_eeprom_release_semaphore(trans);
+ err_free:
+ kfree(e);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iwl_read_eeprom);
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
index c34c6a9..1337c9d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -58,72 +58,13 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*****************************************************************************/
-#ifndef __IWL_PHYDB_H__
-#define __IWL_PHYDB_H__
-
-#include <linux/types.h>
-
-#define IWL_NUM_PAPD_CH_GROUPS 4
-#define IWL_NUM_TXP_CH_GROUPS 8
-
-struct iwl_phy_db_entry {
- u16 size;
- u8 *data;
-};
-
-struct iwl_shared;
-
-/**
- * struct iwl_phy_db - stores phy configuration and calibration data.
- *
- * @cfg: phy configuration.
- * @calib_nch: non channel specific calibration data.
- * @calib_ch: channel specific calibration data.
- * @calib_ch_group_papd: calibration data related to papd channel group.
- * @calib_ch_group_txp: calibration data related to tx power chanel group.
- */
-struct iwl_phy_db {
- struct iwl_phy_db_entry cfg;
- struct iwl_phy_db_entry calib_nch;
- struct iwl_phy_db_entry calib_ch;
- struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
- struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
-
- u32 channel_num;
- u32 channel_size;
-
- /* for an access to the logger */
- struct device *dev;
-};
-
-enum iwl_phy_db_section_type {
- IWL_PHY_DB_CFG = 1,
- IWL_PHY_DB_CALIB_NCH,
- IWL_PHY_DB_CALIB_CH,
- IWL_PHY_DB_CALIB_CHG_PAPD,
- IWL_PHY_DB_CALIB_CHG_TXP,
- IWL_PHY_DB_MAX
-};
-
-/* for parsing of tx power channel group data that comes from the firmware*/
-struct iwl_phy_db_chg_txp {
- __le32 space;
- __le16 max_channel_idx;
-} __packed;
-
-struct iwl_phy_db *iwl_phy_db_init(struct device *dev);
-
-void iwl_phy_db_free(struct iwl_phy_db *phy_db);
+#ifndef __iwl_eeprom_h__
+#define __iwl_eeprom_h__
-int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type, u8 *data,
- u16 size, gfp_t alloc_ctx);
+#include "iwl-trans.h"
-int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type, u8 **data,
- u16 *size, u16 ch_id);
+int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size);
-#endif /* __IWL_PHYDB_H__ */
+#endif /* __iwl_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
deleted file mode 100644
index 50c5891..0000000
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ /dev/null
@@ -1,1148 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-agn.h"
-#include "iwl-eeprom.h"
-#include "iwl-io.h"
-#include "iwl-prph.h"
-
-/************************** EEPROM BANDS ****************************
- *
- * The iwl_eeprom_band definitions below provide the mapping from the
- * EEPROM contents to the specific channel number supported for each
- * band.
- *
- * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
- * definition below maps to physical channel 42 in the 5.2GHz spectrum.
- * The specific geography and calibration information for that channel
- * is contained in the eeprom map itself.
- *
- * During init, we copy the eeprom information and channel map
- * information into priv->channel_info_24/52 and priv->channel_map_24/52
- *
- * channel_map_24/52 provides the index in the channel_info array for a
- * given channel. We have to have two separate maps as there is channel
- * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
- * band_2
- *
- * A value of 0xff stored in the channel_map indicates that the channel
- * is not supported by the hardware at all.
- *
- * A value of 0xfe in the channel_map indicates that the channel is not
- * valid for Tx with the current hardware. This means that
- * while the system can tune and receive on a given channel, it may not
- * be able to associate or transmit any frames on that
- * channel. There is no corresponding channel information for that
- * entry.
- *
- *********************************************************************/
-
-/* 2.4 GHz */
-const u8 iwl_eeprom_band_1[14] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
-};
-
-/* 5.2 GHz bands */
-static const u8 iwl_eeprom_band_2[] = { /* 4915-5080MHz */
- 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
-};
-
-static const u8 iwl_eeprom_band_3[] = { /* 5170-5320MHz */
- 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
-};
-
-static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */
- 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
-};
-
-static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */
- 145, 149, 153, 157, 161, 165
-};
-
-static const u8 iwl_eeprom_band_6[] = { /* 2.4 ht40 channel */
- 1, 2, 3, 4, 5, 6, 7
-};
-
-static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
- 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
-};
-
-/******************************************************************************
- *
- * generic NVM functions
- *
-******************************************************************************/
-
-/*
- * The device's EEPROM semaphore prevents conflicts between driver and uCode
- * when accessing the EEPROM; each access is a series of pulses to/from the
- * EEPROM chip, not a single event, so even reads could conflict if they
- * weren't arbitrated by the semaphore.
- */
-
-#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
-#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-
-static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
-{
- u16 count;
- int ret;
-
- for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
- /* Request semaphore */
- iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-
- /* See if we got it */
- ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- EEPROM_SEM_TIMEOUT);
- if (ret >= 0) {
- IWL_DEBUG_EEPROM(trans,
- "Acquired semaphore after %d tries.\n",
- count+1);
- return ret;
- }
- }
-
- return ret;
-}
-
-static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
-{
- iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-
-}
-
-static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
-{
- u32 gp = iwl_read32(priv->trans, CSR_EEPROM_GP) &
- CSR_EEPROM_GP_VALID_MSK;
- int ret = 0;
-
- IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
- switch (gp) {
- case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
- if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
- IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n",
- gp);
- ret = -ENOENT;
- }
- break;
- case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
- case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
- if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
- IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp);
- ret = -ENOENT;
- }
- break;
- case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
- default:
- IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, "
- "EEPROM_GP=0x%08x\n",
- (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
- ? "OTP" : "EEPROM", gp);
- ret = -ENOENT;
- break;
- }
- return ret;
-}
-
-u16 iwl_eeprom_query16(struct iwl_priv *priv, size_t offset)
-{
- if (!priv->eeprom)
- return 0;
- return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
-}
-
-int iwl_eeprom_check_version(struct iwl_priv *priv)
-{
- u16 eeprom_ver;
- u16 calib_ver;
-
- eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
- calib_ver = iwl_eeprom_calib_version(priv);
-
- if (eeprom_ver < priv->cfg->eeprom_ver ||
- calib_ver < priv->cfg->eeprom_calib_ver)
- goto err;
-
- IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
- eeprom_ver, calib_ver);
-
- return 0;
-err:
- IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
- "CALIB=0x%x < 0x%x\n",
- eeprom_ver, priv->cfg->eeprom_ver,
- calib_ver, priv->cfg->eeprom_calib_ver);
- return -EINVAL;
-
-}
-
-int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
-{
- u16 radio_cfg;
-
- priv->hw_params.sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
- if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE &&
- !priv->cfg->ht_params) {
- IWL_ERR(priv, "Invalid 11n configuration\n");
- return -EINVAL;
- }
-
- if (!priv->hw_params.sku) {
- IWL_ERR(priv, "Invalid device sku\n");
- return -EINVAL;
- }
-
- IWL_INFO(priv, "Device SKU: 0x%X\n", priv->hw_params.sku);
-
- radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
-
- priv->hw_params.valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
- priv->hw_params.valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
-
- /* check overrides (some devices have wrong EEPROM) */
- if (priv->cfg->valid_tx_ant)
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- if (priv->cfg->valid_rx_ant)
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
-
- if (!priv->hw_params.valid_tx_ant || !priv->hw_params.valid_rx_ant) {
- IWL_ERR(priv, "Invalid chain (0x%X, 0x%X)\n",
- priv->hw_params.valid_tx_ant,
- priv->hw_params.valid_rx_ant);
- return -EINVAL;
- }
-
- IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
- priv->hw_params.valid_tx_ant, priv->hw_params.valid_rx_ant);
-
- return 0;
-}
-
-u16 iwl_eeprom_calib_version(struct iwl_priv *priv)
-{
- struct iwl_eeprom_calib_hdr *hdr;
-
- hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
- EEPROM_CALIB_ALL);
- return hdr->version;
-}
-
-static u32 eeprom_indirect_address(struct iwl_priv *priv, u32 address)
-{
- u16 offset = 0;
-
- if ((address & INDIRECT_ADDRESS) == 0)
- return address;
-
- switch (address & INDIRECT_TYPE_MSK) {
- case INDIRECT_HOST:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
- break;
- case INDIRECT_GENERAL:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
- break;
- case INDIRECT_REGULATORY:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
- break;
- case INDIRECT_TXP_LIMIT:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
- break;
- case INDIRECT_TXP_LIMIT_SIZE:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
- break;
- case INDIRECT_CALIBRATION:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
- break;
- case INDIRECT_PROCESS_ADJST:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
- break;
- case INDIRECT_OTHERS:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
- break;
- default:
- IWL_ERR(priv, "illegal indirect type: 0x%X\n",
- address & INDIRECT_TYPE_MSK);
- break;
- }
-
- /* translate the offset from words to byte */
- return (address & ADDRESS_MSK) + (offset << 1);
-}
-
-const u8 *iwl_eeprom_query_addr(struct iwl_priv *priv, size_t offset)
-{
- u32 address = eeprom_indirect_address(priv, offset);
- BUG_ON(address >= priv->cfg->base_params->eeprom_size);
- return &priv->eeprom[address];
-}
-
-void iwl_eeprom_get_mac(struct iwl_priv *priv, u8 *mac)
-{
- const u8 *addr = iwl_eeprom_query_addr(priv,
- EEPROM_MAC_ADDRESS);
- memcpy(mac, addr, ETH_ALEN);
-}
-
-/******************************************************************************
- *
- * OTP related functions
- *
-******************************************************************************/
-
-static void iwl_set_otp_access(struct iwl_trans *trans,
- enum iwl_access_mode mode)
-{
- iwl_read32(trans, CSR_OTP_GP_REG);
-
- if (mode == IWL_OTP_ACCESS_ABSOLUTE)
- iwl_clear_bit(trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_OTP_ACCESS_MODE);
- else
- iwl_set_bit(trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_OTP_ACCESS_MODE);
-}
-
-static int iwl_get_nvm_type(struct iwl_trans *trans, u32 hw_rev)
-{
- u32 otpgp;
- int nvm_type;
-
- /* OTP only valid for CP/PP and after */
- switch (hw_rev & CSR_HW_REV_TYPE_MSK) {
- case CSR_HW_REV_TYPE_NONE:
- IWL_ERR(trans, "Unknown hardware type\n");
- return -ENOENT;
- case CSR_HW_REV_TYPE_5300:
- case CSR_HW_REV_TYPE_5350:
- case CSR_HW_REV_TYPE_5100:
- case CSR_HW_REV_TYPE_5150:
- nvm_type = NVM_DEVICE_TYPE_EEPROM;
- break;
- default:
- otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
- if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
- nvm_type = NVM_DEVICE_TYPE_OTP;
- else
- nvm_type = NVM_DEVICE_TYPE_EEPROM;
- break;
- }
- return nvm_type;
-}
-
-static int iwl_init_otp_access(struct iwl_trans *trans)
-{
- int ret;
-
- /* Enable 40MHz radio clock */
- iwl_write32(trans, CSR_GP_CNTRL,
- iwl_read32(trans, CSR_GP_CNTRL) |
- CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
- /* wait for clock to be ready */
- ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
- 25000);
- if (ret < 0)
- IWL_ERR(trans, "Time out access OTP\n");
- else {
- iwl_set_bits_prph(trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_RESET_REQ);
- udelay(5);
- iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_RESET_REQ);
-
- /*
- * CSR auto clock gate disable bit -
- * this is only applicable for HW with OTP shadow RAM
- */
- if (trans->cfg->base_params->shadow_ram_support)
- iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
- CSR_RESET_LINK_PWR_MGMT_DISABLED);
- }
- return ret;
-}
-
-static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
- __le16 *eeprom_data)
-{
- int ret = 0;
- u32 r;
- u32 otpgp;
-
- iwl_write32(trans, CSR_EEPROM_REG,
- CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
- ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
- CSR_EEPROM_REG_READ_VALID_MSK,
- CSR_EEPROM_REG_READ_VALID_MSK,
- IWL_EEPROM_ACCESS_TIMEOUT);
- if (ret < 0) {
- IWL_ERR(trans, "Time out reading OTP[%d]\n", addr);
- return ret;
- }
- r = iwl_read32(trans, CSR_EEPROM_REG);
- /* check for ECC errors: */
- otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
- if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
- /* stop in this case */
- /* set the uncorrectable OTP ECC bit for acknowledgement */
- iwl_set_bit(trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
- IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
- return -EINVAL;
- }
- if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
- /* continue in this case */
- /* set the correctable OTP ECC bit for acknowledgement */
- iwl_set_bit(trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
- IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
- }
- *eeprom_data = cpu_to_le16(r >> 16);
- return 0;
-}
-
-/*
- * iwl_is_otp_empty: check for empty OTP
- */
-static bool iwl_is_otp_empty(struct iwl_trans *trans)
-{
- u16 next_link_addr = 0;
- __le16 link_value;
- bool is_empty = false;
-
- /* locate the beginning of OTP link list */
- if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) {
- if (!link_value) {
- IWL_ERR(trans, "OTP is empty\n");
- is_empty = true;
- }
- } else {
- IWL_ERR(trans, "Unable to read first block of OTP list.\n");
- is_empty = true;
- }
-
- return is_empty;
-}
-
-
-/*
- * iwl_find_otp_image: find EEPROM image in OTP
- * finding the OTP block that contains the EEPROM image.
- * the last valid block on the link list (the block _before_ the last block)
- * is the block we should read and used to configure the device.
- * If all the available OTP blocks are full, the last block will be the block
- * we should read and used to configure the device.
- * only perform this operation if shadow RAM is disabled
- */
-static int iwl_find_otp_image(struct iwl_trans *trans,
- u16 *validblockaddr)
-{
- u16 next_link_addr = 0, valid_addr;
- __le16 link_value = 0;
- int usedblocks = 0;
-
- /* set addressing mode to absolute to traverse the link list */
- iwl_set_otp_access(trans, IWL_OTP_ACCESS_ABSOLUTE);
-
- /* checking for empty OTP or error */
- if (iwl_is_otp_empty(trans))
- return -EINVAL;
-
- /*
- * start traverse link list
- * until reach the max number of OTP blocks
- * different devices have different number of OTP blocks
- */
- do {
- /* save current valid block address
- * check for more block on the link list
- */
- valid_addr = next_link_addr;
- next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
- IWL_DEBUG_EEPROM(trans, "OTP blocks %d addr 0x%x\n",
- usedblocks, next_link_addr);
- if (iwl_read_otp_word(trans, next_link_addr, &link_value))
- return -EINVAL;
- if (!link_value) {
- /*
- * reach the end of link list, return success and
- * set address point to the starting address
- * of the image
- */
- *validblockaddr = valid_addr;
- /* skip first 2 bytes (link list pointer) */
- *validblockaddr += 2;
- return 0;
- }
- /* more in the link list, continue */
- usedblocks++;
- } while (usedblocks <= trans->cfg->base_params->max_ll_items);
-
- /* OTP has no valid blocks */
- IWL_DEBUG_EEPROM(trans, "OTP has no valid blocks\n");
- return -EINVAL;
-}
-
-/******************************************************************************
- *
- * Tx Power related functions
- *
-******************************************************************************/
-/**
- * iwl_get_max_txpower_avg - get the highest tx power from all chains.
- * find the highest tx power from all chains for the channel
- */
-static s8 iwl_get_max_txpower_avg(const struct iwl_cfg *cfg,
- struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
- int element, s8 *max_txpower_in_half_dbm)
-{
- s8 max_txpower_avg = 0; /* (dBm) */
-
- /* Take the highest tx power from any valid chains */
- if ((cfg->valid_tx_ant & ANT_A) &&
- (enhanced_txpower[element].chain_a_max > max_txpower_avg))
- max_txpower_avg = enhanced_txpower[element].chain_a_max;
- if ((cfg->valid_tx_ant & ANT_B) &&
- (enhanced_txpower[element].chain_b_max > max_txpower_avg))
- max_txpower_avg = enhanced_txpower[element].chain_b_max;
- if ((cfg->valid_tx_ant & ANT_C) &&
- (enhanced_txpower[element].chain_c_max > max_txpower_avg))
- max_txpower_avg = enhanced_txpower[element].chain_c_max;
- if (((cfg->valid_tx_ant == ANT_AB) |
- (cfg->valid_tx_ant == ANT_BC) |
- (cfg->valid_tx_ant == ANT_AC)) &&
- (enhanced_txpower[element].mimo2_max > max_txpower_avg))
- max_txpower_avg = enhanced_txpower[element].mimo2_max;
- if ((cfg->valid_tx_ant == ANT_ABC) &&
- (enhanced_txpower[element].mimo3_max > max_txpower_avg))
- max_txpower_avg = enhanced_txpower[element].mimo3_max;
-
- /*
- * max. tx power in EEPROM is in 1/2 dBm format
- * convert from 1/2 dBm to dBm (round-up convert)
- * but we also do not want to loss 1/2 dBm resolution which
- * will impact performance
- */
- *max_txpower_in_half_dbm = max_txpower_avg;
- return (max_txpower_avg & 0x01) + (max_txpower_avg >> 1);
-}
-
-static void
-iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv,
- struct iwl_eeprom_enhanced_txpwr *txp,
- s8 max_txpower_avg)
-{
- int ch_idx;
- bool is_ht40 = txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ;
- enum ieee80211_band band;
-
- band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
- IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
-
- for (ch_idx = 0; ch_idx < priv->channel_count; ch_idx++) {
- struct iwl_channel_info *ch_info = &priv->channel_info[ch_idx];
-
- /* update matching channel or from common data only */
- if (txp->channel != 0 && ch_info->channel != txp->channel)
- continue;
-
- /* update matching band only */
- if (band != ch_info->band)
- continue;
-
- if (ch_info->max_power_avg < max_txpower_avg && !is_ht40) {
- ch_info->max_power_avg = max_txpower_avg;
- ch_info->curr_txpow = max_txpower_avg;
- ch_info->scan_power = max_txpower_avg;
- }
-
- if (is_ht40 && ch_info->ht40_max_power_avg < max_txpower_avg)
- ch_info->ht40_max_power_avg = max_txpower_avg;
- }
-}
-
-#define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
-#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
-#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)
-
-#define TXP_CHECK_AND_PRINT(x) ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) \
- ? # x " " : "")
-
-static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
-{
- struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
- int idx, entries;
- __le16 *txp_len;
- s8 max_txp_avg, max_txp_avg_halfdbm;
-
- BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
-
- /* the length is in 16-bit words, but we want entries */
- txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
- entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
-
- txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
-
- for (idx = 0; idx < entries; idx++) {
- txp = &txp_array[idx];
- /* skip invalid entries */
- if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
- continue;
-
- IWL_DEBUG_EEPROM(priv, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
- (txp->channel && (txp->flags &
- IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ?
- "Common " : (txp->channel) ?
- "Channel" : "Common",
- (txp->channel),
- TXP_CHECK_AND_PRINT(VALID),
- TXP_CHECK_AND_PRINT(BAND_52G),
- TXP_CHECK_AND_PRINT(OFDM),
- TXP_CHECK_AND_PRINT(40MHZ),
- TXP_CHECK_AND_PRINT(HT_AP),
- TXP_CHECK_AND_PRINT(RES1),
- TXP_CHECK_AND_PRINT(RES2),
- TXP_CHECK_AND_PRINT(COMMON_TYPE),
- txp->flags);
- IWL_DEBUG_EEPROM(priv, "\t\t chain_A: 0x%02x "
- "chain_B: 0X%02x chain_C: 0X%02x\n",
- txp->chain_a_max, txp->chain_b_max,
- txp->chain_c_max);
- IWL_DEBUG_EEPROM(priv, "\t\t MIMO2: 0x%02x "
- "MIMO3: 0x%02x High 20_on_40: 0x%02x "
- "Low 20_on_40: 0x%02x\n",
- txp->mimo2_max, txp->mimo3_max,
- ((txp->delta_20_in_40 & 0xf0) >> 4),
- (txp->delta_20_in_40 & 0x0f));
-
- max_txp_avg = iwl_get_max_txpower_avg(priv->cfg, txp_array, idx,
- &max_txp_avg_halfdbm);
-
- /*
- * Update the user limit values values to the highest
- * power supported by any channel
- */
- if (max_txp_avg > priv->tx_power_user_lmt)
- priv->tx_power_user_lmt = max_txp_avg;
- if (max_txp_avg_halfdbm > priv->tx_power_lmt_in_half_dbm)
- priv->tx_power_lmt_in_half_dbm = max_txp_avg_halfdbm;
-
- iwl_eeprom_enh_txp_read_element(priv, txp, max_txp_avg);
- }
-}
-
-/**
- * iwl_eeprom_init - read EEPROM contents
- *
- * Load the EEPROM contents from adapter into priv->eeprom
- *
- * NOTE: This routine uses the non-debug IO access functions.
- */
-int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
-{
- __le16 *e;
- u32 gp = iwl_read32(priv->trans, CSR_EEPROM_GP);
- int sz;
- int ret;
- u16 addr;
- u16 validblockaddr = 0;
- u16 cache_addr = 0;
-
- priv->nvm_device_type = iwl_get_nvm_type(priv->trans, hw_rev);
- if (priv->nvm_device_type == -ENOENT)
- return -ENOENT;
- /* allocate eeprom */
- sz = priv->cfg->base_params->eeprom_size;
- IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
- priv->eeprom = kzalloc(sz, GFP_KERNEL);
- if (!priv->eeprom) {
- ret = -ENOMEM;
- goto alloc_err;
- }
- e = (__le16 *)priv->eeprom;
-
- ret = iwl_eeprom_verify_signature(priv);
- if (ret < 0) {
- IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
- ret = -ENOENT;
- goto err;
- }
-
- /* Make sure driver (instead of uCode) is allowed to read EEPROM */
- ret = iwl_eeprom_acquire_semaphore(priv->trans);
- if (ret < 0) {
- IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
- ret = -ENOENT;
- goto err;
- }
-
- if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
-
- ret = iwl_init_otp_access(priv->trans);
- if (ret) {
- IWL_ERR(priv, "Failed to initialize OTP access.\n");
- ret = -ENOENT;
- goto done;
- }
- iwl_write32(priv->trans, CSR_EEPROM_GP,
- iwl_read32(priv->trans, CSR_EEPROM_GP) &
- ~CSR_EEPROM_GP_IF_OWNER_MSK);
-
- iwl_set_bit(priv->trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
- CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
- /* traversing the linked list if no shadow ram supported */
- if (!priv->cfg->base_params->shadow_ram_support) {
- if (iwl_find_otp_image(priv->trans, &validblockaddr)) {
- ret = -ENOENT;
- goto done;
- }
- }
- for (addr = validblockaddr; addr < validblockaddr + sz;
- addr += sizeof(u16)) {
- __le16 eeprom_data;
-
- ret = iwl_read_otp_word(priv->trans, addr,
- &eeprom_data);
- if (ret)
- goto done;
- e[cache_addr / 2] = eeprom_data;
- cache_addr += sizeof(u16);
- }
- } else {
- /* eeprom is an array of 16bit values */
- for (addr = 0; addr < sz; addr += sizeof(u16)) {
- u32 r;
-
- iwl_write32(priv->trans, CSR_EEPROM_REG,
- CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
-
- ret = iwl_poll_bit(priv->trans, CSR_EEPROM_REG,
- CSR_EEPROM_REG_READ_VALID_MSK,
- CSR_EEPROM_REG_READ_VALID_MSK,
- IWL_EEPROM_ACCESS_TIMEOUT);
- if (ret < 0) {
- IWL_ERR(priv,
- "Time out reading EEPROM[%d]\n", addr);
- goto done;
- }
- r = iwl_read32(priv->trans, CSR_EEPROM_REG);
- e[addr / 2] = cpu_to_le16(r >> 16);
- }
- }
-
- IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
- (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
- ? "OTP" : "EEPROM",
- iwl_eeprom_query16(priv, EEPROM_VERSION));
-
- ret = 0;
-done:
- iwl_eeprom_release_semaphore(priv->trans);
-
-err:
- if (ret)
- iwl_eeprom_free(priv);
-alloc_err:
- return ret;
-}
-
-void iwl_eeprom_free(struct iwl_priv *priv)
-{
- kfree(priv->eeprom);
- priv->eeprom = NULL;
-}
-
-static void iwl_init_band_reference(struct iwl_priv *priv,
- int eep_band, int *eeprom_ch_count,
- const struct iwl_eeprom_channel **eeprom_ch_info,
- const u8 **eeprom_ch_index)
-{
- u32 offset = priv->lib->
- eeprom_ops.regulatory_bands[eep_band - 1];
- switch (eep_band) {
- case 1: /* 2.4GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwl_eeprom_band_1;
- break;
- case 2: /* 4.9GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwl_eeprom_band_2;
- break;
- case 3: /* 5.2GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwl_eeprom_band_3;
- break;
- case 4: /* 5.5GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwl_eeprom_band_4;
- break;
- case 5: /* 5.7GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwl_eeprom_band_5;
- break;
- case 6: /* 2.4GHz ht40 channels */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwl_eeprom_band_6;
- break;
- case 7: /* 5 GHz ht40 channels */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwl_eeprom_band_7;
- break;
- default:
- BUG();
- return;
- }
-}
-
-#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
- ? # x " " : "")
-/**
- * iwl_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
- *
- * Does not set up a command, or touch hardware.
- */
-static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
- enum ieee80211_band band, u16 channel,
- const struct iwl_eeprom_channel *eeprom_ch,
- u8 clear_ht40_extension_channel)
-{
- struct iwl_channel_info *ch_info;
-
- ch_info = (struct iwl_channel_info *)
- iwl_get_channel_info(priv, band, channel);
-
- if (!is_channel_valid(ch_info))
- return -1;
-
- IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
- " Ad-Hoc %ssupported\n",
- ch_info->channel,
- is_channel_a_band(ch_info) ?
- "5.2" : "2.4",
- CHECK_AND_PRINT(IBSS),
- CHECK_AND_PRINT(ACTIVE),
- CHECK_AND_PRINT(RADAR),
- CHECK_AND_PRINT(WIDE),
- CHECK_AND_PRINT(DFS),
- eeprom_ch->flags,
- eeprom_ch->max_power_avg,
- ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
- && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
- "" : "not ");
-
- ch_info->ht40_eeprom = *eeprom_ch;
- ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
- ch_info->ht40_flags = eeprom_ch->flags;
- if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
- ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
-
- return 0;
-}
-
-#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
- ? # x " " : "")
-
-/**
- * iwl_init_channel_map - Set up driver's info for all possible channels
- */
-int iwl_init_channel_map(struct iwl_priv *priv)
-{
- int eeprom_ch_count = 0;
- const u8 *eeprom_ch_index = NULL;
- const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
- int band, ch;
- struct iwl_channel_info *ch_info;
-
- if (priv->channel_count) {
- IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
- return 0;
- }
-
- IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
-
- priv->channel_count =
- ARRAY_SIZE(iwl_eeprom_band_1) +
- ARRAY_SIZE(iwl_eeprom_band_2) +
- ARRAY_SIZE(iwl_eeprom_band_3) +
- ARRAY_SIZE(iwl_eeprom_band_4) +
- ARRAY_SIZE(iwl_eeprom_band_5);
-
- IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
- priv->channel_count);
-
- priv->channel_info = kcalloc(priv->channel_count,
- sizeof(struct iwl_channel_info),
- GFP_KERNEL);
- if (!priv->channel_info) {
- IWL_ERR(priv, "Could not allocate channel_info\n");
- priv->channel_count = 0;
- return -ENOMEM;
- }
-
- ch_info = priv->channel_info;
-
- /* Loop through the 5 EEPROM bands adding them in order to the
- * channel map we maintain (which contains more information than
- * what is in the EEPROM alone) */
- for (band = 1; band <= 5; band++) {
-
- iwl_init_band_reference(priv, band, &eeprom_ch_count,
- &eeprom_ch_info, &eeprom_ch_index);
-
- /* Loop through each band adding each of the channels */
- for (ch = 0; ch < eeprom_ch_count; ch++) {
- ch_info->channel = eeprom_ch_index[ch];
- ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
- IEEE80211_BAND_5GHZ;
-
- /* permanently store EEPROM's channel regulatory flags
- * and max power in channel info database. */
- ch_info->eeprom = eeprom_ch_info[ch];
-
- /* Copy the run-time flags so they are there even on
- * invalid channels */
- ch_info->flags = eeprom_ch_info[ch].flags;
- /* First write that ht40 is not enabled, and then enable
- * one by one */
- ch_info->ht40_extension_channel =
- IEEE80211_CHAN_NO_HT40;
-
- if (!(is_channel_valid(ch_info))) {
- IWL_DEBUG_EEPROM(priv,
- "Ch. %d Flags %x [%sGHz] - "
- "No traffic\n",
- ch_info->channel,
- ch_info->flags,
- is_channel_a_band(ch_info) ?
- "5.2" : "2.4");
- ch_info++;
- continue;
- }
-
- /* Initialize regulatory-based run-time data */
- ch_info->max_power_avg = ch_info->curr_txpow =
- eeprom_ch_info[ch].max_power_avg;
- ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
- ch_info->min_power = 0;
-
- IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
- "%s%s%s%s%s%s(0x%02x %ddBm):"
- " Ad-Hoc %ssupported\n",
- ch_info->channel,
- is_channel_a_band(ch_info) ?
- "5.2" : "2.4",
- CHECK_AND_PRINT_I(VALID),
- CHECK_AND_PRINT_I(IBSS),
- CHECK_AND_PRINT_I(ACTIVE),
- CHECK_AND_PRINT_I(RADAR),
- CHECK_AND_PRINT_I(WIDE),
- CHECK_AND_PRINT_I(DFS),
- eeprom_ch_info[ch].flags,
- eeprom_ch_info[ch].max_power_avg,
- ((eeprom_ch_info[ch].
- flags & EEPROM_CHANNEL_IBSS)
- && !(eeprom_ch_info[ch].
- flags & EEPROM_CHANNEL_RADAR))
- ? "" : "not ");
-
- ch_info++;
- }
- }
-
- /* Check if we do have HT40 channels */
- if (priv->lib->eeprom_ops.regulatory_bands[5] ==
- EEPROM_REGULATORY_BAND_NO_HT40 &&
- priv->lib->eeprom_ops.regulatory_bands[6] ==
- EEPROM_REGULATORY_BAND_NO_HT40)
- return 0;
-
- /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
- for (band = 6; band <= 7; band++) {
- enum ieee80211_band ieeeband;
-
- iwl_init_band_reference(priv, band, &eeprom_ch_count,
- &eeprom_ch_info, &eeprom_ch_index);
-
- /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
- ieeeband =
- (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
-
- /* Loop through each band adding each of the channels */
- for (ch = 0; ch < eeprom_ch_count; ch++) {
- /* Set up driver's info for lower half */
- iwl_mod_ht40_chan_info(priv, ieeeband,
- eeprom_ch_index[ch],
- &eeprom_ch_info[ch],
- IEEE80211_CHAN_NO_HT40PLUS);
-
- /* Set up driver's info for upper half */
- iwl_mod_ht40_chan_info(priv, ieeeband,
- eeprom_ch_index[ch] + 4,
- &eeprom_ch_info[ch],
- IEEE80211_CHAN_NO_HT40MINUS);
- }
- }
-
- /* For newer devices (6000 series and up) the
- * EEPROM contains enhanced tx power information;
- * the driver needs to process this additional
- * information to determine the max channel tx power limits
- */
- if (priv->lib->eeprom_ops.enhanced_txpower)
- iwl_eeprom_enhanced_txpower(priv);
-
- return 0;
-}
-
-/*
- * iwl_free_channel_map - undo allocations in iwl_init_channel_map
- */
-void iwl_free_channel_map(struct iwl_priv *priv)
-{
- kfree(priv->channel_info);
- priv->channel_count = 0;
-}
-
-/**
- * iwl_get_channel_info - Find driver's private channel info
- *
- * Based on band and channel number.
- */
-const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
- enum ieee80211_band band, u16 channel)
-{
- int i;
-
- switch (band) {
- case IEEE80211_BAND_5GHZ:
- for (i = 14; i < priv->channel_count; i++) {
- if (priv->channel_info[i].channel == channel)
- return &priv->channel_info[i];
- }
- break;
- case IEEE80211_BAND_2GHZ:
- if (channel >= 1 && channel <= 14)
- return &priv->channel_info[channel - 1];
- break;
- default:
- BUG();
- }
-
- return NULL;
-}
-
-void iwl_rf_config(struct iwl_priv *priv)
-{
- u16 radio_cfg;
-
- radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
-
- /* write radio config values to register */
- if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
- iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
- EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
- EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
- EEPROM_RF_CFG_DASH_MSK(radio_cfg));
- IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n",
- EEPROM_RF_CFG_TYPE_MSK(radio_cfg),
- EEPROM_RF_CFG_STEP_MSK(radio_cfg),
- EEPROM_RF_CFG_DASH_MSK(radio_cfg));
- } else
- WARN_ON(1);
-
- /* set CSR_HW_CONFIG_REG for uCode use */
- iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
- CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
deleted file mode 100644
index 64bfd94..0000000
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ /dev/null
@@ -1,269 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_eeprom_h__
-#define __iwl_eeprom_h__
-
-#include <net/mac80211.h>
-
-struct iwl_priv;
-
-/*
- * EEPROM access time values:
- *
- * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
- * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
- * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
- * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
- */
-#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
-
-#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
-#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-
-
-/*
- * Regulatory channel usage flags in EEPROM struct iwl_eeprom_channel.flags.
- *
- * IBSS and/or AP operation is allowed *only* on those channels with
- * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
- * RADAR detection is not supported by the 4965 driver, but is a
- * requirement for establishing a new network for legal operation on channels
- * requiring RADAR detection or restricting ACTIVE scanning.
- *
- * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
- * It only indicates that 20 MHz channel use is supported; HT40 channel
- * usage is indicated by a separate set of regulatory flags for each
- * HT40 channel pair.
- *
- * NOTE: Using a channel inappropriately will result in a uCode error!
- */
-#define IWL_NUM_TX_CALIB_GROUPS 5
-enum {
- EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
- EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
- /* Bit 2 Reserved */
- EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
- EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
- EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
- /* Bit 6 Reserved (was Narrow Channel) */
- EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
-};
-
-/* SKU Capabilities */
-#define EEPROM_SKU_CAP_BAND_24GHZ (1 << 4)
-#define EEPROM_SKU_CAP_BAND_52GHZ (1 << 5)
-#define EEPROM_SKU_CAP_11N_ENABLE (1 << 6)
-#define EEPROM_SKU_CAP_AMT_ENABLE (1 << 7)
-#define EEPROM_SKU_CAP_IPAN_ENABLE (1 << 8)
-
-/* *regulatory* channel data format in eeprom, one for each channel.
- * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
-struct iwl_eeprom_channel {
- u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
- s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
-} __packed;
-
-enum iwl_eeprom_enhanced_txpwr_flags {
- IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0),
- IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1),
- IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2),
- IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3),
- IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4),
- IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5),
- IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6),
- IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7),
-};
-
-/**
- * iwl_eeprom_enhanced_txpwr structure
- * This structure presents the enhanced regulatory tx power limit layout
- * in the eeprom image.
- * The enhanced regulatory tx power portion of the eeprom image can be broken
- * down into individual structures; each one is 8 bytes in size and contains
- * the following information:
- * @flags: entry flags
- * @channel: channel number
- * @chain_a_max: chain a max power in 1/2 dBm
- * @chain_b_max: chain b max power in 1/2 dBm
- * @chain_c_max: chain c max power in 1/2 dBm
- * @delta_20_in_40: 20-in-40 deltas (hi/lo)
- * @mimo2_max: mimo2 max power in 1/2 dBm
- * @mimo3_max: mimo3 max power in 1/2 dBm
- *
- */
-struct iwl_eeprom_enhanced_txpwr {
- u8 flags;
- u8 channel;
- s8 chain_a_max;
- s8 chain_b_max;
- s8 chain_c_max;
- u8 delta_20_in_40;
- s8 mimo2_max;
- s8 mimo3_max;
-} __packed;
-
-/* calibration */
-struct iwl_eeprom_calib_hdr {
- u8 version;
- u8 pa_type;
- __le16 voltage;
-} __packed;
-
-#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
-#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL)
-
-/* temperature */
-#define EEPROM_KELVIN_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
-#define EEPROM_RAW_TEMPERATURE ((2*0x12B) | EEPROM_CALIB_ALL)
-
-
-/* agn links */
-#define EEPROM_LINK_HOST (2*0x64)
-#define EEPROM_LINK_GENERAL (2*0x65)
-#define EEPROM_LINK_REGULATORY (2*0x66)
-#define EEPROM_LINK_CALIBRATION (2*0x67)
-#define EEPROM_LINK_PROCESS_ADJST (2*0x68)
-#define EEPROM_LINK_OTHERS (2*0x69)
-#define EEPROM_LINK_TXP_LIMIT (2*0x6a)
-#define EEPROM_LINK_TXP_LIMIT_SIZE (2*0x6b)
-
-/* agn regulatory - indirect access */
-#define EEPROM_REG_BAND_1_CHANNELS ((0x08)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */
-#define EEPROM_REG_BAND_2_CHANNELS ((0x26)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 26 bytes */
-#define EEPROM_REG_BAND_3_CHANNELS ((0x42)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
-#define EEPROM_REG_BAND_4_CHANNELS ((0x5C)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
-#define EEPROM_REG_BAND_5_CHANNELS ((0x74)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 12 bytes */
-#define EEPROM_REG_BAND_24_HT40_CHANNELS ((0x82)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
-#define EEPROM_REG_BAND_52_HT40_CHANNELS ((0x92)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
-
-/* 6000 regulatory - indirect access */
-#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
-/* 2.4 GHz */
-extern const u8 iwl_eeprom_band_1[14];
-
-#define ADDRESS_MSK 0x0000FFFF
-#define INDIRECT_TYPE_MSK 0x000F0000
-#define INDIRECT_HOST 0x00010000
-#define INDIRECT_GENERAL 0x00020000
-#define INDIRECT_REGULATORY 0x00030000
-#define INDIRECT_CALIBRATION 0x00040000
-#define INDIRECT_PROCESS_ADJST 0x00050000
-#define INDIRECT_OTHERS 0x00060000
-#define INDIRECT_TXP_LIMIT 0x00070000
-#define INDIRECT_TXP_LIMIT_SIZE 0x00080000
-#define INDIRECT_ADDRESS 0x00100000
-
-/* General */
-#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
-#define EEPROM_SUBSYSTEM_ID (2*0x0A) /* 2 bytes */
-#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
-#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
-#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
-#define EEPROM_VERSION (2*0x44) /* 2 bytes */
-#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
-#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
-#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
-#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
-
-/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
-#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
-#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
-#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
-#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
-#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
-#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
-
-#define EEPROM_RF_CONFIG_TYPE_MAX 0x3
-
-#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
-
-struct iwl_eeprom_ops {
- const u32 regulatory_bands[7];
- bool enhanced_txpower;
-};
-
-
-int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev);
-void iwl_eeprom_free(struct iwl_priv *priv);
-int iwl_eeprom_check_version(struct iwl_priv *priv);
-int iwl_eeprom_init_hw_params(struct iwl_priv *priv);
-u16 iwl_eeprom_calib_version(struct iwl_priv *priv);
-const u8 *iwl_eeprom_query_addr(struct iwl_priv *priv, size_t offset);
-u16 iwl_eeprom_query16(struct iwl_priv *priv, size_t offset);
-void iwl_eeprom_get_mac(struct iwl_priv *priv, u8 *mac);
-int iwl_init_channel_map(struct iwl_priv *priv);
-void iwl_free_channel_map(struct iwl_priv *priv);
-const struct iwl_channel_info *iwl_get_channel_info(
- const struct iwl_priv *priv,
- enum ieee80211_band band, u16 channel);
-void iwl_rf_config(struct iwl_priv *priv);
-
-#endif /* __iwl_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 74bce97..8060466 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -421,6 +421,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
(FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
+#define FH_TX_TRB_REG(_chan) (FH_MEM_LOWER_BOUND + 0x958 + (_chan) * 4)
+
/* Instruct FH to increment the retry count of a packet when
* it is brought from the memory to TX-FIFO
*/
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 0f8b8aa..3dfebfb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -27,6 +27,7 @@
*****************************************************************************/
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/export.h>
#include "iwl-io.h"
#include "iwl-csr.h"
@@ -52,6 +53,7 @@ void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
__iwl_set_bit(trans, reg, mask);
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
+EXPORT_SYMBOL_GPL(iwl_set_bit);
void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
{
@@ -61,6 +63,25 @@ void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
__iwl_clear_bit(trans, reg, mask);
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
+EXPORT_SYMBOL_GPL(iwl_clear_bit);
+
+void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
+{
+ unsigned long flags;
+ u32 v;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ WARN_ON_ONCE(value & ~mask);
+#endif
+
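+ /* read-modify-write: clear the masked bits, then set the requested value */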
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ v = iwl_read32(trans, reg);
+ v &= ~mask;
+ v |= value;
+ iwl_write32(trans, reg, v);
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
+}
+EXPORT_SYMBOL_GPL(iwl_set_bits_mask);
int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout)
@@ -76,6 +97,7 @@ int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
return -ETIMEDOUT;
}
+EXPORT_SYMBOL_GPL(iwl_poll_bit);
int iwl_grab_nic_access_silent(struct iwl_trans *trans)
{
@@ -117,6 +139,7 @@ int iwl_grab_nic_access_silent(struct iwl_trans *trans)
return 0;
}
+EXPORT_SYMBOL_GPL(iwl_grab_nic_access_silent);
bool iwl_grab_nic_access(struct iwl_trans *trans)
{
@@ -130,6 +153,7 @@ bool iwl_grab_nic_access(struct iwl_trans *trans)
return true;
}
+EXPORT_SYMBOL_GPL(iwl_grab_nic_access);
void iwl_release_nic_access(struct iwl_trans *trans)
{
@@ -144,6 +168,7 @@ void iwl_release_nic_access(struct iwl_trans *trans)
*/
mmiowb();
}
+EXPORT_SYMBOL_GPL(iwl_release_nic_access);
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
{
@@ -158,6 +183,7 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
return value;
}
+EXPORT_SYMBOL_GPL(iwl_read_direct32);
void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
{
@@ -170,6 +196,7 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
}
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
+EXPORT_SYMBOL_GPL(iwl_write_direct32);
int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
int timeout)
@@ -185,6 +212,7 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
return -ETIMEDOUT;
}
+EXPORT_SYMBOL_GPL(iwl_poll_direct_bit);
static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 reg)
{
@@ -211,6 +239,7 @@ u32 iwl_read_prph(struct iwl_trans *trans, u32 reg)
spin_unlock_irqrestore(&trans->reg_lock, flags);
return val;
}
+EXPORT_SYMBOL_GPL(iwl_read_prph);
void iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
{
@@ -223,6 +252,7 @@ void iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
}
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
+EXPORT_SYMBOL_GPL(iwl_write_prph);
void iwl_set_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
{
@@ -236,6 +266,7 @@ void iwl_set_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
}
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
+EXPORT_SYMBOL_GPL(iwl_set_bits_prph);
void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
u32 bits, u32 mask)
@@ -250,6 +281,7 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
}
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
+EXPORT_SYMBOL_GPL(iwl_set_bits_mask_prph);
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
{
@@ -264,9 +296,10 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
}
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
+EXPORT_SYMBOL_GPL(iwl_clear_bits_prph);
-void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr,
- void *buf, int words)
+void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords)
{
unsigned long flags;
int offs;
@@ -275,24 +308,26 @@ void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr,
spin_lock_irqsave(&trans->reg_lock, flags);
if (likely(iwl_grab_nic_access(trans))) {
iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
- for (offs = 0; offs < words; offs++)
+ for (offs = 0; offs < dwords; offs++)
vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
iwl_release_nic_access(trans);
}
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
+EXPORT_SYMBOL_GPL(_iwl_read_targ_mem_dwords);
u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr)
{
u32 value;
- _iwl_read_targ_mem_words(trans, addr, &value, 1);
+ _iwl_read_targ_mem_dwords(trans, addr, &value, 1);
return value;
}
+EXPORT_SYMBOL_GPL(iwl_read_targ_mem);
-int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
- void *buf, int words)
+int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords)
{
unsigned long flags;
int offs, result = 0;
@@ -301,7 +336,7 @@ int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
spin_lock_irqsave(&trans->reg_lock, flags);
if (likely(iwl_grab_nic_access(trans))) {
iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
- for (offs = 0; offs < words; offs++)
+ for (offs = 0; offs < dwords; offs++)
iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]);
iwl_release_nic_access(trans);
} else
@@ -310,8 +345,10 @@ int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
return result;
}
+EXPORT_SYMBOL_GPL(_iwl_write_targ_mem_dwords);
int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val)
{
- return _iwl_write_targ_mem_words(trans, addr, &val, 1);
+ return _iwl_write_targ_mem_dwords(trans, addr, &val, 1);
}
+EXPORT_SYMBOL_GPL(iwl_write_targ_mem);
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index abb3250..50d3819 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -54,6 +54,8 @@ static inline u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask);
void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask);
+void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value);
+
int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout);
int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
@@ -74,18 +76,18 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
u32 bits, u32 mask);
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask);
-void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr,
- void *buf, int words);
+void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords);
-#define iwl_read_targ_mem_words(trans, addr, buf, bufsize) \
+#define iwl_read_targ_mem_bytes(trans, addr, buf, bufsize) \
do { \
BUILD_BUG_ON((bufsize) % sizeof(u32)); \
- _iwl_read_targ_mem_words(trans, addr, buf, \
- (bufsize) / sizeof(u32));\
+ _iwl_read_targ_mem_dwords(trans, addr, buf, \
+ (bufsize) / sizeof(u32));\
} while (0)
-int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
- void *buf, int words);
+int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords);
u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr);
int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val);
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
index 0066b89..c61f207 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -61,6 +61,7 @@
*
*****************************************************************************/
#include <linux/sched.h>
+#include <linux/export.h>
#include "iwl-notif-wait.h"
@@ -71,6 +72,7 @@ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
INIT_LIST_HEAD(&notif_wait->notif_waits);
init_waitqueue_head(&notif_wait->notif_waitq);
}
+EXPORT_SYMBOL_GPL(iwl_notification_wait_init);
void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt)
@@ -115,20 +117,20 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
if (triggered)
wake_up_all(&notif_wait->notif_waitq);
}
+EXPORT_SYMBOL_GPL(iwl_notification_wait_notify);
void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
{
- unsigned long flags;
struct iwl_notification_wait *wait_entry;
- spin_lock_irqsave(&notif_wait->notif_wait_lock, flags);
+ spin_lock(&notif_wait->notif_wait_lock);
list_for_each_entry(wait_entry, &notif_wait->notif_waits, list)
wait_entry->aborted = true;
- spin_unlock_irqrestore(&notif_wait->notif_wait_lock, flags);
+ spin_unlock(&notif_wait->notif_wait_lock);
wake_up_all(&notif_wait->notif_waitq);
}
-
+EXPORT_SYMBOL_GPL(iwl_abort_notification_waits);
void
iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
@@ -152,6 +154,7 @@ iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
list_add(&wait_entry->list, &notif_wait->notif_waits);
spin_unlock_bh(&notif_wait->notif_wait_lock);
}
+EXPORT_SYMBOL_GPL(iwl_init_notification_wait);
int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait,
struct iwl_notification_wait *wait_entry,
@@ -175,6 +178,7 @@ int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait,
return -ETIMEDOUT;
return 0;
}
+EXPORT_SYMBOL_GPL(iwl_wait_notification);
void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait,
struct iwl_notification_wait *wait_entry)
@@ -183,3 +187,4 @@ void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait,
list_del(&wait_entry->list);
spin_unlock_bh(&notif_wait->notif_wait_lock);
}
+EXPORT_SYMBOL_GPL(iwl_remove_notification);
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 4ef742b..64886f9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -111,22 +111,25 @@ struct iwl_cfg;
* May sleep
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
* HCMD that this Rx responds to.
- * Must be atomic.
+ * Must be atomic and called with BH disabled.
* @queue_full: notifies that a HW queue is full.
- * Must be atomic
+ * Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
- * Must be atomic
+ * Must be atomic and called with BH disabled.
* @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
* the radio is killed. Must be atomic.
* @free_skb: allows the transport layer to free skbs that haven't been
* reclaimed by the op_mode. This can happen when the driver is freed and
* there are Tx packets pending in the transport layer.
* Must be atomic
- * @nic_error: error notification. Must be atomic
- * @cmd_queue_full: Called when the command queue gets full. Must be atomic.
+ * @nic_error: error notification. Must be atomic and must be called with BH
+ * disabled.
+ * @cmd_queue_full: Called when the command queue gets full. Must be atomic and
+ * called with BH disabled.
* @nic_config: configure NIC, called before firmware is started.
* May sleep
- * @wimax_active: invoked when WiMax becomes active. Must be atomic.
+ * @wimax_active: invoked when WiMax becomes active. Must be atomic and called
+ * with BH disabled.
*/
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@@ -145,6 +148,9 @@ struct iwl_op_mode_ops {
void (*wimax_active)(struct iwl_op_mode *op_mode);
};
+int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
+void iwl_opmode_deregister(const char *name);
+
/**
* struct iwl_op_mode - operational mode
*
@@ -162,7 +168,6 @@ struct iwl_op_mode {
static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
{
might_sleep();
-
op_mode->ops->stop(op_mode);
}
@@ -218,9 +223,4 @@ static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
op_mode->ops->wimax_active(op_mode);
}
-/*****************************************************
-* Op mode layers implementations
-******************************************************/
-extern const struct iwl_op_mode_ops iwl_dvm_ops;
-
#endif /* __iwl_op_mode_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
deleted file mode 100644
index f166955..0000000
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ /dev/null
@@ -1,288 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/slab.h>
-#include <linux/string.h>
-
-#include "iwl-debug.h"
-#include "iwl-dev.h"
-
-#include "iwl-phy-db.h"
-
-#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
-
-struct iwl_phy_db *iwl_phy_db_init(struct device *dev)
-{
- struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
- GFP_KERNEL);
-
- if (!phy_db)
- return phy_db;
-
- phy_db->dev = dev;
-
- /* TODO: add default values of the phy db. */
- return phy_db;
-}
-
-/*
- * get phy db section: returns a pointer to a phy db section specified by
- * type and channel group id.
- */
-static struct iwl_phy_db_entry *
-iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type,
- u16 chg_id)
-{
- if (!phy_db || type < 0 || type >= IWL_PHY_DB_MAX)
- return NULL;
-
- switch (type) {
- case IWL_PHY_DB_CFG:
- return &phy_db->cfg;
- case IWL_PHY_DB_CALIB_NCH:
- return &phy_db->calib_nch;
- case IWL_PHY_DB_CALIB_CH:
- return &phy_db->calib_ch;
- case IWL_PHY_DB_CALIB_CHG_PAPD:
- if (chg_id < 0 || chg_id >= IWL_NUM_PAPD_CH_GROUPS)
- return NULL;
- return &phy_db->calib_ch_group_papd[chg_id];
- case IWL_PHY_DB_CALIB_CHG_TXP:
- if (chg_id < 0 || chg_id >= IWL_NUM_TXP_CH_GROUPS)
- return NULL;
- return &phy_db->calib_ch_group_txp[chg_id];
- default:
- return NULL;
- }
- return NULL;
-}
-
-static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type,
- u16 chg_id)
-{
- struct iwl_phy_db_entry *entry =
- iwl_phy_db_get_section(phy_db, type, chg_id);
- if (!entry)
- return;
-
- kfree(entry->data);
- entry->data = NULL;
- entry->size = 0;
-}
-
-void iwl_phy_db_free(struct iwl_phy_db *phy_db)
-{
- int i;
-
- if (!phy_db)
- return;
-
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0);
- for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
- for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
- iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
-
- kfree(phy_db);
-}
-
-int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type, u8 *data,
- u16 size, gfp_t alloc_ctx)
-{
- struct iwl_phy_db_entry *entry;
- u16 chg_id = 0;
-
- if (!phy_db)
- return -EINVAL;
-
- if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
- type == IWL_PHY_DB_CALIB_CHG_TXP)
- chg_id = le16_to_cpup((__le16 *)data);
-
- entry = iwl_phy_db_get_section(phy_db, type, chg_id);
- if (!entry)
- return -EINVAL;
-
- kfree(entry->data);
- entry->data = kmemdup(data, size, alloc_ctx);
- if (!entry->data) {
- entry->size = 0;
- return -ENOMEM;
- }
-
- entry->size = size;
-
- if (type == IWL_PHY_DB_CALIB_CH) {
- phy_db->channel_num = le32_to_cpup((__le32 *)data);
- phy_db->channel_size =
- (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
- }
-
- return 0;
-}
-
-static int is_valid_channel(u16 ch_id)
-{
- if (ch_id <= 14 ||
- (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
- (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
- (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
- return 1;
- return 0;
-}
-
-static u8 ch_id_to_ch_index(u16 ch_id)
-{
- if (WARN_ON(!is_valid_channel(ch_id)))
- return 0xff;
-
- if (ch_id <= 14)
- return ch_id - 1;
- if (ch_id <= 64)
- return (ch_id + 20) / 4;
- if (ch_id <= 140)
- return (ch_id - 12) / 4;
- return (ch_id - 13) / 4;
-}
-
-
-static u16 channel_id_to_papd(u16 ch_id)
-{
- if (WARN_ON(!is_valid_channel(ch_id)))
- return 0xff;
-
- if (1 <= ch_id && ch_id <= 14)
- return 0;
- if (36 <= ch_id && ch_id <= 64)
- return 1;
- if (100 <= ch_id && ch_id <= 140)
- return 2;
- return 3;
-}
-
-static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
-{
- struct iwl_phy_db_chg_txp *txp_chg;
- int i;
- u8 ch_index = ch_id_to_ch_index(ch_id);
- if (ch_index == 0xff)
- return 0xff;
-
- for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
- txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
- if (!txp_chg)
- return 0xff;
- /*
- * Look for the first channel group whose max channel is
- * higher than the wanted channel.
- */
- if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index)
- return i;
- }
- return 0xff;
-}
-
-int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
- enum iwl_phy_db_section_type type, u8 **data,
- u16 *size, u16 ch_id)
-{
- struct iwl_phy_db_entry *entry;
- u32 channel_num;
- u32 channel_size;
- u16 ch_group_id = 0;
- u16 index;
-
- if (!phy_db)
- return -EINVAL;
-
- /* find wanted channel group */
- if (type == IWL_PHY_DB_CALIB_CHG_PAPD)
- ch_group_id = channel_id_to_papd(ch_id);
- else if (type == IWL_PHY_DB_CALIB_CHG_TXP)
- ch_group_id = channel_id_to_txp(phy_db, ch_id);
-
- entry = iwl_phy_db_get_section(phy_db, type, ch_group_id);
- if (!entry)
- return -EINVAL;
-
- if (type == IWL_PHY_DB_CALIB_CH) {
- index = ch_id_to_ch_index(ch_id);
- channel_num = phy_db->channel_num;
- channel_size = phy_db->channel_size;
- if (index >= channel_num) {
- IWL_ERR(phy_db, "Wrong channel number %d", ch_id);
- return -EINVAL;
- }
- *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size;
- *size = channel_size;
- } else {
- *data = entry->data;
- *size = entry->size;
- }
- return 0;
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 3b106929..9253ef1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -187,7 +187,7 @@
#define SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
#define SCD_QUEUE_STTS_REG_POS_WSL (4)
#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
-#define SCD_QUEUE_STTS_REG_MSK (0x00FF0000)
+#define SCD_QUEUE_STTS_REG_MSK (0x017F0000)
#define SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
#define SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
@@ -224,6 +224,7 @@
#define SCD_TXFACT (SCD_BASE + 0x10)
#define SCD_ACTIVE (SCD_BASE + 0x14)
#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8)
+#define SCD_CHAINEXT_EN (SCD_BASE + 0x244)
#define SCD_AGGR_SEL (SCD_BASE + 0x248)
#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108)
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.c b/drivers/net/wireless/iwlwifi/iwl-test.c
new file mode 100644
index 0000000..81e8c71
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-test.c
@@ -0,0 +1,856 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/export.h>
+#include <net/netlink.h>
+
+#include "iwl-io.h"
+#include "iwl-fh.h"
+#include "iwl-prph.h"
+#include "iwl-trans.h"
+#include "iwl-test.h"
+#include "iwl-csr.h"
+#include "iwl-testmode.h"
+
+/*
+ * Periphery registers absolute lower bound. This is used to
+ * differentiate register access through HBUS_TARG_PRPH_* and
+ * HBUS_TARG_MEM_* accesses.
+ */
+#define IWL_ABS_PRPH_START (0xA00000)
+
+/*
+ * The TLVs used in the gnl message policy between the kernel module and
+ * user space application. iwl_testmode_gnl_msg_policy is to be carried
+ * through the NL80211_CMD_TESTMODE channel regulated by nl80211.
+ * See iwl-testmode.h
+ */
+static
+struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
+ [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
+
+ [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
+ [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
+
+ [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
+ [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
+
+ [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
+ [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
+
+ [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
+
+ [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
+ [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
+ [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
+
+ [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
+
+ [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
+
+ [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },
+
+ [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },
+
+ [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
+};
+
+static inline void iwl_test_trace_clear(struct iwl_test *tst)
+{
+ memset(&tst->trace, 0, sizeof(struct iwl_test_trace));
+}
+
+static void iwl_test_trace_stop(struct iwl_test *tst)
+{
+ if (!tst->trace.enabled)
+ return;
+
+ if (tst->trace.cpu_addr && tst->trace.dma_addr)
+ dma_free_coherent(tst->trans->dev,
+ tst->trace.tsize,
+ tst->trace.cpu_addr,
+ tst->trace.dma_addr);
+
+ iwl_test_trace_clear(tst);
+}
+
+static inline void iwl_test_mem_clear(struct iwl_test *tst)
+{
+ memset(&tst->mem, 0, sizeof(struct iwl_test_mem));
+}
+
+static inline void iwl_test_mem_stop(struct iwl_test *tst)
+{
+ if (!tst->mem.in_read)
+ return;
+
+ iwl_test_mem_clear(tst);
+}
+
+/*
+ * Initializes the test object.
+ * During the lifetime of the test object it is assumed that the transport is
+ * started. The test object should be stopped before the transport is stopped.
+ */
+void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
+ struct iwl_test_ops *ops)
+{
+ tst->trans = trans;
+ tst->ops = ops;
+
+ iwl_test_trace_clear(tst);
+ iwl_test_mem_clear(tst);
+}
+EXPORT_SYMBOL_GPL(iwl_test_init);
+
+/*
+ * Stop the test object
+ */
+void iwl_test_free(struct iwl_test *tst)
+{
+ iwl_test_mem_stop(tst);
+ iwl_test_trace_stop(tst);
+}
+EXPORT_SYMBOL_GPL(iwl_test_free);
+
+static inline int iwl_test_send_cmd(struct iwl_test *tst,
+ struct iwl_host_cmd *cmd)
+{
+ return tst->ops->send_cmd(tst->trans->op_mode, cmd);
+}
+
+static inline bool iwl_test_valid_hw_addr(struct iwl_test *tst, u32 addr)
+{
+ return tst->ops->valid_hw_addr(addr);
+}
+
+static inline u32 iwl_test_fw_ver(struct iwl_test *tst)
+{
+ return tst->ops->get_fw_ver(tst->trans->op_mode);
+}
+
+static inline struct sk_buff*
+iwl_test_alloc_reply(struct iwl_test *tst, int len)
+{
+ return tst->ops->alloc_reply(tst->trans->op_mode, len);
+}
+
+static inline int iwl_test_reply(struct iwl_test *tst, struct sk_buff *skb)
+{
+ return tst->ops->reply(tst->trans->op_mode, skb);
+}
+
+static inline struct sk_buff*
+iwl_test_alloc_event(struct iwl_test *tst, int len)
+{
+ return tst->ops->alloc_event(tst->trans->op_mode, len);
+}
+
+static inline void
+iwl_test_event(struct iwl_test *tst, struct sk_buff *skb)
+{
+ return tst->ops->event(tst->trans->op_mode, skb);
+}
+
+/*
+ * This function handles the user application commands to the fw. The fw
+ * commands are sent in a synchronous manner. If the user requested the
+ * command's response, it is sent back to the user.
+ */
+static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb)
+{
+ struct iwl_host_cmd cmd;
+ struct iwl_rx_packet *pkt;
+ struct sk_buff *skb;
+ void *reply_buf;
+ u32 reply_len;
+ int ret;
+ bool cmd_want_skb;
+
+ memset(&cmd, 0, sizeof(struct iwl_host_cmd));
+
+ if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
+ !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
+ IWL_ERR(tst->trans, "Missing fw command mandatory fields\n");
+ return -ENOMSG;
+ }
+
+ cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
+ cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
+ if (cmd_want_skb)
+ cmd.flags |= CMD_WANT_SKB;
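+ /* CMD_WANT_SKB keeps the response packet so it can be returned to user space */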
+
+ cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
+ cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
+ cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
+ cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+ IWL_DEBUG_INFO(tst->trans, "test fw cmd=0x%x, flags 0x%x, len %d\n",
+ cmd.id, cmd.flags, cmd.len[0]);
+
+ ret = iwl_test_send_cmd(tst, &cmd);
+ if (ret) {
+ IWL_ERR(tst->trans, "Failed to send hcmd\n");
+ return ret;
+ }
+ if (!cmd_want_skb)
+ return ret;
+
+ /* Handling return of SKB to the user */
+ pkt = cmd.resp_pkt;
+ if (!pkt) {
+ IWL_ERR(tst->trans, "HCMD received a null response packet\n");
+ return ret;
+ }
+
+ reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
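+ /* the extra room requested below leaves space for the netlink attributes added later */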
+ skb = iwl_test_alloc_reply(tst, reply_len + 20);
+ reply_buf = kmalloc(reply_len, GFP_KERNEL);
+ if (!skb || !reply_buf) {
+ kfree_skb(skb);
+ kfree(reply_buf);
+ return -ENOMEM;
+ }
+
+ /* The reply is in a page, which we cannot send to user space. */
+ memcpy(reply_buf, &(pkt->hdr), reply_len);
+ iwl_free_resp(&cmd);
+
+ if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
+ IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
+ nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
+ goto nla_put_failure;
+ return iwl_test_reply(tst, skb);
+
+nla_put_failure:
+ IWL_DEBUG_INFO(tst->trans, "Failed creating NL attributes\n");
+ kfree(reply_buf);
+ kfree_skb(skb);
+ return -ENOMSG;
+}
+
+/*
+ * Handles the user application commands for register access.
+ */
+static int iwl_test_reg(struct iwl_test *tst, struct nlattr **tb)
+{
+ u32 ofs, val32, cmd;
+ u8 val8;
+ struct sk_buff *skb;
+ int status = 0;
+ struct iwl_trans *trans = tst->trans;
+
+ if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
+ IWL_ERR(trans, "Missing reg offset\n");
+ return -ENOMSG;
+ }
+
+ ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
+ IWL_DEBUG_INFO(trans, "test reg access cmd offset=0x%x\n", ofs);
+
+ cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
+
+ /*
+ * Allow access only to FH/CSR/HBUS in direct mode.
+ * Since we don't have the upper bounds for the CSR and HBUS segments,
+ * we will use only the upper bound of FH as a sanity check.
+ */
+ if (ofs >= FH_MEM_UPPER_BOUND) {
+ IWL_ERR(trans, "offset out of segment (0x0 - 0x%x)\n",
+ FH_MEM_UPPER_BOUND);
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+ val32 = iwl_read_direct32(tst->trans, ofs);
+ IWL_DEBUG_INFO(trans, "32 value to read 0x%x\n", val32);
+
+ skb = iwl_test_alloc_reply(tst, 20);
+ if (!skb) {
+ IWL_ERR(trans, "Memory allocation fail\n");
+ return -ENOMEM;
+ }
+ if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
+ goto nla_put_failure;
+ status = iwl_test_reply(tst, skb);
+ if (status < 0)
+ IWL_ERR(trans, "Error sending msg : %d\n", status);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+ if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
+ IWL_ERR(trans, "Missing value to write\n");
+ return -ENOMSG;
+ } else {
+ val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
+ IWL_DEBUG_INFO(trans, "32b write val=0x%x\n", val32);
+ iwl_write_direct32(tst->trans, ofs, val32);
+ }
+ break;
+
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
+ if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
+ IWL_ERR(trans, "Missing value to write\n");
+ return -ENOMSG;
+ } else {
+ val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
+ IWL_DEBUG_INFO(trans, "8b write val=0x%x\n", val8);
+ iwl_write8(tst->trans, ofs, val8);
+ }
+ break;
+
+ default:
+ IWL_ERR(trans, "Unknown test register cmd ID\n");
+ return -ENOMSG;
+ }
+
+ return status;
+
+nla_put_failure:
+ kfree_skb(skb);
+ return -EMSGSIZE;
+}
+
+/*
+ * Handles the request to start FW tracing. Allocates the trace buffer
+ * and sends a reply to user space with the address of the allocated buffer.
+ */
+static int iwl_test_trace_begin(struct iwl_test *tst, struct nlattr **tb)
+{
+ struct sk_buff *skb;
+ int status = 0;
+
+ if (tst->trace.enabled)
+ return -EBUSY;
+
+ if (!tb[IWL_TM_ATTR_TRACE_SIZE])
+ tst->trace.size = TRACE_BUFF_SIZE_DEF;
+ else
+ tst->trace.size =
+ nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
+
+ if (!tst->trace.size)
+ return -EINVAL;
+
+ if (tst->trace.size < TRACE_BUFF_SIZE_MIN ||
+ tst->trace.size > TRACE_BUFF_SIZE_MAX)
+ return -EINVAL;
+
+ tst->trace.tsize = tst->trace.size + TRACE_BUFF_PADD;
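+ /* pad the DMA allocation so the trace start can be aligned to 0x100 below */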
+ tst->trace.cpu_addr = dma_alloc_coherent(tst->trans->dev,
+ tst->trace.tsize,
+ &tst->trace.dma_addr,
+ GFP_KERNEL);
+ if (!tst->trace.cpu_addr)
+ return -ENOMEM;
+
+ tst->trace.enabled = true;
+ tst->trace.trace_addr = (u8 *)PTR_ALIGN(tst->trace.cpu_addr, 0x100);
+
+ memset(tst->trace.trace_addr, 0x03B, tst->trace.size);
+
+ skb = iwl_test_alloc_reply(tst, sizeof(tst->trace.dma_addr) + 20);
+ if (!skb) {
+ IWL_ERR(tst->trans, "Memory allocation fail\n");
+ iwl_test_trace_stop(tst);
+ return -ENOMEM;
+ }
+
+ if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
+ sizeof(tst->trace.dma_addr),
+ (u64 *)&tst->trace.dma_addr))
+ goto nla_put_failure;
+
+ status = iwl_test_reply(tst, skb);
+ if (status < 0)
+ IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
+
+ tst->trace.nchunks = DIV_ROUND_UP(tst->trace.size,
+ DUMP_CHUNK_SIZE);
+
+ return status;
+
+nla_put_failure:
+ kfree_skb(skb);
+ if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
+ IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
+ iwl_test_trace_stop(tst);
+ return -EMSGSIZE;
+}
+
+/*
+ * Handles indirect read from the periphery or the SRAM. The read is performed
+ * to a temporary buffer. The user space application should later issue a dump
+ * command to retrieve the data.
+ */
+static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
+{
+ struct iwl_trans *trans = tst->trans;
+ unsigned long flags;
+ int i;
+
+ if (size & 0x3)
+ return -EINVAL;
+
+ tst->mem.size = size;
+ tst->mem.addr = kmalloc(tst->mem.size, GFP_KERNEL);
+ if (tst->mem.addr == NULL)
+ return -ENOMEM;
+
+ /* Hard-coded periphery absolute address */
+ if (IWL_ABS_PRPH_START <= addr &&
+ addr < IWL_ABS_PRPH_START + PRPH_END) {
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ iwl_grab_nic_access(trans);
+ iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
+ addr | (3 << 24));
+ for (i = 0; i < size; i += 4)
+ *(u32 *)(tst->mem.addr + i) =
+ iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
+ iwl_release_nic_access(trans);
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
+ } else { /* target memory (SRAM) */
+ _iwl_read_targ_mem_dwords(trans, addr,
+ tst->mem.addr,
+ tst->mem.size / 4);
+ }
+
+ tst->mem.nchunks =
+ DIV_ROUND_UP(tst->mem.size, DUMP_CHUNK_SIZE);
+ tst->mem.in_read = true;
+ return 0;
+
+}
+
+/*
+ * Handles indirect write to the periphery or the SRAM. The data to be
+ * written is taken from the user-supplied buffer.
+ */
+static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
+ u32 size, unsigned char *buf)
+{
+ struct iwl_trans *trans = tst->trans;
+ u32 val, i;
+ unsigned long flags;
+
+ if (IWL_ABS_PRPH_START <= addr &&
+ addr < IWL_ABS_PRPH_START + PRPH_END) {
+ /* Periphery writes can be 1-3 bytes long, or DWORDs */
+ if (size < 4) {
+ memcpy(&val, buf, size);
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ iwl_grab_nic_access(trans);
+ iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
+ (addr & 0x0000FFFF) |
+ ((size - 1) << 24));
+ iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
+ iwl_release_nic_access(trans);
+ /* needed after consecutive writes w/o read */
+ mmiowb();
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
+ } else {
+ if (size % 4)
+ return -EINVAL;
+ for (i = 0; i < size; i += 4)
+ iwl_write_prph(trans, addr+i,
+ *(u32 *)(buf+i));
+ }
+ } else if (iwl_test_valid_hw_addr(tst, addr)) {
+ _iwl_write_targ_mem_dwords(trans, addr, buf, size / 4);
+ } else {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Handles the user application commands for indirect read/write
+ * to/from the periphery or the SRAM.
+ */
+static int iwl_test_indirect_mem(struct iwl_test *tst, struct nlattr **tb)
+{
+ u32 addr, size, cmd;
+ unsigned char *buf;
+
+ /* Both read and write should be blocked, for atomicity */
+ if (tst->mem.in_read)
+ return -EBUSY;
+
+ cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
+ if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
+ IWL_ERR(tst->trans, "Error finding memory offset address\n");
+ return -ENOMSG;
+ }
+ addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
+ if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
+ IWL_ERR(tst->trans, "Error finding size for memory reading\n");
+ return -ENOMSG;
+ }
+ size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
+
+ if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) {
+ return iwl_test_indirect_read(tst, addr, size);
+ } else {
+ if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
+ return -EINVAL;
+ buf = (unsigned char *)nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
+ return iwl_test_indirect_write(tst, addr, size, buf);
+ }
+}
+
+/*
+ * Enables or disables notifications to user space
+ */
+static int iwl_test_notifications(struct iwl_test *tst,
+ struct nlattr **tb)
+{
+ tst->notify = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
+ return 0;
+}
+
+/*
+ * Handles the request to get the device id
+ */
+static int iwl_test_get_dev_id(struct iwl_test *tst, struct nlattr **tb)
+{
+ u32 devid = tst->trans->hw_id;
+ struct sk_buff *skb;
+ int status;
+
+ IWL_DEBUG_INFO(tst->trans, "hw version: 0x%x\n", devid);
+
+ skb = iwl_test_alloc_reply(tst, 20);
+ if (!skb) {
+ IWL_ERR(tst->trans, "Memory allocation fail\n");
+ return -ENOMEM;
+ }
+
+ if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
+ goto nla_put_failure;
+ status = iwl_test_reply(tst, skb);
+ if (status < 0)
+ IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
+
+ return 0;
+
+nla_put_failure:
+ kfree_skb(skb);
+ return -EMSGSIZE;
+}
+
+/*
+ * Handles the request to get the FW version
+ */
+static int iwl_test_get_fw_ver(struct iwl_test *tst, struct nlattr **tb)
+{
+ struct sk_buff *skb;
+ int status;
+ u32 ver = iwl_test_fw_ver(tst);
+
+ IWL_DEBUG_INFO(tst->trans, "uCode version raw: 0x%x\n", ver);
+
+ skb = iwl_test_alloc_reply(tst, 20);
+ if (!skb) {
+ IWL_ERR(tst->trans, "Memory allocation fail\n");
+ return -ENOMEM;
+ }
+
+ if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, ver))
+ goto nla_put_failure;
+
+ status = iwl_test_reply(tst, skb);
+ if (status < 0)
+ IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
+
+ return 0;
+
+nla_put_failure:
+ kfree_skb(skb);
+ return -EMSGSIZE;
+}
+
+/*
+ * Parse the netlink message and validate that IWL_TM_ATTR_COMMAND exists
+ */
+int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
+ void *data, int len)
+{
+ int result;
+
+ result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
+ iwl_testmode_gnl_msg_policy);
+ if (result) {
+ IWL_ERR(tst->trans, "Fail parse gnl msg: %d\n", result);
+ return result;
+ }
+
+ /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
+ if (!tb[IWL_TM_ATTR_COMMAND]) {
+ IWL_ERR(tst->trans, "Missing testmode command type\n");
+ return -ENOMSG;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iwl_test_parse);
+
+/*
+ * Handle test commands.
+ * Returns 1 for unknown commands (not handled by the test object); negative
+ * value in case of error.
+ */
+int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb)
+{
+ int result;
+
+ switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
+ case IWL_TM_CMD_APP2DEV_UCODE:
+ IWL_DEBUG_INFO(tst->trans, "test cmd to uCode\n");
+ result = iwl_test_fw_cmd(tst, tb);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
+ IWL_DEBUG_INFO(tst->trans, "test cmd to register\n");
+ result = iwl_test_reg(tst, tb);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
+ IWL_DEBUG_INFO(tst->trans, "test uCode trace cmd to driver\n");
+ result = iwl_test_trace_begin(tst, tb);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_END_TRACE:
+ iwl_test_trace_stop(tst);
+ result = 0;
+ break;
+
+ case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
+ case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
+ IWL_DEBUG_INFO(tst->trans, "test indirect memory cmd\n");
+ result = iwl_test_indirect_mem(tst, tb);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
+ IWL_DEBUG_INFO(tst->trans, "test notifications cmd\n");
+ result = iwl_test_notifications(tst, tb);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
+ IWL_DEBUG_INFO(tst->trans, "test get FW ver cmd\n");
+ result = iwl_test_get_fw_ver(tst, tb);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
+ IWL_DEBUG_INFO(tst->trans, "test Get device ID cmd\n");
+ result = iwl_test_get_dev_id(tst, tb);
+ break;
+
+ default:
+ IWL_DEBUG_INFO(tst->trans, "Unknown test command\n");
+ result = 1;
+ break;
+ }
+ return result;
+}
+EXPORT_SYMBOL_GPL(iwl_test_handle_cmd);
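
For illustration, a minimal sketch (not part of this patch) of how an op_mode might wire iwl_test_parse() and iwl_test_handle_cmd() into its nl80211 testmode entry point. The iwl_xxx_* names, the priv layout and the mutex are hypothetical; only the calling convention (return value 1 meaning "not handled by the test object") comes from the code above.

/* Illustrative sketch only -- not part of this patch. */
static int iwl_xxx_testmode_cmd(struct iwl_xxx_priv *priv, void *data, int len)
{
	struct nlattr *tb[IWL_TM_ATTR_MAX];
	int err;

	err = iwl_test_parse(&priv->tst, tb, data, len);
	if (err)
		return err;

	/* serialize against other accesses to the device */
	mutex_lock(&priv->mutex);

	err = iwl_test_handle_cmd(&priv->tst, tb);
	if (err == 1)
		/* not handled by the common test object */
		err = iwl_xxx_testmode_op_mode_cmd(priv, tb);

	mutex_unlock(&priv->mutex);
	return err;
}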
+
+static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ int idx, length;
+
+ if (!tst->trace.enabled || !tst->trace.trace_addr)
+ return -EFAULT;
+
+ idx = cb->args[4];
+ if (idx >= tst->trace.nchunks)
+ return -ENOENT;
+
+ length = DUMP_CHUNK_SIZE;
+ if (((idx + 1) == tst->trace.nchunks) &&
+ (tst->trace.size % DUMP_CHUNK_SIZE))
+ length = tst->trace.size %
+ DUMP_CHUNK_SIZE;
+
+ if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
+ tst->trace.trace_addr + (DUMP_CHUNK_SIZE * idx)))
+ goto nla_put_failure;
+
+ cb->args[4] = ++idx;
+ return 0;
+
+ nla_put_failure:
+ return -ENOBUFS;
+}
+
+static int iwl_test_buffer_dump(struct iwl_test *tst, struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ int idx, length;
+
+ if (!tst->mem.in_read)
+ return -EFAULT;
+
+ idx = cb->args[4];
+ if (idx >= tst->mem.nchunks) {
+ iwl_test_mem_stop(tst);
+ return -ENOENT;
+ }
+
+ length = DUMP_CHUNK_SIZE;
+ if (((idx + 1) == tst->mem.nchunks) &&
+ (tst->mem.size % DUMP_CHUNK_SIZE))
+ length = tst->mem.size % DUMP_CHUNK_SIZE;
+
+ if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
+ tst->mem.addr + (DUMP_CHUNK_SIZE * idx)))
+ goto nla_put_failure;
+
+ cb->args[4] = ++idx;
+ return 0;
+
+ nla_put_failure:
+ return -ENOBUFS;
+}
+
+/*
+ * Handle dump commands.
+ * Returns 1 for unknown commands (not handled by the test object); negative
+ * value in case of error.
+ */
+int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ int result;
+
+ switch (cmd) {
+ case IWL_TM_CMD_APP2DEV_READ_TRACE:
+ IWL_DEBUG_INFO(tst->trans, "uCode trace cmd\n");
+ result = iwl_test_trace_dump(tst, skb, cb);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
+ IWL_DEBUG_INFO(tst->trans, "testmode sram dump cmd\n");
+ result = iwl_test_buffer_dump(tst, skb, cb);
+ break;
+
+ default:
+ result = 1;
+ break;
+ }
+ return result;
+}
+EXPORT_SYMBOL_GPL(iwl_test_dump);
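
A similar sketch for the dump side. Caching the command in cb->args[3], offset by one so that zero means "not parsed yet", mirrors the testmode dump handler being removed further down; the iwl_xxx_* names are again hypothetical.

/* Illustrative sketch only -- not part of this patch. */
static int iwl_xxx_testmode_dump(struct iwl_xxx_priv *priv, struct sk_buff *skb,
				 struct netlink_callback *cb,
				 void *data, int len)
{
	struct nlattr *tb[IWL_TM_ATTR_MAX];
	u32 cmd;
	int err;

	if (cb->args[3]) {
		/* offset by 1 since commands start at 0 */
		cmd = cb->args[3] - 1;
	} else {
		err = iwl_test_parse(&priv->tst, tb, data, len);
		if (err)
			return err;
		cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
		cb->args[3] = cmd + 1;
	}

	mutex_lock(&priv->mutex);
	err = iwl_test_dump(&priv->tst, cmd, skb, cb);
	mutex_unlock(&priv->mutex);
	return err;
}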
+
+/*
+ * Multicasts spontaneous messages from the device to user space.
+ */
+static void iwl_test_send_rx(struct iwl_test *tst,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct sk_buff *skb;
+ struct iwl_rx_packet *data;
+ int length;
+
+ data = rxb_addr(rxb);
+ length = le32_to_cpu(data->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+
+ /* the length doesn't include len_n_flags field, so add it manually */
+ length += sizeof(__le32);
+
+ skb = iwl_test_alloc_event(tst, length + 20);
+ if (skb == NULL) {
+ IWL_ERR(tst->trans, "Out of memory for message to user\n");
+ return;
+ }
+
+ if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
+ IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
+ nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data))
+ goto nla_put_failure;
+
+ iwl_test_event(tst, skb);
+ return;
+
+nla_put_failure:
+ kfree_skb(skb);
+ IWL_ERR(tst->trans, "Ouch, overran buffer, check allocation!\n");
+}
+
+/*
+ * Called whenever an Rx frame is received from the device. If notifications to
+ * user space are requested, the frame is forwarded to the user.
+ */
+void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb)
+{
+ if (tst->notify)
+ iwl_test_send_rx(tst, rxb);
+}
+EXPORT_SYMBOL_GPL(iwl_test_rx);
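
Finally, the Rx hook is meant to be called from the op_mode's Rx dispatch path; a minimal, assumed example (the surrounding function is hypothetical):

/* Illustrative sketch only -- not part of this patch. */
static void iwl_xxx_rx_dispatch(struct iwl_xxx_priv *priv,
				struct iwl_rx_cmd_buffer *rxb)
{
	/* forward the packet to user space if notifications are enabled */
	iwl_test_rx(&priv->tst, rxb);

	/* ... regular Rx handling of the packet continues here ... */
}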
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.h b/drivers/net/wireless/iwlwifi/iwl-test.h
new file mode 100644
index 0000000..e13ffa8
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-test.h
@@ -0,0 +1,161 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_TEST_H__
+#define __IWL_TEST_H__
+
+#include <linux/types.h>
+#include "iwl-trans.h"
+
+struct iwl_test_trace {
+ u32 size;
+ u32 tsize;
+ u32 nchunks;
+ u8 *cpu_addr;
+ u8 *trace_addr;
+ dma_addr_t dma_addr;
+ bool enabled;
+};
+
+struct iwl_test_mem {
+ u32 size;
+ u32 nchunks;
+ u8 *addr;
+ bool in_read;
+};
+
+/*
+ * struct iwl_test_ops: callback to the op mode
+ *
+ * The structure defines the callbacks that the op_mode should implement
+ * in order to handle logic that is out of the scope of iwl_test. The
+ * op_mode must set all the callbacks.
+ *
+ * @send_cmd: handler that is used by the test object to request the
+ * op_mode to send a command to the fw.
+ *
+ * @valid_hw_addr: handler that is used by the test object to request the
+ * op_mode to check if the given address is a valid address.
+ *
+ * @get_fw_ver: handler used to get the FW version.
+ *
+ * @alloc_reply: handler used by the test object to request the op_mode
+ * to allocate an skb for sending a reply to the user, and initialize
+ * the skb. It is assumed that the test object only fills the required
+ * attributes.
+ *
+ * @reply: handler used by the test object to request the op_mode to reply
+ *	to a request. The skb is an skb previously allocated by the
+ *	alloc_reply callback.
+ *
+ * @alloc_event: handler used by the test object to request the op_mode
+ * to allocate an skb for sending an event, and initialize
+ * the skb. It is assumed that the test object only fills the required
+ * attributes.
+ *
+ * @event: handler used by the test object to request the op_mode to send
+ *	an event. The skb is an skb previously allocated by the
+ *	alloc_event callback.
+ */
+struct iwl_test_ops {
+	int (*send_cmd)(struct iwl_op_mode *op_mode,
+ struct iwl_host_cmd *cmd);
+ bool (*valid_hw_addr)(u32 addr);
+ u32 (*get_fw_ver)(struct iwl_op_mode *op_mode);
+
+ struct sk_buff *(*alloc_reply)(struct iwl_op_mode *op_mode, int len);
+ int (*reply)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
+	struct sk_buff *(*alloc_event)(struct iwl_op_mode *op_mode, int len);
+ void (*event)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
+};
+
+struct iwl_test {
+ struct iwl_trans *trans;
+ struct iwl_test_ops *ops;
+ struct iwl_test_trace trace;
+ struct iwl_test_mem mem;
+ bool notify;
+};
+
+void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
+ struct iwl_test_ops *ops);
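
For reference, a sketch of the op_mode side of this contract: a statically defined iwl_test_ops with every callback set, handed to iwl_test_init() at setup time. All iwl_xxx_* callbacks and the priv layout are hypothetical placeholders, not part of this patch.

/* Illustrative sketch only -- not part of this patch. */
static struct iwl_test_ops iwl_xxx_test_ops = {
	.send_cmd	= iwl_xxx_tm_send_cmd,
	.valid_hw_addr	= iwl_xxx_tm_valid_hw_addr,
	.get_fw_ver	= iwl_xxx_tm_get_fw_ver,
	.alloc_reply	= iwl_xxx_tm_alloc_reply,
	.reply		= iwl_xxx_tm_reply,
	.alloc_event	= iwl_xxx_tm_alloc_event,
	.event		= iwl_xxx_tm_event,
};

static void iwl_xxx_testmode_setup(struct iwl_xxx_priv *priv)
{
	iwl_test_init(&priv->tst, priv->trans, &iwl_xxx_test_ops);
}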
+
+void iwl_test_free(struct iwl_test *tst);
+
+int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
+ void *data, int len);
+
+int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb);
+
+int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
+ struct netlink_callback *cb);
+
+void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb);
+
+static inline void iwl_test_enable_notifications(struct iwl_test *tst,
+ bool enable)
+{
+ tst->notify = enable;
+}
+
+#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.c b/drivers/net/wireless/iwlwifi/iwl-testmode.c
deleted file mode 100644
index 060aac3..0000000
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.c
+++ /dev/null
@@ -1,1114 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-#include <net/net_namespace.h>
-#include <linux/netdevice.h>
-#include <net/cfg80211.h>
-#include <net/mac80211.h>
-#include <net/netlink.h>
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-io.h"
-#include "iwl-agn.h"
-#include "iwl-testmode.h"
-#include "iwl-trans.h"
-#include "iwl-fh.h"
-#include "iwl-prph.h"
-
-
-/* Periphery registers absolute lower bound. This is used in order to
- * differentiate registery access through HBUS_TARG_PRPH_* and
- * HBUS_TARG_MEM_* accesses.
- */
-#define IWL_TM_ABS_PRPH_START (0xA00000)
-
-/* The TLVs used in the gnl message policy between the kernel module and
- * user space application. iwl_testmode_gnl_msg_policy is to be carried
- * through the NL80211_CMD_TESTMODE channel regulated by nl80211.
- * See iwl-testmode.h
- */
-static
-struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
- [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
-
- [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
- [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
-
- [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
- [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
- [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
-
- [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
- [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
-
- [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
-
- [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
- [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
- [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
-
- [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
-
- [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
-
- [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
- [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
- [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },
-
- [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
- [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
- [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
- [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
- [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },
-
- [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
-};
-
-/*
- * See the struct iwl_rx_packet in iwl-commands.h for the format of the
- * received events from the device
- */
-static inline int get_event_length(struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- if (pkt)
- return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- else
- return 0;
-}
-
-
-/*
- * This function multicasts the spontaneous messages from the device to the
- * user space. It is invoked whenever there is a received messages
- * from the device. This function is called within the ISR of the rx handlers
- * in iwlagn driver.
- *
- * The parsing of the message content is left to the user space application,
- * The message content is treated as unattacked raw data and is encapsulated
- * with IWL_TM_ATTR_UCODE_RX_PKT multicasting to the user space.
- *
- * @priv: the instance of iwlwifi device
- * @rxb: pointer to rx data content received by the ISR
- *
- * See the message policies and TLVs in iwl_testmode_gnl_msg_policy[].
- * For the messages multicasting to the user application, the mandatory
- * TLV fields are :
- * IWL_TM_ATTR_COMMAND must be IWL_TM_CMD_DEV2APP_UCODE_RX_PKT
- * IWL_TM_ATTR_UCODE_RX_PKT for carrying the message content
- */
-
-static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct ieee80211_hw *hw = priv->hw;
- struct sk_buff *skb;
- void *data;
- int length;
-
- data = (void *)rxb_addr(rxb);
- length = get_event_length(rxb);
-
- if (!data || length == 0)
- return;
-
- skb = cfg80211_testmode_alloc_event_skb(hw->wiphy, 20 + length,
- GFP_ATOMIC);
- if (skb == NULL) {
- IWL_ERR(priv,
- "Run out of memory for messages to user space ?\n");
- return;
- }
- if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
- /* the length doesn't include len_n_flags field, so add it manually */
- nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data))
- goto nla_put_failure;
- cfg80211_testmode_event(skb, GFP_ATOMIC);
- return;
-
-nla_put_failure:
- kfree_skb(skb);
- IWL_ERR(priv, "Ouch, overran buffer, check allocation!\n");
-}
-
-void iwl_testmode_init(struct iwl_priv *priv)
-{
- priv->pre_rx_handler = NULL;
- priv->testmode_trace.trace_enabled = false;
- priv->testmode_mem.read_in_progress = false;
-}
-
-static void iwl_mem_cleanup(struct iwl_priv *priv)
-{
- if (priv->testmode_mem.read_in_progress) {
- kfree(priv->testmode_mem.buff_addr);
- priv->testmode_mem.buff_addr = NULL;
- priv->testmode_mem.buff_size = 0;
- priv->testmode_mem.num_chunks = 0;
- priv->testmode_mem.read_in_progress = false;
- }
-}
-
-static void iwl_trace_cleanup(struct iwl_priv *priv)
-{
- if (priv->testmode_trace.trace_enabled) {
- if (priv->testmode_trace.cpu_addr &&
- priv->testmode_trace.dma_addr)
- dma_free_coherent(priv->trans->dev,
- priv->testmode_trace.total_size,
- priv->testmode_trace.cpu_addr,
- priv->testmode_trace.dma_addr);
- priv->testmode_trace.trace_enabled = false;
- priv->testmode_trace.cpu_addr = NULL;
- priv->testmode_trace.trace_addr = NULL;
- priv->testmode_trace.dma_addr = 0;
- priv->testmode_trace.buff_size = 0;
- priv->testmode_trace.total_size = 0;
- }
-}
-
-
-void iwl_testmode_cleanup(struct iwl_priv *priv)
-{
- iwl_trace_cleanup(priv);
- iwl_mem_cleanup(priv);
-}
-
-
-/*
- * This function handles the user application commands to the ucode.
- *
- * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_CMD_ID and
- * IWL_TM_ATTR_UCODE_CMD_DATA and calls to the handler to send the
- * host command to the ucode.
- *
- * If any mandatory field is missing, -ENOMSG is replied to the user space
- * application; otherwise, waits for the host command to be sent and checks
- * the return code. In case or error, it is returned, otherwise a reply is
- * allocated and the reply RX packet
- * is returned.
- *
- * @hw: ieee80211_hw object that represents the device
- * @tb: gnl message fields from the user space
- */
-static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- struct iwl_host_cmd cmd;
- struct iwl_rx_packet *pkt;
- struct sk_buff *skb;
- void *reply_buf;
- u32 reply_len;
- int ret;
- bool cmd_want_skb;
-
- memset(&cmd, 0, sizeof(struct iwl_host_cmd));
-
- if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
- !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
- IWL_ERR(priv, "Missing ucode command mandatory fields\n");
- return -ENOMSG;
- }
-
- cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
- cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
- if (cmd_want_skb)
- cmd.flags |= CMD_WANT_SKB;
-
- cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
- cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
- cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
- cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
- IWL_DEBUG_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
- " len %d\n", cmd.id, cmd.flags, cmd.len[0]);
-
- ret = iwl_dvm_send_cmd(priv, &cmd);
- if (ret) {
- IWL_ERR(priv, "Failed to send hcmd\n");
- return ret;
- }
- if (!cmd_want_skb)
- return ret;
-
- /* Handling return of SKB to the user */
- pkt = cmd.resp_pkt;
- if (!pkt) {
- IWL_ERR(priv, "HCMD received a null response packet\n");
- return ret;
- }
-
- reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, reply_len + 20);
- reply_buf = kmalloc(reply_len, GFP_KERNEL);
- if (!skb || !reply_buf) {
- kfree_skb(skb);
- kfree(reply_buf);
- return -ENOMEM;
- }
-
- /* The reply is in a page, that we cannot send to user space. */
- memcpy(reply_buf, &(pkt->hdr), reply_len);
- iwl_free_resp(&cmd);
-
- if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
- nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
- goto nla_put_failure;
- return cfg80211_testmode_reply(skb);
-
-nla_put_failure:
- IWL_DEBUG_INFO(priv, "Failed creating NL attributes\n");
- return -ENOMSG;
-}
-
-
-/*
- * This function handles the user application commands for register access.
- *
- * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
- * handlers respectively.
- *
- * If it's an unknown commdn ID, -ENOSYS is returned; or -ENOMSG if the
- * mandatory fields(IWL_TM_ATTR_REG_OFFSET,IWL_TM_ATTR_REG_VALUE32,
- * IWL_TM_ATTR_REG_VALUE8) are missing; Otherwise 0 is replied indicating
- * the success of the command execution.
- *
- * If IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_READ32, the register read
- * value is returned with IWL_TM_ATTR_REG_VALUE32.
- *
- * @hw: ieee80211_hw object that represents the device
- * @tb: gnl message fields from the user space
- */
-static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- u32 ofs, val32, cmd;
- u8 val8;
- struct sk_buff *skb;
- int status = 0;
-
- if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
- IWL_ERR(priv, "Missing register offset\n");
- return -ENOMSG;
- }
- ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
- IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);
-
- /* Allow access only to FH/CSR/HBUS in direct mode.
- Since we don't have the upper bounds for the CSR and HBUS segments,
- we will use only the upper bound of FH for sanity check. */
- cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
- if ((cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 ||
- cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 ||
- cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8) &&
- (ofs >= FH_MEM_UPPER_BOUND)) {
- IWL_ERR(priv, "offset out of segment (0x0 - 0x%x)\n",
- FH_MEM_UPPER_BOUND);
- return -EINVAL;
- }
-
- switch (cmd) {
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
- val32 = iwl_read_direct32(priv->trans, ofs);
- IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
-
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
- if (!skb) {
- IWL_ERR(priv, "Memory allocation fail\n");
- return -ENOMEM;
- }
- if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
- goto nla_put_failure;
- status = cfg80211_testmode_reply(skb);
- if (status < 0)
- IWL_ERR(priv, "Error sending msg : %d\n", status);
- break;
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
- if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
- IWL_ERR(priv, "Missing value to write\n");
- return -ENOMSG;
- } else {
- val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
- IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
- iwl_write_direct32(priv->trans, ofs, val32);
- }
- break;
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
- if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
- IWL_ERR(priv, "Missing value to write\n");
- return -ENOMSG;
- } else {
- val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
- IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
- iwl_write8(priv->trans, ofs, val8);
- }
- break;
- default:
- IWL_ERR(priv, "Unknown testmode register command ID\n");
- return -ENOSYS;
- }
-
- return status;
-
-nla_put_failure:
- kfree_skb(skb);
- return -EMSGSIZE;
-}
-
-
-static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
-{
- struct iwl_notification_wait calib_wait;
- static const u8 calib_complete[] = {
- CALIBRATION_COMPLETE_NOTIFICATION
- };
- int ret;
-
- iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
- calib_complete, ARRAY_SIZE(calib_complete),
- NULL, NULL);
- ret = iwl_init_alive_start(priv);
- if (ret) {
- IWL_ERR(priv, "Fail init calibration: %d\n", ret);
- goto cfg_init_calib_error;
- }
-
- ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, 2 * HZ);
- if (ret)
- IWL_ERR(priv, "Error detecting"
- " CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
- return ret;
-
-cfg_init_calib_error:
- iwl_remove_notification(&priv->notif_wait, &calib_wait);
- return ret;
-}
-
-/*
- * This function handles the user application commands for driver.
- *
- * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
- * handlers respectively.
- *
- * If it's an unknown commdn ID, -ENOSYS is replied; otherwise, the returned
- * value of the actual command execution is replied to the user application.
- *
- * If there's any message responding to the user space, IWL_TM_ATTR_SYNC_RSP
- * is used for carry the message while IWL_TM_ATTR_COMMAND must set to
- * IWL_TM_CMD_DEV2APP_SYNC_RSP.
- *
- * @hw: ieee80211_hw object that represents the device
- * @tb: gnl message fields from the user space
- */
-static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- struct iwl_trans *trans = priv->trans;
- struct sk_buff *skb;
- unsigned char *rsp_data_ptr = NULL;
- int status = 0, rsp_data_len = 0;
- u32 devid, inst_size = 0, data_size = 0;
- const struct fw_img *img;
-
- switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
- case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
- rsp_data_ptr = (unsigned char *)priv->cfg->name;
- rsp_data_len = strlen(priv->cfg->name);
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
- rsp_data_len + 20);
- if (!skb) {
- IWL_ERR(priv, "Memory allocation fail\n");
- return -ENOMEM;
- }
- if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
- IWL_TM_CMD_DEV2APP_SYNC_RSP) ||
- nla_put(skb, IWL_TM_ATTR_SYNC_RSP,
- rsp_data_len, rsp_data_ptr))
- goto nla_put_failure;
- status = cfg80211_testmode_reply(skb);
- if (status < 0)
- IWL_ERR(priv, "Error sending msg : %d\n", status);
- break;
-
- case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
- status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
- if (status)
- IWL_ERR(priv, "Error loading init ucode: %d\n", status);
- break;
-
- case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
- iwl_testmode_cfg_init_calib(priv);
- priv->ucode_loaded = false;
- iwl_trans_stop_device(trans);
- break;
-
- case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
- status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
- if (status) {
- IWL_ERR(priv,
- "Error loading runtime ucode: %d\n", status);
- break;
- }
- status = iwl_alive_start(priv);
- if (status)
- IWL_ERR(priv,
- "Error starting the device: %d\n", status);
- break;
-
- case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
- iwl_scan_cancel_timeout(priv, 200);
- priv->ucode_loaded = false;
- iwl_trans_stop_device(trans);
- status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
- if (status) {
- IWL_ERR(priv,
- "Error loading WOWLAN ucode: %d\n", status);
- break;
- }
- status = iwl_alive_start(priv);
- if (status)
- IWL_ERR(priv,
- "Error starting the device: %d\n", status);
- break;
-
- case IWL_TM_CMD_APP2DEV_GET_EEPROM:
- if (priv->eeprom) {
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
- priv->cfg->base_params->eeprom_size + 20);
- if (!skb) {
- IWL_ERR(priv, "Memory allocation fail\n");
- return -ENOMEM;
- }
- if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
- IWL_TM_CMD_DEV2APP_EEPROM_RSP) ||
- nla_put(skb, IWL_TM_ATTR_EEPROM,
- priv->cfg->base_params->eeprom_size,
- priv->eeprom))
- goto nla_put_failure;
- status = cfg80211_testmode_reply(skb);
- if (status < 0)
- IWL_ERR(priv, "Error sending msg : %d\n",
- status);
- } else
- return -EFAULT;
- break;
-
- case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
- if (!tb[IWL_TM_ATTR_FIXRATE]) {
- IWL_ERR(priv, "Missing fixrate setting\n");
- return -ENOMSG;
- }
- priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
- break;
-
- case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
- IWL_INFO(priv, "uCode version raw: 0x%x\n",
- priv->fw->ucode_ver);
-
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
- if (!skb) {
- IWL_ERR(priv, "Memory allocation fail\n");
- return -ENOMEM;
- }
- if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION,
- priv->fw->ucode_ver))
- goto nla_put_failure;
- status = cfg80211_testmode_reply(skb);
- if (status < 0)
- IWL_ERR(priv, "Error sending msg : %d\n", status);
- break;
-
- case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
- devid = priv->trans->hw_id;
- IWL_INFO(priv, "hw version: 0x%x\n", devid);
-
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
- if (!skb) {
- IWL_ERR(priv, "Memory allocation fail\n");
- return -ENOMEM;
- }
- if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
- goto nla_put_failure;
- status = cfg80211_testmode_reply(skb);
- if (status < 0)
- IWL_ERR(priv, "Error sending msg : %d\n", status);
- break;
-
- case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + 8);
- if (!skb) {
- IWL_ERR(priv, "Memory allocation fail\n");
- return -ENOMEM;
- }
- if (!priv->ucode_loaded) {
- IWL_ERR(priv, "No uCode has not been loaded\n");
- return -EINVAL;
- } else {
- img = &priv->fw->img[priv->cur_ucode];
- inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
- data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
- }
- if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) ||
- nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
- nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
- goto nla_put_failure;
- status = cfg80211_testmode_reply(skb);
- if (status < 0)
- IWL_ERR(priv, "Error sending msg : %d\n", status);
- break;
-
- default:
- IWL_ERR(priv, "Unknown testmode driver command ID\n");
- return -ENOSYS;
- }
- return status;
-
-nla_put_failure:
- kfree_skb(skb);
- return -EMSGSIZE;
-}
-
-
-/*
- * This function handles the user application commands for uCode trace
- *
- * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
- * handlers respectively.
- *
- * If it's an unknown commdn ID, -ENOSYS is replied; otherwise, the returned
- * value of the actual command execution is replied to the user application.
- *
- * @hw: ieee80211_hw object that represents the device
- * @tb: gnl message fields from the user space
- */
-static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- struct sk_buff *skb;
- int status = 0;
- struct device *dev = priv->trans->dev;
-
- switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
- case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
- if (priv->testmode_trace.trace_enabled)
- return -EBUSY;
-
- if (!tb[IWL_TM_ATTR_TRACE_SIZE])
- priv->testmode_trace.buff_size = TRACE_BUFF_SIZE_DEF;
- else
- priv->testmode_trace.buff_size =
- nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
- if (!priv->testmode_trace.buff_size)
- return -EINVAL;
- if (priv->testmode_trace.buff_size < TRACE_BUFF_SIZE_MIN ||
- priv->testmode_trace.buff_size > TRACE_BUFF_SIZE_MAX)
- return -EINVAL;
-
- priv->testmode_trace.total_size =
- priv->testmode_trace.buff_size + TRACE_BUFF_PADD;
- priv->testmode_trace.cpu_addr =
- dma_alloc_coherent(dev,
- priv->testmode_trace.total_size,
- &priv->testmode_trace.dma_addr,
- GFP_KERNEL);
- if (!priv->testmode_trace.cpu_addr)
- return -ENOMEM;
- priv->testmode_trace.trace_enabled = true;
- priv->testmode_trace.trace_addr = (u8 *)PTR_ALIGN(
- priv->testmode_trace.cpu_addr, 0x100);
- memset(priv->testmode_trace.trace_addr, 0x03B,
- priv->testmode_trace.buff_size);
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
- sizeof(priv->testmode_trace.dma_addr) + 20);
- if (!skb) {
- IWL_ERR(priv, "Memory allocation fail\n");
- iwl_trace_cleanup(priv);
- return -ENOMEM;
- }
- if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
- sizeof(priv->testmode_trace.dma_addr),
- (u64 *)&priv->testmode_trace.dma_addr))
- goto nla_put_failure;
- status = cfg80211_testmode_reply(skb);
- if (status < 0) {
- IWL_ERR(priv, "Error sending msg : %d\n", status);
- }
- priv->testmode_trace.num_chunks =
- DIV_ROUND_UP(priv->testmode_trace.buff_size,
- DUMP_CHUNK_SIZE);
- break;
-
- case IWL_TM_CMD_APP2DEV_END_TRACE:
- iwl_trace_cleanup(priv);
- break;
- default:
- IWL_ERR(priv, "Unknown testmode mem command ID\n");
- return -ENOSYS;
- }
- return status;
-
-nla_put_failure:
- kfree_skb(skb);
- if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
- IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
- iwl_trace_cleanup(priv);
- return -EMSGSIZE;
-}
-
-static int iwl_testmode_trace_dump(struct ieee80211_hw *hw,
- struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- int idx, length;
-
- if (priv->testmode_trace.trace_enabled &&
- priv->testmode_trace.trace_addr) {
- idx = cb->args[4];
- if (idx >= priv->testmode_trace.num_chunks)
- return -ENOENT;
- length = DUMP_CHUNK_SIZE;
- if (((idx + 1) == priv->testmode_trace.num_chunks) &&
- (priv->testmode_trace.buff_size % DUMP_CHUNK_SIZE))
- length = priv->testmode_trace.buff_size %
- DUMP_CHUNK_SIZE;
-
- if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
- priv->testmode_trace.trace_addr +
- (DUMP_CHUNK_SIZE * idx)))
- goto nla_put_failure;
- idx++;
- cb->args[4] = idx;
- return 0;
- } else
- return -EFAULT;
-
- nla_put_failure:
- return -ENOBUFS;
-}
-
-/*
- * This function handles the user application switch ucode ownership.
- *
- * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_OWNER and
- * decide who the current owner of the uCode
- *
- * If the current owner is OWNERSHIP_TM, then the only host command
- * can deliver to uCode is from testmode, all the other host commands
- * will dropped.
- *
- * default driver is the owner of uCode in normal operational mode
- *
- * @hw: ieee80211_hw object that represents the device
- * @tb: gnl message fields from the user space
- */
-static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- u8 owner;
-
- if (!tb[IWL_TM_ATTR_UCODE_OWNER]) {
- IWL_ERR(priv, "Missing ucode owner\n");
- return -ENOMSG;
- }
-
- owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
- if (owner == IWL_OWNERSHIP_DRIVER) {
- priv->ucode_owner = owner;
- priv->pre_rx_handler = NULL;
- } else if (owner == IWL_OWNERSHIP_TM) {
- priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
- priv->ucode_owner = owner;
- } else {
- IWL_ERR(priv, "Invalid owner\n");
- return -EINVAL;
- }
- return 0;
-}
-
-static int iwl_testmode_indirect_read(struct iwl_priv *priv, u32 addr, u32 size)
-{
- struct iwl_trans *trans = priv->trans;
- unsigned long flags;
- int i;
-
- if (size & 0x3)
- return -EINVAL;
- priv->testmode_mem.buff_size = size;
- priv->testmode_mem.buff_addr =
- kmalloc(priv->testmode_mem.buff_size, GFP_KERNEL);
- if (priv->testmode_mem.buff_addr == NULL)
- return -ENOMEM;
-
- /* Hard-coded periphery absolute address */
- if (IWL_TM_ABS_PRPH_START <= addr &&
- addr < IWL_TM_ABS_PRPH_START + PRPH_END) {
- spin_lock_irqsave(&trans->reg_lock, flags);
- iwl_grab_nic_access(trans);
- iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
- addr | (3 << 24));
- for (i = 0; i < size; i += 4)
- *(u32 *)(priv->testmode_mem.buff_addr + i) =
- iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
- iwl_release_nic_access(trans);
- spin_unlock_irqrestore(&trans->reg_lock, flags);
- } else { /* target memory (SRAM) */
- _iwl_read_targ_mem_words(trans, addr,
- priv->testmode_mem.buff_addr,
- priv->testmode_mem.buff_size / 4);
- }
-
- priv->testmode_mem.num_chunks =
- DIV_ROUND_UP(priv->testmode_mem.buff_size, DUMP_CHUNK_SIZE);
- priv->testmode_mem.read_in_progress = true;
- return 0;
-
-}
-
-static int iwl_testmode_indirect_write(struct iwl_priv *priv, u32 addr,
- u32 size, unsigned char *buf)
-{
- struct iwl_trans *trans = priv->trans;
- u32 val, i;
- unsigned long flags;
-
- if (IWL_TM_ABS_PRPH_START <= addr &&
- addr < IWL_TM_ABS_PRPH_START + PRPH_END) {
- /* Periphery writes can be 1-3 bytes long, or DWORDs */
- if (size < 4) {
- memcpy(&val, buf, size);
- spin_lock_irqsave(&trans->reg_lock, flags);
- iwl_grab_nic_access(trans);
- iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
- (addr & 0x0000FFFF) |
- ((size - 1) << 24));
- iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
- iwl_release_nic_access(trans);
- /* needed after consecutive writes w/o read */
- mmiowb();
- spin_unlock_irqrestore(&trans->reg_lock, flags);
- } else {
- if (size % 4)
- return -EINVAL;
- for (i = 0; i < size; i += 4)
- iwl_write_prph(trans, addr+i,
- *(u32 *)(buf+i));
- }
- } else if (iwlagn_hw_valid_rtc_data_addr(addr) ||
- (IWLAGN_RTC_INST_LOWER_BOUND <= addr &&
- addr < IWLAGN_RTC_INST_UPPER_BOUND)) {
- _iwl_write_targ_mem_words(trans, addr, buf, size/4);
- } else
- return -EINVAL;
- return 0;
-}
-
-/*
- * This function handles the user application commands for SRAM data dump
- *
- * It retrieves the mandatory fields IWL_TM_ATTR_SRAM_ADDR and
- * IWL_TM_ATTR_SRAM_SIZE to decide the memory area for SRAM data reading
- *
- * Several error will be retured, -EBUSY if the SRAM data retrieved by
- * previous command has not been delivered to userspace, or -ENOMSG if
- * the mandatory fields (IWL_TM_ATTR_SRAM_ADDR,IWL_TM_ATTR_SRAM_SIZE)
- * are missing, or -ENOMEM if the buffer allocation fails.
- *
- * Otherwise 0 is replied indicating the success of the SRAM reading.
- *
- * @hw: ieee80211_hw object that represents the device
- * @tb: gnl message fields from the user space
- */
-static int iwl_testmode_indirect_mem(struct ieee80211_hw *hw,
- struct nlattr **tb)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- u32 addr, size, cmd;
- unsigned char *buf;
-
- /* Both read and write should be blocked, for atomicity */
- if (priv->testmode_mem.read_in_progress)
- return -EBUSY;
-
- cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
- if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
- IWL_ERR(priv, "Error finding memory offset address\n");
- return -ENOMSG;
- }
- addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
- if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
- IWL_ERR(priv, "Error finding size for memory reading\n");
- return -ENOMSG;
- }
- size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
-
- if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ)
- return iwl_testmode_indirect_read(priv, addr, size);
- else {
- if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
- return -EINVAL;
- buf = (unsigned char *) nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
- return iwl_testmode_indirect_write(priv, addr, size, buf);
- }
-}
-
-static int iwl_testmode_buffer_dump(struct ieee80211_hw *hw,
- struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- int idx, length;
-
- if (priv->testmode_mem.read_in_progress) {
- idx = cb->args[4];
- if (idx >= priv->testmode_mem.num_chunks) {
- iwl_mem_cleanup(priv);
- return -ENOENT;
- }
- length = DUMP_CHUNK_SIZE;
- if (((idx + 1) == priv->testmode_mem.num_chunks) &&
- (priv->testmode_mem.buff_size % DUMP_CHUNK_SIZE))
- length = priv->testmode_mem.buff_size %
- DUMP_CHUNK_SIZE;
-
- if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
- priv->testmode_mem.buff_addr +
- (DUMP_CHUNK_SIZE * idx)))
- goto nla_put_failure;
- idx++;
- cb->args[4] = idx;
- return 0;
- } else
- return -EFAULT;
-
- nla_put_failure:
- return -ENOBUFS;
-}
-
-static int iwl_testmode_notifications(struct ieee80211_hw *hw,
- struct nlattr **tb)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- bool enable;
-
- enable = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
- if (enable)
- priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
- else
- priv->pre_rx_handler = NULL;
- return 0;
-}
-
-
-/* The testmode gnl message handler that takes the gnl message from the
- * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
- * invoke the corresponding handlers.
- *
- * This function is invoked when there is user space application sending
- * gnl message through the testmode tunnel NL80211_CMD_TESTMODE regulated
- * by nl80211.
- *
- * It retrieves the mandatory field, IWL_TM_ATTR_COMMAND, before
- * dispatching it to the corresponding handler.
- *
- * If IWL_TM_ATTR_COMMAND is missing, -ENOMSG is replied to user application;
- * -ENOSYS is replied to the user application if the command is unknown;
- * Otherwise, the command is dispatched to the respective handler.
- *
- * @hw: ieee80211_hw object that represents the device
- * @data: pointer to user space message
- * @len: length in byte of @data
- */
-int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
-{
- struct nlattr *tb[IWL_TM_ATTR_MAX];
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- int result;
-
- result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
- iwl_testmode_gnl_msg_policy);
- if (result != 0) {
- IWL_ERR(priv, "Error parsing the gnl message : %d\n", result);
- return result;
- }
-
- /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
- if (!tb[IWL_TM_ATTR_COMMAND]) {
- IWL_ERR(priv, "Missing testmode command type\n");
- return -ENOMSG;
- }
- /* in case multiple accesses to the device happens */
- mutex_lock(&priv->mutex);
-
- switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
- case IWL_TM_CMD_APP2DEV_UCODE:
- IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n");
- result = iwl_testmode_ucode(hw, tb);
- break;
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
- IWL_DEBUG_INFO(priv, "testmode cmd to register\n");
- result = iwl_testmode_reg(hw, tb);
- break;
- case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
- case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
- case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
- case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
- case IWL_TM_CMD_APP2DEV_GET_EEPROM:
- case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
- case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
- case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
- case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
- case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
- IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
- result = iwl_testmode_driver(hw, tb);
- break;
-
- case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
- case IWL_TM_CMD_APP2DEV_END_TRACE:
- case IWL_TM_CMD_APP2DEV_READ_TRACE:
- IWL_DEBUG_INFO(priv, "testmode uCode trace cmd to driver\n");
- result = iwl_testmode_trace(hw, tb);
- break;
-
- case IWL_TM_CMD_APP2DEV_OWNERSHIP:
- IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n");
- result = iwl_testmode_ownership(hw, tb);
- break;
-
- case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
- case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
- IWL_DEBUG_INFO(priv, "testmode indirect memory cmd "
- "to driver\n");
- result = iwl_testmode_indirect_mem(hw, tb);
- break;
-
- case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
- IWL_DEBUG_INFO(priv, "testmode notifications cmd "
- "to driver\n");
- result = iwl_testmode_notifications(hw, tb);
- break;
-
- default:
- IWL_ERR(priv, "Unknown testmode command\n");
- result = -ENOSYS;
- break;
- }
-
- mutex_unlock(&priv->mutex);
- return result;
-}
-
-int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct netlink_callback *cb,
- void *data, int len)
-{
- struct nlattr *tb[IWL_TM_ATTR_MAX];
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- int result;
- u32 cmd;
-
- if (cb->args[3]) {
- /* offset by 1 since commands start at 0 */
- cmd = cb->args[3] - 1;
- } else {
- result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
- iwl_testmode_gnl_msg_policy);
- if (result) {
- IWL_ERR(priv,
- "Error parsing the gnl message : %d\n", result);
- return result;
- }
-
- /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
- if (!tb[IWL_TM_ATTR_COMMAND]) {
- IWL_ERR(priv, "Missing testmode command type\n");
- return -ENOMSG;
- }
- cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
- cb->args[3] = cmd + 1;
- }
-
- /* in case multiple accesses to the device happens */
- mutex_lock(&priv->mutex);
- switch (cmd) {
- case IWL_TM_CMD_APP2DEV_READ_TRACE:
- IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
- result = iwl_testmode_trace_dump(hw, skb, cb);
- break;
- case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
- IWL_DEBUG_INFO(priv, "testmode sram dump cmd to driver\n");
- result = iwl_testmode_buffer_dump(hw, skb, cb);
- break;
- default:
- result = -EINVAL;
- break;
- }
-
- mutex_unlock(&priv->mutex);
- return result;
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 79a1e7a..92576a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -154,6 +154,9 @@ struct iwl_cmd_header {
__le16 sequence;
} __packed;
+/* iwl_cmd_header flags value */
+#define IWL_CMD_FAILED_MSK 0x40
+
#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID 0x55550000
@@ -280,21 +283,24 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
#define MAX_NO_RECLAIM_CMDS 6
+#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
+
/*
* Maximum number of HW queues the transport layer
* currently supports
*/
#define IWL_MAX_HW_QUEUES 32
+#define IWL_INVALID_STATION 255
+#define IWL_MAX_TID_COUNT 8
+#define IWL_FRAME_LIMIT 64
/**
* struct iwl_trans_config - transport configuration
*
* @op_mode: pointer to the upper layer.
- * @queue_to_fifo: queue to FIFO mapping to set up by
- * default
- * @n_queue_to_fifo: number of queues to set up
* @cmd_queue: the index of the command queue.
* Must be set before start_fw.
+ * @cmd_fifo: the fifo for host commands
* @no_reclaim_cmds: Some devices erroneously don't set the
* SEQ_RX_FRAME bit on some notifications, this is the
* list of such notifications to filter. Max length is
@@ -309,10 +315,9 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
*/
struct iwl_trans_config {
struct iwl_op_mode *op_mode;
- const u8 *queue_to_fifo;
- u8 n_queue_to_fifo;
u8 cmd_queue;
+ u8 cmd_fifo;
const u8 *no_reclaim_cmds;
int n_no_reclaim_cmds;
@@ -350,10 +355,10 @@ struct iwl_trans;
* Must be atomic
* @reclaim: free packet until ssn. Returns a list of freed packets.
* Must be atomic
- * @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
- * ready and a successful ADDBA response has been received.
- * May sleep
- * @tx_agg_disable: de-configure a Tx queue to send AMPDUs
+ * @txq_enable: setup a queue. To setup an AC queue, use the
+ * iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
+ * this one. The op_mode must not configure the HCMD queue. May sleep.
+ * @txq_disable: de-configure a Tx queue to send AMPDUs
* Must be atomic
* @wait_tx_queue_empty: wait until all tx queues are empty
* May sleep
@@ -386,9 +391,9 @@ struct iwl_trans_ops {
void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
struct sk_buff_head *skbs);
- void (*tx_agg_setup)(struct iwl_trans *trans, int queue, int fifo,
- int sta_id, int tid, int frame_limit, u16 ssn);
- void (*tx_agg_disable)(struct iwl_trans *trans, int queue);
+ void (*txq_enable)(struct iwl_trans *trans, int queue, int fifo,
+ int sta_id, int tid, int frame_limit, u16 ssn);
+ void (*txq_disable)(struct iwl_trans *trans, int queue);
int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
int (*wait_tx_queue_empty)(struct iwl_trans *trans);
@@ -428,6 +433,11 @@ enum iwl_trans_state {
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
* @pm_support: set to true in start_hw if link pm is supported
* @wait_command_queue: the wait_queue for SYNC host commands
+ * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
+ * The user should use iwl_trans_{alloc,free}_tx_cmd.
+ * @dev_cmd_headroom: room needed for the transport's private use before the
+ * device_cmd for Tx - for internal use only
+ * The user should use iwl_trans_{alloc,free}_tx_cmd.
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@@ -445,6 +455,11 @@ struct iwl_trans {
wait_queue_head_t wait_command_queue;
+ /* The following fields are internal only */
+ struct kmem_cache *dev_cmd_pool;
+ size_t dev_cmd_headroom;
+ char dev_cmd_pool_name[50];
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
@@ -483,9 +498,9 @@ static inline void iwl_trans_fw_alive(struct iwl_trans *trans)
{
might_sleep();
- trans->ops->fw_alive(trans);
-
trans->state = IWL_TRANS_FW_ALIVE;
+
+ trans->ops->fw_alive(trans);
}
static inline int iwl_trans_start_fw(struct iwl_trans *trans,
@@ -520,6 +535,26 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
return trans->ops->send_cmd(trans, cmd);
}
+static inline struct iwl_device_cmd *
+iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
+{
+ u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
+
+ if (unlikely(dev_cmd_ptr == NULL))
+ return NULL;
+
+ return (struct iwl_device_cmd *)
+ (dev_cmd_ptr + trans->dev_cmd_headroom);
+}
+
+static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
+ struct iwl_device_cmd *dev_cmd)
+{
+ u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom;
+
+ kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr);
+}
+
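
To show the intended pairing of these two helpers, a hedged sketch of a Tx path using them; the queue argument and the convention that the caller returns the command to the pool when iwl_trans_tx() fails are assumptions drawn from typical op_mode usage, not something this header enforces.

/* Illustrative sketch only -- not part of this patch. */
static int iwl_xxx_tx_skb(struct iwl_trans *trans, struct sk_buff *skb, int queue)
{
	struct iwl_device_cmd *dev_cmd;
	int ret;

	dev_cmd = iwl_trans_alloc_tx_cmd(trans);
	if (!dev_cmd)
		return -ENOMEM;

	/* ... fill the device command for this skb here ... */

	ret = iwl_trans_tx(trans, skb, dev_cmd, queue);
	if (ret)
		iwl_trans_free_tx_cmd(trans, dev_cmd);
	return ret;
}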
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int queue)
{
@@ -538,27 +573,34 @@ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
trans->ops->reclaim(trans, queue, ssn, skbs);
}
-static inline void iwl_trans_tx_agg_disable(struct iwl_trans *trans, int queue)
+static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue)
{
WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"%s bad state = %d", __func__, trans->state);
- trans->ops->tx_agg_disable(trans, queue);
+ trans->ops->txq_disable(trans, queue);
}
-static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans, int queue,
- int fifo, int sta_id, int tid,
- int frame_limit, u16 ssn)
+static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
+ int fifo, int sta_id, int tid,
+ int frame_limit, u16 ssn)
{
might_sleep();
WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"%s bad state = %d", __func__, trans->state);
- trans->ops->tx_agg_setup(trans, queue, fifo, sta_id, tid,
+ trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
frame_limit, ssn);
}
+static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
+ int fifo)
+{
+ iwl_trans_txq_enable(trans, queue, fifo, IWL_INVALID_STATION,
+ IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
+}
+
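The helpers above hand Tx command allocation to the transport and replace the old static queue-to-fifo map with explicit per-queue enabling. As a quick orientation (not part of the patch), here is a minimal sketch of how an op_mode is expected to drive them, assuming only the declarations in this header; the example_tx() wrapper and its error-handling convention are illustrative, and the queue is assumed to have been enabled once after fw_alive via iwl_trans_ac_txq_enable().

	/* Illustrative sketch only -- exercises the helpers declared above. */
	static int example_tx(struct iwl_trans *trans, struct sk_buff *skb,
			      int queue)
	{
		struct iwl_device_cmd *dev_cmd;
		int ret;

		/* Tx commands now come from the transport-owned pool */
		dev_cmd = iwl_trans_alloc_tx_cmd(trans);
		if (unlikely(!dev_cmd))
			return -ENOMEM;

		/* ... op_mode fills dev_cmd->payload with its Tx command ... */

		ret = iwl_trans_tx(trans, skb, dev_cmd, queue);
		if (ret)
			/* on failure the caller still owns the command */
			iwl_trans_free_tx_cmd(trans, dev_cmd);
		return ret;
	}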
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
{
WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/pcie/1000.c
index 2629a66..81b83f4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/1000.c
@@ -27,9 +27,9 @@
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
-#include "iwl-cfg.h"
#include "iwl-csr.h"
#include "iwl-agn-hw.h"
+#include "cfg.h"
/* Highest firmware API version supported */
#define IWL1000_UCODE_API_MAX 5
@@ -64,13 +64,26 @@ static const struct iwl_base_params iwl1000_base_params = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .wd_timeout = IWL_WATCHHDOG_DISABLED,
+ .wd_timeout = IWL_WATCHDOG_DISABLED,
.max_event_log_size = 128,
};
static const struct iwl_ht_params iwl1000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
+};
+
+static const struct iwl_eeprom_params iwl1000_eeprom_params = {
+ .regulatory_bands = {
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REGULATORY_BAND_NO_HT40,
+ }
};
#define IWL_DEVICE_1000 \
@@ -84,6 +97,7 @@ static const struct iwl_ht_params iwl1000_ht_params = {
.eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
.base_params = &iwl1000_base_params, \
+ .eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_BLINK
const struct iwl_cfg iwl1000_bgn_cfg = {
@@ -108,6 +122,7 @@ const struct iwl_cfg iwl1000_bg_cfg = {
.eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
.base_params = &iwl1000_base_params, \
+ .eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true
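The .ht40_bands field added to iwl1000_ht_params above is a per-band bitmask of where HT40 may be advertised (only 2.4 GHz for the 1000 series; the 5000/6000 entries later in this patch also set the 5 GHz bit). A hypothetical consumer would test it roughly as sketched below; the helper is made up for illustration, only the BIT(IEEE80211_BAND_*) convention comes from the patch.

	/* Hypothetical helper, sketch only. */
	static bool example_ht40_allowed(const struct iwl_ht_params *ht_params,
					 enum ieee80211_band band)
	{
		/* one bit per band; iwl1000 sets only BIT(IEEE80211_BAND_2GHZ) */
		return ht_params && (ht_params->ht40_bands & BIT(band));
	}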
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/pcie/2000.c
index 7f79341..9fbde32 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/2000.c
@@ -27,9 +27,9 @@
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
-#include "iwl-cfg.h"
#include "iwl-agn-hw.h"
-#include "iwl-commands.h" /* needed for BT for now */
+#include "cfg.h"
+#include "dvm/commands.h" /* needed for BT for now */
/* Highest firmware API version supported */
#define IWL2030_UCODE_API_MAX 6
@@ -79,7 +79,7 @@ static const struct iwl_base_params iwl2000_base_params = {
.chain_noise_scale = 1000,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
- .shadow_reg_enable = true,
+ .shadow_reg_enable = false, /* TODO: fix bugs when using this feature */
.hd_v2 = true,
};
@@ -97,13 +97,14 @@ static const struct iwl_base_params iwl2030_base_params = {
.chain_noise_scale = 1000,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
- .shadow_reg_enable = true,
+ .shadow_reg_enable = false, /* TODO: fix bugs when using this feature */
.hd_v2 = true,
};
static const struct iwl_ht_params iwl2000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
};
static const struct iwl_bt_params iwl2030_bt_params = {
@@ -111,11 +112,24 @@ static const struct iwl_bt_params iwl2030_bt_params = {
.advanced_bt_coexist = true,
.agg_time_limit = BT_AGG_THRESHOLD_DEF,
.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
- .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
+ .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT32,
.bt_sco_disable = true,
.bt_session_2 = true,
};
+static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
+ .regulatory_bands = {
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REGULATORY_BAND_NO_HT40,
+ },
+ .enhanced_txpower = true,
+};
+
#define IWL_DEVICE_2000 \
.fw_name_pre = IWL2000_FW_PRE, \
.ucode_api_max = IWL2000_UCODE_API_MAX, \
@@ -127,6 +141,7 @@ static const struct iwl_bt_params iwl2030_bt_params = {
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.base_params = &iwl2000_base_params, \
+ .eeprom_params = &iwl20x0_eeprom_params, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE
@@ -155,6 +170,7 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.base_params = &iwl2030_base_params, \
.bt_params = &iwl2030_bt_params, \
+ .eeprom_params = &iwl20x0_eeprom_params, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
@@ -177,6 +193,7 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.base_params = &iwl2000_base_params, \
+ .eeprom_params = &iwl20x0_eeprom_params, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
@@ -207,6 +224,7 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.base_params = &iwl2030_base_params, \
.bt_params = &iwl2030_bt_params, \
+ .eeprom_params = &iwl20x0_eeprom_params, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/pcie/5000.c
index 8e26bc8..d1665fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/5000.c
@@ -27,9 +27,9 @@
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
-#include "iwl-cfg.h"
#include "iwl-agn-hw.h"
#include "iwl-csr.h"
+#include "cfg.h"
/* Highest firmware API version supported */
#define IWL5000_UCODE_API_MAX 5
@@ -62,13 +62,26 @@ static const struct iwl_base_params iwl5000_base_params = {
.led_compensation = 51,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .wd_timeout = IWL_WATCHHDOG_DISABLED,
+ .wd_timeout = IWL_WATCHDOG_DISABLED,
.max_event_log_size = 512,
.no_idle_support = true,
};
static const struct iwl_ht_params iwl5000_ht_params = {
.ht_greenfield_support = true,
+ .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+};
+
+static const struct iwl_eeprom_params iwl5000_eeprom_params = {
+ .regulatory_bands = {
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REG_BAND_52_HT40_CHANNELS
+ },
};
#define IWL_DEVICE_5000 \
@@ -82,6 +95,7 @@ static const struct iwl_ht_params iwl5000_ht_params = {
.eeprom_ver = EEPROM_5000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
.base_params = &iwl5000_base_params, \
+ .eeprom_params = &iwl5000_eeprom_params, \
.led_mode = IWL_LED_BLINK
const struct iwl_cfg iwl5300_agn_cfg = {
@@ -128,6 +142,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
.base_params = &iwl5000_base_params,
+ .eeprom_params = &iwl5000_eeprom_params,
.ht_params = &iwl5000_ht_params,
.led_mode = IWL_LED_BLINK,
.internal_wimax_coex = true,
@@ -144,6 +159,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.eeprom_ver = EEPROM_5050_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
.base_params = &iwl5000_base_params, \
+ .eeprom_params = &iwl5000_eeprom_params, \
.no_xtal_calib = true, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/pcie/6000.c
index 381b02c..4a57624 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/6000.c
@@ -27,25 +27,28 @@
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
-#include "iwl-cfg.h"
#include "iwl-agn-hw.h"
-#include "iwl-commands.h" /* needed for BT for now */
+#include "cfg.h"
+#include "dvm/commands.h" /* needed for BT for now */
/* Highest firmware API version supported */
#define IWL6000_UCODE_API_MAX 6
#define IWL6050_UCODE_API_MAX 5
#define IWL6000G2_UCODE_API_MAX 6
+#define IWL6035_UCODE_API_MAX 6
/* Oldest version we won't warn about */
#define IWL6000_UCODE_API_OK 4
#define IWL6000G2_UCODE_API_OK 5
#define IWL6050_UCODE_API_OK 5
#define IWL6000G2B_UCODE_API_OK 6
+#define IWL6035_UCODE_API_OK 6
/* Lowest firmware API version supported */
#define IWL6000_UCODE_API_MIN 4
#define IWL6050_UCODE_API_MIN 4
-#define IWL6000G2_UCODE_API_MIN 4
+#define IWL6000G2_UCODE_API_MIN 5
+#define IWL6035_UCODE_API_MIN 6
/* EEPROM versions */
#define EEPROM_6000_TX_POWER_VERSION (4)
@@ -86,7 +89,7 @@ static const struct iwl_base_params iwl6000_base_params = {
.chain_noise_scale = 1000,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 512,
- .shadow_reg_enable = true,
+ .shadow_reg_enable = false, /* TODO: fix bugs when using this feature */
};
static const struct iwl_base_params iwl6050_base_params = {
@@ -102,7 +105,7 @@ static const struct iwl_base_params iwl6050_base_params = {
.chain_noise_scale = 1500,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 1024,
- .shadow_reg_enable = true,
+ .shadow_reg_enable = false, /* TODO: fix bugs when using this feature */
};
static const struct iwl_base_params iwl6000_g2_base_params = {
@@ -118,12 +121,13 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
.chain_noise_scale = 1000,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
- .shadow_reg_enable = true,
+ .shadow_reg_enable = false, /* TODO: fix bugs when using this feature */
};
static const struct iwl_ht_params iwl6000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
};
static const struct iwl_bt_params iwl6000_bt_params = {
@@ -135,6 +139,19 @@ static const struct iwl_bt_params iwl6000_bt_params = {
.bt_sco_disable = true,
};
+static const struct iwl_eeprom_params iwl6000_eeprom_params = {
+ .regulatory_bands = {
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REG_BAND_52_HT40_CHANNELS
+ },
+ .enhanced_txpower = true,
+};
+
#define IWL_DEVICE_6005 \
.fw_name_pre = IWL6005_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
@@ -146,6 +163,7 @@ static const struct iwl_bt_params iwl6000_bt_params = {
.eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
.base_params = &iwl6000_g2_base_params, \
+ .eeprom_params = &iwl6000_eeprom_params, \
.need_temp_offset_calib = true, \
.led_mode = IWL_LED_RF_STATE
@@ -201,6 +219,7 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
.eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
.base_params = &iwl6000_g2_base_params, \
.bt_params = &iwl6000_bt_params, \
+ .eeprom_params = &iwl6000_eeprom_params, \
.need_temp_offset_calib = true, \
.led_mode = IWL_LED_RF_STATE, \
.adv_pm = true \
@@ -227,9 +246,26 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
IWL_DEVICE_6030,
};
+#define IWL_DEVICE_6035 \
+ .fw_name_pre = IWL6030_FW_PRE, \
+ .ucode_api_max = IWL6035_UCODE_API_MAX, \
+ .ucode_api_ok = IWL6035_UCODE_API_OK, \
+ .ucode_api_min = IWL6035_UCODE_API_MIN, \
+ .device_family = IWL_DEVICE_FAMILY_6030, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
+ .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
+ .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
+ .base_params = &iwl6000_g2_base_params, \
+ .bt_params = &iwl6000_bt_params, \
+ .eeprom_params = &iwl6000_eeprom_params, \
+ .need_temp_offset_calib = true, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .adv_pm = true
+
const struct iwl_cfg iwl6035_2agn_cfg = {
.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
- IWL_DEVICE_6030,
+ IWL_DEVICE_6035,
.ht_params = &iwl6000_ht_params,
};
@@ -273,6 +309,7 @@ const struct iwl_cfg iwl130_bg_cfg = {
.eeprom_ver = EEPROM_6000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
.base_params = &iwl6000_base_params, \
+ .eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK
const struct iwl_cfg iwl6000i_2agn_cfg = {
@@ -303,6 +340,7 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
.eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
.base_params = &iwl6050_base_params, \
+ .eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
@@ -327,6 +365,7 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
.eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
.base_params = &iwl6050_base_params, \
+ .eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
@@ -353,6 +392,7 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
.base_params = &iwl6000_base_params,
+ .eeprom_params = &iwl6000_eeprom_params,
.ht_params = &iwl6000_ht_params,
.led_mode = IWL_LED_BLINK,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-cfg.h b/drivers/net/wireless/iwlwifi/pcie/cfg.h
index 82152311..82152311 100644
--- a/drivers/net/wireless/iwlwifi/iwl-cfg.h
+++ b/drivers/net/wireless/iwlwifi/pcie/cfg.h
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 0c8a1c2..f4c3500 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -68,10 +68,11 @@
#include <linux/pci-aspm.h>
#include "iwl-trans.h"
-#include "iwl-cfg.h"
#include "iwl-drv.h"
#include "iwl-trans.h"
-#include "iwl-trans-pcie-int.h"
+
+#include "cfg.h"
+#include "internal.h"
#define IWL_PCI_DEVICE(dev, subdev, cfg) \
.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 6213c05..d9694c5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -269,10 +269,9 @@ struct iwl_trans_pcie {
wait_queue_head_t ucode_write_waitq;
unsigned long status;
u8 cmd_queue;
+ u8 cmd_fifo;
u8 n_no_reclaim_cmds;
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
- u8 setup_q_to_fifo[IWL_MAX_HW_QUEUES];
- u8 n_q_to_fifo;
bool rx_buf_size_8k;
u32 rx_page_order;
@@ -313,7 +312,7 @@ void iwl_bg_rx_replenish(struct work_struct *data);
void iwl_irq_tasklet(struct iwl_trans *trans);
void iwlagn_rx_replenish(struct iwl_trans *trans);
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
- struct iwl_rx_queue *q);
+ struct iwl_rx_queue *q);
/*****************************************************
* ICT
@@ -328,7 +327,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data);
* TX / HCMD
******************************************************/
void iwl_txq_update_write_ptr(struct iwl_trans *trans,
- struct iwl_tx_queue *txq);
+ struct iwl_tx_queue *txq);
int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
dma_addr_t addr, u16 len, u8 reset);
@@ -337,17 +336,13 @@ int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_tx_cmd_complete(struct iwl_trans *trans,
struct iwl_rx_cmd_buffer *rxb, int handler_status);
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_tx_queue *txq,
- u16 byte_cnt);
-void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int queue);
-void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
-void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, bool active);
-void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
- int sta_id, int tid, int frame_limit, u16 ssn);
-void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
- int index, enum dma_data_direction dma_dir);
+ struct iwl_tx_queue *txq,
+ u16 byte_cnt);
+void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
+ int sta_id, int tid, int frame_limit, u16 ssn);
+void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
+void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
+ enum dma_data_direction dma_dir);
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
struct sk_buff_head *skbs);
int iwl_queue_space(const struct iwl_queue *q);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 08517d3..39a6ca1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -32,7 +32,7 @@
#include "iwl-prph.h"
#include "iwl-io.h"
-#include "iwl-trans-pcie-int.h"
+#include "internal.h"
#include "iwl-op-mode.h"
#ifdef CONFIG_IWLWIFI_IDI
@@ -130,7 +130,7 @@ static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
* iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
*/
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
- struct iwl_rx_queue *q)
+ struct iwl_rx_queue *q)
{
unsigned long flags;
u32 reg;
@@ -201,9 +201,7 @@ static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
*/
static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
struct list_head *element;
struct iwl_rx_mem_buffer *rxb;
@@ -253,9 +251,7 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
*/
static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
struct list_head *element;
struct iwl_rx_mem_buffer *rxb;
@@ -278,8 +274,7 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
gfp_mask |= __GFP_COMP;
/* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask,
- trans_pcie->rx_page_order);
+ page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
if (!page) {
if (net_ratelimit())
IWL_DEBUG_INFO(trans, "alloc_pages failed, "
@@ -315,9 +310,10 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
BUG_ON(rxb->page);
rxb->page = page;
/* Get physical address of the RB */
- rxb->page_dma = dma_map_page(trans->dev, page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
+ rxb->page_dma =
+ dma_map_page(trans->dev, page, 0,
+ PAGE_SIZE << trans_pcie->rx_page_order,
+ DMA_FROM_DEVICE);
/* dma address must be no more than 36 bits */
BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
/* and also 256 byte aligned! */
@@ -465,8 +461,8 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
if (rxb->page != NULL) {
rxb->page_dma =
dma_map_page(trans->dev, rxb->page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
+ PAGE_SIZE << trans_pcie->rx_page_order,
+ DMA_FROM_DEVICE);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
} else
@@ -497,7 +493,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
/* Rx interrupt, but nothing sent from uCode */
if (i == r)
- IWL_DEBUG_RX(trans, "r = %d, i = %d\n", r, i);
+ IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
/* calculate total frames need to be restock after handling RX */
total_empty = r - rxq->write_actual;
@@ -513,8 +509,8 @@ static void iwl_rx_handle(struct iwl_trans *trans)
rxb = rxq->queue[i];
rxq->queue[i] = NULL;
- IWL_DEBUG_RX(trans, "rxbuf: r = %d, i = %d (%p)\n", rxb);
-
+ IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
+ r, i, rxb);
iwl_rx_handle_rxbuf(trans, rxb);
i = (i + 1) & RX_QUEUE_MASK;
@@ -546,12 +542,12 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
if (trans->cfg->internal_wimax_coex &&
(!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
- APMS_CLK_VAL_MRB_FUNC_MODE) ||
+ APMS_CLK_VAL_MRB_FUNC_MODE) ||
(iwl_read_prph(trans, APMG_PS_CTRL_REG) &
- APMG_PS_CTRL_VAL_RESET_REQ))) {
- struct iwl_trans_pcie *trans_pcie;
+ APMG_PS_CTRL_VAL_RESET_REQ))) {
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
- trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
iwl_op_mode_wimax_active(trans->op_mode);
wake_up(&trans->wait_command_queue);
@@ -567,6 +563,8 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
/* tasklet for iwlagn interrupt */
void iwl_irq_tasklet(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta = 0;
u32 handled = 0;
unsigned long flags;
@@ -575,10 +573,6 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
u32 inta_mask;
#endif
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
-
-
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
/* Ack/clear/reset pending uCode interrupts.
@@ -593,7 +587,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
* interrupt coalescing can still be achieved.
*/
iwl_write32(trans, CSR_INT,
- trans_pcie->inta | ~trans_pcie->inta_mask);
+ trans_pcie->inta | ~trans_pcie->inta_mask);
inta = trans_pcie->inta;
@@ -602,7 +596,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
/* just for debug */
inta_mask = iwl_read32(trans, CSR_INT_MASK);
IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
- inta, inta_mask);
+ inta, inta_mask);
}
#endif
@@ -651,7 +645,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
hw_rfkill = iwl_is_rfkill_set(trans);
IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
- hw_rfkill ? "disable radio" : "enable radio");
+ hw_rfkill ? "disable radio" : "enable radio");
isr_stats->rfkill++;
@@ -693,7 +687,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
* Rx "responses" (frame-received notification), and other
* notifications from uCode come through here*/
if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
- CSR_INT_BIT_RX_PERIODIC)) {
+ CSR_INT_BIT_RX_PERIODIC)) {
IWL_DEBUG_ISR(trans, "Rx interrupt\n");
if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
@@ -733,7 +727,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
*/
if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
iwl_write8(trans, CSR_INT_PERIODIC_REG,
- CSR_INT_PERIODIC_ENA);
+ CSR_INT_PERIODIC_ENA);
isr_stats->rx++;
}
@@ -782,8 +776,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
/* Free dram table */
void iwl_free_isr_ict(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (trans_pcie->ict_tbl) {
dma_free_coherent(trans->dev, ICT_SIZE,
@@ -802,8 +795,7 @@ void iwl_free_isr_ict(struct iwl_trans *trans)
*/
int iwl_alloc_isr_ict(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->ict_tbl =
dma_alloc_coherent(trans->dev, ICT_SIZE,
@@ -837,10 +829,9 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
*/
void iwl_reset_ict(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
unsigned long flags;
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
if (!trans_pcie->ict_tbl)
return;
@@ -868,9 +859,7 @@ void iwl_reset_ict(struct iwl_trans *trans)
/* Device is going down disable ict interrupt usage */
void iwl_disable_ict(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long flags;
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
@@ -878,23 +867,19 @@ void iwl_disable_ict(struct iwl_trans *trans)
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
+/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
static irqreturn_t iwl_isr(int irq, void *data)
{
struct iwl_trans *trans = data;
- struct iwl_trans_pcie *trans_pcie;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 inta, inta_mask;
- unsigned long flags;
#ifdef CONFIG_IWLWIFI_DEBUG
u32 inta_fh;
#endif
- if (!trans)
- return IRQ_NONE;
- trace_iwlwifi_dev_irq(trans->dev);
+ lockdep_assert_held(&trans_pcie->irq_lock);
- trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ trace_iwlwifi_dev_irq(trans->dev);
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
@@ -918,7 +903,7 @@ static irqreturn_t iwl_isr(int irq, void *data)
/* Hardware disappeared. It might have already raised
* an interrupt */
IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- goto unplugged;
+ return IRQ_HANDLED;
}
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -934,21 +919,16 @@ static irqreturn_t iwl_isr(int irq, void *data)
if (likely(inta))
tasklet_schedule(&trans_pcie->irq_tasklet);
else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
- !trans_pcie->inta)
+ !trans_pcie->inta)
iwl_enable_interrupts(trans);
- unplugged:
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return IRQ_HANDLED;
-
- none:
+none:
/* re-enable interrupts here since we don't have anything to service. */
/* only Re-enable if disabled by irq and no schedules tasklet. */
if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
- !trans_pcie->inta)
+ !trans_pcie->inta)
iwl_enable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
return IRQ_NONE;
}
@@ -974,15 +954,19 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+
/* dram interrupt table not set yet,
* use legacy interrupt.
*/
- if (!trans_pcie->use_ict)
- return iwl_isr(irq, data);
+ if (unlikely(!trans_pcie->use_ict)) {
+ irqreturn_t ret = iwl_isr(irq, data);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ return ret;
+ }
trace_iwlwifi_dev_irq(trans->dev);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
@@ -1036,7 +1020,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
inta = (0xff & val) | ((0xff00 & val) << 16);
IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
- inta, inta_mask, val);
+ inta, inta_mask, val);
inta &= trans_pcie->inta_mask;
trans_pcie->inta |= inta;
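The rx.c changes above invert the interrupt-path locking: iwl_isr() no longer takes trans_pcie->irq_lock itself and only asserts that it is held, while iwl_isr_ict() acquires the lock once and then either walks the ICT table or falls back to the legacy handler. The fragment below is a condensed sketch of that dispatch shape only, assuming the pcie internal.h declarations from this patch; the register handling is elided and the names are changed (example_*) to make clear it is not the final code.

	/* Sketch of the new dispatch/locking shape; hardware handling elided. */
	static irqreturn_t example_legacy_isr(struct iwl_trans *trans)
	{
		struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

		lockdep_assert_held(&trans_pcie->irq_lock); /* caller holds it */
		/* ... read CSR_INT, schedule the tasklet or re-enable irqs ... */
		return IRQ_HANDLED;
	}

	static irqreturn_t example_ict_isr(int irq, void *data)
	{
		struct iwl_trans *trans = data;
		struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
		unsigned long flags;
		irqreturn_t ret = IRQ_HANDLED;

		spin_lock_irqsave(&trans_pcie->irq_lock, flags);
		if (unlikely(!trans_pcie->use_ict))
			ret = example_legacy_isr(trans); /* lock already held */
		/* else: ... read the ICT table, accumulate trans_pcie->inta ... */
		spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

		return ret;
	}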
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 2e57161..939c2f7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -70,15 +70,12 @@
#include "iwl-drv.h"
#include "iwl-trans.h"
-#include "iwl-trans-pcie-int.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
-#include "iwl-eeprom.h"
#include "iwl-agn-hw.h"
+#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
-#include "iwl-commands.h"
-
-#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
+#include "dvm/commands.h"
#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \
(((1<<trans->cfg->base_params->num_of_queues) - 1) &\
@@ -86,8 +83,7 @@
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
struct device *dev = trans->dev;
@@ -114,7 +110,7 @@ static int iwl_trans_rx_alloc(struct iwl_trans *trans)
err_rb_stts:
dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
- rxq->bd, rxq->bd_dma);
+ rxq->bd, rxq->bd_dma);
memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
rxq->bd = NULL;
err_bd:
@@ -123,8 +119,7 @@ err_bd:
static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
int i;
@@ -134,8 +129,8 @@ static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
* to an SKB, so we need to unmap and free potential storage */
if (rxq->pool[i].page != NULL) {
dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
+ PAGE_SIZE << trans_pcie->rx_page_order,
+ DMA_FROM_DEVICE);
__free_pages(rxq->pool[i].page,
trans_pcie->rx_page_order);
rxq->pool[i].page = NULL;
@@ -193,8 +188,7 @@ static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
static int iwl_rx_init(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
int i, err;
@@ -236,10 +230,8 @@ static int iwl_rx_init(struct iwl_trans *trans)
static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
-
unsigned long flags;
/*if rxq->bd is NULL, it means that nothing has been allocated,
@@ -274,11 +266,11 @@ static int iwl_trans_rx_stop(struct iwl_trans *trans)
/* stop Rx DMA */
iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
- FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+ FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
-static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
- struct iwl_dma_ptr *ptr, size_t size)
+static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr, size_t size)
{
if (WARN_ON(ptr->addr))
return -EINVAL;
@@ -291,8 +283,8 @@ static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
return 0;
}
-static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
- struct iwl_dma_ptr *ptr)
+static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr)
{
if (unlikely(!ptr->addr))
return;
@@ -304,8 +296,13 @@ static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
{
struct iwl_tx_queue *txq = (void *)data;
+ struct iwl_queue *q = &txq->q;
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+ u32 scd_sram_addr = trans_pcie->scd_base_addr +
+ SCD_TX_STTS_MEM_LOWER_BOUND + (16 * txq->q.id);
+ u8 buf[16];
+ int i;
spin_lock(&txq->lock);
/* check if triggered erroneously */
@@ -315,26 +312,59 @@ static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
}
spin_unlock(&txq->lock);
-
IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
jiffies_to_msecs(trans_pcie->wd_timeout));
IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
txq->q.read_ptr, txq->q.write_ptr);
- IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
- iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq->q.id))
- & (TFD_QUEUE_SIZE_MAX - 1),
- iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq->q.id)));
+
+ iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
+
+ iwl_print_hex_error(trans, buf, sizeof(buf));
+
+ for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
+ IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
+ iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
+
+ for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+ u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
+ u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+ bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
+ u32 tbl_dw =
+ iwl_read_targ_mem(trans,
+ trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(i));
+
+ if (i & 0x1)
+ tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
+ else
+ tbl_dw = tbl_dw & 0x0000FFFF;
+
+ IWL_ERR(trans,
+ "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
+ i, active ? "" : "in", fifo, tbl_dw,
+ iwl_read_prph(trans,
+ SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
+ }
+
+ for (i = q->read_ptr; i != q->write_ptr;
+ i = iwl_queue_inc_wrap(i, q->n_bd)) {
+ struct iwl_tx_cmd *tx_cmd =
+ (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
+ IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
+ get_unaligned_le32(&tx_cmd->scratch));
+ }
iwl_op_mode_nic_error(trans->op_mode);
}
static int iwl_trans_txq_alloc(struct iwl_trans *trans,
- struct iwl_tx_queue *txq, int slots_num,
- u32 txq_id)
+ struct iwl_tx_queue *txq, int slots_num,
+ u32 txq_id)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
int i;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (WARN_ON(txq->entries || txq->tfds))
return -EINVAL;
@@ -435,9 +465,7 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
spin_lock_bh(&txq->lock);
while (q->write_ptr != q->read_ptr) {
- /* The read_ptr needs to bound by q->n_window */
- iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
- dma_dir);
+ iwl_txq_free_tfd(trans, txq, dma_dir);
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
}
spin_unlock_bh(&txq->lock);
@@ -457,6 +485,7 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
struct device *dev = trans->dev;
int i;
+
if (WARN_ON(!txq))
return;
@@ -576,11 +605,11 @@ error:
}
static int iwl_tx_init(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
int txq_id, slots_num;
unsigned long flags;
bool alloc = false;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (!trans_pcie->txq) {
ret = iwl_trans_tx_alloc(trans);
@@ -645,10 +674,9 @@ static void iwl_set_pwr_vmain(struct iwl_trans *trans)
static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int pos;
u16 pci_lnk_ctl;
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
struct pci_dev *pci_dev = trans_pcie->pci_dev;
@@ -702,14 +730,14 @@ static int iwl_apm_init(struct iwl_trans *trans)
/* Disable L0S exit timer (platform NMI Work/Around) */
iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
/*
* Disable L0s without affecting L1;
* don't wait for ICH L0s (ICH bug W/A)
*/
iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+ CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
/* Set FH wait threshold to maximum (HW error during stress W/A) */
iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
@@ -719,7 +747,7 @@ static int iwl_apm_init(struct iwl_trans *trans)
* wake device's PCI Express link L1a -> L0s
*/
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+ CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
iwl_apm_config(trans);
@@ -740,8 +768,8 @@ static int iwl_apm_init(struct iwl_trans *trans)
* and accesses to uCode SRAM.
*/
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
if (ret < 0) {
IWL_DEBUG_INFO(trans, "Failed to init the card\n");
goto out;
@@ -775,8 +803,8 @@ static int iwl_apm_stop_master(struct iwl_trans *trans)
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
ret = iwl_poll_bit(trans, CSR_RESET,
- CSR_RESET_REG_FLAG_MASTER_DISABLED,
- CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+ CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
if (ret)
IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
@@ -818,8 +846,7 @@ static int iwl_nic_init(struct iwl_trans *trans)
iwl_apm_init(trans);
/* Set interrupt coalescing calibration timer to default (512 usecs) */
- iwl_write8(trans, CSR_INT_COALESCING,
- IWL_HOST_INT_CALIB_TIMEOUT_DEF);
+ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
@@ -838,8 +865,8 @@ static int iwl_nic_init(struct iwl_trans *trans)
if (trans->cfg->base_params->shadow_reg_enable) {
/* enable shadow regs in HW */
- iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
- 0x800FFFFF);
+ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
+ IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
}
return 0;
@@ -853,13 +880,13 @@ static int iwl_set_hw_ready(struct iwl_trans *trans)
int ret;
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
/* See if we got it */
ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- HW_READY_TIMEOUT);
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ HW_READY_TIMEOUT);
IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
return ret;
@@ -879,11 +906,11 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
/* If HW is not ready, prepare the conditions to check again */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PREPARE);
+ CSR_HW_IF_CONFIG_REG_PREPARE);
ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
- ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+ ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
if (ret < 0)
return ret;
@@ -910,32 +937,33 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
trans_pcie->ucode_write_complete = false;
iwl_write_direct32(trans,
- FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
+ FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
iwl_write_direct32(trans,
- FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
+ FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
+ dst_addr);
iwl_write_direct32(trans,
FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
iwl_write_direct32(trans,
- FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
- (iwl_get_dma_hi_addr(phy_addr)
- << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
+ FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
+ (iwl_get_dma_hi_addr(phy_addr)
+ << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
iwl_write_direct32(trans,
- FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
- 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
- 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
- FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
+ FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
+ 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
+ 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
+ FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
iwl_write_direct32(trans,
- FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+ FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
section_num);
@@ -1018,15 +1046,12 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
/*
* Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
- * must be called under the irq lock and with MAC access
*/
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
struct iwl_trans_pcie __maybe_unused *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- lockdep_assert_held(&trans_pcie->irq_lock);
-
iwl_write_prph(trans, SCD_TXFACT, mask);
}
@@ -1034,11 +1059,12 @@ static void iwl_tx_start(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 a;
- unsigned long flags;
- int i, chan;
+ int chan;
u32 reg_val;
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ /* make sure all queues are not stopped/used */
+ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+ memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
trans_pcie->scd_base_addr =
iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
@@ -1060,64 +1086,31 @@ static void iwl_tx_start(struct iwl_trans *trans)
iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
trans_pcie->scd_bc_tbls.dma >> 10);
+ /* The chain extension of the SCD doesn't work well. This feature is
+ * enabled by default by the HW, so we need to disable it manually.
+ */
+ iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+
+ iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
+ trans_pcie->cmd_fifo);
+
+ /* Activate all Tx DMA/FIFO channels */
+ iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
+
/* Enable DMA channel */
for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
/* Update FH chicken bits */
reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
- iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
- SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
- iwl_write_prph(trans, SCD_AGGR_SEL, 0);
-
- /* initiate the queues */
- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
- iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
- iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
- iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
- SCD_CONTEXT_QUEUE_OFFSET(i), 0);
- iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
- SCD_CONTEXT_QUEUE_OFFSET(i) +
- sizeof(u32),
- ((SCD_WIN_SIZE <<
- SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
- SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
- ((SCD_FRAME_LIMIT <<
- SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
- }
-
- iwl_write_prph(trans, SCD_INTERRUPT_MASK,
- IWL_MASK(0, trans->cfg->base_params->num_of_queues));
-
- /* Activate all Tx DMA/FIFO channels */
- iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
-
- iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);
-
- /* make sure all queue are not stopped/used */
- memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
-
- for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
- int fifo = trans_pcie->setup_q_to_fifo[i];
-
- set_bit(i, trans_pcie->queue_used);
-
- iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
- fifo, true);
- }
-
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-
/* Enable L1-Active */
iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
@@ -1131,9 +1124,9 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
*/
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ch, txq_id, ret;
unsigned long flags;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
/* Turn off all Tx DMA fifos */
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
@@ -1145,18 +1138,19 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
iwl_write_direct32(trans,
FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
- 1000);
+ FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
if (ret < 0)
- IWL_ERR(trans, "Failing on timeout while stopping"
- " DMA channel %d [0x%08x]", ch,
- iwl_read_direct32(trans,
- FH_TSSR_TX_STATUS_REG));
+ IWL_ERR(trans,
+ "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
+ ch,
+ iwl_read_direct32(trans,
+ FH_TSSR_TX_STATUS_REG));
}
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
if (!trans_pcie->txq) {
- IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
+ IWL_WARN(trans,
+ "Stopping tx queues that aren't allocated...\n");
return 0;
}
@@ -1170,8 +1164,8 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
- unsigned long flags;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned long flags;
/* tell the device to stop sending interrupts */
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
@@ -1201,7 +1195,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
iwl_apm_stop(trans);
@@ -1270,13 +1264,27 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
+ /* In AGG mode, the index in the ring must correspond to the WiFi
+ * sequence number. This is a HW requirement to help the SCD to parse
+ * the BA.
+ * Check here that the packets are in the right place on the ring.
+ */
+#ifdef CONFIG_IWLWIFI_DEBUG
+ wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
+ ((wifi_seq & 0xff) != q->write_ptr),
+ "Q: %d WiFi Seq %d tfdNum %d",
+ txq_id, wifi_seq, q->write_ptr);
+#endif
+
/* Set up driver data for this TFD */
txq->entries[q->write_ptr].skb = skb;
txq->entries[q->write_ptr].cmd = dev_cmd;
dev_cmd->hdr.cmd = REPLY_TX;
- dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
+ dev_cmd->hdr.sequence =
+ cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(q->write_ptr)));
/* Set up first empty entry in queue's array of Tx/cmd buffers */
out_meta = &txq->entries[q->write_ptr].meta;
@@ -1341,7 +1349,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* take back ownership of DMA buffer to enable update */
dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
- DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
@@ -1353,16 +1361,17 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
- DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
trace_iwlwifi_dev_tx(trans->dev,
- &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+ &txq->tfds[txq->q.write_ptr],
sizeof(struct iwl_tfd),
&dev_cmd->hdr, firstlen,
skb->data + hdr_len, secondlen);
/* start timer if queue currently empty */
- if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
+ if (txq->need_update && q->read_ptr == q->write_ptr &&
+ trans_pcie->wd_timeout)
mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
/* Tell device the write index *just past* this latest filled TFD */
@@ -1392,8 +1401,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int err;
bool hw_rfkill;
@@ -1406,7 +1414,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
iwl_alloc_isr_ict(trans);
err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
- DRV_NAME, trans);
+ DRV_NAME, trans);
if (err) {
IWL_ERR(trans, "Error allocating IRQ %d\n",
trans_pcie->irq);
@@ -1419,7 +1427,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
err = iwl_prepare_card_hw(trans);
if (err) {
- IWL_ERR(trans, "Error while preparing HW: %d", err);
+ IWL_ERR(trans, "Error while preparing HW: %d\n", err);
goto err_free_irq;
}
@@ -1444,9 +1452,9 @@ error:
static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
bool op_mode_leaving)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill;
unsigned long flags;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
iwl_apm_stop(trans);
@@ -1517,6 +1525,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->cmd_queue = trans_cfg->cmd_queue;
+ trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
trans_pcie->n_no_reclaim_cmds = 0;
else
@@ -1525,17 +1534,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
trans_pcie->n_no_reclaim_cmds * sizeof(u8));
- trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo;
-
- if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES))
- trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES;
-
- /* at least the command queue must be mapped */
- WARN_ON(!trans_pcie->n_q_to_fifo);
-
- memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
- trans_pcie->n_q_to_fifo * sizeof(u8));
-
trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
if (trans_pcie->rx_buf_size_8k)
trans_pcie->rx_page_order = get_order(8 * 1024);
@@ -1550,8 +1548,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
iwl_trans_pcie_tx_free(trans);
#ifndef CONFIG_IWLWIFI_IDI
@@ -1566,6 +1563,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iounmap(trans_pcie->hw_base);
pci_release_regions(trans_pcie->pci_dev);
pci_disable_device(trans_pcie->pci_dev);
+ kmem_cache_destroy(trans->dev_cmd_pool);
kfree(trans);
}
@@ -1813,8 +1811,8 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
};
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1850,11 +1848,11 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
}
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
struct iwl_trans *trans = file->private_data;
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
char buf[256];
int pos = 0;
@@ -1878,11 +1876,10 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
char __user *user_buf,
- size_t count, loff_t *ppos) {
-
+ size_t count, loff_t *ppos)
+{
struct iwl_trans *trans = file->private_data;
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
int pos = 0;
@@ -1940,8 +1937,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
size_t count, loff_t *ppos)
{
struct iwl_trans *trans = file->private_data;
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
char buf[8];
@@ -1961,8 +1957,8 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
}
static ssize_t iwl_dbgfs_csr_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct iwl_trans *trans = file->private_data;
char buf[8];
@@ -1982,8 +1978,8 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
}
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct iwl_trans *trans = file->private_data;
char *buf;
@@ -2009,7 +2005,9 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
if (!trans->op_mode)
return -EAGAIN;
+ local_bh_disable();
iwl_op_mode_nic_error(trans->op_mode);
+ local_bh_enable();
return count;
}
@@ -2026,7 +2024,7 @@ DEBUGFS_WRITE_FILE_OPS(fw_restart);
*
*/
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
- struct dentry *dir)
+ struct dentry *dir)
{
DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
@@ -2038,9 +2036,10 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
- struct dentry *dir)
-{ return 0; }
-
+ struct dentry *dir)
+{
+ return 0;
+}
#endif /*CONFIG_IWLWIFI_DEBUGFS */
static const struct iwl_trans_ops trans_ops_pcie = {
@@ -2057,8 +2056,8 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.tx = iwl_trans_pcie_tx,
.reclaim = iwl_trans_pcie_reclaim,
- .tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
- .tx_agg_setup = iwl_trans_pcie_tx_agg_setup,
+ .txq_disable = iwl_trans_pcie_txq_disable,
+ .txq_enable = iwl_trans_pcie_txq_enable,
.dbgfs_register = iwl_trans_pcie_dbgfs_register,
@@ -2085,7 +2084,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
int err;
trans = kzalloc(sizeof(struct iwl_trans) +
- sizeof(struct iwl_trans_pcie), GFP_KERNEL);
+ sizeof(struct iwl_trans_pcie), GFP_KERNEL);
if (WARN_ON(!trans))
return NULL;
@@ -2101,7 +2100,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* W/A - seems to solve weird behavior. We need to remove this if we
* don't want to stay in L1 all the time. This wastes a lot of power */
pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
+ PCIE_LINK_STATE_CLKPM);
if (pci_enable_device(pdev)) {
err = -ENODEV;
@@ -2117,7 +2116,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (!err)
err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ DMA_BIT_MASK(32));
/* both attempts failed: */
if (err) {
dev_printk(KERN_ERR, &pdev->dev,
@@ -2128,25 +2127,26 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
- dev_printk(KERN_ERR, &pdev->dev, "pci_request_regions failed");
+ dev_printk(KERN_ERR, &pdev->dev,
+ "pci_request_regions failed\n");
goto out_pci_disable_device;
}
trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
if (!trans_pcie->hw_base) {
- dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed");
+ dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed\n");
err = -ENODEV;
goto out_pci_release_regions;
}
dev_printk(KERN_INFO, &pdev->dev,
- "pci_resource_len = 0x%08llx\n",
- (unsigned long long) pci_resource_len(pdev, 0));
+ "pci_resource_len = 0x%08llx\n",
+ (unsigned long long) pci_resource_len(pdev, 0));
dev_printk(KERN_INFO, &pdev->dev,
- "pci_resource_base = %p\n", trans_pcie->hw_base);
+ "pci_resource_base = %p\n", trans_pcie->hw_base);
dev_printk(KERN_INFO, &pdev->dev,
- "HW Revision ID = 0x%X\n", pdev->revision);
+ "HW Revision ID = 0x%X\n", pdev->revision);
/* We disable the RETRY_TIMEOUT register (0x41) to keep
* PCI Tx retries from interfering with C3 CPU state */
@@ -2155,7 +2155,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
err = pci_enable_msi(pdev);
if (err)
dev_printk(KERN_ERR, &pdev->dev,
- "pci_enable_msi failed(0X%x)", err);
+ "pci_enable_msi failed(0X%x)\n", err);
trans->dev = &pdev->dev;
trans_pcie->irq = pdev->irq;
@@ -2177,8 +2177,25 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
init_waitqueue_head(&trans->wait_command_queue);
spin_lock_init(&trans->reg_lock);
+ snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
+ "iwl_cmd_pool:%s", dev_name(trans->dev));
+
+ trans->dev_cmd_headroom = 0;
+ trans->dev_cmd_pool =
+ kmem_cache_create(trans->dev_cmd_pool_name,
+ sizeof(struct iwl_device_cmd)
+ + trans->dev_cmd_headroom,
+ sizeof(void *),
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+
+ if (!trans->dev_cmd_pool)
+ goto out_pci_disable_msi;
+
return trans;
+out_pci_disable_msi:
+ pci_disable_msi(pdev);
out_pci_release_regions:
pci_release_regions(pdev);
out_pci_disable_device:
@@ -2187,4 +2204,3 @@ out_no_pci:
kfree(trans);
return NULL;
}
-
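
[Editor's note — illustrative sketch, not part of the patch] The trans.c hunks above end iwl_trans_pcie_alloc() by creating a per-transport slab cache for device commands, with the cache name derived from the PCI device name and a new error-unwind label for the MSI path. A minimal standalone sketch of that pattern, using hypothetical names (struct my_trans, my_trans_init_cmd_pool) rather than anything defined in this patch:

	#include <linux/slab.h>
	#include <linux/device.h>

	struct my_trans {
		struct device *dev;
		char pool_name[50];
		struct kmem_cache *cmd_pool;
	};

	/* Build a unique cache name from the device name, then create the
	 * pool; the caller is expected to destroy the cache on teardown. */
	static int my_trans_init_cmd_pool(struct my_trans *trans, size_t cmd_size)
	{
		snprintf(trans->pool_name, sizeof(trans->pool_name),
			 "my_cmd_pool:%s", dev_name(trans->dev));

		trans->cmd_pool = kmem_cache_create(trans->pool_name,
						    cmd_size,
						    sizeof(void *),	/* alignment */
						    SLAB_HWCACHE_ALIGN,
						    NULL);
		return trans->cmd_pool ? 0 : -ENOMEM;
	}

Tying the cache name to dev_name() keeps the pools distinguishable in /proc/slabinfo when several devices are present, which appears to be the motivation for the snprintf'd name in the hunk above.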
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 21a8a67..6baf8de 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -34,11 +34,10 @@
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
-#include "iwl-agn-hw.h"
#include "iwl-op-mode.h"
-#include "iwl-trans-pcie-int.h"
+#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
-#include "iwl-commands.h"
+#include "dvm/commands.h"
#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4
@@ -47,12 +46,11 @@
* iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
*/
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_tx_queue *txq,
- u16 byte_cnt)
+ struct iwl_tx_queue *txq,
+ u16 byte_cnt)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl;
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int write_ptr = txq->q.write_ptr;
int txq_id = txq->q.id;
u8 sec_ctl = 0;
@@ -178,8 +176,8 @@ static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
return tfd->num_tbs & 0x1f;
}
-static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
- struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
+static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
+ struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
int i;
int num_tbs;
@@ -204,33 +202,39 @@ static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
for (i = 1; i < num_tbs; i++)
dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
iwl_tfd_tb_get_len(tfd, i), dma_dir);
+
+ tfd->num_tbs = 0;
}
/**
- * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
* @trans - transport private data
* @txq - tx queue
- * @index - the index of the TFD to be freed
- *@dma_dir - the direction of the DMA mapping
+ * @dma_dir - the direction of the DMA mapping
*
* Does NOT advance any TFD circular buffer read/write indexes
* Does NOT free the TFD itself (which is within circular buffer)
*/
-void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
- int index, enum dma_data_direction dma_dir)
+void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
+ enum dma_data_direction dma_dir)
{
struct iwl_tfd *tfd_tmp = txq->tfds;
+ /* rd_ptr is bounded by n_bd and idx is bounded by n_window */
+ int rd_ptr = txq->q.read_ptr;
+ int idx = get_cmd_index(&txq->q, rd_ptr);
+
lockdep_assert_held(&txq->lock);
- iwlagn_unmap_tfd(trans, &txq->entries[index].meta,
- &tfd_tmp[index], dma_dir);
+ /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
+ iwl_unmap_tfd(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
+ dma_dir);
/* free SKB */
if (txq->entries) {
struct sk_buff *skb;
- skb = txq->entries[index].skb;
+ skb = txq->entries[idx].skb;
/* Can be called from irqs-disabled context
* If skb is not NULL, it means that the whole queue is being
@@ -238,7 +242,7 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
*/
if (skb) {
iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[index].skb = NULL;
+ txq->entries[idx].skb = NULL;
}
}
}
@@ -264,7 +268,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
/* Each TFD can point to a maximum 20 Tx buffers */
if (num_tbs >= IWL_NUM_OF_TBS) {
IWL_ERR(trans, "Error can not send more than %d chunks\n",
- IWL_NUM_OF_TBS);
+ IWL_NUM_OF_TBS);
return -EINVAL;
}
@@ -273,7 +277,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
if (unlikely(addr & ~IWL_TX_DMA_MASK))
IWL_ERR(trans, "Unaligned address = %llx\n",
- (unsigned long long)addr);
+ (unsigned long long)addr);
iwl_tfd_set_tb(tfd, num_tbs, addr, len);
@@ -376,16 +380,14 @@ static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}
-static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
- u16 txq_id)
+static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
+ u16 txq_id)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 tbl_dw_addr;
u32 tbl_dw;
u16 scd_q2ratid;
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-
scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
tbl_dw_addr = trans_pcie->scd_base_addr +
@@ -403,7 +405,7 @@ static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
return 0;
}
-static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
+static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id)
{
/* Simply stop the queue, but don't change any configuration;
* the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
@@ -413,102 +415,87 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
-void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
- int txq_id, u32 index)
-{
- IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d\n", txq_id, index & 0xff);
- iwl_write_direct32(trans, HBUS_TARG_WRPTR,
- (index & 0xff) | (txq_id << 8));
- iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
-}
-
-void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, bool active)
-{
- int txq_id = txq->q.id;
-
- iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
- (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
- (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
- (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
- SCD_QUEUE_STTS_REG_MSK);
-
- if (active)
- IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n",
- txq_id, tx_fifo_id);
- else
- IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
-}
-
-void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int txq_id, int fifo,
- int sta_id, int tid, int frame_limit, u16 ssn)
+void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
+ int sta_id, int tid, int frame_limit, u16 ssn)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
- u16 ra_tid = BUILD_RAxTID(sta_id, tid);
if (test_and_set_bit(txq_id, trans_pcie->queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-
/* Stop this Tx queue before configuring it */
- iwlagn_tx_queue_stop_scheduler(trans, txq_id);
+ iwl_txq_set_inactive(trans, txq_id);
- /* Map receiver-address / traffic-ID to this queue */
- iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
+ /* Set this queue as a chain-building queue unless it is CMD queue */
+ if (txq_id != trans_pcie->cmd_queue)
+ iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
- /* Set this queue as a chain-building queue */
- iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
+ /* If this queue is mapped to a certain station: it is an AGG queue */
+ if (sta_id != IWL_INVALID_STATION) {
+ u16 ra_tid = BUILD_RAxTID(sta_id, tid);
- /* enable aggregations for the queue */
- iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+ /* Map receiver-address / traffic-ID to this queue */
+ iwl_txq_set_ratid_map(trans, ra_tid, txq_id);
+
+ /* enable aggregations for the queue */
+ iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+ } else {
+ /*
+ * disable aggregations for the queue, this will also make the
+ * ra_tid mapping configuration irrelevant since it is now a
+ * non-AGG queue.
+ */
+ iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+ }
/* Place first TFD at index corresponding to start sequence number.
* Assumes that ssn_idx is valid (!= 0xFFF) */
trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
- iwl_trans_set_wr_ptrs(trans, txq_id, ssn);
+
+ iwl_write_direct32(trans, HBUS_TARG_WRPTR,
+ (ssn & 0xff) | (txq_id << 8));
+ iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
/* Set up Tx window size and frame limit for this queue */
iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
+ SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
+ iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
- iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
-
/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
- iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
- fifo, true);
-
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
+ (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
+ (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
+ SCD_QUEUE_STTS_REG_MSK);
+ IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
+ txq_id, fifo, ssn & 0xff);
}
-void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int txq_id)
+void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u16 rd_ptr, wr_ptr;
+ int n_bd = trans_pcie->txq[txq_id].q.n_bd;
if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
WARN_ONCE(1, "queue %d not used", txq_id);
return;
}
- iwlagn_tx_queue_stop_scheduler(trans, txq_id);
-
- iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+ rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1);
+ wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id));
- trans_pcie->txq[txq_id].q.read_ptr = 0;
- trans_pcie->txq[txq_id].q.write_ptr = 0;
- iwl_trans_set_wr_ptrs(trans, txq_id, 0);
+ WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]",
+ txq_id, rd_ptr, wr_ptr);
- iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, BIT(txq_id));
-
- iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
- 0, false);
+ iwl_txq_set_inactive(trans, txq_id);
+ IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
@@ -609,13 +596,13 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
}
IWL_DEBUG_HC(trans,
- "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
- trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
- out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
- q->write_ptr, idx, trans_pcie->cmd_queue);
+ "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+ trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
+ out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
+ cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
- DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
idx = -ENOMEM;
goto out;
@@ -624,8 +611,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
dma_unmap_addr_set(out_meta, mapping, phys_addr);
dma_unmap_len_set(out_meta, len, copy_size);
- iwlagn_txq_attach_buf_to_tfd(trans, txq,
- phys_addr, copy_size, 1);
+ iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
trace_bufs[0] = &out_cmd->hdr;
trace_lens[0] = copy_size;
@@ -637,13 +623,12 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
continue;
if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
continue;
- phys_addr = dma_map_single(trans->dev,
- (void *)cmd->data[i],
+ phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i],
cmd->len[i], DMA_BIDIRECTIONAL);
if (dma_mapping_error(trans->dev, phys_addr)) {
- iwlagn_unmap_tfd(trans, out_meta,
- &txq->tfds[q->write_ptr],
- DMA_BIDIRECTIONAL);
+ iwl_unmap_tfd(trans, out_meta,
+ &txq->tfds[q->write_ptr],
+ DMA_BIDIRECTIONAL);
idx = -ENOMEM;
goto out;
}
@@ -717,9 +702,10 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
lockdep_assert_held(&txq->lock);
if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
- IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
- "index %d is out of range [0-%d] %d %d.\n", __func__,
- txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
+ IWL_ERR(trans,
+ "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
+ __func__, txq_id, idx, q->n_bd,
+ q->write_ptr, q->read_ptr);
return;
}
@@ -727,8 +713,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
if (nfreed++ > 0) {
- IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
- q->write_ptr, q->read_ptr);
+ IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
+ idx, q->write_ptr, q->read_ptr);
iwl_op_mode_nic_error(trans->op_mode);
}
@@ -765,9 +751,9 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
* in the queue management code. */
if (WARN(txq_id != trans_pcie->cmd_queue,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, trans_pcie->cmd_queue, sequence,
- trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
- trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
+ txq_id, trans_pcie->cmd_queue, sequence,
+ trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
+ trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
}
@@ -778,8 +764,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
cmd = txq->entries[cmd_index].cmd;
meta = &txq->entries[cmd_index].meta;
- iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
- DMA_BIDIRECTIONAL);
+ iwl_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
@@ -864,8 +849,9 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
}
ret = wait_event_timeout(trans->wait_command_queue,
- !test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status),
- HOST_COMPLETE_TIMEOUT);
+ !test_bit(STATUS_HCMD_ACTIVE,
+ &trans_pcie->status),
+ HOST_COMPLETE_TIMEOUT);
if (!ret) {
if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
struct iwl_tx_queue *txq =
@@ -950,10 +936,10 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
if ((index >= q->n_bd) ||
(iwl_queue_used(q, last_to_free) == 0)) {
- IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
- "last_to_free %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, last_to_free, q->n_bd,
- q->write_ptr, q->read_ptr);
+ IWL_ERR(trans,
+ "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
+ __func__, txq_id, last_to_free, q->n_bd,
+ q->write_ptr, q->read_ptr);
return 0;
}
@@ -973,7 +959,7 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
- iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
+ iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
freed++;
}
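
[Editor's note — illustrative sketch, not part of the patch] The tx.c hunks above fold the old tx_agg_setup/tx_agg_disable pair into generic txq_enable/txq_disable transport ops: a queue mapped to a valid station/TID gets the RA/TID mapping and aggregation enabled, while sta_id == IWL_INVALID_STATION configures a plain non-AGG queue. A hedged sketch of how a caller might drive the renamed ops, assuming direct use of trans->ops (the convenience wrappers live in iwl-trans.h, which is not part of the hunks shown here):

	/* Hypothetical call sites; signatures match the ops added above. */
	static void example_queue_start(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn)
	{
		/* For an aggregation queue pass a real sta_id/tid; for a
		 * plain queue pass IWL_INVALID_STATION as sta_id. */
		trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
				       frame_limit, ssn);
	}

	static void example_queue_stop(struct iwl_trans *trans, int queue)
	{
		/* Warns if read/write pointers differ (queue not empty),
		 * then marks the queue inactive in the scheduler. */
		trans->ops->txq_disable(trans, queue);
	}
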
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
deleted file mode 100644
index 7107ce5..0000000
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ /dev/null
@@ -1,39 +0,0 @@
-config IWM
- tristate "Intel Wireless Multicomm 3200 WiFi driver (EXPERIMENTAL)"
- depends on MMC && EXPERIMENTAL
- depends on CFG80211
- select FW_LOADER
- select IWMC3200TOP
- help
- The Intel Wireless Multicomm 3200 hardware is a combo
- card with GPS, Bluetooth, WiMax and 802.11 radios. It
- runs over SDIO and is typically found on Moorestown
- based platform. This driver takes care of the 802.11
- part, which is a fullmac one.
-
- If you choose to build it as a module, it'll be called
- iwmc3200wifi.ko.
-
-config IWM_DEBUG
- bool "Enable full debugging output in iwmc3200wifi"
- depends on IWM && DEBUG_FS
- help
- This option will enable debug tracing and setting for iwm
-
- You can set the debug level and module through debugfs. By
- default all modules are set to the IWL_DL_ERR level.
- To see the list of debug modules and levels, see iwm/debug.h
-
- For example, if you want the full MLME debug output:
- echo 0xff > /sys/kernel/debug/iwm/phyN/debug/mlme
-
- Or, if you want the full debug, for all modules:
- echo 0xff > /sys/kernel/debug/iwm/phyN/debug/level
- echo 0xff > /sys/kernel/debug/iwm/phyN/debug/modules
-
-config IWM_TRACING
- bool "Enable event tracing for iwmc3200wifi"
- depends on IWM && EVENT_TRACING
- help
- Say Y here to trace all the commands and responses between
- the driver and firmware (including TX/RX frames) with ftrace.
diff --git a/drivers/net/wireless/iwmc3200wifi/Makefile b/drivers/net/wireless/iwmc3200wifi/Makefile
deleted file mode 100644
index cdc7e07..0000000
--- a/drivers/net/wireless/iwmc3200wifi/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-obj-$(CONFIG_IWM) := iwmc3200wifi.o
-iwmc3200wifi-objs += main.o netdev.o rx.o tx.o sdio.o hal.o fw.o
-iwmc3200wifi-objs += commands.o cfg80211.o eeprom.o
-
-iwmc3200wifi-$(CONFIG_IWM_DEBUG) += debugfs.o
-iwmc3200wifi-$(CONFIG_IWM_TRACING) += trace.o
-
-CFLAGS_trace.o := -I$(src)
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwmc3200wifi/bus.h b/drivers/net/wireless/iwmc3200wifi/bus.h
deleted file mode 100644
index 62edd58..0000000
--- a/drivers/net/wireless/iwmc3200wifi/bus.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __IWM_BUS_H__
-#define __IWM_BUS_H__
-
-#include "iwm.h"
-
-struct iwm_if_ops {
- int (*enable)(struct iwm_priv *iwm);
- int (*disable)(struct iwm_priv *iwm);
- int (*send_chunk)(struct iwm_priv *iwm, u8* buf, int count);
-
- void (*debugfs_init)(struct iwm_priv *iwm, struct dentry *parent_dir);
- void (*debugfs_exit)(struct iwm_priv *iwm);
-
- const char *umac_name;
- const char *calib_lmac_name;
- const char *lmac_name;
-};
-
-static inline int iwm_bus_send_chunk(struct iwm_priv *iwm, u8 *buf, int count)
-{
- return iwm->bus_ops->send_chunk(iwm, buf, count);
-}
-
-static inline int iwm_bus_enable(struct iwm_priv *iwm)
-{
- return iwm->bus_ops->enable(iwm);
-}
-
-static inline int iwm_bus_disable(struct iwm_priv *iwm)
-{
- return iwm->bus_ops->disable(iwm);
-}
-
-#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
deleted file mode 100644
index 48e8218..0000000
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ /dev/null
@@ -1,882 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <linux/etherdevice.h>
-#include <linux/wireless.h>
-#include <linux/ieee80211.h>
-#include <linux/slab.h>
-#include <net/cfg80211.h>
-
-#include "iwm.h"
-#include "commands.h"
-#include "cfg80211.h"
-#include "debug.h"
-
-#define RATETAB_ENT(_rate, _rateid, _flags) \
- { \
- .bitrate = (_rate), \
- .hw_value = (_rateid), \
- .flags = (_flags), \
- }
-
-#define CHAN2G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_2GHZ, \
- .center_freq = (_freq), \
- .hw_value = (_channel), \
- .flags = (_flags), \
- .max_antenna_gain = 0, \
- .max_power = 30, \
-}
-
-#define CHAN5G(_channel, _flags) { \
- .band = IEEE80211_BAND_5GHZ, \
- .center_freq = 5000 + (5 * (_channel)), \
- .hw_value = (_channel), \
- .flags = (_flags), \
- .max_antenna_gain = 0, \
- .max_power = 30, \
-}
-
-static struct ieee80211_rate iwm_rates[] = {
- RATETAB_ENT(10, 0x1, 0),
- RATETAB_ENT(20, 0x2, 0),
- RATETAB_ENT(55, 0x4, 0),
- RATETAB_ENT(110, 0x8, 0),
- RATETAB_ENT(60, 0x10, 0),
- RATETAB_ENT(90, 0x20, 0),
- RATETAB_ENT(120, 0x40, 0),
- RATETAB_ENT(180, 0x80, 0),
- RATETAB_ENT(240, 0x100, 0),
- RATETAB_ENT(360, 0x200, 0),
- RATETAB_ENT(480, 0x400, 0),
- RATETAB_ENT(540, 0x800, 0),
-};
-
-#define iwm_a_rates (iwm_rates + 4)
-#define iwm_a_rates_size 8
-#define iwm_g_rates (iwm_rates + 0)
-#define iwm_g_rates_size 12
-
-static struct ieee80211_channel iwm_2ghz_channels[] = {
- CHAN2G(1, 2412, 0),
- CHAN2G(2, 2417, 0),
- CHAN2G(3, 2422, 0),
- CHAN2G(4, 2427, 0),
- CHAN2G(5, 2432, 0),
- CHAN2G(6, 2437, 0),
- CHAN2G(7, 2442, 0),
- CHAN2G(8, 2447, 0),
- CHAN2G(9, 2452, 0),
- CHAN2G(10, 2457, 0),
- CHAN2G(11, 2462, 0),
- CHAN2G(12, 2467, 0),
- CHAN2G(13, 2472, 0),
- CHAN2G(14, 2484, 0),
-};
-
-static struct ieee80211_channel iwm_5ghz_a_channels[] = {
- CHAN5G(34, 0), CHAN5G(36, 0),
- CHAN5G(38, 0), CHAN5G(40, 0),
- CHAN5G(42, 0), CHAN5G(44, 0),
- CHAN5G(46, 0), CHAN5G(48, 0),
- CHAN5G(52, 0), CHAN5G(56, 0),
- CHAN5G(60, 0), CHAN5G(64, 0),
- CHAN5G(100, 0), CHAN5G(104, 0),
- CHAN5G(108, 0), CHAN5G(112, 0),
- CHAN5G(116, 0), CHAN5G(120, 0),
- CHAN5G(124, 0), CHAN5G(128, 0),
- CHAN5G(132, 0), CHAN5G(136, 0),
- CHAN5G(140, 0), CHAN5G(149, 0),
- CHAN5G(153, 0), CHAN5G(157, 0),
- CHAN5G(161, 0), CHAN5G(165, 0),
- CHAN5G(184, 0), CHAN5G(188, 0),
- CHAN5G(192, 0), CHAN5G(196, 0),
- CHAN5G(200, 0), CHAN5G(204, 0),
- CHAN5G(208, 0), CHAN5G(212, 0),
- CHAN5G(216, 0),
-};
-
-static struct ieee80211_supported_band iwm_band_2ghz = {
- .channels = iwm_2ghz_channels,
- .n_channels = ARRAY_SIZE(iwm_2ghz_channels),
- .bitrates = iwm_g_rates,
- .n_bitrates = iwm_g_rates_size,
-};
-
-static struct ieee80211_supported_band iwm_band_5ghz = {
- .channels = iwm_5ghz_a_channels,
- .n_channels = ARRAY_SIZE(iwm_5ghz_a_channels),
- .bitrates = iwm_a_rates,
- .n_bitrates = iwm_a_rates_size,
-};
-
-static int iwm_key_init(struct iwm_key *key, u8 key_index,
- const u8 *mac_addr, struct key_params *params)
-{
- key->hdr.key_idx = key_index;
- if (!mac_addr || is_broadcast_ether_addr(mac_addr)) {
- key->hdr.multicast = 1;
- memset(key->hdr.mac, 0xff, ETH_ALEN);
- } else {
- key->hdr.multicast = 0;
- memcpy(key->hdr.mac, mac_addr, ETH_ALEN);
- }
-
- if (params) {
- if (params->key_len > WLAN_MAX_KEY_LEN ||
- params->seq_len > IW_ENCODE_SEQ_MAX_SIZE)
- return -EINVAL;
-
- key->cipher = params->cipher;
- key->key_len = params->key_len;
- key->seq_len = params->seq_len;
- memcpy(key->key, params->key, key->key_len);
- memcpy(key->seq, params->seq, key->seq_len);
- }
-
- return 0;
-}
-
-static int iwm_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
-{
- struct iwm_priv *iwm = ndev_to_iwm(ndev);
- struct iwm_key *key;
- int ret;
-
- IWM_DBG_WEXT(iwm, DBG, "Adding key for %pM\n", mac_addr);
-
- if (key_index >= IWM_NUM_KEYS)
- return -ENOENT;
-
- key = &iwm->keys[key_index];
- memset(key, 0, sizeof(struct iwm_key));
- ret = iwm_key_init(key, key_index, mac_addr, params);
- if (ret < 0) {
- IWM_ERR(iwm, "Invalid key_params\n");
- return ret;
- }
-
- return iwm_set_key(iwm, 0, key);
-}
-
-static int iwm_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- void *cookie,
- void (*callback)(void *cookie,
- struct key_params*))
-{
- struct iwm_priv *iwm = ndev_to_iwm(ndev);
- struct iwm_key *key;
- struct key_params params;
-
- IWM_DBG_WEXT(iwm, DBG, "Getting key %d\n", key_index);
-
- if (key_index >= IWM_NUM_KEYS)
- return -ENOENT;
-
- memset(&params, 0, sizeof(params));
-
- key = &iwm->keys[key_index];
- params.cipher = key->cipher;
- params.key_len = key->key_len;
- params.seq_len = key->seq_len;
- params.seq = key->seq;
- params.key = key->key;
-
- callback(cookie, &params);
-
- return key->key_len ? 0 : -ENOENT;
-}
-
-
-static int iwm_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise, const u8 *mac_addr)
-{
- struct iwm_priv *iwm = ndev_to_iwm(ndev);
- struct iwm_key *key;
-
- if (key_index >= IWM_NUM_KEYS)
- return -ENOENT;
-
- key = &iwm->keys[key_index];
- if (!iwm->keys[key_index].key_len) {
- IWM_DBG_WEXT(iwm, DBG, "Key %d not used\n", key_index);
- return 0;
- }
-
- if (key_index == iwm->default_key)
- iwm->default_key = -1;
-
- return iwm_set_key(iwm, 1, key);
-}
-
-static int iwm_cfg80211_set_default_key(struct wiphy *wiphy,
- struct net_device *ndev,
- u8 key_index, bool unicast,
- bool multicast)
-{
- struct iwm_priv *iwm = ndev_to_iwm(ndev);
-
- IWM_DBG_WEXT(iwm, DBG, "Default key index is: %d\n", key_index);
-
- if (key_index >= IWM_NUM_KEYS)
- return -ENOENT;
-
- if (!iwm->keys[key_index].key_len) {
- IWM_ERR(iwm, "Key %d not used\n", key_index);
- return -EINVAL;
- }
-
- iwm->default_key = key_index;
-
- return iwm_set_tx_key(iwm, key_index);
-}
-
-static int iwm_cfg80211_get_station(struct wiphy *wiphy,
- struct net_device *ndev,
- u8 *mac, struct station_info *sinfo)
-{
- struct iwm_priv *iwm = ndev_to_iwm(ndev);
-
- if (memcmp(mac, iwm->bssid, ETH_ALEN))
- return -ENOENT;
-
- sinfo->filled |= STATION_INFO_TX_BITRATE;
- sinfo->txrate.legacy = iwm->rate * 10;
-
- if (test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
- sinfo->filled |= STATION_INFO_SIGNAL;
- sinfo->signal = iwm->wstats.qual.level;
- }
-
- return 0;
-}
-
-
-int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
-{
- struct wiphy *wiphy = iwm_to_wiphy(iwm);
- struct iwm_bss_info *bss;
- struct iwm_umac_notif_bss_info *umac_bss;
- struct ieee80211_mgmt *mgmt;
- struct ieee80211_channel *channel;
- struct ieee80211_supported_band *band;
- s32 signal;
- int freq;
-
- list_for_each_entry(bss, &iwm->bss_list, node) {
- umac_bss = bss->bss;
- mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf);
-
- if (umac_bss->band == UMAC_BAND_2GHZ)
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
- else if (umac_bss->band == UMAC_BAND_5GHZ)
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
- else {
- IWM_ERR(iwm, "Invalid band: %d\n", umac_bss->band);
- return -EINVAL;
- }
-
- freq = ieee80211_channel_to_frequency(umac_bss->channel,
- band->band);
- channel = ieee80211_get_channel(wiphy, freq);
- signal = umac_bss->rssi * 100;
-
- if (!cfg80211_inform_bss_frame(wiphy, channel, mgmt,
- le16_to_cpu(umac_bss->frame_len),
- signal, GFP_KERNEL))
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int iwm_cfg80211_change_iface(struct wiphy *wiphy,
- struct net_device *ndev,
- enum nl80211_iftype type, u32 *flags,
- struct vif_params *params)
-{
- struct wireless_dev *wdev;
- struct iwm_priv *iwm;
- u32 old_mode;
-
- wdev = ndev->ieee80211_ptr;
- iwm = ndev_to_iwm(ndev);
- old_mode = iwm->conf.mode;
-
- switch (type) {
- case NL80211_IFTYPE_STATION:
- iwm->conf.mode = UMAC_MODE_BSS;
- break;
- case NL80211_IFTYPE_ADHOC:
- iwm->conf.mode = UMAC_MODE_IBSS;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- wdev->iftype = type;
-
- if ((old_mode == iwm->conf.mode) || !iwm->umac_profile)
- return 0;
-
- iwm->umac_profile->mode = cpu_to_le32(iwm->conf.mode);
-
- if (iwm->umac_profile_active)
- iwm_invalidate_mlme_profile(iwm);
-
- return 0;
-}
-
-static int iwm_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request)
-{
- struct iwm_priv *iwm = ndev_to_iwm(ndev);
- int ret;
-
- if (!test_bit(IWM_STATUS_READY, &iwm->status)) {
- IWM_ERR(iwm, "Scan while device is not ready\n");
- return -EIO;
- }
-
- if (test_bit(IWM_STATUS_SCANNING, &iwm->status)) {
- IWM_ERR(iwm, "Scanning already\n");
- return -EAGAIN;
- }
-
- if (test_bit(IWM_STATUS_SCAN_ABORTING, &iwm->status)) {
- IWM_ERR(iwm, "Scanning being aborted\n");
- return -EAGAIN;
- }
-
- set_bit(IWM_STATUS_SCANNING, &iwm->status);
-
- ret = iwm_scan_ssids(iwm, request->ssids, request->n_ssids);
- if (ret) {
- clear_bit(IWM_STATUS_SCANNING, &iwm->status);
- return ret;
- }
-
- iwm->scan_request = request;
- return 0;
-}
-
-static int iwm_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
-
- if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
- (iwm->conf.rts_threshold != wiphy->rts_threshold)) {
- int ret;
-
- iwm->conf.rts_threshold = wiphy->rts_threshold;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_RTS_THRESHOLD,
- iwm->conf.rts_threshold);
- if (ret < 0)
- return ret;
- }
-
- if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
- (iwm->conf.frag_threshold != wiphy->frag_threshold)) {
- int ret;
-
- iwm->conf.frag_threshold = wiphy->frag_threshold;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_FA_CFG_FIX,
- CFG_FRAG_THRESHOLD,
- iwm->conf.frag_threshold);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-static int iwm_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_ibss_params *params)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
- struct ieee80211_channel *chan = params->channel;
-
- if (!test_bit(IWM_STATUS_READY, &iwm->status))
- return -EIO;
-
- /* UMAC doesn't support creating or joining an IBSS network
- * with specified bssid. */
- if (params->bssid)
- return -EOPNOTSUPP;
-
- iwm->channel = ieee80211_frequency_to_channel(chan->center_freq);
- iwm->umac_profile->ibss.band = chan->band;
- iwm->umac_profile->ibss.channel = iwm->channel;
- iwm->umac_profile->ssid.ssid_len = params->ssid_len;
- memcpy(iwm->umac_profile->ssid.ssid, params->ssid, params->ssid_len);
-
- return iwm_send_mlme_profile(iwm);
-}
-
-static int iwm_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
-
- if (iwm->umac_profile_active)
- return iwm_invalidate_mlme_profile(iwm);
-
- return 0;
-}
-
-static int iwm_set_auth_type(struct iwm_priv *iwm,
- enum nl80211_auth_type sme_auth_type)
-{
- u8 *auth_type = &iwm->umac_profile->sec.auth_type;
-
- switch (sme_auth_type) {
- case NL80211_AUTHTYPE_AUTOMATIC:
- case NL80211_AUTHTYPE_OPEN_SYSTEM:
- IWM_DBG_WEXT(iwm, DBG, "OPEN auth\n");
- *auth_type = UMAC_AUTH_TYPE_OPEN;
- break;
- case NL80211_AUTHTYPE_SHARED_KEY:
- if (iwm->umac_profile->sec.flags &
- (UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK)) {
- IWM_DBG_WEXT(iwm, DBG, "WPA auth alg\n");
- *auth_type = UMAC_AUTH_TYPE_RSNA_PSK;
- } else {
- IWM_DBG_WEXT(iwm, DBG, "WEP shared key auth alg\n");
- *auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
- }
-
- break;
- default:
- IWM_ERR(iwm, "Unsupported auth alg: 0x%x\n", sme_auth_type);
- return -ENOTSUPP;
- }
-
- return 0;
-}
-
-static int iwm_set_wpa_version(struct iwm_priv *iwm, u32 wpa_version)
-{
- IWM_DBG_WEXT(iwm, DBG, "wpa_version: %d\n", wpa_version);
-
- if (!wpa_version) {
- iwm->umac_profile->sec.flags = UMAC_SEC_FLG_LEGACY_PROFILE;
- return 0;
- }
-
- if (wpa_version & NL80211_WPA_VERSION_1)
- iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WPA_ON_MSK;
-
- if (wpa_version & NL80211_WPA_VERSION_2)
- iwm->umac_profile->sec.flags = UMAC_SEC_FLG_RSNA_ON_MSK;
-
- return 0;
-}
-
-static int iwm_set_cipher(struct iwm_priv *iwm, u32 cipher, bool ucast)
-{
- u8 *profile_cipher = ucast ? &iwm->umac_profile->sec.ucast_cipher :
- &iwm->umac_profile->sec.mcast_cipher;
-
- if (!cipher) {
- *profile_cipher = UMAC_CIPHER_TYPE_NONE;
- return 0;
- }
-
- IWM_DBG_WEXT(iwm, DBG, "%ccast cipher is 0x%x\n", ucast ? 'u' : 'm',
- cipher);
-
- switch (cipher) {
- case IW_AUTH_CIPHER_NONE:
- *profile_cipher = UMAC_CIPHER_TYPE_NONE;
- break;
- case WLAN_CIPHER_SUITE_WEP40:
- *profile_cipher = UMAC_CIPHER_TYPE_WEP_40;
- break;
- case WLAN_CIPHER_SUITE_WEP104:
- *profile_cipher = UMAC_CIPHER_TYPE_WEP_104;
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- *profile_cipher = UMAC_CIPHER_TYPE_TKIP;
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- *profile_cipher = UMAC_CIPHER_TYPE_CCMP;
- break;
- default:
- IWM_ERR(iwm, "Unsupported cipher: 0x%x\n", cipher);
- return -ENOTSUPP;
- }
-
- return 0;
-}
-
-static int iwm_set_key_mgt(struct iwm_priv *iwm, u32 key_mgt)
-{
- u8 *auth_type = &iwm->umac_profile->sec.auth_type;
-
- IWM_DBG_WEXT(iwm, DBG, "key_mgt: 0x%x\n", key_mgt);
-
- if (key_mgt == WLAN_AKM_SUITE_8021X)
- *auth_type = UMAC_AUTH_TYPE_8021X;
- else if (key_mgt == WLAN_AKM_SUITE_PSK) {
- if (iwm->umac_profile->sec.flags &
- (UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK))
- *auth_type = UMAC_AUTH_TYPE_RSNA_PSK;
- else
- *auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
- } else {
- IWM_ERR(iwm, "Invalid key mgt: 0x%x\n", key_mgt);
- return -EINVAL;
- }
-
- return 0;
-}
-
-
-static int iwm_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_connect_params *sme)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
- struct ieee80211_channel *chan = sme->channel;
- struct key_params key_param;
- int ret;
-
- if (!test_bit(IWM_STATUS_READY, &iwm->status))
- return -EIO;
-
- if (!sme->ssid)
- return -EINVAL;
-
- if (iwm->umac_profile_active) {
- ret = iwm_invalidate_mlme_profile(iwm);
- if (ret) {
- IWM_ERR(iwm, "Couldn't invalidate profile\n");
- return ret;
- }
- }
-
- if (chan)
- iwm->channel =
- ieee80211_frequency_to_channel(chan->center_freq);
-
- iwm->umac_profile->ssid.ssid_len = sme->ssid_len;
- memcpy(iwm->umac_profile->ssid.ssid, sme->ssid, sme->ssid_len);
-
- if (sme->bssid) {
- IWM_DBG_WEXT(iwm, DBG, "BSSID: %pM\n", sme->bssid);
- memcpy(&iwm->umac_profile->bssid[0], sme->bssid, ETH_ALEN);
- iwm->umac_profile->bss_num = 1;
- } else {
- memset(&iwm->umac_profile->bssid[0], 0, ETH_ALEN);
- iwm->umac_profile->bss_num = 0;
- }
-
- ret = iwm_set_wpa_version(iwm, sme->crypto.wpa_versions);
- if (ret < 0)
- return ret;
-
- ret = iwm_set_auth_type(iwm, sme->auth_type);
- if (ret < 0)
- return ret;
-
- if (sme->crypto.n_ciphers_pairwise) {
- ret = iwm_set_cipher(iwm, sme->crypto.ciphers_pairwise[0],
- true);
- if (ret < 0)
- return ret;
- }
-
- ret = iwm_set_cipher(iwm, sme->crypto.cipher_group, false);
- if (ret < 0)
- return ret;
-
- if (sme->crypto.n_akm_suites) {
- ret = iwm_set_key_mgt(iwm, sme->crypto.akm_suites[0]);
- if (ret < 0)
- return ret;
- }
-
- /*
- * We save the WEP key in case we want to do shared authentication.
- * We have to do it so because UMAC will assert whenever it gets a
- * key before a profile.
- */
- if (sme->key) {
- key_param.key = kmemdup(sme->key, sme->key_len, GFP_KERNEL);
- if (key_param.key == NULL)
- return -ENOMEM;
- key_param.key_len = sme->key_len;
- key_param.seq_len = 0;
- key_param.cipher = sme->crypto.ciphers_pairwise[0];
-
- ret = iwm_key_init(&iwm->keys[sme->key_idx], sme->key_idx,
- NULL, &key_param);
- kfree(key_param.key);
- if (ret < 0) {
- IWM_ERR(iwm, "Invalid key_params\n");
- return ret;
- }
-
- iwm->default_key = sme->key_idx;
- }
-
- /* WPA and open AUTH type from wpa_s means WPS (a.k.a. WSC) */
- if ((iwm->umac_profile->sec.flags &
- (UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK)) &&
- iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN) {
- iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WSC_ON_MSK;
- }
-
- ret = iwm_send_mlme_profile(iwm);
-
- if (iwm->umac_profile->sec.auth_type != UMAC_AUTH_TYPE_LEGACY_PSK ||
- sme->key == NULL)
- return ret;
-
- /*
- * We want to do shared auth.
- * We need to actually set the key we previously cached,
- * and then tell the UMAC it's the default one.
- * That will trigger the auth+assoc UMAC machinery, and again,
- * this must be done after setting the profile.
- */
- ret = iwm_set_key(iwm, 0, &iwm->keys[sme->key_idx]);
- if (ret < 0)
- return ret;
-
- return iwm_set_tx_key(iwm, iwm->default_key);
-}
-
-static int iwm_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
- u16 reason_code)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
-
- IWM_DBG_WEXT(iwm, DBG, "Active: %d\n", iwm->umac_profile_active);
-
- if (iwm->umac_profile_active)
- iwm_invalidate_mlme_profile(iwm);
-
- return 0;
-}
-
-static int iwm_cfg80211_set_txpower(struct wiphy *wiphy,
- enum nl80211_tx_power_setting type, int mbm)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
- int ret;
-
- switch (type) {
- case NL80211_TX_POWER_AUTOMATIC:
- return 0;
- case NL80211_TX_POWER_FIXED:
- if (mbm < 0 || (mbm % 100))
- return -EOPNOTSUPP;
-
- if (!test_bit(IWM_STATUS_READY, &iwm->status))
- return 0;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_TX_PWR_LIMIT_USR,
- MBM_TO_DBM(mbm) * 2);
- if (ret < 0)
- return ret;
-
- return iwm_tx_power_trigger(iwm);
- default:
- IWM_ERR(iwm, "Unsupported power type: %d\n", type);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int iwm_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
-
- *dbm = iwm->txpower >> 1;
-
- return 0;
-}
-
-static int iwm_cfg80211_set_power_mgmt(struct wiphy *wiphy,
- struct net_device *dev,
- bool enabled, int timeout)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
- u32 power_index;
-
- if (enabled)
- power_index = IWM_POWER_INDEX_DEFAULT;
- else
- power_index = IWM_POWER_INDEX_MIN;
-
- if (power_index == iwm->conf.power_index)
- return 0;
-
- iwm->conf.power_index = power_index;
-
- return iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_POWER_INDEX, iwm->conf.power_index);
-}
-
-static int iwm_cfg80211_set_pmksa(struct wiphy *wiphy,
- struct net_device *netdev,
- struct cfg80211_pmksa *pmksa)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
-
- return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_ADD);
-}
-
-static int iwm_cfg80211_del_pmksa(struct wiphy *wiphy,
- struct net_device *netdev,
- struct cfg80211_pmksa *pmksa)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
-
- return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_DEL);
-}
-
-static int iwm_cfg80211_flush_pmksa(struct wiphy *wiphy,
- struct net_device *netdev)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
- struct cfg80211_pmksa pmksa;
-
- memset(&pmksa, 0, sizeof(struct cfg80211_pmksa));
-
- return iwm_send_pmkid_update(iwm, &pmksa, IWM_CMD_PMKID_FLUSH);
-}
-
-
-static struct cfg80211_ops iwm_cfg80211_ops = {
- .change_virtual_intf = iwm_cfg80211_change_iface,
- .add_key = iwm_cfg80211_add_key,
- .get_key = iwm_cfg80211_get_key,
- .del_key = iwm_cfg80211_del_key,
- .set_default_key = iwm_cfg80211_set_default_key,
- .get_station = iwm_cfg80211_get_station,
- .scan = iwm_cfg80211_scan,
- .set_wiphy_params = iwm_cfg80211_set_wiphy_params,
- .connect = iwm_cfg80211_connect,
- .disconnect = iwm_cfg80211_disconnect,
- .join_ibss = iwm_cfg80211_join_ibss,
- .leave_ibss = iwm_cfg80211_leave_ibss,
- .set_tx_power = iwm_cfg80211_set_txpower,
- .get_tx_power = iwm_cfg80211_get_txpower,
- .set_power_mgmt = iwm_cfg80211_set_power_mgmt,
- .set_pmksa = iwm_cfg80211_set_pmksa,
- .del_pmksa = iwm_cfg80211_del_pmksa,
- .flush_pmksa = iwm_cfg80211_flush_pmksa,
-};
-
-static const u32 cipher_suites[] = {
- WLAN_CIPHER_SUITE_WEP40,
- WLAN_CIPHER_SUITE_WEP104,
- WLAN_CIPHER_SUITE_TKIP,
- WLAN_CIPHER_SUITE_CCMP,
-};
-
-struct wireless_dev *iwm_wdev_alloc(int sizeof_bus, struct device *dev)
-{
- int ret = 0;
- struct wireless_dev *wdev;
-
- /*
- * We're trying to have the following memory
- * layout:
- *
- * +-------------------------+
- * | struct wiphy |
- * +-------------------------+
- * | struct iwm_priv |
- * +-------------------------+
- * | bus private data |
- * | (e.g. iwm_priv_sdio) |
- * +-------------------------+
- *
- */
-
- wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
- if (!wdev) {
- dev_err(dev, "Couldn't allocate wireless device\n");
- return ERR_PTR(-ENOMEM);
- }
-
- wdev->wiphy = wiphy_new(&iwm_cfg80211_ops,
- sizeof(struct iwm_priv) + sizeof_bus);
- if (!wdev->wiphy) {
- dev_err(dev, "Couldn't allocate wiphy device\n");
- ret = -ENOMEM;
- goto out_err_new;
- }
-
- set_wiphy_dev(wdev->wiphy, dev);
- wdev->wiphy->max_scan_ssids = UMAC_WIFI_IF_PROBE_OPTION_MAX;
- wdev->wiphy->max_num_pmkids = UMAC_MAX_NUM_PMKIDS;
- wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC);
- wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &iwm_band_2ghz;
- wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &iwm_band_5ghz;
- wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
-
- wdev->wiphy->cipher_suites = cipher_suites;
- wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
-
- ret = wiphy_register(wdev->wiphy);
- if (ret < 0) {
- dev_err(dev, "Couldn't register wiphy device\n");
- goto out_err_register;
- }
-
- return wdev;
-
- out_err_register:
- wiphy_free(wdev->wiphy);
-
- out_err_new:
- kfree(wdev);
-
- return ERR_PTR(ret);
-}
-
-void iwm_wdev_free(struct iwm_priv *iwm)
-{
- struct wireless_dev *wdev = iwm_to_wdev(iwm);
-
- if (!wdev)
- return;
-
- wiphy_unregister(wdev->wiphy);
- wiphy_free(wdev->wiphy);
- kfree(wdev);
-}
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.h b/drivers/net/wireless/iwmc3200wifi/cfg80211.h
deleted file mode 100644
index 56a3414..0000000
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __IWM_CFG80211_H__
-#define __IWM_CFG80211_H__
-
-int iwm_cfg80211_inform_bss(struct iwm_priv *iwm);
-struct wireless_dev *iwm_wdev_alloc(int sizeof_bus, struct device *dev);
-void iwm_wdev_free(struct iwm_priv *iwm);
-
-#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
deleted file mode 100644
index bd75078..0000000
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ /dev/null
@@ -1,1002 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/wireless.h>
-#include <linux/etherdevice.h>
-#include <linux/ieee80211.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/moduleparam.h>
-
-#include "iwm.h"
-#include "bus.h"
-#include "hal.h"
-#include "umac.h"
-#include "commands.h"
-#include "debug.h"
-
-static int iwm_send_lmac_ptrough_cmd(struct iwm_priv *iwm,
- u8 lmac_cmd_id,
- const void *lmac_payload,
- u16 lmac_payload_size,
- u8 resp)
-{
- struct iwm_udma_wifi_cmd udma_cmd = UDMA_LMAC_INIT;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_lmac_cmd lmac_cmd;
-
- lmac_cmd.id = lmac_cmd_id;
-
- umac_cmd.id = UMAC_CMD_OPCODE_WIFI_PASS_THROUGH;
- umac_cmd.resp = resp;
-
- return iwm_hal_send_host_cmd(iwm, &udma_cmd, &umac_cmd, &lmac_cmd,
- lmac_payload, lmac_payload_size);
-}
-
-int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size,
- bool resp)
-{
- struct iwm_umac_wifi_if *hdr = (struct iwm_umac_wifi_if *)payload;
- struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
- struct iwm_umac_cmd umac_cmd;
- int ret;
- u8 oid = hdr->oid;
-
- if (!test_bit(IWM_STATUS_READY, &iwm->status)) {
- IWM_ERR(iwm, "Interface is not ready yet");
- return -EAGAIN;
- }
-
- umac_cmd.id = UMAC_CMD_OPCODE_WIFI_IF_WRAPPER;
- umac_cmd.resp = resp;
-
- ret = iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd,
- payload, payload_size);
-
- if (resp) {
- ret = wait_event_interruptible_timeout(iwm->wifi_ntfy_queue,
- test_and_clear_bit(oid, &iwm->wifi_ntfy[0]),
- 3 * HZ);
-
- return ret ? 0 : -EBUSY;
- }
-
- return ret;
-}
-
-static int modparam_wiwi = COEX_MODE_CM;
-module_param_named(wiwi, modparam_wiwi, int, 0644);
-MODULE_PARM_DESC(wiwi, "Wifi-WiMAX coexistence: 1=SA, 2=XOR, 3=CM (default)");
-
-static struct coex_event iwm_sta_xor_prio_tbl[COEX_EVENTS_NUM] =
-{
- {4, 3, 0, COEX_UNASSOC_IDLE_FLAGS},
- {4, 3, 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
- {4, 3, 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
- {4, 3, 0, COEX_CALIBRATION_FLAGS},
- {4, 3, 0, COEX_PERIODIC_CALIBRATION_FLAGS},
- {4, 3, 0, COEX_CONNECTION_ESTAB_FLAGS},
- {4, 3, 0, COEX_ASSOCIATED_IDLE_FLAGS},
- {4, 3, 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
- {4, 3, 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
- {4, 3, 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
- {6, 3, 0, COEX_XOR_RF_ON_FLAGS},
- {4, 3, 0, COEX_RF_OFF_FLAGS},
- {6, 6, 0, COEX_STAND_ALONE_DEBUG_FLAGS},
- {4, 3, 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
- {4, 3, 0, COEX_RSRVD1_FLAGS},
- {4, 3, 0, COEX_RSRVD2_FLAGS}
-};
-
-static struct coex_event iwm_sta_cm_prio_tbl[COEX_EVENTS_NUM] =
-{
- {1, 1, 0, COEX_UNASSOC_IDLE_FLAGS},
- {4, 4, 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
- {3, 3, 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
- {6, 6, 0, COEX_CALIBRATION_FLAGS},
- {3, 3, 0, COEX_PERIODIC_CALIBRATION_FLAGS},
- {6, 5, 0, COEX_CONNECTION_ESTAB_FLAGS},
- {4, 4, 0, COEX_ASSOCIATED_IDLE_FLAGS},
- {4, 4, 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
- {4, 4, 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
- {4, 4, 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
- {1, 1, 0, COEX_RF_ON_FLAGS},
- {1, 1, 0, COEX_RF_OFF_FLAGS},
- {7, 7, 0, COEX_STAND_ALONE_DEBUG_FLAGS},
- {5, 4, 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
- {1, 1, 0, COEX_RSRVD1_FLAGS},
- {1, 1, 0, COEX_RSRVD2_FLAGS}
-};
-
-int iwm_send_prio_table(struct iwm_priv *iwm)
-{
- struct iwm_coex_prio_table_cmd coex_table_cmd;
- u32 coex_enabled, mode_enabled;
-
- memset(&coex_table_cmd, 0, sizeof(struct iwm_coex_prio_table_cmd));
-
- coex_table_cmd.flags = COEX_FLAGS_STA_TABLE_VALID_MSK;
-
- switch (modparam_wiwi) {
- case COEX_MODE_XOR:
- case COEX_MODE_CM:
- coex_enabled = 1;
- break;
- default:
- coex_enabled = 0;
- break;
- }
-
- switch (iwm->conf.mode) {
- case UMAC_MODE_BSS:
- case UMAC_MODE_IBSS:
- mode_enabled = 1;
- break;
- default:
- mode_enabled = 0;
- break;
- }
-
- if (coex_enabled && mode_enabled) {
- coex_table_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK |
- COEX_FLAGS_ASSOC_WAKEUP_UMASK_MSK |
- COEX_FLAGS_UNASSOC_WAKEUP_UMASK_MSK;
-
- switch (modparam_wiwi) {
- case COEX_MODE_XOR:
- memcpy(coex_table_cmd.sta_prio, iwm_sta_xor_prio_tbl,
- sizeof(iwm_sta_xor_prio_tbl));
- break;
- case COEX_MODE_CM:
- memcpy(coex_table_cmd.sta_prio, iwm_sta_cm_prio_tbl,
- sizeof(iwm_sta_cm_prio_tbl));
- break;
- default:
- IWM_ERR(iwm, "Invalid coex_mode 0x%x\n",
- modparam_wiwi);
- break;
- }
- } else
- IWM_WARN(iwm, "coexistense disabled\n");
-
- return iwm_send_lmac_ptrough_cmd(iwm, COEX_PRIORITY_TABLE_CMD,
- &coex_table_cmd,
- sizeof(struct iwm_coex_prio_table_cmd), 0);
-}
-
-int iwm_send_init_calib_cfg(struct iwm_priv *iwm, u8 calib_requested)
-{
- struct iwm_lmac_cal_cfg_cmd cal_cfg_cmd;
-
- memset(&cal_cfg_cmd, 0, sizeof(struct iwm_lmac_cal_cfg_cmd));
-
- cal_cfg_cmd.ucode_cfg.init.enable = cpu_to_le32(calib_requested);
- cal_cfg_cmd.ucode_cfg.init.start = cpu_to_le32(calib_requested);
- cal_cfg_cmd.ucode_cfg.init.send_res = cpu_to_le32(calib_requested);
- cal_cfg_cmd.ucode_cfg.flags =
- cpu_to_le32(CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK);
-
- return iwm_send_lmac_ptrough_cmd(iwm, CALIBRATION_CFG_CMD, &cal_cfg_cmd,
- sizeof(struct iwm_lmac_cal_cfg_cmd), 1);
-}
-
-int iwm_send_periodic_calib_cfg(struct iwm_priv *iwm, u8 calib_requested)
-{
- struct iwm_lmac_cal_cfg_cmd cal_cfg_cmd;
-
- memset(&cal_cfg_cmd, 0, sizeof(struct iwm_lmac_cal_cfg_cmd));
-
- cal_cfg_cmd.ucode_cfg.periodic.enable = cpu_to_le32(calib_requested);
- cal_cfg_cmd.ucode_cfg.periodic.start = cpu_to_le32(calib_requested);
-
- return iwm_send_lmac_ptrough_cmd(iwm, CALIBRATION_CFG_CMD, &cal_cfg_cmd,
- sizeof(struct iwm_lmac_cal_cfg_cmd), 0);
-}
-
-int iwm_store_rxiq_calib_result(struct iwm_priv *iwm)
-{
- struct iwm_calib_rxiq *rxiq;
- u8 *eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ);
- int grplen = sizeof(struct iwm_calib_rxiq_group);
-
- rxiq = kzalloc(sizeof(struct iwm_calib_rxiq), GFP_KERNEL);
- if (!rxiq) {
- IWM_ERR(iwm, "Couldn't alloc memory for RX IQ\n");
- return -ENOMEM;
- }
-
- eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ);
- if (IS_ERR(eeprom_rxiq)) {
- IWM_ERR(iwm, "Couldn't access EEPROM RX IQ entry\n");
- kfree(rxiq);
- return PTR_ERR(eeprom_rxiq);
- }
-
- iwm->calib_res[SHILOH_PHY_CALIBRATE_RX_IQ_CMD].buf = (u8 *)rxiq;
- iwm->calib_res[SHILOH_PHY_CALIBRATE_RX_IQ_CMD].size = sizeof(*rxiq);
-
- rxiq->hdr.opcode = SHILOH_PHY_CALIBRATE_RX_IQ_CMD;
- rxiq->hdr.first_grp = 0;
- rxiq->hdr.grp_num = 1;
- rxiq->hdr.all_data_valid = 1;
-
- memcpy(&rxiq->group[0], eeprom_rxiq, 4 * grplen);
- memcpy(&rxiq->group[4], eeprom_rxiq + 6 * grplen, grplen);
-
- return 0;
-}
-
-int iwm_send_calib_results(struct iwm_priv *iwm)
-{
- int i, ret = 0;
-
- for (i = PHY_CALIBRATE_OPCODES_NUM; i < CALIBRATION_CMD_NUM; i++) {
- if (test_bit(i - PHY_CALIBRATE_OPCODES_NUM,
- &iwm->calib_done_map)) {
- IWM_DBG_CMD(iwm, DBG,
- "Send calibration %d result\n", i);
- ret |= iwm_send_lmac_ptrough_cmd(iwm,
- REPLY_PHY_CALIBRATION_CMD,
- iwm->calib_res[i].buf,
- iwm->calib_res[i].size, 0);
-
- kfree(iwm->calib_res[i].buf);
- iwm->calib_res[i].buf = NULL;
- iwm->calib_res[i].size = 0;
- }
- }
-
- return ret;
-}
-
-int iwm_send_ct_kill_cfg(struct iwm_priv *iwm, u8 entry, u8 exit)
-{
- struct iwm_ct_kill_cfg_cmd cmd;
-
- cmd.entry_threshold = entry;
- cmd.exit_threshold = exit;
-
- return iwm_send_lmac_ptrough_cmd(iwm, REPLY_CT_KILL_CONFIG_CMD, &cmd,
- sizeof(struct iwm_ct_kill_cfg_cmd), 0);
-}
-
-int iwm_send_umac_reset(struct iwm_priv *iwm, __le32 reset_flags, bool resp)
-{
- struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_umac_cmd_reset reset;
-
- reset.flags = reset_flags;
-
- umac_cmd.id = UMAC_CMD_OPCODE_RESET;
- umac_cmd.resp = resp;
-
- return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, &reset,
- sizeof(struct iwm_umac_cmd_reset));
-}
-
-int iwm_umac_set_config_fix(struct iwm_priv *iwm, u16 tbl, u16 key, u32 value)
-{
- struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_umac_cmd_set_param_fix param;
-
- if ((tbl != UMAC_PARAM_TBL_CFG_FIX) &&
- (tbl != UMAC_PARAM_TBL_FA_CFG_FIX))
- return -EINVAL;
-
- umac_cmd.id = UMAC_CMD_OPCODE_SET_PARAM_FIX;
- umac_cmd.resp = 0;
-
- param.tbl = cpu_to_le16(tbl);
- param.key = cpu_to_le16(key);
- param.value = cpu_to_le32(value);
-
- return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, &param,
- sizeof(struct iwm_umac_cmd_set_param_fix));
-}
-
-int iwm_umac_set_config_var(struct iwm_priv *iwm, u16 key,
- void *payload, u16 payload_size)
-{
- struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_umac_cmd_set_param_var *param_hdr;
- u8 *param;
- int ret;
-
- param = kzalloc(payload_size +
- sizeof(struct iwm_umac_cmd_set_param_var), GFP_KERNEL);
- if (!param) {
- IWM_ERR(iwm, "Couldn't allocate param\n");
- return -ENOMEM;
- }
-
- param_hdr = (struct iwm_umac_cmd_set_param_var *)param;
-
- umac_cmd.id = UMAC_CMD_OPCODE_SET_PARAM_VAR;
- umac_cmd.resp = 0;
-
- param_hdr->tbl = cpu_to_le16(UMAC_PARAM_TBL_CFG_VAR);
- param_hdr->key = cpu_to_le16(key);
- param_hdr->len = cpu_to_le16(payload_size);
- memcpy(param + sizeof(struct iwm_umac_cmd_set_param_var),
- payload, payload_size);
-
- ret = iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, param,
- sizeof(struct iwm_umac_cmd_set_param_var) +
- payload_size);
- kfree(param);
-
- return ret;
-}
-
-int iwm_send_umac_config(struct iwm_priv *iwm, __le32 reset_flags)
-{
- int ret;
-
- /* Use UMAC default values */
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_POWER_INDEX, iwm->conf.power_index);
- if (ret < 0)
- return ret;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_FA_CFG_FIX,
- CFG_FRAG_THRESHOLD,
- iwm->conf.frag_threshold);
- if (ret < 0)
- return ret;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_RTS_THRESHOLD,
- iwm->conf.rts_threshold);
- if (ret < 0)
- return ret;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_CTS_TO_SELF, iwm->conf.cts_to_self);
- if (ret < 0)
- return ret;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_WIRELESS_MODE,
- iwm->conf.wireless_mode);
- if (ret < 0)
- return ret;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_COEX_MODE, modparam_wiwi);
- if (ret < 0)
- return ret;
-
- /*
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_ASSOCIATION_TIMEOUT,
- iwm->conf.assoc_timeout);
- if (ret < 0)
- return ret;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_ROAM_TIMEOUT,
- iwm->conf.roam_timeout);
- if (ret < 0)
- return ret;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_WIRELESS_MODE,
- WIRELESS_MODE_11A | WIRELESS_MODE_11G);
- if (ret < 0)
- return ret;
- */
-
- ret = iwm_umac_set_config_var(iwm, CFG_NET_ADDR,
- iwm_to_ndev(iwm)->dev_addr, ETH_ALEN);
- if (ret < 0)
- return ret;
-
- /* UMAC PM static configurations */
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_PM_LEGACY_RX_TIMEOUT, 0x12C);
- if (ret < 0)
- return ret;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_PM_LEGACY_TX_TIMEOUT, 0x15E);
- if (ret < 0)
- return ret;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_PM_CTRL_FLAGS, 0x1);
- if (ret < 0)
- return ret;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_PM_KEEP_ALIVE_IN_BEACONS, 0x80);
- if (ret < 0)
- return ret;
-
- /* reset UMAC */
- ret = iwm_send_umac_reset(iwm, reset_flags, 1);
- if (ret < 0)
- return ret;
-
- ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_RESET, IWM_SRC_UMAC,
- WAIT_NOTIF_TIMEOUT);
- if (ret) {
-		IWM_ERR(iwm, "Timed out waiting for UMAC RESET\n");
- return ret;
- }
-
- return ret;
-}
-
-int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id)
-{
- struct iwm_udma_wifi_cmd udma_cmd;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_tx_info *tx_info = skb_to_tx_info(skb);
-
- udma_cmd.eop = 1; /* always set eop for non-concatenated Tx */
- udma_cmd.credit_group = pool_id;
- udma_cmd.ra_tid = tx_info->sta << 4 | tx_info->tid;
- udma_cmd.lmac_offset = 0;
-
- umac_cmd.id = REPLY_TX;
- umac_cmd.color = tx_info->color;
- umac_cmd.resp = 0;
-
- return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd,
- skb->data, skb->len);
-}
-
-static int iwm_target_read(struct iwm_priv *iwm, __le32 address,
- u8 *response, u32 resp_size)
-{
- struct iwm_udma_nonwifi_cmd target_cmd;
- struct iwm_nonwifi_cmd *cmd;
- u16 seq_num;
- int ret = 0;
-
- target_cmd.opcode = UMAC_HDI_OUT_OPCODE_READ;
- target_cmd.addr = address;
- target_cmd.op1_sz = cpu_to_le32(resp_size);
- target_cmd.op2 = 0;
- target_cmd.handle_by_hw = 0;
- target_cmd.resp = 1;
- target_cmd.eop = 1;
-
- ret = iwm_hal_send_target_cmd(iwm, &target_cmd, NULL);
- if (ret < 0) {
- IWM_ERR(iwm, "Couldn't send READ command\n");
- return ret;
- }
-
-	/* On success, the send_target routine returns the sequence number */
- seq_num = ret;
-
- ret = wait_event_interruptible_timeout(iwm->nonwifi_queue,
- (cmd = iwm_get_pending_nonwifi_cmd(iwm, seq_num,
- UMAC_HDI_OUT_OPCODE_READ)) != NULL,
- 2 * HZ);
-
- if (!ret) {
- IWM_ERR(iwm, "Didn't receive a target READ answer\n");
-		return -ETIMEDOUT;
- }
-
- memcpy(response, cmd->buf.hdr + sizeof(struct iwm_udma_in_hdr),
- resp_size);
-
- kfree(cmd);
-
- return 0;
-}
-
-int iwm_read_mac(struct iwm_priv *iwm, u8 *mac)
-{
- int ret;
- u8 mac_align[ALIGN(ETH_ALEN, 8)];
-
- ret = iwm_target_read(iwm, cpu_to_le32(WICO_MAC_ADDRESS_ADDR),
- mac_align, sizeof(mac_align));
- if (ret)
- return ret;
-
- if (is_valid_ether_addr(mac_align))
- memcpy(mac, mac_align, ETH_ALEN);
- else {
- IWM_ERR(iwm, "Invalid EEPROM MAC\n");
- memcpy(mac, iwm->conf.mac_addr, ETH_ALEN);
- get_random_bytes(&mac[3], 3);
- }
-
- return 0;
-}
-
-static int iwm_check_profile(struct iwm_priv *iwm)
-{
- if (!iwm->umac_profile_active)
- return -EAGAIN;
-
- if (iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_WEP_40 &&
- iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_WEP_104 &&
- iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_TKIP &&
- iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_CCMP) {
- IWM_ERR(iwm, "Wrong unicast cipher: 0x%x\n",
- iwm->umac_profile->sec.ucast_cipher);
- return -EAGAIN;
- }
-
- if (iwm->umac_profile->sec.mcast_cipher != UMAC_CIPHER_TYPE_WEP_40 &&
- iwm->umac_profile->sec.mcast_cipher != UMAC_CIPHER_TYPE_WEP_104 &&
- iwm->umac_profile->sec.mcast_cipher != UMAC_CIPHER_TYPE_TKIP &&
- iwm->umac_profile->sec.mcast_cipher != UMAC_CIPHER_TYPE_CCMP) {
- IWM_ERR(iwm, "Wrong multicast cipher: 0x%x\n",
- iwm->umac_profile->sec.mcast_cipher);
- return -EAGAIN;
- }
-
- if ((iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_40 ||
- iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_104) &&
- (iwm->umac_profile->sec.ucast_cipher !=
- iwm->umac_profile->sec.mcast_cipher)) {
- IWM_ERR(iwm, "Unicast and multicast ciphers differ for WEP\n");
- }
-
- return 0;
-}
-
-int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx)
-{
- struct iwm_umac_tx_key_id tx_key_id;
- int ret;
-
- ret = iwm_check_profile(iwm);
- if (ret < 0)
- return ret;
-
-	/* The UMAC only allows setting a default key for WEP, and only when
-	 * the auth type is NOT 802.1X or RSNA-PSK. */
- if ((iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_WEP_40 &&
- iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_WEP_104) ||
- iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_8021X ||
- iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_RSNA_PSK)
- return 0;
-
- tx_key_id.hdr.oid = UMAC_WIFI_IF_CMD_GLOBAL_TX_KEY_ID;
- tx_key_id.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_tx_key_id) -
- sizeof(struct iwm_umac_wifi_if));
-
- tx_key_id.key_idx = key_idx;
-
- return iwm_send_wifi_if_cmd(iwm, &tx_key_id, sizeof(tx_key_id), 1);
-}
-
-int iwm_set_key(struct iwm_priv *iwm, bool remove, struct iwm_key *key)
-{
- int ret = 0;
- u8 cmd[64], *sta_addr, *key_data, key_len;
- s8 key_idx;
- u16 cmd_size = 0;
- struct iwm_umac_key_hdr *key_hdr = &key->hdr;
- struct iwm_umac_key_wep40 *wep40 = (struct iwm_umac_key_wep40 *)cmd;
- struct iwm_umac_key_wep104 *wep104 = (struct iwm_umac_key_wep104 *)cmd;
- struct iwm_umac_key_tkip *tkip = (struct iwm_umac_key_tkip *)cmd;
- struct iwm_umac_key_ccmp *ccmp = (struct iwm_umac_key_ccmp *)cmd;
-
- if (!remove) {
- ret = iwm_check_profile(iwm);
- if (ret < 0)
- return ret;
- }
-
- sta_addr = key->hdr.mac;
- key_data = key->key;
- key_len = key->key_len;
- key_idx = key->hdr.key_idx;
-
- if (!remove) {
- u8 auth_type = iwm->umac_profile->sec.auth_type;
-
- IWM_DBG_WEXT(iwm, DBG, "key_idx:%d\n", key_idx);
- IWM_DBG_WEXT(iwm, DBG, "key_len:%d\n", key_len);
- IWM_DBG_WEXT(iwm, DBG, "MAC:%pM, idx:%d, multicast:%d\n",
- key_hdr->mac, key_hdr->key_idx, key_hdr->multicast);
-
- IWM_DBG_WEXT(iwm, DBG, "profile: mcast:0x%x, ucast:0x%x\n",
- iwm->umac_profile->sec.mcast_cipher,
- iwm->umac_profile->sec.ucast_cipher);
- IWM_DBG_WEXT(iwm, DBG, "profile: auth_type:0x%x, flags:0x%x\n",
- iwm->umac_profile->sec.auth_type,
- iwm->umac_profile->sec.flags);
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- wep40->hdr.oid = UMAC_WIFI_IF_CMD_ADD_WEP40_KEY;
- wep40->hdr.buf_size =
- cpu_to_le16(sizeof(struct iwm_umac_key_wep40) -
- sizeof(struct iwm_umac_wifi_if));
-
- memcpy(&wep40->key_hdr, key_hdr,
- sizeof(struct iwm_umac_key_hdr));
- memcpy(wep40->key, key_data, key_len);
- wep40->static_key =
- !!((auth_type != UMAC_AUTH_TYPE_8021X) &&
- (auth_type != UMAC_AUTH_TYPE_RSNA_PSK));
-
- cmd_size = sizeof(struct iwm_umac_key_wep40);
- break;
-
- case WLAN_CIPHER_SUITE_WEP104:
- wep104->hdr.oid = UMAC_WIFI_IF_CMD_ADD_WEP104_KEY;
- wep104->hdr.buf_size =
- cpu_to_le16(sizeof(struct iwm_umac_key_wep104) -
- sizeof(struct iwm_umac_wifi_if));
-
- memcpy(&wep104->key_hdr, key_hdr,
- sizeof(struct iwm_umac_key_hdr));
- memcpy(wep104->key, key_data, key_len);
- wep104->static_key =
- !!((auth_type != UMAC_AUTH_TYPE_8021X) &&
- (auth_type != UMAC_AUTH_TYPE_RSNA_PSK));
-
- cmd_size = sizeof(struct iwm_umac_key_wep104);
- break;
-
- case WLAN_CIPHER_SUITE_CCMP:
- key_hdr->key_idx++;
- ccmp->hdr.oid = UMAC_WIFI_IF_CMD_ADD_CCMP_KEY;
- ccmp->hdr.buf_size =
- cpu_to_le16(sizeof(struct iwm_umac_key_ccmp) -
- sizeof(struct iwm_umac_wifi_if));
-
- memcpy(&ccmp->key_hdr, key_hdr,
- sizeof(struct iwm_umac_key_hdr));
-
- memcpy(ccmp->key, key_data, key_len);
-
- if (key->seq_len)
- memcpy(ccmp->iv_count, key->seq, key->seq_len);
-
- cmd_size = sizeof(struct iwm_umac_key_ccmp);
- break;
-
- case WLAN_CIPHER_SUITE_TKIP:
- key_hdr->key_idx++;
- tkip->hdr.oid = UMAC_WIFI_IF_CMD_ADD_TKIP_KEY;
- tkip->hdr.buf_size =
- cpu_to_le16(sizeof(struct iwm_umac_key_tkip) -
- sizeof(struct iwm_umac_wifi_if));
-
- memcpy(&tkip->key_hdr, key_hdr,
- sizeof(struct iwm_umac_key_hdr));
-
- memcpy(tkip->tkip_key, key_data, IWM_TKIP_KEY_SIZE);
- memcpy(tkip->mic_tx_key, key_data + IWM_TKIP_KEY_SIZE,
- IWM_TKIP_MIC_SIZE);
- memcpy(tkip->mic_rx_key,
- key_data + IWM_TKIP_KEY_SIZE + IWM_TKIP_MIC_SIZE,
- IWM_TKIP_MIC_SIZE);
-
- if (key->seq_len)
-				memcpy(tkip->iv_count, key->seq, key->seq_len);
-
- cmd_size = sizeof(struct iwm_umac_key_tkip);
- break;
-
- default:
- return -ENOTSUPP;
- }
-
- if ((key->cipher == WLAN_CIPHER_SUITE_TKIP) ||
- (key->cipher == WLAN_CIPHER_SUITE_CCMP))
- /*
- * UGLY_UGLY_UGLY
- * Copied HACK from the MWG driver.
- * Without it, the key is set before the second
- * EAPOL frame is sent, and the latter is thus
- * encrypted.
- */
- schedule_timeout_interruptible(usecs_to_jiffies(300));
-
- ret = iwm_send_wifi_if_cmd(iwm, cmd, cmd_size, 1);
- } else {
- struct iwm_umac_key_remove key_remove;
-
- IWM_DBG_WEXT(iwm, ERR, "Removing key_idx:%d\n", key_idx);
-
- key_remove.hdr.oid = UMAC_WIFI_IF_CMD_REMOVE_KEY;
- key_remove.hdr.buf_size =
- cpu_to_le16(sizeof(struct iwm_umac_key_remove) -
- sizeof(struct iwm_umac_wifi_if));
- memcpy(&key_remove.key_hdr, key_hdr,
- sizeof(struct iwm_umac_key_hdr));
-
- ret = iwm_send_wifi_if_cmd(iwm, &key_remove,
- sizeof(struct iwm_umac_key_remove),
- 1);
- if (ret)
- return ret;
-
- iwm->keys[key_idx].key_len = 0;
- }
-
- return ret;
-}
-
-int iwm_send_mlme_profile(struct iwm_priv *iwm)
-{
- int ret;
- struct iwm_umac_profile profile;
-
- memcpy(&profile, iwm->umac_profile, sizeof(profile));
-
- profile.hdr.oid = UMAC_WIFI_IF_CMD_SET_PROFILE;
- profile.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_profile) -
- sizeof(struct iwm_umac_wifi_if));
-
- ret = iwm_send_wifi_if_cmd(iwm, &profile, sizeof(profile), 1);
- if (ret) {
- IWM_ERR(iwm, "Send profile command failed\n");
- return ret;
- }
-
- set_bit(IWM_STATUS_SME_CONNECTING, &iwm->status);
- return 0;
-}
-
-int __iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
-{
- struct iwm_umac_invalidate_profile invalid;
-
- invalid.hdr.oid = UMAC_WIFI_IF_CMD_INVALIDATE_PROFILE;
- invalid.hdr.buf_size =
- cpu_to_le16(sizeof(struct iwm_umac_invalidate_profile) -
- sizeof(struct iwm_umac_wifi_if));
-
- invalid.reason = WLAN_REASON_UNSPECIFIED;
-
- return iwm_send_wifi_if_cmd(iwm, &invalid, sizeof(invalid), 1);
-}
-
-int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
-{
- int ret;
-
- ret = __iwm_invalidate_mlme_profile(iwm);
- if (ret)
- return ret;
-
- ret = wait_event_interruptible_timeout(iwm->mlme_queue,
- (iwm->umac_profile_active == 0), 5 * HZ);
-
- return ret ? 0 : -EBUSY;
-}
-
-int iwm_tx_power_trigger(struct iwm_priv *iwm)
-{
- struct iwm_umac_pwr_trigger pwr_trigger;
-
- pwr_trigger.hdr.oid = UMAC_WIFI_IF_CMD_TX_PWR_TRIGGER;
- pwr_trigger.hdr.buf_size =
- cpu_to_le16(sizeof(struct iwm_umac_pwr_trigger) -
- sizeof(struct iwm_umac_wifi_if));
-
- return iwm_send_wifi_if_cmd(iwm, &pwr_trigger, sizeof(pwr_trigger), 1);
-}
-
-int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags)
-{
- struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_umac_cmd_stats_req stats_req;
-
- stats_req.flags = cpu_to_le32(flags);
-
- umac_cmd.id = UMAC_CMD_OPCODE_STATISTIC_REQUEST;
- umac_cmd.resp = 0;
-
- return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, &stats_req,
- sizeof(struct iwm_umac_cmd_stats_req));
-}
-
-int iwm_send_umac_channel_list(struct iwm_priv *iwm)
-{
- struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_umac_cmd_get_channel_list *ch_list;
- int size = sizeof(struct iwm_umac_cmd_get_channel_list) +
- sizeof(struct iwm_umac_channel_info) * 4;
- int ret;
-
- ch_list = kzalloc(size, GFP_KERNEL);
- if (!ch_list) {
- IWM_ERR(iwm, "Couldn't allocate channel list cmd\n");
- return -ENOMEM;
- }
-
- ch_list->ch[0].band = UMAC_BAND_2GHZ;
- ch_list->ch[0].type = UMAC_CHANNEL_WIDTH_20MHZ;
- ch_list->ch[0].flags = UMAC_CHANNEL_FLAG_VALID;
-
- ch_list->ch[1].band = UMAC_BAND_5GHZ;
- ch_list->ch[1].type = UMAC_CHANNEL_WIDTH_20MHZ;
- ch_list->ch[1].flags = UMAC_CHANNEL_FLAG_VALID;
-
- ch_list->ch[2].band = UMAC_BAND_2GHZ;
- ch_list->ch[2].type = UMAC_CHANNEL_WIDTH_20MHZ;
- ch_list->ch[2].flags = UMAC_CHANNEL_FLAG_VALID | UMAC_CHANNEL_FLAG_IBSS;
-
- ch_list->ch[3].band = UMAC_BAND_5GHZ;
- ch_list->ch[3].type = UMAC_CHANNEL_WIDTH_20MHZ;
- ch_list->ch[3].flags = UMAC_CHANNEL_FLAG_VALID | UMAC_CHANNEL_FLAG_IBSS;
-
- ch_list->count = cpu_to_le16(4);
-
- umac_cmd.id = UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST;
- umac_cmd.resp = 1;
-
- ret = iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, ch_list, size);
-
- kfree(ch_list);
-
- return ret;
-}
-
-int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids,
- int ssid_num)
-{
- struct iwm_umac_cmd_scan_request req;
- int i, ret;
-
- memset(&req, 0, sizeof(struct iwm_umac_cmd_scan_request));
-
- req.hdr.oid = UMAC_WIFI_IF_CMD_SCAN_REQUEST;
- req.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_cmd_scan_request)
- - sizeof(struct iwm_umac_wifi_if));
- req.type = UMAC_WIFI_IF_SCAN_TYPE_USER;
- req.timeout = 2;
- req.seq_num = iwm->scan_id;
- req.ssid_num = min(ssid_num, UMAC_WIFI_IF_PROBE_OPTION_MAX);
-
- for (i = 0; i < req.ssid_num; i++) {
- memcpy(req.ssids[i].ssid, ssids[i].ssid, ssids[i].ssid_len);
- req.ssids[i].ssid_len = ssids[i].ssid_len;
- }
-
- ret = iwm_send_wifi_if_cmd(iwm, &req, sizeof(req), 0);
- if (ret) {
- IWM_ERR(iwm, "Couldn't send scan request\n");
- return ret;
- }
-
- iwm->scan_id = (iwm->scan_id + 1) % IWM_SCAN_ID_MAX;
-
- return 0;
-}
-
-int iwm_scan_one_ssid(struct iwm_priv *iwm, u8 *ssid, int ssid_len)
-{
- struct cfg80211_ssid one_ssid;
-
- if (test_and_set_bit(IWM_STATUS_SCANNING, &iwm->status))
- return 0;
-
- one_ssid.ssid_len = min(ssid_len, IEEE80211_MAX_SSID_LEN);
- memcpy(&one_ssid.ssid, ssid, one_ssid.ssid_len);
-
- return iwm_scan_ssids(iwm, &one_ssid, 1);
-}
-
-int iwm_target_reset(struct iwm_priv *iwm)
-{
- struct iwm_udma_nonwifi_cmd target_cmd;
-
- target_cmd.opcode = UMAC_HDI_OUT_OPCODE_REBOOT;
- target_cmd.addr = 0;
- target_cmd.op1_sz = 0;
- target_cmd.op2 = 0;
- target_cmd.handle_by_hw = 0;
- target_cmd.resp = 0;
- target_cmd.eop = 1;
-
- return iwm_hal_send_target_cmd(iwm, &target_cmd, NULL);
-}
-
-int iwm_send_umac_stop_resume_tx(struct iwm_priv *iwm,
- struct iwm_umac_notif_stop_resume_tx *ntf)
-{
- struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_umac_cmd_stop_resume_tx stp_res_cmd;
- struct iwm_sta_info *sta_info;
- u8 sta_id = STA_ID_N_COLOR_ID(ntf->sta_id);
- int i;
-
- sta_info = &iwm->sta_table[sta_id];
- if (!sta_info->valid) {
- IWM_ERR(iwm, "Invalid STA: %d\n", sta_id);
- return -EINVAL;
- }
-
- umac_cmd.id = UMAC_CMD_OPCODE_STOP_RESUME_STA_TX;
- umac_cmd.resp = 0;
-
- stp_res_cmd.flags = ntf->flags;
- stp_res_cmd.sta_id = ntf->sta_id;
- stp_res_cmd.stop_resume_tid_msk = ntf->stop_resume_tid_msk;
- for (i = 0; i < IWM_UMAC_TID_NR; i++)
- stp_res_cmd.last_seq_num[i] =
- sta_info->tid_info[i].last_seq_num;
-
- return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, &stp_res_cmd,
- sizeof(struct iwm_umac_cmd_stop_resume_tx));
-}
-
-int iwm_send_pmkid_update(struct iwm_priv *iwm,
- struct cfg80211_pmksa *pmksa, u32 command)
-{
- struct iwm_umac_pmkid_update update;
- int ret;
-
- memset(&update, 0, sizeof(struct iwm_umac_pmkid_update));
-
- update.hdr.oid = UMAC_WIFI_IF_CMD_PMKID_UPDATE;
- update.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_pmkid_update) -
- sizeof(struct iwm_umac_wifi_if));
-
- update.command = cpu_to_le32(command);
- if (pmksa->bssid)
- memcpy(&update.bssid, pmksa->bssid, ETH_ALEN);
- if (pmksa->pmkid)
- memcpy(&update.pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
-
- ret = iwm_send_wifi_if_cmd(iwm, &update,
- sizeof(struct iwm_umac_pmkid_update), 0);
- if (ret) {
- IWM_ERR(iwm, "PMKID update command failed\n");
- return ret;
- }
-
- return 0;
-}
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
deleted file mode 100644
index 6421689..0000000
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#ifndef __IWM_COMMANDS_H__
-#define __IWM_COMMANDS_H__
-
-#include <linux/ieee80211.h>
-
-#define IWM_BARKER_REBOOT_NOTIFICATION 0xF
-#define IWM_ACK_BARKER_NOTIFICATION 0x10
-
-/* UMAC commands */
-#define UMAC_RST_CTRL_FLG_LARC_CLK_EN 0x0001
-#define UMAC_RST_CTRL_FLG_LARC_RESET 0x0002
-#define UMAC_RST_CTRL_FLG_FUNC_RESET 0x0004
-#define UMAC_RST_CTRL_FLG_DEV_RESET 0x0008
-#define UMAC_RST_CTRL_FLG_WIFI_CORE_EN 0x0010
-#define UMAC_RST_CTRL_FLG_WIFI_LINK_EN 0x0040
-#define UMAC_RST_CTRL_FLG_WIFI_MLME_EN 0x0080
-#define UMAC_RST_CTRL_FLG_NVM_RELOAD 0x0100
-
-struct iwm_umac_cmd_reset {
- __le32 flags;
-} __packed;
-
-#define UMAC_PARAM_TBL_ORD_FIX 0x0
-#define UMAC_PARAM_TBL_ORD_VAR 0x1
-#define UMAC_PARAM_TBL_CFG_FIX 0x2
-#define UMAC_PARAM_TBL_CFG_VAR 0x3
-#define UMAC_PARAM_TBL_BSS_TRK 0x4
-#define UMAC_PARAM_TBL_FA_CFG_FIX 0x5
-#define UMAC_PARAM_TBL_STA 0x6
-#define UMAC_PARAM_TBL_CHN 0x7
-#define UMAC_PARAM_TBL_STATISTICS 0x8
-
-/* fast access table */
-enum {
- CFG_FRAG_THRESHOLD = 0,
- CFG_FRAME_RETRY_LIMIT,
- CFG_OS_QUEUE_UTIL_TH,
- CFG_RX_FILTER,
- /* <-- LAST --> */
- FAST_ACCESS_CFG_TBL_FIX_LAST
-};
-
-/* fixed size table */
-enum {
- CFG_POWER_INDEX = 0,
- CFG_PM_LEGACY_RX_TIMEOUT,
- CFG_PM_LEGACY_TX_TIMEOUT,
- CFG_PM_CTRL_FLAGS,
- CFG_PM_KEEP_ALIVE_IN_BEACONS,
- CFG_BT_ON_THRESHOLD,
- CFG_RTS_THRESHOLD,
- CFG_CTS_TO_SELF,
- CFG_COEX_MODE,
- CFG_WIRELESS_MODE,
- CFG_ASSOCIATION_TIMEOUT,
- CFG_ROAM_TIMEOUT,
- CFG_CAPABILITY_SUPPORTED_RATES,
- CFG_SCAN_ALLOWED_UNASSOC_FLAGS,
- CFG_SCAN_ALLOWED_MAIN_ASSOC_FLAGS,
- CFG_SCAN_ALLOWED_PAN_ASSOC_FLAGS,
- CFG_SCAN_INTERNAL_PERIODIC_ENABLED,
- CFG_SCAN_IMM_INTERNAL_PERIODIC_SCAN_ON_INIT,
- CFG_SCAN_DEFAULT_PERIODIC_FREQ_SEC,
- CFG_SCAN_NUM_PASSIVE_CHAN_PER_PARTIAL_SCAN,
- CFG_TLC_SUPPORTED_TX_HT_RATES,
- CFG_TLC_SUPPORTED_TX_RATES,
- CFG_TLC_SPATIAL_STREAM_SUPPORTED,
- CFG_TLC_RETRY_PER_RATE,
- CFG_TLC_RETRY_PER_HT_RATE,
- CFG_TLC_FIXED_MCS,
- CFG_TLC_CONTROL_FLAGS,
- CFG_TLC_SR_MIN_FAIL,
- CFG_TLC_SR_MIN_PASS,
- CFG_TLC_HT_STAY_IN_COL_PASS_THRESH,
- CFG_TLC_HT_STAY_IN_COL_FAIL_THRESH,
- CFG_TLC_LEGACY_STAY_IN_COL_PASS_THRESH,
- CFG_TLC_LEGACY_STAY_IN_COL_FAIL_THRESH,
- CFG_TLC_HT_FLUSH_STATS_PACKETS,
- CFG_TLC_LEGACY_FLUSH_STATS_PACKETS,
- CFG_TLC_LEGACY_FLUSH_STATS_MS,
- CFG_TLC_HT_FLUSH_STATS_MS,
- CFG_TLC_STAY_IN_COL_TIME_OUT,
- CFG_TLC_AGG_SHORT_LIM,
- CFG_TLC_AGG_LONG_LIM,
- CFG_TLC_HT_SR_NO_DECREASE,
- CFG_TLC_LEGACY_SR_NO_DECREASE,
- CFG_TLC_SR_FORCE_DECREASE,
- CFG_TLC_SR_ALLOW_INCREASE,
- CFG_TLC_AGG_SET_LONG,
- CFG_TLC_AUTO_AGGREGATION,
- CFG_TLC_AGG_THRESHOLD,
- CFG_TLC_TID_LOAD_THRESHOLD,
- CFG_TLC_BLOCK_ACK_TIMEOUT,
- CFG_TLC_NO_BA_COUNTED_AS_ONE,
- CFG_TLC_NUM_BA_STREAMS_ALLOWED,
- CFG_TLC_NUM_BA_STREAMS_PRESENT,
- CFG_TLC_RENEW_ADDBA_DELAY,
- CFG_TLC_NUM_OF_MULTISEC_TO_COUN_LOAD,
- CFG_TLC_IS_STABLE_IN_HT,
- CFG_TLC_SR_SIC_1ST_FAIL,
- CFG_TLC_SR_SIC_1ST_PASS,
- CFG_TLC_SR_SIC_TOTAL_FAIL,
- CFG_TLC_SR_SIC_TOTAL_PASS,
- CFG_RLC_CHAIN_CTRL,
- CFG_TRK_TABLE_OP_MODE,
- CFG_TRK_TABLE_RSSI_THRESHOLD,
- CFG_TX_PWR_TARGET, /* Used By xVT */
- CFG_TX_PWR_LIMIT_USR,
- CFG_TX_PWR_LIMIT_BSS, /* 11d limit */
- CFG_TX_PWR_LIMIT_BSS_CONSTRAINT, /* 11h constraint */
- CFG_TX_PWR_MODE,
- CFG_MLME_DBG_NOTIF_BLOCK,
- CFG_BT_OFF_BECONS_INTERVALS,
- CFG_BT_FRAG_DURATION,
- CFG_ACTIVE_CHAINS,
- CFG_CALIB_CTRL,
- CFG_CAPABILITY_SUPPORTED_HT_RATES,
- CFG_HT_MAC_PARAM_INFO,
- CFG_MIMO_PS_MODE,
- CFG_HT_DEFAULT_CAPABILIES_INFO,
- CFG_LED_SC_RESOLUTION_FACTOR,
- CFG_PTAM_ENERGY_CCK_DET_DEFAULT,
- CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_DEFAULT,
- CFG_PTAM_CORR40_4_TH_ADD_MIN_DEFAULT,
- CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_DEFAULT,
- CFG_PTAM_CORR32_4_TH_ADD_MIN_DEFAULT,
- CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_DEFAULT,
- CFG_PTAM_CORR32_1_TH_ADD_MIN_DEFAULT,
- CFG_PTAM_ENERGY_CCK_DET_MIN_VAL,
- CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_MIN_VAL,
- CFG_PTAM_CORR40_4_TH_ADD_MIN_MIN_VAL,
- CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_MIN_VAL,
- CFG_PTAM_CORR32_4_TH_ADD_MIN_MIN_VAL,
- CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_MIN_VAL,
- CFG_PTAM_CORR32_1_TH_ADD_MIN_MIN_VAL,
- CFG_PTAM_ENERGY_CCK_DET_MAX_VAL,
- CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_MAX_VAL,
- CFG_PTAM_CORR40_4_TH_ADD_MIN_MAX_VAL,
- CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_MAX_VAL,
- CFG_PTAM_CORR32_4_TH_ADD_MIN_MAX_VAL,
- CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_MAX_VAL,
- CFG_PTAM_CORR32_1_TH_ADD_MIN_MAX_VAL,
- CFG_PTAM_ENERGY_CCK_DET_STEP_VAL,
- CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_STEP_VAL,
- CFG_PTAM_CORR40_4_TH_ADD_MIN_STEP_VAL,
- CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_STEP_VAL,
- CFG_PTAM_CORR32_4_TH_ADD_MIN_STEP_VAL,
- CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_STEP_VAL,
- CFG_PTAM_CORR32_1_TH_ADD_MIN_STEP_VAL,
- CFG_PTAM_LINK_SENS_FA_OFDM_MAX,
- CFG_PTAM_LINK_SENS_FA_OFDM_MIN,
- CFG_PTAM_LINK_SENS_FA_CCK_MAX,
- CFG_PTAM_LINK_SENS_FA_CCK_MIN,
- CFG_PTAM_LINK_SENS_NRG_DIFF,
- CFG_PTAM_LINK_SENS_NRG_MARGIN,
- CFG_PTAM_LINK_SENS_MAX_NUMBER_OF_TIMES_IN_CCK_NO_FA,
- CFG_PTAM_LINK_SENS_AUTO_CORR_MAX_TH_CCK,
- CFG_AGG_MGG_TID_LOAD_ADDBA_THRESHOLD,
- CFG_AGG_MGG_TID_LOAD_DELBA_THRESHOLD,
- CFG_AGG_MGG_ADDBA_BUF_SIZE,
- CFG_AGG_MGG_ADDBA_INACTIVE_TIMEOUT,
- CFG_AGG_MGG_ADDBA_DEBUG_FLAGS,
- CFG_SCAN_PERIODIC_RSSI_HIGH_THRESHOLD,
- CFG_SCAN_PERIODIC_COEF_RSSI_HIGH,
- CFG_11D_ENABLED,
- CFG_11H_FEATURE_FLAGS,
-
- /* <-- LAST --> */
- CFG_TBL_FIX_LAST
-};
-
-/* variable size table */
-enum {
- CFG_NET_ADDR = 0,
- CFG_LED_PATTERN_TABLE,
-
- /* <-- LAST --> */
- CFG_TBL_VAR_LAST
-};
-
-struct iwm_umac_cmd_set_param_fix {
- __le16 tbl;
- __le16 key;
- __le32 value;
-} __packed;
-
-struct iwm_umac_cmd_set_param_var {
- __le16 tbl;
- __le16 key;
- __le16 len;
- __le16 reserved;
-} __packed;
-
-struct iwm_umac_cmd_get_param {
- __le16 tbl;
- __le16 key;
-} __packed;
-
-struct iwm_umac_cmd_get_param_resp {
- __le16 tbl;
- __le16 key;
- __le16 len;
- __le16 reserved;
-} __packed;
-
-struct iwm_umac_cmd_eeprom_proxy_hdr {
- __le32 type;
- __le32 offset;
- __le32 len;
-} __packed;
-
-struct iwm_umac_cmd_eeprom_proxy {
- struct iwm_umac_cmd_eeprom_proxy_hdr hdr;
- u8 buf[0];
-} __packed;
-
-#define IWM_UMAC_CMD_EEPROM_TYPE_READ 0x1
-#define IWM_UMAC_CMD_EEPROM_TYPE_WRITE 0x2
-
-#define UMAC_CHANNEL_FLAG_VALID BIT(0)
-#define UMAC_CHANNEL_FLAG_IBSS BIT(1)
-#define UMAC_CHANNEL_FLAG_ACTIVE BIT(3)
-#define UMAC_CHANNEL_FLAG_RADAR BIT(4)
-#define UMAC_CHANNEL_FLAG_DFS BIT(7)
-
-struct iwm_umac_channel_info {
- u8 band;
- u8 type;
- u8 reserved;
- u8 flags;
- __le32 channels_mask;
-} __packed;
-
-struct iwm_umac_cmd_get_channel_list {
- __le16 count;
- __le16 reserved;
- struct iwm_umac_channel_info ch[0];
-} __packed;
-
-
-/* UMAC WiFi interface commands */
-
-/* Coexistence mode */
-#define COEX_MODE_SA 0x1
-#define COEX_MODE_XOR 0x2
-#define COEX_MODE_CM 0x3
-#define COEX_MODE_MAX 0x4
-
-/* Wireless mode */
-#define WIRELESS_MODE_11A 0x1
-#define WIRELESS_MODE_11G 0x2
-#define WIRELESS_MODE_11N 0x4
-
-#define UMAC_PROFILE_EX_IE_REQUIRED 0x1
-#define UMAC_PROFILE_QOS_ALLOWED 0x2
-
-/* Scanning */
-#define UMAC_WIFI_IF_PROBE_OPTION_MAX 10
-
-#define UMAC_WIFI_IF_SCAN_TYPE_USER 0x0
-#define UMAC_WIFI_IF_SCAN_TYPE_UMAC_RESERVED 0x1
-#define UMAC_WIFI_IF_SCAN_TYPE_HOST_PERIODIC 0x2
-#define UMAC_WIFI_IF_SCAN_TYPE_MAX 0x3
-
-struct iwm_umac_ssid {
- u8 ssid_len;
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- u8 reserved[3];
-} __packed;
-
-struct iwm_umac_cmd_scan_request {
- struct iwm_umac_wifi_if hdr;
- __le32 type; /* UMAC_WIFI_IF_SCAN_TYPE_* */
- u8 ssid_num;
- u8 seq_num;
- u8 timeout; /* In seconds */
- u8 reserved;
- struct iwm_umac_ssid ssids[UMAC_WIFI_IF_PROBE_OPTION_MAX];
-} __packed;
-
-#define UMAC_CIPHER_TYPE_NONE 0xFF
-#define UMAC_CIPHER_TYPE_USE_GROUPCAST 0x00
-#define UMAC_CIPHER_TYPE_WEP_40 0x01
-#define UMAC_CIPHER_TYPE_WEP_104 0x02
-#define UMAC_CIPHER_TYPE_TKIP 0x04
-#define UMAC_CIPHER_TYPE_CCMP 0x08
-
-/* Supported authentication types - bitmap */
-#define UMAC_AUTH_TYPE_OPEN 0x00
-#define UMAC_AUTH_TYPE_LEGACY_PSK 0x01
-#define UMAC_AUTH_TYPE_8021X 0x02
-#define UMAC_AUTH_TYPE_RSNA_PSK 0x04
-
-/* iwm_umac_security.flags: WPA supported -- bit [0:0] */
-#define UMAC_SEC_FLG_WPA_ON_POS 0
-#define UMAC_SEC_FLG_WPA_ON_SEED 1
-#define UMAC_SEC_FLG_WPA_ON_MSK (UMAC_SEC_FLG_WPA_ON_SEED << \
- UMAC_SEC_FLG_WPA_ON_POS)
-
-/* iwm_umac_security.flags: WPA2 supported -- bit [1:1] */
-#define UMAC_SEC_FLG_RSNA_ON_POS 1
-#define UMAC_SEC_FLG_RSNA_ON_SEED 1
-#define UMAC_SEC_FLG_RSNA_ON_MSK (UMAC_SEC_FLG_RSNA_ON_SEED << \
- UMAC_SEC_FLG_RSNA_ON_POS)
-
-/* iwm_umac_security.flags: WSC mode on -- bit [2:2] */
-#define UMAC_SEC_FLG_WSC_ON_POS 2
-#define UMAC_SEC_FLG_WSC_ON_SEED 1
-#define UMAC_SEC_FLG_WSC_ON_MSK (UMAC_SEC_FLG_WSC_ON_SEED << \
- UMAC_SEC_FLG_WSC_ON_POS)
-
-
-/* Legacy profile can use only WEP40 and WEP104 for encryption and
- * OPEN or PSK for authentication */
-#define UMAC_SEC_FLG_LEGACY_PROFILE 0
-
-struct iwm_umac_security {
- u8 auth_type;
- u8 ucast_cipher;
- u8 mcast_cipher;
- u8 flags;
-} __packed;
-
-struct iwm_umac_ibss {
-	u8 beacon_interval;	/* in milliseconds */
-	u8 atim;		/* in milliseconds */
- s8 join_only;
- u8 band;
- u8 channel;
- u8 reserved[3];
-} __packed;
-
-#define UMAC_MODE_BSS 0
-#define UMAC_MODE_IBSS 1
-
-#define UMAC_BSSID_MAX 4
-
-struct iwm_umac_profile {
- struct iwm_umac_wifi_if hdr;
- __le32 mode;
- struct iwm_umac_ssid ssid;
- u8 bssid[UMAC_BSSID_MAX][ETH_ALEN];
- struct iwm_umac_security sec;
- struct iwm_umac_ibss ibss;
- __le32 channel_2ghz;
- __le32 channel_5ghz;
- __le16 flags;
- u8 wireless_mode;
- u8 bss_num;
-} __packed;
-
-struct iwm_umac_invalidate_profile {
- struct iwm_umac_wifi_if hdr;
- u8 reason;
- u8 reserved[3];
-} __packed;
-
-/* Encryption key commands */
-struct iwm_umac_key_wep40 {
- struct iwm_umac_wifi_if hdr;
- struct iwm_umac_key_hdr key_hdr;
- u8 key[WLAN_KEY_LEN_WEP40];
- u8 static_key;
- u8 reserved[2];
-} __packed;
-
-struct iwm_umac_key_wep104 {
- struct iwm_umac_wifi_if hdr;
- struct iwm_umac_key_hdr key_hdr;
- u8 key[WLAN_KEY_LEN_WEP104];
- u8 static_key;
- u8 reserved[2];
-} __packed;
-
-#define IWM_TKIP_KEY_SIZE 16
-#define IWM_TKIP_MIC_SIZE 8
-struct iwm_umac_key_tkip {
- struct iwm_umac_wifi_if hdr;
- struct iwm_umac_key_hdr key_hdr;
- u8 iv_count[6];
- u8 reserved[2];
- u8 tkip_key[IWM_TKIP_KEY_SIZE];
- u8 mic_rx_key[IWM_TKIP_MIC_SIZE];
- u8 mic_tx_key[IWM_TKIP_MIC_SIZE];
-} __packed;
-
-struct iwm_umac_key_ccmp {
- struct iwm_umac_wifi_if hdr;
- struct iwm_umac_key_hdr key_hdr;
- u8 iv_count[6];
- u8 reserved[2];
- u8 key[WLAN_KEY_LEN_CCMP];
-} __packed;
-
-struct iwm_umac_key_remove {
- struct iwm_umac_wifi_if hdr;
- struct iwm_umac_key_hdr key_hdr;
-} __packed;
-
-struct iwm_umac_tx_key_id {
- struct iwm_umac_wifi_if hdr;
- u8 key_idx;
- u8 reserved[3];
-} __packed;
-
-struct iwm_umac_pwr_trigger {
- struct iwm_umac_wifi_if hdr;
-	__le32 reserved;
-} __packed;
-
-struct iwm_umac_cmd_stats_req {
- __le32 flags;
-} __packed;
-
-struct iwm_umac_cmd_stop_resume_tx {
- u8 flags;
- u8 sta_id;
- __le16 stop_resume_tid_msk;
- __le16 last_seq_num[IWM_UMAC_TID_NR];
- u16 reserved;
-} __packed;
-
-#define IWM_CMD_PMKID_ADD 1
-#define IWM_CMD_PMKID_DEL 2
-#define IWM_CMD_PMKID_FLUSH 3
-
-struct iwm_umac_pmkid_update {
- struct iwm_umac_wifi_if hdr;
- __le32 command;
- u8 bssid[ETH_ALEN];
- __le16 reserved;
- u8 pmkid[WLAN_PMKID_LEN];
-} __packed;
-
-/* LMAC commands */
-int iwm_read_mac(struct iwm_priv *iwm, u8 *mac);
-int iwm_send_prio_table(struct iwm_priv *iwm);
-int iwm_send_init_calib_cfg(struct iwm_priv *iwm, u8 calib_requested);
-int iwm_send_periodic_calib_cfg(struct iwm_priv *iwm, u8 calib_requested);
-int iwm_send_calib_results(struct iwm_priv *iwm);
-int iwm_store_rxiq_calib_result(struct iwm_priv *iwm);
-int iwm_send_ct_kill_cfg(struct iwm_priv *iwm, u8 entry, u8 exit);
-
-/* UMAC commands */
-int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size,
- bool resp);
-int iwm_send_umac_reset(struct iwm_priv *iwm, __le32 reset_flags, bool resp);
-int iwm_umac_set_config_fix(struct iwm_priv *iwm, u16 tbl, u16 key, u32 value);
-int iwm_umac_set_config_var(struct iwm_priv *iwm, u16 key,
- void *payload, u16 payload_size);
-int iwm_send_umac_config(struct iwm_priv *iwm, __le32 reset_flags);
-int iwm_send_mlme_profile(struct iwm_priv *iwm);
-int __iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
-int iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
-int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id);
-int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx);
-int iwm_set_key(struct iwm_priv *iwm, bool remove, struct iwm_key *key);
-int iwm_tx_power_trigger(struct iwm_priv *iwm);
-int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags);
-int iwm_send_umac_channel_list(struct iwm_priv *iwm);
-int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids,
- int ssid_num);
-int iwm_scan_one_ssid(struct iwm_priv *iwm, u8 *ssid, int ssid_len);
-int iwm_send_umac_stop_resume_tx(struct iwm_priv *iwm,
- struct iwm_umac_notif_stop_resume_tx *ntf);
-int iwm_send_pmkid_update(struct iwm_priv *iwm,
- struct cfg80211_pmksa *pmksa, u32 command);
-
-/* UDMA commands */
-int iwm_target_reset(struct iwm_priv *iwm);
-#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/debug.h b/drivers/net/wireless/iwmc3200wifi/debug.h
deleted file mode 100644
index a0c13a4..0000000
--- a/drivers/net/wireless/iwmc3200wifi/debug.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __IWM_DEBUG_H__
-#define __IWM_DEBUG_H__
-
-#define IWM_ERR(p, f, a...) dev_err(iwm_to_dev(p), f, ## a)
-#define IWM_WARN(p, f, a...) dev_warn(iwm_to_dev(p), f, ## a)
-#define IWM_INFO(p, f, a...) dev_info(iwm_to_dev(p), f, ## a)
-#define IWM_CRIT(p, f, a...) dev_crit(iwm_to_dev(p), f, ## a)
-
-#ifdef CONFIG_IWM_DEBUG
-
-#define IWM_DEBUG_MODULE(i, level, module, f, a...) \
-do { \
- if (unlikely(i->dbg.dbg_module[IWM_DM_##module] >= (IWM_DL_##level)))\
- dev_printk(KERN_INFO, (iwm_to_dev(i)), \
- "%s " f, __func__ , ## a); \
-} while (0)
-
-#define IWM_HEXDUMP(i, level, module, pref, buf, len) \
-do { \
- if (unlikely(i->dbg.dbg_module[IWM_DM_##module] >= (IWM_DL_##level)))\
- print_hex_dump(KERN_INFO, pref, DUMP_PREFIX_OFFSET, \
- 16, 1, buf, len, 1); \
-} while (0)
-
-#else
-
-#define IWM_DEBUG_MODULE(i, level, module, f, a...)
-#define IWM_HEXDUMP(i, level, module, pref, buf, len)
-
-#endif /* CONFIG_IWM_DEBUG */
-
-/* Debug modules */
-enum iwm_debug_module_id {
- IWM_DM_BOOT = 0,
- IWM_DM_FW,
- IWM_DM_SDIO,
- IWM_DM_NTF,
- IWM_DM_RX,
- IWM_DM_TX,
- IWM_DM_MLME,
- IWM_DM_CMD,
- IWM_DM_WEXT,
- __IWM_DM_NR,
-};
-#define IWM_DM_DEFAULT 0
-
-#define IWM_DBG_BOOT(i, l, f, a...) IWM_DEBUG_MODULE(i, l, BOOT, f, ## a)
-#define IWM_DBG_FW(i, l, f, a...) IWM_DEBUG_MODULE(i, l, FW, f, ## a)
-#define IWM_DBG_SDIO(i, l, f, a...) IWM_DEBUG_MODULE(i, l, SDIO, f, ## a)
-#define IWM_DBG_NTF(i, l, f, a...) IWM_DEBUG_MODULE(i, l, NTF, f, ## a)
-#define IWM_DBG_RX(i, l, f, a...) IWM_DEBUG_MODULE(i, l, RX, f, ## a)
-#define IWM_DBG_TX(i, l, f, a...) IWM_DEBUG_MODULE(i, l, TX, f, ## a)
-#define IWM_DBG_MLME(i, l, f, a...) IWM_DEBUG_MODULE(i, l, MLME, f, ## a)
-#define IWM_DBG_CMD(i, l, f, a...) IWM_DEBUG_MODULE(i, l, CMD, f, ## a)
-#define IWM_DBG_WEXT(i, l, f, a...) IWM_DEBUG_MODULE(i, l, WEXT, f, ## a)
-
-/* Debug levels */
-enum iwm_debug_level {
- IWM_DL_NONE = 0,
- IWM_DL_ERR,
- IWM_DL_WARN,
- IWM_DL_INFO,
- IWM_DL_DBG,
-};
-#define IWM_DL_DEFAULT IWM_DL_ERR
-
-struct iwm_debugfs {
- struct iwm_priv *iwm;
- struct dentry *rootdir;
- struct dentry *devdir;
- struct dentry *dbgdir;
- struct dentry *txdir;
- struct dentry *rxdir;
- struct dentry *busdir;
-
- u32 dbg_level;
- struct dentry *dbg_level_dentry;
-
- unsigned long dbg_modules;
- struct dentry *dbg_modules_dentry;
-
- u8 dbg_module[__IWM_DM_NR];
- struct dentry *dbg_module_dentries[__IWM_DM_NR];
-
- struct dentry *txq_dentry;
- struct dentry *tx_credit_dentry;
- struct dentry *rx_ticket_dentry;
-
- struct dentry *fw_err_dentry;
-};
-
-#ifdef CONFIG_IWM_DEBUG
-void iwm_debugfs_init(struct iwm_priv *iwm);
-void iwm_debugfs_exit(struct iwm_priv *iwm);
-#else
-static inline void iwm_debugfs_init(struct iwm_priv *iwm) {}
-static inline void iwm_debugfs_exit(struct iwm_priv *iwm) {}
-#endif
-
-#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
deleted file mode 100644
index b6199d1..0000000
--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
+++ /dev/null
@@ -1,488 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <linux/debugfs.h>
-#include <linux/export.h>
-
-#include "iwm.h"
-#include "bus.h"
-#include "rx.h"
-#include "debug.h"
-
-static struct {
- u8 id;
- char *name;
-} iwm_debug_module[__IWM_DM_NR] = {
- {IWM_DM_BOOT, "boot"},
- {IWM_DM_FW, "fw"},
- {IWM_DM_SDIO, "sdio"},
- {IWM_DM_NTF, "ntf"},
- {IWM_DM_RX, "rx"},
- {IWM_DM_TX, "tx"},
- {IWM_DM_MLME, "mlme"},
- {IWM_DM_CMD, "cmd"},
- {IWM_DM_WEXT, "wext"},
-};
-
-#define add_dbg_module(dbg, name, id, initlevel) \
-do { \
- dbg.dbg_module[id] = (initlevel); \
- dbg.dbg_module_dentries[id] = \
- debugfs_create_x8(name, 0600, \
- dbg.dbgdir, \
- &(dbg.dbg_module[id])); \
-} while (0)
-
-static int iwm_debugfs_u32_read(void *data, u64 *val)
-{
- struct iwm_priv *iwm = data;
-
- *val = iwm->dbg.dbg_level;
- return 0;
-}
-
-static int iwm_debugfs_dbg_level_write(void *data, u64 val)
-{
- struct iwm_priv *iwm = data;
- int i;
-
- iwm->dbg.dbg_level = val;
-
- for (i = 0; i < __IWM_DM_NR; i++)
- iwm->dbg.dbg_module[i] = val;
-
- return 0;
-}
-DEFINE_SIMPLE_ATTRIBUTE(fops_iwm_dbg_level,
- iwm_debugfs_u32_read, iwm_debugfs_dbg_level_write,
- "%llu\n");
-
-static int iwm_debugfs_dbg_modules_write(void *data, u64 val)
-{
- struct iwm_priv *iwm = data;
- int i, bit;
-
- iwm->dbg.dbg_modules = val;
-
- for (i = 0; i < __IWM_DM_NR; i++)
- iwm->dbg.dbg_module[i] = 0;
-
- for_each_set_bit(bit, &iwm->dbg.dbg_modules, __IWM_DM_NR)
- iwm->dbg.dbg_module[bit] = iwm->dbg.dbg_level;
-
- return 0;
-}
-DEFINE_SIMPLE_ATTRIBUTE(fops_iwm_dbg_modules,
- iwm_debugfs_u32_read, iwm_debugfs_dbg_modules_write,
- "%llu\n");
-
-
-static ssize_t iwm_debugfs_txq_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct iwm_priv *iwm = filp->private_data;
- char *buf;
- int i, buf_len = 4096;
- size_t len = 0;
- ssize_t ret;
-
- if (*ppos != 0)
- return 0;
- if (count < sizeof(buf))
- return -ENOSPC;
-
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- for (i = 0; i < IWM_TX_QUEUES; i++) {
- struct iwm_tx_queue *txq = &iwm->txq[i];
- struct sk_buff *skb;
- int j;
- unsigned long flags;
-
- spin_lock_irqsave(&txq->queue.lock, flags);
-
- skb = (struct sk_buff *)&txq->queue;
-
- len += snprintf(buf + len, buf_len - len, "TXQ #%d\n", i);
- len += snprintf(buf + len, buf_len - len, "\tStopped: %d\n",
- __netif_subqueue_stopped(iwm_to_ndev(iwm),
- txq->id));
- len += snprintf(buf + len, buf_len - len, "\tConcat count:%d\n",
- txq->concat_count);
- len += snprintf(buf + len, buf_len - len, "\tQueue len: %d\n",
- skb_queue_len(&txq->queue));
- for (j = 0; j < skb_queue_len(&txq->queue); j++) {
- struct iwm_tx_info *tx_info;
-
- skb = skb->next;
- tx_info = skb_to_tx_info(skb);
-
- len += snprintf(buf + len, buf_len - len,
- "\tSKB #%d\n", j);
- len += snprintf(buf + len, buf_len - len,
- "\t\tsta: %d\n", tx_info->sta);
- len += snprintf(buf + len, buf_len - len,
- "\t\tcolor: %d\n", tx_info->color);
- len += snprintf(buf + len, buf_len - len,
- "\t\ttid: %d\n", tx_info->tid);
- }
-
- spin_unlock_irqrestore(&txq->queue.lock, flags);
-
- spin_lock_irqsave(&txq->stopped_queue.lock, flags);
-
- len += snprintf(buf + len, buf_len - len,
- "\tStopped Queue len: %d\n",
- skb_queue_len(&txq->stopped_queue));
- for (j = 0; j < skb_queue_len(&txq->stopped_queue); j++) {
- struct iwm_tx_info *tx_info;
-
- skb = skb->next;
- tx_info = skb_to_tx_info(skb);
-
- len += snprintf(buf + len, buf_len - len,
- "\tSKB #%d\n", j);
- len += snprintf(buf + len, buf_len - len,
- "\t\tsta: %d\n", tx_info->sta);
- len += snprintf(buf + len, buf_len - len,
- "\t\tcolor: %d\n", tx_info->color);
- len += snprintf(buf + len, buf_len - len,
- "\t\ttid: %d\n", tx_info->tid);
- }
-
- spin_unlock_irqrestore(&txq->stopped_queue.lock, flags);
- }
-
- ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
- kfree(buf);
-
- return ret;
-}
-
-static ssize_t iwm_debugfs_tx_credit_read(struct file *filp,
- char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct iwm_priv *iwm = filp->private_data;
- struct iwm_tx_credit *credit = &iwm->tx_credit;
- char *buf;
- int i, buf_len = 4096;
- size_t len = 0;
- ssize_t ret;
-
- if (*ppos != 0)
- return 0;
- if (count < sizeof(buf))
- return -ENOSPC;
-
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- len += snprintf(buf + len, buf_len - len,
- "NR pools: %d\n", credit->pool_nr);
- len += snprintf(buf + len, buf_len - len,
- "pools map: 0x%lx\n", credit->full_pools_map);
-
- len += snprintf(buf + len, buf_len - len, "\n### POOLS ###\n");
- for (i = 0; i < IWM_MACS_OUT_GROUPS; i++) {
- len += snprintf(buf + len, buf_len - len,
- "pools entry #%d\n", i);
- len += snprintf(buf + len, buf_len - len,
- "\tid: %d\n",
- credit->pools[i].id);
- len += snprintf(buf + len, buf_len - len,
- "\tsid: %d\n",
- credit->pools[i].sid);
- len += snprintf(buf + len, buf_len - len,
- "\tmin_pages: %d\n",
- credit->pools[i].min_pages);
- len += snprintf(buf + len, buf_len - len,
- "\tmax_pages: %d\n",
- credit->pools[i].max_pages);
- len += snprintf(buf + len, buf_len - len,
- "\talloc_pages: %d\n",
- credit->pools[i].alloc_pages);
- len += snprintf(buf + len, buf_len - len,
- "\tfreed_pages: %d\n",
- credit->pools[i].total_freed_pages);
- }
-
- len += snprintf(buf + len, buf_len - len, "\n### SPOOLS ###\n");
- for (i = 0; i < IWM_MACS_OUT_SGROUPS; i++) {
- len += snprintf(buf + len, buf_len - len,
- "spools entry #%d\n", i);
- len += snprintf(buf + len, buf_len - len,
- "\tid: %d\n",
- credit->spools[i].id);
- len += snprintf(buf + len, buf_len - len,
- "\tmax_pages: %d\n",
- credit->spools[i].max_pages);
- len += snprintf(buf + len, buf_len - len,
- "\talloc_pages: %d\n",
- credit->spools[i].alloc_pages);
-
- }
-
- ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
- kfree(buf);
-
- return ret;
-}
-
-static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
- char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct iwm_priv *iwm = filp->private_data;
- struct iwm_rx_ticket_node *ticket;
- char *buf;
- int buf_len = 4096, i;
- size_t len = 0;
- ssize_t ret;
-
- if (*ppos != 0)
- return 0;
- if (count < sizeof(buf))
- return -ENOSPC;
-
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- spin_lock(&iwm->ticket_lock);
- list_for_each_entry(ticket, &iwm->rx_tickets, node) {
- len += snprintf(buf + len, buf_len - len, "Ticket #%d\n",
- ticket->ticket->id);
- len += snprintf(buf + len, buf_len - len, "\taction: 0x%x\n",
- ticket->ticket->action);
- len += snprintf(buf + len, buf_len - len, "\tflags: 0x%x\n",
- ticket->ticket->flags);
- }
- spin_unlock(&iwm->ticket_lock);
-
- for (i = 0; i < IWM_RX_ID_HASH; i++) {
- struct iwm_rx_packet *packet;
- struct list_head *pkt_list = &iwm->rx_packets[i];
-
- if (!list_empty(pkt_list)) {
- len += snprintf(buf + len, buf_len - len,
- "Packet hash #%d\n", i);
- spin_lock(&iwm->packet_lock[i]);
- list_for_each_entry(packet, pkt_list, node) {
- len += snprintf(buf + len, buf_len - len,
- "\tPacket id: %d\n",
- packet->id);
- len += snprintf(buf + len, buf_len - len,
- "\tPacket length: %lu\n",
- packet->pkt_size);
- }
- spin_unlock(&iwm->packet_lock[i]);
- }
- }
-
- ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
- kfree(buf);
-
- return ret;
-}
-
-static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
- char __user *buffer,
- size_t count, loff_t *ppos)
-{
-
- struct iwm_priv *iwm = filp->private_data;
- char buf[512];
- int buf_len = 512;
- size_t len = 0;
-
- if (*ppos != 0)
- return 0;
- if (count < sizeof(buf))
- return -ENOSPC;
-
- if (!iwm->last_fw_err)
- return -ENOMEM;
-
- if (iwm->last_fw_err->line_num == 0)
- goto out;
-
- len += snprintf(buf + len, buf_len - len, "%cMAC FW ERROR:\n",
- (le32_to_cpu(iwm->last_fw_err->category) == UMAC_SYS_ERR_CAT_LMAC)
- ? 'L' : 'U');
- len += snprintf(buf + len, buf_len - len,
- "\tCategory: %d\n",
- le32_to_cpu(iwm->last_fw_err->category));
-
- len += snprintf(buf + len, buf_len - len,
- "\tStatus: 0x%x\n",
- le32_to_cpu(iwm->last_fw_err->status));
-
- len += snprintf(buf + len, buf_len - len,
- "\tPC: 0x%x\n",
- le32_to_cpu(iwm->last_fw_err->pc));
-
- len += snprintf(buf + len, buf_len - len,
- "\tblink1: %d\n",
- le32_to_cpu(iwm->last_fw_err->blink1));
-
- len += snprintf(buf + len, buf_len - len,
- "\tblink2: %d\n",
- le32_to_cpu(iwm->last_fw_err->blink2));
-
- len += snprintf(buf + len, buf_len - len,
- "\tilink1: %d\n",
- le32_to_cpu(iwm->last_fw_err->ilink1));
-
- len += snprintf(buf + len, buf_len - len,
- "\tilink2: %d\n",
- le32_to_cpu(iwm->last_fw_err->ilink2));
-
- len += snprintf(buf + len, buf_len - len,
- "\tData1: 0x%x\n",
- le32_to_cpu(iwm->last_fw_err->data1));
-
- len += snprintf(buf + len, buf_len - len,
- "\tData2: 0x%x\n",
- le32_to_cpu(iwm->last_fw_err->data2));
-
- len += snprintf(buf + len, buf_len - len,
- "\tLine number: %d\n",
- le32_to_cpu(iwm->last_fw_err->line_num));
-
- len += snprintf(buf + len, buf_len - len,
- "\tUMAC status: 0x%x\n",
- le32_to_cpu(iwm->last_fw_err->umac_status));
-
- len += snprintf(buf + len, buf_len - len,
- "\tLMAC status: 0x%x\n",
- le32_to_cpu(iwm->last_fw_err->lmac_status));
-
- len += snprintf(buf + len, buf_len - len,
- "\tSDIO status: 0x%x\n",
- le32_to_cpu(iwm->last_fw_err->sdio_status));
-
-out:
-
- return simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
-}
-
-static const struct file_operations iwm_debugfs_txq_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = iwm_debugfs_txq_read,
- .llseek = default_llseek,
-};
-
-static const struct file_operations iwm_debugfs_tx_credit_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = iwm_debugfs_tx_credit_read,
- .llseek = default_llseek,
-};
-
-static const struct file_operations iwm_debugfs_rx_ticket_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = iwm_debugfs_rx_ticket_read,
- .llseek = default_llseek,
-};
-
-static const struct file_operations iwm_debugfs_fw_err_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = iwm_debugfs_fw_err_read,
- .llseek = default_llseek,
-};
-
-void iwm_debugfs_init(struct iwm_priv *iwm)
-{
- int i;
-
- iwm->dbg.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- iwm->dbg.devdir = debugfs_create_dir(wiphy_name(iwm_to_wiphy(iwm)),
- iwm->dbg.rootdir);
- iwm->dbg.dbgdir = debugfs_create_dir("debug", iwm->dbg.devdir);
- iwm->dbg.rxdir = debugfs_create_dir("rx", iwm->dbg.devdir);
- iwm->dbg.txdir = debugfs_create_dir("tx", iwm->dbg.devdir);
- iwm->dbg.busdir = debugfs_create_dir("bus", iwm->dbg.devdir);
- if (iwm->bus_ops->debugfs_init)
- iwm->bus_ops->debugfs_init(iwm, iwm->dbg.busdir);
-
- iwm->dbg.dbg_level = IWM_DL_NONE;
- iwm->dbg.dbg_level_dentry =
- debugfs_create_file("level", 0200, iwm->dbg.dbgdir, iwm,
- &fops_iwm_dbg_level);
-
- iwm->dbg.dbg_modules = IWM_DM_DEFAULT;
- iwm->dbg.dbg_modules_dentry =
- debugfs_create_file("modules", 0200, iwm->dbg.dbgdir, iwm,
- &fops_iwm_dbg_modules);
-
- for (i = 0; i < __IWM_DM_NR; i++)
- add_dbg_module(iwm->dbg, iwm_debug_module[i].name,
- iwm_debug_module[i].id, IWM_DL_DEFAULT);
-
- iwm->dbg.txq_dentry = debugfs_create_file("queues", 0200,
- iwm->dbg.txdir, iwm,
- &iwm_debugfs_txq_fops);
- iwm->dbg.tx_credit_dentry = debugfs_create_file("credits", 0200,
- iwm->dbg.txdir, iwm,
- &iwm_debugfs_tx_credit_fops);
- iwm->dbg.rx_ticket_dentry = debugfs_create_file("tickets", 0200,
- iwm->dbg.rxdir, iwm,
- &iwm_debugfs_rx_ticket_fops);
- iwm->dbg.fw_err_dentry = debugfs_create_file("last_fw_err", 0200,
- iwm->dbg.dbgdir, iwm,
- &iwm_debugfs_fw_err_fops);
-}
-
-void iwm_debugfs_exit(struct iwm_priv *iwm)
-{
- int i;
-
- for (i = 0; i < __IWM_DM_NR; i++)
- debugfs_remove(iwm->dbg.dbg_module_dentries[i]);
-
- debugfs_remove(iwm->dbg.dbg_modules_dentry);
- debugfs_remove(iwm->dbg.dbg_level_dentry);
- debugfs_remove(iwm->dbg.txq_dentry);
- debugfs_remove(iwm->dbg.tx_credit_dentry);
- debugfs_remove(iwm->dbg.rx_ticket_dentry);
- debugfs_remove(iwm->dbg.fw_err_dentry);
- if (iwm->bus_ops->debugfs_exit)
- iwm->bus_ops->debugfs_exit(iwm);
-
- debugfs_remove(iwm->dbg.busdir);
- debugfs_remove(iwm->dbg.dbgdir);
- debugfs_remove(iwm->dbg.txdir);
- debugfs_remove(iwm->dbg.rxdir);
- debugfs_remove(iwm->dbg.devdir);
- debugfs_remove(iwm->dbg.rootdir);
-}
diff --git a/drivers/net/wireless/iwmc3200wifi/eeprom.c b/drivers/net/wireless/iwmc3200wifi/eeprom.c
deleted file mode 100644
index e80e776..0000000
--- a/drivers/net/wireless/iwmc3200wifi/eeprom.c
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-
-#include "iwm.h"
-#include "umac.h"
-#include "commands.h"
-#include "eeprom.h"
-
-static struct iwm_eeprom_entry eeprom_map[] = {
- [IWM_EEPROM_SIG] =
- {"Signature", IWM_EEPROM_SIG_OFF, IWM_EEPROM_SIG_LEN},
-
- [IWM_EEPROM_VERSION] =
- {"Version", IWM_EEPROM_VERSION_OFF, IWM_EEPROM_VERSION_LEN},
-
- [IWM_EEPROM_OEM_HW_VERSION] =
- {"OEM HW version", IWM_EEPROM_OEM_HW_VERSION_OFF,
- IWM_EEPROM_OEM_HW_VERSION_LEN},
-
- [IWM_EEPROM_MAC_VERSION] =
- {"MAC version", IWM_EEPROM_MAC_VERSION_OFF, IWM_EEPROM_MAC_VERSION_LEN},
-
- [IWM_EEPROM_CARD_ID] =
- {"Card ID", IWM_EEPROM_CARD_ID_OFF, IWM_EEPROM_CARD_ID_LEN},
-
- [IWM_EEPROM_RADIO_CONF] =
- {"Radio config", IWM_EEPROM_RADIO_CONF_OFF, IWM_EEPROM_RADIO_CONF_LEN},
-
- [IWM_EEPROM_SKU_CAP] =
- {"SKU capabilities", IWM_EEPROM_SKU_CAP_OFF, IWM_EEPROM_SKU_CAP_LEN},
-
- [IWM_EEPROM_FAT_CHANNELS_CAP] =
- {"HT channels capabilities", IWM_EEPROM_FAT_CHANNELS_CAP_OFF,
- IWM_EEPROM_FAT_CHANNELS_CAP_LEN},
-
- [IWM_EEPROM_CALIB_RXIQ_OFFSET] =
- {"RX IQ offset", IWM_EEPROM_CALIB_RXIQ_OFF, IWM_EEPROM_INDIRECT_LEN},
-
- [IWM_EEPROM_CALIB_RXIQ] =
- {"Calib RX IQ", 0, IWM_EEPROM_CALIB_RXIQ_LEN},
-};
-
-
-static int iwm_eeprom_read(struct iwm_priv *iwm, u8 eeprom_id)
-{
- int ret;
- u32 entry_size, chunk_size, data_offset = 0, addr_offset = 0;
- u32 addr;
- struct iwm_udma_wifi_cmd udma_cmd;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_umac_cmd_eeprom_proxy eeprom_cmd;
-
- if (eeprom_id > (IWM_EEPROM_LAST - 1))
- return -EINVAL;
-
- entry_size = eeprom_map[eeprom_id].length;
-
- if (eeprom_id >= IWM_EEPROM_INDIRECT_DATA) {
- /* indirect data */
- u32 off_id = eeprom_id - IWM_EEPROM_INDIRECT_DATA +
- IWM_EEPROM_INDIRECT_OFFSET;
-
- eeprom_map[eeprom_id].offset =
- *(u16 *)(iwm->eeprom + eeprom_map[off_id].offset) << 1;
- }
-
- addr = eeprom_map[eeprom_id].offset;
-
- udma_cmd.eop = 1;
- udma_cmd.credit_group = 0x4;
- udma_cmd.ra_tid = UMAC_HDI_ACT_TBL_IDX_HOST_CMD;
- udma_cmd.lmac_offset = 0;
-
- umac_cmd.id = UMAC_CMD_OPCODE_EEPROM_PROXY;
- umac_cmd.resp = 1;
-
- while (entry_size > 0) {
- chunk_size = min_t(u32, entry_size, IWM_MAX_EEPROM_DATA_LEN);
-
- eeprom_cmd.hdr.type =
- cpu_to_le32(IWM_UMAC_CMD_EEPROM_TYPE_READ);
- eeprom_cmd.hdr.offset = cpu_to_le32(addr + addr_offset);
- eeprom_cmd.hdr.len = cpu_to_le32(chunk_size);
-
- ret = iwm_hal_send_umac_cmd(iwm, &udma_cmd,
- &umac_cmd, &eeprom_cmd,
- sizeof(struct iwm_umac_cmd_eeprom_proxy));
- if (ret < 0) {
- IWM_ERR(iwm, "Couldn't read eeprom\n");
- return ret;
- }
-
- ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_EEPROM_PROXY,
- IWM_SRC_UMAC, 2*HZ);
- if (ret < 0) {
- IWM_ERR(iwm, "Did not get any eeprom answer\n");
- return ret;
- }
-
- data_offset += chunk_size;
- addr_offset += chunk_size;
- entry_size -= chunk_size;
- }
-
- return 0;
-}
-
-u8 *iwm_eeprom_access(struct iwm_priv *iwm, u8 eeprom_id)
-{
- if (!iwm->eeprom)
- return ERR_PTR(-ENODEV);
-
- return iwm->eeprom + eeprom_map[eeprom_id].offset;
-}
-
-int iwm_eeprom_fat_channels(struct iwm_priv *iwm)
-{
- struct wiphy *wiphy = iwm_to_wiphy(iwm);
- struct ieee80211_supported_band *band;
- u16 *channels, i;
-
- channels = (u16 *)iwm_eeprom_access(iwm, IWM_EEPROM_FAT_CHANNELS_CAP);
- if (IS_ERR(channels))
- return PTR_ERR(channels);
-
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
- band->ht_cap.ht_supported = true;
-
- for (i = 0; i < IWM_EEPROM_FAT_CHANNELS_24; i++)
- if (!(channels[i] & IWM_EEPROM_FAT_CHANNEL_ENABLED))
- band->ht_cap.ht_supported = false;
-
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
- band->ht_cap.ht_supported = true;
- for (i = IWM_EEPROM_FAT_CHANNELS_24; i < IWM_EEPROM_FAT_CHANNELS; i++)
- if (!(channels[i] & IWM_EEPROM_FAT_CHANNEL_ENABLED))
- band->ht_cap.ht_supported = false;
-
- return 0;
-}
-
-u32 iwm_eeprom_wireless_mode(struct iwm_priv *iwm)
-{
- u16 sku_cap;
- u32 wireless_mode = 0;
-
- sku_cap = *((u16 *)iwm_eeprom_access(iwm, IWM_EEPROM_SKU_CAP));
-
- if (sku_cap & IWM_EEPROM_SKU_CAP_BAND_24GHZ)
- wireless_mode |= WIRELESS_MODE_11G;
-
- if (sku_cap & IWM_EEPROM_SKU_CAP_BAND_52GHZ)
- wireless_mode |= WIRELESS_MODE_11A;
-
- if (sku_cap & IWM_EEPROM_SKU_CAP_11N_ENABLE)
- wireless_mode |= WIRELESS_MODE_11N;
-
- return wireless_mode;
-}
-
-
-int iwm_eeprom_init(struct iwm_priv *iwm)
-{
- int i, ret = 0;
- char name[32];
-
- iwm->eeprom = kzalloc(IWM_EEPROM_LEN, GFP_KERNEL);
- if (!iwm->eeprom)
- return -ENOMEM;
-
- for (i = IWM_EEPROM_FIRST; i < IWM_EEPROM_LAST; i++) {
- ret = iwm_eeprom_read(iwm, i);
- if (ret < 0) {
- IWM_ERR(iwm, "Couldn't read eeprom entry #%d: %s\n",
- i, eeprom_map[i].name);
- break;
- }
- }
-
- IWM_DBG_BOOT(iwm, DBG, "EEPROM dump:\n");
- for (i = IWM_EEPROM_FIRST; i < IWM_EEPROM_LAST; i++) {
- memset(name, 0, 32);
- sprintf(name, "%s: ", eeprom_map[i].name);
-
- IWM_HEXDUMP(iwm, DBG, BOOT, name,
- iwm->eeprom + eeprom_map[i].offset,
- eeprom_map[i].length);
- }
-
- return ret;
-}
-
-void iwm_eeprom_exit(struct iwm_priv *iwm)
-{
- kfree(iwm->eeprom);
-}
diff --git a/drivers/net/wireless/iwmc3200wifi/eeprom.h b/drivers/net/wireless/iwmc3200wifi/eeprom.h
deleted file mode 100644
index 4e3a3fd..0000000
--- a/drivers/net/wireless/iwmc3200wifi/eeprom.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#ifndef __IWM_EEPROM_H__
-#define __IWM_EEPROM_H__
-
-enum {
- IWM_EEPROM_SIG = 0,
- IWM_EEPROM_FIRST = IWM_EEPROM_SIG,
- IWM_EEPROM_VERSION,
- IWM_EEPROM_OEM_HW_VERSION,
- IWM_EEPROM_MAC_VERSION,
- IWM_EEPROM_CARD_ID,
- IWM_EEPROM_RADIO_CONF,
- IWM_EEPROM_SKU_CAP,
- IWM_EEPROM_FAT_CHANNELS_CAP,
-
- IWM_EEPROM_INDIRECT_OFFSET,
- IWM_EEPROM_CALIB_RXIQ_OFFSET = IWM_EEPROM_INDIRECT_OFFSET,
-
- IWM_EEPROM_INDIRECT_DATA,
- IWM_EEPROM_CALIB_RXIQ = IWM_EEPROM_INDIRECT_DATA,
-
- IWM_EEPROM_LAST,
-};
-
-#define IWM_EEPROM_SIG_OFF 0x00
-#define IWM_EEPROM_VERSION_OFF (0x54 << 1)
-#define IWM_EEPROM_OEM_HW_VERSION_OFF (0x56 << 1)
-#define IWM_EEPROM_MAC_VERSION_OFF (0x30 << 1)
-#define IWM_EEPROM_CARD_ID_OFF (0x5d << 1)
-#define IWM_EEPROM_RADIO_CONF_OFF (0x58 << 1)
-#define IWM_EEPROM_SKU_CAP_OFF (0x55 << 1)
-#define IWM_EEPROM_CALIB_CONFIG_OFF (0x7c << 1)
-#define IWM_EEPROM_FAT_CHANNELS_CAP_OFF (0xde << 1)
-
-#define IWM_EEPROM_SIG_LEN 4
-#define IWM_EEPROM_VERSION_LEN 2
-#define IWM_EEPROM_OEM_HW_VERSION_LEN 2
-#define IWM_EEPROM_MAC_VERSION_LEN 1
-#define IWM_EEPROM_CARD_ID_LEN 2
-#define IWM_EEPROM_RADIO_CONF_LEN 2
-#define IWM_EEPROM_SKU_CAP_LEN 2
-#define IWM_EEPROM_FAT_CHANNELS_CAP_LEN 40
-#define IWM_EEPROM_INDIRECT_LEN 2
-
-#define IWM_MAX_EEPROM_DATA_LEN 240
-#define IWM_EEPROM_LEN 0x800
-
-#define IWM_EEPROM_MIN_ALLOWED_VERSION 0x0610
-#define IWM_EEPROM_MAX_ALLOWED_VERSION 0x0700
-#define IWM_EEPROM_CURRENT_VERSION 0x0612
-
-#define IWM_EEPROM_SKU_CAP_BAND_24GHZ (1 << 4)
-#define IWM_EEPROM_SKU_CAP_BAND_52GHZ (1 << 5)
-#define IWM_EEPROM_SKU_CAP_11N_ENABLE (1 << 6)
-
-#define IWM_EEPROM_FAT_CHANNELS 20
-/* 2.4 GHz FAT primary channels: 1, 2, 3, 4, 5, 6, 7, 8, 9 */
-#define IWM_EEPROM_FAT_CHANNELS_24 9
-/* 5.2 GHz FAT primary channels: 36,44,52,60,100,108,116,124,132,149,157 */
-#define IWM_EEPROM_FAT_CHANNELS_52 11
-
-#define IWM_EEPROM_FAT_CHANNEL_ENABLED (1 << 0)
-
-enum {
- IWM_EEPROM_CALIB_CAL_HDR,
- IWM_EEPROM_CALIB_TX_POWER,
- IWM_EEPROM_CALIB_XTAL,
- IWM_EEPROM_CALIB_TEMPERATURE,
- IWM_EEPROM_CALIB_RX_BB_FILTER,
- IWM_EEPROM_CALIB_RX_IQ,
- IWM_EEPROM_CALIB_MAX,
-};
-
-#define IWM_EEPROM_CALIB_RXIQ_OFF (IWM_EEPROM_CALIB_CONFIG_OFF + \
- (IWM_EEPROM_CALIB_RX_IQ << 1))
-#define IWM_EEPROM_CALIB_RXIQ_LEN sizeof(struct iwm_lmac_calib_rxiq)
-
-struct iwm_eeprom_entry {
- char *name;
- u32 offset;
- u32 length;
-};
-
-int iwm_eeprom_init(struct iwm_priv *iwm);
-void iwm_eeprom_exit(struct iwm_priv *iwm);
-u8 *iwm_eeprom_access(struct iwm_priv *iwm, u8 eeprom_id);
-int iwm_eeprom_fat_channels(struct iwm_priv *iwm);
-u32 iwm_eeprom_wireless_mode(struct iwm_priv *iwm);
-
-#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/fw.c b/drivers/net/wireless/iwmc3200wifi/fw.c
deleted file mode 100644
index 6f1afe6..0000000
--- a/drivers/net/wireless/iwmc3200wifi/fw.c
+++ /dev/null
@@ -1,416 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/firmware.h>
-
-#include "iwm.h"
-#include "bus.h"
-#include "hal.h"
-#include "umac.h"
-#include "debug.h"
-#include "fw.h"
-#include "commands.h"
-
-static const char fw_barker[] = "*WESTOPFORNOONE*";
-
-/*
- * @op_code: Op code we're looking for.
- * @index: There can be several instances of the same opcode within
- * the firmware. Index specifies which one we're looking for.
- */
-static int iwm_fw_op_offset(struct iwm_priv *iwm, const struct firmware *fw,
- u16 op_code, u32 index)
-{
- int offset = -EINVAL, fw_offset;
- u32 op_index = 0;
- const u8 *fw_ptr;
- struct iwm_fw_hdr_rec *rec;
-
- fw_offset = 0;
- fw_ptr = fw->data;
-
- /* We first need to look for the firmware barker */
- if (memcmp(fw_ptr, fw_barker, IWM_HDR_BARKER_LEN)) {
- IWM_ERR(iwm, "No barker string in this FW\n");
- return -EINVAL;
- }
-
- if (fw->size < IWM_HDR_LEN) {
- IWM_ERR(iwm, "FW is too small (%zu)\n", fw->size);
- return -EINVAL;
- }
-
- fw_offset += IWM_HDR_BARKER_LEN;
-
- while (fw_offset < fw->size) {
- rec = (struct iwm_fw_hdr_rec *)(fw_ptr + fw_offset);
-
- IWM_DBG_FW(iwm, DBG, "FW: op_code: 0x%x, len: %d @ 0x%x\n",
- rec->op_code, rec->len, fw_offset);
-
- if (rec->op_code == IWM_HDR_REC_OP_INVALID) {
- IWM_DBG_FW(iwm, DBG, "Reached INVALID op code\n");
- break;
- }
-
- if (rec->op_code == op_code) {
- if (op_index == index) {
- fw_offset += sizeof(struct iwm_fw_hdr_rec);
- offset = fw_offset;
- goto out;
- }
- op_index++;
- }
-
- fw_offset += sizeof(struct iwm_fw_hdr_rec) + rec->len;
- }
-
- out:
- return offset;
-}
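As a hedged illustration of the @index parameter documented above, the following sketch (a hypothetical helper; iwm_load_img() below open-codes the same lookup) returns the Nth OP_MEM_DESC payload:

static struct iwm_fw_img_desc *
iwm_fw_nth_mem_desc(struct iwm_priv *iwm, const struct firmware *fw, u32 n)
{
	/* Index n picks the nth record carrying this op code */
	int off = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_MEM_DESC, n);

	if (off < 0)
		return NULL;

	return (struct iwm_fw_img_desc *)(fw->data + off);
}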
-
-static int iwm_load_firmware_chunk(struct iwm_priv *iwm,
- const struct firmware *fw,
- struct iwm_fw_img_desc *img_desc)
-{
- struct iwm_udma_nonwifi_cmd target_cmd;
- u32 chunk_size;
- const u8 *chunk_ptr;
- int ret = 0;
-
- IWM_DBG_FW(iwm, INFO, "Loading FW chunk: %d bytes @ 0x%x\n",
- img_desc->length, img_desc->address);
-
- target_cmd.opcode = UMAC_HDI_OUT_OPCODE_WRITE;
- target_cmd.handle_by_hw = 1;
- target_cmd.op2 = 0;
- target_cmd.resp = 0;
- target_cmd.eop = 1;
-
- chunk_size = img_desc->length;
- chunk_ptr = fw->data + img_desc->offset;
-
- while (chunk_size > 0) {
- u32 tmp_chunk_size;
-
- tmp_chunk_size = min_t(u32, chunk_size,
- IWM_MAX_NONWIFI_CMD_BUFF_SIZE);
-
- target_cmd.addr = cpu_to_le32(img_desc->address +
- (chunk_ptr - fw->data - img_desc->offset));
- target_cmd.op1_sz = cpu_to_le32(tmp_chunk_size);
-
- IWM_DBG_FW(iwm, DBG, "\t%d bytes @ 0x%x\n",
- tmp_chunk_size, target_cmd.addr);
-
- ret = iwm_hal_send_target_cmd(iwm, &target_cmd, chunk_ptr);
- if (ret < 0) {
- IWM_ERR(iwm, "Couldn't load FW chunk\n");
- break;
- }
-
- chunk_size -= tmp_chunk_size;
- chunk_ptr += tmp_chunk_size;
- }
-
- return ret;
-}
-/*
- * To load a fw image to the target, we basically go through the
- * fw, looking for OP_MEM_DESC records. Once we find one, we
- * pass it to iwm_load_firmware_chunk().
- * The OP_MEM_DESC records contain the actual memory chunk to be
- * sent, but also the destination address.
- */
-static int iwm_load_img(struct iwm_priv *iwm, const char *img_name)
-{
- const struct firmware *fw;
- struct iwm_fw_img_desc *img_desc;
- struct iwm_fw_img_ver *ver;
- int ret = 0, fw_offset;
- u32 opcode_idx = 0, build_date;
- char *build_tag;
-
- ret = request_firmware(&fw, img_name, iwm_to_dev(iwm));
- if (ret) {
-		IWM_ERR(iwm, "Request firmware failed\n");
- return ret;
- }
-
- IWM_DBG_FW(iwm, INFO, "Start to load FW %s\n", img_name);
-
- while (1) {
- fw_offset = iwm_fw_op_offset(iwm, fw,
- IWM_HDR_REC_OP_MEM_DESC,
- opcode_idx);
- if (fw_offset < 0)
- break;
-
- img_desc = (struct iwm_fw_img_desc *)(fw->data + fw_offset);
- ret = iwm_load_firmware_chunk(iwm, fw, img_desc);
- if (ret < 0)
- goto err_release_fw;
- opcode_idx++;
- }
-
- /* Read firmware version */
- fw_offset = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_SW_VER, 0);
- if (fw_offset < 0)
- goto err_release_fw;
-
- ver = (struct iwm_fw_img_ver *)(fw->data + fw_offset);
-
- /* Read build tag */
- fw_offset = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_BUILD_TAG, 0);
- if (fw_offset < 0)
- goto err_release_fw;
-
- build_tag = (char *)(fw->data + fw_offset);
-
- /* Read build date */
- fw_offset = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_BUILD_DATE, 0);
- if (fw_offset < 0)
- goto err_release_fw;
-
- build_date = *(u32 *)(fw->data + fw_offset);
-
- IWM_INFO(iwm, "%s:\n", img_name);
- IWM_INFO(iwm, "\tVersion: %02X.%02X\n", ver->major, ver->minor);
- IWM_INFO(iwm, "\tBuild tag: %s\n", build_tag);
- IWM_INFO(iwm, "\tBuild date: %x-%x-%x\n",
- IWM_BUILD_YEAR(build_date), IWM_BUILD_MONTH(build_date),
- IWM_BUILD_DAY(build_date));
-
- if (!strcmp(img_name, iwm->bus_ops->umac_name))
- sprintf(iwm->umac_version, "%02X.%02X",
- ver->major, ver->minor);
-
- if (!strcmp(img_name, iwm->bus_ops->lmac_name))
- sprintf(iwm->lmac_version, "%02X.%02X",
- ver->major, ver->minor);
-
- err_release_fw:
- release_firmware(fw);
-
- return ret;
-}
-
-static int iwm_load_umac(struct iwm_priv *iwm)
-{
- struct iwm_udma_nonwifi_cmd target_cmd;
- int ret;
-
- ret = iwm_load_img(iwm, iwm->bus_ops->umac_name);
- if (ret < 0)
- return ret;
-
- /* We've loaded the UMAC, we can tell the target to jump there */
- target_cmd.opcode = UMAC_HDI_OUT_OPCODE_JUMP;
- target_cmd.addr = cpu_to_le32(UMAC_MU_FW_INST_DATA_12_ADDR);
- target_cmd.op1_sz = 0;
- target_cmd.op2 = 0;
- target_cmd.handle_by_hw = 0;
-	target_cmd.resp = 1;
- target_cmd.eop = 1;
-
- ret = iwm_hal_send_target_cmd(iwm, &target_cmd, NULL);
- if (ret < 0)
- IWM_ERR(iwm, "Couldn't send JMP command\n");
-
- return ret;
-}
-
-static int iwm_load_lmac(struct iwm_priv *iwm, const char *img_name)
-{
- int ret;
-
- ret = iwm_load_img(iwm, img_name);
- if (ret < 0)
- return ret;
-
- return iwm_send_umac_reset(iwm,
- cpu_to_le32(UMAC_RST_CTRL_FLG_LARC_CLK_EN), 0);
-}
-
-static int iwm_init_calib(struct iwm_priv *iwm, unsigned long cfg_bitmap,
- unsigned long expected_bitmap, u8 rx_iq_cmd)
-{
- /* Read RX IQ calibration result from EEPROM */
- if (test_bit(rx_iq_cmd, &cfg_bitmap)) {
- iwm_store_rxiq_calib_result(iwm);
- set_bit(PHY_CALIBRATE_RX_IQ_CMD, &iwm->calib_done_map);
- }
-
- iwm_send_prio_table(iwm);
- iwm_send_init_calib_cfg(iwm, cfg_bitmap);
-
- while (iwm->calib_done_map != expected_bitmap) {
- if (iwm_notif_handle(iwm, CALIBRATION_RES_NOTIFICATION,
- IWM_SRC_LMAC, WAIT_NOTIF_TIMEOUT)) {
- IWM_DBG_FW(iwm, DBG, "Initial calibration timeout\n");
- return -ETIMEDOUT;
- }
-
- IWM_DBG_FW(iwm, DBG, "Got calibration result. calib_done_map: "
- "0x%lx, expected calibrations: 0x%lx\n",
- iwm->calib_done_map, expected_bitmap);
- }
-
- return 0;
-}
-
-/*
- * We currently have to load 3 FWs:
- * 1) The UMAC (Upper MAC).
- * 2) The calibration LMAC (Lower MAC).
- * We then send the calibration init command, so that the device can
- * run a first calibration round.
- * 3) The operational LMAC, which replaces the calibration one when it's
- * done with the first calibration round.
- *
- * Once those 3 FWs have been loaded, we send the periodic calibration
- * command, and then the device is available for regular 802.11 operations.
- */
-int iwm_load_fw(struct iwm_priv *iwm)
-{
- unsigned long init_calib_map, periodic_calib_map;
- unsigned long expected_calib_map;
- int ret;
-
- /* We first start downloading the UMAC */
- ret = iwm_load_umac(iwm);
- if (ret < 0) {
- IWM_ERR(iwm, "UMAC loading failed\n");
- return ret;
- }
-
- /* Handle UMAC_ALIVE notification */
- ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_ALIVE, IWM_SRC_UMAC,
- WAIT_NOTIF_TIMEOUT);
- if (ret) {
- IWM_ERR(iwm, "Handle UMAC_ALIVE failed: %d\n", ret);
- return ret;
- }
-
- /* UMAC is alive, we can download the calibration LMAC */
- ret = iwm_load_lmac(iwm, iwm->bus_ops->calib_lmac_name);
- if (ret) {
- IWM_ERR(iwm, "Calibration LMAC loading failed\n");
- return ret;
- }
-
- /* Handle UMAC_INIT_COMPLETE notification */
- ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_INIT_COMPLETE,
- IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
- if (ret) {
- IWM_ERR(iwm, "Handle INIT_COMPLETE failed for calibration "
- "LMAC: %d\n", ret);
- return ret;
- }
-
- /* Read EEPROM data */
- ret = iwm_eeprom_init(iwm);
- if (ret < 0) {
- IWM_ERR(iwm, "Couldn't init eeprom array\n");
- return ret;
- }
-
- init_calib_map = iwm->conf.calib_map & IWM_CALIB_MAP_INIT_MSK;
- expected_calib_map = iwm->conf.expected_calib_map &
- IWM_CALIB_MAP_INIT_MSK;
- periodic_calib_map = IWM_CALIB_MAP_PER_LMAC(iwm->conf.calib_map);
-
- ret = iwm_init_calib(iwm, init_calib_map, expected_calib_map,
- CALIB_CFG_RX_IQ_IDX);
- if (ret < 0) {
- /* Let's try the old way */
- ret = iwm_init_calib(iwm, expected_calib_map,
- expected_calib_map,
- PHY_CALIBRATE_RX_IQ_CMD);
- if (ret < 0) {
- IWM_ERR(iwm, "Calibration result timeout\n");
- goto out;
- }
- }
-
- /* Handle LMAC CALIBRATION_COMPLETE notification */
- ret = iwm_notif_handle(iwm, CALIBRATION_COMPLETE_NOTIFICATION,
- IWM_SRC_LMAC, WAIT_NOTIF_TIMEOUT);
- if (ret) {
- IWM_ERR(iwm, "Wait for CALIBRATION_COMPLETE timeout\n");
- goto out;
- }
-
- IWM_INFO(iwm, "LMAC calibration done: 0x%lx\n", iwm->calib_done_map);
-
- iwm_send_umac_reset(iwm, cpu_to_le32(UMAC_RST_CTRL_FLG_LARC_RESET), 1);
-
- ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_RESET, IWM_SRC_UMAC,
- WAIT_NOTIF_TIMEOUT);
- if (ret) {
- IWM_ERR(iwm, "Wait for UMAC RESET timeout\n");
- goto out;
- }
-
- /* Download the operational LMAC */
- ret = iwm_load_lmac(iwm, iwm->bus_ops->lmac_name);
- if (ret) {
- IWM_ERR(iwm, "LMAC loading failed\n");
- goto out;
- }
-
- ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_INIT_COMPLETE,
- IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
- if (ret) {
- IWM_ERR(iwm, "Handle INIT_COMPLETE failed for LMAC: %d\n", ret);
- goto out;
- }
-
- iwm_send_prio_table(iwm);
- iwm_send_calib_results(iwm);
- iwm_send_periodic_calib_cfg(iwm, periodic_calib_map);
- iwm_send_ct_kill_cfg(iwm, iwm->conf.ct_kill_entry,
- iwm->conf.ct_kill_exit);
-
- return 0;
-
- out:
- iwm_eeprom_exit(iwm);
- return ret;
-}
diff --git a/drivers/net/wireless/iwmc3200wifi/fw.h b/drivers/net/wireless/iwmc3200wifi/fw.h
deleted file mode 100644
index c70a3b4..0000000
--- a/drivers/net/wireless/iwmc3200wifi/fw.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#ifndef __IWM_FW_H__
-#define __IWM_FW_H__
-
-/**
- * struct iwm_fw_hdr_rec - firmware header record
- * @op_code: The record ID, see IWM_HDR_REC_OP_*
- * @len: The record payload length
- * @buf: The record payload
- *
- * An iwm firmware image is a concatenation of such records. Each of
- * them is defined by an ID (aka op code), a length, and the actual
- * data.
- */
-struct iwm_fw_hdr_rec {
- u16 op_code;
- u16 len;
- u8 buf[0];
-};
-
-/* Header's definitions */
-#define IWM_HDR_LEN (512)
-#define IWM_HDR_BARKER_LEN (16)
-
-/* Header's opcodes */
-#define IWM_HDR_REC_OP_INVALID (0x00)
-#define IWM_HDR_REC_OP_BUILD_DATE (0x01)
-#define IWM_HDR_REC_OP_BUILD_TAG (0x02)
-#define IWM_HDR_REC_OP_SW_VER (0x03)
-#define IWM_HDR_REC_OP_HW_SKU (0x04)
-#define IWM_HDR_REC_OP_BUILD_OPT (0x05)
-#define IWM_HDR_REC_OP_MEM_DESC (0x06)
-#define IWM_HDR_REC_USERDEFS (0x07)
-
-/* Header's records length (in bytes) */
-#define IWM_HDR_REC_LEN_BUILD_DATE (4)
-#define IWM_HDR_REC_LEN_BUILD_TAG (64)
-#define IWM_HDR_REC_LEN_SW_VER (4)
-#define IWM_HDR_REC_LEN_HW_SKU (4)
-#define IWM_HDR_REC_LEN_BUILD_OPT (4)
-#define IWM_HDR_REC_LEN_MEM_DESC (12)
-#define IWM_HDR_REC_LEN_USERDEF (64)
-
-#define IWM_BUILD_YEAR(date) ((date >> 16) & 0xffff)
-#define IWM_BUILD_MONTH(date) ((date >> 8) & 0xff)
-#define IWM_BUILD_DAY(date) (date & 0xff)
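A small hedged example of these build-date macros (the helper and the sample value are made up; the BCD-style packing is an assumption that matches the "%x-%x-%x" format fw.c uses when printing the date):

static inline void iwm_fw_print_build_date(u32 build_date)
{
	/* e.g. build_date = 0x20091130 yields 0x2009, 0x11, 0x30,
	 * printed as "2009-11-30" */
	pr_info("Build date: %x-%x-%x\n", IWM_BUILD_YEAR(build_date),
		IWM_BUILD_MONTH(build_date), IWM_BUILD_DAY(build_date));
}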
-
-struct iwm_fw_img_desc {
- u32 offset;
- u32 address;
- u32 length;
-};
-
-struct iwm_fw_img_ver {
- u8 minor;
- u8 major;
- u16 reserved;
-};
-
-int iwm_load_fw(struct iwm_priv *iwm);
-
-#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.c b/drivers/net/wireless/iwmc3200wifi/hal.c
deleted file mode 100644
index 1cabcb3..0000000
--- a/drivers/net/wireless/iwmc3200wifi/hal.c
+++ /dev/null
@@ -1,470 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-/*
- * Hardware Abstraction Layer for iwm.
- *
- * This file mostly defines an abstraction API for
- * sending various commands to the target.
- *
- * We have 2 types of commands: wifi and non-wifi ones.
- *
- * - wifi commands:
- * They are used for sending LMAC and UMAC commands,
- * and thus are the most commonly used ones.
- * There are 2 different wifi command types, the regular
- * one and the LMAC one. The former is used to send
- * UMAC commands (see UMAC_CMD_OPCODE_* from umac.h)
- * while the latter is used for sending commands to the
- * LMAC. If you look at LMAC commands you'll see that they
- * are actually regular iwlwifi target commands encapsulated
- * into a special UMAC command called UMAC passthrough.
- * This is because the host talks exclusively to the UMAC,
- * so there needs to be a special UMAC command for talking
- * to the LMAC.
- * This is how a wifi command is laid out:
- *  ------------------------
- * | iwm_udma_out_wifi_hdr  |
- *  ------------------------
- * | SW meta_data (32 bits) |
- *  ------------------------
- * | iwm_dev_cmd_hdr        |
- *  ------------------------
- * | payload                |
- * | ....                   |
- *
- * - non-wifi, or general commands:
- * Those commands are handled by the device's bootrom,
- * and are typically sent when the UMAC and the LMAC
- * are not yet available.
- * This is how a non-wifi command is laid out:
- *  --------------------------
- * | iwm_udma_out_nonwifi_hdr |
- *  --------------------------
- * | payload                  |
- * | ....                     |
- *
- * All the commands start with a UDMA header, which is
- * basically a 32-bit field. The 4 LSBs there define
- * an opcode that allows the target to differentiate
- * between wifi (opcode is 0xf) and non-wifi commands
- * (opcode is [0..0xe]).
- *
- * When a command (wifi or non-wifi) is supposed to receive
- * an answer, we queue the command buffer. When we do receive
- * a command response from the UMAC, we go through the list
- * of pending command, and pass both the command and the answer
- * to the rx handler. Each command is sent with a unique
- * sequence id, and the answer is sent with the same one. This
- * is how we're supposed to match an answer with its command.
- * See rx.c:iwm_rx_handle_[non]wifi() and iwm_get_pending_[non]wifi()
- * for the implementation details.
- */
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/slab.h>
-
-#include "iwm.h"
-#include "bus.h"
-#include "hal.h"
-#include "umac.h"
-#include "debug.h"
-#include "trace.h"
-
-static int iwm_nonwifi_cmd_init(struct iwm_priv *iwm,
- struct iwm_nonwifi_cmd *cmd,
- struct iwm_udma_nonwifi_cmd *udma_cmd)
-{
- INIT_LIST_HEAD(&cmd->pending);
-
- spin_lock(&iwm->cmd_lock);
-
- cmd->resp_received = 0;
-
- cmd->seq_num = iwm->nonwifi_seq_num;
- udma_cmd->seq_num = cpu_to_le16(cmd->seq_num);
-
- iwm->nonwifi_seq_num++;
- iwm->nonwifi_seq_num %= UMAC_NONWIFI_SEQ_NUM_MAX;
-
- if (udma_cmd->resp)
- list_add_tail(&cmd->pending, &iwm->nonwifi_pending_cmd);
-
- spin_unlock(&iwm->cmd_lock);
-
- cmd->buf.start = cmd->buf.payload;
- cmd->buf.len = 0;
-
- memcpy(&cmd->udma_cmd, udma_cmd, sizeof(*udma_cmd));
-
- return cmd->seq_num;
-}
-
-u16 iwm_alloc_wifi_cmd_seq(struct iwm_priv *iwm)
-{
- u16 seq_num = iwm->wifi_seq_num;
-
- iwm->wifi_seq_num++;
- iwm->wifi_seq_num %= UMAC_WIFI_SEQ_NUM_MAX;
-
- return seq_num;
-}
-
-static void iwm_wifi_cmd_init(struct iwm_priv *iwm,
- struct iwm_wifi_cmd *cmd,
- struct iwm_udma_wifi_cmd *udma_cmd,
- struct iwm_umac_cmd *umac_cmd,
- struct iwm_lmac_cmd *lmac_cmd,
- u16 payload_size)
-{
- INIT_LIST_HEAD(&cmd->pending);
-
- spin_lock(&iwm->cmd_lock);
-
- cmd->seq_num = iwm_alloc_wifi_cmd_seq(iwm);
- umac_cmd->seq_num = cpu_to_le16(cmd->seq_num);
-
- if (umac_cmd->resp)
- list_add_tail(&cmd->pending, &iwm->wifi_pending_cmd);
-
- spin_unlock(&iwm->cmd_lock);
-
- cmd->buf.start = cmd->buf.payload;
- cmd->buf.len = 0;
-
- if (lmac_cmd) {
- cmd->buf.start -= sizeof(struct iwm_lmac_hdr);
-
- lmac_cmd->seq_num = cpu_to_le16(cmd->seq_num);
- lmac_cmd->count = cpu_to_le16(payload_size);
-
- memcpy(&cmd->lmac_cmd, lmac_cmd, sizeof(*lmac_cmd));
-
- umac_cmd->count = cpu_to_le16(sizeof(struct iwm_lmac_hdr));
- } else
- umac_cmd->count = 0;
-
- umac_cmd->count = cpu_to_le16(payload_size +
- le16_to_cpu(umac_cmd->count));
- udma_cmd->count = cpu_to_le16(sizeof(struct iwm_umac_fw_cmd_hdr) +
- le16_to_cpu(umac_cmd->count));
-
- memcpy(&cmd->udma_cmd, udma_cmd, sizeof(*udma_cmd));
- memcpy(&cmd->umac_cmd, umac_cmd, sizeof(*umac_cmd));
-}
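To make the byte-count bookkeeping above easier to follow, here is a worked example with an assumed 16-byte payload on the LMAC path (struct iwm_lmac_hdr is 4 bytes):

/*
 * Worked example (assumed sizes, LMAC command path):
 *   umac_cmd->count = sizeof(struct iwm_lmac_hdr)           = 4
 *   umac_cmd->count = payload_size + 4 = 16 + 4             = 20
 *   udma_cmd->count = sizeof(struct iwm_umac_fw_cmd_hdr) + 20
 * i.e. the UDMA byte count covers everything that follows the UDMA
 * header itself, while the UMAC count excludes the UMAC header.
 */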
-
-void iwm_cmd_flush(struct iwm_priv *iwm)
-{
- struct iwm_wifi_cmd *wcmd, *wnext;
- struct iwm_nonwifi_cmd *nwcmd, *nwnext;
-
- list_for_each_entry_safe(wcmd, wnext, &iwm->wifi_pending_cmd, pending) {
- list_del(&wcmd->pending);
- kfree(wcmd);
- }
-
- list_for_each_entry_safe(nwcmd, nwnext, &iwm->nonwifi_pending_cmd,
- pending) {
- list_del(&nwcmd->pending);
- kfree(nwcmd);
- }
-}
-
-struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm, u16 seq_num)
-{
- struct iwm_wifi_cmd *cmd;
-
- list_for_each_entry(cmd, &iwm->wifi_pending_cmd, pending)
- if (cmd->seq_num == seq_num) {
- list_del(&cmd->pending);
- return cmd;
- }
-
- return NULL;
-}
-
-struct iwm_nonwifi_cmd *iwm_get_pending_nonwifi_cmd(struct iwm_priv *iwm,
- u8 seq_num, u8 cmd_opcode)
-{
- struct iwm_nonwifi_cmd *cmd;
-
- list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
- if ((cmd->seq_num == seq_num) &&
- (cmd->udma_cmd.opcode == cmd_opcode) &&
- (cmd->resp_received)) {
- list_del(&cmd->pending);
- return cmd;
- }
-
- return NULL;
-}
-
-static void iwm_build_udma_nonwifi_hdr(struct iwm_priv *iwm,
- struct iwm_udma_out_nonwifi_hdr *hdr,
- struct iwm_udma_nonwifi_cmd *cmd)
-{
- memset(hdr, 0, sizeof(*hdr));
-
- SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_OPCODE, cmd->opcode);
- SET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_RESP, cmd->resp);
- SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT, 1);
- SET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW,
- cmd->handle_by_hw);
- SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_SIGNATURE, UMAC_HDI_OUT_SIGNATURE);
- SET_VAL32(hdr->cmd, UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM,
- le16_to_cpu(cmd->seq_num));
-
- hdr->addr = cmd->addr;
- hdr->op1_sz = cmd->op1_sz;
- hdr->op2 = cmd->op2;
-}
-
-static int iwm_send_udma_nonwifi_cmd(struct iwm_priv *iwm,
- struct iwm_nonwifi_cmd *cmd)
-{
- struct iwm_udma_out_nonwifi_hdr *udma_hdr;
- struct iwm_nonwifi_cmd_buff *buf;
- struct iwm_udma_nonwifi_cmd *udma_cmd = &cmd->udma_cmd;
-
- buf = &cmd->buf;
-
- buf->start -= sizeof(struct iwm_umac_nonwifi_out_hdr);
- buf->len += sizeof(struct iwm_umac_nonwifi_out_hdr);
-
- udma_hdr = (struct iwm_udma_out_nonwifi_hdr *)(buf->start);
-
- iwm_build_udma_nonwifi_hdr(iwm, udma_hdr, udma_cmd);
-
- IWM_DBG_CMD(iwm, DBG,
- "Send UDMA nonwifi cmd: opcode = 0x%x, resp = 0x%x, "
- "hw = 0x%x, seqnum = %d, addr = 0x%x, op1_sz = 0x%x, "
- "op2 = 0x%x\n", udma_cmd->opcode, udma_cmd->resp,
- udma_cmd->handle_by_hw, cmd->seq_num, udma_cmd->addr,
- udma_cmd->op1_sz, udma_cmd->op2);
-
- trace_iwm_tx_nonwifi_cmd(iwm, udma_hdr);
- return iwm_bus_send_chunk(iwm, buf->start, buf->len);
-}
-
-void iwm_udma_wifi_hdr_set_eop(struct iwm_priv *iwm, u8 *buf, u8 eop)
-{
- struct iwm_udma_out_wifi_hdr *hdr = (struct iwm_udma_out_wifi_hdr *)buf;
-
- SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT, eop);
-}
-
-void iwm_build_udma_wifi_hdr(struct iwm_priv *iwm,
- struct iwm_udma_out_wifi_hdr *hdr,
- struct iwm_udma_wifi_cmd *cmd)
-{
- memset(hdr, 0, sizeof(*hdr));
-
- SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_OPCODE, UMAC_HDI_OUT_OPCODE_WIFI);
- SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT, cmd->eop);
- SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_SIGNATURE, UMAC_HDI_OUT_SIGNATURE);
-
- SET_VAL32(hdr->meta_data, UMAC_HDI_OUT_BYTE_COUNT,
- le16_to_cpu(cmd->count));
- SET_VAL32(hdr->meta_data, UMAC_HDI_OUT_CREDIT_GRP, cmd->credit_group);
- SET_VAL32(hdr->meta_data, UMAC_HDI_OUT_RATID, cmd->ra_tid);
- SET_VAL32(hdr->meta_data, UMAC_HDI_OUT_LMAC_OFFSET, cmd->lmac_offset);
-}
-
-void iwm_build_umac_hdr(struct iwm_priv *iwm,
- struct iwm_umac_fw_cmd_hdr *hdr,
- struct iwm_umac_cmd *cmd)
-{
- memset(hdr, 0, sizeof(*hdr));
-
- SET_VAL32(hdr->meta_data, UMAC_FW_CMD_BYTE_COUNT,
- le16_to_cpu(cmd->count));
- SET_VAL32(hdr->meta_data, UMAC_FW_CMD_TX_STA_COLOR, cmd->color);
- SET_VAL8(hdr->cmd.flags, UMAC_DEV_CMD_FLAGS_RESP_REQ, cmd->resp);
-
- hdr->cmd.cmd = cmd->id;
- hdr->cmd.seq_num = cmd->seq_num;
-}
-
-static int iwm_send_udma_wifi_cmd(struct iwm_priv *iwm,
- struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_wifi_out_hdr *umac_hdr;
- struct iwm_wifi_cmd_buff *buf;
- struct iwm_udma_wifi_cmd *udma_cmd = &cmd->udma_cmd;
- struct iwm_umac_cmd *umac_cmd = &cmd->umac_cmd;
- int ret;
-
- buf = &cmd->buf;
-
- buf->start -= sizeof(struct iwm_umac_wifi_out_hdr);
- buf->len += sizeof(struct iwm_umac_wifi_out_hdr);
-
- umac_hdr = (struct iwm_umac_wifi_out_hdr *)(buf->start);
-
- iwm_build_udma_wifi_hdr(iwm, &umac_hdr->hw_hdr, udma_cmd);
- iwm_build_umac_hdr(iwm, &umac_hdr->sw_hdr, umac_cmd);
-
- IWM_DBG_CMD(iwm, DBG,
- "Send UDMA wifi cmd: opcode = 0x%x, UMAC opcode = 0x%x, "
- "eop = 0x%x, count = 0x%x, credit_group = 0x%x, "
- "ra_tid = 0x%x, lmac_offset = 0x%x, seqnum = %d\n",
- UMAC_HDI_OUT_OPCODE_WIFI, umac_cmd->id,
- udma_cmd->eop, udma_cmd->count, udma_cmd->credit_group,
- udma_cmd->ra_tid, udma_cmd->lmac_offset, cmd->seq_num);
-
- if (umac_cmd->id == UMAC_CMD_OPCODE_WIFI_PASS_THROUGH)
- IWM_DBG_CMD(iwm, DBG, "\tLMAC opcode: 0x%x\n",
- cmd->lmac_cmd.id);
-
- ret = iwm_tx_credit_alloc(iwm, udma_cmd->credit_group, buf->len);
-
- /* We keep sending UMAC reset regardless of the command credits.
- * The UMAC is supposed to be reset anyway and the Tx credits are
- * reinitialized afterwards. If we are lucky, the reset could
- * still be done even though we have run out of credits for the
- * command pool at this moment.*/
- if (ret && (umac_cmd->id != UMAC_CMD_OPCODE_RESET)) {
- IWM_DBG_TX(iwm, DBG, "Failed to alloc tx credit for cmd %d\n",
- umac_cmd->id);
- return ret;
- }
-
- trace_iwm_tx_wifi_cmd(iwm, umac_hdr);
- return iwm_bus_send_chunk(iwm, buf->start, buf->len);
-}
-
-/* target_cmd a.k.a. udma_nonwifi_cmd can be sent when UMAC is not available */
-int iwm_hal_send_target_cmd(struct iwm_priv *iwm,
- struct iwm_udma_nonwifi_cmd *udma_cmd,
- const void *payload)
-{
- struct iwm_nonwifi_cmd *cmd;
- int ret, seq_num;
-
- cmd = kzalloc(sizeof(struct iwm_nonwifi_cmd), GFP_KERNEL);
- if (!cmd) {
- IWM_ERR(iwm, "Couldn't alloc memory for hal cmd\n");
- return -ENOMEM;
- }
-
- seq_num = iwm_nonwifi_cmd_init(iwm, cmd, udma_cmd);
-
- if (cmd->udma_cmd.opcode == UMAC_HDI_OUT_OPCODE_WRITE ||
- cmd->udma_cmd.opcode == UMAC_HDI_OUT_OPCODE_WRITE_PERSISTENT) {
- cmd->buf.len = le32_to_cpu(cmd->udma_cmd.op1_sz);
- memcpy(&cmd->buf.payload, payload, cmd->buf.len);
- }
-
- ret = iwm_send_udma_nonwifi_cmd(iwm, cmd);
-
- if (!udma_cmd->resp)
- kfree(cmd);
-
- if (ret < 0)
- return ret;
-
- return seq_num;
-}
-
-static void iwm_build_lmac_hdr(struct iwm_priv *iwm, struct iwm_lmac_hdr *hdr,
- struct iwm_lmac_cmd *cmd)
-{
- memset(hdr, 0, sizeof(*hdr));
-
- hdr->id = cmd->id;
- hdr->flags = 0; /* Is this ever used? */
- hdr->seq_num = cmd->seq_num;
-}
-
-/*
- * iwm_hal_send_host_cmd(): sends commands to the UMAC or the LMAC.
- * Sending a command to the LMAC is equivalent to sending a
- * regular UMAC command with the LMAC passthrough or the LMAC
- * wrapper UMAC command IDs.
- */
-int iwm_hal_send_host_cmd(struct iwm_priv *iwm,
- struct iwm_udma_wifi_cmd *udma_cmd,
- struct iwm_umac_cmd *umac_cmd,
- struct iwm_lmac_cmd *lmac_cmd,
- const void *payload, u16 payload_size)
-{
- struct iwm_wifi_cmd *cmd;
- struct iwm_lmac_hdr *hdr;
- int lmac_hdr_len = 0;
- int ret;
-
- cmd = kzalloc(sizeof(struct iwm_wifi_cmd), GFP_KERNEL);
- if (!cmd) {
- IWM_ERR(iwm, "Couldn't alloc memory for wifi hal cmd\n");
- return -ENOMEM;
- }
-
- iwm_wifi_cmd_init(iwm, cmd, udma_cmd, umac_cmd, lmac_cmd, payload_size);
-
- if (lmac_cmd) {
- hdr = (struct iwm_lmac_hdr *)(cmd->buf.start);
-
- iwm_build_lmac_hdr(iwm, hdr, &cmd->lmac_cmd);
- lmac_hdr_len = sizeof(struct iwm_lmac_hdr);
- }
-
- memcpy(cmd->buf.payload, payload, payload_size);
- cmd->buf.len = le16_to_cpu(umac_cmd->count);
-
- ret = iwm_send_udma_wifi_cmd(iwm, cmd);
-
- /* We free the cmd if we're not expecting any response */
- if (!umac_cmd->resp)
- kfree(cmd);
- return ret;
-}
-
-/*
- * iwm_hal_send_umac_cmd(): This is a special case of
- * iwm_hal_send_host_cmd(), used to send a direct UMAC command
- * (without the LMAC being involved).
- */
-int iwm_hal_send_umac_cmd(struct iwm_priv *iwm,
- struct iwm_udma_wifi_cmd *udma_cmd,
- struct iwm_umac_cmd *umac_cmd,
- const void *payload, u16 payload_size)
-{
- return iwm_hal_send_host_cmd(iwm, udma_cmd, umac_cmd, NULL,
- payload, payload_size);
-}
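A hedged sketch of the typical caller pattern for this API (the function name and opcode are placeholders; the driver's real callers in commands.c followed the same shape):

static int iwm_send_some_umac_cmd(struct iwm_priv *iwm,
				  const void *payload, u16 len)
{
	/* UDMA descriptor preset for plain UMAC commands (see hal.h) */
	struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
	struct iwm_umac_cmd umac_cmd = {
		.id = UMAC_CMD_OPCODE_RESET,	/* placeholder opcode */
		.resp = 0,			/* no response expected */
	};

	return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, payload, len);
}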
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.h b/drivers/net/wireless/iwmc3200wifi/hal.h
deleted file mode 100644
index c20936d..0000000
--- a/drivers/net/wireless/iwmc3200wifi/hal.h
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#ifndef _IWM_HAL_H_
-#define _IWM_HAL_H_
-
-#include "umac.h"
-
-#define GET_VAL8(s, name) ((s >> name##_POS) & name##_SEED)
-#define GET_VAL16(s, name) ((le16_to_cpu(s) >> name##_POS) & name##_SEED)
-#define GET_VAL32(s, name) ((le32_to_cpu(s) >> name##_POS) & name##_SEED)
-
-#define SET_VAL8(s, name, val) \
-do { \
- s = (s & ~(name##_SEED << name##_POS)) | \
- ((val & name##_SEED) << name##_POS); \
-} while (0)
-
-#define SET_VAL16(s, name, val) \
-do { \
- s = cpu_to_le16((le16_to_cpu(s) & ~(name##_SEED << name##_POS)) | \
- ((val & name##_SEED) << name##_POS)); \
-} while (0)
-
-#define SET_VAL32(s, name, val) \
-do { \
- s = cpu_to_le32((le32_to_cpu(s) & ~(name##_SEED << name##_POS)) | \
- ((val & name##_SEED) << name##_POS)); \
-} while (0)
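A short usage sketch of the accessor macros above (illustrative only: the helper is hypothetical and relies on the UDMA_HDI_IN_NW_CMD_OPCODE_POS/_SEED pair defined further down in this header):

static inline u8 iwm_rx_nonwifi_opcode(__le32 hdr_word)
{
	/* Expands to ((le32_to_cpu(hdr_word) >> 0) & 0xF) */
	return GET_VAL32(hdr_word, UDMA_HDI_IN_NW_CMD_OPCODE);
}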
-
-
-#define UDMA_UMAC_INIT { .eop = 1, \
- .credit_group = 0x4, \
- .ra_tid = UMAC_HDI_ACT_TBL_IDX_HOST_CMD, \
- .lmac_offset = 0 }
-#define UDMA_LMAC_INIT { .eop = 1, \
- .credit_group = 0x4, \
- .ra_tid = UMAC_HDI_ACT_TBL_IDX_HOST_CMD, \
- .lmac_offset = 4 }
-
-
-/* UDMA IN OP CODE -- cmd bits [3:0] */
-#define UDMA_HDI_IN_NW_CMD_OPCODE_POS 0
-#define UDMA_HDI_IN_NW_CMD_OPCODE_SEED 0xF
-
-#define UDMA_IN_OPCODE_GENERAL_RESP 0x0
-#define UDMA_IN_OPCODE_READ_RESP 0x1
-#define UDMA_IN_OPCODE_WRITE_RESP 0x2
-#define UDMA_IN_OPCODE_PERS_WRITE_RESP 0x5
-#define UDMA_IN_OPCODE_PERS_READ_RESP 0x6
-#define UDMA_IN_OPCODE_RD_MDFY_WR_RESP 0x7
-#define UDMA_IN_OPCODE_EP_MNGMT_MSG 0x8
-#define UDMA_IN_OPCODE_CRDT_CHNG_MSG 0x9
-#define UDMA_IN_OPCODE_CNTRL_DATABASE_MSG 0xA
-#define UDMA_IN_OPCODE_SW_MSG 0xB
-#define UDMA_IN_OPCODE_WIFI 0xF
-#define UDMA_IN_OPCODE_WIFI_LMAC 0x1F
-#define UDMA_IN_OPCODE_WIFI_UMAC 0x2F
-
-/* HW API: udma_hdi_nonwifi API (OUT and IN) */
-
-/* iwm_udma_nonwifi_cmd request response -- bits [9:9] */
-#define UDMA_HDI_OUT_NW_CMD_RESP_POS 9
-#define UDMA_HDI_OUT_NW_CMD_RESP_SEED 0x1
-
-/* iwm_udma_nonwifi_cmd handle by HW -- bits [11:11] */
-#define UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW_POS 11
-#define UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW_SEED 0x1
-
-/* iwm_udma_nonwifi_cmd sequence-number -- bits [12:15] */
-#define UDMA_HDI_OUT_NW_CMD_SEQ_NUM_POS 12
-#define UDMA_HDI_OUT_NW_CMD_SEQ_NUM_SEED 0xF
-
-/* UDMA IN Non-WIFI HW sequence number -- bits [12:15] */
-#define UDMA_IN_NW_HW_SEQ_NUM_POS 12
-#define UDMA_IN_NW_HW_SEQ_NUM_SEED 0xF
-
-/* UDMA IN Non-WIFI HW signature -- bits [16:31] */
-#define UDMA_IN_NW_HW_SIG_POS 16
-#define UDMA_IN_NW_HW_SIG_SEED 0xFFFF
-
-/* fixed signature */
-#define UDMA_IN_NW_HW_SIG 0xCBBC
-
-/* UDMA IN Non-WIFI HW block length -- bits [32:35] */
-#define UDMA_IN_NW_HW_LENGTH_SEED 0xF
-#define UDMA_IN_NW_HW_LENGTH_POS 32
-
-/* End of HW API: udma_hdi_nonwifi API (OUT and IN) */
-
-#define IWM_SDIO_FW_MAX_CHUNK_SIZE 2032
-#define IWM_MAX_WIFI_HEADERS_SIZE 32
-#define IWM_MAX_NONWIFI_HEADERS_SIZE 16
-#define IWM_MAX_NONWIFI_CMD_BUFF_SIZE (IWM_SDIO_FW_MAX_CHUNK_SIZE - \
- IWM_MAX_NONWIFI_HEADERS_SIZE)
-#define IWM_MAX_WIFI_CMD_BUFF_SIZE (IWM_SDIO_FW_MAX_CHUNK_SIZE - \
- IWM_MAX_WIFI_HEADERS_SIZE)
-
-#define IWM_HAL_CONCATENATE_BUF_SIZE (32 * 1024)
-
-struct iwm_wifi_cmd_buff {
- u16 len;
- u8 *start;
- u8 hdr[IWM_MAX_WIFI_HEADERS_SIZE];
- u8 payload[IWM_MAX_WIFI_CMD_BUFF_SIZE];
-};
-
-struct iwm_nonwifi_cmd_buff {
- u16 len;
- u8 *start;
- u8 hdr[IWM_MAX_NONWIFI_HEADERS_SIZE];
- u8 payload[IWM_MAX_NONWIFI_CMD_BUFF_SIZE];
-};
-
-struct iwm_udma_nonwifi_cmd {
- u8 opcode;
- u8 eop;
- u8 resp;
- u8 handle_by_hw;
- __le32 addr;
- __le32 op1_sz;
- __le32 op2;
- __le16 seq_num;
-};
-
-struct iwm_udma_wifi_cmd {
- __le16 count;
- u8 eop;
- u8 credit_group;
- u8 ra_tid;
- u8 lmac_offset;
-};
-
-struct iwm_umac_cmd {
- u8 id;
- __le16 count;
- u8 resp;
- __le16 seq_num;
- u8 color;
-};
-
-struct iwm_lmac_cmd {
- u8 id;
- __le16 count;
- u8 resp;
- __le16 seq_num;
-};
-
-struct iwm_nonwifi_cmd {
- u16 seq_num;
- bool resp_received;
- struct list_head pending;
- struct iwm_udma_nonwifi_cmd udma_cmd;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_lmac_cmd lmac_cmd;
- struct iwm_nonwifi_cmd_buff buf;
- u32 flags;
-};
-
-struct iwm_wifi_cmd {
- u16 seq_num;
- struct list_head pending;
- struct iwm_udma_wifi_cmd udma_cmd;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_lmac_cmd lmac_cmd;
- struct iwm_wifi_cmd_buff buf;
- u32 flags;
-};
-
-void iwm_cmd_flush(struct iwm_priv *iwm);
-
-struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm,
- u16 seq_num);
-struct iwm_nonwifi_cmd *iwm_get_pending_nonwifi_cmd(struct iwm_priv *iwm,
- u8 seq_num, u8 cmd_opcode);
-
-
-int iwm_hal_send_target_cmd(struct iwm_priv *iwm,
- struct iwm_udma_nonwifi_cmd *ucmd,
- const void *payload);
-
-int iwm_hal_send_host_cmd(struct iwm_priv *iwm,
- struct iwm_udma_wifi_cmd *udma_cmd,
- struct iwm_umac_cmd *umac_cmd,
- struct iwm_lmac_cmd *lmac_cmd,
- const void *payload, u16 payload_size);
-
-int iwm_hal_send_umac_cmd(struct iwm_priv *iwm,
- struct iwm_udma_wifi_cmd *udma_cmd,
- struct iwm_umac_cmd *umac_cmd,
- const void *payload, u16 payload_size);
-
-u16 iwm_alloc_wifi_cmd_seq(struct iwm_priv *iwm);
-
-void iwm_udma_wifi_hdr_set_eop(struct iwm_priv *iwm, u8 *buf, u8 eop);
-void iwm_build_udma_wifi_hdr(struct iwm_priv *iwm,
- struct iwm_udma_out_wifi_hdr *hdr,
- struct iwm_udma_wifi_cmd *cmd);
-void iwm_build_umac_hdr(struct iwm_priv *iwm,
- struct iwm_umac_fw_cmd_hdr *hdr,
- struct iwm_umac_cmd *cmd);
-#endif /* _IWM_HAL_H_ */
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
deleted file mode 100644
index 51d7efa..0000000
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#ifndef __IWM_H__
-#define __IWM_H__
-
-#include <linux/netdevice.h>
-#include <linux/wireless.h>
-#include <net/cfg80211.h>
-
-#include "debug.h"
-#include "hal.h"
-#include "umac.h"
-#include "lmac.h"
-#include "eeprom.h"
-#include "trace.h"
-
-#define IWM_COPYRIGHT "Copyright(c) 2009 Intel Corporation"
-#define IWM_AUTHOR "<ilw@linux.intel.com>"
-
-#define IWM_SRC_LMAC UMAC_HDI_IN_SOURCE_FHRX
-#define IWM_SRC_UDMA UMAC_HDI_IN_SOURCE_UDMA
-#define IWM_SRC_UMAC UMAC_HDI_IN_SOURCE_FW
-#define IWM_SRC_NUM 3
-
-#define IWM_POWER_INDEX_MIN 0
-#define IWM_POWER_INDEX_MAX 5
-#define IWM_POWER_INDEX_DEFAULT 3
-
-struct iwm_conf {
- u32 sdio_ior_timeout;
- unsigned long calib_map;
- unsigned long expected_calib_map;
- u8 ct_kill_entry;
- u8 ct_kill_exit;
- bool reset_on_fatal_err;
- bool auto_connect;
- bool wimax_not_present;
- bool enable_qos;
- u32 mode;
-
- u32 power_index;
- u32 frag_threshold;
- u32 rts_threshold;
- bool cts_to_self;
-
- u32 assoc_timeout;
- u32 roam_timeout;
- u32 wireless_mode;
-
- u8 ibss_band;
- u8 ibss_channel;
-
- u8 mac_addr[ETH_ALEN];
-};
-
-enum {
- COEX_MODE_SA = 1,
- COEX_MODE_XOR,
- COEX_MODE_CM,
- COEX_MODE_MAX,
-};
-
-struct iwm_if_ops;
-struct iwm_wifi_cmd;
-
-struct pool_entry {
- int id; /* group id */
- int sid; /* super group id */
- int min_pages; /* min capacity in pages */
- int max_pages; /* max capacity in pages */
-	int alloc_pages;	/* allocated # of pages, increased by driver */
-	int total_freed_pages;	/* total freed # of pages, increased by UMAC */
-};
-
-struct spool_entry {
- int id;
- int max_pages;
- int alloc_pages;
-};
-
-struct iwm_tx_credit {
- spinlock_t lock;
- int pool_nr;
- unsigned long full_pools_map; /* bitmap for # of filled tx pools */
- struct pool_entry pools[IWM_MACS_OUT_GROUPS];
- struct spool_entry spools[IWM_MACS_OUT_SGROUPS];
-};
-
-struct iwm_notif {
- struct list_head pending;
- u32 cmd_id;
- void *cmd;
- u8 src;
- void *buf;
- unsigned long buf_size;
-};
-
-struct iwm_tid_info {
- __le16 last_seq_num;
- bool stopped;
- struct mutex mutex;
-};
-
-struct iwm_sta_info {
- u8 addr[ETH_ALEN];
- bool valid;
- bool qos;
- u8 color;
- struct iwm_tid_info tid_info[IWM_UMAC_TID_NR];
-};
-
-struct iwm_tx_info {
- u8 sta;
- u8 color;
- u8 tid;
-};
-
-struct iwm_rx_info {
- unsigned long rx_size;
- unsigned long rx_buf_size;
-};
-
-#define IWM_NUM_KEYS 4
-
-struct iwm_umac_key_hdr {
- u8 mac[ETH_ALEN];
- u8 key_idx;
- u8 multicast; /* BCast encrypt & BCast decrypt of frames FROM mac */
-} __packed;
-
-struct iwm_key {
- struct iwm_umac_key_hdr hdr;
- u32 cipher;
- u8 key[WLAN_MAX_KEY_LEN];
- u8 seq[IW_ENCODE_SEQ_MAX_SIZE];
- int key_len;
- int seq_len;
-};
-
-#define IWM_RX_ID_HASH 0xff
-#define IWM_RX_ID_GET_HASH(id) ((id) % IWM_RX_ID_HASH)
-
-#define IWM_STA_TABLE_NUM 16
-#define IWM_TX_LIST_SIZE 64
-#define IWM_RX_LIST_SIZE 256
-
-#define IWM_SCAN_ID_MAX 0xff
-
-#define IWM_STATUS_READY 0
-#define IWM_STATUS_SCANNING 1
-#define IWM_STATUS_SCAN_ABORTING 2
-#define IWM_STATUS_SME_CONNECTING 3
-#define IWM_STATUS_ASSOCIATED 4
-#define IWM_STATUS_RESETTING 5
-
-struct iwm_tx_queue {
- int id;
- struct sk_buff_head queue;
- struct sk_buff_head stopped_queue;
- spinlock_t lock;
- struct workqueue_struct *wq;
- struct work_struct worker;
- u8 concat_buf[IWM_HAL_CONCATENATE_BUF_SIZE];
- int concat_count;
- u8 *concat_ptr;
-};
-
-/* Queues 0 ~ 3 for AC data, 5 for iPAN */
-#define IWM_TX_QUEUES 5
-#define IWM_TX_DATA_QUEUES 4
-#define IWM_TX_CMD_QUEUE 4
-
-struct iwm_bss_info {
- struct list_head node;
- struct cfg80211_bss *cfg_bss;
- struct iwm_umac_notif_bss_info *bss;
-};
-
-typedef int (*iwm_handler)(struct iwm_priv *priv, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd);
-
-#define IWM_WATCHDOG_PERIOD (6 * HZ)
-
-struct iwm_priv {
- struct wireless_dev *wdev;
- struct iwm_if_ops *bus_ops;
-
- struct iwm_conf conf;
-
- unsigned long status;
-
- struct list_head pending_notif;
- wait_queue_head_t notif_queue;
-
- wait_queue_head_t nonwifi_queue;
-
- unsigned long calib_done_map;
- struct {
- u8 *buf;
- u32 size;
- } calib_res[CALIBRATION_CMD_NUM];
-
- struct iwm_umac_profile *umac_profile;
- bool umac_profile_active;
-
- u8 bssid[ETH_ALEN];
- u8 channel;
- u16 rate;
- u32 txpower;
-
- struct iwm_sta_info sta_table[IWM_STA_TABLE_NUM];
- struct list_head bss_list;
-
- void (*nonwifi_rx_handlers[UMAC_HDI_IN_OPCODE_NONWIFI_MAX])
- (struct iwm_priv *priv, u8 *buf, unsigned long buf_size);
-
- const iwm_handler *umac_handlers;
- const iwm_handler *lmac_handlers;
- DECLARE_BITMAP(lmac_handler_map, LMAC_COMMAND_ID_NUM);
- DECLARE_BITMAP(umac_handler_map, LMAC_COMMAND_ID_NUM);
- DECLARE_BITMAP(udma_handler_map, LMAC_COMMAND_ID_NUM);
-
- struct list_head wifi_pending_cmd;
- struct list_head nonwifi_pending_cmd;
- u16 wifi_seq_num;
- u8 nonwifi_seq_num;
- spinlock_t cmd_lock;
-
- u32 core_enabled;
-
- u8 scan_id;
- struct cfg80211_scan_request *scan_request;
-
- struct sk_buff_head rx_list;
- struct list_head rx_tickets;
- spinlock_t ticket_lock;
- struct list_head rx_packets[IWM_RX_ID_HASH];
- spinlock_t packet_lock[IWM_RX_ID_HASH];
- struct workqueue_struct *rx_wq;
- struct work_struct rx_worker;
-
- struct iwm_tx_credit tx_credit;
- struct iwm_tx_queue txq[IWM_TX_QUEUES];
-
- struct iwm_key keys[IWM_NUM_KEYS];
- s8 default_key;
-
- DECLARE_BITMAP(wifi_ntfy, WIFI_IF_NTFY_MAX);
- wait_queue_head_t wifi_ntfy_queue;
-
- wait_queue_head_t mlme_queue;
-
- struct iw_statistics wstats;
- struct delayed_work stats_request;
- struct delayed_work disconnect;
- struct delayed_work ct_kill_delay;
-
- struct iwm_debugfs dbg;
-
- u8 *eeprom;
- struct timer_list watchdog;
- struct work_struct reset_worker;
- struct work_struct auth_retry_worker;
- struct mutex mutex;
-
- u8 *req_ie;
- int req_ie_len;
- u8 *resp_ie;
- int resp_ie_len;
-
- struct iwm_fw_error_hdr *last_fw_err;
- char umac_version[8];
- char lmac_version[8];
-
- char private[0] __attribute__((__aligned__(NETDEV_ALIGN)));
-};
-
-static inline void *iwm_private(struct iwm_priv *iwm)
-{
- BUG_ON(!iwm);
- return &iwm->private;
-}
-
-#define hw_to_iwm(h) (h->iwm)
-#define iwm_to_dev(i) (wiphy_dev(i->wdev->wiphy))
-#define iwm_to_wiphy(i) (i->wdev->wiphy)
-#define wiphy_to_iwm(w) (struct iwm_priv *)(wiphy_priv(w))
-#define iwm_to_wdev(i) (i->wdev)
-#define wdev_to_iwm(w) (struct iwm_priv *)(wdev_priv(w))
-#define iwm_to_ndev(i) (i->wdev->netdev)
-#define ndev_to_iwm(n) (wdev_to_iwm(n->ieee80211_ptr))
-#define skb_to_rx_info(s) ((struct iwm_rx_info *)(s->cb))
-#define skb_to_tx_info(s) ((struct iwm_tx_info *)s->cb)
-
-void *iwm_if_alloc(int sizeof_bus, struct device *dev,
- struct iwm_if_ops *if_ops);
-void iwm_if_free(struct iwm_priv *iwm);
-int iwm_if_add(struct iwm_priv *iwm);
-void iwm_if_remove(struct iwm_priv *iwm);
-int iwm_mode_to_nl80211_iftype(int mode);
-int iwm_priv_init(struct iwm_priv *iwm);
-void iwm_priv_deinit(struct iwm_priv *iwm);
-void iwm_reset(struct iwm_priv *iwm);
-void iwm_resetting(struct iwm_priv *iwm);
-void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
- struct iwm_umac_notif_alive *alive);
-int iwm_tx_credit_alloc(struct iwm_priv *iwm, int id, int nb);
-int iwm_notif_send(struct iwm_priv *iwm, struct iwm_wifi_cmd *cmd,
- u8 cmd_id, u8 source, u8 *buf, unsigned long buf_size);
-int iwm_notif_handle(struct iwm_priv *iwm, u32 cmd, u8 source, long timeout);
-void iwm_init_default_profile(struct iwm_priv *iwm,
- struct iwm_umac_profile *profile);
-void iwm_link_on(struct iwm_priv *iwm);
-void iwm_link_off(struct iwm_priv *iwm);
-int iwm_up(struct iwm_priv *iwm);
-int iwm_down(struct iwm_priv *iwm);
-
-/* TX API */
-int iwm_tid_to_queue(u16 tid);
-void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages);
-void iwm_tx_worker(struct work_struct *work);
-int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-
-/* RX API */
-void iwm_rx_setup_handlers(struct iwm_priv *iwm);
-int iwm_rx_handle(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size);
-int iwm_rx_handle_resp(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size,
- struct iwm_wifi_cmd *cmd);
-void iwm_rx_free(struct iwm_priv *iwm);
-
-#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/lmac.h b/drivers/net/wireless/iwmc3200wifi/lmac.h
deleted file mode 100644
index 5ddcdf8..0000000
--- a/drivers/net/wireless/iwmc3200wifi/lmac.h
+++ /dev/null
@@ -1,484 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#ifndef __IWM_LMAC_H__
-#define __IWM_LMAC_H__
-
-struct iwm_lmac_hdr {
- u8 id;
- u8 flags;
- __le16 seq_num;
-} __packed;
-
-/* LMAC commands */
-#define CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK 0x1
-
-struct iwm_lmac_cal_cfg_elt {
- __le32 enable; /* 1 means LMAC needs to do something */
- __le32 start; /* 1 to start calibration, 0 to stop */
- __le32 send_res; /* 1 for sending back results */
- __le32 apply_res; /* 1 for applying calibration results to HW */
- __le32 reserved;
-} __packed;
-
-struct iwm_lmac_cal_cfg_status {
- struct iwm_lmac_cal_cfg_elt init;
- struct iwm_lmac_cal_cfg_elt periodic;
- __le32 flags; /* CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK */
-} __packed;
-
-struct iwm_lmac_cal_cfg_cmd {
- struct iwm_lmac_cal_cfg_status ucode_cfg;
- struct iwm_lmac_cal_cfg_status driver_cfg;
- __le32 reserved;
-} __packed;
-
-struct iwm_lmac_cal_cfg_resp {
- __le32 status;
-} __packed;
-
-#define IWM_CARD_STATE_SW_HW_ENABLED 0x00
-#define IWM_CARD_STATE_HW_DISABLED 0x01
-#define IWM_CARD_STATE_SW_DISABLED 0x02
-#define IWM_CARD_STATE_CTKILL_DISABLED 0x04
-#define IWM_CARD_STATE_IS_RXON 0x10
-
-struct iwm_lmac_card_state {
- __le32 flags;
-} __packed;
-
-/**
- * COEX_PRIORITY_TABLE_CMD
- *
- * Priority entry for each state
- * Will keep two tables, for STA and WIPAN
- */
-enum {
- /* UN-ASSOCIATION PART */
- COEX_UNASSOC_IDLE = 0,
- COEX_UNASSOC_MANUAL_SCAN,
- COEX_UNASSOC_AUTO_SCAN,
-
- /* CALIBRATION */
- COEX_CALIBRATION,
- COEX_PERIODIC_CALIBRATION,
-
- /* CONNECTION */
- COEX_CONNECTION_ESTAB,
-
- /* ASSOCIATION PART */
- COEX_ASSOCIATED_IDLE,
- COEX_ASSOC_MANUAL_SCAN,
- COEX_ASSOC_AUTO_SCAN,
- COEX_ASSOC_ACTIVE_LEVEL,
-
- /* RF ON/OFF */
- COEX_RF_ON,
- COEX_RF_OFF,
- COEX_STAND_ALONE_DEBUG,
-
-	/* IPAN */
- COEX_IPAN_ASSOC_LEVEL,
-
- /* RESERVED */
- COEX_RSRVD1,
- COEX_RSRVD2,
-
- COEX_EVENTS_NUM
-};
-
-#define COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK 0x1
-#define COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK 0x2
-#define COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK 0x4
-
-struct coex_event {
- u8 req_prio;
- u8 win_med_prio;
- u8 reserved;
- u8 flags;
-} __packed;
-
-#define COEX_FLAGS_STA_TABLE_VALID_MSK 0x1
-#define COEX_FLAGS_UNASSOC_WAKEUP_UMASK_MSK 0x4
-#define COEX_FLAGS_ASSOC_WAKEUP_UMASK_MSK 0x8
-#define COEX_FLAGS_COEX_ENABLE_MSK 0x80
-
-struct iwm_coex_prio_table_cmd {
- u8 flags;
- u8 reserved[3];
- struct coex_event sta_prio[COEX_EVENTS_NUM];
-} __packed;
-
-/* Coexistence definitions
- *
- * Constants to fill in the Priorities' Tables
- * RP - Requested Priority
- * WP - Win Medium Priority: priority assigned when the contention has been won
- * FLAGS - Combination of COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK and
- * COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK
- */
-
-#define COEX_UNASSOC_IDLE_FLAGS 0
-#define COEX_UNASSOC_MANUAL_SCAN_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
- COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
-#define COEX_UNASSOC_AUTO_SCAN_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
- COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
-#define COEX_CALIBRATION_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
- COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
-#define COEX_PERIODIC_CALIBRATION_FLAGS 0
-/* COEX_CONNECTION_ESTAB: we need DELAY_MEDIUM_FREE_NTFY to let WiMAX
- * disconnect from the network. */
-#define COEX_CONNECTION_ESTAB_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
- COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \
- COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK)
-#define COEX_ASSOCIATED_IDLE_FLAGS 0
-#define COEX_ASSOC_MANUAL_SCAN_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
- COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
-#define COEX_ASSOC_AUTO_SCAN_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
- COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
-#define COEX_ASSOC_ACTIVE_LEVEL_FLAGS 0
-#define COEX_RF_ON_FLAGS 0
-#define COEX_RF_OFF_FLAGS 0
-#define COEX_STAND_ALONE_DEBUG_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
- COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
-#define COEX_IPAN_ASSOC_LEVEL_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
- COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \
- COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK)
-#define COEX_RSRVD1_FLAGS 0
-#define COEX_RSRVD2_FLAGS 0
-/* XOR_RF_ON is the event wrapping all radio ownership. We need
- * DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from the network. */
-#define COEX_XOR_RF_ON_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
- COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \
- COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK)
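The flag defines above only pin down the FLAGS column of the priority table; the RP and WP values are chosen by the driver when it fills a struct iwm_coex_prio_table_cmd. A minimal sketch of one entry follows, assuming this header and the usual kernel string helpers are available; the helper name and the priority numbers 4 and 3 are invented placeholders, not values documented anywhere in this file:

static void coex_prio_table_example(struct iwm_coex_prio_table_cmd *tbl)
{
	memset(tbl, 0, sizeof(*tbl));
	tbl->flags = COEX_FLAGS_COEX_ENABLE_MSK | COEX_FLAGS_STA_TABLE_VALID_MSK;

	/* One entry only; 4 (RP) and 3 (WP) are illustrative placeholders. */
	tbl->sta_prio[COEX_CALIBRATION].req_prio     = 4;
	tbl->sta_prio[COEX_CALIBRATION].win_med_prio = 3;
	tbl->sta_prio[COEX_CALIBRATION].flags        = COEX_CALIBRATION_FLAGS;
}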
-
-/* CT kill config command */
-struct iwm_ct_kill_cfg_cmd {
- u32 exit_threshold;
- u32 reserved;
- u32 entry_threshold;
-} __packed;
-
-
-/* LMAC OP CODES */
-#define REPLY_PAD 0x0
-#define REPLY_ALIVE 0x1
-#define REPLY_ERROR 0x2
-#define REPLY_ECHO 0x3
-#define REPLY_HALT 0x6
-
-/* RXON state commands */
-#define REPLY_RX_ON 0x10
-#define REPLY_RX_ON_ASSOC 0x11
-#define REPLY_RX_OFF 0x12
-#define REPLY_QOS_PARAM 0x13
-#define REPLY_RX_ON_TIMING 0x14
-#define REPLY_INTERNAL_QOS_PARAM 0x15
-#define REPLY_RX_INT_TIMEOUT_CNFG 0x16
-#define REPLY_NULL 0x17
-
-/* Multi-Station support */
-#define REPLY_ADD_STA 0x18
-#define REPLY_REMOVE_STA 0x19
-#define REPLY_RESET_ALL_STA 0x1a
-
-/* RX, TX */
-#define REPLY_ALM_RX 0x1b
-#define REPLY_TX 0x1c
-#define REPLY_TXFIFO_FLUSH 0x1e
-
-/* MISC commands */
-#define REPLY_MGMT_MCAST_KEY 0x1f
-#define REPLY_WEPKEY 0x20
-#define REPLY_INIT_IV 0x21
-#define REPLY_WRITE_MIB 0x22
-#define REPLY_READ_MIB 0x23
-#define REPLY_RADIO_FE 0x24
-#define REPLY_TXFIFO_CFG 0x25
-#define REPLY_WRITE_READ 0x26
-#define REPLY_INSTALL_SEC_KEY 0x27
-
-
-#define REPLY_RATE_SCALE 0x47
-#define REPLY_LEDS_CMD 0x48
-#define REPLY_TX_LINK_QUALITY_CMD 0x4e
-#define REPLY_ANA_MIB_OVERRIDE_CMD 0x4f
-#define REPLY_WRITE2REG_CMD 0x50
-
-/* winfi-wifi coexistence */
-#define COEX_PRIORITY_TABLE_CMD 0x5a
-#define COEX_MEDIUM_NOTIFICATION 0x5b
-#define COEX_EVENT_CMD 0x5c
-
-/* more Protocol and Protocol-test commands */
-#define REPLY_MAX_SLEEP_TIME_CMD 0x61
-#define CALIBRATION_CFG_CMD 0x65
-#define CALIBRATION_RES_NOTIFICATION 0x66
-#define CALIBRATION_COMPLETE_NOTIFICATION 0x67
-
-/* Measurements */
-#define REPLY_QUIET_CMD 0x71
-#define REPLY_CHANNEL_SWITCH 0x72
-#define CHANNEL_SWITCH_NOTIFICATION 0x73
-
-#define REPLY_SPECTRUM_MEASUREMENT_CMD 0x74
-#define SPECTRUM_MEASURE_NOTIFICATION 0x75
-#define REPLY_MEASUREMENT_ABORT_CMD 0x76
-
-/* Power Management */
-#define POWER_TABLE_CMD 0x77
-#define SAVE_RESTORE_ADDRESS_CMD 0x78
-#define REPLY_WATERMARK_CMD 0x79
-#define PM_DEBUG_STATISTIC_NOTIFIC 0x7B
-#define PD_FLUSH_N_NOTIFICATION 0x7C
-
-/* Scan commands and notifications */
-#define REPLY_SCAN_REQUEST_CMD 0x80
-#define REPLY_SCAN_ABORT_CMD 0x81
-#define SCAN_START_NOTIFICATION 0x82
-#define SCAN_RESULTS_NOTIFICATION 0x83
-#define SCAN_COMPLETE_NOTIFICATION 0x84
-
-/* Continuous TX commands */
-#define REPLY_CONT_TX_CMD 0x85
-#define END_OF_CONT_TX_NOTIFICATION 0x86
-
-/* Timer/Eeprom commands */
-#define TIMER_CMD 0x87
-#define EEPROM_WRITE_CMD 0x88
-
-/* PAPD commands */
-#define FEEDBACK_REQUEST_NOTIFICATION 0x8b
-#define REPLY_CW_CMD 0x8c
-
-/* IBSS/AP commands Continue */
-#define BEACON_NOTIFICATION 0x90
-#define REPLY_TX_BEACON 0x91
-#define REPLY_REQUEST_ATIM 0x93
-#define WHO_IS_AWAKE_NOTIFICATION 0x94
-#define TX_PWR_DBM_LIMIT_CMD 0x95
-#define QUIET_NOTIFICATION 0x96
-#define TX_PWR_TABLE_CMD 0x97
-#define TX_ANT_CONFIGURATION_CMD 0x98
-#define MEASURE_ABORT_NOTIFICATION 0x99
-#define REPLY_CALIBRATION_TUNE 0x9a
-
-/* bt config command */
-#define REPLY_BT_CONFIG 0x9b
-#define REPLY_STATISTICS_CMD 0x9c
-#define STATISTICS_NOTIFICATION 0x9d
-
-/* RF-KILL commands and notifications */
-#define REPLY_CARD_STATE_CMD 0xa0
-#define CARD_STATE_NOTIFICATION 0xa1
-
-/* Missed beacons notification */
-#define MISSED_BEACONS_NOTIFICATION 0xa2
-#define MISSED_BEACONS_NOTIFICATION_TH_CMD 0xa3
-
-#define REPLY_CT_KILL_CONFIG_CMD 0xa4
-
-/* HD commands and notifications */
-#define REPLY_HD_PARAMS_CMD 0xa6
-#define HD_PARAMS_NOTIFICATION 0xa7
-#define SENSITIVITY_CMD 0xa8
-#define U_APSD_PARAMS_CMD 0xa9
-#define NOISY_PLATFORM_CMD 0xaa
-#define ILLEGAL_CMD 0xac
-#define REPLY_PHY_CALIBRATION_CMD 0xb0
-#define REPLAY_RX_GAIN_CALIB_CMD 0xb1
-
-/* WiPAN commands */
-#define REPLY_WIPAN_PARAMS_CMD 0xb2
-#define REPLY_WIPAN_RX_ON_CMD 0xb3
-#define REPLY_WIPAN_RX_ON_TIMING 0xb4
-#define REPLY_WIPAN_TX_PWR_TABLE_CMD 0xb5
-#define REPLY_WIPAN_RXON_ASSOC_CMD 0xb6
-#define REPLY_WIPAN_QOS_PARAM 0xb7
-#define WIPAN_REPLY_WEPKEY 0xb8
-
-/* BeamForming commands */
-#define BEAMFORMER_CFG_CMD 0xba
-#define BEAMFORMEE_NOTIFICATION 0xbb
-
-/* TGn new Commands */
-#define REPLY_RX_PHY_CMD 0xc0
-#define REPLY_RX_MPDU_CMD 0xc1
-#define REPLY_MULTICAST_HASH 0xc2
-#define REPLY_KDR_RX 0xc3
-#define REPLY_RX_DSP_EXT_INFO 0xc4
-#define REPLY_COMPRESSED_BA 0xc5
-
-/* PNC commands */
-#define PNC_CONFIG_CMD 0xc8
-#define PNC_UPDATE_TABLE_CMD 0xc9
-#define XVT_GENERAL_CTRL_CMD 0xca
-#define REPLY_LEGACY_RADIO_FE 0xdd
-
-/* WoWLAN commands */
-#define WOWLAN_PATTERNS 0xe0
-#define WOWLAN_WAKEUP_FILTER 0xe1
-#define WOWLAN_TSC_RSC_PARAM 0xe2
-#define WOWLAN_TKIP_PARAM 0xe3
-#define WOWLAN_KEK_KCK_MATERIAL 0xe4
-#define WOWLAN_GET_STATUSES 0xe5
-#define WOWLAN_TX_POWER_PER_DB 0xe6
-#define REPLY_WOWLAN_GET_STATUSES WOWLAN_GET_STATUSES
-
-#define REPLY_DEBUG_CMD 0xf0
-#define REPLY_DSP_DEBUG_CMD 0xf1
-#define REPLY_DEBUG_MONITOR_CMD 0xf2
-#define REPLY_DEBUG_XVT_CMD 0xf3
-#define REPLY_DEBUG_DC_CALIB 0xf4
-#define REPLY_DYNAMIC_BP 0xf5
-
-/* General purpose Commands */
-#define REPLY_GP1_CMD 0xfa
-#define REPLY_GP2_CMD 0xfb
-#define REPLY_GP3_CMD 0xfc
-#define REPLY_GP4_CMD 0xfd
-#define REPLY_REPLAY_WRAPPER 0xfe
-#define REPLY_FRAME_DURATION_CALC_CMD 0xff
-
-#define LMAC_COMMAND_ID_MAX 0xff
-#define LMAC_COMMAND_ID_NUM (LMAC_COMMAND_ID_MAX + 1)
-
-
-/* Calibration */
-
-enum {
- PHY_CALIBRATE_DC_CMD = 0,
- PHY_CALIBRATE_LO_CMD = 1,
- PHY_CALIBRATE_RX_BB_CMD = 2,
- PHY_CALIBRATE_TX_IQ_CMD = 3,
- PHY_CALIBRATE_RX_IQ_CMD = 4,
- PHY_CALIBRATION_NOISE_CMD = 5,
- PHY_CALIBRATE_AGC_TABLE_CMD = 6,
- PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 7,
- PHY_CALIBRATE_OPCODES_NUM,
- SHILOH_PHY_CALIBRATE_DC_CMD = 8,
- SHILOH_PHY_CALIBRATE_LO_CMD = 9,
- SHILOH_PHY_CALIBRATE_RX_BB_CMD = 10,
- SHILOH_PHY_CALIBRATE_TX_IQ_CMD = 11,
- SHILOH_PHY_CALIBRATE_RX_IQ_CMD = 12,
- SHILOH_PHY_CALIBRATION_NOISE_CMD = 13,
- SHILOH_PHY_CALIBRATE_AGC_TABLE_CMD = 14,
- SHILOH_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15,
- SHILOH_PHY_CALIBRATE_BASE_BAND_CMD = 16,
- SHILOH_PHY_CALIBRATE_TXIQ_PERIODIC_CMD = 17,
- CALIBRATION_CMD_NUM,
-};
-
-enum {
- CALIB_CFG_RX_BB_IDX = 0,
- CALIB_CFG_DC_IDX = 1,
- CALIB_CFG_LO_IDX = 2,
- CALIB_CFG_TX_IQ_IDX = 3,
- CALIB_CFG_RX_IQ_IDX = 4,
- CALIB_CFG_NOISE_IDX = 5,
- CALIB_CFG_CRYSTAL_IDX = 6,
- CALIB_CFG_TEMPERATURE_IDX = 7,
- CALIB_CFG_PAPD_IDX = 8,
- CALIB_CFG_LAST_IDX = CALIB_CFG_PAPD_IDX,
- CALIB_CFG_MODULE_NUM,
-};
-
-#define IWM_CALIB_MAP_INIT_MSK 0xFFFF
-#define IWM_CALIB_MAP_PER_LMAC(m) ((m & 0xFF0000) >> 16)
-#define IWM_CALIB_MAP_PER_UMAC(m) ((m & 0xFF000000) >> 24)
-#define IWM_CALIB_OPCODE_TO_INDEX(op) (op - PHY_CALIBRATE_OPCODES_NUM)
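Taken together, the masks above split a calibration map into the init-time mask (bits 0-15), a per-LMAC byte (bits 16-23) and a per-UMAC byte (bits 24-31). A short decomposition sketch, with 0x01020003 invented purely for illustration:

	u32 map  = 0x01020003;				/* invented example value */
	u32 init = map & IWM_CALIB_MAP_INIT_MSK;	/* 0x0003 (bits 0-15)     */
	u32 lmac = IWM_CALIB_MAP_PER_LMAC(map);		/* 0x02   (bits 16-23)    */
	u32 umac = IWM_CALIB_MAP_PER_UMAC(map);		/* 0x01   (bits 24-31)    */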
-
-struct iwm_lmac_calib_hdr {
- u8 opcode;
- u8 first_grp;
- u8 grp_num;
- u8 all_data_valid;
-} __packed;
-
-#define IWM_LMAC_CALIB_FREQ_GROUPS_NR 7
-#define IWM_CALIB_FREQ_GROUPS_NR 5
-#define IWM_CALIB_DC_MODES_NR 12
-
-struct iwm_calib_rxiq_entry {
- u16 ptam_postdist_ars;
- u16 ptam_postdist_arc;
-} __packed;
-
-struct iwm_calib_rxiq_group {
- struct iwm_calib_rxiq_entry mode[IWM_CALIB_DC_MODES_NR];
-} __packed;
-
-struct iwm_lmac_calib_rxiq {
- struct iwm_calib_rxiq_group group[IWM_LMAC_CALIB_FREQ_GROUPS_NR];
-} __packed;
-
-struct iwm_calib_rxiq {
- struct iwm_lmac_calib_hdr hdr;
- struct iwm_calib_rxiq_group group[IWM_CALIB_FREQ_GROUPS_NR];
-} __packed;
-
-#define LMAC_STA_ID_SEED 0x0f
-#define LMAC_STA_ID_POS 0
-
-#define LMAC_STA_COLOR_SEED 0x7
-#define LMAC_STA_COLOR_POS 4
-
-struct iwm_lmac_power_report {
- u8 pa_status;
- u8 pa_integ_res_A[3];
- u8 pa_integ_res_B[3];
- u8 pa_integ_res_C[3];
-} __packed;
-
-struct iwm_lmac_tx_resp {
- u8 frame_cnt; /* 1 - no aggregation, greater than 1 - aggregation */
- u8 bt_kill_cnt;
- __le16 retry_cnt;
- __le32 initial_tx_rate;
- __le16 wireless_media_time;
- struct iwm_lmac_power_report power_report;
- __le32 tfd_info;
- __le16 seq_ctl;
- __le16 byte_cnt;
- u8 tlc_rate_info;
- u8 ra_tid;
- __le16 frame_ctl;
- __le32 status;
-} __packed;
-
-#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
deleted file mode 100644
index 1f868b1..0000000
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ /dev/null
@@ -1,847 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <linux/ieee80211.h>
-#include <linux/wireless.h>
-#include <linux/slab.h>
-#include <linux/moduleparam.h>
-
-#include "iwm.h"
-#include "debug.h"
-#include "bus.h"
-#include "umac.h"
-#include "commands.h"
-#include "hal.h"
-#include "fw.h"
-#include "rx.h"
-
-static struct iwm_conf def_iwm_conf = {
-
- .sdio_ior_timeout = 5000,
- .calib_map = BIT(CALIB_CFG_DC_IDX) |
- BIT(CALIB_CFG_LO_IDX) |
- BIT(CALIB_CFG_TX_IQ_IDX) |
- BIT(CALIB_CFG_RX_IQ_IDX) |
- BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD),
- .expected_calib_map = BIT(PHY_CALIBRATE_DC_CMD) |
- BIT(PHY_CALIBRATE_LO_CMD) |
- BIT(PHY_CALIBRATE_TX_IQ_CMD) |
- BIT(PHY_CALIBRATE_RX_IQ_CMD) |
- BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD),
- .ct_kill_entry = 110,
- .ct_kill_exit = 110,
- .reset_on_fatal_err = 1,
- .auto_connect = 1,
- .enable_qos = 1,
- .mode = UMAC_MODE_BSS,
-
- /* UMAC configuration */
- .power_index = 0,
- .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
- .rts_threshold = IEEE80211_MAX_RTS_THRESHOLD,
- .cts_to_self = 0,
-
- .assoc_timeout = 2,
- .roam_timeout = 10,
- .wireless_mode = WIRELESS_MODE_11A | WIRELESS_MODE_11G |
- WIRELESS_MODE_11N,
-
- /* IBSS */
- .ibss_band = UMAC_BAND_2GHZ,
- .ibss_channel = 1,
-
- .mac_addr = {0x00, 0x02, 0xb3, 0x01, 0x02, 0x03},
-};
-
-static bool modparam_reset;
-module_param_named(reset, modparam_reset, bool, 0644);
-MODULE_PARM_DESC(reset, "reset on firmware errors (default 0 [do not reset])");
-
-static bool modparam_wimax_enable = true;
-module_param_named(wimax_enable, modparam_wimax_enable, bool, 0644);
-MODULE_PARM_DESC(wimax_enable, "Enable wimax core (default 1 [wimax enabled])");
-
-int iwm_mode_to_nl80211_iftype(int mode)
-{
- switch (mode) {
- case UMAC_MODE_BSS:
- return NL80211_IFTYPE_STATION;
- case UMAC_MODE_IBSS:
- return NL80211_IFTYPE_ADHOC;
- default:
- return NL80211_IFTYPE_UNSPECIFIED;
- }
-
- return 0;
-}
-
-static void iwm_statistics_request(struct work_struct *work)
-{
- struct iwm_priv *iwm =
- container_of(work, struct iwm_priv, stats_request.work);
-
- iwm_send_umac_stats_req(iwm, 0);
-}
-
-static void iwm_disconnect_work(struct work_struct *work)
-{
- struct iwm_priv *iwm =
- container_of(work, struct iwm_priv, disconnect.work);
-
- if (iwm->umac_profile_active)
- iwm_invalidate_mlme_profile(iwm);
-
- clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
- iwm->umac_profile_active = false;
- memset(iwm->bssid, 0, ETH_ALEN);
- iwm->channel = 0;
-
- iwm_link_off(iwm);
-
- wake_up_interruptible(&iwm->mlme_queue);
-
- cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0, GFP_KERNEL);
-}
-
-static void iwm_ct_kill_work(struct work_struct *work)
-{
- struct iwm_priv *iwm =
- container_of(work, struct iwm_priv, ct_kill_delay.work);
- struct wiphy *wiphy = iwm_to_wiphy(iwm);
-
- IWM_INFO(iwm, "CT kill delay timeout\n");
-
- wiphy_rfkill_set_hw_state(wiphy, false);
-}
-
-static int __iwm_up(struct iwm_priv *iwm);
-static int __iwm_down(struct iwm_priv *iwm);
-
-static void iwm_reset_worker(struct work_struct *work)
-{
- struct iwm_priv *iwm;
- struct iwm_umac_profile *profile = NULL;
- int uninitialized_var(ret), retry = 0;
-
- iwm = container_of(work, struct iwm_priv, reset_worker);
-
- /*
- * XXX: The iwm->mutex is introduced purely for this reset work,
- * because the only other users of iwm_up and iwm_down are the netdev
- * ndo_open and ndo_stop hooks, which are already protected by rtnl.
- * Please remove iwm->mutex as well if iwm_reset_worker() is no longer
- * required in the future.
- */
- if (!mutex_trylock(&iwm->mutex)) {
- IWM_WARN(iwm, "We are in the middle of interface bringing "
- "UP/DOWN. Skip driver resetting.\n");
- return;
- }
-
- if (iwm->umac_profile_active) {
- profile = kmalloc(sizeof(struct iwm_umac_profile), GFP_KERNEL);
- if (profile)
- memcpy(profile, iwm->umac_profile, sizeof(*profile));
- else
- IWM_ERR(iwm, "Couldn't alloc memory for profile\n");
- }
-
- __iwm_down(iwm);
-
- while (retry++ < 3) {
- ret = __iwm_up(iwm);
- if (!ret)
- break;
-
- schedule_timeout_uninterruptible(10 * HZ);
- }
-
- if (ret) {
- IWM_WARN(iwm, "iwm_up() failed: %d\n", ret);
-
- kfree(profile);
- goto out;
- }
-
- if (profile) {
- IWM_DBG_MLME(iwm, DBG, "Resend UMAC profile\n");
- memcpy(iwm->umac_profile, profile, sizeof(*profile));
- iwm_send_mlme_profile(iwm);
- kfree(profile);
- } else
- clear_bit(IWM_STATUS_RESETTING, &iwm->status);
-
- out:
- mutex_unlock(&iwm->mutex);
-}
-
-static void iwm_auth_retry_worker(struct work_struct *work)
-{
- struct iwm_priv *iwm;
- int i, ret;
-
- iwm = container_of(work, struct iwm_priv, auth_retry_worker);
- if (iwm->umac_profile_active) {
- ret = iwm_invalidate_mlme_profile(iwm);
- if (ret < 0)
- return;
- }
-
- iwm->umac_profile->sec.auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
-
- ret = iwm_send_mlme_profile(iwm);
- if (ret < 0)
- return;
-
- for (i = 0; i < IWM_NUM_KEYS; i++)
- if (iwm->keys[i].key_len)
- iwm_set_key(iwm, 0, &iwm->keys[i]);
-
- iwm_set_tx_key(iwm, iwm->default_key);
-}
-
-
-
-static void iwm_watchdog(unsigned long data)
-{
- struct iwm_priv *iwm = (struct iwm_priv *)data;
-
- IWM_WARN(iwm, "Watchdog expired: UMAC stalls!\n");
-
- if (modparam_reset)
- iwm_resetting(iwm);
-}
-
-int iwm_priv_init(struct iwm_priv *iwm)
-{
- int i, j;
- char name[32];
-
- iwm->status = 0;
- INIT_LIST_HEAD(&iwm->pending_notif);
- init_waitqueue_head(&iwm->notif_queue);
- init_waitqueue_head(&iwm->nonwifi_queue);
- init_waitqueue_head(&iwm->wifi_ntfy_queue);
- init_waitqueue_head(&iwm->mlme_queue);
- memcpy(&iwm->conf, &def_iwm_conf, sizeof(struct iwm_conf));
- spin_lock_init(&iwm->tx_credit.lock);
- INIT_LIST_HEAD(&iwm->wifi_pending_cmd);
- INIT_LIST_HEAD(&iwm->nonwifi_pending_cmd);
- iwm->wifi_seq_num = UMAC_WIFI_SEQ_NUM_BASE;
- iwm->nonwifi_seq_num = UMAC_NONWIFI_SEQ_NUM_BASE;
- spin_lock_init(&iwm->cmd_lock);
- iwm->scan_id = 1;
- INIT_DELAYED_WORK(&iwm->stats_request, iwm_statistics_request);
- INIT_DELAYED_WORK(&iwm->disconnect, iwm_disconnect_work);
- INIT_DELAYED_WORK(&iwm->ct_kill_delay, iwm_ct_kill_work);
- INIT_WORK(&iwm->reset_worker, iwm_reset_worker);
- INIT_WORK(&iwm->auth_retry_worker, iwm_auth_retry_worker);
- INIT_LIST_HEAD(&iwm->bss_list);
-
- skb_queue_head_init(&iwm->rx_list);
- INIT_LIST_HEAD(&iwm->rx_tickets);
- spin_lock_init(&iwm->ticket_lock);
- for (i = 0; i < IWM_RX_ID_HASH; i++) {
- INIT_LIST_HEAD(&iwm->rx_packets[i]);
- spin_lock_init(&iwm->packet_lock[i]);
- }
-
- INIT_WORK(&iwm->rx_worker, iwm_rx_worker);
-
- iwm->rx_wq = create_singlethread_workqueue(KBUILD_MODNAME "_rx");
- if (!iwm->rx_wq)
- return -EAGAIN;
-
- for (i = 0; i < IWM_TX_QUEUES; i++) {
- INIT_WORK(&iwm->txq[i].worker, iwm_tx_worker);
- snprintf(name, 32, KBUILD_MODNAME "_tx_%d", i);
- iwm->txq[i].id = i;
- iwm->txq[i].wq = create_singlethread_workqueue(name);
- if (!iwm->txq[i].wq)
- return -EAGAIN;
-
- skb_queue_head_init(&iwm->txq[i].queue);
- skb_queue_head_init(&iwm->txq[i].stopped_queue);
- spin_lock_init(&iwm->txq[i].lock);
- }
-
- for (i = 0; i < IWM_NUM_KEYS; i++)
- memset(&iwm->keys[i], 0, sizeof(struct iwm_key));
-
- iwm->default_key = -1;
-
- for (i = 0; i < IWM_STA_TABLE_NUM; i++)
- for (j = 0; j < IWM_UMAC_TID_NR; j++) {
- mutex_init(&iwm->sta_table[i].tid_info[j].mutex);
- iwm->sta_table[i].tid_info[j].stopped = false;
- }
-
- init_timer(&iwm->watchdog);
- iwm->watchdog.function = iwm_watchdog;
- iwm->watchdog.data = (unsigned long)iwm;
- mutex_init(&iwm->mutex);
-
- iwm->last_fw_err = kzalloc(sizeof(struct iwm_fw_error_hdr),
- GFP_KERNEL);
- if (iwm->last_fw_err == NULL)
- return -ENOMEM;
-
- return 0;
-}
-
-void iwm_priv_deinit(struct iwm_priv *iwm)
-{
- int i;
-
- for (i = 0; i < IWM_TX_QUEUES; i++)
- destroy_workqueue(iwm->txq[i].wq);
-
- destroy_workqueue(iwm->rx_wq);
- kfree(iwm->last_fw_err);
-}
-
-/*
- * We reset all the structures, and we reset the UMAC.
- * After calling this routine, you're expected to reload
- * the firmware.
- */
-void iwm_reset(struct iwm_priv *iwm)
-{
- struct iwm_notif *notif, *next;
-
- if (test_bit(IWM_STATUS_READY, &iwm->status))
- iwm_target_reset(iwm);
-
- if (test_bit(IWM_STATUS_RESETTING, &iwm->status)) {
- iwm->status = 0;
- set_bit(IWM_STATUS_RESETTING, &iwm->status);
- } else
- iwm->status = 0;
- iwm->scan_id = 1;
-
- list_for_each_entry_safe(notif, next, &iwm->pending_notif, pending) {
- list_del(&notif->pending);
- kfree(notif->buf);
- kfree(notif);
- }
-
- iwm_cmd_flush(iwm);
-
- flush_workqueue(iwm->rx_wq);
-
- iwm_link_off(iwm);
-}
-
-void iwm_resetting(struct iwm_priv *iwm)
-{
- set_bit(IWM_STATUS_RESETTING, &iwm->status);
-
- schedule_work(&iwm->reset_worker);
-}
-
-/*
- * Notification code:
- *
- * We're faced with the following issue: Any host command can
- * have an answer or not, and if there's an answer to expect,
- * it can be treated synchronously or asynchronously.
- * To work around the synchronous answer case, we implemented
- * our notification mechanism.
- * When a code path needs to wait for a command response
- * synchronously, it calls iwm_notif_handle(), which waits for the
- * right notification to show up and then processes it. Before
- * starting to wait, it registers as a waiter for this specific
- * answer (by toggling a bit in one of the handler maps), so that
- * the rx code knows that it needs to send a notification to the
- * waiting processes. It does so by calling iwm_notif_send(),
- * which adds the notification to the pending notifications list,
- * and then wakes the waiting processes up.
- */
-int iwm_notif_send(struct iwm_priv *iwm, struct iwm_wifi_cmd *cmd,
- u8 cmd_id, u8 source, u8 *buf, unsigned long buf_size)
-{
- struct iwm_notif *notif;
-
- notif = kzalloc(sizeof(struct iwm_notif), GFP_KERNEL);
- if (!notif) {
- IWM_ERR(iwm, "Couldn't alloc memory for notification\n");
- return -ENOMEM;
- }
-
- INIT_LIST_HEAD(&notif->pending);
- notif->cmd = cmd;
- notif->cmd_id = cmd_id;
- notif->src = source;
- notif->buf = kzalloc(buf_size, GFP_KERNEL);
- if (!notif->buf) {
- IWM_ERR(iwm, "Couldn't alloc notification buffer\n");
- kfree(notif);
- return -ENOMEM;
- }
- notif->buf_size = buf_size;
- memcpy(notif->buf, buf, buf_size);
- list_add_tail(&notif->pending, &iwm->pending_notif);
-
- wake_up_interruptible(&iwm->notif_queue);
-
- return 0;
-}
-
-static struct iwm_notif *iwm_notif_find(struct iwm_priv *iwm, u32 cmd,
- u8 source)
-{
- struct iwm_notif *notif;
-
- list_for_each_entry(notif, &iwm->pending_notif, pending) {
- if ((notif->cmd_id == cmd) && (notif->src == source)) {
- list_del(&notif->pending);
- return notif;
- }
- }
-
- return NULL;
-}
-
-static struct iwm_notif *iwm_notif_wait(struct iwm_priv *iwm, u32 cmd,
- u8 source, long timeout)
-{
- int ret;
- struct iwm_notif *notif;
- unsigned long *map = NULL;
-
- switch (source) {
- case IWM_SRC_LMAC:
- map = &iwm->lmac_handler_map[0];
- break;
- case IWM_SRC_UMAC:
- map = &iwm->umac_handler_map[0];
- break;
- case IWM_SRC_UDMA:
- map = &iwm->udma_handler_map[0];
- break;
- }
-
- set_bit(cmd, map);
-
- ret = wait_event_interruptible_timeout(iwm->notif_queue,
- ((notif = iwm_notif_find(iwm, cmd, source)) != NULL),
- timeout);
- clear_bit(cmd, map);
-
- if (!ret)
- return NULL;
-
- return notif;
-}
-
-int iwm_notif_handle(struct iwm_priv *iwm, u32 cmd, u8 source, long timeout)
-{
- int ret;
- struct iwm_notif *notif;
-
- notif = iwm_notif_wait(iwm, cmd, source, timeout);
- if (!notif)
- return -ETIME;
-
- ret = iwm_rx_handle_resp(iwm, notif->buf, notif->buf_size, notif->cmd);
- kfree(notif->buf);
- kfree(notif);
-
- return ret;
-}
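iwm_channels_init() further down in this file is a representative caller of this helper; condensed to its essentials, the send-then-wait pattern looks like this:

	int ret = iwm_send_umac_channel_list(iwm);
	if (!ret)
		ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST,
				       IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);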
-
-static int iwm_config_boot_params(struct iwm_priv *iwm)
-{
- struct iwm_udma_nonwifi_cmd target_cmd;
- int ret;
-
- /* check that WiMAX is off and configure the debug monitor */
- if (!modparam_wimax_enable) {
- u32 data1 = 0x1f;
- u32 addr1 = 0x606BE258;
-
- u32 data2_set = 0x0;
- u32 data2_clr = 0x1;
- u32 addr2 = 0x606BE100;
-
- u32 data3 = 0x1;
- u32 addr3 = 0x606BEC00;
-
- target_cmd.resp = 0;
- target_cmd.handle_by_hw = 0;
- target_cmd.eop = 1;
-
- target_cmd.opcode = UMAC_HDI_OUT_OPCODE_WRITE;
- target_cmd.addr = cpu_to_le32(addr1);
- target_cmd.op1_sz = cpu_to_le32(sizeof(u32));
- target_cmd.op2 = 0;
-
- ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data1);
- if (ret < 0) {
- IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n");
- return ret;
- }
-
- target_cmd.opcode = UMAC_HDI_OUT_OPCODE_READ_MODIFY_WRITE;
- target_cmd.addr = cpu_to_le32(addr2);
- target_cmd.op1_sz = cpu_to_le32(data2_set);
- target_cmd.op2 = cpu_to_le32(data2_clr);
-
- ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data1);
- if (ret < 0) {
- IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n");
- return ret;
- }
-
- target_cmd.opcode = UMAC_HDI_OUT_OPCODE_WRITE;
- target_cmd.addr = cpu_to_le32(addr3);
- target_cmd.op1_sz = cpu_to_le32(sizeof(u32));
- target_cmd.op2 = 0;
-
- ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data3);
- if (ret < 0) {
- IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n");
- return ret;
- }
- }
-
- return 0;
-}
-
-void iwm_init_default_profile(struct iwm_priv *iwm,
- struct iwm_umac_profile *profile)
-{
- memset(profile, 0, sizeof(struct iwm_umac_profile));
-
- profile->sec.auth_type = UMAC_AUTH_TYPE_OPEN;
- profile->sec.flags = UMAC_SEC_FLG_LEGACY_PROFILE;
- profile->sec.ucast_cipher = UMAC_CIPHER_TYPE_NONE;
- profile->sec.mcast_cipher = UMAC_CIPHER_TYPE_NONE;
-
- if (iwm->conf.enable_qos)
- profile->flags |= cpu_to_le16(UMAC_PROFILE_QOS_ALLOWED);
-
- profile->wireless_mode = iwm->conf.wireless_mode;
- profile->mode = cpu_to_le32(iwm->conf.mode);
-
- profile->ibss.atim = 0;
- profile->ibss.beacon_interval = 100;
- profile->ibss.join_only = 0;
- profile->ibss.band = iwm->conf.ibss_band;
- profile->ibss.channel = iwm->conf.ibss_channel;
-}
-
-void iwm_link_on(struct iwm_priv *iwm)
-{
- netif_carrier_on(iwm_to_ndev(iwm));
- netif_tx_wake_all_queues(iwm_to_ndev(iwm));
-
- iwm_send_umac_stats_req(iwm, 0);
-}
-
-void iwm_link_off(struct iwm_priv *iwm)
-{
- struct iw_statistics *wstats = &iwm->wstats;
- int i;
-
- netif_tx_stop_all_queues(iwm_to_ndev(iwm));
- netif_carrier_off(iwm_to_ndev(iwm));
-
- for (i = 0; i < IWM_TX_QUEUES; i++) {
- skb_queue_purge(&iwm->txq[i].queue);
- skb_queue_purge(&iwm->txq[i].stopped_queue);
-
- iwm->txq[i].concat_count = 0;
- iwm->txq[i].concat_ptr = iwm->txq[i].concat_buf;
-
- flush_workqueue(iwm->txq[i].wq);
- }
-
- iwm_rx_free(iwm);
-
- cancel_delayed_work_sync(&iwm->stats_request);
- memset(wstats, 0, sizeof(struct iw_statistics));
- wstats->qual.updated = IW_QUAL_ALL_INVALID;
-
- kfree(iwm->req_ie);
- iwm->req_ie = NULL;
- iwm->req_ie_len = 0;
- kfree(iwm->resp_ie);
- iwm->resp_ie = NULL;
- iwm->resp_ie_len = 0;
-
- del_timer_sync(&iwm->watchdog);
-}
-
-static void iwm_bss_list_clean(struct iwm_priv *iwm)
-{
- struct iwm_bss_info *bss, *next;
-
- list_for_each_entry_safe(bss, next, &iwm->bss_list, node) {
- list_del(&bss->node);
- kfree(bss->bss);
- kfree(bss);
- }
-}
-
-static int iwm_channels_init(struct iwm_priv *iwm)
-{
- int ret;
-
- ret = iwm_send_umac_channel_list(iwm);
- if (ret) {
- IWM_ERR(iwm, "Send channel list failed\n");
- return ret;
- }
-
- ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST,
- IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
- if (ret) {
- IWM_ERR(iwm, "Didn't get a channel list notification\n");
- return ret;
- }
-
- return 0;
-}
-
-static int __iwm_up(struct iwm_priv *iwm)
-{
- int ret;
- struct iwm_notif *notif_reboot, *notif_ack = NULL;
- struct wiphy *wiphy = iwm_to_wiphy(iwm);
- u32 wireless_mode;
-
- ret = iwm_bus_enable(iwm);
- if (ret) {
- IWM_ERR(iwm, "Couldn't enable function\n");
- return ret;
- }
-
- iwm_rx_setup_handlers(iwm);
-
- /* Wait for initial BARKER_REBOOT from hardware */
- notif_reboot = iwm_notif_wait(iwm, IWM_BARKER_REBOOT_NOTIFICATION,
- IWM_SRC_UDMA, 2 * HZ);
- if (!notif_reboot) {
- IWM_ERR(iwm, "Wait for REBOOT_BARKER timeout\n");
- goto err_disable;
- }
-
- /* We send the barker back */
- ret = iwm_bus_send_chunk(iwm, notif_reboot->buf, 16);
- if (ret) {
- IWM_ERR(iwm, "REBOOT barker response failed\n");
- kfree(notif_reboot);
- goto err_disable;
- }
-
- kfree(notif_reboot->buf);
- kfree(notif_reboot);
-
- /* Wait for ACK_BARKER from hardware */
- notif_ack = iwm_notif_wait(iwm, IWM_ACK_BARKER_NOTIFICATION,
- IWM_SRC_UDMA, 2 * HZ);
- if (!notif_ack) {
- IWM_ERR(iwm, "Wait for ACK_BARKER timeout\n");
- goto err_disable;
- }
-
- kfree(notif_ack->buf);
- kfree(notif_ack);
-
- /* We start to config static boot parameters */
- ret = iwm_config_boot_params(iwm);
- if (ret) {
- IWM_ERR(iwm, "Config boot parameters failed\n");
- goto err_disable;
- }
-
- ret = iwm_read_mac(iwm, iwm_to_ndev(iwm)->dev_addr);
- if (ret) {
- IWM_ERR(iwm, "MAC reading failed\n");
- goto err_disable;
- }
- memcpy(iwm_to_ndev(iwm)->perm_addr, iwm_to_ndev(iwm)->dev_addr,
- ETH_ALEN);
-
- /* We can load the FWs */
- ret = iwm_load_fw(iwm);
- if (ret) {
- IWM_ERR(iwm, "FW loading failed\n");
- goto err_disable;
- }
-
- ret = iwm_eeprom_fat_channels(iwm);
- if (ret) {
- IWM_ERR(iwm, "Couldnt read HT channels EEPROM entries\n");
- goto err_fw;
- }
-
- /*
- * Read our SKU capabilities.
- * If it is valid, we AND the configured wireless mode with the
- * device EEPROM value and use the result as the current profile
- * wireless mode.
- */
- wireless_mode = iwm_eeprom_wireless_mode(iwm);
- if (wireless_mode) {
- iwm->conf.wireless_mode &= wireless_mode;
- if (iwm->umac_profile)
- iwm->umac_profile->wireless_mode =
- iwm->conf.wireless_mode;
- } else
- IWM_ERR(iwm, "Wrong SKU capabilities: 0x%x\n",
- *((u16 *)iwm_eeprom_access(iwm, IWM_EEPROM_SKU_CAP)));
-
- snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "L%s_U%s",
- iwm->lmac_version, iwm->umac_version);
-
- /* We configure the UMAC and enable the wifi module */
- ret = iwm_send_umac_config(iwm,
- cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_CORE_EN) |
- cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_LINK_EN) |
- cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_MLME_EN));
- if (ret) {
- IWM_ERR(iwm, "UMAC config failed\n");
- goto err_fw;
- }
-
- ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS,
- IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
- if (ret) {
- IWM_ERR(iwm, "Didn't get a wifi core status notification\n");
- goto err_fw;
- }
-
- if (iwm->core_enabled != (UMAC_NTFY_WIFI_CORE_STATUS_LINK_EN |
- UMAC_NTFY_WIFI_CORE_STATUS_MLME_EN)) {
- IWM_DBG_BOOT(iwm, DBG, "Not all cores enabled:0x%x\n",
- iwm->core_enabled);
- ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS,
- IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
- if (ret) {
- IWM_ERR(iwm, "Didn't get a core status notification\n");
- goto err_fw;
- }
-
- if (iwm->core_enabled != (UMAC_NTFY_WIFI_CORE_STATUS_LINK_EN |
- UMAC_NTFY_WIFI_CORE_STATUS_MLME_EN)) {
- IWM_ERR(iwm, "Not all cores enabled: 0x%x\n",
- iwm->core_enabled);
- goto err_fw;
- } else {
- IWM_INFO(iwm, "All cores enabled\n");
- }
- }
-
- ret = iwm_channels_init(iwm);
- if (ret < 0) {
- IWM_ERR(iwm, "Couldn't init channels\n");
- goto err_fw;
- }
-
- /* Set the READY bit to indicate the interface was brought up successfully */
- set_bit(IWM_STATUS_READY, &iwm->status);
-
- return 0;
-
- err_fw:
- iwm_eeprom_exit(iwm);
-
- err_disable:
- ret = iwm_bus_disable(iwm);
- if (ret < 0)
- IWM_ERR(iwm, "Couldn't disable function\n");
-
- return -EIO;
-}
-
-int iwm_up(struct iwm_priv *iwm)
-{
- int ret;
-
- mutex_lock(&iwm->mutex);
- ret = __iwm_up(iwm);
- mutex_unlock(&iwm->mutex);
-
- return ret;
-}
-
-static int __iwm_down(struct iwm_priv *iwm)
-{
- int ret;
-
- /* The interface is already down */
- if (!test_bit(IWM_STATUS_READY, &iwm->status))
- return 0;
-
- if (iwm->scan_request) {
- cfg80211_scan_done(iwm->scan_request, true);
- iwm->scan_request = NULL;
- }
-
- clear_bit(IWM_STATUS_READY, &iwm->status);
-
- iwm_eeprom_exit(iwm);
- iwm_bss_list_clean(iwm);
- iwm_init_default_profile(iwm, iwm->umac_profile);
- iwm->umac_profile_active = false;
- iwm->default_key = -1;
- iwm->core_enabled = 0;
-
- ret = iwm_bus_disable(iwm);
- if (ret < 0) {
- IWM_ERR(iwm, "Couldn't disable function\n");
- return ret;
- }
-
- return 0;
-}
-
-int iwm_down(struct iwm_priv *iwm)
-{
- int ret;
-
- mutex_lock(&iwm->mutex);
- ret = __iwm_down(iwm);
- mutex_unlock(&iwm->mutex);
-
- return ret;
-}
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
deleted file mode 100644
index 5091d77..0000000
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-/*
- * This is the netdev related hooks for iwm.
- *
- * Some interesting code paths:
- *
- * iwm_open() (Called at netdev interface bringup time)
- * -> iwm_up() (main.c)
- * -> iwm_bus_enable()
- * -> if_sdio_enable() (In case of an SDIO bus)
- * -> sdio_enable_func()
- * -> iwm_notif_wait(BARKER_REBOOT) (wait for reboot barker)
- * -> iwm_notif_wait(ACK_BARKER) (wait for ACK barker)
- * -> iwm_load_fw() (fw.c)
- * -> iwm_load_umac()
- * -> iwm_load_lmac() (Calibration LMAC)
- * -> iwm_load_lmac() (Operational LMAC)
- * -> iwm_send_umac_config()
- *
- * iwm_stop() (Called at netdev interface bringdown time)
- * -> iwm_down()
- * -> iwm_bus_disable()
- * -> if_sdio_disable() (In case of an SDIO bus)
- * -> sdio_disable_func()
- */
-#include <linux/netdevice.h>
-#include <linux/slab.h>
-
-#include "iwm.h"
-#include "commands.h"
-#include "cfg80211.h"
-#include "debug.h"
-
-static int iwm_open(struct net_device *ndev)
-{
- struct iwm_priv *iwm = ndev_to_iwm(ndev);
-
- return iwm_up(iwm);
-}
-
-static int iwm_stop(struct net_device *ndev)
-{
- struct iwm_priv *iwm = ndev_to_iwm(ndev);
-
- return iwm_down(iwm);
-}
-
-/*
- * iwm AC to queue mapping
- *
- * AC_VO -> queue 3
- * AC_VI -> queue 2
- * AC_BE -> queue 1
- * AC_BK -> queue 0
- */
-static const u16 iwm_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
-
-int iwm_tid_to_queue(u16 tid)
-{
- if (tid > IWM_UMAC_TID_NR - 2)
- return -EINVAL;
-
- return iwm_1d_to_queue[tid];
-}
-
-static u16 iwm_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
- skb->priority = cfg80211_classify8021d(skb);
-
- return iwm_1d_to_queue[skb->priority];
-}
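For instance, a voice frame that cfg80211_classify8021d() tags with 802.1d priority 6 lands on hardware queue 3, the AC_VO queue from the table above:

	u16 queue;

	skb->priority = 6;				/* as if classified as voice  */
	queue = iwm_1d_to_queue[skb->priority];		/* iwm_1d_to_queue[6] == 3    */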
-
-static const struct net_device_ops iwm_netdev_ops = {
- .ndo_open = iwm_open,
- .ndo_stop = iwm_stop,
- .ndo_start_xmit = iwm_xmit_frame,
- .ndo_select_queue = iwm_select_queue,
-};
-
-void *iwm_if_alloc(int sizeof_bus, struct device *dev,
- struct iwm_if_ops *if_ops)
-{
- struct net_device *ndev;
- struct wireless_dev *wdev;
- struct iwm_priv *iwm;
- int ret = 0;
-
- wdev = iwm_wdev_alloc(sizeof_bus, dev);
- if (IS_ERR(wdev))
- return wdev;
-
- iwm = wdev_to_iwm(wdev);
- iwm->bus_ops = if_ops;
- iwm->wdev = wdev;
-
- ret = iwm_priv_init(iwm);
- if (ret) {
- dev_err(dev, "failed to init iwm_priv\n");
- goto out_wdev;
- }
-
- wdev->iftype = iwm_mode_to_nl80211_iftype(iwm->conf.mode);
-
- ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
- if (!ndev) {
- dev_err(dev, "no memory for network device instance\n");
- ret = -ENOMEM;
- goto out_priv;
- }
-
- ndev->netdev_ops = &iwm_netdev_ops;
- ndev->ieee80211_ptr = wdev;
- SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
- wdev->netdev = ndev;
-
- iwm->umac_profile = kmalloc(sizeof(struct iwm_umac_profile),
- GFP_KERNEL);
- if (!iwm->umac_profile) {
- dev_err(dev, "Couldn't alloc memory for profile\n");
- ret = -ENOMEM;
- goto out_profile;
- }
-
- iwm_init_default_profile(iwm, iwm->umac_profile);
-
- return iwm;
-
- out_profile:
- free_netdev(ndev);
-
- out_priv:
- iwm_priv_deinit(iwm);
-
- out_wdev:
- iwm_wdev_free(iwm);
- return ERR_PTR(ret);
-}
-
-void iwm_if_free(struct iwm_priv *iwm)
-{
- if (!iwm_to_ndev(iwm))
- return;
-
- cancel_delayed_work_sync(&iwm->ct_kill_delay);
- free_netdev(iwm_to_ndev(iwm));
- iwm_priv_deinit(iwm);
- kfree(iwm->umac_profile);
- iwm->umac_profile = NULL;
- iwm_wdev_free(iwm);
-}
-
-int iwm_if_add(struct iwm_priv *iwm)
-{
- struct net_device *ndev = iwm_to_ndev(iwm);
- int ret;
-
- ret = register_netdev(ndev);
- if (ret < 0) {
- dev_err(&ndev->dev, "Failed to register netdev: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-void iwm_if_remove(struct iwm_priv *iwm)
-{
- unregister_netdev(iwm_to_ndev(iwm));
-}
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
deleted file mode 100644
index 7d708f4..0000000
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ /dev/null
@@ -1,1701 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <linux/etherdevice.h>
-#include <linux/wireless.h>
-#include <linux/ieee80211.h>
-#include <linux/if_arp.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <net/iw_handler.h>
-
-#include "iwm.h"
-#include "debug.h"
-#include "hal.h"
-#include "umac.h"
-#include "lmac.h"
-#include "commands.h"
-#include "rx.h"
-#include "cfg80211.h"
-#include "eeprom.h"
-
-static int iwm_rx_check_udma_hdr(struct iwm_udma_in_hdr *hdr)
-{
- if ((le32_to_cpu(hdr->cmd) == UMAC_PAD_TERMINAL) ||
- (le32_to_cpu(hdr->size) == UMAC_PAD_TERMINAL))
- return -EINVAL;
-
- return 0;
-}
-
-static inline int iwm_rx_resp_size(struct iwm_udma_in_hdr *hdr)
-{
- return ALIGN(le32_to_cpu(hdr->size) + sizeof(struct iwm_udma_in_hdr),
- 16);
-}
-
-/*
- * Notification handlers:
- *
- * For every possible notification we can receive from the
- * target, we have a handler.
- * When we get a target notification, and there is no one
- * waiting for it, it's just processed through the rx code
- * path:
- *
- * iwm_rx_handle()
- * -> iwm_rx_handle_umac()
- * -> iwm_rx_handle_wifi()
- * -> iwm_rx_handle_resp()
- * -> iwm_ntf_*()
- *
- * OR
- *
- * -> iwm_rx_handle_non_wifi()
- *
- * If there are processes waiting for this notification, then
- * iwm_rx_handle_wifi() just wakes those processes up and they
- * grab the pending notification.
- */
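All the handlers below share one prototype, which is what allows them to be registered in per-source dispatch tables (presumably by iwm_rx_setup_handlers(), which main.c calls during bring-up). A hypothetical typedef for that shape; the name iwm_ntf_handler_t is invented here for illustration and is not taken from iwm.h:

	typedef int (*iwm_ntf_handler_t)(struct iwm_priv *iwm, u8 *buf,
					 unsigned long buf_size,
					 struct iwm_wifi_cmd *cmd);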
-static int iwm_ntf_error(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_error *error;
- struct iwm_fw_error_hdr *fw_err;
-
- error = (struct iwm_umac_notif_error *)buf;
- fw_err = &error->err;
-
- memcpy(iwm->last_fw_err, fw_err, sizeof(struct iwm_fw_error_hdr));
-
- IWM_ERR(iwm, "%cMAC FW ERROR:\n",
- (le32_to_cpu(fw_err->category) == UMAC_SYS_ERR_CAT_LMAC) ? 'L' : 'U');
- IWM_ERR(iwm, "\tCategory: %d\n", le32_to_cpu(fw_err->category));
- IWM_ERR(iwm, "\tStatus: 0x%x\n", le32_to_cpu(fw_err->status));
- IWM_ERR(iwm, "\tPC: 0x%x\n", le32_to_cpu(fw_err->pc));
- IWM_ERR(iwm, "\tblink1: %d\n", le32_to_cpu(fw_err->blink1));
- IWM_ERR(iwm, "\tblink2: %d\n", le32_to_cpu(fw_err->blink2));
- IWM_ERR(iwm, "\tilink1: %d\n", le32_to_cpu(fw_err->ilink1));
- IWM_ERR(iwm, "\tilink2: %d\n", le32_to_cpu(fw_err->ilink2));
- IWM_ERR(iwm, "\tData1: 0x%x\n", le32_to_cpu(fw_err->data1));
- IWM_ERR(iwm, "\tData2: 0x%x\n", le32_to_cpu(fw_err->data2));
- IWM_ERR(iwm, "\tLine number: %d\n", le32_to_cpu(fw_err->line_num));
- IWM_ERR(iwm, "\tUMAC status: 0x%x\n", le32_to_cpu(fw_err->umac_status));
- IWM_ERR(iwm, "\tLMAC status: 0x%x\n", le32_to_cpu(fw_err->lmac_status));
- IWM_ERR(iwm, "\tSDIO status: 0x%x\n", le32_to_cpu(fw_err->sdio_status));
-
- iwm_resetting(iwm);
-
- return 0;
-}
-
-static int iwm_ntf_umac_alive(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_alive *alive_resp =
- (struct iwm_umac_notif_alive *)(buf);
- u16 status = le16_to_cpu(alive_resp->status);
-
- if (status == UMAC_NTFY_ALIVE_STATUS_ERR) {
- IWM_ERR(iwm, "Receive error UMAC_ALIVE\n");
- return -EIO;
- }
-
- iwm_tx_credit_init_pools(iwm, alive_resp);
-
- return 0;
-}
-
-static int iwm_ntf_init_complete(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct wiphy *wiphy = iwm_to_wiphy(iwm);
- struct iwm_umac_notif_init_complete *init_complete =
- (struct iwm_umac_notif_init_complete *)(buf);
- u16 status = le16_to_cpu(init_complete->status);
- bool blocked = (status == UMAC_NTFY_INIT_COMPLETE_STATUS_ERR);
-
- if (blocked)
- IWM_DBG_NTF(iwm, DBG, "Hardware rf kill is on (radio off)\n");
- else
- IWM_DBG_NTF(iwm, DBG, "Hardware rf kill is off (radio on)\n");
-
- wiphy_rfkill_set_hw_state(wiphy, blocked);
-
- return 0;
-}
-
-static int iwm_ntf_tx_credit_update(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- int pool_nr, total_freed_pages;
- unsigned long pool_map;
- int i, id;
- struct iwm_umac_notif_page_dealloc *dealloc =
- (struct iwm_umac_notif_page_dealloc *)buf;
-
- pool_nr = GET_VAL32(dealloc->changes, UMAC_DEALLOC_NTFY_CHANGES_CNT);
- pool_map = GET_VAL32(dealloc->changes, UMAC_DEALLOC_NTFY_CHANGES_MSK);
-
- IWM_DBG_TX(iwm, DBG, "UMAC dealloc notification: pool nr %d, "
- "update map 0x%lx\n", pool_nr, pool_map);
-
- spin_lock(&iwm->tx_credit.lock);
-
- for (i = 0; i < pool_nr; i++) {
- id = GET_VAL32(dealloc->grp_info[i],
- UMAC_DEALLOC_NTFY_GROUP_NUM);
- if (test_bit(id, &pool_map)) {
- total_freed_pages = GET_VAL32(dealloc->grp_info[i],
- UMAC_DEALLOC_NTFY_PAGE_CNT);
- iwm_tx_credit_inc(iwm, id, total_freed_pages);
- }
- }
-
- spin_unlock(&iwm->tx_credit.lock);
-
- return 0;
-}
-
-static int iwm_ntf_umac_reset(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- IWM_DBG_NTF(iwm, DBG, "UMAC RESET done\n");
-
- return 0;
-}
-
-static int iwm_ntf_lmac_version(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- IWM_DBG_NTF(iwm, INFO, "LMAC Version: %x.%x\n", buf[9], buf[8]);
-
- return 0;
-}
-
-static int iwm_ntf_tx(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_lmac_tx_resp *tx_resp;
- struct iwm_umac_wifi_in_hdr *hdr;
-
- tx_resp = (struct iwm_lmac_tx_resp *)
- (buf + sizeof(struct iwm_umac_wifi_in_hdr));
- hdr = (struct iwm_umac_wifi_in_hdr *)buf;
-
- IWM_DBG_TX(iwm, DBG, "REPLY_TX, buf size: %lu\n", buf_size);
-
- IWM_DBG_TX(iwm, DBG, "Seqnum: %d\n",
- le16_to_cpu(hdr->sw_hdr.cmd.seq_num));
- IWM_DBG_TX(iwm, DBG, "\tFrame cnt: %d\n", tx_resp->frame_cnt);
- IWM_DBG_TX(iwm, DBG, "\tRetry cnt: %d\n",
- le16_to_cpu(tx_resp->retry_cnt));
- IWM_DBG_TX(iwm, DBG, "\tSeq ctl: %d\n", le16_to_cpu(tx_resp->seq_ctl));
- IWM_DBG_TX(iwm, DBG, "\tByte cnt: %d\n",
- le16_to_cpu(tx_resp->byte_cnt));
- IWM_DBG_TX(iwm, DBG, "\tStatus: 0x%x\n", le32_to_cpu(tx_resp->status));
-
- return 0;
-}
-
-
-static int iwm_ntf_calib_res(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- u8 opcode;
- u8 *calib_buf;
- struct iwm_lmac_calib_hdr *hdr = (struct iwm_lmac_calib_hdr *)
- (buf + sizeof(struct iwm_umac_wifi_in_hdr));
-
- opcode = hdr->opcode;
-
- BUG_ON(opcode >= CALIBRATION_CMD_NUM ||
- opcode < PHY_CALIBRATE_OPCODES_NUM);
-
- IWM_DBG_NTF(iwm, DBG, "Store calibration result for opcode: %d\n",
- opcode);
-
- buf_size -= sizeof(struct iwm_umac_wifi_in_hdr);
- calib_buf = iwm->calib_res[opcode].buf;
-
- if (!calib_buf || (iwm->calib_res[opcode].size < buf_size)) {
- kfree(calib_buf);
- calib_buf = kzalloc(buf_size, GFP_KERNEL);
- if (!calib_buf) {
- IWM_ERR(iwm, "Memory allocation failed: calib_res\n");
- return -ENOMEM;
- }
- iwm->calib_res[opcode].buf = calib_buf;
- iwm->calib_res[opcode].size = buf_size;
- }
-
- memcpy(calib_buf, hdr, buf_size);
- set_bit(opcode - PHY_CALIBRATE_OPCODES_NUM, &iwm->calib_done_map);
-
- return 0;
-}
-
-static int iwm_ntf_calib_complete(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- IWM_DBG_NTF(iwm, DBG, "Calibration completed\n");
-
- return 0;
-}
-
-static int iwm_ntf_calib_cfg(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_lmac_cal_cfg_resp *cal_resp;
-
- cal_resp = (struct iwm_lmac_cal_cfg_resp *)
- (buf + sizeof(struct iwm_umac_wifi_in_hdr));
-
- IWM_DBG_NTF(iwm, DBG, "Calibration CFG command status: %d\n",
- le32_to_cpu(cal_resp->status));
-
- return 0;
-}
-
-static int iwm_ntf_wifi_status(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_wifi_status *status =
- (struct iwm_umac_notif_wifi_status *)buf;
-
- iwm->core_enabled |= le16_to_cpu(status->status);
-
- return 0;
-}
-
-static struct iwm_rx_ticket_node *
-iwm_rx_ticket_node_alloc(struct iwm_priv *iwm, struct iwm_rx_ticket *ticket)
-{
- struct iwm_rx_ticket_node *ticket_node;
-
- ticket_node = kzalloc(sizeof(struct iwm_rx_ticket_node), GFP_KERNEL);
- if (!ticket_node) {
- IWM_ERR(iwm, "Couldn't allocate ticket node\n");
- return ERR_PTR(-ENOMEM);
- }
-
- ticket_node->ticket = kmemdup(ticket, sizeof(struct iwm_rx_ticket),
- GFP_KERNEL);
- if (!ticket_node->ticket) {
- IWM_ERR(iwm, "Couldn't allocate RX ticket\n");
- kfree(ticket_node);
- return ERR_PTR(-ENOMEM);
- }
-
- INIT_LIST_HEAD(&ticket_node->node);
-
- return ticket_node;
-}
-
-static void iwm_rx_ticket_node_free(struct iwm_rx_ticket_node *ticket_node)
-{
- kfree(ticket_node->ticket);
- kfree(ticket_node);
-}
-
-static struct iwm_rx_packet *iwm_rx_packet_get(struct iwm_priv *iwm, u16 id)
-{
- u8 id_hash = IWM_RX_ID_GET_HASH(id);
- struct iwm_rx_packet *packet;
-
- spin_lock(&iwm->packet_lock[id_hash]);
- list_for_each_entry(packet, &iwm->rx_packets[id_hash], node)
- if (packet->id == id) {
- list_del(&packet->node);
- spin_unlock(&iwm->packet_lock[id_hash]);
- return packet;
- }
-
- spin_unlock(&iwm->packet_lock[id_hash]);
- return NULL;
-}
-
-static struct iwm_rx_packet *iwm_rx_packet_alloc(struct iwm_priv *iwm, u8 *buf,
- u32 size, u16 id)
-{
- struct iwm_rx_packet *packet;
-
- packet = kzalloc(sizeof(struct iwm_rx_packet), GFP_KERNEL);
- if (!packet) {
- IWM_ERR(iwm, "Couldn't allocate packet\n");
- return ERR_PTR(-ENOMEM);
- }
-
- packet->skb = dev_alloc_skb(size);
- if (!packet->skb) {
- IWM_ERR(iwm, "Couldn't allocate packet SKB\n");
- kfree(packet);
- return ERR_PTR(-ENOMEM);
- }
-
- packet->pkt_size = size;
-
- skb_put(packet->skb, size);
- memcpy(packet->skb->data, buf, size);
- INIT_LIST_HEAD(&packet->node);
- packet->id = id;
-
- return packet;
-}
-
-void iwm_rx_free(struct iwm_priv *iwm)
-{
- struct iwm_rx_ticket_node *ticket, *nt;
- struct iwm_rx_packet *packet, *np;
- int i;
-
- spin_lock(&iwm->ticket_lock);
- list_for_each_entry_safe(ticket, nt, &iwm->rx_tickets, node) {
- list_del(&ticket->node);
- iwm_rx_ticket_node_free(ticket);
- }
- spin_unlock(&iwm->ticket_lock);
-
- for (i = 0; i < IWM_RX_ID_HASH; i++) {
- spin_lock(&iwm->packet_lock[i]);
- list_for_each_entry_safe(packet, np, &iwm->rx_packets[i],
- node) {
- list_del(&packet->node);
- kfree_skb(packet->skb);
- kfree(packet);
- }
- spin_unlock(&iwm->packet_lock[i]);
- }
-}
-
-static int iwm_ntf_rx_ticket(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_rx_ticket *ntf_rx_ticket =
- (struct iwm_umac_notif_rx_ticket *)buf;
- struct iwm_rx_ticket *ticket =
- (struct iwm_rx_ticket *)ntf_rx_ticket->tickets;
- int i, schedule_rx = 0;
-
- for (i = 0; i < ntf_rx_ticket->num_tickets; i++) {
- struct iwm_rx_ticket_node *ticket_node;
-
- switch (le16_to_cpu(ticket->action)) {
- case IWM_RX_TICKET_RELEASE:
- case IWM_RX_TICKET_DROP:
- /* We can push the packet to the stack */
- ticket_node = iwm_rx_ticket_node_alloc(iwm, ticket);
- if (IS_ERR(ticket_node))
- return PTR_ERR(ticket_node);
-
- IWM_DBG_RX(iwm, DBG, "TICKET %s(%d)\n",
- __le16_to_cpu(ticket->action) ==
- IWM_RX_TICKET_RELEASE ?
- "RELEASE" : "DROP",
- ticket->id);
- spin_lock(&iwm->ticket_lock);
- list_add_tail(&ticket_node->node, &iwm->rx_tickets);
- spin_unlock(&iwm->ticket_lock);
-
- /*
- * We received an Rx ticket; most likely there's
- * a packet pending for it, so it's not worth going
- * through the packet hash list to double check.
- * Let's just fire the rx worker.
- */
- schedule_rx = 1;
-
- break;
-
- default:
- IWM_ERR(iwm, "Invalid RX ticket action: 0x%x\n",
- ticket->action);
- }
-
- ticket++;
- }
-
- if (schedule_rx)
- queue_work(iwm->rx_wq, &iwm->rx_worker);
-
- return 0;
-}
-
-static int iwm_ntf_rx_packet(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_wifi_in_hdr *wifi_hdr;
- struct iwm_rx_packet *packet;
- u16 id, buf_offset;
- u32 packet_size;
- u8 id_hash;
-
- IWM_DBG_RX(iwm, DBG, "\n");
-
- wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
- id = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num);
- buf_offset = sizeof(struct iwm_umac_wifi_in_hdr);
- packet_size = buf_size - sizeof(struct iwm_umac_wifi_in_hdr);
-
- IWM_DBG_RX(iwm, DBG, "CMD:0x%x, seqnum: %d, packet size: %d\n",
- wifi_hdr->sw_hdr.cmd.cmd, id, packet_size);
- IWM_DBG_RX(iwm, DBG, "Packet id: %d\n", id);
- IWM_HEXDUMP(iwm, DBG, RX, "PACKET: ", buf + buf_offset, packet_size);
-
- packet = iwm_rx_packet_alloc(iwm, buf + buf_offset, packet_size, id);
- if (IS_ERR(packet))
- return PTR_ERR(packet);
-
- id_hash = IWM_RX_ID_GET_HASH(id);
- spin_lock(&iwm->packet_lock[id_hash]);
- list_add_tail(&packet->node, &iwm->rx_packets[id_hash]);
- spin_unlock(&iwm->packet_lock[id_hash]);
-
- /* We might (unlikely) have received the packet _after_ the ticket */
- queue_work(iwm->rx_wq, &iwm->rx_worker);
-
- return 0;
-}
-
-/* MLME handlers */
-static int iwm_mlme_assoc_start(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_assoc_start *start;
-
- start = (struct iwm_umac_notif_assoc_start *)buf;
-
- IWM_DBG_MLME(iwm, INFO, "Association with %pM Started, reason: %d\n",
- start->bssid, le32_to_cpu(start->roam_reason));
-
- wake_up_interruptible(&iwm->mlme_queue);
-
- return 0;
-}
-
-static u8 iwm_is_open_wep_profile(struct iwm_priv *iwm)
-{
- if ((iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_40 ||
- iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_104) &&
- (iwm->umac_profile->sec.ucast_cipher ==
- iwm->umac_profile->sec.mcast_cipher) &&
- (iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN))
- return 1;
-
- return 0;
-}
-
-static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct wiphy *wiphy = iwm_to_wiphy(iwm);
- struct ieee80211_channel *chan;
- struct iwm_umac_notif_assoc_complete *complete =
- (struct iwm_umac_notif_assoc_complete *)buf;
-
- IWM_DBG_MLME(iwm, INFO, "Association with %pM completed, status: %d\n",
- complete->bssid, complete->status);
-
- switch (le32_to_cpu(complete->status)) {
- case UMAC_ASSOC_COMPLETE_SUCCESS:
- chan = ieee80211_get_channel(wiphy,
- ieee80211_channel_to_frequency(complete->channel,
- complete->band == UMAC_BAND_2GHZ ?
- IEEE80211_BAND_2GHZ :
- IEEE80211_BAND_5GHZ));
- if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
- /* Associated to a disallowed channel, disassociate. */
- __iwm_invalidate_mlme_profile(iwm);
- IWM_WARN(iwm, "Couldn't associate with %pM because "
- "channel %d is disabled. Check your local "
- "regulatory settings.\n",
- complete->bssid, complete->channel);
- goto failure;
- }
-
- set_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
- memcpy(iwm->bssid, complete->bssid, ETH_ALEN);
- iwm->channel = complete->channel;
-
- /* Internal roaming state, avoid notifying SME. */
- if (!test_and_clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status)
- && iwm->conf.mode == UMAC_MODE_BSS) {
- cancel_delayed_work(&iwm->disconnect);
- cfg80211_roamed(iwm_to_ndev(iwm), NULL,
- complete->bssid,
- iwm->req_ie, iwm->req_ie_len,
- iwm->resp_ie, iwm->resp_ie_len,
- GFP_KERNEL);
- break;
- }
-
- iwm_link_on(iwm);
-
- if (iwm->conf.mode == UMAC_MODE_IBSS)
- goto ibss;
-
- if (!test_bit(IWM_STATUS_RESETTING, &iwm->status))
- cfg80211_connect_result(iwm_to_ndev(iwm),
- complete->bssid,
- iwm->req_ie, iwm->req_ie_len,
- iwm->resp_ie, iwm->resp_ie_len,
- WLAN_STATUS_SUCCESS,
- GFP_KERNEL);
- else
- cfg80211_roamed(iwm_to_ndev(iwm), NULL,
- complete->bssid,
- iwm->req_ie, iwm->req_ie_len,
- iwm->resp_ie, iwm->resp_ie_len,
- GFP_KERNEL);
- break;
- case UMAC_ASSOC_COMPLETE_FAILURE:
- failure:
- clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
- memset(iwm->bssid, 0, ETH_ALEN);
- iwm->channel = 0;
-
- /* Internal roaming state, avoid notifying SME. */
- if (!test_and_clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status)
- && iwm->conf.mode == UMAC_MODE_BSS) {
- cancel_delayed_work(&iwm->disconnect);
- break;
- }
-
- iwm_link_off(iwm);
-
- if (iwm->conf.mode == UMAC_MODE_IBSS)
- goto ibss;
-
- if (!test_bit(IWM_STATUS_RESETTING, &iwm->status))
- if (!iwm_is_open_wep_profile(iwm)) {
- cfg80211_connect_result(iwm_to_ndev(iwm),
- complete->bssid,
- NULL, 0, NULL, 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_KERNEL);
- } else {
- /* Let's try shared WEP auth */
- IWM_ERR(iwm, "Trying WEP shared auth\n");
- schedule_work(&iwm->auth_retry_worker);
- }
- else
- cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0,
- GFP_KERNEL);
- break;
- default:
- break;
- }
-
- clear_bit(IWM_STATUS_RESETTING, &iwm->status);
- return 0;
-
- ibss:
- cfg80211_ibss_joined(iwm_to_ndev(iwm), iwm->bssid, GFP_KERNEL);
- clear_bit(IWM_STATUS_RESETTING, &iwm->status);
- return 0;
-}
-
-static int iwm_mlme_profile_invalidate(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_profile_invalidate *invalid;
- u32 reason;
-
- invalid = (struct iwm_umac_notif_profile_invalidate *)buf;
- reason = le32_to_cpu(invalid->reason);
-
- IWM_DBG_MLME(iwm, INFO, "Profile Invalidated. Reason: %d\n", reason);
-
- if (reason != UMAC_PROFILE_INVALID_REQUEST &&
- test_bit(IWM_STATUS_SME_CONNECTING, &iwm->status))
- cfg80211_connect_result(iwm_to_ndev(iwm), NULL, NULL, 0, NULL,
- 0, WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_KERNEL);
-
- clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status);
- clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
-
- iwm->umac_profile_active = false;
- memset(iwm->bssid, 0, ETH_ALEN);
- iwm->channel = 0;
-
- iwm_link_off(iwm);
-
- wake_up_interruptible(&iwm->mlme_queue);
-
- return 0;
-}
-
-#define IWM_DISCONNECT_INTERVAL (5 * HZ)
-
-static int iwm_mlme_connection_terminated(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- IWM_DBG_MLME(iwm, DBG, "Connection terminated\n");
-
- schedule_delayed_work(&iwm->disconnect, IWM_DISCONNECT_INTERVAL);
-
- return 0;
-}
-
-static int iwm_mlme_scan_complete(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- int ret;
- struct iwm_umac_notif_scan_complete *scan_complete =
- (struct iwm_umac_notif_scan_complete *)buf;
- u32 result = le32_to_cpu(scan_complete->result);
-
- IWM_DBG_MLME(iwm, INFO, "type:0x%x result:0x%x seq:%d\n",
- le32_to_cpu(scan_complete->type),
- le32_to_cpu(scan_complete->result),
- scan_complete->seq_num);
-
- if (!test_and_clear_bit(IWM_STATUS_SCANNING, &iwm->status)) {
- IWM_ERR(iwm, "Scan complete while device not scanning\n");
- return -EIO;
- }
- if (!iwm->scan_request)
- return 0;
-
- ret = iwm_cfg80211_inform_bss(iwm);
-
- cfg80211_scan_done(iwm->scan_request,
- (result & UMAC_SCAN_RESULT_ABORTED) ? 1 : !!ret);
- iwm->scan_request = NULL;
-
- return ret;
-}
-
-static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_sta_info *umac_sta =
- (struct iwm_umac_notif_sta_info *)buf;
- struct iwm_sta_info *sta;
- int i;
-
- switch (le32_to_cpu(umac_sta->opcode)) {
- case UMAC_OPCODE_ADD_MODIFY:
- sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)];
-
- IWM_DBG_MLME(iwm, INFO, "%s STA: ID = %d, Color = %d, "
- "addr = %pM, qos = %d\n",
- sta->valid ? "Modify" : "Add",
- GET_VAL8(umac_sta->sta_id, LMAC_STA_ID),
- GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR),
- umac_sta->mac_addr,
- umac_sta->flags & UMAC_STA_FLAG_QOS);
-
- sta->valid = true;
- sta->qos = umac_sta->flags & UMAC_STA_FLAG_QOS;
- sta->color = GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR);
- memcpy(sta->addr, umac_sta->mac_addr, ETH_ALEN);
- break;
- case UMAC_OPCODE_REMOVE:
- IWM_DBG_MLME(iwm, INFO, "Remove STA: ID = %d, Color = %d, "
- "addr = %pM\n",
- GET_VAL8(umac_sta->sta_id, LMAC_STA_ID),
- GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR),
- umac_sta->mac_addr);
-
- sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)];
-
- if (!memcmp(sta->addr, umac_sta->mac_addr, ETH_ALEN))
- sta->valid = false;
-
- break;
- case UMAC_OPCODE_CLEAR_ALL:
- for (i = 0; i < IWM_STA_TABLE_NUM; i++)
- iwm->sta_table[i].valid = false;
-
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static int iwm_mlme_medium_lost(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct wiphy *wiphy = iwm_to_wiphy(iwm);
-
- IWM_DBG_NTF(iwm, DBG, "WiFi/WiMax coexistence radio is OFF\n");
-
- wiphy_rfkill_set_hw_state(wiphy, true);
-
- return 0;
-}
-
-static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct wiphy *wiphy = iwm_to_wiphy(iwm);
- struct ieee80211_mgmt *mgmt;
- struct iwm_umac_notif_bss_info *umac_bss =
- (struct iwm_umac_notif_bss_info *)buf;
- struct ieee80211_channel *channel;
- struct ieee80211_supported_band *band;
- struct iwm_bss_info *bss;
- s32 signal;
- int freq;
- u16 frame_len = le16_to_cpu(umac_bss->frame_len);
- size_t bss_len = sizeof(struct iwm_umac_notif_bss_info) + frame_len;
-
- mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf);
-
- IWM_DBG_MLME(iwm, DBG, "New BSS info entry: %pM\n", mgmt->bssid);
- IWM_DBG_MLME(iwm, DBG, "\tType: 0x%x\n", le32_to_cpu(umac_bss->type));
- IWM_DBG_MLME(iwm, DBG, "\tTimestamp: %d\n",
- le32_to_cpu(umac_bss->timestamp));
- IWM_DBG_MLME(iwm, DBG, "\tTable Index: %d\n",
- le16_to_cpu(umac_bss->table_idx));
- IWM_DBG_MLME(iwm, DBG, "\tBand: %d\n", umac_bss->band);
- IWM_DBG_MLME(iwm, DBG, "\tChannel: %d\n", umac_bss->channel);
- IWM_DBG_MLME(iwm, DBG, "\tRSSI: %d\n", umac_bss->rssi);
- IWM_DBG_MLME(iwm, DBG, "\tFrame Length: %d\n", frame_len);
-
- list_for_each_entry(bss, &iwm->bss_list, node)
- if (bss->bss->table_idx == umac_bss->table_idx)
- break;
-
- if (&bss->node != &iwm->bss_list) {
- /* Remove the old BSS entry, we will add it back later. */
- list_del(&bss->node);
- kfree(bss->bss);
- } else {
- /* New BSS entry */
-
- bss = kzalloc(sizeof(struct iwm_bss_info), GFP_KERNEL);
- if (!bss) {
- IWM_ERR(iwm, "Couldn't allocate bss_info\n");
- return -ENOMEM;
- }
- }
-
- bss->bss = kzalloc(bss_len, GFP_KERNEL);
- if (!bss->bss) {
- kfree(bss);
- IWM_ERR(iwm, "Couldn't allocate bss\n");
- return -ENOMEM;
- }
-
- INIT_LIST_HEAD(&bss->node);
- memcpy(bss->bss, umac_bss, bss_len);
-
- if (umac_bss->band == UMAC_BAND_2GHZ)
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
- else if (umac_bss->band == UMAC_BAND_5GHZ)
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
- else {
- IWM_ERR(iwm, "Invalid band: %d\n", umac_bss->band);
- goto err;
- }
-
- freq = ieee80211_channel_to_frequency(umac_bss->channel, band->band);
- channel = ieee80211_get_channel(wiphy, freq);
- signal = umac_bss->rssi * 100;
-
- bss->cfg_bss = cfg80211_inform_bss_frame(wiphy, channel,
- mgmt, frame_len,
- signal, GFP_KERNEL);
- if (!bss->cfg_bss)
- goto err;
-
- list_add_tail(&bss->node, &iwm->bss_list);
-
- return 0;
- err:
- kfree(bss->bss);
- kfree(bss);
-
- return -EINVAL;
-}
-
-static int iwm_mlme_remove_bss(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_bss_removed *bss_rm =
- (struct iwm_umac_notif_bss_removed *)buf;
- struct iwm_bss_info *bss, *next;
- u16 table_idx;
- int i;
-
- for (i = 0; i < le32_to_cpu(bss_rm->count); i++) {
- table_idx = le16_to_cpu(bss_rm->entries[i]) &
- IWM_BSS_REMOVE_INDEX_MSK;
- list_for_each_entry_safe(bss, next, &iwm->bss_list, node)
- if (bss->bss->table_idx == cpu_to_le16(table_idx)) {
- struct ieee80211_mgmt *mgmt;
-
- mgmt = (struct ieee80211_mgmt *)
- (bss->bss->frame_buf);
- IWM_DBG_MLME(iwm, ERR, "BSS removed: %pM\n",
- mgmt->bssid);
- list_del(&bss->node);
- kfree(bss->bss);
- kfree(bss);
- }
- }
-
- return 0;
-}
-
-static int iwm_mlme_mgt_frame(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_mgt_frame *mgt_frame =
- (struct iwm_umac_notif_mgt_frame *)buf;
- struct ieee80211_mgmt *mgt = (struct ieee80211_mgmt *)mgt_frame->frame;
-
- IWM_HEXDUMP(iwm, DBG, MLME, "MGT: ", mgt_frame->frame,
- le16_to_cpu(mgt_frame->len));
-
- if (ieee80211_is_assoc_req(mgt->frame_control)) {
- iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
- - offsetof(struct ieee80211_mgmt,
- u.assoc_req.variable);
- kfree(iwm->req_ie);
- iwm->req_ie = kmemdup(mgt->u.assoc_req.variable,
- iwm->req_ie_len, GFP_KERNEL);
- } else if (ieee80211_is_reassoc_req(mgt->frame_control)) {
- iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
- - offsetof(struct ieee80211_mgmt,
- u.reassoc_req.variable);
- kfree(iwm->req_ie);
- iwm->req_ie = kmemdup(mgt->u.reassoc_req.variable,
- iwm->req_ie_len, GFP_KERNEL);
- } else if (ieee80211_is_assoc_resp(mgt->frame_control)) {
- iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
- - offsetof(struct ieee80211_mgmt,
- u.assoc_resp.variable);
- kfree(iwm->resp_ie);
- iwm->resp_ie = kmemdup(mgt->u.assoc_resp.variable,
- iwm->resp_ie_len, GFP_KERNEL);
- } else if (ieee80211_is_reassoc_resp(mgt->frame_control)) {
- iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
- - offsetof(struct ieee80211_mgmt,
- u.reassoc_resp.variable);
- kfree(iwm->resp_ie);
- iwm->resp_ie = kmemdup(mgt->u.reassoc_resp.variable,
- iwm->resp_ie_len, GFP_KERNEL);
- } else {
- IWM_ERR(iwm, "Unsupported management frame: 0x%x",
- le16_to_cpu(mgt->frame_control));
- return 0;
- }
-
- return 0;
-}
-
-static int iwm_ntf_mlme(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_wifi_if *notif =
- (struct iwm_umac_notif_wifi_if *)buf;
-
- switch (notif->status) {
- case WIFI_IF_NTFY_ASSOC_START:
- return iwm_mlme_assoc_start(iwm, buf, buf_size, cmd);
- case WIFI_IF_NTFY_ASSOC_COMPLETE:
- return iwm_mlme_assoc_complete(iwm, buf, buf_size, cmd);
- case WIFI_IF_NTFY_PROFILE_INVALIDATE_COMPLETE:
- return iwm_mlme_profile_invalidate(iwm, buf, buf_size, cmd);
- case WIFI_IF_NTFY_CONNECTION_TERMINATED:
- return iwm_mlme_connection_terminated(iwm, buf, buf_size, cmd);
- case WIFI_IF_NTFY_SCAN_COMPLETE:
- return iwm_mlme_scan_complete(iwm, buf, buf_size, cmd);
- case WIFI_IF_NTFY_STA_TABLE_CHANGE:
- return iwm_mlme_update_sta_table(iwm, buf, buf_size, cmd);
- case WIFI_IF_NTFY_EXTENDED_IE_REQUIRED:
- IWM_DBG_MLME(iwm, DBG, "Extended IE required\n");
- break;
- case WIFI_IF_NTFY_RADIO_PREEMPTION:
- return iwm_mlme_medium_lost(iwm, buf, buf_size, cmd);
- case WIFI_IF_NTFY_BSS_TRK_TABLE_CHANGED:
- return iwm_mlme_update_bss_table(iwm, buf, buf_size, cmd);
- case WIFI_IF_NTFY_BSS_TRK_ENTRIES_REMOVED:
- return iwm_mlme_remove_bss(iwm, buf, buf_size, cmd);
- break;
- case WIFI_IF_NTFY_MGMT_FRAME:
- return iwm_mlme_mgt_frame(iwm, buf, buf_size, cmd);
- case WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_START:
- case WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_COMPLETE:
- case WIFI_DBG_IF_NTFY_SCAN_CHANNEL_START:
- case WIFI_DBG_IF_NTFY_SCAN_CHANNEL_RESULT:
- case WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_START:
- case WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_COMPLETE:
- case WIFI_DBG_IF_NTFY_CNCT_ATC_START:
- case WIFI_DBG_IF_NTFY_COEX_NOTIFICATION:
- case WIFI_DBG_IF_NTFY_COEX_HANDLE_ENVELOP:
- case WIFI_DBG_IF_NTFY_COEX_HANDLE_RELEASE_ENVELOP:
- IWM_DBG_MLME(iwm, DBG, "MLME debug notification: 0x%x\n",
- notif->status);
- break;
- default:
- IWM_ERR(iwm, "Unhandled notification: 0x%x\n", notif->status);
- break;
- }
-
- return 0;
-}
-
-#define IWM_STATS_UPDATE_INTERVAL (2 * HZ)
-
-static int iwm_ntf_statistics(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_stats *stats = (struct iwm_umac_notif_stats *)buf;
- struct iw_statistics *wstats = &iwm->wstats;
- u16 max_rate = 0;
- int i;
-
- IWM_DBG_MLME(iwm, DBG, "Statistics notification received\n");
-
- if (test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
- for (i = 0; i < UMAC_NTF_RATE_SAMPLE_NR; i++) {
- max_rate = max_t(u16, max_rate,
- max(le16_to_cpu(stats->tx_rate[i]),
- le16_to_cpu(stats->rx_rate[i])));
- }
-		/* UMAC passes rate info multiplied by 2 */
- iwm->rate = max_rate >> 1;
- }
- iwm->txpower = le32_to_cpu(stats->tx_power);
-
- wstats->status = 0;
-
- wstats->discard.nwid = le32_to_cpu(stats->rx_drop_other_bssid);
- wstats->discard.code = le32_to_cpu(stats->rx_drop_decode);
- wstats->discard.fragment = le32_to_cpu(stats->rx_drop_reassembly);
- wstats->discard.retries = le32_to_cpu(stats->tx_drop_max_retry);
-
- wstats->miss.beacon = le32_to_cpu(stats->missed_beacons);
-
- /* according to cfg80211 */
- if (stats->rssi_dbm < -110)
- wstats->qual.qual = 0;
- else if (stats->rssi_dbm > -40)
- wstats->qual.qual = 70;
- else
- wstats->qual.qual = stats->rssi_dbm + 110;
-
- wstats->qual.level = stats->rssi_dbm;
- wstats->qual.noise = stats->noise_dbm;
- wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
-
- schedule_delayed_work(&iwm->stats_request, IWM_STATS_UPDATE_INTERVAL);
-
- mod_timer(&iwm->watchdog, round_jiffies(jiffies + IWM_WATCHDOG_PERIOD));
-
- return 0;
-}
-
-static int iwm_ntf_eeprom_proxy(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_cmd_eeprom_proxy *eeprom_proxy =
- (struct iwm_umac_cmd_eeprom_proxy *)
- (buf + sizeof(struct iwm_umac_wifi_in_hdr));
- struct iwm_umac_cmd_eeprom_proxy_hdr *hdr = &eeprom_proxy->hdr;
- u32 hdr_offset = le32_to_cpu(hdr->offset);
- u32 hdr_len = le32_to_cpu(hdr->len);
- u32 hdr_type = le32_to_cpu(hdr->type);
-
- IWM_DBG_NTF(iwm, DBG, "type: 0x%x, len: %d, offset: 0x%x\n",
- hdr_type, hdr_len, hdr_offset);
-
- if ((hdr_offset + hdr_len) > IWM_EEPROM_LEN)
- return -EINVAL;
-
- switch (hdr_type) {
- case IWM_UMAC_CMD_EEPROM_TYPE_READ:
- memcpy(iwm->eeprom + hdr_offset, eeprom_proxy->buf, hdr_len);
- break;
- case IWM_UMAC_CMD_EEPROM_TYPE_WRITE:
- default:
- return -ENOTSUPP;
- }
-
- return 0;
-}
-
-static int iwm_ntf_channel_info_list(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_cmd_get_channel_list *ch_list =
- (struct iwm_umac_cmd_get_channel_list *)
- (buf + sizeof(struct iwm_umac_wifi_in_hdr));
- struct wiphy *wiphy = iwm_to_wiphy(iwm);
- struct ieee80211_supported_band *band;
- int i;
-
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
-
- for (i = 0; i < band->n_channels; i++) {
- unsigned long ch_mask_0 =
- le32_to_cpu(ch_list->ch[0].channels_mask);
- unsigned long ch_mask_2 =
- le32_to_cpu(ch_list->ch[2].channels_mask);
-
- if (!test_bit(i, &ch_mask_0))
- band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
-
- if (!test_bit(i, &ch_mask_2))
- band->channels[i].flags |= IEEE80211_CHAN_NO_IBSS;
- }
-
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
-
- for (i = 0; i < min(band->n_channels, 32); i++) {
- unsigned long ch_mask_1 =
- le32_to_cpu(ch_list->ch[1].channels_mask);
- unsigned long ch_mask_3 =
- le32_to_cpu(ch_list->ch[3].channels_mask);
-
- if (!test_bit(i, &ch_mask_1))
- band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
-
- if (!test_bit(i, &ch_mask_3))
- band->channels[i].flags |= IEEE80211_CHAN_NO_IBSS;
- }
-
- return 0;
-}
-
-static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_stop_resume_tx *stp_res_tx =
- (struct iwm_umac_notif_stop_resume_tx *)buf;
- struct iwm_sta_info *sta_info;
- struct iwm_tid_info *tid_info;
- u8 sta_id = STA_ID_N_COLOR_ID(stp_res_tx->sta_id);
- u16 tid_msk = le16_to_cpu(stp_res_tx->stop_resume_tid_msk);
- int bit, ret = 0;
- bool stop = false;
-
- IWM_DBG_NTF(iwm, DBG, "stop/resume notification:\n"
- "\tflags: 0x%x\n"
- "\tSTA id: %d\n"
- "\tTID bitmask: 0x%x\n",
- stp_res_tx->flags, stp_res_tx->sta_id,
- stp_res_tx->stop_resume_tid_msk);
-
- if (stp_res_tx->flags & UMAC_STOP_TX_FLAG)
- stop = true;
-
- sta_info = &iwm->sta_table[sta_id];
- if (!sta_info->valid) {
-		IWM_ERR(iwm, "Stopping an invalid STA: %d %d\n",
- sta_id, stp_res_tx->sta_id);
- return -EINVAL;
- }
-
- for_each_set_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) {
- tid_info = &sta_info->tid_info[bit];
-
- mutex_lock(&tid_info->mutex);
- tid_info->stopped = stop;
- mutex_unlock(&tid_info->mutex);
-
- if (!stop) {
- struct iwm_tx_queue *txq;
- int queue = iwm_tid_to_queue(bit);
-
- if (queue < 0)
- continue;
-
- txq = &iwm->txq[queue];
- /*
- * If we resume, we have to move our SKBs
- * back to the tx queue and queue some work.
- */
- spin_lock_bh(&txq->lock);
- skb_queue_splice_init(&txq->queue, &txq->stopped_queue);
- spin_unlock_bh(&txq->lock);
-
- queue_work(txq->wq, &txq->worker);
- }
-
- }
-
- /* We send an ACK only for the stop case */
- if (stop)
- ret = iwm_send_umac_stop_resume_tx(iwm, stp_res_tx);
-
- return ret;
-}
-
-static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_wifi_if *hdr;
-
- if (cmd == NULL) {
- IWM_ERR(iwm, "Couldn't find expected wifi command\n");
- return -EINVAL;
- }
-
- hdr = (struct iwm_umac_wifi_if *)cmd->buf.payload;
-
- IWM_DBG_NTF(iwm, DBG, "WIFI_IF_WRAPPER cmd is delivered to UMAC: "
- "oid is 0x%x\n", hdr->oid);
-
- set_bit(hdr->oid, &iwm->wifi_ntfy[0]);
- wake_up_interruptible(&iwm->wifi_ntfy_queue);
-
- switch (hdr->oid) {
- case UMAC_WIFI_IF_CMD_SET_PROFILE:
- iwm->umac_profile_active = true;
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-#define CT_KILL_DELAY (30 * HZ)
-static int iwm_ntf_card_state(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct wiphy *wiphy = iwm_to_wiphy(iwm);
- struct iwm_lmac_card_state *state = (struct iwm_lmac_card_state *)
- (buf + sizeof(struct iwm_umac_wifi_in_hdr));
- u32 flags = le32_to_cpu(state->flags);
-
- IWM_INFO(iwm, "HW RF Kill %s, CT Kill %s\n",
- flags & IWM_CARD_STATE_HW_DISABLED ? "ON" : "OFF",
- flags & IWM_CARD_STATE_CTKILL_DISABLED ? "ON" : "OFF");
-
- if (flags & IWM_CARD_STATE_CTKILL_DISABLED) {
- /*
- * We got a CTKILL event: We bring the interface down in
-		 * order to cool the device down, and try to bring it up
- * 30 seconds later. If it's still too hot, we'll go through
- * this code path again.
- */
- cancel_delayed_work_sync(&iwm->ct_kill_delay);
- schedule_delayed_work(&iwm->ct_kill_delay, CT_KILL_DELAY);
- }
-
- wiphy_rfkill_set_hw_state(wiphy, flags &
- (IWM_CARD_STATE_HW_DISABLED |
- IWM_CARD_STATE_CTKILL_DISABLED));
-
- return 0;
-}
-
-static int iwm_rx_handle_wifi(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size)
-{
- struct iwm_umac_wifi_in_hdr *wifi_hdr;
- struct iwm_wifi_cmd *cmd;
- u8 source, cmd_id;
- u16 seq_num;
- u32 count;
-
- wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
- cmd_id = wifi_hdr->sw_hdr.cmd.cmd;
- source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
- if (source >= IWM_SRC_NUM) {
- IWM_CRIT(iwm, "invalid source %d\n", source);
- return -EINVAL;
- }
-
- if (cmd_id == REPLY_RX_MPDU_CMD)
- trace_iwm_rx_packet(iwm, buf, buf_size);
- else if ((cmd_id == UMAC_NOTIFY_OPCODE_RX_TICKET) &&
- (source == UMAC_HDI_IN_SOURCE_FW))
- trace_iwm_rx_ticket(iwm, buf, buf_size);
- else
- trace_iwm_rx_wifi_cmd(iwm, wifi_hdr);
-
- count = GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT);
- count += sizeof(struct iwm_umac_wifi_in_hdr) -
- sizeof(struct iwm_dev_cmd_hdr);
- if (count > buf_size) {
- IWM_CRIT(iwm, "count %d, buf size:%ld\n", count, buf_size);
- return -EINVAL;
- }
-
- seq_num = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num);
-
- IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x, seqnum: %d\n",
- cmd_id, source, seq_num);
-
- /*
- * If this is a response to a previously sent command, there must
- * be a pending command for this sequence number.
- */
- cmd = iwm_get_pending_wifi_cmd(iwm, seq_num);
-
- /* Notify the caller only for sync commands. */
- switch (source) {
- case UMAC_HDI_IN_SOURCE_FHRX:
- if (iwm->lmac_handlers[cmd_id] &&
- test_bit(cmd_id, &iwm->lmac_handler_map[0]))
- return iwm_notif_send(iwm, cmd, cmd_id, source,
- buf, count);
- break;
- case UMAC_HDI_IN_SOURCE_FW:
- if (iwm->umac_handlers[cmd_id] &&
- test_bit(cmd_id, &iwm->umac_handler_map[0]))
- return iwm_notif_send(iwm, cmd, cmd_id, source,
- buf, count);
- break;
- case UMAC_HDI_IN_SOURCE_UDMA:
- break;
- }
-
- return iwm_rx_handle_resp(iwm, buf, count, cmd);
-}
-
-int iwm_rx_handle_resp(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- u8 source, cmd_id;
- struct iwm_umac_wifi_in_hdr *wifi_hdr;
- int ret = 0;
-
- wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
- cmd_id = wifi_hdr->sw_hdr.cmd.cmd;
-
- source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
-
- IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x\n", cmd_id, source);
-
- switch (source) {
- case UMAC_HDI_IN_SOURCE_FHRX:
- if (iwm->lmac_handlers[cmd_id])
- ret = iwm->lmac_handlers[cmd_id]
- (iwm, buf, buf_size, cmd);
- break;
- case UMAC_HDI_IN_SOURCE_FW:
- if (iwm->umac_handlers[cmd_id])
- ret = iwm->umac_handlers[cmd_id]
- (iwm, buf, buf_size, cmd);
- break;
- case UMAC_HDI_IN_SOURCE_UDMA:
- ret = -EINVAL;
- break;
- }
-
- kfree(cmd);
-
- return ret;
-}
-
-static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size)
-{
- u8 seq_num;
- struct iwm_udma_in_hdr *hdr = (struct iwm_udma_in_hdr *)buf;
- struct iwm_nonwifi_cmd *cmd;
-
- trace_iwm_rx_nonwifi_cmd(iwm, buf, buf_size);
- seq_num = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM);
-
- /*
- * We received a non wifi answer.
- * Let's check if there's a pending command for it, and if so
- * replace the command payload with the buffer, and then wake the
- * callers up.
- * That means we only support synchronised non wifi command response
- * schemes.
- */
- list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
- if (cmd->seq_num == seq_num) {
- cmd->resp_received = true;
- cmd->buf.len = buf_size;
- memcpy(cmd->buf.hdr, buf, buf_size);
- wake_up_interruptible(&iwm->nonwifi_queue);
- }
-
- return 0;
-}
-
-static int iwm_rx_handle_umac(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size)
-{
- int ret = 0;
- u8 op_code;
- unsigned long buf_offset = 0;
- struct iwm_udma_in_hdr *hdr;
-
- /*
- * To allow for a more efficient bus usage, UMAC
- * messages are encapsulated into UDMA ones. This
- * way we can have several UMAC messages in one bus
- * transfer.
- * A UDMA frame size is always aligned on 16 bytes,
- * and a UDMA frame must not start with a UMAC_PAD_TERMINAL
- * word. This is how we parse a bus frame into several
- * UDMA ones.
- */
- while (buf_offset < buf_size) {
-
- hdr = (struct iwm_udma_in_hdr *)(buf + buf_offset);
-
- if (iwm_rx_check_udma_hdr(hdr) < 0) {
- IWM_DBG_RX(iwm, DBG, "End of frame\n");
- break;
- }
-
- op_code = GET_VAL32(hdr->cmd, UMAC_HDI_IN_CMD_OPCODE);
-
- IWM_DBG_RX(iwm, DBG, "Op code: 0x%x\n", op_code);
-
- if (op_code == UMAC_HDI_IN_OPCODE_WIFI) {
- ret |= iwm_rx_handle_wifi(iwm, buf + buf_offset,
- buf_size - buf_offset);
- } else if (op_code < UMAC_HDI_IN_OPCODE_NONWIFI_MAX) {
- if (GET_VAL32(hdr->cmd,
- UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG) !=
- UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG) {
- IWM_ERR(iwm, "Incorrect hw signature\n");
- return -EINVAL;
- }
- ret |= iwm_rx_handle_nonwifi(iwm, buf + buf_offset,
- buf_size - buf_offset);
- } else {
- IWM_ERR(iwm, "Invalid RX opcode: 0x%x\n", op_code);
- ret |= -EINVAL;
- }
-
- buf_offset += iwm_rx_resp_size(hdr);
- }
-
- return ret;
-}
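The UDMA encapsulation walk implemented by iwm_rx_handle_umac() above (several UMAC messages packed into one bus transfer, each skipped over by its 16-byte-aligned size until a terminator is hit) can be illustrated with a small standalone sketch. The frame_hdr layout, the PAD_TERMINAL value and the sample offsets below are hypothetical; only the 16-byte alignment rule is taken from the code above.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define FRAME_ALIGN	16
#define PAD_TERMINAL	0xffffffffu	/* hypothetical terminator marker */

struct frame_hdr {
	uint32_t marker;	/* PAD_TERMINAL ends the walk */
	uint32_t len;		/* payload bytes following this header */
};

static void walk(const uint8_t *buf, size_t buf_size)
{
	size_t offset = 0;

	while (offset + sizeof(struct frame_hdr) <= buf_size) {
		struct frame_hdr hdr;

		memcpy(&hdr, buf + offset, sizeof(hdr));
		if (hdr.marker == PAD_TERMINAL)
			break;	/* end of frame, like a failed header check */

		printf("frame at offset %zu, %u payload bytes\n",
		       offset, hdr.len);

		/* The next embedded frame starts on a 16-byte boundary. */
		offset += (sizeof(hdr) + hdr.len + FRAME_ALIGN - 1) &
			  ~(size_t)(FRAME_ALIGN - 1);
	}
}

int main(void)
{
	/* Two embedded frames (12 and 40 payload bytes), then a terminator. */
	uint8_t buf[128] = { 0 };
	struct frame_hdr a = { 0, 12 }, b = { 0, 40 }, end = { PAD_TERMINAL, 0 };

	memcpy(buf, &a, sizeof(a));
	memcpy(buf + 32, &b, sizeof(b));	/* 8 + 12 rounds up to 32 */
	memcpy(buf + 80, &end, sizeof(end));	/* 32 + 8 + 40 rounds up to 80 */

	walk(buf, sizeof(buf));
	return 0;
}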
-
-int iwm_rx_handle(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size)
-{
- struct iwm_udma_in_hdr *hdr;
-
- hdr = (struct iwm_udma_in_hdr *)buf;
-
- switch (le32_to_cpu(hdr->cmd)) {
- case UMAC_REBOOT_BARKER:
- if (test_bit(IWM_STATUS_READY, &iwm->status)) {
- IWM_ERR(iwm, "Unexpected BARKER\n");
-
- schedule_work(&iwm->reset_worker);
-
- return 0;
- }
-
- return iwm_notif_send(iwm, NULL, IWM_BARKER_REBOOT_NOTIFICATION,
- IWM_SRC_UDMA, buf, buf_size);
- case UMAC_ACK_BARKER:
- return iwm_notif_send(iwm, NULL, IWM_ACK_BARKER_NOTIFICATION,
- IWM_SRC_UDMA, NULL, 0);
- default:
- IWM_DBG_RX(iwm, DBG, "Received cmd: 0x%x\n", hdr->cmd);
- return iwm_rx_handle_umac(iwm, buf, buf_size);
- }
-
- return 0;
-}
-
-static const iwm_handler iwm_umac_handlers[] =
-{
- [UMAC_NOTIFY_OPCODE_ERROR] = iwm_ntf_error,
- [UMAC_NOTIFY_OPCODE_ALIVE] = iwm_ntf_umac_alive,
- [UMAC_NOTIFY_OPCODE_INIT_COMPLETE] = iwm_ntf_init_complete,
- [UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS] = iwm_ntf_wifi_status,
- [UMAC_NOTIFY_OPCODE_WIFI_IF_WRAPPER] = iwm_ntf_mlme,
- [UMAC_NOTIFY_OPCODE_PAGE_DEALLOC] = iwm_ntf_tx_credit_update,
- [UMAC_NOTIFY_OPCODE_RX_TICKET] = iwm_ntf_rx_ticket,
- [UMAC_CMD_OPCODE_RESET] = iwm_ntf_umac_reset,
- [UMAC_NOTIFY_OPCODE_STATS] = iwm_ntf_statistics,
- [UMAC_CMD_OPCODE_EEPROM_PROXY] = iwm_ntf_eeprom_proxy,
- [UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST] = iwm_ntf_channel_info_list,
- [UMAC_CMD_OPCODE_STOP_RESUME_STA_TX] = iwm_ntf_stop_resume_tx,
- [REPLY_RX_MPDU_CMD] = iwm_ntf_rx_packet,
- [UMAC_CMD_OPCODE_WIFI_IF_WRAPPER] = iwm_ntf_wifi_if_wrapper,
-};
-
-static const iwm_handler iwm_lmac_handlers[] =
-{
- [REPLY_TX] = iwm_ntf_tx,
- [REPLY_ALIVE] = iwm_ntf_lmac_version,
- [CALIBRATION_RES_NOTIFICATION] = iwm_ntf_calib_res,
- [CALIBRATION_COMPLETE_NOTIFICATION] = iwm_ntf_calib_complete,
- [CALIBRATION_CFG_CMD] = iwm_ntf_calib_cfg,
- [REPLY_RX_MPDU_CMD] = iwm_ntf_rx_packet,
- [CARD_STATE_NOTIFICATION] = iwm_ntf_card_state,
-};
-
-void iwm_rx_setup_handlers(struct iwm_priv *iwm)
-{
- iwm->umac_handlers = (iwm_handler *) iwm_umac_handlers;
- iwm->lmac_handlers = (iwm_handler *) iwm_lmac_handlers;
-}
-
-static void iwm_remove_iv(struct sk_buff *skb, u32 hdr_total_len)
-{
- struct ieee80211_hdr *hdr;
- unsigned int hdr_len;
-
- hdr = (struct ieee80211_hdr *)skb->data;
-
- if (!ieee80211_has_protected(hdr->frame_control))
- return;
-
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
- if (hdr_total_len <= hdr_len)
- return;
-
- memmove(skb->data + (hdr_total_len - hdr_len), skb->data, hdr_len);
- skb_pull(skb, (hdr_total_len - hdr_len));
-}
-
-static void iwm_rx_adjust_packet(struct iwm_priv *iwm,
- struct iwm_rx_packet *packet,
- struct iwm_rx_ticket_node *ticket_node)
-{
- u32 payload_offset = 0, payload_len;
- struct iwm_rx_ticket *ticket = ticket_node->ticket;
- struct iwm_rx_mpdu_hdr *mpdu_hdr;
- struct ieee80211_hdr *hdr;
-
- mpdu_hdr = (struct iwm_rx_mpdu_hdr *)packet->skb->data;
- payload_offset += sizeof(struct iwm_rx_mpdu_hdr);
- /* Padding is 0 or 2 bytes */
- payload_len = le16_to_cpu(mpdu_hdr->len) +
- (le16_to_cpu(ticket->flags) & IWM_RX_TICKET_PAD_SIZE_MSK);
- payload_len -= ticket->tail_len;
-
- IWM_DBG_RX(iwm, DBG, "Packet adjusted, len:%d, offset:%d, "
- "ticket offset:%d ticket tail len:%d\n",
- payload_len, payload_offset, ticket->payload_offset,
- ticket->tail_len);
-
- IWM_HEXDUMP(iwm, DBG, RX, "RAW: ", packet->skb->data, packet->skb->len);
-
- skb_pull(packet->skb, payload_offset);
- skb_trim(packet->skb, payload_len);
-
- iwm_remove_iv(packet->skb, ticket->payload_offset);
-
- hdr = (struct ieee80211_hdr *) packet->skb->data;
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- /* UMAC handed QOS_DATA frame with 2 padding bytes appended
- * to the qos_ctl field in IEEE 802.11 headers. */
- memmove(packet->skb->data + IEEE80211_QOS_CTL_LEN + 2,
- packet->skb->data,
- ieee80211_hdrlen(hdr->frame_control) -
- IEEE80211_QOS_CTL_LEN);
- hdr = (struct ieee80211_hdr *) skb_pull(packet->skb,
- IEEE80211_QOS_CTL_LEN + 2);
- hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
- }
-
- IWM_HEXDUMP(iwm, DBG, RX, "ADJUSTED: ",
- packet->skb->data, packet->skb->len);
-}
-
-static void classify8023(struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- /* frame has qos control */
- skb->priority = *qc & IEEE80211_QOS_CTL_TID_MASK;
- } else {
- skb->priority = 0;
- }
-}
-
-static void iwm_rx_process_amsdu(struct iwm_priv *iwm, struct sk_buff *skb)
-{
- struct wireless_dev *wdev = iwm_to_wdev(iwm);
- struct net_device *ndev = iwm_to_ndev(iwm);
- struct sk_buff_head list;
- struct sk_buff *frame;
-
- IWM_HEXDUMP(iwm, DBG, RX, "A-MSDU: ", skb->data, skb->len);
-
- __skb_queue_head_init(&list);
- ieee80211_amsdu_to_8023s(skb, &list, ndev->dev_addr, wdev->iftype, 0,
- true);
-
- while ((frame = __skb_dequeue(&list))) {
- ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += frame->len;
-
- frame->protocol = eth_type_trans(frame, ndev);
- frame->ip_summed = CHECKSUM_NONE;
- memset(frame->cb, 0, sizeof(frame->cb));
-
- if (netif_rx_ni(frame) == NET_RX_DROP) {
- IWM_ERR(iwm, "Packet dropped\n");
- ndev->stats.rx_dropped++;
- }
- }
-}
-
-static void iwm_rx_process_packet(struct iwm_priv *iwm,
- struct iwm_rx_packet *packet,
- struct iwm_rx_ticket_node *ticket_node)
-{
- int ret;
- struct sk_buff *skb = packet->skb;
- struct wireless_dev *wdev = iwm_to_wdev(iwm);
- struct net_device *ndev = iwm_to_ndev(iwm);
-
- IWM_DBG_RX(iwm, DBG, "Processing packet ID %d\n", packet->id);
-
- switch (le16_to_cpu(ticket_node->ticket->action)) {
- case IWM_RX_TICKET_RELEASE:
- IWM_DBG_RX(iwm, DBG, "RELEASE packet\n");
-
- iwm_rx_adjust_packet(iwm, packet, ticket_node);
- skb->dev = iwm_to_ndev(iwm);
- classify8023(skb);
-
- if (le16_to_cpu(ticket_node->ticket->flags) &
- IWM_RX_TICKET_AMSDU_MSK) {
- iwm_rx_process_amsdu(iwm, skb);
- break;
- }
-
- ret = ieee80211_data_to_8023(skb, ndev->dev_addr, wdev->iftype);
- if (ret < 0) {
- IWM_DBG_RX(iwm, DBG, "Couldn't convert 802.11 header - "
- "%d\n", ret);
- kfree_skb(packet->skb);
- break;
- }
-
- IWM_HEXDUMP(iwm, DBG, RX, "802.3: ", skb->data, skb->len);
-
- ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += skb->len;
-
- skb->protocol = eth_type_trans(skb, ndev);
- skb->ip_summed = CHECKSUM_NONE;
- memset(skb->cb, 0, sizeof(skb->cb));
-
- if (netif_rx_ni(skb) == NET_RX_DROP) {
- IWM_ERR(iwm, "Packet dropped\n");
- ndev->stats.rx_dropped++;
- }
- break;
- case IWM_RX_TICKET_DROP:
- IWM_DBG_RX(iwm, DBG, "DROP packet: 0x%x\n",
- le16_to_cpu(ticket_node->ticket->flags));
- kfree_skb(packet->skb);
- break;
- default:
- IWM_ERR(iwm, "Unknown ticket action: %d\n",
- le16_to_cpu(ticket_node->ticket->action));
- kfree_skb(packet->skb);
- }
-
- kfree(packet);
- iwm_rx_ticket_node_free(ticket_node);
-}
-
-/*
- * Rx data processing:
- *
- * We're receiving Rx packet from the LMAC, and Rx ticket from
- * the UMAC.
- * To forward a target data packet upstream (i.e. to the
- * kernel network stack), we must have received an Rx ticket
- * that tells us we're allowed to release this packet (ticket
- * action is IWM_RX_TICKET_RELEASE). The Rx ticket also indicates,
- * among other things, where valid data actually starts in the Rx
- * packet.
- */
-void iwm_rx_worker(struct work_struct *work)
-{
- struct iwm_priv *iwm;
- struct iwm_rx_ticket_node *ticket, *next;
-
- iwm = container_of(work, struct iwm_priv, rx_worker);
-
- /*
- * We go through the tickets list and if there is a pending
- * packet for it, we push it upstream.
- * We stop whenever a ticket is missing its packet, as we're
- * supposed to send the packets in order.
- */
- spin_lock(&iwm->ticket_lock);
- list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) {
- struct iwm_rx_packet *packet =
- iwm_rx_packet_get(iwm, le16_to_cpu(ticket->ticket->id));
-
- if (!packet) {
- IWM_DBG_RX(iwm, DBG, "Skip rx_work: Wait for ticket %d "
- "to be handled first\n",
- le16_to_cpu(ticket->ticket->id));
- break;
- }
-
- list_del(&ticket->node);
- iwm_rx_process_packet(iwm, packet, ticket);
- }
- spin_unlock(&iwm->ticket_lock);
-}
-
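The in-order release rule described in the iwm_rx_worker() comment above (push a packet upstream only when its ticket has a matching packet, and stop at the first ticket whose packet has not arrived) can be sketched with plain arrays; the ticket ids, the packet_arrived() helper and the sample data below are hypothetical, not the driver's list-based structures.

#include <stdbool.h>
#include <stdio.h>

#define NTICKETS 4

/* Returns true if a packet with this id has already been received. */
static bool packet_arrived(int id, const int *arrived, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (arrived[i] == id)
			return true;
	return false;
}

int main(void)
{
	int tickets[NTICKETS] = { 10, 11, 12, 13 };	/* release order */
	int arrived[] = { 10, 12, 13 };			/* 11 is still missing */
	int i;

	for (i = 0; i < NTICKETS; i++) {
		if (!packet_arrived(tickets[i], arrived, 3)) {
			/* Like iwm_rx_worker(): stop at the first gap so
			 * packets always go upstream in ticket order. */
			printf("ticket %d: packet missing, stop\n", tickets[i]);
			break;
		}
		printf("ticket %d: release packet upstream\n", tickets[i]);
	}
	return 0;
}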
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.h b/drivers/net/wireless/iwmc3200wifi/rx.h
deleted file mode 100644
index da0db91..0000000
--- a/drivers/net/wireless/iwmc3200wifi/rx.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#ifndef __IWM_RX_H__
-#define __IWM_RX_H__
-
-#include <linux/skbuff.h>
-
-#include "umac.h"
-
-struct iwm_rx_ticket_node {
- struct list_head node;
- struct iwm_rx_ticket *ticket;
-};
-
-struct iwm_rx_packet {
- struct list_head node;
- u16 id;
- struct sk_buff *skb;
- unsigned long pkt_size;
-};
-
-void iwm_rx_worker(struct work_struct *work);
-
-#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
deleted file mode 100644
index 0042f20..0000000
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-/*
- * These are the SDIO bus-specific hooks for iwm.
- * This file is also the module's entry point.
- *
- * Interesting code paths:
- * iwm_sdio_probe() (Called by an SDIO bus scan)
- * -> iwm_if_alloc() (netdev.c)
- * -> iwm_wdev_alloc() (cfg80211.c, allocates and register our wiphy)
- * -> wiphy_new()
- * -> wiphy_register()
- * -> alloc_netdev_mq()
- * -> register_netdev()
- *
- * iwm_sdio_remove()
- * -> iwm_if_free() (netdev.c)
- * -> unregister_netdev()
- * -> iwm_wdev_free() (cfg80211.c)
- * -> wiphy_unregister()
- * -> wiphy_free()
- *
- * iwm_sdio_isr() (called in process context from the SDIO core code)
- * -> queue_work(.., isr_worker)
- * -- [async] --> iwm_sdio_isr_worker()
- * -> iwm_rx_handle()
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/debugfs.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/sdio.h>
-#include <linux/mmc/sdio_func.h>
-
-#include "iwm.h"
-#include "debug.h"
-#include "bus.h"
-#include "sdio.h"
-
-static void iwm_sdio_isr_worker(struct work_struct *work)
-{
- struct iwm_sdio_priv *hw;
- struct iwm_priv *iwm;
- struct iwm_rx_info *rx_info;
- struct sk_buff *skb;
- u8 *rx_buf;
- unsigned long rx_size;
-
- hw = container_of(work, struct iwm_sdio_priv, isr_worker);
- iwm = hw_to_iwm(hw);
-
- while (!skb_queue_empty(&iwm->rx_list)) {
- skb = skb_dequeue(&iwm->rx_list);
- rx_info = skb_to_rx_info(skb);
- rx_size = rx_info->rx_size;
- rx_buf = skb->data;
-
- IWM_HEXDUMP(iwm, DBG, SDIO, "RX: ", rx_buf, rx_size);
- if (iwm_rx_handle(iwm, rx_buf, rx_size) < 0)
- IWM_WARN(iwm, "RX error\n");
-
- kfree_skb(skb);
- }
-}
-
-static void iwm_sdio_isr(struct sdio_func *func)
-{
- struct iwm_priv *iwm;
- struct iwm_sdio_priv *hw;
- struct iwm_rx_info *rx_info;
- struct sk_buff *skb;
- unsigned long buf_size, read_size;
- int ret;
- u8 val;
-
- hw = sdio_get_drvdata(func);
- iwm = hw_to_iwm(hw);
-
- buf_size = hw->blk_size;
-
- /* We're checking the status */
- val = sdio_readb(func, IWM_SDIO_INTR_STATUS_ADDR, &ret);
- if (val == 0 || ret < 0) {
- IWM_ERR(iwm, "Wrong INTR_STATUS\n");
- return;
- }
-
- /* See if we have free buffers */
- if (skb_queue_len(&iwm->rx_list) > IWM_RX_LIST_SIZE) {
- IWM_ERR(iwm, "No buffer for more Rx frames\n");
- return;
- }
-
- /* We first read the transaction size */
- read_size = sdio_readb(func, IWM_SDIO_INTR_GET_SIZE_ADDR + 1, &ret);
- read_size = read_size << 8;
-
- if (ret < 0) {
- IWM_ERR(iwm, "Couldn't read the xfer size\n");
- return;
- }
-
- /* We need to clear the INT register */
- sdio_writeb(func, 1, IWM_SDIO_INTR_CLEAR_ADDR, &ret);
- if (ret < 0) {
- IWM_ERR(iwm, "Couldn't clear the INT register\n");
- return;
- }
-
- while (buf_size < read_size)
- buf_size <<= 1;
-
- skb = dev_alloc_skb(buf_size);
- if (!skb) {
- IWM_ERR(iwm, "Couldn't alloc RX skb\n");
- return;
- }
- rx_info = skb_to_rx_info(skb);
- rx_info->rx_size = read_size;
- rx_info->rx_buf_size = buf_size;
-
- /* Now we can read the actual buffer */
- ret = sdio_memcpy_fromio(func, skb_put(skb, read_size),
- IWM_SDIO_DATA_ADDR, read_size);
-
- /* The skb is put on a driver's specific Rx SKB list */
- skb_queue_tail(&iwm->rx_list, skb);
-
- /* We can now schedule the actual worker */
- queue_work(hw->isr_wq, &hw->isr_worker);
-}
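The skb sizing rule in iwm_sdio_isr() above, where the allocation starts at the SDIO block size and doubles until it covers the transfer size reported by the device, is easy to check in isolation; the 1500-byte read size below is a made-up sample, only the 256-byte IWM_SDIO_BLK_SIZE comes from this driver.

#include <stdio.h>

static unsigned long iwm_rx_buf_size(unsigned long blk_size,
				     unsigned long read_size)
{
	unsigned long buf_size = blk_size;

	/* Same doubling loop as iwm_sdio_isr() above. */
	while (buf_size < read_size)
		buf_size <<= 1;

	return buf_size;
}

int main(void)
{
	/* With the 256-byte IWM_SDIO_BLK_SIZE, a 1500-byte transfer
	 * ends up in a 2048-byte buffer. */
	printf("%lu\n", iwm_rx_buf_size(256, 1500));
	return 0;
}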
-
-static void iwm_sdio_rx_free(struct iwm_sdio_priv *hw)
-{
- struct iwm_priv *iwm = hw_to_iwm(hw);
-
- flush_workqueue(hw->isr_wq);
-
- skb_queue_purge(&iwm->rx_list);
-}
-
-/* Bus ops */
-static int if_sdio_enable(struct iwm_priv *iwm)
-{
- struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
- int ret;
-
- sdio_claim_host(hw->func);
-
- ret = sdio_enable_func(hw->func);
- if (ret) {
- IWM_ERR(iwm, "Couldn't enable the device: is TOP driver "
- "loaded and functional?\n");
- goto release_host;
- }
-
- iwm_reset(iwm);
-
- ret = sdio_claim_irq(hw->func, iwm_sdio_isr);
- if (ret) {
- IWM_ERR(iwm, "Failed to claim irq: %d\n", ret);
- goto release_host;
- }
-
- sdio_writeb(hw->func, 1, IWM_SDIO_INTR_ENABLE_ADDR, &ret);
- if (ret < 0) {
- IWM_ERR(iwm, "Couldn't enable INTR: %d\n", ret);
- goto release_irq;
- }
-
- sdio_release_host(hw->func);
-
- IWM_DBG_SDIO(iwm, INFO, "IWM SDIO enable\n");
-
- return 0;
-
- release_irq:
- sdio_release_irq(hw->func);
- release_host:
- sdio_release_host(hw->func);
-
- return ret;
-}
-
-static int if_sdio_disable(struct iwm_priv *iwm)
-{
- struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
- int ret;
-
- sdio_claim_host(hw->func);
- sdio_writeb(hw->func, 0, IWM_SDIO_INTR_ENABLE_ADDR, &ret);
- if (ret < 0)
- IWM_WARN(iwm, "Couldn't disable INTR: %d\n", ret);
-
- sdio_release_irq(hw->func);
- sdio_disable_func(hw->func);
- sdio_release_host(hw->func);
-
- iwm_sdio_rx_free(hw);
-
- iwm_reset(iwm);
-
- IWM_DBG_SDIO(iwm, INFO, "IWM SDIO disable\n");
-
- return 0;
-}
-
-static int if_sdio_send_chunk(struct iwm_priv *iwm, u8 *buf, int count)
-{
- struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
- int aligned_count = ALIGN(count, hw->blk_size);
- int ret;
-
- if ((unsigned long)buf & 0x3) {
- IWM_ERR(iwm, "buf <%p> is not dword aligned\n", buf);
-		/* TODO: Is this a hardware limitation? use get_unaligned */
- return -EINVAL;
- }
-
- sdio_claim_host(hw->func);
- ret = sdio_memcpy_toio(hw->func, IWM_SDIO_DATA_ADDR, buf,
- aligned_count);
- sdio_release_host(hw->func);
-
- return ret;
-}
-
-static ssize_t iwm_debugfs_sdio_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct iwm_priv *iwm = filp->private_data;
- struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
- char *buf;
- u8 cccr;
- int buf_len = 4096, ret;
- size_t len = 0;
-
- if (*ppos != 0)
- return 0;
- if (count < sizeof(buf))
- return -ENOSPC;
-
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- sdio_claim_host(hw->func);
-
- cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IOEx, &ret);
- if (ret) {
- IWM_ERR(iwm, "Could not read SDIO_CCCR_IOEx\n");
- goto err;
- }
- len += snprintf(buf + len, buf_len - len, "CCCR_IOEx: 0x%x\n", cccr);
-
- cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IORx, &ret);
- if (ret) {
- IWM_ERR(iwm, "Could not read SDIO_CCCR_IORx\n");
- goto err;
- }
- len += snprintf(buf + len, buf_len - len, "CCCR_IORx: 0x%x\n", cccr);
-
-
- cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IENx, &ret);
- if (ret) {
- IWM_ERR(iwm, "Could not read SDIO_CCCR_IENx\n");
- goto err;
- }
- len += snprintf(buf + len, buf_len - len, "CCCR_IENx: 0x%x\n", cccr);
-
-
- cccr = sdio_f0_readb(hw->func, SDIO_CCCR_INTx, &ret);
- if (ret) {
- IWM_ERR(iwm, "Could not read SDIO_CCCR_INTx\n");
- goto err;
- }
- len += snprintf(buf + len, buf_len - len, "CCCR_INTx: 0x%x\n", cccr);
-
-
- cccr = sdio_f0_readb(hw->func, SDIO_CCCR_ABORT, &ret);
- if (ret) {
- IWM_ERR(iwm, "Could not read SDIO_CCCR_ABORTx\n");
- goto err;
- }
- len += snprintf(buf + len, buf_len - len, "CCCR_ABORT: 0x%x\n", cccr);
-
- cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IF, &ret);
- if (ret) {
- IWM_ERR(iwm, "Could not read SDIO_CCCR_IF\n");
- goto err;
- }
- len += snprintf(buf + len, buf_len - len, "CCCR_IF: 0x%x\n", cccr);
-
-
- cccr = sdio_f0_readb(hw->func, SDIO_CCCR_CAPS, &ret);
- if (ret) {
- IWM_ERR(iwm, "Could not read SDIO_CCCR_CAPS\n");
- goto err;
- }
- len += snprintf(buf + len, buf_len - len, "CCCR_CAPS: 0x%x\n", cccr);
-
- cccr = sdio_f0_readb(hw->func, SDIO_CCCR_CIS, &ret);
- if (ret) {
- IWM_ERR(iwm, "Could not read SDIO_CCCR_CIS\n");
- goto err;
- }
- len += snprintf(buf + len, buf_len - len, "CCCR_CIS: 0x%x\n", cccr);
-
- ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
-err:
- sdio_release_host(hw->func);
-
- kfree(buf);
-
- return ret;
-}
-
-static const struct file_operations iwm_debugfs_sdio_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = iwm_debugfs_sdio_read,
- .llseek = default_llseek,
-};
-
-static void if_sdio_debugfs_init(struct iwm_priv *iwm, struct dentry *parent_dir)
-{
- struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
-
- hw->cccr_dentry = debugfs_create_file("cccr", 0200,
- parent_dir, iwm,
- &iwm_debugfs_sdio_fops);
-}
-
-static void if_sdio_debugfs_exit(struct iwm_priv *iwm)
-{
- struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
-
- debugfs_remove(hw->cccr_dentry);
-}
-
-static struct iwm_if_ops if_sdio_ops = {
- .enable = if_sdio_enable,
- .disable = if_sdio_disable,
- .send_chunk = if_sdio_send_chunk,
- .debugfs_init = if_sdio_debugfs_init,
- .debugfs_exit = if_sdio_debugfs_exit,
- .umac_name = "iwmc3200wifi-umac-sdio.bin",
- .calib_lmac_name = "iwmc3200wifi-calib-sdio.bin",
- .lmac_name = "iwmc3200wifi-lmac-sdio.bin",
-};
-MODULE_FIRMWARE("iwmc3200wifi-umac-sdio.bin");
-MODULE_FIRMWARE("iwmc3200wifi-calib-sdio.bin");
-MODULE_FIRMWARE("iwmc3200wifi-lmac-sdio.bin");
-
-static int iwm_sdio_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
-{
- struct iwm_priv *iwm;
- struct iwm_sdio_priv *hw;
- struct device *dev = &func->dev;
- int ret;
-
- /* check if TOP has already initialized the card */
- sdio_claim_host(func);
- ret = sdio_enable_func(func);
- if (ret) {
- dev_err(dev, "wait for TOP to enable the device\n");
- sdio_release_host(func);
- return ret;
- }
-
- ret = sdio_set_block_size(func, IWM_SDIO_BLK_SIZE);
-
- sdio_disable_func(func);
- sdio_release_host(func);
-
- if (ret < 0) {
- dev_err(dev, "Failed to set block size: %d\n", ret);
- return ret;
- }
-
- iwm = iwm_if_alloc(sizeof(struct iwm_sdio_priv), dev, &if_sdio_ops);
- if (IS_ERR(iwm)) {
- dev_err(dev, "allocate SDIO interface failed\n");
- return PTR_ERR(iwm);
- }
-
- hw = iwm_private(iwm);
- hw->iwm = iwm;
-
- iwm_debugfs_init(iwm);
-
- sdio_set_drvdata(func, hw);
-
- hw->func = func;
- hw->blk_size = IWM_SDIO_BLK_SIZE;
-
- hw->isr_wq = create_singlethread_workqueue(KBUILD_MODNAME "_sdio");
- if (!hw->isr_wq) {
- ret = -ENOMEM;
- goto debugfs_exit;
- }
-
- INIT_WORK(&hw->isr_worker, iwm_sdio_isr_worker);
-
- ret = iwm_if_add(iwm);
- if (ret) {
- dev_err(dev, "add SDIO interface failed\n");
- goto destroy_wq;
- }
-
- dev_info(dev, "IWM SDIO probe\n");
-
- return 0;
-
- destroy_wq:
- destroy_workqueue(hw->isr_wq);
- debugfs_exit:
- iwm_debugfs_exit(iwm);
- iwm_if_free(iwm);
- return ret;
-}
-
-static void iwm_sdio_remove(struct sdio_func *func)
-{
- struct iwm_sdio_priv *hw = sdio_get_drvdata(func);
- struct iwm_priv *iwm = hw_to_iwm(hw);
- struct device *dev = &func->dev;
-
- iwm_if_remove(iwm);
- destroy_workqueue(hw->isr_wq);
- iwm_debugfs_exit(iwm);
- iwm_if_free(iwm);
-
- sdio_set_drvdata(func, NULL);
-
- dev_info(dev, "IWM SDIO remove\n");
-}
-
-static const struct sdio_device_id iwm_sdio_ids[] = {
- /* Global/AGN SKU */
- { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1403) },
- /* BGN SKU */
- { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1408) },
- { /* end: all zeroes */ },
-};
-MODULE_DEVICE_TABLE(sdio, iwm_sdio_ids);
-
-static struct sdio_driver iwm_sdio_driver = {
- .name = "iwm_sdio",
- .id_table = iwm_sdio_ids,
- .probe = iwm_sdio_probe,
- .remove = iwm_sdio_remove,
-};
-
-static int __init iwm_sdio_init_module(void)
-{
- return sdio_register_driver(&iwm_sdio_driver);
-}
-
-static void __exit iwm_sdio_exit_module(void)
-{
- sdio_unregister_driver(&iwm_sdio_driver);
-}
-
-module_init(iwm_sdio_init_module);
-module_exit(iwm_sdio_exit_module);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR(IWM_COPYRIGHT " " IWM_AUTHOR);
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.h b/drivers/net/wireless/iwmc3200wifi/sdio.h
deleted file mode 100644
index aab6b68..0000000
--- a/drivers/net/wireless/iwmc3200wifi/sdio.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#ifndef __IWM_SDIO_H__
-#define __IWM_SDIO_H__
-
-#define IWM_SDIO_DATA_ADDR 0x0
-#define IWM_SDIO_INTR_ENABLE_ADDR 0x14
-#define IWM_SDIO_INTR_STATUS_ADDR 0x13
-#define IWM_SDIO_INTR_CLEAR_ADDR 0x13
-#define IWM_SDIO_INTR_GET_SIZE_ADDR 0x2C
-
-#define IWM_SDIO_BLK_SIZE 256
-
-#define iwm_to_if_sdio(i) (struct iwm_sdio_priv *)(iwm->private)
-
-struct iwm_sdio_priv {
- struct sdio_func *func;
- struct iwm_priv *iwm;
-
- struct workqueue_struct *isr_wq;
- struct work_struct isr_worker;
-
- struct dentry *cccr_dentry;
-
- unsigned int blk_size;
-};
-
-#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/trace.c b/drivers/net/wireless/iwmc3200wifi/trace.c
deleted file mode 100644
index 904d36f..0000000
--- a/drivers/net/wireless/iwmc3200wifi/trace.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "iwm.h"
-#define CREATE_TRACE_POINTS
-#include "trace.h"
diff --git a/drivers/net/wireless/iwmc3200wifi/trace.h b/drivers/net/wireless/iwmc3200wifi/trace.h
deleted file mode 100644
index f5f7070..0000000
--- a/drivers/net/wireless/iwmc3200wifi/trace.h
+++ /dev/null
@@ -1,283 +0,0 @@
-#if !defined(__IWM_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
-#define __IWM_TRACE_H__
-
-#include <linux/tracepoint.h>
-
-#if !defined(CONFIG_IWM_TRACING)
-#undef TRACE_EVENT
-#define TRACE_EVENT(name, proto, ...) \
-static inline void trace_ ## name(proto) {}
-#endif
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwm
-
-#define IWM_ENTRY __array(char, ndev_name, 16)
-#define IWM_ASSIGN strlcpy(__entry->ndev_name, iwm_to_ndev(iwm)->name, 16)
-#define IWM_PR_FMT "%s"
-#define IWM_PR_ARG __entry->ndev_name
-
-TRACE_EVENT(iwm_tx_nonwifi_cmd,
- TP_PROTO(struct iwm_priv *iwm, struct iwm_udma_out_nonwifi_hdr *hdr),
-
- TP_ARGS(iwm, hdr),
-
- TP_STRUCT__entry(
- IWM_ENTRY
- __field(u8, opcode)
- __field(u8, resp)
- __field(u8, eot)
- __field(u8, hw)
- __field(u16, seq)
- __field(u32, addr)
- __field(u32, op1)
- __field(u32, op2)
- ),
-
- TP_fast_assign(
- IWM_ASSIGN;
- __entry->opcode = GET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_OPCODE);
- __entry->resp = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_RESP);
- __entry->eot = GET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT);
- __entry->hw = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW);
- __entry->seq = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM);
- __entry->addr = le32_to_cpu(hdr->addr);
- __entry->op1 = le32_to_cpu(hdr->op1_sz);
- __entry->op2 = le32_to_cpu(hdr->op2);
- ),
-
- TP_printk(
- IWM_PR_FMT " Tx TARGET CMD: opcode 0x%x, resp %d, eot %d, "
- "hw %d, seq 0x%x, addr 0x%x, op1 0x%x, op2 0x%x",
- IWM_PR_ARG, __entry->opcode, __entry->resp, __entry->eot,
- __entry->hw, __entry->seq, __entry->addr, __entry->op1,
- __entry->op2
- )
-);
-
-TRACE_EVENT(iwm_tx_wifi_cmd,
- TP_PROTO(struct iwm_priv *iwm, struct iwm_umac_wifi_out_hdr *hdr),
-
- TP_ARGS(iwm, hdr),
-
- TP_STRUCT__entry(
- IWM_ENTRY
- __field(u8, opcode)
- __field(u8, lmac)
- __field(u8, resp)
- __field(u8, eot)
- __field(u8, ra_tid)
- __field(u8, credit_group)
- __field(u8, color)
- __field(u16, seq)
- ),
-
- TP_fast_assign(
- IWM_ASSIGN;
- __entry->opcode = hdr->sw_hdr.cmd.cmd;
- __entry->lmac = 0;
- __entry->seq = __le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
- __entry->resp = GET_VAL8(hdr->sw_hdr.cmd.flags, UMAC_DEV_CMD_FLAGS_RESP_REQ);
- __entry->color = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_TX_STA_COLOR);
- __entry->eot = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_OUT_CMD_EOT);
- __entry->ra_tid = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_RATID);
- __entry->credit_group = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_CREDIT_GRP);
- if (__entry->opcode == UMAC_CMD_OPCODE_WIFI_PASS_THROUGH ||
- __entry->opcode == UMAC_CMD_OPCODE_WIFI_IF_WRAPPER) {
- __entry->lmac = 1;
- __entry->opcode = ((struct iwm_lmac_hdr *)(hdr + 1))->id;
- }
- ),
-
- TP_printk(
- IWM_PR_FMT " Tx %cMAC CMD: opcode 0x%x, resp %d, eot %d, "
- "seq 0x%x, sta_color 0x%x, ra_tid 0x%x, credit_group 0x%x",
- IWM_PR_ARG, __entry->lmac ? 'L' : 'U', __entry->opcode,
- __entry->resp, __entry->eot, __entry->seq, __entry->color,
- __entry->ra_tid, __entry->credit_group
- )
-);
-
-TRACE_EVENT(iwm_tx_packets,
- TP_PROTO(struct iwm_priv *iwm, u8 *buf, int len),
-
- TP_ARGS(iwm, buf, len),
-
- TP_STRUCT__entry(
- IWM_ENTRY
- __field(u8, eot)
- __field(u8, ra_tid)
- __field(u8, credit_group)
- __field(u8, color)
- __field(u16, seq)
- __field(u8, npkt)
- __field(u32, bytes)
- ),
-
- TP_fast_assign(
- struct iwm_umac_wifi_out_hdr *hdr =
- (struct iwm_umac_wifi_out_hdr *)buf;
-
- IWM_ASSIGN;
- __entry->eot = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_OUT_CMD_EOT);
- __entry->ra_tid = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_RATID);
- __entry->credit_group = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_CREDIT_GRP);
- __entry->color = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_TX_STA_COLOR);
- __entry->seq = __le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
- __entry->npkt = 1;
- __entry->bytes = len;
-
- if (!__entry->eot) {
- int count;
- u8 *ptr = buf;
-
- __entry->npkt = 0;
- while (ptr < buf + len) {
- count = GET_VAL32(hdr->sw_hdr.meta_data,
- UMAC_FW_CMD_BYTE_COUNT);
- ptr += ALIGN(sizeof(*hdr) + count, 16);
- hdr = (struct iwm_umac_wifi_out_hdr *)ptr;
- __entry->npkt++;
- }
- }
- ),
-
- TP_printk(
- IWM_PR_FMT " Tx %spacket: eot %d, seq 0x%x, sta_color 0x%x, "
- "ra_tid 0x%x, credit_group 0x%x, embedded_packets %d, %d bytes",
- IWM_PR_ARG, !__entry->eot ? "concatenated " : "",
- __entry->eot, __entry->seq, __entry->color, __entry->ra_tid,
- __entry->credit_group, __entry->npkt, __entry->bytes
- )
-);
-
-TRACE_EVENT(iwm_rx_nonwifi_cmd,
- TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
-
- TP_ARGS(iwm, buf, len),
-
- TP_STRUCT__entry(
- IWM_ENTRY
- __field(u8, opcode)
- __field(u16, seq)
- __field(u32, len)
- ),
-
- TP_fast_assign(
- struct iwm_udma_in_hdr *hdr = buf;
-
- IWM_ASSIGN;
- __entry->opcode = GET_VAL32(hdr->cmd, UDMA_HDI_IN_NW_CMD_OPCODE);
- __entry->seq = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM);
- __entry->len = len;
- ),
-
- TP_printk(
- IWM_PR_FMT " Rx TARGET RESP: opcode 0x%x, seq 0x%x, len 0x%x",
- IWM_PR_ARG, __entry->opcode, __entry->seq, __entry->len
- )
-);
-
-TRACE_EVENT(iwm_rx_wifi_cmd,
- TP_PROTO(struct iwm_priv *iwm, struct iwm_umac_wifi_in_hdr *hdr),
-
- TP_ARGS(iwm, hdr),
-
- TP_STRUCT__entry(
- IWM_ENTRY
- __field(u8, cmd)
- __field(u8, source)
- __field(u16, seq)
- __field(u32, count)
- ),
-
- TP_fast_assign(
- IWM_ASSIGN;
- __entry->cmd = hdr->sw_hdr.cmd.cmd;
- __entry->source = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
- __entry->count = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT);
- __entry->seq = le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
- ),
-
- TP_printk(
- IWM_PR_FMT " Rx %s RESP: cmd 0x%x, seq 0x%x, count 0x%x",
- IWM_PR_ARG, __entry->source == UMAC_HDI_IN_SOURCE_FHRX ? "LMAC" :
- __entry->source == UMAC_HDI_IN_SOURCE_FW ? "UMAC" : "UDMA",
- __entry->cmd, __entry->seq, __entry->count
- )
-);
-
-#define iwm_ticket_action_symbol \
- { IWM_RX_TICKET_DROP, "DROP" }, \
- { IWM_RX_TICKET_RELEASE, "RELEASE" }, \
- { IWM_RX_TICKET_SNIFFER, "SNIFFER" }, \
- { IWM_RX_TICKET_ENQUEUE, "ENQUEUE" }
-
-TRACE_EVENT(iwm_rx_ticket,
- TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
-
- TP_ARGS(iwm, buf, len),
-
- TP_STRUCT__entry(
- IWM_ENTRY
- __field(u8, action)
- __field(u8, reason)
- __field(u16, id)
- __field(u16, flags)
- ),
-
- TP_fast_assign(
- struct iwm_rx_ticket *ticket =
- ((struct iwm_umac_notif_rx_ticket *)buf)->tickets;
-
- IWM_ASSIGN;
- __entry->id = le16_to_cpu(ticket->id);
- __entry->action = le16_to_cpu(ticket->action);
- __entry->flags = le16_to_cpu(ticket->flags);
- __entry->reason = (__entry->flags & IWM_RX_TICKET_DROP_REASON_MSK) >> IWM_RX_TICKET_DROP_REASON_POS;
- ),
-
- TP_printk(
- IWM_PR_FMT " Rx ticket: id 0x%x, action %s, %s 0x%x%s",
- IWM_PR_ARG, __entry->id,
- __print_symbolic(__entry->action, iwm_ticket_action_symbol),
- __entry->reason ? "reason" : "flags",
- __entry->reason ? __entry->reason : __entry->flags,
- __entry->flags & IWM_RX_TICKET_AMSDU_MSK ? ", AMSDU frame" : ""
- )
-);
-
-TRACE_EVENT(iwm_rx_packet,
- TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
-
- TP_ARGS(iwm, buf, len),
-
- TP_STRUCT__entry(
- IWM_ENTRY
- __field(u8, source)
- __field(u16, id)
- __field(u32, len)
- ),
-
- TP_fast_assign(
- struct iwm_umac_wifi_in_hdr *hdr = buf;
-
- IWM_ASSIGN;
- __entry->source = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
- __entry->id = le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
- __entry->len = len - sizeof(*hdr);
- ),
-
- TP_printk(
- IWM_PR_FMT " Rx %s packet: id 0x%x, %d bytes",
- IWM_PR_ARG, __entry->source == UMAC_HDI_IN_SOURCE_FHRX ?
- "LMAC" : "UMAC", __entry->id, __entry->len
- )
-);
-#endif
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE trace
-#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwmc3200wifi/tx.c b/drivers/net/wireless/iwmc3200wifi/tx.c
deleted file mode 100644
index be98074..0000000
--- a/drivers/net/wireless/iwmc3200wifi/tx.c
+++ /dev/null
@@ -1,529 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-/*
- * iwm Tx theory of operation:
- *
- * 1) We receive a 802.3 frame from the stack
- * 2) We convert it to a 802.11 frame [iwm_xmit_frame]
- * 3) We queue it to its corresponding tx queue [iwm_xmit_frame]
- * 4) We schedule the tx worker. There is one worker per tx
- * queue. [iwm_xmit_frame]
- * 5) The tx worker is scheduled
- * 6) We go through every queued skb on the tx queue, and for each
- * and every one of them: [iwm_tx_worker]
- * a) We check if we have enough Tx credits (see below for a Tx
- * credits description) for the frame length. [iwm_tx_worker]
- * b) If we do, we aggregate the Tx frame into a UDMA one, by
- * concatenating one REPLY_TX command per Tx frame. [iwm_tx_worker]
- * c) When we run out of credits, or when we reach the maximum
- * concatenation size, we actually send the concatenated UDMA
- * frame. [iwm_tx_worker]
- *
- * When we run out of Tx credits, the skbs are filling the tx queue,
- * and eventually we will stop the netdev queue. [iwm_tx_worker]
- * The tx queue is emptied as we're getting new tx credits, by
- * scheduling the tx_worker. [iwm_tx_credit_inc]
- * The netdev queue is started again when we have enough tx credits,
- * and when our tx queue has some reasonable amount of space available
- * (i.e. half of the max size). [iwm_tx_worker]
- */
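For orientation, here is a condensed sketch of the per-queue worker loop described in steps 6a-6c above, using the helpers defined later in this file by name. It is a simplified outline of iwm_tx_worker() below, not a drop-in replacement: locking, the stopped-RAxTID path and netdev queue handling are omitted.

static void iwm_tx_worker_outline(struct iwm_priv *iwm, struct iwm_tx_queue *txq)
{
	int pool_id = queue_to_pool_id(txq->id);
	struct sk_buff *skb;

	while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
	       (skb = skb_dequeue(&txq->queue))) {
		int cmdlen = IWM_UDMA_HDR_LEN + skb->len;

		/* 6c: flush the concatenated UDMA frame before it overflows */
		if (txq->concat_count + cmdlen > IWM_HAL_CONCATENATE_BUF_SIZE)
			iwm_tx_send_concat_packets(iwm, txq);

		/* 6a: reserve Tx credits for this frame; stop when none are left */
		if (iwm_tx_credit_alloc(iwm, pool_id, cmdlen)) {
			skb_queue_head(&txq->queue, skb);
			break;
		}

		/* 6b: append one REPLY_TX command to the concatenation buffer */
		txq->concat_ptr = txq->concat_buf + txq->concat_count;
		iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
		txq->concat_count += ALIGN(cmdlen, 16);
		kfree_skb(skb);
	}

	/* send whatever has been concatenated so far */
	iwm_tx_send_concat_packets(iwm, txq);
}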
-
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/ieee80211.h>
-
-#include "iwm.h"
-#include "debug.h"
-#include "commands.h"
-#include "hal.h"
-#include "umac.h"
-#include "bus.h"
-
-#define IWM_UMAC_PAGE_ALLOC_WRAP 0xffff
-
-#define BYTES_TO_PAGES(n) (1 + ((n) >> ilog2(IWM_UMAC_PAGE_SIZE)) - \
- (((n) & (IWM_UMAC_PAGE_SIZE - 1)) == 0))
-
-#define pool_id_to_queue(id) ((id < IWM_TX_CMD_QUEUE) ? id : id - 1)
-#define queue_to_pool_id(q) ((q < IWM_TX_CMD_QUEUE) ? q : q + 1)
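As a quick sanity check on BYTES_TO_PAGES() (IWM_UMAC_PAGE_SIZE is 0x200, i.e. 512-byte UMAC pages, see umac.h later in this patch), a few hand-worked values in a self-contained snippet; the snippet restates the macro locally and is not part of the original file.

#include <linux/log2.h>

#define IWM_UMAC_PAGE_SIZE 0x200	/* 512-byte UMAC pages, from umac.h */
#define BYTES_TO_PAGES(n) (1 + ((n) >> ilog2(IWM_UMAC_PAGE_SIZE)) - \
			   (((n) & (IWM_UMAC_PAGE_SIZE - 1)) == 0))

/*
 * BYTES_TO_PAGES(1)    == 1	anything up to one page costs one page
 * BYTES_TO_PAGES(512)  == 1	an exact multiple does not round up
 * BYTES_TO_PAGES(513)  == 2	one extra byte spills into a second page
 * BYTES_TO_PAGES(1536) == 3
 */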
-
-/* must be called with the tx_credit lock held */
-static int iwm_tx_credit_get(struct iwm_tx_credit *tx_credit, int id)
-{
- struct pool_entry *pool = &tx_credit->pools[id];
- struct spool_entry *spool = &tx_credit->spools[pool->sid];
- int spool_pages;
-
- /* number of pages can be taken from spool by this pool */
- spool_pages = spool->max_pages - spool->alloc_pages +
- max(pool->min_pages - pool->alloc_pages, 0);
-
- return min(pool->max_pages - pool->alloc_pages, spool_pages);
-}
-
-static bool iwm_tx_credit_ok(struct iwm_priv *iwm, int id, int nb)
-{
- u32 npages = BYTES_TO_PAGES(nb);
-
- if (npages <= iwm_tx_credit_get(&iwm->tx_credit, id))
- return 1;
-
- set_bit(id, &iwm->tx_credit.full_pools_map);
-
- IWM_DBG_TX(iwm, DBG, "LINK: stop txq[%d], available credit: %d\n",
- pool_id_to_queue(id),
- iwm_tx_credit_get(&iwm->tx_credit, id));
-
- return 0;
-}
-
-void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages)
-{
- struct pool_entry *pool;
- struct spool_entry *spool;
- int freed_pages;
- int queue;
-
- BUG_ON(id >= IWM_MACS_OUT_GROUPS);
-
- pool = &iwm->tx_credit.pools[id];
- spool = &iwm->tx_credit.spools[pool->sid];
-
- freed_pages = total_freed_pages - pool->total_freed_pages;
- IWM_DBG_TX(iwm, DBG, "Free %d pages for pool[%d]\n", freed_pages, id);
-
- if (!freed_pages) {
- IWM_DBG_TX(iwm, DBG, "No pages are freed by UMAC\n");
- return;
- } else if (freed_pages < 0)
- freed_pages += IWM_UMAC_PAGE_ALLOC_WRAP + 1;
-
- if (pool->alloc_pages > pool->min_pages) {
- int spool_pages = pool->alloc_pages - pool->min_pages;
- spool_pages = min(spool_pages, freed_pages);
- spool->alloc_pages -= spool_pages;
- }
-
- pool->alloc_pages -= freed_pages;
- pool->total_freed_pages = total_freed_pages;
-
- IWM_DBG_TX(iwm, DBG, "Pool[%d] pages alloc: %d, total_freed: %d, "
- "Spool[%d] pages alloc: %d\n", id, pool->alloc_pages,
- pool->total_freed_pages, pool->sid, spool->alloc_pages);
-
- if (test_bit(id, &iwm->tx_credit.full_pools_map) &&
- (pool->alloc_pages < pool->max_pages / 2)) {
- clear_bit(id, &iwm->tx_credit.full_pools_map);
-
- queue = pool_id_to_queue(id);
-
- IWM_DBG_TX(iwm, DBG, "LINK: start txq[%d], available "
- "credit: %d\n", queue,
- iwm_tx_credit_get(&iwm->tx_credit, id));
- queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
- }
-}
-
-static void iwm_tx_credit_dec(struct iwm_priv *iwm, int id, int alloc_pages)
-{
- struct pool_entry *pool;
- struct spool_entry *spool;
- int spool_pages;
-
- IWM_DBG_TX(iwm, DBG, "Allocate %d pages for pool[%d]\n",
- alloc_pages, id);
-
- BUG_ON(id >= IWM_MACS_OUT_GROUPS);
-
- pool = &iwm->tx_credit.pools[id];
- spool = &iwm->tx_credit.spools[pool->sid];
-
- spool_pages = pool->alloc_pages + alloc_pages - pool->min_pages;
-
- if (pool->alloc_pages >= pool->min_pages)
- spool->alloc_pages += alloc_pages;
- else if (spool_pages > 0)
- spool->alloc_pages += spool_pages;
-
- pool->alloc_pages += alloc_pages;
-
- IWM_DBG_TX(iwm, DBG, "Pool[%d] pages alloc: %d, total_freed: %d, "
- "Spool[%d] pages alloc: %d\n", id, pool->alloc_pages,
- pool->total_freed_pages, pool->sid, spool->alloc_pages);
-}
-
-int iwm_tx_credit_alloc(struct iwm_priv *iwm, int id, int nb)
-{
- u32 npages = BYTES_TO_PAGES(nb);
- int ret = 0;
-
- spin_lock(&iwm->tx_credit.lock);
-
- if (!iwm_tx_credit_ok(iwm, id, nb)) {
- IWM_DBG_TX(iwm, DBG, "No credit available for pool[%d]\n", id);
- ret = -ENOSPC;
- goto out;
- }
-
- iwm_tx_credit_dec(iwm, id, npages);
-
- out:
- spin_unlock(&iwm->tx_credit.lock);
- return ret;
-}
-
-/*
- * Since we're on an SDIO or USB bus, we are not sharing memory
- * for storing the frames to be transmitted. The host needs to push
- * them upstream. As a consequence there needs to be a way for
- * the target to let us know if it can actually take more TX frames
- * or not. This is what Tx credits are for.
- *
- * For each Tx HW queue, we have a Tx pool, and then we have one
- * unique super pool (spool), which is actually a global pool of
- * all the UMAC pages.
- * For each Tx pool we have a min_pages field, a max_pages field, and
- * an alloc_pages field. The alloc_pages field tracks the number of
- * pages currently allocated from the tx pool.
- * Here are the rules to check if given a tx frame we have enough
- * tx credits for it:
- * 1) We translate the frame length into a number of UMAC pages.
- * Let's call them n_pages.
- * 2) For the corresponding tx pool, we check if n_pages +
- * pool->alloc_pages is higher than pool->min_pages. min_pages
- *    represents a set of pre-allocated pages on the tx pool. If
- * that's the case, then we need to allocate those pages from
- * the spool. We can do so until we reach spool->max_pages.
- * 3) Each tx pool is not allowed to allocate more than pool->max_pages
- * from the spool, so once we're over min_pages, we can allocate
- * pages from the spool, but not more than max_pages.
- *
- * When the tx code path needs to send a tx frame, it checks first
- * if it has enough tx credits, following those rules. [iwm_tx_credit_get]
- * If it does, it updates the pool and spool counters and then
- * sends the frame. [iwm_tx_credit_alloc and iwm_tx_credit_dec]
- * On the other side, when the UMAC is done transmitting frames, it
- * will send a credit update notification to the host. This is when
- * the pool and spool counters get decreased. [iwm_tx_credit_inc,
- * called from rx.c:iwm_ntf_tx_credit_update]
- *
- */
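To make the three rules above concrete, a short worked example; the numbers are made up for illustration and do not come from the firmware.

/*
 * Example state (illustrative values only):
 *
 *   pool:  min_pages = 4, max_pages = 10, alloc_pages = 3
 *   spool: max_pages = 8, alloc_pages = 6
 *
 * A frame needing n_pages = 3:
 *   pool headroom:  pool->max_pages - pool->alloc_pages = 10 - 3 = 7
 *   spool headroom: (spool->max_pages - spool->alloc_pages)
 *                   + max(pool->min_pages - pool->alloc_pages, 0)
 *                   = (8 - 6) + 1 = 3
 *   credits = min(7, 3) = 3, so the frame just fits
 *   (this is exactly what iwm_tx_credit_get() computes above)
 *
 * After iwm_tx_credit_dec(iwm, id, 3):
 *   pool->alloc_pages  = 6
 *   spool->alloc_pages = 8   (the 2 pages beyond min_pages are charged
 *                             to the spool: 3 + 3 - 4 = 2)
 */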
-void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
- struct iwm_umac_notif_alive *alive)
-{
- int i, sid, pool_pages;
-
- spin_lock(&iwm->tx_credit.lock);
-
- iwm->tx_credit.pool_nr = le16_to_cpu(alive->page_grp_count);
- iwm->tx_credit.full_pools_map = 0;
- memset(&iwm->tx_credit.spools[0], 0, sizeof(struct spool_entry));
-
- IWM_DBG_TX(iwm, DBG, "Pools number is %d\n", iwm->tx_credit.pool_nr);
-
- for (i = 0; i < iwm->tx_credit.pool_nr; i++) {
- __le32 page_grp_state = alive->page_grp_state[i];
-
- iwm->tx_credit.pools[i].id = GET_VAL32(page_grp_state,
- UMAC_ALIVE_PAGE_STS_GRP_NUM);
- iwm->tx_credit.pools[i].sid = GET_VAL32(page_grp_state,
- UMAC_ALIVE_PAGE_STS_SGRP_NUM);
- iwm->tx_credit.pools[i].min_pages = GET_VAL32(page_grp_state,
- UMAC_ALIVE_PAGE_STS_GRP_MIN_SIZE);
- iwm->tx_credit.pools[i].max_pages = GET_VAL32(page_grp_state,
- UMAC_ALIVE_PAGE_STS_GRP_MAX_SIZE);
- iwm->tx_credit.pools[i].alloc_pages = 0;
- iwm->tx_credit.pools[i].total_freed_pages = 0;
-
- sid = iwm->tx_credit.pools[i].sid;
- pool_pages = iwm->tx_credit.pools[i].min_pages;
-
- if (iwm->tx_credit.spools[sid].max_pages == 0) {
- iwm->tx_credit.spools[sid].id = sid;
- iwm->tx_credit.spools[sid].max_pages =
- GET_VAL32(page_grp_state,
- UMAC_ALIVE_PAGE_STS_SGRP_MAX_SIZE);
- iwm->tx_credit.spools[sid].alloc_pages = 0;
- }
-
- iwm->tx_credit.spools[sid].alloc_pages += pool_pages;
-
- IWM_DBG_TX(iwm, DBG, "Pool idx: %d, id: %d, sid: %d, capacity "
- "min: %d, max: %d, pool alloc: %d, total_free: %d, "
-			   "super pool alloc: %d\n",
- i, iwm->tx_credit.pools[i].id,
- iwm->tx_credit.pools[i].sid,
- iwm->tx_credit.pools[i].min_pages,
- iwm->tx_credit.pools[i].max_pages,
- iwm->tx_credit.pools[i].alloc_pages,
- iwm->tx_credit.pools[i].total_freed_pages,
- iwm->tx_credit.spools[sid].alloc_pages);
- }
-
- spin_unlock(&iwm->tx_credit.lock);
-}
-
-#define IWM_UDMA_HDR_LEN sizeof(struct iwm_umac_wifi_out_hdr)
-
-static __le16 iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb,
- int pool_id, u8 *buf)
-{
- struct iwm_umac_wifi_out_hdr *hdr = (struct iwm_umac_wifi_out_hdr *)buf;
- struct iwm_udma_wifi_cmd udma_cmd;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_tx_info *tx_info = skb_to_tx_info(skb);
-
- udma_cmd.count = cpu_to_le16(skb->len +
- sizeof(struct iwm_umac_fw_cmd_hdr));
- /* set EOP to 0 here. iwm_udma_wifi_hdr_set_eop() will be
- * called later to set EOP for the last packet. */
- udma_cmd.eop = 0;
- udma_cmd.credit_group = pool_id;
- udma_cmd.ra_tid = tx_info->sta << 4 | tx_info->tid;
- udma_cmd.lmac_offset = 0;
-
- umac_cmd.id = REPLY_TX;
- umac_cmd.count = cpu_to_le16(skb->len);
- umac_cmd.color = tx_info->color;
- umac_cmd.resp = 0;
- umac_cmd.seq_num = cpu_to_le16(iwm_alloc_wifi_cmd_seq(iwm));
-
- iwm_build_udma_wifi_hdr(iwm, &hdr->hw_hdr, &udma_cmd);
- iwm_build_umac_hdr(iwm, &hdr->sw_hdr, &umac_cmd);
-
- memcpy(buf + sizeof(*hdr), skb->data, skb->len);
-
- return umac_cmd.seq_num;
-}
-
-static int iwm_tx_send_concat_packets(struct iwm_priv *iwm,
- struct iwm_tx_queue *txq)
-{
- int ret;
-
- if (!txq->concat_count)
- return 0;
-
- IWM_DBG_TX(iwm, DBG, "Send concatenated Tx: queue %d, %d bytes\n",
- txq->id, txq->concat_count);
-
- /* mark EOP for the last packet */
- iwm_udma_wifi_hdr_set_eop(iwm, txq->concat_ptr, 1);
-
- trace_iwm_tx_packets(iwm, txq->concat_buf, txq->concat_count);
- ret = iwm_bus_send_chunk(iwm, txq->concat_buf, txq->concat_count);
-
- txq->concat_count = 0;
- txq->concat_ptr = txq->concat_buf;
-
- return ret;
-}
-
-void iwm_tx_worker(struct work_struct *work)
-{
- struct iwm_priv *iwm;
- struct iwm_tx_info *tx_info = NULL;
- struct sk_buff *skb;
- struct iwm_tx_queue *txq;
- struct iwm_sta_info *sta_info;
- struct iwm_tid_info *tid_info;
- int cmdlen, ret, pool_id;
-
- txq = container_of(work, struct iwm_tx_queue, worker);
- iwm = container_of(txq, struct iwm_priv, txq[txq->id]);
-
- pool_id = queue_to_pool_id(txq->id);
-
- while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
- !skb_queue_empty(&txq->queue)) {
-
- spin_lock_bh(&txq->lock);
- skb = skb_dequeue(&txq->queue);
- spin_unlock_bh(&txq->lock);
-
- tx_info = skb_to_tx_info(skb);
- sta_info = &iwm->sta_table[tx_info->sta];
- if (!sta_info->valid) {
- IWM_ERR(iwm, "Trying to send a frame to unknown STA\n");
- kfree_skb(skb);
- continue;
- }
-
- tid_info = &sta_info->tid_info[tx_info->tid];
-
- mutex_lock(&tid_info->mutex);
-
- /*
- * If the RAxTID is stopped, we queue the skb to the stopped
- * queue.
-		 * Whenever we get a UMAC notification to resume the tx flow
- * for this RAxTID, we'll merge back the stopped queue into the
- * regular queue. See iwm_ntf_stop_resume_tx() from rx.c.
- */
- if (tid_info->stopped) {
- IWM_DBG_TX(iwm, DBG, "%dx%d stopped\n",
- tx_info->sta, tx_info->tid);
- spin_lock_bh(&txq->lock);
- skb_queue_tail(&txq->stopped_queue, skb);
- spin_unlock_bh(&txq->lock);
-
- mutex_unlock(&tid_info->mutex);
- continue;
- }
-
- cmdlen = IWM_UDMA_HDR_LEN + skb->len;
-
- IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: "
- "%d, color: %d\n", txq->id, skb, tx_info->sta,
- tx_info->color);
-
- if (txq->concat_count + cmdlen > IWM_HAL_CONCATENATE_BUF_SIZE)
- iwm_tx_send_concat_packets(iwm, txq);
-
- ret = iwm_tx_credit_alloc(iwm, pool_id, cmdlen);
- if (ret) {
- IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue "
- "%d, Tx worker stopped\n", txq->id);
- spin_lock_bh(&txq->lock);
- skb_queue_head(&txq->queue, skb);
- spin_unlock_bh(&txq->lock);
-
- mutex_unlock(&tid_info->mutex);
- break;
- }
-
- txq->concat_ptr = txq->concat_buf + txq->concat_count;
- tid_info->last_seq_num =
- iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
- txq->concat_count += ALIGN(cmdlen, 16);
-
- mutex_unlock(&tid_info->mutex);
-
- kfree_skb(skb);
- }
-
- iwm_tx_send_concat_packets(iwm, txq);
-
- if (__netif_subqueue_stopped(iwm_to_ndev(iwm), txq->id) &&
- !test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
- (skb_queue_len(&txq->queue) < IWM_TX_LIST_SIZE / 2)) {
- IWM_DBG_TX(iwm, DBG, "LINK: start netif_subqueue[%d]", txq->id);
- netif_wake_subqueue(iwm_to_ndev(iwm), txq->id);
- }
-}
-
-int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
-{
- struct iwm_priv *iwm = ndev_to_iwm(netdev);
- struct wireless_dev *wdev = iwm_to_wdev(iwm);
- struct iwm_tx_info *tx_info;
- struct iwm_tx_queue *txq;
- struct iwm_sta_info *sta_info;
- u8 *dst_addr, sta_id;
- u16 queue;
- int ret;
-
-
- if (!test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
- IWM_DBG_TX(iwm, DBG, "LINK: stop netif_all_queues: "
- "not associated\n");
- netif_tx_stop_all_queues(netdev);
- goto drop;
- }
-
- queue = skb_get_queue_mapping(skb);
- BUG_ON(queue >= IWM_TX_DATA_QUEUES); /* no iPAN yet */
-
- txq = &iwm->txq[queue];
-
- /* No free space for Tx, tx_worker is too slow */
- if ((skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) ||
- (skb_queue_len(&txq->stopped_queue) > IWM_TX_LIST_SIZE)) {
- IWM_DBG_TX(iwm, DBG, "LINK: stop netif_subqueue[%d]\n", queue);
- netif_stop_subqueue(netdev, queue);
- return NETDEV_TX_BUSY;
- }
-
- ret = ieee80211_data_from_8023(skb, netdev->dev_addr, wdev->iftype,
- iwm->bssid, 0);
- if (ret) {
- IWM_ERR(iwm, "build wifi header failed\n");
- goto drop;
- }
-
- dst_addr = ((struct ieee80211_hdr *)(skb->data))->addr1;
-
- for (sta_id = 0; sta_id < IWM_STA_TABLE_NUM; sta_id++) {
- sta_info = &iwm->sta_table[sta_id];
- if (sta_info->valid &&
- !memcmp(dst_addr, sta_info->addr, ETH_ALEN))
- break;
- }
-
- if (sta_id == IWM_STA_TABLE_NUM) {
- IWM_ERR(iwm, "STA %pM not found in sta_table, Tx ignored\n",
- dst_addr);
- goto drop;
- }
-
- tx_info = skb_to_tx_info(skb);
- tx_info->sta = sta_id;
- tx_info->color = sta_info->color;
- /* UMAC uses TID 8 (vs. 0) for non QoS packets */
- if (sta_info->qos)
- tx_info->tid = skb->priority;
- else
- tx_info->tid = IWM_UMAC_MGMT_TID;
-
- spin_lock_bh(&iwm->txq[queue].lock);
- skb_queue_tail(&iwm->txq[queue].queue, skb);
- spin_unlock_bh(&iwm->txq[queue].lock);
-
- queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
-
- netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += skb->len;
- return NETDEV_TX_OK;
-
- drop:
- netdev->stats.tx_dropped++;
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
-}
diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h
deleted file mode 100644
index 4a137d3..0000000
--- a/drivers/net/wireless/iwmc3200wifi/umac.h
+++ /dev/null
@@ -1,789 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#ifndef __IWM_UMAC_H__
-#define __IWM_UMAC_H__
-
-struct iwm_udma_in_hdr {
- __le32 cmd;
- __le32 size;
-} __packed;
-
-struct iwm_udma_out_nonwifi_hdr {
- __le32 cmd;
- __le32 addr;
- __le32 op1_sz;
- __le32 op2;
-} __packed;
-
-struct iwm_udma_out_wifi_hdr {
- __le32 cmd;
- __le32 meta_data;
-} __packed;
-
-/* Sequence numbering */
-#define UMAC_WIFI_SEQ_NUM_BASE 1
-#define UMAC_WIFI_SEQ_NUM_MAX 0x4000
-#define UMAC_NONWIFI_SEQ_NUM_BASE 1
-#define UMAC_NONWIFI_SEQ_NUM_MAX 0x10
-
-/* Address where the MAC address is stored */
-#define WICO_MAC_ADDRESS_ADDR 0x604008F8
-
-/* RA / TID */
-#define UMAC_HDI_ACT_TBL_IDX_TID_POS 0
-#define UMAC_HDI_ACT_TBL_IDX_TID_SEED 0xF
-
-#define UMAC_HDI_ACT_TBL_IDX_RA_POS 4
-#define UMAC_HDI_ACT_TBL_IDX_RA_SEED 0xF
-
-#define UMAC_HDI_ACT_TBL_IDX_RA_UMAC 0xF
-#define UMAC_HDI_ACT_TBL_IDX_TID_UMAC 0x9
-#define UMAC_HDI_ACT_TBL_IDX_TID_LMAC 0xA
-
-#define UMAC_HDI_ACT_TBL_IDX_HOST_CMD \
- ((UMAC_HDI_ACT_TBL_IDX_RA_UMAC << UMAC_HDI_ACT_TBL_IDX_RA_POS) |\
- (UMAC_HDI_ACT_TBL_IDX_TID_UMAC << UMAC_HDI_ACT_TBL_IDX_TID_POS))
-#define UMAC_HDI_ACT_TBL_IDX_UMAC_CMD \
- ((UMAC_HDI_ACT_TBL_IDX_RA_UMAC << UMAC_HDI_ACT_TBL_IDX_RA_POS) |\
- (UMAC_HDI_ACT_TBL_IDX_TID_LMAC << UMAC_HDI_ACT_TBL_IDX_TID_POS))
-
-/* STA ID and color */
-#define STA_ID_SEED (0x0f)
-#define STA_ID_POS (0)
-#define STA_ID_MSK (STA_ID_SEED << STA_ID_POS)
-
-#define STA_COLOR_SEED (0x7)
-#define STA_COLOR_POS (4)
-#define STA_COLOR_MSK (STA_COLOR_SEED << STA_COLOR_POS)
-
-#define STA_ID_N_COLOR_COLOR(id_n_color) \
- (((id_n_color) & STA_COLOR_MSK) >> STA_COLOR_POS)
-#define STA_ID_N_COLOR_ID(id_n_color) \
- (((id_n_color) & STA_ID_MSK) >> STA_ID_POS)
-
-/* iwm_umac_notif_alive.page_grp_state Group number -- bits [3:0] */
-#define UMAC_ALIVE_PAGE_STS_GRP_NUM_POS 0
-#define UMAC_ALIVE_PAGE_STS_GRP_NUM_SEED 0xF
-
-/* iwm_umac_notif_alive.page_grp_state Super group number -- bits [7:4] */
-#define UMAC_ALIVE_PAGE_STS_SGRP_NUM_POS 4
-#define UMAC_ALIVE_PAGE_STS_SGRP_NUM_SEED 0xF
-
-/* iwm_umac_notif_alive.page_grp_state Group min size -- bits [15:8] */
-#define UMAC_ALIVE_PAGE_STS_GRP_MIN_SIZE_POS 8
-#define UMAC_ALIVE_PAGE_STS_GRP_MIN_SIZE_SEED 0xFF
-
-/* iwm_umac_notif_alive.page_grp_state Group max size -- bits [23:16] */
-#define UMAC_ALIVE_PAGE_STS_GRP_MAX_SIZE_POS 16
-#define UMAC_ALIVE_PAGE_STS_GRP_MAX_SIZE_SEED 0xFF
-
-/* iwm_umac_notif_alive.page_grp_state Super group max size -- bits [31:24] */
-#define UMAC_ALIVE_PAGE_STS_SGRP_MAX_SIZE_POS 24
-#define UMAC_ALIVE_PAGE_STS_SGRP_MAX_SIZE_SEED 0xFF
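The *_POS/*_SEED pairs above are consumed through the driver's GET_VAL32() helper, which is defined in iwm.h and is not part of this diff. Assuming the usual shift-and-mask expansion, decoding one page_grp_state word would look roughly like this; the macro body and the helper function below are illustrative guesses, not code from the driver.

/* Assumed expansion -- the real GET_VAL32() lives in iwm.h. */
#define GET_VAL32(s, name) \
	((le32_to_cpu(s) >> name##_POS) & name##_SEED)

static void decode_page_grp_state(__le32 page_grp_state)
{
	u32 grp      = GET_VAL32(page_grp_state, UMAC_ALIVE_PAGE_STS_GRP_NUM);
	u32 sgrp     = GET_VAL32(page_grp_state, UMAC_ALIVE_PAGE_STS_SGRP_NUM);
	u32 grp_min  = GET_VAL32(page_grp_state, UMAC_ALIVE_PAGE_STS_GRP_MIN_SIZE);
	u32 grp_max  = GET_VAL32(page_grp_state, UMAC_ALIVE_PAGE_STS_GRP_MAX_SIZE);
	u32 sgrp_max = GET_VAL32(page_grp_state, UMAC_ALIVE_PAGE_STS_SGRP_MAX_SIZE);

	/* tx.c:iwm_tx_credit_init_pools() feeds these into its pool/spool setup */
	pr_debug("grp %u sgrp %u min %u max %u sgrp_max %u\n",
		 grp, sgrp, grp_min, grp_max, sgrp_max);
}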
-
-/* Barkers */
-#define UMAC_REBOOT_BARKER 0xdeadbeef
-#define UMAC_ACK_BARKER 0xfeedbabe
-#define UMAC_PAD_TERMINAL 0xadadadad
-
-/* UMAC JMP address */
-#define UMAC_MU_FW_INST_DATA_12_ADDR 0xBF0000
-
-/* iwm_umac_hdi_out_hdr.cmd OP code -- bits [3:0] */
-#define UMAC_HDI_OUT_CMD_OPCODE_POS 0
-#define UMAC_HDI_OUT_CMD_OPCODE_SEED 0xF
-
-/* iwm_umac_hdi_out_hdr.cmd End-Of-Transfer -- bits [10:10] */
-#define UMAC_HDI_OUT_CMD_EOT_POS 10
-#define UMAC_HDI_OUT_CMD_EOT_SEED 0x1
-
-/* iwm_umac_hdi_out_hdr.cmd UTFD only usage -- bits [11:11] */
-#define UMAC_HDI_OUT_CMD_UTFD_ONLY_POS 11
-#define UMAC_HDI_OUT_CMD_UTFD_ONLY_SEED 0x1
-
-/* iwm_umac_hdi_out_hdr.cmd Non-WiFi HW sequence number -- bits [15:12] */
-#define UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM_POS 12
-#define UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM_SEED 0xF
-
-/* iwm_umac_hdi_out_hdr.cmd Signature -- bits [31:16] */
-#define UMAC_HDI_OUT_CMD_SIGNATURE_POS 16
-#define UMAC_HDI_OUT_CMD_SIGNATURE_SEED 0xFFFF
-
-/* iwm_umac_hdi_out_hdr.meta_data Byte count -- bits [11:0] */
-#define UMAC_HDI_OUT_BYTE_COUNT_POS 0
-#define UMAC_HDI_OUT_BYTE_COUNT_SEED 0xFFF
-
-/* iwm_umac_hdi_out_hdr.meta_data Credit group -- bits [15:12] */
-#define UMAC_HDI_OUT_CREDIT_GRP_POS 12
-#define UMAC_HDI_OUT_CREDIT_GRP_SEED 0xF
-
-/* iwm_umac_hdi_out_hdr.meta_data RA/TID -- bits [23:16] */
-#define UMAC_HDI_OUT_RATID_POS 16
-#define UMAC_HDI_OUT_RATID_SEED 0xFF
-
-/* iwm_umac_hdi_out_hdr.meta_data LMAC offset -- bits [31:24] */
-#define UMAC_HDI_OUT_LMAC_OFFSET_POS 24
-#define UMAC_HDI_OUT_LMAC_OFFSET_SEED 0xFF
-
-/* Signature */
-#define UMAC_HDI_OUT_SIGNATURE 0xCBBC
-
-/* buffer alignment */
-#define UMAC_HDI_BUF_ALIGN_MSK 0xF
-
-/* iwm_umac_hdi_in_hdr.cmd OP code -- bits [3:0] */
-#define UMAC_HDI_IN_CMD_OPCODE_POS 0
-#define UMAC_HDI_IN_CMD_OPCODE_SEED 0xF
-
-/* iwm_umac_hdi_in_hdr.cmd Non-WiFi API response -- bits [6:4] */
-#define UMAC_HDI_IN_CMD_NON_WIFI_RESP_POS 4
-#define UMAC_HDI_IN_CMD_NON_WIFI_RESP_SEED 0x7
-
-/* iwm_umac_hdi_in_hdr.cmd WiFi API source -- bits [5:4] */
-#define UMAC_HDI_IN_CMD_SOURCE_POS 4
-#define UMAC_HDI_IN_CMD_SOURCE_SEED 0x3
-
-/* iwm_umac_hdi_in_hdr.cmd WiFi API EOT -- bits [6:6] */
-#define UMAC_HDI_IN_CMD_EOT_POS 6
-#define UMAC_HDI_IN_CMD_EOT_SEED 0x1
-
-/* iwm_umac_hdi_in_hdr.cmd timestamp present -- bits [7:7] */
-#define UMAC_HDI_IN_CMD_TIME_STAMP_PRESENT_POS 7
-#define UMAC_HDI_IN_CMD_TIME_STAMP_PRESENT_SEED 0x1
-
-/* iwm_umac_hdi_in_hdr.cmd WiFi Non-last AMSDU -- bits [8:8] */
-#define UMAC_HDI_IN_CMD_NON_LAST_AMSDU_POS 8
-#define UMAC_HDI_IN_CMD_NON_LAST_AMSDU_SEED 0x1
-
-/* iwm_umac_hdi_in_hdr.cmd WiFi HW sequence number -- bits [31:9] */
-#define UMAC_HDI_IN_CMD_HW_SEQ_NUM_POS 9
-#define UMAC_HDI_IN_CMD_HW_SEQ_NUM_SEED 0x7FFFFF
-
-/* iwm_umac_hdi_in_hdr.cmd Non-WiFi HW sequence number -- bits [15:12] */
-#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM_POS 12
-#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM_SEED 0xF
-
-/* iwm_umac_hdi_in_hdr.cmd Non-WiFi HW signature -- bits [31:16] */
-#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG_POS 16
-#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG_SEED 0xFFFF
-
-/* Fixed Non-WiFi signature */
-#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG 0xCBBC
-
-/* IN NTFY op-codes */
-#define UMAC_NOTIFY_OPCODE_ALIVE 0xA1
-#define UMAC_NOTIFY_OPCODE_INIT_COMPLETE 0xA2
-#define UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS 0xA3
-#define UMAC_NOTIFY_OPCODE_ERROR 0xA4
-#define UMAC_NOTIFY_OPCODE_DEBUG 0xA5
-#define UMAC_NOTIFY_OPCODE_WIFI_IF_WRAPPER 0xB0
-#define UMAC_NOTIFY_OPCODE_STATS 0xB1
-#define UMAC_NOTIFY_OPCODE_PAGE_DEALLOC 0xB3
-#define UMAC_NOTIFY_OPCODE_RX_TICKET 0xB4
-#define UMAC_NOTIFY_OPCODE_MAX (UMAC_NOTIFY_OPCODE_RX_TICKET -\
- UMAC_NOTIFY_OPCODE_ALIVE + 1)
-#define UMAC_NOTIFY_OPCODE_FIRST (UMAC_NOTIFY_OPCODE_ALIVE)
-
-/* HDI OUT OP CODE */
-#define UMAC_HDI_OUT_OPCODE_PING 0x0
-#define UMAC_HDI_OUT_OPCODE_READ 0x1
-#define UMAC_HDI_OUT_OPCODE_WRITE 0x2
-#define UMAC_HDI_OUT_OPCODE_JUMP 0x3
-#define UMAC_HDI_OUT_OPCODE_REBOOT 0x4
-#define UMAC_HDI_OUT_OPCODE_WRITE_PERSISTENT 0x5
-#define UMAC_HDI_OUT_OPCODE_READ_PERSISTENT 0x6
-#define UMAC_HDI_OUT_OPCODE_READ_MODIFY_WRITE 0x7
-/* #define UMAC_HDI_OUT_OPCODE_RESERVED 0x8..0xA */
-#define UMAC_HDI_OUT_OPCODE_WRITE_AUX_REG 0xB
-#define UMAC_HDI_OUT_OPCODE_WIFI 0xF
-
-/* HDI IN OP CODE -- Non WiFi*/
-#define UMAC_HDI_IN_OPCODE_PING 0x0
-#define UMAC_HDI_IN_OPCODE_READ 0x1
-#define UMAC_HDI_IN_OPCODE_WRITE 0x2
-#define UMAC_HDI_IN_OPCODE_WRITE_PERSISTENT 0x5
-#define UMAC_HDI_IN_OPCODE_READ_PERSISTENT 0x6
-#define UMAC_HDI_IN_OPCODE_READ_MODIFY_WRITE 0x7
-#define UMAC_HDI_IN_OPCODE_EP_MGMT 0x8
-#define UMAC_HDI_IN_OPCODE_CREDIT_CHANGE 0x9
-#define UMAC_HDI_IN_OPCODE_CTRL_DATABASE 0xA
-#define UMAC_HDI_IN_OPCODE_WRITE_AUX_REG 0xB
-#define UMAC_HDI_IN_OPCODE_NONWIFI_MAX \
- (UMAC_HDI_IN_OPCODE_WRITE_AUX_REG + 1)
-#define UMAC_HDI_IN_OPCODE_WIFI 0xF
-
-/* HDI IN SOURCE */
-#define UMAC_HDI_IN_SOURCE_FHRX 0x0
-#define UMAC_HDI_IN_SOURCE_UDMA 0x1
-#define UMAC_HDI_IN_SOURCE_FW 0x2
-#define UMAC_HDI_IN_SOURCE_RESERVED 0x3
-
-/* OUT CMD op-codes */
-#define UMAC_CMD_OPCODE_ECHO 0x01
-#define UMAC_CMD_OPCODE_HALT 0x02
-#define UMAC_CMD_OPCODE_RESET 0x03
-#define UMAC_CMD_OPCODE_BULK_EP_INACT_TIMEOUT 0x09
-#define UMAC_CMD_OPCODE_URB_CANCEL_ACK 0x0A
-#define UMAC_CMD_OPCODE_DCACHE_FLUSH 0x0B
-#define UMAC_CMD_OPCODE_EEPROM_PROXY 0x0C
-#define UMAC_CMD_OPCODE_TX_ECHO 0x0D
-#define UMAC_CMD_OPCODE_DBG_MON 0x0E
-#define UMAC_CMD_OPCODE_INTERNAL_TX 0x0F
-#define UMAC_CMD_OPCODE_SET_PARAM_FIX 0x10
-#define UMAC_CMD_OPCODE_SET_PARAM_VAR 0x11
-#define UMAC_CMD_OPCODE_GET_PARAM 0x12
-#define UMAC_CMD_OPCODE_DBG_EVENT_WRAPPER 0x13
-#define UMAC_CMD_OPCODE_TARGET 0x14
-#define UMAC_CMD_OPCODE_STATISTIC_REQUEST 0x15
-#define UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST 0x16
-#define UMAC_CMD_OPCODE_SET_PARAM_LIST 0x17
-#define UMAC_CMD_OPCODE_GET_PARAM_LIST 0x18
-#define UMAC_CMD_OPCODE_STOP_RESUME_STA_TX 0x19
-#define UMAC_CMD_OPCODE_TEST_BLOCK_ACK 0x1A
-
-#define UMAC_CMD_OPCODE_BASE_WRAPPER 0xFA
-#define UMAC_CMD_OPCODE_LMAC_WRAPPER 0xFB
-#define UMAC_CMD_OPCODE_HW_TEST_WRAPPER 0xFC
-#define UMAC_CMD_OPCODE_WIFI_IF_WRAPPER 0xFD
-#define UMAC_CMD_OPCODE_WIFI_WRAPPER 0xFE
-#define UMAC_CMD_OPCODE_WIFI_PASS_THROUGH 0xFF
-
-/* UMAC WiFi interface op-codes */
-#define UMAC_WIFI_IF_CMD_SET_PROFILE 0x11
-#define UMAC_WIFI_IF_CMD_INVALIDATE_PROFILE 0x12
-#define UMAC_WIFI_IF_CMD_SET_EXCLUDE_LIST 0x13
-#define UMAC_WIFI_IF_CMD_SCAN_REQUEST 0x14
-#define UMAC_WIFI_IF_CMD_SCAN_CONFIG 0x15
-#define UMAC_WIFI_IF_CMD_ADD_WEP40_KEY 0x16
-#define UMAC_WIFI_IF_CMD_ADD_WEP104_KEY 0x17
-#define UMAC_WIFI_IF_CMD_ADD_TKIP_KEY 0x18
-#define UMAC_WIFI_IF_CMD_ADD_CCMP_KEY 0x19
-#define UMAC_WIFI_IF_CMD_REMOVE_KEY 0x1A
-#define UMAC_WIFI_IF_CMD_GLOBAL_TX_KEY_ID 0x1B
-#define UMAC_WIFI_IF_CMD_SET_HOST_EXTENDED_IE 0x1C
-#define UMAC_WIFI_IF_CMD_GET_SUPPORTED_CHANNELS 0x1E
-#define UMAC_WIFI_IF_CMD_PMKID_UPDATE 0x1F
-#define UMAC_WIFI_IF_CMD_TX_PWR_TRIGGER 0x20
-
-/* UMAC WiFi interface ports */
-#define UMAC_WIFI_IF_FLG_PORT_DEF 0x00
-#define UMAC_WIFI_IF_FLG_PORT_PAN 0x01
-#define UMAC_WIFI_IF_FLG_PORT_PAN_INVALID WIFI_IF_FLG_PORT_DEF
-
-/* UMAC WiFi interface actions */
-#define UMAC_WIFI_IF_FLG_ACT_GET 0x10
-#define UMAC_WIFI_IF_FLG_ACT_SET 0x20
-
-/* iwm_umac_fw_cmd_hdr.meta_data byte count -- bits [11:0] */
-#define UMAC_FW_CMD_BYTE_COUNT_POS 0
-#define UMAC_FW_CMD_BYTE_COUNT_SEED 0xFFF
-
-/* iwm_umac_fw_cmd_hdr.meta_data status -- bits [15:12] */
-#define UMAC_FW_CMD_STATUS_POS 12
-#define UMAC_FW_CMD_STATUS_SEED 0xF
-
-/* iwm_umac_fw_cmd_hdr.meta_data full TX command by Driver -- bits [16:16] */
-#define UMAC_FW_CMD_TX_DRV_FULL_CMD_POS 16
-#define UMAC_FW_CMD_TX_DRV_FULL_CMD_SEED 0x1
-
-/* iwm_umac_fw_cmd_hdr.meta_data TX command by FW -- bits [17:17] */
-#define UMAC_FW_CMD_TX_FW_CMD_POS 17
-#define UMAC_FW_CMD_TX_FW_CMD_SEED 0x1
-
-/* iwm_umac_fw_cmd_hdr.meta_data TX plaintext mode -- bits [18:18] */
-#define UMAC_FW_CMD_TX_PLAINTEXT_POS 18
-#define UMAC_FW_CMD_TX_PLAINTEXT_SEED 0x1
-
-/* iwm_umac_fw_cmd_hdr.meta_data STA color -- bits [22:20] */
-#define UMAC_FW_CMD_TX_STA_COLOR_POS 20
-#define UMAC_FW_CMD_TX_STA_COLOR_SEED 0x7
-
-/* iwm_umac_fw_cmd_hdr.meta_data TX life time (TU) -- bits [31:24] */
-#define UMAC_FW_CMD_TX_LIFETIME_TU_POS 24
-#define UMAC_FW_CMD_TX_LIFETIME_TU_SEED 0xFF
-
-/* iwm_dev_cmd_hdr.flags Response required -- bits [5:5] */
-#define UMAC_DEV_CMD_FLAGS_RESP_REQ_POS 5
-#define UMAC_DEV_CMD_FLAGS_RESP_REQ_SEED 0x1
-
-/* iwm_dev_cmd_hdr.flags Aborted command -- bits [6:6] */
-#define UMAC_DEV_CMD_FLAGS_ABORT_POS 6
-#define UMAC_DEV_CMD_FLAGS_ABORT_SEED 0x1
-
-/* iwm_dev_cmd_hdr.flags Internal command -- bits [7:7] */
-#define DEV_CMD_FLAGS_FLD_INTERNAL_POS 7
-#define DEV_CMD_FLAGS_FLD_INTERNAL_SEED 0x1
-
-/* Rx */
-/* Rx actions */
-#define IWM_RX_TICKET_DROP 0x0
-#define IWM_RX_TICKET_RELEASE 0x1
-#define IWM_RX_TICKET_SNIFFER 0x2
-#define IWM_RX_TICKET_ENQUEUE 0x3
-
-/* Rx flags */
-#define IWM_RX_TICKET_PAD_SIZE_MSK 0x2
-#define IWM_RX_TICKET_SPECIAL_SNAP_MSK 0x4
-#define IWM_RX_TICKET_AMSDU_MSK 0x8
-#define IWM_RX_TICKET_DROP_REASON_POS 4
-#define IWM_RX_TICKET_DROP_REASON_MSK (0x1F << IWM_RX_TICKET_DROP_REASON_POS)
-
-#define IWM_RX_DROP_NO_DROP 0x0
-#define IWM_RX_DROP_BAD_CRC 0x1
-/* L2P no address match */
-#define IWM_RX_DROP_LMAC_ADDR_FILTER 0x2
-/* Multicast address not in list */
-#define IWM_RX_DROP_MCAST_ADDR_FILTER 0x3
-/* Control frames are not sent to the driver */
-#define IWM_RX_DROP_CTL_FRAME 0x4
-/* Our frame is back */
-#define IWM_RX_DROP_OUR_TX 0x5
-/* Association class filtering */
-#define IWM_RX_DROP_CLASS_FILTER 0x6
-/* Duplicated frame */
-#define IWM_RX_DROP_DUPLICATE_FILTER 0x7
-/* Decryption error */
-#define IWM_RX_DROP_SEC_ERR 0x8
-/* Unencrypted frame while encryption is on */
-#define IWM_RX_DROP_SEC_NO_ENCRYPTION 0x9
-/* Replay check failure */
-#define IWM_RX_DROP_SEC_REPLAY_ERR 0xa
-/* uCode and FW key color mismatch, check before replay */
-#define IWM_RX_DROP_SEC_KEY_COLOR_MISMATCH 0xb
-#define IWM_RX_DROP_SEC_TKIP_COUNTER_MEASURE 0xc
-/* No fragmentation Db is found */
-#define IWM_RX_DROP_FRAG_NO_RESOURCE 0xd
-/* Fragmentation Db has seqCtl mismatch vs. non-1st frag */
-#define IWM_RX_DROP_FRAG_ERR 0xe
-#define IWM_RX_DROP_FRAG_LOST 0xf
-#define IWM_RX_DROP_FRAG_COMPLETE 0x10
-/* Should be handled by UMAC */
-#define IWM_RX_DROP_MANAGEMENT 0x11
-/* STA not found by UMAC */
-#define IWM_RX_DROP_NO_STATION 0x12
-/* NULL or QoS NULL */
-#define IWM_RX_DROP_NULL_DATA 0x13
-#define IWM_RX_DROP_BA_REORDER_OLD_SEQCTL 0x14
-#define IWM_RX_DROP_BA_REORDER_DUPLICATE 0x15
-
-struct iwm_rx_ticket {
- __le16 action;
- __le16 id;
- __le16 flags;
- u8 payload_offset; /* includes: MAC header, pad, IV */
- u8 tail_len; /* includes: MIC, ICV, CRC (w/o STATUS) */
-} __packed;
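The drop reason encoded in iwm_rx_ticket.flags can be recovered with the IWM_RX_TICKET_DROP_REASON_* pair above; a minimal illustrative helper, assuming the obvious mask-and-shift (not taken from the original rx path):

static inline u8 iwm_rx_ticket_drop_reason(const struct iwm_rx_ticket *ticket)
{
	/* flags is little-endian on the wire; the reason sits in bits [8:4] */
	return (le16_to_cpu(ticket->flags) & IWM_RX_TICKET_DROP_REASON_MSK)
		>> IWM_RX_TICKET_DROP_REASON_POS;
}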
-
-struct iwm_rx_mpdu_hdr {
- __le16 len;
- __le16 reserved;
-} __packed;
-
-/* UMAC SW WIFI API */
-
-struct iwm_dev_cmd_hdr {
- u8 cmd;
- u8 flags;
- __le16 seq_num;
-} __packed;
-
-struct iwm_umac_fw_cmd_hdr {
- __le32 meta_data;
- struct iwm_dev_cmd_hdr cmd;
-} __packed;
-
-struct iwm_umac_wifi_out_hdr {
- struct iwm_udma_out_wifi_hdr hw_hdr;
- struct iwm_umac_fw_cmd_hdr sw_hdr;
-} __packed;
-
-struct iwm_umac_nonwifi_out_hdr {
- struct iwm_udma_out_nonwifi_hdr hw_hdr;
-} __packed;
-
-struct iwm_umac_wifi_in_hdr {
- struct iwm_udma_in_hdr hw_hdr;
- struct iwm_umac_fw_cmd_hdr sw_hdr;
-} __packed;
-
-struct iwm_umac_nonwifi_in_hdr {
- struct iwm_udma_in_hdr hw_hdr;
- __le32 time_stamp;
-} __packed;
-
-#define IWM_UMAC_PAGE_SIZE 0x200
-
-/* Notify structures */
-struct iwm_fw_version {
- u8 minor;
- u8 major;
- __le16 id;
-};
-
-struct iwm_fw_build {
- u8 type;
- u8 subtype;
- u8 platform;
- u8 opt;
-};
-
-struct iwm_fw_alive_hdr {
- struct iwm_fw_version ver;
- struct iwm_fw_build build;
- __le32 os_build;
- __le32 log_hdr_addr;
- __le32 log_buf_addr;
- __le32 sys_timer_addr;
-};
-
-#define WAIT_NOTIF_TIMEOUT (2 * HZ)
-#define SCAN_COMPLETE_TIMEOUT (3 * HZ)
-
-#define UMAC_NTFY_ALIVE_STATUS_ERR 0xDEAD
-#define UMAC_NTFY_ALIVE_STATUS_OK 0xCAFE
-
-#define UMAC_NTFY_INIT_COMPLETE_STATUS_ERR 0xDEAD
-#define UMAC_NTFY_INIT_COMPLETE_STATUS_OK 0xCAFE
-
-#define UMAC_NTFY_WIFI_CORE_STATUS_LINK_EN 0x40
-#define UMAC_NTFY_WIFI_CORE_STATUS_MLME_EN 0x80
-
-#define IWM_MACS_OUT_GROUPS 6
-#define IWM_MACS_OUT_SGROUPS 1
-
-
-#define WIFI_IF_NTFY_ASSOC_START 0x80
-#define WIFI_IF_NTFY_ASSOC_COMPLETE 0x81
-#define WIFI_IF_NTFY_PROFILE_INVALIDATE_COMPLETE 0x82
-#define WIFI_IF_NTFY_CONNECTION_TERMINATED 0x83
-#define WIFI_IF_NTFY_SCAN_COMPLETE 0x84
-#define WIFI_IF_NTFY_STA_TABLE_CHANGE 0x85
-#define WIFI_IF_NTFY_EXTENDED_IE_REQUIRED 0x86
-#define WIFI_IF_NTFY_RADIO_PREEMPTION 0x87
-#define WIFI_IF_NTFY_BSS_TRK_TABLE_CHANGED 0x88
-#define WIFI_IF_NTFY_BSS_TRK_ENTRIES_REMOVED 0x89
-#define WIFI_IF_NTFY_LINK_QUALITY_STATISTICS 0x8A
-#define WIFI_IF_NTFY_MGMT_FRAME 0x8B
-
-/* DEBUG INDICATIONS */
-#define WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_START 0xE0
-#define WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_COMPLETE 0xE1
-#define WIFI_DBG_IF_NTFY_SCAN_CHANNEL_START 0xE2
-#define WIFI_DBG_IF_NTFY_SCAN_CHANNEL_RESULT 0xE3
-#define WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_START 0xE4
-#define WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_COMPLETE 0xE5
-#define WIFI_DBG_IF_NTFY_CNCT_ATC_START 0xE6
-#define WIFI_DBG_IF_NTFY_COEX_NOTIFICATION 0xE7
-#define WIFI_DBG_IF_NTFY_COEX_HANDLE_ENVELOP 0xE8
-#define WIFI_DBG_IF_NTFY_COEX_HANDLE_RELEASE_ENVELOP 0xE9
-
-#define WIFI_IF_NTFY_MAX 0xff
-
-/* Notification structures */
-struct iwm_umac_notif_wifi_if {
- struct iwm_umac_wifi_in_hdr hdr;
- u8 status;
- u8 flags;
- __le16 buf_size;
-} __packed;
-
-#define UMAC_ROAM_REASON_FIRST_SELECTION 0x1
-#define UMAC_ROAM_REASON_AP_DEAUTH 0x2
-#define UMAC_ROAM_REASON_AP_CONNECT_LOST 0x3
-#define UMAC_ROAM_REASON_RSSI 0x4
-#define UMAC_ROAM_REASON_AP_ASSISTED_ROAM 0x5
-#define UMAC_ROAM_REASON_IBSS_COALESCING 0x6
-
-struct iwm_umac_notif_assoc_start {
- struct iwm_umac_notif_wifi_if mlme_hdr;
- __le32 roam_reason;
- u8 bssid[ETH_ALEN];
- u8 reserved[2];
-} __packed;
-
-#define UMAC_ASSOC_COMPLETE_SUCCESS 0x0
-#define UMAC_ASSOC_COMPLETE_FAILURE 0x1
-
-struct iwm_umac_notif_assoc_complete {
- struct iwm_umac_notif_wifi_if mlme_hdr;
- __le32 status;
- u8 bssid[ETH_ALEN];
- u8 band;
- u8 channel;
-} __packed;
-
-#define UMAC_PROFILE_INVALID_ASSOC_TIMEOUT 0x0
-#define UMAC_PROFILE_INVALID_ROAM_TIMEOUT 0x1
-#define UMAC_PROFILE_INVALID_REQUEST 0x2
-#define UMAC_PROFILE_INVALID_RF_PREEMPTED 0x3
-
-struct iwm_umac_notif_profile_invalidate {
- struct iwm_umac_notif_wifi_if mlme_hdr;
- __le32 reason;
-} __packed;
-
-#define UMAC_SCAN_RESULT_SUCCESS 0x0
-#define UMAC_SCAN_RESULT_ABORTED 0x1
-#define UMAC_SCAN_RESULT_REJECTED 0x2
-#define UMAC_SCAN_RESULT_FAILED 0x3
-
-struct iwm_umac_notif_scan_complete {
- struct iwm_umac_notif_wifi_if mlme_hdr;
- __le32 type;
- __le32 result;
- u8 seq_num;
-} __packed;
-
-#define UMAC_OPCODE_ADD_MODIFY 0x0
-#define UMAC_OPCODE_REMOVE 0x1
-#define UMAC_OPCODE_CLEAR_ALL 0x2
-
-#define UMAC_STA_FLAG_QOS 0x1
-
-struct iwm_umac_notif_sta_info {
- struct iwm_umac_notif_wifi_if mlme_hdr;
- __le32 opcode;
- u8 mac_addr[ETH_ALEN];
- u8 sta_id; /* bits 0-3: station ID, bits 4-7: station color */
- u8 flags;
-} __packed;
-
-#define UMAC_BAND_2GHZ 0
-#define UMAC_BAND_5GHZ 1
-
-#define UMAC_CHANNEL_WIDTH_20MHZ 0
-#define UMAC_CHANNEL_WIDTH_40MHZ 1
-
-struct iwm_umac_notif_bss_info {
- struct iwm_umac_notif_wifi_if mlme_hdr;
- __le32 type;
- __le32 timestamp;
- __le16 table_idx;
- __le16 frame_len;
- u8 band;
- u8 channel;
- s8 rssi;
- u8 reserved;
- u8 frame_buf[1];
-} __packed;
-
-#define IWM_BSS_REMOVE_INDEX_MSK 0x0fff
-#define IWM_BSS_REMOVE_FLAGS_MSK 0xfc00
-
-#define IWM_BSS_REMOVE_FLG_AGE 0x1000
-#define IWM_BSS_REMOVE_FLG_TIMEOUT 0x2000
-#define IWM_BSS_REMOVE_FLG_TABLE_FULL 0x4000
-
-struct iwm_umac_notif_bss_removed {
- struct iwm_umac_notif_wifi_if mlme_hdr;
- __le32 count;
- __le16 entries[0];
-} __packed;
-
-struct iwm_umac_notif_mgt_frame {
- struct iwm_umac_notif_wifi_if mlme_hdr;
- __le16 len;
- u8 frame[1];
-} __packed;
-
-struct iwm_umac_notif_alive {
- struct iwm_umac_wifi_in_hdr hdr;
- __le16 status;
- __le16 reserved1;
- struct iwm_fw_alive_hdr alive_data;
- __le16 reserved2;
- __le16 page_grp_count;
- __le32 page_grp_state[IWM_MACS_OUT_GROUPS];
-} __packed;
-
-struct iwm_umac_notif_init_complete {
- struct iwm_umac_wifi_in_hdr hdr;
- __le16 status;
- __le16 reserved;
-} __packed;
-
-/* error categories */
-enum {
- UMAC_SYS_ERR_CAT_NONE = 0,
- UMAC_SYS_ERR_CAT_BOOT,
- UMAC_SYS_ERR_CAT_UMAC,
- UMAC_SYS_ERR_CAT_UAXM,
- UMAC_SYS_ERR_CAT_LMAC,
- UMAC_SYS_ERR_CAT_MAX
-};
-
-struct iwm_fw_error_hdr {
- __le32 category;
- __le32 status;
- __le32 pc;
- __le32 blink1;
- __le32 blink2;
- __le32 ilink1;
- __le32 ilink2;
- __le32 data1;
- __le32 data2;
- __le32 line_num;
- __le32 umac_status;
- __le32 lmac_status;
- __le32 sdio_status;
- __le32 dbm_sample_ctrl;
- __le32 dbm_buf_base;
- __le32 dbm_buf_end;
- __le32 dbm_buf_write_ptr;
- __le32 dbm_buf_cycle_cnt;
-} __packed;
-
-struct iwm_umac_notif_error {
- struct iwm_umac_wifi_in_hdr hdr;
- struct iwm_fw_error_hdr err;
-} __packed;
-
-#define UMAC_DEALLOC_NTFY_CHANGES_CNT_POS 0
-#define UMAC_DEALLOC_NTFY_CHANGES_CNT_SEED 0xff
-#define UMAC_DEALLOC_NTFY_CHANGES_MSK_POS 8
-#define UMAC_DEALLOC_NTFY_CHANGES_MSK_SEED 0xffffff
-#define UMAC_DEALLOC_NTFY_PAGE_CNT_POS 0
-#define UMAC_DEALLOC_NTFY_PAGE_CNT_SEED 0xffffff
-#define UMAC_DEALLOC_NTFY_GROUP_NUM_POS 24
-#define UMAC_DEALLOC_NTFY_GROUP_NUM_SEED 0xf
-
-struct iwm_umac_notif_page_dealloc {
- struct iwm_umac_wifi_in_hdr hdr;
- __le32 changes;
- __le32 grp_info[IWM_MACS_OUT_GROUPS];
-} __packed;
-
-struct iwm_umac_notif_wifi_status {
- struct iwm_umac_wifi_in_hdr hdr;
- __le16 status;
- __le16 reserved;
-} __packed;
-
-struct iwm_umac_notif_rx_ticket {
- struct iwm_umac_wifi_in_hdr hdr;
- u8 num_tickets;
- u8 reserved[3];
- struct iwm_rx_ticket tickets[1];
-} __packed;
-
-/* Tx/Rx rate samples (per-second maximums kept from the last update window) */
-#define UMAC_NTF_RATE_SAMPLE_NR 4
-
-/* Max number of bits required to go through all antennae in bitmasks */
-#define UMAC_PHY_NUM_CHAINS 3
-
-#define IWM_UMAC_MGMT_TID 8
-#define IWM_UMAC_TID_NR 9 /* 8 TIDs + MGMT */
-
-struct iwm_umac_notif_stats {
- struct iwm_umac_wifi_in_hdr hdr;
- __le32 flags;
- __le32 timestamp;
- __le16 tid_load[IWM_UMAC_TID_NR + 1]; /* 1 non-QoS + 1 dword align */
- __le16 tx_rate[UMAC_NTF_RATE_SAMPLE_NR];
- __le16 rx_rate[UMAC_NTF_RATE_SAMPLE_NR];
- __le32 chain_energy[UMAC_PHY_NUM_CHAINS];
- s32 rssi_dbm;
- s32 noise_dbm;
- __le32 supp_rates;
- __le32 supp_ht_rates;
- __le32 missed_beacons;
- __le32 rx_beacons;
- __le32 rx_dir_pkts;
- __le32 rx_nondir_pkts;
- __le32 rx_multicast;
- __le32 rx_errors;
- __le32 rx_drop_other_bssid;
- __le32 rx_drop_decode;
- __le32 rx_drop_reassembly;
- __le32 rx_drop_bad_len;
- __le32 rx_drop_overflow;
- __le32 rx_drop_crc;
- __le32 rx_drop_missed;
- __le32 tx_dir_pkts;
- __le32 tx_nondir_pkts;
- __le32 tx_failure;
- __le32 tx_errors;
- __le32 tx_drop_max_retry;
- __le32 tx_err_abort;
- __le32 tx_err_carrier;
- __le32 rx_bytes;
- __le32 tx_bytes;
- __le32 tx_power;
- __le32 tx_max_power;
- __le32 roam_threshold;
- __le32 ap_assoc_nr;
- __le32 scan_full;
- __le32 scan_abort;
- __le32 ap_nr;
- __le32 roam_nr;
- __le32 roam_missed_beacons;
- __le32 roam_rssi;
- __le32 roam_unassoc;
- __le32 roam_deauth;
- __le32 roam_ap_loadblance;
-} __packed;
-
-#define UMAC_STOP_TX_FLAG 0x1
-#define UMAC_RESUME_TX_FLAG 0x2
-
-#define LAST_SEQ_NUM_INVALID 0xFFFF
-
-struct iwm_umac_notif_stop_resume_tx {
- struct iwm_umac_wifi_in_hdr hdr;
- u8 flags; /* UMAC_*_TX_FLAG_* */
- u8 sta_id;
- __le16 stop_resume_tid_msk; /* tid bitmask */
-} __packed;
-
-#define UMAC_MAX_NUM_PMKIDS 4
-
-/* WiFi interface wrapper header */
-struct iwm_umac_wifi_if {
- u8 oid;
- u8 flags;
- __le16 buf_size;
-} __packed;
-
-#define IWM_SEQ_NUM_HOST_MSK 0x0000
-#define IWM_SEQ_NUM_UMAC_MSK 0x4000
-#define IWM_SEQ_NUM_LMAC_MSK 0x8000
-#define IWM_SEQ_NUM_MSK 0xC000
-
-#endif
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 2fa879b..eb5de80 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -435,24 +435,40 @@ static int lbs_add_wpa_tlv(u8 *tlv, const u8 *ie, u8 ie_len)
* Set Channel
*/
-static int lbs_cfg_set_channel(struct wiphy *wiphy,
- struct net_device *netdev,
- struct ieee80211_channel *channel,
- enum nl80211_channel_type channel_type)
+static int lbs_cfg_set_monitor_channel(struct wiphy *wiphy,
+ struct ieee80211_channel *channel,
+ enum nl80211_channel_type channel_type)
{
struct lbs_private *priv = wiphy_priv(wiphy);
int ret = -ENOTSUPP;
- lbs_deb_enter_args(LBS_DEB_CFG80211, "iface %s freq %d, type %d",
- netdev_name(netdev), channel->center_freq, channel_type);
+ lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d",
+ channel->center_freq, channel_type);
if (channel_type != NL80211_CHAN_NO_HT)
goto out;
- if (netdev == priv->mesh_dev)
- ret = lbs_mesh_set_channel(priv, channel->hw_value);
- else
- ret = lbs_set_channel(priv, channel->hw_value);
+ ret = lbs_set_channel(priv, channel->hw_value);
+
+ out:
+ lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+ return ret;
+}
+
+static int lbs_cfg_set_mesh_channel(struct wiphy *wiphy,
+ struct net_device *netdev,
+ struct ieee80211_channel *channel)
+{
+ struct lbs_private *priv = wiphy_priv(wiphy);
+ int ret = -ENOTSUPP;
+
+ lbs_deb_enter_args(LBS_DEB_CFG80211, "iface %s freq %d",
+ netdev_name(netdev), channel->center_freq);
+
+ if (netdev != priv->mesh_dev)
+ goto out;
+
+ ret = lbs_mesh_set_channel(priv, channel->hw_value);
out:
lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
@@ -789,7 +805,6 @@ void lbs_scan_done(struct lbs_private *priv)
}
static int lbs_cfg_scan(struct wiphy *wiphy,
- struct net_device *dev,
struct cfg80211_scan_request *request)
{
struct lbs_private *priv = wiphy_priv(wiphy);
@@ -2029,7 +2044,8 @@ static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
*/
static struct cfg80211_ops lbs_cfg80211_ops = {
- .set_channel = lbs_cfg_set_channel,
+ .set_monitor_channel = lbs_cfg_set_monitor_channel,
+ .libertas_set_mesh_channel = lbs_cfg_set_mesh_channel,
.scan = lbs_cfg_scan,
.connect = lbs_cfg_connect,
.disconnect = lbs_cfg_disconnect,
@@ -2164,13 +2180,15 @@ int lbs_reg_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct lbs_private *priv = wiphy_priv(wiphy);
- int ret;
+ int ret = 0;
lbs_deb_enter_args(LBS_DEB_CFG80211, "cfg80211 regulatory domain "
"callback for domain %c%c\n", request->alpha2[0],
request->alpha2[1]);
- ret = lbs_set_11d_domain_info(priv, request, wiphy->bands);
+ memcpy(priv->country_code, request->alpha2, sizeof(request->alpha2));
+ if (lbs_iface_active(priv))
+ ret = lbs_set_11d_domain_info(priv);
lbs_deb_leave(LBS_DEB_CFG80211);
return ret;
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index d798bcc..26e6832 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -733,15 +733,13 @@ int lbs_get_rssi(struct lbs_private *priv, s8 *rssi, s8 *nf)
* to the firmware
*
* @priv: pointer to &struct lbs_private
- * @request: cfg80211 regulatory request structure
- * @bands: the device's supported bands and channels
*
* returns: 0 on success, error code on failure
*/
-int lbs_set_11d_domain_info(struct lbs_private *priv,
- struct regulatory_request *request,
- struct ieee80211_supported_band **bands)
+int lbs_set_11d_domain_info(struct lbs_private *priv)
{
+ struct wiphy *wiphy = priv->wdev->wiphy;
+ struct ieee80211_supported_band **bands = wiphy->bands;
struct cmd_ds_802_11d_domain_info cmd;
struct mrvl_ie_domain_param_set *domain = &cmd.domain;
struct ieee80211_country_ie_triplet *t;
@@ -752,21 +750,23 @@ int lbs_set_11d_domain_info(struct lbs_private *priv,
u8 first_channel = 0, next_chan = 0, max_pwr = 0;
u8 i, flag = 0;
size_t triplet_size;
- int ret;
+ int ret = 0;
lbs_deb_enter(LBS_DEB_11D);
+ if (!priv->country_code[0])
+ goto out;
memset(&cmd, 0, sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_SET);
lbs_deb_11d("Setting country code '%c%c'\n",
- request->alpha2[0], request->alpha2[1]);
+ priv->country_code[0], priv->country_code[1]);
domain->header.type = cpu_to_le16(TLV_TYPE_DOMAIN);
/* Set country code */
- domain->country_code[0] = request->alpha2[0];
- domain->country_code[1] = request->alpha2[1];
+ domain->country_code[0] = priv->country_code[0];
+ domain->country_code[1] = priv->country_code[1];
domain->country_code[2] = ' ';
/* Now set up the channel triplets; firmware is somewhat picky here
@@ -848,6 +848,7 @@ int lbs_set_11d_domain_info(struct lbs_private *priv,
ret = lbs_cmd_with_response(priv, CMD_802_11D_DOMAIN_INFO, &cmd);
+out:
lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
return ret;
}
@@ -1019,9 +1020,9 @@ static void lbs_submit_command(struct lbs_private *priv,
if (ret) {
netdev_info(priv->dev, "DNLD_CMD: hw_host_to_card failed: %d\n",
ret);
- /* Let the timer kick in and retry, and potentially reset
- the whole thing if the condition persists */
- timeo = HZ/4;
+ /* Reset dnld state machine, report failure */
+ priv->dnld_sent = DNLD_RES_RECEIVED;
+ lbs_complete_command(priv, cmdnode, ret);
}
if (command == CMD_802_11_DEEP_SLEEP) {
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index b280ef7..ab07608 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -128,9 +128,7 @@ int lbs_set_monitor_mode(struct lbs_private *priv, int enable);
int lbs_get_rssi(struct lbs_private *priv, s8 *snr, s8 *nf);
-int lbs_set_11d_domain_info(struct lbs_private *priv,
- struct regulatory_request *request,
- struct ieee80211_supported_band **bands);
+int lbs_set_11d_domain_info(struct lbs_private *priv);
int lbs_get_reg(struct lbs_private *priv, u16 reg, u16 offset, u32 *value);
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index a06cc28..668dd27 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -483,7 +483,7 @@ static ssize_t lbs_rdmac_write(struct file *file,
res = -EFAULT;
goto out_unlock;
}
- priv->mac_offset = simple_strtoul((char *)buf, NULL, 16);
+ priv->mac_offset = simple_strtoul(buf, NULL, 16);
res = count;
out_unlock:
free_page(addr);
@@ -565,7 +565,7 @@ static ssize_t lbs_rdbbp_write(struct file *file,
res = -EFAULT;
goto out_unlock;
}
- priv->bbp_offset = simple_strtoul((char *)buf, NULL, 16);
+ priv->bbp_offset = simple_strtoul(buf, NULL, 16);
res = count;
out_unlock:
free_page(addr);
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 6720054..6bd1608 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -49,6 +49,7 @@ struct lbs_private {
bool wiphy_registered;
struct cfg80211_scan_request *scan_req;
u8 assoc_bss[ETH_ALEN];
+ u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
u8 disassoc_reason;
/* Mesh */
@@ -58,6 +59,7 @@ struct lbs_private {
uint16_t mesh_tlv;
u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1];
u8 mesh_ssid_len;
+ u8 mesh_channel;
#endif
/* Debugfs */
diff --git a/drivers/net/wireless/libertas/firmware.c b/drivers/net/wireless/libertas/firmware.c
index 601f207..c0f9e7e 100644
--- a/drivers/net/wireless/libertas/firmware.c
+++ b/drivers/net/wireless/libertas/firmware.c
@@ -4,9 +4,7 @@
#include <linux/sched.h>
#include <linux/firmware.h>
-#include <linux/firmware.h>
#include <linux/module.h>
-#include <linux/sched.h>
#include "dev.h"
#include "decl.h"
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index 2e2dbfa..96726f7 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -68,7 +68,6 @@
#define CMD_802_11_BEACON_STOP 0x0049
#define CMD_802_11_MAC_ADDRESS 0x004d
#define CMD_802_11_LED_GPIO_CTRL 0x004e
-#define CMD_802_11_EEPROM_ACCESS 0x0059
#define CMD_802_11_BAND_CONFIG 0x0058
#define CMD_GSPI_BUS_CONFIG 0x005a
#define CMD_802_11D_DOMAIN_INFO 0x005b
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index cd3b0d4..55a77e4 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -302,14 +302,13 @@ error:
static void if_usb_disconnect(struct usb_interface *intf)
{
struct if_usb_card *cardp = usb_get_intfdata(intf);
- struct lbs_private *priv = (struct lbs_private *) cardp->priv;
+ struct lbs_private *priv = cardp->priv;
lbs_deb_enter(LBS_DEB_MAIN);
cardp->surprise_removed = 1;
if (priv) {
- priv->surpriseremoved = 1;
lbs_stop_card(priv);
lbs_remove_card(priv);
}
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index e96ee0a..5804818 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -152,6 +152,12 @@ int lbs_start_iface(struct lbs_private *priv)
goto err;
}
+ ret = lbs_set_11d_domain_info(priv);
+ if (ret) {
+ lbs_deb_net("set 11d domain info failed\n");
+ goto err;
+ }
+
lbs_update_channel(priv);
priv->iface_running = true;
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index e87c031..9780775 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -131,16 +131,13 @@ static int lbs_mesh_config(struct lbs_private *priv, uint16_t action,
int lbs_mesh_set_channel(struct lbs_private *priv, u8 channel)
{
+ priv->mesh_channel = channel;
return lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, channel);
}
static uint16_t lbs_mesh_get_channel(struct lbs_private *priv)
{
- struct wireless_dev *mesh_wdev = priv->mesh_dev->ieee80211_ptr;
- if (mesh_wdev->channel)
- return mesh_wdev->channel->hw_value;
- else
- return 1;
+ return priv->mesh_channel ?: 1;
}
/***************************************************************************
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 19a5a92..d576dd6 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -253,7 +253,7 @@ lbtf_deb_leave(LBTF_DEB_MAIN);
static void if_usb_disconnect(struct usb_interface *intf)
{
struct if_usb_card *cardp = usb_get_intfdata(intf);
- struct lbtf_private *priv = (struct lbtf_private *) cardp->priv;
+ struct lbtf_private *priv = cardp->priv;
lbtf_deb_enter(LBTF_DEB_MAIN);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index fb787df..643f968 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -292,7 +292,7 @@ struct mac80211_hwsim_data {
struct list_head list;
struct ieee80211_hw *hw;
struct device *dev;
- struct ieee80211_supported_band bands[2];
+ struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)];
struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
@@ -571,7 +571,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
skb_dequeue(&data->pending);
}
- skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (skb == NULL)
goto nla_put_failure;
@@ -678,8 +678,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
continue;
if (data2->idle || !data2->started ||
- !hwsim_ps_rx_ok(data2, skb) ||
- !data->channel || !data2->channel ||
+ !hwsim_ps_rx_ok(data2, skb) || !data2->channel ||
data->channel->center_freq != data2->channel->center_freq ||
!(data->group & data2->group))
continue;
@@ -1083,6 +1082,8 @@ enum hwsim_testmode_attr {
enum hwsim_testmode_cmd {
HWSIM_TM_CMD_SET_PS = 0,
HWSIM_TM_CMD_GET_PS = 1,
+ HWSIM_TM_CMD_STOP_QUEUES = 2,
+ HWSIM_TM_CMD_WAKE_QUEUES = 3,
};
static const struct nla_policy hwsim_testmode_policy[HWSIM_TM_ATTR_MAX + 1] = {
@@ -1122,6 +1123,12 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
if (nla_put_u32(skb, HWSIM_TM_ATTR_PS, hwsim->ps))
goto nla_put_failure;
return cfg80211_testmode_reply(skb);
+ case HWSIM_TM_CMD_STOP_QUEUES:
+ ieee80211_stop_queues(hw);
+ return 0;
+ case HWSIM_TM_CMD_WAKE_QUEUES:
+ ieee80211_wake_queues(hw);
+ return 0;
default:
return -EOPNOTSUPP;
}
@@ -1486,7 +1493,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
struct mac80211_hwsim_data *data2;
struct ieee80211_tx_info *txi;
struct hwsim_tx_rate *tx_attempts;
- struct sk_buff __user *ret_skb;
+ unsigned long ret_skb_ptr;
struct sk_buff *skb, *tmp;
struct mac_address *src;
unsigned int hwsim_flags;
@@ -1504,8 +1511,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]);
hwsim_flags = nla_get_u32(info->attrs[HWSIM_ATTR_FLAGS]);
- ret_skb = (struct sk_buff __user *)
- (unsigned long) nla_get_u64(info->attrs[HWSIM_ATTR_COOKIE]);
+ ret_skb_ptr = nla_get_u64(info->attrs[HWSIM_ATTR_COOKIE]);
data2 = get_hwsim_data_ref_from_addr(src);
@@ -1514,7 +1520,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
/* look for the skb matching the cookie passed back from user */
skb_queue_walk_safe(&data2->pending, skb, tmp) {
- if (skb == ret_skb) {
+ if ((unsigned long)skb == ret_skb_ptr) {
skb_unlink(skb, &data2->pending);
found = true;
break;
@@ -1534,11 +1540,6 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
/* now send back TX status */
txi = IEEE80211_SKB_CB(skb);
- if (txi->control.vif)
- hwsim_check_magic(txi->control.vif);
- if (txi->control.sta)
- hwsim_check_sta_magic(txi->control.sta);
-
ieee80211_tx_info_clear_status(txi);
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
@@ -1555,6 +1556,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
hdr = (struct ieee80211_hdr *) skb->data;
mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2);
}
+ txi->flags |= IEEE80211_TX_STAT_ACK;
}
ieee80211_tx_status_irqsafe(data2->hw, skb);
return 0;
@@ -1721,6 +1723,24 @@ static void hwsim_exit_netlink(void)
"unregister family %i\n", ret);
}
+static const struct ieee80211_iface_limit hwsim_if_limits[] = {
+ { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
+ { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) },
+};
+
+static const struct ieee80211_iface_combination hwsim_if_comb = {
+ .limits = hwsim_if_limits,
+ .n_limits = ARRAY_SIZE(hwsim_if_limits),
+ .max_interfaces = 2048,
+ .num_different_channels = 1,
+};
+
static int __init init_mac80211_hwsim(void)
{
int i, err = 0;
@@ -1782,6 +1802,9 @@ static int __init init_mac80211_hwsim(void)
hw->wiphy->n_addresses = 2;
hw->wiphy->addresses = data->addresses;
+ hw->wiphy->iface_combinations = &hwsim_if_comb;
+ hw->wiphy->n_iface_combinations = 1;
+
if (fake_hw_scan) {
hw->wiphy->max_scan_ssids = 255;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
@@ -1835,7 +1858,7 @@ static int __init init_mac80211_hwsim(void)
sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4;
break;
default:
- break;
+ continue;
}
sband->ht_cap.ht_supported = true;
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index fe8ebfe..e535c93 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -101,8 +101,7 @@ int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
{
int tid;
struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
- struct host_cmd_ds_11n_delba *del_ba =
- (struct host_cmd_ds_11n_delba *) &resp->params.del_ba;
+ struct host_cmd_ds_11n_delba *del_ba = &resp->params.del_ba;
uint16_t del_ba_param_set = le16_to_cpu(del_ba->del_ba_param_set);
tid = del_ba_param_set >> DELBA_TID_POS;
@@ -147,8 +146,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
int tid;
- struct host_cmd_ds_11n_addba_rsp *add_ba_rsp =
- (struct host_cmd_ds_11n_addba_rsp *) &resp->params.add_ba_rsp;
+ struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn))
@@ -412,7 +410,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
memcpy((u8 *) bss_co_2040 +
sizeof(struct mwifiex_ie_types_header),
- (u8 *) bss_desc->bcn_bss_co_2040 +
+ bss_desc->bcn_bss_co_2040 +
sizeof(struct ieee_types_header),
le16_to_cpu(bss_co_2040->header.len));
@@ -426,10 +424,8 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
ext_cap->header.type = cpu_to_le16(WLAN_EID_EXT_CAPABILITY);
ext_cap->header.len = cpu_to_le16(sizeof(ext_cap->ext_cap));
- memcpy((u8 *) ext_cap +
- sizeof(struct mwifiex_ie_types_header),
- (u8 *) bss_desc->bcn_ext_cap +
- sizeof(struct ieee_types_header),
+ memcpy((u8 *)ext_cap + sizeof(struct mwifiex_ie_types_header),
+ bss_desc->bcn_ext_cap + sizeof(struct ieee_types_header),
le16_to_cpu(ext_cap->header.len));
*buffer += sizeof(struct mwifiex_ie_types_extcap);
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 77646d7..28366e9 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -105,8 +105,7 @@ static inline u8 mwifiex_space_avail_for_new_ba_stream(
priv = adapter->priv[i];
if (priv)
ba_stream_num += mwifiex_wmm_list_len(
- (struct list_head *)
- &priv->tx_ba_stream_tbl_ptr);
+ &priv->tx_ba_stream_tbl_ptr);
}
return ((ba_stream_num <
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 9c44088..591ccd3 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -256,7 +256,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
else
last_seq = priv->rx_seq[tid];
- if (last_seq >= new_node->start_win)
+ if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
+ last_seq >= new_node->start_win)
new_node->start_win = last_seq + 1;
new_node->win_size = win_size;
@@ -296,9 +297,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
*/
int mwifiex_cmd_11n_addba_req(struct host_cmd_ds_command *cmd, void *data_buf)
{
- struct host_cmd_ds_11n_addba_req *add_ba_req =
- (struct host_cmd_ds_11n_addba_req *)
- &cmd->params.add_ba_req;
+ struct host_cmd_ds_11n_addba_req *add_ba_req = &cmd->params.add_ba_req;
cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_REQ);
cmd->size = cpu_to_le16(sizeof(*add_ba_req) + S_DS_GEN);
@@ -320,9 +319,7 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
struct host_cmd_ds_11n_addba_req
*cmd_addba_req)
{
- struct host_cmd_ds_11n_addba_rsp *add_ba_rsp =
- (struct host_cmd_ds_11n_addba_rsp *)
- &cmd->params.add_ba_rsp;
+ struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &cmd->params.add_ba_rsp;
u8 tid;
int win_size;
uint16_t block_ack_param_set;
@@ -367,8 +364,7 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
*/
int mwifiex_cmd_11n_delba(struct host_cmd_ds_command *cmd, void *data_buf)
{
- struct host_cmd_ds_11n_delba *del_ba = (struct host_cmd_ds_11n_delba *)
- &cmd->params.del_ba;
+ struct host_cmd_ds_11n_delba *del_ba = &cmd->params.del_ba;
cmd->command = cpu_to_le16(HostCmd_CMD_11N_DELBA);
cmd->size = cpu_to_le16(sizeof(*del_ba) + S_DS_GEN);
@@ -398,8 +394,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
int start_win, end_win, win_size;
u16 pkt_index;
- tbl = mwifiex_11n_get_rx_reorder_tbl((struct mwifiex_private *) priv,
- tid, ta);
+ tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (!tbl) {
if (pkt_type != PKT_TYPE_BAR)
mwifiex_process_rx_packet(priv->adapter, payload);
@@ -520,9 +515,7 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
- struct host_cmd_ds_11n_addba_rsp *add_ba_rsp =
- (struct host_cmd_ds_11n_addba_rsp *)
- &resp->params.add_ba_rsp;
+ struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
int tid, win_size;
struct mwifiex_rx_reorder_tbl *tbl;
uint16_t block_ack_param_set;
@@ -596,5 +589,5 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
- memset(priv->rx_seq, 0, sizeof(priv->rx_seq));
+ mwifiex_reset_11n_rx_seq_num(priv);
}
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h
index f1bffeb..6c9815a 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h
@@ -37,6 +37,13 @@
#define ADDBA_RSP_STATUS_ACCEPT 0
+#define MWIFIEX_DEF_11N_RX_SEQ_NUM 0xffff
+
+static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
+{
+ memset(priv->rx_seq, 0xff, sizeof(priv->rx_seq));
+}
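
The new MWIFIEX_DEF_11N_RX_SEQ_NUM value (0xffff) acts as a "no frame received yet" marker: the cleanup path now fills rx_seq[] with 0xff bytes via mwifiex_reset_11n_rx_seq_num(), and mwifiex_11n_create_rx_reorder_tbl() only bumps start_win past last_seq when a real sequence number has been recorded. A minimal user-space sketch of the same guard (names here are illustrative, not driver symbols):

	#include <stdio.h>

	#define DEF_SEQ 0xffffu		/* mirrors MWIFIEX_DEF_11N_RX_SEQ_NUM */

	static unsigned int pick_start_win(unsigned int last_seq,
					   unsigned int req_start_win)
	{
		/* Only advance past last_seq if a frame has really been seen. */
		if (last_seq != DEF_SEQ && last_seq >= req_start_win)
			return last_seq + 1;
		return req_start_win;
	}

	int main(void)
	{
		printf("%u\n", pick_start_win(DEF_SEQ, 10));	/* 10: no history */
		printf("%u\n", pick_start_win(12, 10));		/* 13: skip delivered frames */
		return 0;
	}
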
+
int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *,
u16 seqNum,
u16 tid, u8 *ta,
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 8767144..fe42137 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -48,10 +48,9 @@ static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
* Others -> IEEE80211_HT_PARAM_CHA_SEC_NONE
*/
static u8
-mwifiex_cfg80211_channel_type_to_sec_chan_offset(enum nl80211_channel_type
- channel_type)
+mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type)
{
- switch (channel_type) {
+ switch (chan_type) {
case NL80211_CHAN_NO_HT:
case NL80211_CHAN_HT20:
return IEEE80211_HT_PARAM_CHA_SEC_NONE;
@@ -170,7 +169,9 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
if (!priv->sec_info.wep_enabled)
return 0;
- if (mwifiex_set_encode(priv, NULL, 0, key_index, NULL, 0)) {
+ if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) {
+ priv->wep_key_curr_index = key_index;
+ } else if (mwifiex_set_encode(priv, NULL, 0, key_index, NULL, 0)) {
wiphy_err(wiphy, "set default Tx key index\n");
return -EFAULT;
}
@@ -187,9 +188,25 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
struct key_params *params)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
+ struct mwifiex_wep_key *wep_key;
const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP &&
+ (params->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ params->cipher == WLAN_CIPHER_SUITE_WEP104)) {
+ if (params->key && params->key_len) {
+ wep_key = &priv->wep_key[key_index];
+ memset(wep_key, 0, sizeof(struct mwifiex_wep_key));
+ memcpy(wep_key->key_material, params->key,
+ params->key_len);
+ wep_key->key_index = key_index;
+ wep_key->key_length = params->key_len;
+ priv->sec_info.wep_enabled = 1;
+ }
+ return 0;
+ }
+
if (mwifiex_set_encode(priv, params->key, params->key_len,
key_index, peer_mac, 0)) {
wiphy_err(wiphy, "crypto keys added\n");
@@ -242,13 +259,13 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
flag = 1;
first_chan = (u32) ch->hw_value;
next_chan = first_chan;
- max_pwr = ch->max_power;
+ max_pwr = ch->max_reg_power;
no_of_parsed_chan = 1;
continue;
}
if (ch->hw_value == next_chan + 1 &&
- ch->max_power == max_pwr) {
+ ch->max_reg_power == max_pwr) {
next_chan++;
no_of_parsed_chan++;
} else {
@@ -259,7 +276,7 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
no_of_triplet++;
first_chan = (u32) ch->hw_value;
next_chan = first_chan;
- max_pwr = ch->max_power;
+ max_pwr = ch->max_reg_power;
no_of_parsed_chan = 1;
}
}
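
Worked example of the grouping above (values illustrative): with ch->max_reg_power now used instead of ch->max_power, a 2.4 GHz band whose channels 1-11 allow 20 dBm and channels 12-13 allow 10 dBm is encoded as two triplets, {first_chan = 1, no_of_parsed_chan = 11, max_pwr = 20} and {first_chan = 12, no_of_parsed_chan = 2, max_pwr = 10}; any channel that breaks either the consecutive-number or the equal-power rule starts a new triplet.
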
@@ -321,79 +338,6 @@ static int mwifiex_reg_notifier(struct wiphy *wiphy,
}
/*
- * This function sets the RF channel.
- *
- * This function creates multiple IOCTL requests, populates them accordingly
- * and issues them to set the band/channel and frequency.
- */
-static int
-mwifiex_set_rf_channel(struct mwifiex_private *priv,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type)
-{
- struct mwifiex_chan_freq_power cfp;
- u32 config_bands = 0;
- struct wiphy *wiphy = priv->wdev->wiphy;
- struct mwifiex_adapter *adapter = priv->adapter;
-
- if (chan) {
- /* Set appropriate bands */
- if (chan->band == IEEE80211_BAND_2GHZ) {
- if (channel_type == NL80211_CHAN_NO_HT)
- if (priv->adapter->config_bands == BAND_B ||
- priv->adapter->config_bands == BAND_G)
- config_bands =
- priv->adapter->config_bands;
- else
- config_bands = BAND_B | BAND_G;
- else
- config_bands = BAND_B | BAND_G | BAND_GN;
- } else {
- if (channel_type == NL80211_CHAN_NO_HT)
- config_bands = BAND_A;
- else
- config_bands = BAND_AN | BAND_A;
- }
-
- if (!((config_bands | adapter->fw_bands) &
- ~adapter->fw_bands)) {
- adapter->config_bands = config_bands;
- if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
- adapter->adhoc_start_band = config_bands;
- if ((config_bands & BAND_GN) ||
- (config_bands & BAND_AN))
- adapter->adhoc_11n_enabled = true;
- else
- adapter->adhoc_11n_enabled = false;
- }
- }
- adapter->sec_chan_offset =
- mwifiex_cfg80211_channel_type_to_sec_chan_offset
- (channel_type);
- adapter->channel_type = channel_type;
-
- mwifiex_send_domain_info_cmd_fw(wiphy);
- }
-
- wiphy_dbg(wiphy, "info: setting band %d, chan offset %d, mode %d\n",
- config_bands, adapter->sec_chan_offset, priv->bss_mode);
- if (!chan)
- return 0;
-
- memset(&cfp, 0, sizeof(cfp));
- cfp.freq = chan->center_freq;
- cfp.channel = ieee80211_frequency_to_channel(chan->center_freq);
-
- if (mwifiex_bss_set_channel(priv, &cfp))
- return -EFAULT;
-
- if (priv->bss_type == MWIFIEX_BSS_TYPE_STA)
- return mwifiex_drv_change_adhoc_chan(priv, cfp.channel);
- else
- return mwifiex_uap_set_channel(priv, cfp.channel);
-}
-
-/*
* This function sets the fragmentation threshold.
*
* The fragmentation threshold value must lie between MWIFIEX_FRAG_MIN_VALUE
@@ -608,7 +552,7 @@ static int
mwifiex_dump_station_info(struct mwifiex_private *priv,
struct station_info *sinfo)
{
- struct mwifiex_rate_cfg rate;
+ u32 rate;
sinfo->filled = STATION_INFO_RX_BYTES | STATION_INFO_TX_BYTES |
STATION_INFO_RX_PACKETS | STATION_INFO_TX_PACKETS |
@@ -634,9 +578,9 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
/*
* Bit 0 in tx_htinfo indicates that current Tx rate is 11n rate. Valid
- * MCS index values for us are 0 to 7.
+ * MCS index values for us are 0 to 15.
*/
- if ((priv->tx_htinfo & BIT(0)) && (priv->tx_rate < 8)) {
+ if ((priv->tx_htinfo & BIT(0)) && (priv->tx_rate < 16)) {
sinfo->txrate.mcs = priv->tx_rate;
sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
/* 40MHz rate */
@@ -654,7 +598,7 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
sinfo->tx_packets = priv->stats.tx_packets;
sinfo->signal = priv->bcn_rssi_avg;
/* bit rate is in 500 kb/s units. Convert it to 100kb/s units */
- sinfo->txrate.legacy = rate.rate * 5;
+ sinfo->txrate.legacy = rate * 5;
if (priv->bss_mode == NL80211_IFTYPE_STATION) {
sinfo->filled |= STATION_INFO_BSS_PARAM;
@@ -809,8 +753,8 @@ static const u32 mwifiex_cipher_suites[] = {
/*
* CFG802.11 operation handler for setting bit rates.
*
- * Function selects legacy bang B/G/BG from corresponding bitrates selection.
- * Currently only 2.4GHz band is supported.
+ * Function configures data rates to firmware using bitrate mask
+ * provided by cfg80211.
*/
static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
struct net_device *dev,
@@ -818,43 +762,36 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
const struct cfg80211_bitrate_mask *mask)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- int index = 0, mode = 0, i;
- struct mwifiex_adapter *adapter = priv->adapter;
+ u16 bitmap_rates[MAX_BITMAP_RATES_SIZE];
+ enum ieee80211_band band;
- /* Currently only 2.4GHz is supported */
- for (i = 0; i < mwifiex_band_2ghz.n_bitrates; i++) {
- /*
- * Rates below 6 Mbps in the table are CCK rates; 802.11b
- * and from 6 they are OFDM; 802.11G
- */
- if (mwifiex_rates[i].bitrate == 60) {
- index = 1 << i;
- break;
- }
+ if (!priv->media_connected) {
+ dev_err(priv->adapter->dev,
+ "Can not set Tx data rate in disconnected state\n");
+ return -EINVAL;
}
- if (mask->control[IEEE80211_BAND_2GHZ].legacy < index) {
- mode = BAND_B;
- } else {
- mode = BAND_G;
- if (mask->control[IEEE80211_BAND_2GHZ].legacy % index)
- mode |= BAND_B;
- }
+ band = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
- if (!((mode | adapter->fw_bands) & ~adapter->fw_bands)) {
- adapter->config_bands = mode;
- if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
- adapter->adhoc_start_band = mode;
- adapter->adhoc_11n_enabled = false;
- }
- }
- adapter->sec_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
- adapter->channel_type = NL80211_CHAN_NO_HT;
+ memset(bitmap_rates, 0, sizeof(bitmap_rates));
- wiphy_debug(wiphy, "info: device configured in 802.11%s%s mode\n",
- (mode & BAND_B) ? "b" : "", (mode & BAND_G) ? "g" : "");
+ /* Fill HR/DSSS rates. */
+ if (band == IEEE80211_BAND_2GHZ)
+ bitmap_rates[0] = mask->control[band].legacy & 0x000f;
- return 0;
+ /* Fill OFDM rates */
+ if (band == IEEE80211_BAND_2GHZ)
+ bitmap_rates[1] = (mask->control[band].legacy & 0x0ff0) >> 4;
+ else
+ bitmap_rates[1] = mask->control[band].legacy;
+
+ /* Fill MCS rates */
+ bitmap_rates[2] = mask->control[band].mcs[0];
+ if (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2)
+ bitmap_rates[2] |= mask->control[band].mcs[1] << 8;
+
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG,
+ HostCmd_ACT_GEN_SET, 0, bitmap_rates);
}
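
To make the bitmap split above concrete, here is a small self-contained sketch of the arithmetic (the mask value is hypothetical; on 2.4 GHz the first four legacy bits cover the HR/DSSS rates and the next eight the OFDM rates, which is what the 0x000f and 0x0ff0 masks assume):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t legacy = 0x0070;	/* hypothetical mask: OFDM 6/9/12 Mbps */
		uint16_t hr_dsss = legacy & 0x000f;		/* bitmap_rates[0] */
		uint16_t ofdm = (legacy & 0x0ff0) >> 4;		/* bitmap_rates[1] */

		printf("HR/DSSS 0x%04x, OFDM 0x%04x\n", hr_dsss, ofdm);
		/* prints: HR/DSSS 0x0000, OFDM 0x0007 */
		return 0;
	}
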
/*
@@ -896,6 +833,69 @@ static int mwifiex_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy,
return 0;
}
+/* cfg80211 operation handler for change_beacon.
+ * It pushes the modified management IEs to the firmware.
+ */
+static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_beacon_data *data)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
+ wiphy_err(wiphy, "%s: bss_type mismatched\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!priv->bss_started) {
+ wiphy_err(wiphy, "%s: bss not started\n", __func__);
+ return -EINVAL;
+ }
+
+ if (mwifiex_set_mgmt_ies(priv, data)) {
+ wiphy_err(wiphy, "%s: setting mgmt ies failed\n", __func__);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int
+mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
+{
+ struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+ struct mwifiex_private *priv = mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_ANY);
+ struct mwifiex_ds_ant_cfg ant_cfg;
+
+ if (!tx_ant || !rx_ant)
+ return -EOPNOTSUPP;
+
+ if (adapter->hw_dev_mcs_support != HT_STREAM_2X2) {
+ /* Not a MIMO chip. User should provide specific antenna number
+ * for Tx/Rx path or enable all antennas for diversity
+ */
+ if (tx_ant != rx_ant)
+ return -EOPNOTSUPP;
+
+ if ((tx_ant & (tx_ant - 1)) &&
+ (tx_ant != BIT(adapter->number_of_antenna) - 1))
+ return -EOPNOTSUPP;
+
+ if ((tx_ant == BIT(adapter->number_of_antenna) - 1) &&
+ (priv->adapter->number_of_antenna > 1)) {
+ tx_ant = RF_ANTENNA_AUTO;
+ rx_ant = RF_ANTENNA_AUTO;
+ }
+ }
+
+ ant_cfg.tx_ant = tx_ant;
+ ant_cfg.rx_ant = rx_ant;
+
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_RF_ANTENNA,
+ HostCmd_ACT_GEN_SET, 0, &ant_cfg);
+}
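
The SISO branch above rejects antenna masks that select more than one antenna unless every available antenna is selected, in which case it falls back to RF_ANTENNA_AUTO (it also requires tx_ant == rx_ant). A compact user-space sketch of that validation; the function and variable names are illustrative:

	#include <stdbool.h>
	#include <stdint.h>

	#define ANT_AUTO 0xFFFFu	/* mirrors RF_ANTENNA_AUTO above */

	static bool pick_siso_antenna(uint32_t mask, unsigned int n_ant, uint32_t *out)
	{
		uint32_t all = (1u << n_ant) - 1;

		if (!mask)
			return false;		/* nothing selected */
		if ((mask & (mask - 1)) && mask != all)
			return false;		/* several antennas, but not all of them */
		*out = (mask == all && n_ant > 1) ? ANT_AUTO : mask;
		return true;
	}
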
+
/* cfg80211 operation handler for stop ap.
* Function stops BSS running at uAP interface.
*/
@@ -926,10 +926,11 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
{
struct mwifiex_uap_bss_param *bss_cfg;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ u8 config_bands = 0;
if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP)
return -1;
- if (mwifiex_set_mgmt_ies(priv, params))
+ if (mwifiex_set_mgmt_ies(priv, &params->beacon))
return -1;
bss_cfg = kzalloc(sizeof(struct mwifiex_uap_bss_param), GFP_KERNEL);
@@ -948,12 +949,51 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
bss_cfg->ssid.ssid_len = params->ssid_len;
}
+ switch (params->hidden_ssid) {
+ case NL80211_HIDDEN_SSID_NOT_IN_USE:
+ bss_cfg->bcast_ssid_ctl = 1;
+ break;
+ case NL80211_HIDDEN_SSID_ZERO_LEN:
+ bss_cfg->bcast_ssid_ctl = 0;
+ break;
+ case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
+ /* firmware doesn't support this type of hidden SSID */
+ default:
+ kfree(bss_cfg);
+ return -EINVAL;
+ }
+
+ bss_cfg->channel =
+ (u8)ieee80211_frequency_to_channel(params->channel->center_freq);
+ bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
+
+ /* Set appropriate bands */
+ if (params->channel->band == IEEE80211_BAND_2GHZ) {
+ if (params->channel_type == NL80211_CHAN_NO_HT)
+ config_bands = BAND_B | BAND_G;
+ else
+ config_bands = BAND_B | BAND_G | BAND_GN;
+ } else {
+ if (params->channel_type == NL80211_CHAN_NO_HT)
+ config_bands = BAND_A;
+ else
+ config_bands = BAND_AN | BAND_A;
+ }
+
+ if (!((config_bands | priv->adapter->fw_bands) &
+ ~priv->adapter->fw_bands))
+ priv->adapter->config_bands = config_bands;
+
+ mwifiex_send_domain_info_cmd_fw(wiphy);
+
if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
kfree(bss_cfg);
wiphy_err(wiphy, "Failed to parse secuirty parameters!\n");
return -1;
}
+ mwifiex_set_ht_params(priv, bss_cfg, params);
+
if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
HostCmd_ACT_GEN_SET, 0, NULL)) {
wiphy_err(wiphy, "Failed to stop the BSS\n");
@@ -977,6 +1017,16 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
return -1;
}
+ if (priv->sec_info.wep_enabled)
+ priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
+ else
+ priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
+
+ if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->curr_pkt_filter))
+ return -1;
+
return 0;
}
@@ -1069,7 +1119,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
struct cfg80211_ssid req_ssid;
int ret, auth_type = 0;
struct cfg80211_bss *bss = NULL;
- u8 is_scanning_required = 0;
+ u8 is_scanning_required = 0, config_bands = 0;
memset(&req_ssid, 0, sizeof(struct cfg80211_ssid));
@@ -1088,9 +1138,19 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
/* disconnect before try to associate */
mwifiex_deauthenticate(priv, NULL);
- if (channel)
- ret = mwifiex_set_rf_channel(priv, channel,
- priv->adapter->channel_type);
+ if (channel) {
+ if (mode == NL80211_IFTYPE_STATION) {
+ if (channel->band == IEEE80211_BAND_2GHZ)
+ config_bands = BAND_B | BAND_G | BAND_GN;
+ else
+ config_bands = BAND_A | BAND_AN;
+
+ if (!((config_bands | priv->adapter->fw_bands) &
+ ~priv->adapter->fw_bands))
+ priv->adapter->config_bands = config_bands;
+ }
+ mwifiex_send_domain_info_cmd_fw(priv->wdev->wiphy);
+ }
/* As this is new association, clear locally stored
* keys and security related flags */
@@ -1255,6 +1315,76 @@ done:
}
/*
+ * This function sets the following parameters for an IBSS network.
+ * - channel
+ * - start band
+ * - 11n flag
+ * - secondary channel offset
+ */
+static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
+ struct cfg80211_ibss_params *params)
+{
+ struct wiphy *wiphy = priv->wdev->wiphy;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ int index = 0, i;
+ u8 config_bands = 0;
+
+ if (params->channel->band == IEEE80211_BAND_2GHZ) {
+ if (!params->basic_rates) {
+ config_bands = BAND_B | BAND_G;
+ } else {
+ for (i = 0; i < mwifiex_band_2ghz.n_bitrates; i++) {
+ /*
+ * Rates below 6 Mbps in the table are CCK
+ * rates; 802.11b and from 6 they are OFDM;
+ * 802.11G
+ */
+ if (mwifiex_rates[i].bitrate == 60) {
+ index = 1 << i;
+ break;
+ }
+ }
+
+ if (params->basic_rates < index) {
+ config_bands = BAND_B;
+ } else {
+ config_bands = BAND_G;
+ if (params->basic_rates % index)
+ config_bands |= BAND_B;
+ }
+ }
+
+ if (params->channel_type != NL80211_CHAN_NO_HT)
+ config_bands |= BAND_GN;
+ } else {
+ if (params->channel_type == NL80211_CHAN_NO_HT)
+ config_bands = BAND_A;
+ else
+ config_bands = BAND_AN | BAND_A;
+ }
+
+ if (!((config_bands | adapter->fw_bands) & ~adapter->fw_bands)) {
+ adapter->config_bands = config_bands;
+ adapter->adhoc_start_band = config_bands;
+
+ if ((config_bands & BAND_GN) || (config_bands & BAND_AN))
+ adapter->adhoc_11n_enabled = true;
+ else
+ adapter->adhoc_11n_enabled = false;
+ }
+
+ adapter->sec_chan_offset =
+ mwifiex_chan_type_to_sec_chan_offset(params->channel_type);
+ priv->adhoc_channel =
+ ieee80211_frequency_to_channel(params->channel->center_freq);
+
+ wiphy_dbg(wiphy, "info: set ibss band %d, chan %d, chan offset %d\n",
+ config_bands, priv->adhoc_channel, adapter->sec_chan_offset);
+
+ return 0;
+}
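
Worked example for the basic_rates split above, assuming the usual mwifiex 2.4 GHz rate table where 6 Mbps is the fifth entry, so index becomes 1 << 4 = 0x10: basic_rates = 0x0003 (1 and 2 Mbps only) is below 0x10 and selects BAND_B; 0x01f0 (OFDM rates only) selects BAND_G since 0x01f0 % 0x10 == 0; 0x01f3 selects BAND_G | BAND_B because the low CCK bits make the modulo non-zero. BAND_GN is then OR-ed in whenever the channel type is an HT type.
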
+
+/*
* CFG802.11 operation handler to join an IBSS.
*
* This function does not work in any mode other than Ad-Hoc, or if
@@ -1276,6 +1406,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n",
(char *) params->ssid, params->bssid);
+ mwifiex_set_ibss_params(priv, params);
+
ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid,
params->bssid, priv->bss_mode,
params->channel, NULL, params->privacy);
@@ -1322,9 +1454,10 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
* it also informs the results.
*/
static int
-mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev,
+mwifiex_cfg80211_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *request)
{
+ struct net_device *dev = request->wdev->netdev;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
int i;
struct ieee80211_channel *chan;
@@ -1368,7 +1501,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev,
priv->user_scan_cfg->chan_list[i].scan_time = 0;
}
- if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg))
+ if (mwifiex_scan_networks(priv, priv->user_scan_cfg))
return -EFAULT;
if (request->ie && request->ie_len) {
@@ -1458,11 +1591,11 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
/*
* create a new virtual interface with the given name
*/
-struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
- char *name,
- enum nl80211_iftype type,
- u32 *flags,
- struct vif_params *params)
+struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
+ char *name,
+ enum nl80211_iftype type,
+ u32 *flags,
+ struct vif_params *params)
{
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
struct mwifiex_private *priv;
@@ -1471,7 +1604,7 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
struct wireless_dev *wdev;
if (!adapter)
- return NULL;
+ return ERR_PTR(-EFAULT);
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
@@ -1481,12 +1614,12 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
if (priv->bss_mode) {
wiphy_err(wiphy,
"cannot create multiple sta/adhoc ifaces\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
if (!wdev)
- return NULL;
+ return ERR_PTR(-ENOMEM);
wdev->wiphy = wiphy;
priv->wdev = wdev;
@@ -1509,12 +1642,12 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
if (priv->bss_mode) {
wiphy_err(wiphy, "Can't create multiple AP interfaces");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
if (!wdev)
- return NULL;
+ return ERR_PTR(-ENOMEM);
priv->wdev = wdev;
wdev->wiphy = wiphy;
@@ -1531,14 +1664,15 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
break;
default:
wiphy_err(wiphy, "type not supported\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), name,
ether_setup, 1);
if (!dev) {
wiphy_err(wiphy, "no memory available for netdevice\n");
- goto error;
+ priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+ return ERR_PTR(-ENOMEM);
}
mwifiex_init_priv_params(priv, dev);
@@ -1569,7 +1703,9 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
/* Register network device */
if (register_netdevice(dev)) {
wiphy_err(wiphy, "cannot register virtual network device\n");
- goto error;
+ free_netdev(dev);
+ priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
+ return ERR_PTR(-EFAULT);
}
sema_init(&priv->async_sem, 1);
@@ -1580,22 +1716,16 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
#ifdef CONFIG_DEBUG_FS
mwifiex_dev_debugfs_init(priv);
#endif
- return dev;
-error:
- if (dev && (dev->reg_state == NETREG_UNREGISTERED))
- free_netdev(dev);
- priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
-
- return NULL;
+ return wdev;
}
EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
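
With the return type switched from a net_device pointer to a wireless_dev pointer, and NULL replaced by ERR_PTR() codes, callers are expected to test the result with IS_ERR() rather than checking for NULL. A minimal sketch of the calling pattern (the interface name and type are illustrative):

	struct wireless_dev *wdev;

	wdev = mwifiex_add_virtual_intf(wiphy, "mlan%d", NL80211_IFTYPE_STATION,
					NULL, NULL);
	if (IS_ERR(wdev))
		return PTR_ERR(wdev);	/* -EINVAL, -ENOMEM or -EFAULT from above */
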
/*
* del_virtual_intf: remove the virtual interface determined by dev
*/
-int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev)
+int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
{
- struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
#ifdef CONFIG_DEBUG_FS
mwifiex_dev_debugfs_remove(priv);
@@ -1607,11 +1737,11 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev)
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
- if (dev->reg_state == NETREG_REGISTERED)
- unregister_netdevice(dev);
+ if (wdev->netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdevice(wdev->netdev);
- if (dev->reg_state == NETREG_UNREGISTERED)
- free_netdev(dev);
+ if (wdev->netdev->reg_state == NETREG_UNREGISTERED)
+ free_netdev(wdev->netdev);
/* Clear the priv in adapter */
priv->netdev = NULL;
@@ -1645,7 +1775,9 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.set_bitrate_mask = mwifiex_cfg80211_set_bitrate_mask,
.start_ap = mwifiex_cfg80211_start_ap,
.stop_ap = mwifiex_cfg80211_stop_ap,
+ .change_beacon = mwifiex_cfg80211_change_beacon,
.set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config,
+ .set_antenna = mwifiex_cfg80211_set_antenna,
};
/*
@@ -1692,7 +1824,16 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
memcpy(wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
- wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_CUSTOM_REGULATORY;
+ wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
+ WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+
+ wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2;
+
+ wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1;
+ wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
+
+ wiphy->features = NL80211_FEATURE_HT_IBSS;
/* Reserve space for mwifiex specific private data for BSS */
wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
@@ -1703,7 +1844,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wdev_priv = wiphy_priv(wiphy);
*(unsigned long *)wdev_priv = (unsigned long)adapter;
- set_wiphy_dev(wiphy, (struct device *)priv->adapter->dev);
+ set_wiphy_dev(wiphy, priv->adapter->dev);
ret = wiphy_register(wiphy);
if (ret < 0) {
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index 560871b..f69300f 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -167,23 +167,6 @@ u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index,
}
/*
- * This function maps a data rate value into corresponding index in supported
- * rates table.
- */
-u8 mwifiex_data_rate_to_index(u32 rate)
-{
- u16 *ptr;
-
- if (rate) {
- ptr = memchr(mwifiex_data_rates, rate,
- sizeof(mwifiex_data_rates));
- if (ptr)
- return (u8) (ptr - mwifiex_data_rates);
- }
- return 0;
-}
-
-/*
* This function returns the current active data rates.
*
* The result may vary depending upon connection status.
@@ -277,20 +260,6 @@ mwifiex_is_rate_auto(struct mwifiex_private *priv)
}
/*
- * This function converts rate bitmap into rate index.
- */
-int mwifiex_get_rate_index(u16 *rate_bitmap, int size)
-{
- int i;
-
- for (i = 0; i < size * 8; i++)
- if (rate_bitmap[i / 16] & (1 << (i % 16)))
- return i;
-
- return 0;
-}
-
-/*
* This function gets the supported data rates.
*
* The function works in both Ad-Hoc and infra mode by printing the
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 51e023e..c68adec 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -578,6 +578,7 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
} else {
adapter->cmd_queued = cmd_node;
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
+ queue_work(adapter->workqueue, &adapter->main_work);
}
return ret;
@@ -1102,7 +1103,8 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
&resp->params.opt_hs_cfg;
uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions);
- if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE)) {
+ if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) &&
+ adapter->iface_type == MWIFIEX_SDIO) {
mwifiex_hs_activated_event(priv, true);
return 0;
} else {
@@ -1114,6 +1116,9 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
}
if (conditions != HOST_SLEEP_CFG_CANCEL) {
adapter->is_hs_configured = true;
+ if (adapter->iface_type == MWIFIEX_USB ||
+ adapter->iface_type == MWIFIEX_PCIE)
+ mwifiex_hs_activated_event(priv, true);
} else {
adapter->is_hs_configured = false;
if (adapter->hs_activated)
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index f918f66..070ef25 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -41,16 +41,7 @@
#define MWIFIEX_AMPDU_DEF_RXWINSIZE 16
#define MWIFIEX_DEFAULT_BLOCK_ACK_TIMEOUT 0xffff
-#define MWIFIEX_RATE_INDEX_HRDSSS0 0
-#define MWIFIEX_RATE_INDEX_HRDSSS3 3
-#define MWIFIEX_RATE_INDEX_OFDM0 4
-#define MWIFIEX_RATE_INDEX_OFDM7 11
-#define MWIFIEX_RATE_INDEX_MCS0 12
-
-#define MWIFIEX_RATE_BITMAP_OFDM0 16
-#define MWIFIEX_RATE_BITMAP_OFDM7 23
#define MWIFIEX_RATE_BITMAP_MCS0 32
-#define MWIFIEX_RATE_BITMAP_MCS127 159
#define MWIFIEX_RX_DATA_BUF_SIZE (4 * 1024)
#define MWIFIEX_RX_CMD_BUF_SIZE (2 * 1024)
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 9f674bb..e831b44 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -122,7 +122,9 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42)
#define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44)
#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
+#define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48)
#define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51)
+#define TLV_TYPE_UAP_WEP_KEY (PROPRIETARY_TLV_BASE_ID + 59)
#define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60)
#define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64)
#define TLV_TYPE_UAP_AKMP (PROPRIETARY_TLV_BASE_ID + 65)
@@ -161,6 +163,12 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
+#define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \
+ IEEE80211_HT_CAP_SM_PS)
+
+#define MWIFIEX_DEF_AMPDU IEEE80211_HT_AMPDU_PARM_FACTOR
+
/* dev_cap bitmap
* BIT
* 0-16 reserved
@@ -217,7 +225,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_BBP_REG_ACCESS 0x001a
#define HostCmd_CMD_RF_REG_ACCESS 0x001b
#define HostCmd_CMD_PMIC_REG_ACCESS 0x00ad
-#define HostCmd_CMD_802_11_RF_CHANNEL 0x001d
+#define HostCmd_CMD_RF_TX_PWR 0x001e
+#define HostCmd_CMD_RF_ANTENNA 0x0020
#define HostCmd_CMD_802_11_DEAUTHENTICATE 0x0024
#define HostCmd_CMD_MAC_CONTROL 0x0028
#define HostCmd_CMD_802_11_AD_HOC_START 0x002b
@@ -313,6 +322,12 @@ enum ENH_PS_MODES {
#define HostCmd_BSS_TYPE_MASK 0xf000
+#define HostCmd_ACT_SET_RX 0x0001
+#define HostCmd_ACT_SET_TX 0x0002
+#define HostCmd_ACT_SET_BOTH 0x0003
+
+#define RF_ANTENNA_AUTO 0xFFFF
+
#define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type) { \
(((seq) & 0x00ff) | \
(((num) & 0x000f) << 8)) | \
@@ -868,6 +883,25 @@ struct host_cmd_ds_txpwr_cfg {
__le32 mode;
} __packed;
+struct host_cmd_ds_rf_tx_pwr {
+ __le16 action;
+ __le16 cur_level;
+ u8 max_power;
+ u8 min_power;
+} __packed;
+
+struct host_cmd_ds_rf_ant_mimo {
+ __le16 action_tx;
+ __le16 tx_ant_mode;
+ __le16 action_rx;
+ __le16 rx_ant_mode;
+};
+
+struct host_cmd_ds_rf_ant_siso {
+ __le16 action;
+ __le16 ant_mode;
+};
+
struct mwifiex_bcn_param {
u8 bssid[ETH_ALEN];
u8 rssi;
@@ -1194,6 +1228,13 @@ struct host_cmd_tlv_passphrase {
u8 passphrase[0];
} __packed;
+struct host_cmd_tlv_wep_key {
+ struct host_cmd_tlv tlv;
+ u8 key_index;
+ u8 is_default;
+ u8 key[1];
+};
+
struct host_cmd_tlv_auth_type {
struct host_cmd_tlv tlv;
u8 auth_type;
@@ -1209,6 +1250,11 @@ struct host_cmd_tlv_ssid {
u8 ssid[0];
} __packed;
+struct host_cmd_tlv_bcast_ssid {
+ struct host_cmd_tlv tlv;
+ u8 bcast_ctl;
+} __packed;
+
struct host_cmd_tlv_beacon_period {
struct host_cmd_tlv tlv;
__le16 period;
@@ -1245,14 +1291,6 @@ struct host_cmd_tlv_channel_band {
u8 channel;
} __packed;
-struct host_cmd_ds_802_11_rf_channel {
- __le16 action;
- __le16 current_channel;
- __le16 rf_type;
- __le16 reserved;
- u8 reserved_1[32];
-} __packed;
-
struct host_cmd_ds_version_ext {
u8 version_str_sel;
char version_str[128];
@@ -1337,10 +1375,12 @@ struct host_cmd_ds_command {
struct host_cmd_ds_802_11_rssi_info rssi_info;
struct host_cmd_ds_802_11_rssi_info_rsp rssi_info_rsp;
struct host_cmd_ds_802_11_snmp_mib smib;
- struct host_cmd_ds_802_11_rf_channel rf_channel;
struct host_cmd_ds_tx_rate_query tx_rate;
struct host_cmd_ds_tx_rate_cfg tx_rate_cfg;
struct host_cmd_ds_txpwr_cfg txp_cfg;
+ struct host_cmd_ds_rf_tx_pwr txp;
+ struct host_cmd_ds_rf_ant_mimo ant_mimo;
+ struct host_cmd_ds_rf_ant_siso ant_siso;
struct host_cmd_ds_802_11_ps_mode_enh psmode_enh;
struct host_cmd_ds_802_11_hs_cfg_enh opt_hs_cfg;
struct host_cmd_ds_802_11_scan scan;
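
The new antenna structures mirror the firmware command layout: the MIMO variant carries separate action/mode pairs for the Tx and Rx paths, the SISO variant a single pair. As a hedged sketch (the mode bitmask values are assumptions, not taken from the driver), a 2x2 configuration enabling both chains in each direction might be filled in as:

	struct host_cmd_ds_rf_ant_mimo ant = {
		.action_tx   = cpu_to_le16(HostCmd_ACT_SET_TX),
		.tx_ant_mode = cpu_to_le16(0x0003),	/* assumed: chains 0 and 1 */
		.action_rx   = cpu_to_le16(HostCmd_ACT_SET_RX),
		.rx_ant_mode = cpu_to_le16(0x0003),
	};
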
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index ceb82cd..1d8dd00 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -51,8 +51,7 @@ mwifiex_ie_get_autoidx(struct mwifiex_private *priv, u16 subtype_mask,
for (i = 0; i < priv->adapter->max_mgmt_ie_index; i++) {
mask = le16_to_cpu(priv->mgmt_ie[i].mgmt_subtype_mask);
- len = le16_to_cpu(priv->mgmt_ie[i].ie_length) +
- le16_to_cpu(ie->ie_length);
+ len = le16_to_cpu(ie->ie_length);
if (mask == MWIFIEX_AUTO_IDX_MASK)
continue;
@@ -108,10 +107,8 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
return -1;
tmp = (u8 *)&priv->mgmt_ie[index].ie_buffer;
- tmp += le16_to_cpu(priv->mgmt_ie[index].ie_length);
memcpy(tmp, &ie->ie_buffer, le16_to_cpu(ie->ie_length));
- le16_add_cpu(&priv->mgmt_ie[index].ie_length,
- le16_to_cpu(ie->ie_length));
+ priv->mgmt_ie[index].ie_length = ie->ie_length;
priv->mgmt_ie[index].ie_index = cpu_to_le16(index);
priv->mgmt_ie[index].mgmt_subtype_mask =
cpu_to_le16(mask);
@@ -213,95 +210,67 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
/* save assoc resp ie index after auto-indexing */
*assoc_idx = *((u16 *)pos);
+ kfree(ap_custom_ie);
return ret;
}
-/* This function parses different IEs- Tail IEs, beacon IEs, probe response IEs,
- * association response IEs from cfg80211_ap_settings function and sets these IE
- * to FW.
+/* This function checks whether a WPS IE is present in the passed buffer and
+ * copies it to a mwifiex_ie structure.
+ * It takes a pointer to a struct mwifiex_ie pointer as argument. If a WPS IE
+ * is found, memory is allocated for the mwifiex_ie structure and filled in
+ * with the WPS IE. The caller is responsible for freeing this memory.
*/
-int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
- struct cfg80211_ap_settings *params)
+static int mwifiex_update_wps_ie(const u8 *ies, int ies_len,
+ struct mwifiex_ie **ie_ptr, u16 mask)
{
- struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL;
- struct mwifiex_ie *ar_ie = NULL, *rsn_ie = NULL;
- struct ieee_types_header *ie = NULL;
- u16 beacon_idx = MWIFIEX_AUTO_IDX_MASK, pr_idx = MWIFIEX_AUTO_IDX_MASK;
- u16 ar_idx = MWIFIEX_AUTO_IDX_MASK, rsn_idx = MWIFIEX_AUTO_IDX_MASK;
- u16 mask;
- int ret = 0;
-
- if (params->beacon.tail && params->beacon.tail_len) {
- ie = (void *)cfg80211_find_ie(WLAN_EID_RSN, params->beacon.tail,
- params->beacon.tail_len);
- if (ie) {
- rsn_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
- if (!rsn_ie)
- return -ENOMEM;
-
- rsn_ie->ie_index = cpu_to_le16(rsn_idx);
- mask = MGMT_MASK_BEACON | MGMT_MASK_PROBE_RESP |
- MGMT_MASK_ASSOC_RESP;
- rsn_ie->mgmt_subtype_mask = cpu_to_le16(mask);
- rsn_ie->ie_length = cpu_to_le16(ie->len + 2);
- memcpy(rsn_ie->ie_buffer, ie, ie->len + 2);
-
- if (mwifiex_update_uap_custom_ie(priv, rsn_ie, &rsn_idx,
- NULL, NULL,
- NULL, NULL)) {
- ret = -1;
- goto done;
- }
+ struct ieee_types_header *wps_ie;
+ struct mwifiex_ie *ie = NULL;
+ const u8 *vendor_ie;
+
+ vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPS,
+ ies, ies_len);
+ if (vendor_ie) {
+ ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!ie)
+ return -ENOMEM;
- priv->rsn_idx = rsn_idx;
- }
+ wps_ie = (struct ieee_types_header *)vendor_ie;
+ memcpy(ie->ie_buffer, wps_ie, wps_ie->len + 2);
+ ie->ie_length = cpu_to_le16(wps_ie->len + 2);
+ ie->mgmt_subtype_mask = cpu_to_le16(mask);
+ ie->ie_index = cpu_to_le16(MWIFIEX_AUTO_IDX_MASK);
}
- if (params->beacon.beacon_ies && params->beacon.beacon_ies_len) {
- beacon_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
- if (!beacon_ie) {
- ret = -ENOMEM;
- goto done;
- }
-
- beacon_ie->ie_index = cpu_to_le16(beacon_idx);
- beacon_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON);
- beacon_ie->ie_length =
- cpu_to_le16(params->beacon.beacon_ies_len);
- memcpy(beacon_ie->ie_buffer, params->beacon.beacon_ies,
- params->beacon.beacon_ies_len);
- }
+ *ie_ptr = ie;
+ return 0;
+}
- if (params->beacon.proberesp_ies && params->beacon.proberesp_ies_len) {
- pr_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
- if (!pr_ie) {
- ret = -ENOMEM;
- goto done;
- }
+/* This function parses the beacon IEs, probe response IEs and association
+ * response IEs from cfg80211_ap_settings->beacon and sets these IEs in the
+ * firmware.
+ */
+static int mwifiex_set_mgmt_beacon_data_ies(struct mwifiex_private *priv,
+ struct cfg80211_beacon_data *data)
+{
+ struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL, *ar_ie = NULL;
+ u16 beacon_idx = MWIFIEX_AUTO_IDX_MASK, pr_idx = MWIFIEX_AUTO_IDX_MASK;
+ u16 ar_idx = MWIFIEX_AUTO_IDX_MASK;
+ int ret = 0;
- pr_ie->ie_index = cpu_to_le16(pr_idx);
- pr_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_PROBE_RESP);
- pr_ie->ie_length =
- cpu_to_le16(params->beacon.proberesp_ies_len);
- memcpy(pr_ie->ie_buffer, params->beacon.proberesp_ies,
- params->beacon.proberesp_ies_len);
- }
+ if (data->beacon_ies && data->beacon_ies_len)
+ mwifiex_update_wps_ie(data->beacon_ies, data->beacon_ies_len,
+ &beacon_ie, MGMT_MASK_BEACON);
- if (params->beacon.assocresp_ies && params->beacon.assocresp_ies_len) {
- ar_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
- if (!ar_ie) {
- ret = -ENOMEM;
- goto done;
- }
+ if (data->proberesp_ies && data->proberesp_ies_len)
+ mwifiex_update_wps_ie(data->proberesp_ies,
+ data->proberesp_ies_len, &pr_ie,
+ MGMT_MASK_PROBE_RESP);
- ar_ie->ie_index = cpu_to_le16(ar_idx);
- mask = MGMT_MASK_ASSOC_RESP | MGMT_MASK_REASSOC_RESP;
- ar_ie->mgmt_subtype_mask = cpu_to_le16(mask);
- ar_ie->ie_length =
- cpu_to_le16(params->beacon.assocresp_ies_len);
- memcpy(ar_ie->ie_buffer, params->beacon.assocresp_ies,
- params->beacon.assocresp_ies_len);
- }
+ if (data->assocresp_ies && data->assocresp_ies_len)
+ mwifiex_update_wps_ie(data->assocresp_ies,
+ data->assocresp_ies_len, &ar_ie,
+ MGMT_MASK_ASSOC_RESP |
+ MGMT_MASK_REASSOC_RESP);
if (beacon_ie || pr_ie || ar_ie) {
ret = mwifiex_update_uap_custom_ie(priv, beacon_ie,
@@ -319,11 +288,67 @@ done:
kfree(beacon_ie);
kfree(pr_ie);
kfree(ar_ie);
- kfree(rsn_ie);
return ret;
}
+/* This function parses the different IEs - tail IEs, beacon IEs, probe
+ * response IEs and association response IEs - from cfg80211_ap_settings
+ * and sets these IEs in the firmware.
+ */
+int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
+ struct cfg80211_beacon_data *info)
+{
+ struct mwifiex_ie *gen_ie;
+ struct ieee_types_header *rsn_ie, *wpa_ie = NULL;
+ u16 rsn_idx = MWIFIEX_AUTO_IDX_MASK, ie_len = 0;
+ const u8 *vendor_ie;
+
+ if (info->tail && info->tail_len) {
+ gen_ie = kzalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!gen_ie)
+ return -ENOMEM;
+ gen_ie->ie_index = cpu_to_le16(rsn_idx);
+ gen_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON |
+ MGMT_MASK_PROBE_RESP |
+ MGMT_MASK_ASSOC_RESP);
+
+ rsn_ie = (void *)cfg80211_find_ie(WLAN_EID_RSN,
+ info->tail, info->tail_len);
+ if (rsn_ie) {
+ memcpy(gen_ie->ie_buffer, rsn_ie, rsn_ie->len + 2);
+ ie_len = rsn_ie->len + 2;
+ gen_ie->ie_length = cpu_to_le16(ie_len);
+ }
+
+ vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ info->tail,
+ info->tail_len);
+ if (vendor_ie) {
+ wpa_ie = (struct ieee_types_header *)vendor_ie;
+ memcpy(gen_ie->ie_buffer + ie_len,
+ wpa_ie, wpa_ie->len + 2);
+ ie_len += wpa_ie->len + 2;
+ gen_ie->ie_length = cpu_to_le16(ie_len);
+ }
+
+ if (rsn_ie || wpa_ie) {
+ if (mwifiex_update_uap_custom_ie(priv, gen_ie, &rsn_idx,
+ NULL, NULL,
+ NULL, NULL)) {
+ kfree(gen_ie);
+ return -1;
+ }
+ priv->rsn_idx = rsn_idx;
+ }
+
+ kfree(gen_ie);
+ }
+
+ return mwifiex_set_mgmt_beacon_data_ies(priv, info);
+}
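
Both helpers above lean on the cfg80211 IE search routines: cfg80211_find_ie() and cfg80211_find_vendor_ie() return a pointer to the start of the matching element (or NULL), so the copy length is always the element's len field plus the two-byte ID/length header. A minimal sketch of that lookup, with a hypothetical source buffer and destination:

	const struct ieee_types_header *rsn;

	rsn = (void *)cfg80211_find_ie(WLAN_EID_RSN, tail, tail_len);
	if (rsn)
		memcpy(dst, rsn, rsn->len + 2);	/* ID + length byte + body */
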
+
/* This function removes management IE set */
int mwifiex_del_mgmt_ies(struct mwifiex_private *priv)
{
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index c1cb004..21fdc6c 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -57,6 +57,69 @@ static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv)
return 0;
}
+static void scan_delay_timer_fn(unsigned long data)
+{
+ struct mwifiex_private *priv = (struct mwifiex_private *)data;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct cmd_ctrl_node *cmd_node, *tmp_node;
+ unsigned long flags;
+
+ if (!mwifiex_wmm_lists_empty(adapter)) {
+ if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
+ /*
+ * Abort the scan operation by cancelling all pending scan
+ * commands
+ */
+ spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ list_for_each_entry_safe(cmd_node, tmp_node,
+ &adapter->scan_pending_q,
+ list) {
+ list_del(&cmd_node->list);
+ cmd_node->wait_q_enabled = false;
+ mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+ }
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
+
+ spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ adapter->scan_processing = false;
+ spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock,
+ flags);
+
+ if (priv->user_scan_cfg) {
+ dev_dbg(priv->adapter->dev,
+ "info: %s: scan aborted\n", __func__);
+ cfg80211_scan_done(priv->scan_request, 1);
+ priv->scan_request = NULL;
+ kfree(priv->user_scan_cfg);
+ priv->user_scan_cfg = NULL;
+ }
+ } else {
+ /*
+ * Tx data queue is still not empty, delay scan
+ * operation further by 20msec.
+ */
+ mod_timer(&priv->scan_delay_timer, jiffies +
+ msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
+ adapter->scan_delay_cnt++;
+ }
+ queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
+ } else {
+ /*
+ * Tx data queue is empty. Get scan command from scan_pending_q
+ * and put to cmd_pending_q to resume scan operation
+ */
+ adapter->scan_delay_cnt = 0;
+ spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ cmd_node = list_first_entry(&adapter->scan_pending_q,
+ struct cmd_ctrl_node, list);
+ list_del(&cmd_node->list);
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+
+ mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
+ }
+}
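
With MWIFIEX_SCAN_DELAY_MSEC set to 20 and MWIFIEX_MAX_SCAN_DELAY_CNT to 50 (see the main.h hunk below), the timer re-arms every 20 ms while Tx traffic is pending, so a scan that keeps being deferred is aborted after roughly 50 x 20 ms = 1 second.
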
+
/*
* This function initializes the private structure and sets default
* values to the members.
@@ -136,6 +199,9 @@ static int mwifiex_init_priv(struct mwifiex_private *priv)
priv->scan_block = false;
+ setup_timer(&priv->scan_delay_timer, scan_delay_timer_fn,
+ (unsigned long)priv);
+
return mwifiex_add_bss_prio_tbl(priv);
}
@@ -278,7 +344,6 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->adhoc_awake_period = 0;
memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
adapter->arp_filter_size = 0;
- adapter->channel_type = NL80211_CHAN_HT20;
adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
}
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index e6be6ee..5019153 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -21,6 +21,7 @@
#define _MWIFIEX_IOCTL_H_
#include <net/mac80211.h>
+#include <net/lib80211.h>
enum {
MWIFIEX_SCAN_TYPE_UNCHANGED = 0,
@@ -71,6 +72,13 @@ struct wpa_param {
u8 passphrase[MWIFIEX_WPA_PASSHPHRASE_LEN];
};
+struct wep_key {
+ u8 key_index;
+ u8 is_default;
+ u16 length;
+ u8 key[WLAN_KEY_LEN_WEP104];
+};
+
#define KEY_MGMT_ON_HOST 0x03
#define MWIFIEX_AUTH_MODE_AUTO 0xFF
#define BAND_CONFIG_MANUAL 0x00
@@ -90,6 +98,8 @@ struct mwifiex_uap_bss_param {
u16 key_mgmt;
u16 key_mgmt_operation;
struct wpa_param wpa_cfg;
+ struct wep_key wep_cfg[NUM_WEP_KEYS];
+ struct ieee80211_ht_cap ht_cap;
};
enum {
@@ -215,12 +225,6 @@ struct mwifiex_ds_encrypt_key {
u8 wapi_rxpn[WAPI_RXPN_LEN];
};
-struct mwifiex_rate_cfg {
- u32 action;
- u32 is_rate_auto;
- u32 rate;
-};
-
struct mwifiex_power_cfg {
u32 is_power_auto;
u32 power_level;
@@ -267,6 +271,11 @@ struct mwifiex_ds_11n_amsdu_aggr_ctrl {
u16 curr_buf_size;
};
+struct mwifiex_ds_ant_cfg {
+ u32 tx_ant;
+ u32 rx_ant;
+};
+
#define MWIFIEX_NUM_OF_CMD_BUFFER 20
#define MWIFIEX_SIZE_OF_CMD_BUFFER 2048
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index d6b4fb0..82e63ce 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1349,22 +1349,16 @@ static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv, u8 *mac)
{
u8 mac_address[ETH_ALEN];
int ret;
- u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
- if (mac) {
- if (!memcmp(mac, zero_mac, sizeof(zero_mac)))
- memcpy((u8 *) &mac_address,
- (u8 *) &priv->curr_bss_params.bss_descriptor.
- mac_address, ETH_ALEN);
- else
- memcpy((u8 *) &mac_address, (u8 *) mac, ETH_ALEN);
- } else {
- memcpy((u8 *) &mac_address, (u8 *) &priv->curr_bss_params.
- bss_descriptor.mac_address, ETH_ALEN);
- }
+ if (!mac || is_zero_ether_addr(mac))
+ memcpy(mac_address,
+ priv->curr_bss_params.bss_descriptor.mac_address,
+ ETH_ALEN);
+ else
+ memcpy(mac_address, mac, ETH_ALEN);
ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_DEAUTHENTICATE,
- HostCmd_ACT_GEN_SET, 0, &mac_address);
+ HostCmd_ACT_GEN_SET, 0, mac_address);
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 3192855..4680362 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -190,7 +190,8 @@ process_start:
adapter->tx_lock_flag)
break;
- if (adapter->scan_processing || adapter->data_sent ||
+ if ((adapter->scan_processing &&
+ !adapter->scan_delay_cnt) || adapter->data_sent ||
mwifiex_wmm_lists_empty(adapter)) {
if (adapter->cmd_sent || adapter->curr_cmd ||
(!is_command_pending(adapter)))
@@ -244,8 +245,8 @@ process_start:
}
}
- if (!adapter->scan_processing && !adapter->data_sent &&
- !mwifiex_wmm_lists_empty(adapter)) {
+ if ((!adapter->scan_processing || adapter->scan_delay_cnt) &&
+ !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter)) {
mwifiex_wmm_process_tx(adapter);
if (adapter->hs_activated) {
adapter->is_hs_configured = false;
@@ -376,7 +377,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
goto done;
err_add_intf:
- mwifiex_del_virtual_intf(adapter->wiphy, priv->netdev);
+ mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev);
rtnl_unlock();
err_init_fw:
pr_debug("info: %s: unregister device\n", __func__);
@@ -843,7 +844,7 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
rtnl_lock();
if (priv->wdev && priv->netdev)
- mwifiex_del_virtual_intf(adapter->wiphy, priv->netdev);
+ mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev);
rtnl_unlock();
}
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index bd3b0bf..e7c2a82 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -79,14 +79,17 @@ enum {
#define SCAN_BEACON_ENTRY_PAD 6
-#define MWIFIEX_PASSIVE_SCAN_CHAN_TIME 200
-#define MWIFIEX_ACTIVE_SCAN_CHAN_TIME 200
-#define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME 110
+#define MWIFIEX_PASSIVE_SCAN_CHAN_TIME 110
+#define MWIFIEX_ACTIVE_SCAN_CHAN_TIME 30
+#define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME 30
#define SCAN_RSSI(RSSI) (0x100 - ((u8)(RSSI)))
#define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
+#define MWIFIEX_MAX_SCAN_DELAY_CNT 50
+#define MWIFIEX_SCAN_DELAY_MSEC 20
+
#define RSN_GTK_OUI_OFFSET 2
#define MWIFIEX_OUI_NOT_PRESENT 0
@@ -482,6 +485,7 @@ struct mwifiex_private {
u16 proberesp_idx;
u16 assocresp_idx;
u16 rsn_idx;
+ struct timer_list scan_delay_timer;
};
enum mwifiex_ba_status {
@@ -674,7 +678,6 @@ struct mwifiex_adapter {
u8 hw_dev_mcs_support;
u8 adhoc_11n_enabled;
u8 sec_chan_offset;
- enum nl80211_channel_type channel_type;
struct mwifiex_dbg dbg;
u8 arp_filter[ARP_FILTER_MAX_BUF_SIZE];
u32 arp_filter_size;
@@ -686,6 +689,7 @@ struct mwifiex_adapter {
struct completion fw_load;
u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
u16 max_mgmt_ie_index;
+ u8 scan_delay_cnt;
};
int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -819,9 +823,7 @@ int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask,
u32 mwifiex_get_active_data_rates(struct mwifiex_private *priv,
u8 *rates);
u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates);
-u8 mwifiex_data_rate_to_index(u32 rate);
u8 mwifiex_is_rate_auto(struct mwifiex_private *priv);
-int mwifiex_get_rate_index(u16 *rateBitmap, int size);
extern u16 region_code_index[MWIFIEX_MAX_REGION_CODE];
void mwifiex_save_curr_bcn(struct mwifiex_private *priv);
void mwifiex_free_curr_bcn(struct mwifiex_private *priv);
@@ -835,6 +837,9 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
int mwifiex_set_secure_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_config,
struct cfg80211_ap_settings *params);
+void mwifiex_set_ht_params(struct mwifiex_private *priv,
+ struct mwifiex_uap_bss_param *bss_cfg,
+ struct cfg80211_ap_settings *params);
/*
* This function checks if the queuing is RA based or not.
@@ -937,16 +942,13 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type);
int mwifiex_enable_hs(struct mwifiex_adapter *adapter);
int mwifiex_disable_auto_ds(struct mwifiex_private *priv);
-int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
- struct mwifiex_rate_cfg *rate);
+int mwifiex_drv_get_data_rate(struct mwifiex_private *priv, u32 *rate);
int mwifiex_request_scan(struct mwifiex_private *priv,
struct cfg80211_ssid *req_ssid);
-int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
- struct mwifiex_user_scan_cfg *scan_req);
+int mwifiex_scan_networks(struct mwifiex_private *priv,
+ const struct mwifiex_user_scan_cfg *user_scan_in);
int mwifiex_set_radio(struct mwifiex_private *priv, u8 option);
-int mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, u16 channel);
-
int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
int key_len, u8 key_index, const u8 *mac_addr,
int disable);
@@ -985,9 +987,6 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
int mwifiex_main_process(struct mwifiex_adapter *);
-int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel);
-int mwifiex_bss_set_channel(struct mwifiex_private *,
- struct mwifiex_chan_freq_power *cfp);
int mwifiex_get_bss_info(struct mwifiex_private *,
struct mwifiex_bss_info *);
int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
@@ -998,15 +997,17 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc);
-struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
- char *name, enum nl80211_iftype type,
- u32 *flags, struct vif_params *params);
-int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev);
+struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
+ char *name,
+ enum nl80211_iftype type,
+ u32 *flags,
+ struct vif_params *params);
+int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev);
void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config);
int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
- struct cfg80211_ap_settings *params);
+ struct cfg80211_beacon_data *data);
int mwifiex_del_mgmt_ies(struct mwifiex_private *priv);
u8 *mwifiex_11d_code_2_region(u8 code);
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 74f0457..04dc7ca 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -28,7 +28,10 @@
/* The maximum number of channels the firmware can scan per command */
#define MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN 14
-#define MWIFIEX_CHANNELS_PER_SCAN_CMD 4
+#define MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD 4
+#define MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD 15
+#define MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD 27
+#define MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD 35
/* Memory needed to store a max sized Channel List TLV for a firmware scan */
#define CHAN_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_header) \
@@ -471,7 +474,7 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
* This routine is used for any scan that is not provided with a
* specific channel list to scan.
*/
-static void
+static int
mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
const struct mwifiex_user_scan_cfg
*user_scan_in,
@@ -528,6 +531,7 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
}
}
+ return chan_idx;
}
/*
@@ -727,6 +731,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
u32 num_probes;
u32 ssid_len;
u32 chan_idx;
+ u32 chan_num;
u32 scan_type;
u16 scan_dur;
u8 channel;
@@ -850,7 +855,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
if (*filtered_scan)
*max_chan_per_scan = MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN;
else
- *max_chan_per_scan = MWIFIEX_CHANNELS_PER_SCAN_CMD;
+ *max_chan_per_scan = MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD;
/* If the input config or adapter has the number of Probes set,
add tlv */
@@ -962,13 +967,28 @@ mwifiex_config_scan(struct mwifiex_private *priv,
dev_dbg(adapter->dev,
"info: Scan: Scanning current channel only\n");
}
-
+ chan_num = chan_idx;
} else {
dev_dbg(adapter->dev,
"info: Scan: Creating full region channel list\n");
- mwifiex_scan_create_channel_list(priv, user_scan_in,
- scan_chan_list,
- *filtered_scan);
+ chan_num = mwifiex_scan_create_channel_list(priv, user_scan_in,
+ scan_chan_list,
+ *filtered_scan);
+ }
+
+ /*
+ * In the associated state, reduce the number of channels scanned per
+ * scan command to avoid traffic delay or loss. The number is chosen
+ * based on the total number of channels to be scanned, within the
+ * constraints of the command buffers.
+ */
+ if (priv->media_connected) {
+ if (chan_num < MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD)
+ *max_chan_per_scan = 1;
+ else if (chan_num < MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD)
+ *max_chan_per_scan = 2;
+ else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD)
+ *max_chan_per_scan = 3;
}
}
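
As a concrete reading of the thresholds above: a full 2.4 GHz sweep of 14 channels while associated falls under the 15-channel limit, so each scan command carries a single channel and the main loop can interleave Tx traffic between commands; a larger sweep of, say, 30 channels gets three channels per command, while an unassociated scan keeps the default of four.
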
@@ -1014,14 +1034,12 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter,
case TLV_TYPE_TSFTIMESTAMP:
dev_dbg(adapter->dev, "info: SCAN_RESP: TSF "
"timestamp TLV, len = %d\n", tlv_len);
- *tlv_data = (struct mwifiex_ie_types_data *)
- current_tlv;
+ *tlv_data = current_tlv;
break;
case TLV_TYPE_CHANNELBANDLIST:
dev_dbg(adapter->dev, "info: SCAN_RESP: channel"
" band list TLV, len = %d\n", tlv_len);
- *tlv_data = (struct mwifiex_ie_types_data *)
- current_tlv;
+ *tlv_data = current_tlv;
break;
default:
dev_err(adapter->dev,
@@ -1226,15 +1244,15 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
bss_entry->beacon_buf);
break;
case WLAN_EID_BSS_COEX_2040:
- bss_entry->bcn_bss_co_2040 = (u8 *) (current_ptr +
- sizeof(struct ieee_types_header));
+ bss_entry->bcn_bss_co_2040 = current_ptr +
+ sizeof(struct ieee_types_header);
bss_entry->bss_co_2040_offset = (u16) (current_ptr +
sizeof(struct ieee_types_header) -
bss_entry->beacon_buf);
break;
case WLAN_EID_EXT_CAPABILITY:
- bss_entry->bcn_ext_cap = (u8 *) (current_ptr +
- sizeof(struct ieee_types_header));
+ bss_entry->bcn_ext_cap = current_ptr +
+ sizeof(struct ieee_types_header);
bss_entry->ext_cap_offset = (u16) (current_ptr +
sizeof(struct ieee_types_header) -
bss_entry->beacon_buf);
@@ -1276,8 +1294,8 @@ mwifiex_radio_type_to_band(u8 radio_type)
* order to send the appropriate scan commands to firmware to populate or
* update the internal driver scan table.
*/
-static int mwifiex_scan_networks(struct mwifiex_private *priv,
- const struct mwifiex_user_scan_cfg *user_scan_in)
+int mwifiex_scan_networks(struct mwifiex_private *priv,
+ const struct mwifiex_user_scan_cfg *user_scan_in)
{
int ret = 0;
struct mwifiex_adapter *adapter = priv->adapter;
@@ -1342,6 +1360,7 @@ static int mwifiex_scan_networks(struct mwifiex_private *priv,
adapter->cmd_queued = cmd_node;
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
true);
+ queue_work(adapter->workqueue, &adapter->main_work);
} else {
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
flags);
@@ -1358,26 +1377,6 @@ static int mwifiex_scan_networks(struct mwifiex_private *priv,
}
/*
- * Sends IOCTL request to start a scan with user configurations.
- *
- * This function allocates the IOCTL request buffer, fills it
- * with requisite parameters and calls the IOCTL handler.
- *
- * Upon completion, it also generates a wireless event to notify
- * applications.
- */
-int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
- struct mwifiex_user_scan_cfg *scan_req)
-{
- int status;
-
- status = mwifiex_scan_networks(priv, scan_req);
- queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
-
- return status;
-}
-
-/*
* This function prepares a scan command to be sent to the firmware.
*
* This uses the scan command configuration sent to the command processing
@@ -1683,8 +1682,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
goto done;
}
if (element_id == WLAN_EID_DS_PARAMS) {
- channel = *(u8 *) (current_ptr +
- sizeof(struct ieee_types_header));
+ channel = *(current_ptr + sizeof(struct ieee_types_header));
break;
}
@@ -1772,14 +1770,23 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
priv->user_scan_cfg = NULL;
}
} else {
- /* Get scan command from scan_pending_q and put to
- cmd_pending_q */
- cmd_node = list_first_entry(&adapter->scan_pending_q,
- struct cmd_ctrl_node, list);
- list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
-
- mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
+ if (!mwifiex_wmm_lists_empty(adapter)) {
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
+ adapter->scan_delay_cnt = 1;
+ mod_timer(&priv->scan_delay_timer, jiffies +
+ msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
+ } else {
+ /* Get scan command from scan_pending_q and put to
+ cmd_pending_q */
+ cmd_node = list_first_entry(&adapter->scan_pending_q,
+ struct cmd_ctrl_node, list);
+ list_del(&cmd_node->list);
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
+ mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
+ true);
+ }
}
done:
@@ -2010,12 +2017,11 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
if (curr_bss->bcn_bss_co_2040)
curr_bss->bcn_bss_co_2040 =
- (u8 *) (curr_bss->beacon_buf +
- curr_bss->bss_co_2040_offset);
+ (curr_bss->beacon_buf + curr_bss->bss_co_2040_offset);
if (curr_bss->bcn_ext_cap)
- curr_bss->bcn_ext_cap = (u8 *) (curr_bss->beacon_buf +
- curr_bss->ext_cap_offset);
+ curr_bss->bcn_ext_cap = curr_bss->beacon_buf +
+ curr_bss->ext_cap_offset;
}
/*
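[Editor's note] The block comment added in the scan.c hunk above describes a throttling rule: while associated, the driver caps how many channels go into one scan command so data traffic is not starved during a long scan. Read in isolation, the rule looks like the helper sketched below. This is a minimal sketch only; the MWIFIEX_LIMIT_*_PER_SCAN_CMD threshold values are not shown in the hunk, so the numbers here are illustrative placeholders, not the driver's real constants.

    #include <stdbool.h>

    /* Illustrative thresholds; the real MWIFIEX_LIMIT_* values live in the
     * driver headers and are not part of this hunk. */
    #define LIMIT_1_CHANNEL_PER_SCAN_CMD   15
    #define LIMIT_2_CHANNELS_PER_SCAN_CMD  27
    #define LIMIT_3_CHANNELS_PER_SCAN_CMD  35

    /* Cap the channels packed into a single scan command while associated,
     * mirroring the rule in the hunk above; when idle, keep the default. */
    static unsigned int scan_chan_limit(bool associated, unsigned int chan_num,
                                        unsigned int max_chan_per_scan)
    {
        if (!associated)
            return max_chan_per_scan;

        if (chan_num < LIMIT_1_CHANNEL_PER_SCAN_CMD)
            return 1;
        if (chan_num < LIMIT_2_CHANNELS_PER_SCAN_CMD)
            return 2;
        if (chan_num < LIMIT_3_CHANNELS_PER_SCAN_CMD)
            return 3;

        return max_chan_per_scan;
    }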
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index e037747..fc8a9bf 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -978,10 +978,10 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
adapter->event_cause = *(u32 *) skb->data;
- skb_pull(skb, MWIFIEX_EVENT_HEADER_LEN);
-
if ((skb->len > 0) && (skb->len < MAX_EVENT_SIZE))
- memcpy(adapter->event_body, skb->data, skb->len);
+ memcpy(adapter->event_body,
+ skb->data + MWIFIEX_EVENT_HEADER_LEN,
+ skb->len);
/* event cause has been saved to adapter->event_cause */
adapter->event_received = true;
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 40e025d..df3a33c 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -260,6 +260,56 @@ static int mwifiex_cmd_tx_power_cfg(struct host_cmd_ds_command *cmd,
}
/*
+ * This function prepares command to get RF Tx power.
+ */
+static int mwifiex_cmd_rf_tx_power(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, void *data_buf)
+{
+ struct host_cmd_ds_rf_tx_pwr *txp = &cmd->params.txp;
+
+ cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_rf_tx_pwr)
+ + S_DS_GEN);
+ cmd->command = cpu_to_le16(HostCmd_CMD_RF_TX_PWR);
+ txp->action = cpu_to_le16(cmd_action);
+
+ return 0;
+}
+
+/*
+ * This function prepares command to set rf antenna.
+ */
+static int mwifiex_cmd_rf_antenna(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action,
+ struct mwifiex_ds_ant_cfg *ant_cfg)
+{
+ struct host_cmd_ds_rf_ant_mimo *ant_mimo = &cmd->params.ant_mimo;
+ struct host_cmd_ds_rf_ant_siso *ant_siso = &cmd->params.ant_siso;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_RF_ANTENNA);
+
+ if (cmd_action != HostCmd_ACT_GEN_SET)
+ return 0;
+
+ if (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) {
+ cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_rf_ant_mimo) +
+ S_DS_GEN);
+ ant_mimo->action_tx = cpu_to_le16(HostCmd_ACT_SET_TX);
+ ant_mimo->tx_ant_mode = cpu_to_le16((u16)ant_cfg->tx_ant);
+ ant_mimo->action_rx = cpu_to_le16(HostCmd_ACT_SET_RX);
+ ant_mimo->rx_ant_mode = cpu_to_le16((u16)ant_cfg->rx_ant);
+ } else {
+ cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_rf_ant_siso) +
+ S_DS_GEN);
+ ant_siso->action = cpu_to_le16(HostCmd_ACT_SET_BOTH);
+ ant_siso->ant_mode = cpu_to_le16((u16)ant_cfg->tx_ant);
+ }
+
+ return 0;
+}
+
+/*
* This function prepares command to set Host Sleep configuration.
*
* Preparation includes -
@@ -695,40 +745,6 @@ static int mwifiex_cmd_802_11d_domain_info(struct mwifiex_private *priv,
}
/*
- * This function prepares command to set/get RF channel.
- *
- * Preparation includes -
- * - Setting command ID, action and proper size
- * - Setting RF type and current RF channel (for SET only)
- * - Ensuring correct endian-ness
- */
-static int mwifiex_cmd_802_11_rf_channel(struct mwifiex_private *priv,
- struct host_cmd_ds_command *cmd,
- u16 cmd_action, u16 *channel)
-{
- struct host_cmd_ds_802_11_rf_channel *rf_chan =
- &cmd->params.rf_channel;
- uint16_t rf_type = le16_to_cpu(rf_chan->rf_type);
-
- cmd->command = cpu_to_le16(HostCmd_CMD_802_11_RF_CHANNEL);
- cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_rf_channel)
- + S_DS_GEN);
-
- if (cmd_action == HostCmd_ACT_GEN_SET) {
- if ((priv->adapter->adhoc_start_band & BAND_A) ||
- (priv->adapter->adhoc_start_band & BAND_AN))
- rf_chan->rf_type =
- cpu_to_le16(HostCmd_SCAN_RADIO_TYPE_A);
-
- rf_type = le16_to_cpu(rf_chan->rf_type);
- SET_SECONDARYCHAN(rf_type, priv->adapter->sec_chan_offset);
- rf_chan->current_channel = cpu_to_le16(*channel);
- }
- rf_chan->action = cpu_to_le16(cmd_action);
- return 0;
-}
-
-/*
* This function prepares command to set/get IBSS coalescing status.
*
* Preparation includes -
@@ -793,8 +809,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
struct host_cmd_ds_mac_reg_access *mac_reg;
cmd->size = cpu_to_le16(sizeof(*mac_reg) + S_DS_GEN);
- mac_reg = (struct host_cmd_ds_mac_reg_access *) &cmd->
- params.mac_reg;
+ mac_reg = &cmd->params.mac_reg;
mac_reg->action = cpu_to_le16(cmd_action);
mac_reg->offset =
cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -806,8 +821,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
struct host_cmd_ds_bbp_reg_access *bbp_reg;
cmd->size = cpu_to_le16(sizeof(*bbp_reg) + S_DS_GEN);
- bbp_reg = (struct host_cmd_ds_bbp_reg_access *)
- &cmd->params.bbp_reg;
+ bbp_reg = &cmd->params.bbp_reg;
bbp_reg->action = cpu_to_le16(cmd_action);
bbp_reg->offset =
cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -819,8 +833,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
struct host_cmd_ds_rf_reg_access *rf_reg;
cmd->size = cpu_to_le16(sizeof(*rf_reg) + S_DS_GEN);
- rf_reg = (struct host_cmd_ds_rf_reg_access *)
- &cmd->params.rf_reg;
+ rf_reg = &cmd->params.rf_reg;
rf_reg->action = cpu_to_le16(cmd_action);
rf_reg->offset = cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
rf_reg->value = (u8) le32_to_cpu(reg_rw->value);
@@ -831,8 +844,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
struct host_cmd_ds_pmic_reg_access *pmic_reg;
cmd->size = cpu_to_le16(sizeof(*pmic_reg) + S_DS_GEN);
- pmic_reg = (struct host_cmd_ds_pmic_reg_access *) &cmd->
- params.pmic_reg;
+ pmic_reg = &cmd->params.pmic_reg;
pmic_reg->action = cpu_to_le16(cmd_action);
pmic_reg->offset =
cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -844,8 +856,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
struct host_cmd_ds_rf_reg_access *cau_reg;
cmd->size = cpu_to_le16(sizeof(*cau_reg) + S_DS_GEN);
- cau_reg = (struct host_cmd_ds_rf_reg_access *)
- &cmd->params.rf_reg;
+ cau_reg = &cmd->params.rf_reg;
cau_reg->action = cpu_to_le16(cmd_action);
cau_reg->offset =
cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -856,7 +867,6 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
{
struct mwifiex_ds_read_eeprom *rd_eeprom = data_buf;
struct host_cmd_ds_802_11_eeprom_access *cmd_eeprom =
- (struct host_cmd_ds_802_11_eeprom_access *)
&cmd->params.eeprom;
cmd->size = cpu_to_le16(sizeof(*cmd_eeprom) + S_DS_GEN);
@@ -1055,6 +1065,14 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_tx_power_cfg(cmd_ptr, cmd_action,
data_buf);
break;
+ case HostCmd_CMD_RF_TX_PWR:
+ ret = mwifiex_cmd_rf_tx_power(priv, cmd_ptr, cmd_action,
+ data_buf);
+ break;
+ case HostCmd_CMD_RF_ANTENNA:
+ ret = mwifiex_cmd_rf_antenna(priv, cmd_ptr, cmd_action,
+ data_buf);
+ break;
case HostCmd_CMD_802_11_PS_MODE_ENH:
ret = mwifiex_cmd_enh_power_mode(priv, cmd_ptr, cmd_action,
(uint16_t)cmd_oid, data_buf);
@@ -1117,10 +1135,6 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
S_DS_GEN);
ret = 0;
break;
- case HostCmd_CMD_802_11_RF_CHANNEL:
- ret = mwifiex_cmd_802_11_rf_channel(priv, cmd_ptr, cmd_action,
- data_buf);
- break;
case HostCmd_CMD_FUNC_INIT:
if (priv->adapter->hw_status == MWIFIEX_HW_STATUS_RESET)
priv->adapter->hw_status = MWIFIEX_HW_STATUS_READY;
@@ -1283,7 +1297,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
priv->data_rate = 0;
/* get tx power */
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_TXPWR_CFG,
+ ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_RF_TX_PWR,
HostCmd_ACT_GEN_GET, 0, NULL);
if (ret)
return -1;
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index a79ed9b..0b09004 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -227,7 +227,7 @@ static int mwifiex_ret_get_log(struct mwifiex_private *priv,
struct mwifiex_ds_get_stats *stats)
{
struct host_cmd_ds_802_11_get_log *get_log =
- (struct host_cmd_ds_802_11_get_log *) &resp->params.get_log;
+ &resp->params.get_log;
if (stats) {
stats->mcast_tx_frame = le32_to_cpu(get_log->mcast_tx_frame);
@@ -267,12 +267,10 @@ static int mwifiex_ret_get_log(struct mwifiex_private *priv,
*
* Based on the new rate bitmaps, the function re-evaluates if
* auto data rate has been activated. If not, it sends another
- * query to the firmware to get the current Tx data rate and updates
- * the driver value.
+ * query to the firmware to get the current Tx data rate.
*/
static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
- struct host_cmd_ds_command *resp,
- struct mwifiex_rate_cfg *ds_rate)
+ struct host_cmd_ds_command *resp)
{
struct host_cmd_ds_tx_rate_cfg *rate_cfg = &resp->params.tx_rate_cfg;
struct mwifiex_rate_scope *rate_scope;
@@ -280,9 +278,8 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
u16 tlv, tlv_buf_len;
u8 *tlv_buf;
u32 i;
- int ret = 0;
- tlv_buf = (u8 *) ((u8 *) rate_cfg) +
+ tlv_buf = ((u8 *)rate_cfg) +
sizeof(struct host_cmd_ds_tx_rate_cfg);
tlv_buf_len = *(u16 *) (tlv_buf + sizeof(u16));
@@ -318,33 +315,11 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
if (priv->is_data_rate_auto)
priv->data_rate = 0;
else
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_TX_RATE_QUERY,
- HostCmd_ACT_GEN_GET, 0, NULL);
-
- if (!ds_rate)
- return ret;
-
- if (le16_to_cpu(rate_cfg->action) == HostCmd_ACT_GEN_GET) {
- if (priv->is_data_rate_auto) {
- ds_rate->is_rate_auto = 1;
- return ret;
- }
- ds_rate->rate = mwifiex_get_rate_index(priv->bitmap_rates,
- sizeof(priv->bitmap_rates));
-
- if (ds_rate->rate >= MWIFIEX_RATE_BITMAP_OFDM0 &&
- ds_rate->rate <= MWIFIEX_RATE_BITMAP_OFDM7)
- ds_rate->rate -= (MWIFIEX_RATE_BITMAP_OFDM0 -
- MWIFIEX_RATE_INDEX_OFDM0);
-
- if (ds_rate->rate >= MWIFIEX_RATE_BITMAP_MCS0 &&
- ds_rate->rate <= MWIFIEX_RATE_BITMAP_MCS127)
- ds_rate->rate -= (MWIFIEX_RATE_BITMAP_MCS0 -
- MWIFIEX_RATE_INDEX_MCS0);
- }
+ return mwifiex_send_cmd_async(priv,
+ HostCmd_CMD_802_11_TX_RATE_QUERY,
+ HostCmd_ACT_GEN_GET, 0, NULL);
- return ret;
+ return 0;
}
/*
@@ -451,6 +426,57 @@ static int mwifiex_ret_tx_power_cfg(struct mwifiex_private *priv,
}
/*
+ * This function handles the command response of get RF Tx power.
+ */
+static int mwifiex_ret_rf_tx_power(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ struct host_cmd_ds_rf_tx_pwr *txp = &resp->params.txp;
+ u16 action = le16_to_cpu(txp->action);
+
+ priv->tx_power_level = le16_to_cpu(txp->cur_level);
+
+ if (action == HostCmd_ACT_GEN_GET) {
+ priv->max_tx_power_level = txp->max_power;
+ priv->min_tx_power_level = txp->min_power;
+ }
+
+ dev_dbg(priv->adapter->dev,
+ "Current TxPower Level=%d, Max Power=%d, Min Power=%d\n",
+ priv->tx_power_level, priv->max_tx_power_level,
+ priv->min_tx_power_level);
+
+ return 0;
+}
+
+/*
+ * This function handles the command response of set rf antenna
+ */
+static int mwifiex_ret_rf_antenna(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ struct host_cmd_ds_rf_ant_mimo *ant_mimo = &resp->params.ant_mimo;
+ struct host_cmd_ds_rf_ant_siso *ant_siso = &resp->params.ant_siso;
+ struct mwifiex_adapter *adapter = priv->adapter;
+
+ if (adapter->hw_dev_mcs_support == HT_STREAM_2X2)
+ dev_dbg(adapter->dev,
+ "RF_ANT_RESP: Tx action = 0x%x, Tx Mode = 0x%04x"
+ " Rx action = 0x%x, Rx Mode = 0x%04x\n",
+ le16_to_cpu(ant_mimo->action_tx),
+ le16_to_cpu(ant_mimo->tx_ant_mode),
+ le16_to_cpu(ant_mimo->action_rx),
+ le16_to_cpu(ant_mimo->rx_ant_mode));
+ else
+ dev_dbg(adapter->dev,
+ "RF_ANT_RESP: action = 0x%x, Mode = 0x%04x\n",
+ le16_to_cpu(ant_siso->action),
+ le16_to_cpu(ant_siso->ant_mode));
+
+ return 0;
+}
+
+/*
* This function handles the command response of set/get MAC address.
*
* Handling includes saving the MAC address in driver.
@@ -605,34 +631,6 @@ static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv,
}
/*
- * This function handles the command response of get RF channel.
- *
- * Handling includes changing the header fields into CPU format
- * and saving the new channel in driver.
- */
-static int mwifiex_ret_802_11_rf_channel(struct mwifiex_private *priv,
- struct host_cmd_ds_command *resp,
- u16 *data_buf)
-{
- struct host_cmd_ds_802_11_rf_channel *rf_channel =
- &resp->params.rf_channel;
- u16 new_channel = le16_to_cpu(rf_channel->current_channel);
-
- if (priv->curr_bss_params.bss_descriptor.channel != new_channel) {
- dev_dbg(priv->adapter->dev, "cmd: Channel Switch: %d to %d\n",
- priv->curr_bss_params.bss_descriptor.channel,
- new_channel);
- /* Update the channel again */
- priv->curr_bss_params.bss_descriptor.channel = new_channel;
- }
-
- if (data_buf)
- *data_buf = new_channel;
-
- return 0;
-}
-
-/*
* This function handles the command response of get extended version.
*
* Handling includes forming the extended version string and sending it
@@ -679,39 +677,33 @@ static int mwifiex_ret_reg_access(u16 type, struct host_cmd_ds_command *resp,
eeprom = data_buf;
switch (type) {
case HostCmd_CMD_MAC_REG_ACCESS:
- r.mac = (struct host_cmd_ds_mac_reg_access *)
- &resp->params.mac_reg;
+ r.mac = &resp->params.mac_reg;
reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.mac->offset));
reg_rw->value = r.mac->value;
break;
case HostCmd_CMD_BBP_REG_ACCESS:
- r.bbp = (struct host_cmd_ds_bbp_reg_access *)
- &resp->params.bbp_reg;
+ r.bbp = &resp->params.bbp_reg;
reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.bbp->offset));
reg_rw->value = cpu_to_le32((u32) r.bbp->value);
break;
case HostCmd_CMD_RF_REG_ACCESS:
- r.rf = (struct host_cmd_ds_rf_reg_access *)
- &resp->params.rf_reg;
+ r.rf = &resp->params.rf_reg;
reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.rf->offset));
reg_rw->value = cpu_to_le32((u32) r.bbp->value);
break;
case HostCmd_CMD_PMIC_REG_ACCESS:
- r.pmic = (struct host_cmd_ds_pmic_reg_access *)
- &resp->params.pmic_reg;
+ r.pmic = &resp->params.pmic_reg;
reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.pmic->offset));
reg_rw->value = cpu_to_le32((u32) r.pmic->value);
break;
case HostCmd_CMD_CAU_REG_ACCESS:
- r.rf = (struct host_cmd_ds_rf_reg_access *)
- &resp->params.rf_reg;
+ r.rf = &resp->params.rf_reg;
reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.rf->offset));
reg_rw->value = cpu_to_le32((u32) r.rf->value);
break;
case HostCmd_CMD_802_11_EEPROM_ACCESS:
- r.eeprom = (struct host_cmd_ds_802_11_eeprom_access *)
- &resp->params.eeprom;
+ r.eeprom = &resp->params.eeprom;
pr_debug("info: EEPROM read len=%x\n", r.eeprom->byte_count);
if (le16_to_cpu(eeprom->byte_count) <
le16_to_cpu(r.eeprom->byte_count)) {
@@ -787,7 +779,7 @@ static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
struct mwifiex_ds_misc_subsc_evt *sub_event)
{
struct host_cmd_ds_802_11_subsc_evt *cmd_sub_event =
- (struct host_cmd_ds_802_11_subsc_evt *)&resp->params.subsc_evt;
+ &resp->params.subsc_evt;
/* For every subscribe event command (Get/Set/Clear), FW reports the
* current set of subscribed events*/
@@ -833,7 +825,7 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
ret = mwifiex_ret_mac_multicast_adr(priv, resp);
break;
case HostCmd_CMD_TX_RATE_CFG:
- ret = mwifiex_ret_tx_rate_cfg(priv, resp, data_buf);
+ ret = mwifiex_ret_tx_rate_cfg(priv, resp);
break;
case HostCmd_CMD_802_11_SCAN:
ret = mwifiex_ret_802_11_scan(priv, resp);
@@ -847,6 +839,12 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
case HostCmd_CMD_TXPWR_CFG:
ret = mwifiex_ret_tx_power_cfg(priv, resp);
break;
+ case HostCmd_CMD_RF_TX_PWR:
+ ret = mwifiex_ret_rf_tx_power(priv, resp);
+ break;
+ case HostCmd_CMD_RF_ANTENNA:
+ ret = mwifiex_ret_rf_antenna(priv, resp);
+ break;
case HostCmd_CMD_802_11_PS_MODE_ENH:
ret = mwifiex_ret_enh_power_mode(priv, resp, data_buf);
break;
@@ -878,9 +876,6 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
case HostCmd_CMD_802_11_TX_RATE_QUERY:
ret = mwifiex_ret_802_11_tx_rate_query(priv, resp);
break;
- case HostCmd_CMD_802_11_RF_CHANNEL:
- ret = mwifiex_ret_802_11_rf_channel(priv, resp, data_buf);
- break;
case HostCmd_CMD_VERSION_EXT:
ret = mwifiex_ret_ver_ext(priv, resp, data_buf);
break;
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 4ace5a3..b8614a8 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -406,9 +406,9 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_UAP_STA_ASSOC:
- skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
memset(&sinfo, 0, sizeof(sinfo));
- event = (struct mwifiex_assoc_event *)adapter->event_skb->data;
+ event = (struct mwifiex_assoc_event *)
+ (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
len = -1;
@@ -422,7 +422,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
if (len != -1) {
sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
- sinfo.assoc_req_ies = (u8 *)&event->data[len];
+ sinfo.assoc_req_ies = &event->data[len];
len = (u8 *)sinfo.assoc_req_ies -
(u8 *)&event->frame_control;
sinfo.assoc_req_ies_len =
@@ -433,9 +433,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
GFP_KERNEL);
break;
case EVENT_UAP_STA_DEAUTH:
- skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
- cfg80211_del_sta(priv->netdev, adapter->event_skb->data,
- GFP_KERNEL);
+ cfg80211_del_sta(priv->netdev, adapter->event_body +
+ MWIFIEX_UAP_EVENT_EXTRA_HEADER, GFP_KERNEL);
break;
case EVENT_UAP_BSS_IDLE:
priv->media_connected = false;
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 106c449..fb21360 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -66,9 +66,6 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
dev_dbg(adapter->dev, "cmd pending\n");
atomic_inc(&adapter->cmd_pending);
- /* Status pending, wake up main process */
- queue_work(adapter->workqueue, &adapter->main_work);
-
/* Wait for completion */
wait_event_interruptible(adapter->cmd_wait_q.wait,
*(cmd_queued->condition));
@@ -500,297 +497,24 @@ int mwifiex_disable_auto_ds(struct mwifiex_private *priv)
EXPORT_SYMBOL_GPL(mwifiex_disable_auto_ds);
/*
- * IOCTL request handler to set/get active channel.
- *
- * This function performs validity checking on channel/frequency
- * compatibility and returns failure if not valid.
- */
-int mwifiex_bss_set_channel(struct mwifiex_private *priv,
- struct mwifiex_chan_freq_power *chan)
-{
- struct mwifiex_adapter *adapter = priv->adapter;
- struct mwifiex_chan_freq_power *cfp = NULL;
-
- if (!chan)
- return -1;
-
- if (!chan->channel && !chan->freq)
- return -1;
- if (adapter->adhoc_start_band & BAND_AN)
- adapter->adhoc_start_band = BAND_G | BAND_B | BAND_GN;
- else if (adapter->adhoc_start_band & BAND_A)
- adapter->adhoc_start_band = BAND_G | BAND_B;
- if (chan->channel) {
- if (chan->channel <= MAX_CHANNEL_BAND_BG)
- cfp = mwifiex_get_cfp(priv, 0, (u16) chan->channel, 0);
- if (!cfp) {
- cfp = mwifiex_get_cfp(priv, BAND_A,
- (u16) chan->channel, 0);
- if (cfp) {
- if (adapter->adhoc_11n_enabled)
- adapter->adhoc_start_band = BAND_A
- | BAND_AN;
- else
- adapter->adhoc_start_band = BAND_A;
- }
- }
- } else {
- if (chan->freq <= MAX_FREQUENCY_BAND_BG)
- cfp = mwifiex_get_cfp(priv, 0, 0, chan->freq);
- if (!cfp) {
- cfp = mwifiex_get_cfp(priv, BAND_A, 0, chan->freq);
- if (cfp) {
- if (adapter->adhoc_11n_enabled)
- adapter->adhoc_start_band = BAND_A
- | BAND_AN;
- else
- adapter->adhoc_start_band = BAND_A;
- }
- }
- }
- if (!cfp || !cfp->channel) {
- dev_err(adapter->dev, "invalid channel/freq\n");
- return -1;
- }
- priv->adhoc_channel = (u8) cfp->channel;
- chan->channel = cfp->channel;
- chan->freq = cfp->freq;
-
- return 0;
-}
-
-/*
- * IOCTL request handler to set/get Ad-Hoc channel.
- *
- * This function prepares the correct firmware command and
- * issues it to set or get the ad-hoc channel.
- */
-static int mwifiex_bss_ioctl_ibss_channel(struct mwifiex_private *priv,
- u16 action, u16 *channel)
-{
- if (action == HostCmd_ACT_GEN_GET) {
- if (!priv->media_connected) {
- *channel = priv->adhoc_channel;
- return 0;
- }
- } else {
- priv->adhoc_channel = (u8) *channel;
- }
-
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_RF_CHANNEL,
- action, 0, channel);
-}
-
-/*
- * IOCTL request handler to change Ad-Hoc channel.
- *
- * This function allocates the IOCTL request buffer, fills it
- * with requisite parameters and calls the IOCTL handler.
- *
- * The function follows the following steps to perform the change -
- * - Get current IBSS information
- * - Get current channel
- * - If no change is required, return
- * - If not connected, change channel and return
- * - If connected,
- * - Disconnect
- * - Change channel
- * - Perform specific SSID scan with same SSID
- * - Start/Join the IBSS
- */
-int
-mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, u16 channel)
-{
- int ret;
- struct mwifiex_bss_info bss_info;
- struct mwifiex_ssid_bssid ssid_bssid;
- u16 curr_chan = 0;
- struct cfg80211_bss *bss = NULL;
- struct ieee80211_channel *chan;
- enum ieee80211_band band;
-
- memset(&bss_info, 0, sizeof(bss_info));
-
- /* Get BSS information */
- if (mwifiex_get_bss_info(priv, &bss_info))
- return -1;
-
- /* Get current channel */
- ret = mwifiex_bss_ioctl_ibss_channel(priv, HostCmd_ACT_GEN_GET,
- &curr_chan);
-
- if (curr_chan == channel) {
- ret = 0;
- goto done;
- }
- dev_dbg(priv->adapter->dev, "cmd: updating channel from %d to %d\n",
- curr_chan, channel);
-
- if (!bss_info.media_connected) {
- ret = 0;
- goto done;
- }
-
- /* Do disonnect */
- memset(&ssid_bssid, 0, ETH_ALEN);
- ret = mwifiex_deauthenticate(priv, ssid_bssid.bssid);
-
- ret = mwifiex_bss_ioctl_ibss_channel(priv, HostCmd_ACT_GEN_SET,
- &channel);
-
- /* Do specific SSID scanning */
- if (mwifiex_request_scan(priv, &bss_info.ssid)) {
- ret = -1;
- goto done;
- }
-
- band = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
- chan = __ieee80211_get_channel(priv->wdev->wiphy,
- ieee80211_channel_to_frequency(channel,
- band));
-
- /* Find the BSS we want using available scan results */
- bss = cfg80211_get_bss(priv->wdev->wiphy, chan, bss_info.bssid,
- bss_info.ssid.ssid, bss_info.ssid.ssid_len,
- WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
- if (!bss)
- wiphy_warn(priv->wdev->wiphy, "assoc: bss %pM not in scan results\n",
- bss_info.bssid);
-
- ret = mwifiex_bss_start(priv, bss, &bss_info.ssid);
-done:
- return ret;
-}
-
-/*
- * IOCTL request handler to get rate.
- *
- * This function prepares the correct firmware command and
- * issues it to get the current rate if it is connected,
- * otherwise, the function returns the lowest supported rate
- * for the band.
- */
-static int mwifiex_rate_ioctl_get_rate_value(struct mwifiex_private *priv,
- struct mwifiex_rate_cfg *rate_cfg)
-{
- rate_cfg->is_rate_auto = priv->is_data_rate_auto;
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_TX_RATE_QUERY,
- HostCmd_ACT_GEN_GET, 0, NULL);
-}
-
-/*
- * IOCTL request handler to set rate.
- *
- * This function prepares the correct firmware command and
- * issues it to set the current rate.
- *
- * The function also performs validation checking on the supplied value.
- */
-static int mwifiex_rate_ioctl_set_rate_value(struct mwifiex_private *priv,
- struct mwifiex_rate_cfg *rate_cfg)
-{
- u8 rates[MWIFIEX_SUPPORTED_RATES];
- u8 *rate;
- int rate_index, ret;
- u16 bitmap_rates[MAX_BITMAP_RATES_SIZE];
- u32 i;
- struct mwifiex_adapter *adapter = priv->adapter;
-
- if (rate_cfg->is_rate_auto) {
- memset(bitmap_rates, 0, sizeof(bitmap_rates));
- /* Support all HR/DSSS rates */
- bitmap_rates[0] = 0x000F;
- /* Support all OFDM rates */
- bitmap_rates[1] = 0x00FF;
- /* Support all HT-MCSs rate */
- for (i = 0; i < ARRAY_SIZE(priv->bitmap_rates) - 3; i++)
- bitmap_rates[i + 2] = 0xFFFF;
- bitmap_rates[9] = 0x3FFF;
- } else {
- memset(rates, 0, sizeof(rates));
- mwifiex_get_active_data_rates(priv, rates);
- rate = rates;
- for (i = 0; (rate[i] && i < MWIFIEX_SUPPORTED_RATES); i++) {
- dev_dbg(adapter->dev, "info: rate=%#x wanted=%#x\n",
- rate[i], rate_cfg->rate);
- if ((rate[i] & 0x7f) == (rate_cfg->rate & 0x7f))
- break;
- }
- if ((i == MWIFIEX_SUPPORTED_RATES) || !rate[i]) {
- dev_err(adapter->dev, "fixed data rate %#x is out "
- "of range\n", rate_cfg->rate);
- return -1;
- }
- memset(bitmap_rates, 0, sizeof(bitmap_rates));
-
- rate_index = mwifiex_data_rate_to_index(rate_cfg->rate);
-
- /* Only allow b/g rates to be set */
- if (rate_index >= MWIFIEX_RATE_INDEX_HRDSSS0 &&
- rate_index <= MWIFIEX_RATE_INDEX_HRDSSS3) {
- bitmap_rates[0] = 1 << rate_index;
- } else {
- rate_index -= 1; /* There is a 0x00 in the table */
- if (rate_index >= MWIFIEX_RATE_INDEX_OFDM0 &&
- rate_index <= MWIFIEX_RATE_INDEX_OFDM7)
- bitmap_rates[1] = 1 << (rate_index -
- MWIFIEX_RATE_INDEX_OFDM0);
- }
- }
-
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG,
- HostCmd_ACT_GEN_SET, 0, bitmap_rates);
-
- return ret;
-}
-
-/*
- * IOCTL request handler to set/get rate.
- *
- * This function can be used to set/get either the rate value or the
- * rate index.
- */
-static int mwifiex_rate_ioctl_cfg(struct mwifiex_private *priv,
- struct mwifiex_rate_cfg *rate_cfg)
-{
- int status;
-
- if (!rate_cfg)
- return -1;
-
- if (rate_cfg->action == HostCmd_ACT_GEN_GET)
- status = mwifiex_rate_ioctl_get_rate_value(priv, rate_cfg);
- else
- status = mwifiex_rate_ioctl_set_rate_value(priv, rate_cfg);
-
- return status;
-}
-
-/*
* Sends IOCTL request to get the data rate.
*
* This function allocates the IOCTL request buffer, fills it
* with requisite parameters and calls the IOCTL handler.
*/
-int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
- struct mwifiex_rate_cfg *rate)
+int mwifiex_drv_get_data_rate(struct mwifiex_private *priv, u32 *rate)
{
int ret;
- memset(rate, 0, sizeof(struct mwifiex_rate_cfg));
- rate->action = HostCmd_ACT_GEN_GET;
- ret = mwifiex_rate_ioctl_cfg(priv, rate);
+ ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_TX_RATE_QUERY,
+ HostCmd_ACT_GEN_GET, 0, NULL);
if (!ret) {
- if (rate->is_rate_auto)
- rate->rate = mwifiex_index_to_data_rate(priv,
- priv->tx_rate,
- priv->tx_htinfo
- );
+ if (priv->is_data_rate_auto)
+ *rate = mwifiex_index_to_data_rate(priv, priv->tx_rate,
+ priv->tx_htinfo);
else
- rate->rate = priv->data_rate;
- } else {
- ret = -1;
+ *rate = priv->data_rate;
}
return ret;
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index e2faec4..cecb272 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -161,15 +161,11 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
goto done;
for (i = 0; i < adapter->priv_num; i++) {
-
tpriv = adapter->priv[i];
- if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA) &&
- (tpriv->media_connected)) {
- if (netif_queue_stopped(tpriv->netdev))
- mwifiex_wake_up_net_dev_queue(tpriv->netdev,
- adapter);
- }
+ if (tpriv->media_connected &&
+ netif_queue_stopped(tpriv->netdev))
+ mwifiex_wake_up_net_dev_queue(tpriv->netdev, adapter);
}
done:
dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 76dfbc4..f40e93fe 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -26,6 +26,18 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_config,
struct cfg80211_ap_settings *params) {
int i;
+ struct mwifiex_wep_key wep_key;
+
+ if (!params->privacy) {
+ bss_config->protocol = PROTOCOL_NO_SECURITY;
+ bss_config->key_mgmt = KEY_MGMT_NONE;
+ bss_config->wpa_cfg.length = 0;
+ priv->sec_info.wep_enabled = 0;
+ priv->sec_info.wpa_enabled = 0;
+ priv->sec_info.wpa2_enabled = 0;
+
+ return 0;
+ }
switch (params->auth_type) {
case NL80211_AUTHTYPE_OPEN_SYSTEM:
@@ -54,7 +66,7 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
}
if (params->crypto.wpa_versions &
NL80211_WPA_VERSION_2) {
- bss_config->protocol = PROTOCOL_WPA2;
+ bss_config->protocol |= PROTOCOL_WPA2;
bss_config->key_mgmt = KEY_MGMT_EAP;
}
break;
@@ -66,7 +78,7 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
}
if (params->crypto.wpa_versions &
NL80211_WPA_VERSION_2) {
- bss_config->protocol = PROTOCOL_WPA2;
+ bss_config->protocol |= PROTOCOL_WPA2;
bss_config->key_mgmt = KEY_MGMT_PSK;
}
break;
@@ -80,10 +92,19 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
case WLAN_CIPHER_SUITE_WEP104:
break;
case WLAN_CIPHER_SUITE_TKIP:
- bss_config->wpa_cfg.pairwise_cipher_wpa = CIPHER_TKIP;
+ if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ bss_config->wpa_cfg.pairwise_cipher_wpa |=
+ CIPHER_TKIP;
+ if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+ bss_config->wpa_cfg.pairwise_cipher_wpa2 |=
+ CIPHER_TKIP;
break;
case WLAN_CIPHER_SUITE_CCMP:
- bss_config->wpa_cfg.pairwise_cipher_wpa2 =
+ if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ bss_config->wpa_cfg.pairwise_cipher_wpa |=
+ CIPHER_AES_CCMP;
+ if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+ bss_config->wpa_cfg.pairwise_cipher_wpa2 |=
CIPHER_AES_CCMP;
default:
break;
@@ -93,6 +114,27 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
switch (params->crypto.cipher_group) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
+ if (priv->sec_info.wep_enabled) {
+ bss_config->protocol = PROTOCOL_STATIC_WEP;
+ bss_config->key_mgmt = KEY_MGMT_NONE;
+ bss_config->wpa_cfg.length = 0;
+
+ for (i = 0; i < NUM_WEP_KEYS; i++) {
+ wep_key = priv->wep_key[i];
+ bss_config->wep_cfg[i].key_index = i;
+
+ if (priv->wep_key_curr_index == i)
+ bss_config->wep_cfg[i].is_default = 1;
+ else
+ bss_config->wep_cfg[i].is_default = 0;
+
+ bss_config->wep_cfg[i].length =
+ wep_key.key_length;
+ memcpy(&bss_config->wep_cfg[i].key,
+ &wep_key.key_material,
+ wep_key.key_length);
+ }
+ }
break;
case WLAN_CIPHER_SUITE_TKIP:
bss_config->wpa_cfg.group_cipher = CIPHER_TKIP;
@@ -107,6 +149,33 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
return 0;
}
+/* This function updates 11n related parameters from IE and sets them into
+ * bss_config structure.
+ */
+void
+mwifiex_set_ht_params(struct mwifiex_private *priv,
+ struct mwifiex_uap_bss_param *bss_cfg,
+ struct cfg80211_ap_settings *params)
+{
+ const u8 *ht_ie;
+
+ if (!ISSUPP_11NENABLED(priv->adapter->fw_cap_info))
+ return;
+
+ ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, params->beacon.tail,
+ params->beacon.tail_len);
+ if (ht_ie) {
+ memcpy(&bss_cfg->ht_cap, ht_ie + 2,
+ sizeof(struct ieee80211_ht_cap));
+ } else {
+ memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap));
+ bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP);
+ bss_cfg->ht_cap.ampdu_params_info = MWIFIEX_DEF_AMPDU;
+ }
+
+ return;
+}
+
/* This function initializes some of mwifiex_uap_bss_param variables.
* This helps FW in ignoring invalid values. These values may or may not
* be get updated to valid ones at later stage.
@@ -124,6 +193,120 @@ void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config)
}
/* This function parses BSS related parameters from structure
+ * and prepares TLVs specific to WPA/WPA2 security.
+ * These TLVs are appended to command buffer.
+ */
+static void
+mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
+{
+ struct host_cmd_tlv_pwk_cipher *pwk_cipher;
+ struct host_cmd_tlv_gwk_cipher *gwk_cipher;
+ struct host_cmd_tlv_passphrase *passphrase;
+ struct host_cmd_tlv_akmp *tlv_akmp;
+ struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
+ u16 cmd_size = *param_size;
+ u8 *tlv = *tlv_buf;
+
+ tlv_akmp = (struct host_cmd_tlv_akmp *)tlv;
+ tlv_akmp->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
+ tlv_akmp->tlv.len = cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
+ sizeof(struct host_cmd_tlv));
+ tlv_akmp->key_mgmt_operation = cpu_to_le16(bss_cfg->key_mgmt_operation);
+ tlv_akmp->key_mgmt = cpu_to_le16(bss_cfg->key_mgmt);
+ cmd_size += sizeof(struct host_cmd_tlv_akmp);
+ tlv += sizeof(struct host_cmd_tlv_akmp);
+
+ if (bss_cfg->wpa_cfg.pairwise_cipher_wpa & VALID_CIPHER_BITMAP) {
+ pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
+ pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
+ pwk_cipher->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
+ sizeof(struct host_cmd_tlv));
+ pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA);
+ pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa;
+ cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
+ tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
+ }
+
+ if (bss_cfg->wpa_cfg.pairwise_cipher_wpa2 & VALID_CIPHER_BITMAP) {
+ pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
+ pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
+ pwk_cipher->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
+ sizeof(struct host_cmd_tlv));
+ pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA2);
+ pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa2;
+ cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
+ tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
+ }
+
+ if (bss_cfg->wpa_cfg.group_cipher & VALID_CIPHER_BITMAP) {
+ gwk_cipher = (struct host_cmd_tlv_gwk_cipher *)tlv;
+ gwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
+ gwk_cipher->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_gwk_cipher) -
+ sizeof(struct host_cmd_tlv));
+ gwk_cipher->cipher = bss_cfg->wpa_cfg.group_cipher;
+ cmd_size += sizeof(struct host_cmd_tlv_gwk_cipher);
+ tlv += sizeof(struct host_cmd_tlv_gwk_cipher);
+ }
+
+ if (bss_cfg->wpa_cfg.length) {
+ passphrase = (struct host_cmd_tlv_passphrase *)tlv;
+ passphrase->tlv.type = cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
+ passphrase->tlv.len = cpu_to_le16(bss_cfg->wpa_cfg.length);
+ memcpy(passphrase->passphrase, bss_cfg->wpa_cfg.passphrase,
+ bss_cfg->wpa_cfg.length);
+ cmd_size += sizeof(struct host_cmd_tlv) +
+ bss_cfg->wpa_cfg.length;
+ tlv += sizeof(struct host_cmd_tlv) + bss_cfg->wpa_cfg.length;
+ }
+
+ *param_size = cmd_size;
+ *tlv_buf = tlv;
+
+ return;
+}
+
+/* This function parses BSS related parameters from structure
+ * and prepares TLVs specific to WEP encryption.
+ * These TLVs are appended to command buffer.
+ */
+static void
+mwifiex_uap_bss_wep(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
+{
+ struct host_cmd_tlv_wep_key *wep_key;
+ u16 cmd_size = *param_size;
+ int i;
+ u8 *tlv = *tlv_buf;
+ struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
+
+ for (i = 0; i < NUM_WEP_KEYS; i++) {
+ if (bss_cfg->wep_cfg[i].length &&
+ (bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP40 ||
+ bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP104)) {
+ wep_key = (struct host_cmd_tlv_wep_key *)tlv;
+ wep_key->tlv.type = cpu_to_le16(TLV_TYPE_UAP_WEP_KEY);
+ wep_key->tlv.len =
+ cpu_to_le16(bss_cfg->wep_cfg[i].length + 2);
+ wep_key->key_index = bss_cfg->wep_cfg[i].key_index;
+ wep_key->is_default = bss_cfg->wep_cfg[i].is_default;
+ memcpy(wep_key->key, bss_cfg->wep_cfg[i].key,
+ bss_cfg->wep_cfg[i].length);
+ cmd_size += sizeof(struct host_cmd_tlv) + 2 +
+ bss_cfg->wep_cfg[i].length;
+ tlv += sizeof(struct host_cmd_tlv) + 2 +
+ bss_cfg->wep_cfg[i].length;
+ }
+ }
+
+ *param_size = cmd_size;
+ *tlv_buf = tlv;
+
+ return;
+}
+
+/* This function parses BSS related parameters from structure
* and prepares TLVs. These TLVs are appended to command buffer.
*/
static int
@@ -132,16 +315,14 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
struct host_cmd_tlv_dtim_period *dtim_period;
struct host_cmd_tlv_beacon_period *beacon_period;
struct host_cmd_tlv_ssid *ssid;
+ struct host_cmd_tlv_bcast_ssid *bcast_ssid;
struct host_cmd_tlv_channel_band *chan_band;
struct host_cmd_tlv_frag_threshold *frag_threshold;
struct host_cmd_tlv_rts_threshold *rts_threshold;
struct host_cmd_tlv_retry_limit *retry_limit;
- struct host_cmd_tlv_pwk_cipher *pwk_cipher;
- struct host_cmd_tlv_gwk_cipher *gwk_cipher;
struct host_cmd_tlv_encrypt_protocol *encrypt_protocol;
struct host_cmd_tlv_auth_type *auth_type;
- struct host_cmd_tlv_passphrase *passphrase;
- struct host_cmd_tlv_akmp *tlv_akmp;
+ struct mwifiex_ie_types_htcap *htcap;
struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
u16 cmd_size = *param_size;
@@ -153,6 +334,14 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
cmd_size += sizeof(struct host_cmd_tlv) +
bss_cfg->ssid.ssid_len;
tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len;
+
+ bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv;
+ bcast_ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
+ bcast_ssid->tlv.len =
+ cpu_to_le16(sizeof(bcast_ssid->bcast_ctl));
+ bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl;
+ cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
+ tlv += sizeof(struct host_cmd_tlv_bcast_ssid);
}
if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) {
chan_band = (struct host_cmd_tlv_channel_band *)tlv;
@@ -223,70 +412,11 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
}
if ((bss_cfg->protocol & PROTOCOL_WPA) ||
(bss_cfg->protocol & PROTOCOL_WPA2) ||
- (bss_cfg->protocol & PROTOCOL_EAP)) {
- tlv_akmp = (struct host_cmd_tlv_akmp *)tlv;
- tlv_akmp->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
- tlv_akmp->tlv.len =
- cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
- sizeof(struct host_cmd_tlv));
- tlv_akmp->key_mgmt_operation =
- cpu_to_le16(bss_cfg->key_mgmt_operation);
- tlv_akmp->key_mgmt = cpu_to_le16(bss_cfg->key_mgmt);
- cmd_size += sizeof(struct host_cmd_tlv_akmp);
- tlv += sizeof(struct host_cmd_tlv_akmp);
-
- if (bss_cfg->wpa_cfg.pairwise_cipher_wpa &
- VALID_CIPHER_BITMAP) {
- pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
- pwk_cipher->tlv.type =
- cpu_to_le16(TLV_TYPE_PWK_CIPHER);
- pwk_cipher->tlv.len = cpu_to_le16(
- sizeof(struct host_cmd_tlv_pwk_cipher) -
- sizeof(struct host_cmd_tlv));
- pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA);
- pwk_cipher->cipher =
- bss_cfg->wpa_cfg.pairwise_cipher_wpa;
- cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
- tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
- }
- if (bss_cfg->wpa_cfg.pairwise_cipher_wpa2 &
- VALID_CIPHER_BITMAP) {
- pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
- pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
- pwk_cipher->tlv.len = cpu_to_le16(
- sizeof(struct host_cmd_tlv_pwk_cipher) -
- sizeof(struct host_cmd_tlv));
- pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA2);
- pwk_cipher->cipher =
- bss_cfg->wpa_cfg.pairwise_cipher_wpa2;
- cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
- tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
- }
- if (bss_cfg->wpa_cfg.group_cipher & VALID_CIPHER_BITMAP) {
- gwk_cipher = (struct host_cmd_tlv_gwk_cipher *)tlv;
- gwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
- gwk_cipher->tlv.len = cpu_to_le16(
- sizeof(struct host_cmd_tlv_gwk_cipher) -
- sizeof(struct host_cmd_tlv));
- gwk_cipher->cipher = bss_cfg->wpa_cfg.group_cipher;
- cmd_size += sizeof(struct host_cmd_tlv_gwk_cipher);
- tlv += sizeof(struct host_cmd_tlv_gwk_cipher);
- }
- if (bss_cfg->wpa_cfg.length) {
- passphrase = (struct host_cmd_tlv_passphrase *)tlv;
- passphrase->tlv.type =
- cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
- passphrase->tlv.len =
- cpu_to_le16(bss_cfg->wpa_cfg.length);
- memcpy(passphrase->passphrase,
- bss_cfg->wpa_cfg.passphrase,
- bss_cfg->wpa_cfg.length);
- cmd_size += sizeof(struct host_cmd_tlv) +
- bss_cfg->wpa_cfg.length;
- tlv += sizeof(struct host_cmd_tlv) +
- bss_cfg->wpa_cfg.length;
- }
- }
+ (bss_cfg->protocol & PROTOCOL_EAP))
+ mwifiex_uap_bss_wpa(&tlv, cmd_buf, &cmd_size);
+ else
+ mwifiex_uap_bss_wep(&tlv, cmd_buf, &cmd_size);
+
if ((bss_cfg->auth_mode <= WLAN_AUTH_SHARED_KEY) ||
(bss_cfg->auth_mode == MWIFIEX_AUTH_MODE_AUTO)) {
auth_type = (struct host_cmd_tlv_auth_type *)tlv;
@@ -310,6 +440,25 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
tlv += sizeof(struct host_cmd_tlv_encrypt_protocol);
}
+ if (bss_cfg->ht_cap.cap_info) {
+ htcap = (struct mwifiex_ie_types_htcap *)tlv;
+ htcap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
+ htcap->header.len =
+ cpu_to_le16(sizeof(struct ieee80211_ht_cap));
+ htcap->ht_cap.cap_info = bss_cfg->ht_cap.cap_info;
+ htcap->ht_cap.ampdu_params_info =
+ bss_cfg->ht_cap.ampdu_params_info;
+ memcpy(&htcap->ht_cap.mcs, &bss_cfg->ht_cap.mcs,
+ sizeof(struct ieee80211_mcs_info));
+ htcap->ht_cap.extended_ht_cap_info =
+ bss_cfg->ht_cap.extended_ht_cap_info;
+ htcap->ht_cap.tx_BF_cap_info = bss_cfg->ht_cap.tx_BF_cap_info;
+ htcap->ht_cap.antenna_selection_info =
+ bss_cfg->ht_cap.antenna_selection_info;
+ cmd_size += sizeof(struct mwifiex_ie_types_htcap);
+ tlv += sizeof(struct mwifiex_ie_types_htcap);
+ }
+
*param_size = cmd_size;
return 0;
@@ -401,32 +550,3 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
return 0;
}
-
-/* This function sets the RF channel for AP.
- *
- * This function populates channel information in AP config structure
- * and sends command to configure channel information in AP.
- */
-int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel)
-{
- struct mwifiex_uap_bss_param *bss_cfg;
- struct wiphy *wiphy = priv->wdev->wiphy;
-
- bss_cfg = kzalloc(sizeof(struct mwifiex_uap_bss_param), GFP_KERNEL);
- if (!bss_cfg)
- return -ENOMEM;
-
- bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
- bss_cfg->channel = channel;
-
- if (mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_SYS_CONFIG,
- HostCmd_ACT_GEN_SET,
- UAP_BSS_PARAMS_I, bss_cfg)) {
- wiphy_err(wiphy, "Failed to set the uAP channel\n");
- kfree(bss_cfg);
- return -1;
- }
-
- kfree(bss_cfg);
- return 0;
-}
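[Editor's note] The two helpers factored out of mwifiex_uap_bss_param_prepare above (mwifiex_uap_bss_wpa and mwifiex_uap_bss_wep) both follow the same append-a-TLV discipline: cast the running byte pointer to the TLV struct, fill type, length and payload, then advance the write pointer and the running command size by the same amount. The sketch below shows that discipline with generic names; tlv_hdr and tlv_append are illustrative stand-ins, not the driver's host_cmd_tlv definitions, and the real code converts the fields with cpu_to_le16().

    #include <stdint.h>
    #include <string.h>

    /* Generic TLV header; the driver's host_cmd_tlv uses little-endian fields. */
    struct tlv_hdr {
        uint16_t type;
        uint16_t len;
    };

    /* Append one TLV to the command buffer and keep the caller's write
     * cursor and running command size in sync, the way the uAP BSS helpers
     * advance *tlv_buf and *param_size. */
    static void tlv_append(uint8_t **tlv, uint16_t *cmd_size,
                           uint16_t type, const void *payload, uint16_t len)
    {
        struct tlv_hdr *hdr = (struct tlv_hdr *)*tlv;

        hdr->type = type;                 /* cpu_to_le16() in the real driver */
        hdr->len = len;
        memcpy(*tlv + sizeof(*hdr), payload, len);

        *tlv += sizeof(*hdr) + len;       /* advance write cursor */
        *cmd_size += sizeof(*hdr) + len;  /* grow the reported command size */
    }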
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 49ebf20..22a5916 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -49,6 +49,7 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
struct device *dev = adapter->dev;
u32 recv_type;
__le32 tmp;
+ int ret;
if (adapter->hs_activated)
mwifiex_process_hs_config(adapter);
@@ -69,16 +70,19 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
case MWIFIEX_USB_TYPE_CMD:
if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
dev_err(dev, "CMD: skb->len too large\n");
- return -1;
+ ret = -1;
+ goto exit_restore_skb;
} else if (!adapter->curr_cmd) {
dev_dbg(dev, "CMD: no curr_cmd\n");
if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
mwifiex_process_sleep_confirm_resp(
adapter, skb->data,
skb->len);
- return 0;
+ ret = 0;
+ goto exit_restore_skb;
}
- return -1;
+ ret = -1;
+ goto exit_restore_skb;
}
adapter->curr_cmd->resp_skb = skb;
@@ -87,20 +91,22 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
case MWIFIEX_USB_TYPE_EVENT:
if (skb->len < sizeof(u32)) {
dev_err(dev, "EVENT: skb->len too small\n");
- return -1;
+ ret = -1;
+ goto exit_restore_skb;
}
skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
adapter->event_cause = le32_to_cpu(tmp);
- skb_pull(skb, sizeof(u32));
dev_dbg(dev, "event_cause %#x\n", adapter->event_cause);
if (skb->len > MAX_EVENT_SIZE) {
dev_err(dev, "EVENT: event body too large\n");
- return -1;
+ ret = -1;
+ goto exit_restore_skb;
}
- skb_copy_from_linear_data(skb, adapter->event_body,
- skb->len);
+ memcpy(adapter->event_body, skb->data +
+ MWIFIEX_EVENT_HEADER_LEN, skb->len);
+
adapter->event_received = true;
adapter->event_skb = skb;
break;
@@ -124,6 +130,12 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
}
return -EINPROGRESS;
+
+exit_restore_skb:
+ /* The buffer will be reused for further cmds/events */
+ skb_push(skb, INTF_HEADER_LEN);
+
+ return ret;
}
static void mwifiex_usb_rx_complete(struct urb *urb)
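[Editor's note] The exit_restore_skb label added to mwifiex_usb_recv above exists because the receive buffer is reused for later commands and events: on the early-return paths the interface header stripped on entry has to be pushed back so the next pass sees a complete frame. A toy sketch of that pull/push pairing follows, using a plain buffer cursor instead of struct sk_buff; rx_buf and the 4-byte header length are assumptions for illustration only.

    #include <stddef.h>

    #define INTF_HEADER_LEN 4

    struct rx_buf {
        unsigned char *data;   /* current read position */
        size_t len;            /* bytes remaining from data */
    };

    /* Strip the interface header before parsing the frame. */
    static void buf_pull(struct rx_buf *b, size_t n)
    {
        b->data += n;
        b->len += 0;   /* length bookkeeping split out below for clarity */
        b->len -= n;
    }

    /* Restore the header when the buffer is handed back for reuse,
     * mirroring skb_push(skb, INTF_HEADER_LEN) on the error paths above. */
    static void buf_push(struct rx_buf *b, size_t n)
    {
        b->data -= n;
        b->len += n;
    }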
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index f3fc655..3fa4d41 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -404,6 +404,8 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
+ mwifiex_reset_11n_rx_seq_num(priv);
+
atomic_set(&priv->wmm.tx_pkts_queued, 0);
atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
}
@@ -1221,6 +1223,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
if (!ptr->is_11n_enabled ||
mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
+ priv->wps.session_enable ||
((priv->sec_info.wpa_enabled ||
priv->sec_info.wpa2_enabled) &&
!priv->wpa_is_gtk_set)) {
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index cf7bdc6..224e03a 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -1665,7 +1665,9 @@ mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
info = IEEE80211_SKB_CB(skb);
if (ieee80211_is_data(wh->frame_control)) {
- sta = info->control.sta;
+ rcu_read_lock();
+ sta = ieee80211_find_sta_by_ifaddr(hw, wh->addr1,
+ wh->addr2);
if (sta) {
sta_info = MWL8K_STA(sta);
BUG_ON(sta_info == NULL);
@@ -1682,6 +1684,7 @@ mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
sta_info->is_ampdu_allowed = true;
}
}
+ rcu_read_unlock();
}
ieee80211_tx_info_clear_status(info);
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c
index f7b15b8..7b751fb 100644
--- a/drivers/net/wireless/orinoco/cfg.c
+++ b/drivers/net/wireless/orinoco/cfg.c
@@ -138,7 +138,7 @@ static int orinoco_change_vif(struct wiphy *wiphy, struct net_device *dev,
return err;
}
-static int orinoco_scan(struct wiphy *wiphy, struct net_device *dev,
+static int orinoco_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *request)
{
struct orinoco_private *priv = wiphy_priv(wiphy);
@@ -160,10 +160,9 @@ static int orinoco_scan(struct wiphy *wiphy, struct net_device *dev,
return err;
}
-static int orinoco_set_channel(struct wiphy *wiphy,
- struct net_device *netdev,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type)
+static int orinoco_set_monitor_channel(struct wiphy *wiphy,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
{
struct orinoco_private *priv = wiphy_priv(wiphy);
int err = 0;
@@ -286,7 +285,7 @@ static int orinoco_set_wiphy_params(struct wiphy *wiphy, u32 changed)
const struct cfg80211_ops orinoco_cfg_ops = {
.change_virtual_intf = orinoco_change_vif,
- .set_channel = orinoco_set_channel,
+ .set_monitor_channel = orinoco_set_monitor_channel,
.scan = orinoco_scan,
.set_wiphy_params = orinoco_set_wiphy_params,
};
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index fa8ce51..1403709 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -857,7 +857,7 @@ good_eeprom:
wiphy_warn(dev->wiphy,
"Invalid hwaddr! Using randomly generated MAC addr\n");
- random_ether_addr(perm_addr);
+ eth_random_addr(perm_addr);
SET_IEEE80211_PERM_ADDR(dev, perm_addr);
}
@@ -905,7 +905,7 @@ int p54_read_eeprom(struct ieee80211_hw *dev)
while (eeprom_size) {
blocksize = min(eeprom_size, maxblocksize);
- ret = p54_download_eeprom(priv, (void *) (eeprom + offset),
+ ret = p54_download_eeprom(priv, eeprom + offset,
offset, blocksize);
if (unlikely(ret))
goto free;
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 18e82b3..9ba8510 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -478,7 +478,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
memcpy(&body->longbow.curve_data,
- (void *) entry + sizeof(__le16),
+ entry + sizeof(__le16),
priv->curve_data->entry_size);
} else {
struct p54_scan_body *chan = &body->normal;
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 82a1cac..f38786e 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -422,11 +422,11 @@ static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb)
* Clear manually, ieee80211_tx_info_clear_status would
* clear the counts too and we need them.
*/
- memset(&info->status.ampdu_ack_len, 0,
+ memset(&info->status.ack_signal, 0,
sizeof(struct ieee80211_tx_info) -
- offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));
+ offsetof(struct ieee80211_tx_info, status.ack_signal));
BUILD_BUG_ON(offsetof(struct ieee80211_tx_info,
- status.ampdu_ack_len) != 23);
+ status.ack_signal) != 20);
if (entry_hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
pad = entry_data->align[0];
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 266d45b..799e148 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -455,7 +455,7 @@ islpci_eth_receive(islpci_private *priv)
"Error mapping DMA address\n");
/* free the skbuf structure before aborting */
- dev_kfree_skb_irq((struct sk_buff *) skb);
+ dev_kfree_skb_irq(skb);
skb = NULL;
break;
}
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 86a738b..598ca1c 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -1849,7 +1849,7 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
pr_debug("ray_cs: interrupt for *dev=%p\n", dev);
local = netdev_priv(dev);
- link = (struct pcmcia_device *)local->finder;
+ link = local->finder;
if (!pcmcia_dev_present(link)) {
pr_debug(
"ray_cs interrupt from device not present or suspended.\n");
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 2e9e6af..241162e 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -484,7 +484,7 @@ static int rndis_change_virtual_intf(struct wiphy *wiphy,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params);
-static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
+static int rndis_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *request);
static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed);
@@ -1941,9 +1941,10 @@ static int rndis_get_tx_power(struct wiphy *wiphy, int *dbm)
}
#define SCAN_DELAY_JIFFIES (6 * HZ)
-static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
+static int rndis_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *request)
{
+ struct net_device *dev = request->wdev->netdev;
struct usbnet *usbdev = netdev_priv(dev);
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
int ret;
@@ -2110,7 +2111,7 @@ resize_buf:
while (check_bssid_list_item(bssid, bssid_len, buf, len)) {
if (rndis_bss_info_update(usbdev, bssid) && match_bssid &&
matched) {
- if (!ether_addr_equal(bssid->mac, match_bssid))
+ if (ether_addr_equal(bssid->mac, match_bssid))
*matched = true;
}
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 299c387..c7548da 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -99,6 +99,14 @@ config RT2800PCI_RT53XX
rt2800pci driver.
Supported chips: RT5390
+config RT2800PCI_RT3290
+ bool "rt2800pci - Include support for rt3290 devices (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ default y
+ ---help---
+ This adds support for rt3290 wireless chipset family to the
+ rt2800pci driver.
+ Supported chips: RT3290
endif
config RT2500USB
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 5e6b501..8b9dbd7 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1455,7 +1455,7 @@ static int rt2400pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
*/
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
if (!is_valid_ether_addr(mac)) {
- random_ether_addr(mac);
+ eth_random_addr(mac);
EEPROM(rt2x00dev, "MAC: %pM\n", mac);
}
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 136b849..d2cf8a4 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1585,7 +1585,7 @@ static int rt2500pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
*/
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
if (!is_valid_ether_addr(mac)) {
- random_ether_addr(mac);
+ eth_random_addr(mac);
EEPROM(rt2x00dev, "MAC: %pM\n", mac);
}
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 669aecd..3aae36b 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1352,7 +1352,7 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
*/
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
if (!is_valid_ether_addr(mac)) {
- random_ether_addr(mac);
+ eth_random_addr(mac);
EEPROM(rt2x00dev, "MAC: %pM\n", mac);
}
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 9348521..e252e9b 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -51,6 +51,7 @@
* RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
* RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
* RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
+ * RF5360 2.4G 1T1R
* RF5370 2.4G 1T1R
* RF5390 2.4G 1T1R
*/
@@ -67,9 +68,12 @@
#define RF3320 0x000b
#define RF3322 0x000c
#define RF3053 0x000d
+#define RF3290 0x3290
+#define RF5360 0x5360
#define RF5370 0x5370
#define RF5372 0x5372
#define RF5390 0x5390
+#define RF5392 0x5392
/*
* Chipset revisions.
@@ -114,6 +118,12 @@
* Registers.
*/
+
+/*
+ * MAC_CSR0_3290: MAC_CSR0 for RT3290 to identity MAC version number.
+ */
+#define MAC_CSR0_3290 0x0000
+
/*
* E2PROM_CSR: PCI EEPROM control register.
* RELOAD: Write 1 to reload eeprom content.
@@ -130,6 +140,150 @@
#define E2PROM_CSR_RELOAD FIELD32(0x00000080)
/*
+ * CMB_CTRL_CFG
+ */
+#define CMB_CTRL 0x0020
+#define AUX_OPT_BIT0 FIELD32(0x00000001)
+#define AUX_OPT_BIT1 FIELD32(0x00000002)
+#define AUX_OPT_BIT2 FIELD32(0x00000004)
+#define AUX_OPT_BIT3 FIELD32(0x00000008)
+#define AUX_OPT_BIT4 FIELD32(0x00000010)
+#define AUX_OPT_BIT5 FIELD32(0x00000020)
+#define AUX_OPT_BIT6 FIELD32(0x00000040)
+#define AUX_OPT_BIT7 FIELD32(0x00000080)
+#define AUX_OPT_BIT8 FIELD32(0x00000100)
+#define AUX_OPT_BIT9 FIELD32(0x00000200)
+#define AUX_OPT_BIT10 FIELD32(0x00000400)
+#define AUX_OPT_BIT11 FIELD32(0x00000800)
+#define AUX_OPT_BIT12 FIELD32(0x00001000)
+#define AUX_OPT_BIT13 FIELD32(0x00002000)
+#define AUX_OPT_BIT14 FIELD32(0x00004000)
+#define AUX_OPT_BIT15 FIELD32(0x00008000)
+#define LDO25_LEVEL FIELD32(0x00030000)
+#define LDO25_LARGEA FIELD32(0x00040000)
+#define LDO25_FRC_ON FIELD32(0x00080000)
+#define CMB_RSV FIELD32(0x00300000)
+#define XTAL_RDY FIELD32(0x00400000)
+#define PLL_LD FIELD32(0x00800000)
+#define LDO_CORE_LEVEL FIELD32(0x0F000000)
+#define LDO_BGSEL FIELD32(0x30000000)
+#define LDO3_EN FIELD32(0x40000000)
+#define LDO0_EN FIELD32(0x80000000)
+
+/*
+ * EFUSE_CSR_3290: RT3290 EEPROM
+ */
+#define EFUSE_CTRL_3290 0x0024
+
+/*
+ * EFUSE_DATA3 of 3290
+ */
+#define EFUSE_DATA3_3290 0x0028
+
+/*
+ * EFUSE_DATA2 of 3290
+ */
+#define EFUSE_DATA2_3290 0x002c
+
+/*
+ * EFUSE_DATA1 of 3290
+ */
+#define EFUSE_DATA1_3290 0x0030
+
+/*
+ * EFUSE_DATA0 of 3290
+ */
+#define EFUSE_DATA0_3290 0x0034
+
+/*
+ * OSC_CTRL_CFG
+ * Ring oscillator configuration
+ */
+#define OSC_CTRL 0x0038
+#define OSC_REF_CYCLE FIELD32(0x00001fff)
+#define OSC_RSV FIELD32(0x0000e000)
+#define OSC_CAL_CNT FIELD32(0x0fff0000)
+#define OSC_CAL_ACK FIELD32(0x10000000)
+#define OSC_CLK_32K_VLD FIELD32(0x20000000)
+#define OSC_CAL_REQ FIELD32(0x40000000)
+#define OSC_ROSC_EN FIELD32(0x80000000)
+
+/*
+ * COEX_CFG_0
+ */
+#define COEX_CFG0 0x0040
+#define COEX_CFG_ANT FIELD32(0xff000000)
+/*
+ * COEX_CFG_1
+ */
+#define COEX_CFG1 0x0044
+
+/*
+ * COEX_CFG_2
+ */
+#define COEX_CFG2 0x0048
+#define BT_COEX_CFG1 FIELD32(0xff000000)
+#define BT_COEX_CFG0 FIELD32(0x00ff0000)
+#define WL_COEX_CFG1 FIELD32(0x0000ff00)
+#define WL_COEX_CFG0 FIELD32(0x000000ff)
+/*
+ * PLL_CTRL_CFG
+ * PLL configuration register
+ */
+#define PLL_CTRL 0x0050
+#define PLL_RESERVED_INPUT1 FIELD32(0x000000ff)
+#define PLL_RESERVED_INPUT2 FIELD32(0x0000ff00)
+#define PLL_CONTROL FIELD32(0x00070000)
+#define PLL_LPF_R1 FIELD32(0x00080000)
+#define PLL_LPF_C1_CTRL FIELD32(0x00300000)
+#define PLL_LPF_C2_CTRL FIELD32(0x00c00000)
+#define PLL_CP_CURRENT_CTRL FIELD32(0x03000000)
+#define PLL_PFD_DELAY_CTRL FIELD32(0x0c000000)
+#define PLL_LOCK_CTRL FIELD32(0x70000000)
+#define PLL_VBGBK_EN FIELD32(0x80000000)
+
+
+/*
+ * WLAN_CTRL_CFG
+ * RT3290 wlan configuration
+ */
+#define WLAN_FUN_CTRL 0x0080
+#define WLAN_EN FIELD32(0x00000001)
+#define WLAN_CLK_EN FIELD32(0x00000002)
+#define WLAN_RSV1 FIELD32(0x00000004)
+#define WLAN_RESET FIELD32(0x00000008)
+#define PCIE_APP0_CLK_REQ FIELD32(0x00000010)
+#define FRC_WL_ANT_SET FIELD32(0x00000020)
+#define INV_TR_SW0 FIELD32(0x00000040)
+#define WLAN_GPIO_IN_BIT0 FIELD32(0x00000100)
+#define WLAN_GPIO_IN_BIT1 FIELD32(0x00000200)
+#define WLAN_GPIO_IN_BIT2 FIELD32(0x00000400)
+#define WLAN_GPIO_IN_BIT3 FIELD32(0x00000800)
+#define WLAN_GPIO_IN_BIT4 FIELD32(0x00001000)
+#define WLAN_GPIO_IN_BIT5 FIELD32(0x00002000)
+#define WLAN_GPIO_IN_BIT6 FIELD32(0x00004000)
+#define WLAN_GPIO_IN_BIT7 FIELD32(0x00008000)
+#define WLAN_GPIO_IN_BIT_ALL FIELD32(0x0000ff00)
+#define WLAN_GPIO_OUT_BIT0 FIELD32(0x00010000)
+#define WLAN_GPIO_OUT_BIT1 FIELD32(0x00020000)
+#define WLAN_GPIO_OUT_BIT2 FIELD32(0x00040000)
+#define WLAN_GPIO_OUT_BIT3 FIELD32(0x00050000)
+#define WLAN_GPIO_OUT_BIT4 FIELD32(0x00100000)
+#define WLAN_GPIO_OUT_BIT5 FIELD32(0x00200000)
+#define WLAN_GPIO_OUT_BIT6 FIELD32(0x00400000)
+#define WLAN_GPIO_OUT_BIT7 FIELD32(0x00800000)
+#define WLAN_GPIO_OUT_BIT_ALL FIELD32(0x00ff0000)
+#define WLAN_GPIO_OUT_OE_BIT0 FIELD32(0x01000000)
+#define WLAN_GPIO_OUT_OE_BIT1 FIELD32(0x02000000)
+#define WLAN_GPIO_OUT_OE_BIT2 FIELD32(0x04000000)
+#define WLAN_GPIO_OUT_OE_BIT3 FIELD32(0x08000000)
+#define WLAN_GPIO_OUT_OE_BIT4 FIELD32(0x10000000)
+#define WLAN_GPIO_OUT_OE_BIT5 FIELD32(0x20000000)
+#define WLAN_GPIO_OUT_OE_BIT6 FIELD32(0x40000000)
+#define WLAN_GPIO_OUT_OE_BIT7 FIELD32(0x80000000)
+#define WLAN_GPIO_OUT_OE_BIT_ALL FIELD32(0xff000000)
+
+/*
* AUX_CTRL: Aux/PCI-E related configuration
*/
#define AUX_CTRL 0x10c
@@ -1760,9 +1914,11 @@ struct mac_iveiv_entry {
/*
* BBP 3: RX Antenna
*/
-#define BBP3_RX_ADC FIELD8(0x03)
+#define BBP3_RX_ADC FIELD8(0x03)
#define BBP3_RX_ANTENNA FIELD8(0x18)
#define BBP3_HT40_MINUS FIELD8(0x20)
+#define BBP3_ADC_MODE_SWITCH FIELD8(0x40)
+#define BBP3_ADC_INIT_MODE FIELD8(0x80)
/*
* BBP 4: Bandwidth
@@ -1772,6 +1928,14 @@ struct mac_iveiv_entry {
#define BBP4_MAC_IF_CTRL FIELD8(0x40)
/*
+ * BBP 47: Bandwidth
+ */
+#define BBP47_TSSI_REPORT_SEL FIELD8(0x03)
+#define BBP47_TSSI_UPDATE_REQ FIELD8(0x04)
+#define BBP47_TSSI_TSSI_MODE FIELD8(0x18)
+#define BBP47_TSSI_ADC6 FIELD8(0x80)
+
+/*
* BBP 109
*/
#define BBP109_TX0_POWER FIELD8(0x0f)
@@ -1914,6 +2078,16 @@ struct mac_iveiv_entry {
#define RFCSR27_R4 FIELD8(0x40)
/*
+ * RFCSR 29:
+ */
+#define RFCSR29_ADC6_TEST FIELD8(0x01)
+#define RFCSR29_ADC6_INT_TEST FIELD8(0x02)
+#define RFCSR29_RSSI_RESET FIELD8(0x04)
+#define RFCSR29_RSSI_ON FIELD8(0x08)
+#define RFCSR29_RSSI_RIP_CTRL FIELD8(0x30)
+#define RFCSR29_RSSI_GAIN FIELD8(0xc0)
+
+/*
* RFCSR 30:
*/
#define RFCSR30_TX_H20M FIELD8(0x02)
@@ -1944,6 +2118,11 @@ struct mac_iveiv_entry {
#define RFCSR49_TX FIELD8(0x3f)
/*
+ * RFCSR 50:
+ */
+#define RFCSR50_TX FIELD8(0x3f)
+
+/*
* RF registers
*/
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index dfc90d3..88455b1 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -354,16 +354,15 @@ int rt2800_check_firmware(struct rt2x00_dev *rt2x00dev,
* of 4kb. Certain USB chipsets however require different firmware,
* which Ralink only provides attached to the original firmware
* file. Thus for USB devices, firmware files have a length
- * which is a multiple of 4kb.
+ * which is a multiple of 4kb. The firmware for the rt3290 chip also
+ * has a length which is a multiple of 4kb.
*/
- if (rt2x00_is_usb(rt2x00dev)) {
+ if (rt2x00_is_usb(rt2x00dev) || rt2x00_rt(rt2x00dev, RT3290))
fw_len = 4096;
- multiple = true;
- } else {
+ else
fw_len = 8192;
- multiple = true;
- }
+ multiple = true;
/*
* Validate the firmware length
*/
@@ -415,7 +414,8 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
return -EBUSY;
if (rt2x00_is_pci(rt2x00dev)) {
- if (rt2x00_rt(rt2x00dev, RT3572) ||
+ if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
@@ -851,8 +851,13 @@ int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
- rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
- return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2);
+ if (rt2x00_rt(rt2x00dev, RT3290)) {
+ rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+ return rt2x00_get_field32(reg, WLAN_GPIO_IN_BIT0);
+ } else {
+ rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+ return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2);
+ }
}
EXPORT_SYMBOL_GPL(rt2800_rfkill_poll);
@@ -1935,8 +1940,50 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
}
-#define RT5390_POWER_BOUND 0x27
-#define RT5390_FREQ_OFFSET_BOUND 0x5f
+#define POWER_BOUND 0x27
+#define FREQ_OFFSET_BOUND 0x5f
+
+static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
+{
+ u8 rfcsr;
+
+ rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
+ rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
+ rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf2);
+ rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
+ if (info->default_power1 > POWER_BOUND)
+ rt2x00_set_field8(&rfcsr, RFCSR49_TX, POWER_BOUND);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
+ rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+ if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
+ rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
+ rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+
+ if (rf->channel <= 14) {
+ if (rf->channel == 6)
+ rt2800_bbp_write(rt2x00dev, 68, 0x0c);
+ else
+ rt2800_bbp_write(rt2x00dev, 68, 0x0b);
+
+ if (rf->channel >= 1 && rf->channel <= 6)
+ rt2800_bbp_write(rt2x00dev, 59, 0x0f);
+ else if (rf->channel >= 7 && rf->channel <= 11)
+ rt2800_bbp_write(rt2x00dev, 59, 0x0e);
+ else if (rf->channel >= 12 && rf->channel <= 14)
+ rt2800_bbp_write(rt2x00dev, 59, 0x0d);
+ }
+}
static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
struct ieee80211_conf *conf,
@@ -1952,13 +1999,27 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
- if (info->default_power1 > RT5390_POWER_BOUND)
- rt2x00_set_field8(&rfcsr, RFCSR49_TX, RT5390_POWER_BOUND);
+ if (info->default_power1 > POWER_BOUND)
+ rt2x00_set_field8(&rfcsr, RFCSR49_TX, POWER_BOUND);
else
rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
+ if (rt2x00_rt(rt2x00dev, RT5392)) {
+ rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
+ if (info->default_power1 > POWER_BOUND)
+ rt2x00_set_field8(&rfcsr, RFCSR50_TX, POWER_BOUND);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR50_TX,
+ info->default_power2);
+ rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
+ }
+
rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ if (rt2x00_rt(rt2x00dev, RT5392)) {
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
+ }
rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
@@ -1966,9 +2027,8 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
- if (rt2x00dev->freq_offset > RT5390_FREQ_OFFSET_BOUND)
- rt2x00_set_field8(&rfcsr, RFCSR17_CODE,
- RT5390_FREQ_OFFSET_BOUND);
+ if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
+ rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
else
rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
@@ -2021,15 +2081,6 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
}
}
}
-
- rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0);
- rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0);
- rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
-
- rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
- rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
}
static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
@@ -2039,7 +2090,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
{
u32 reg;
unsigned int tx_pin;
- u8 bbp;
+ u8 bbp, rfcsr;
if (rf->channel <= 14) {
info->default_power1 = TXPOWER_G_TO_DEV(info->default_power1);
@@ -2060,15 +2111,36 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
case RF3052:
rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info);
break;
+ case RF3290:
+ rt2800_config_channel_rf3290(rt2x00dev, conf, rf, info);
+ break;
+ case RF5360:
case RF5370:
case RF5372:
case RF5390:
+ case RF5392:
rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
break;
default:
rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
}
+ if (rt2x00_rf(rt2x00dev, RF3290) ||
+ rt2x00_rf(rt2x00dev, RF5360) ||
+ rt2x00_rf(rt2x00dev, RF5370) ||
+ rt2x00_rf(rt2x00dev, RF5372) ||
+ rt2x00_rf(rt2x00dev, RF5390) ||
+ rt2x00_rf(rt2x00dev, RF5392)) {
+ rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0);
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
+ rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
+ }
+
/*
* Change BBP settings
*/
@@ -2549,9 +2621,12 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
break;
+ case RF3290:
+ case RF5360:
case RF5370:
case RF5372:
case RF5390:
+ case RF5392:
rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
@@ -2682,6 +2757,7 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT3390) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392))
@@ -2778,10 +2854,54 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
+ if (rt2x00_rt(rt2x00dev, RT3290)) {
+ rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+ if (rt2x00_get_field32(reg, WLAN_EN) == 1) {
+ rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 1);
+ rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+ }
+
+ rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
+ if (!(rt2x00_get_field32(reg, LDO0_EN) == 1)) {
+ rt2x00_set_field32(&reg, LDO0_EN, 1);
+ rt2x00_set_field32(&reg, LDO_BGSEL, 3);
+ rt2800_register_write(rt2x00dev, CMB_CTRL, reg);
+ }
+
+ rt2800_register_read(rt2x00dev, OSC_CTRL, &reg);
+ rt2x00_set_field32(&reg, OSC_ROSC_EN, 1);
+ rt2x00_set_field32(&reg, OSC_CAL_REQ, 1);
+ rt2x00_set_field32(&reg, OSC_REF_CYCLE, 0x27);
+ rt2800_register_write(rt2x00dev, OSC_CTRL, reg);
+
+ rt2800_register_read(rt2x00dev, COEX_CFG0, &reg);
+ rt2x00_set_field32(&reg, COEX_CFG_ANT, 0x5e);
+ rt2800_register_write(rt2x00dev, COEX_CFG0, reg);
+
+ rt2800_register_read(rt2x00dev, COEX_CFG2, &reg);
+ rt2x00_set_field32(&reg, BT_COEX_CFG1, 0x00);
+ rt2x00_set_field32(&reg, BT_COEX_CFG0, 0x17);
+ rt2x00_set_field32(&reg, WL_COEX_CFG1, 0x93);
+ rt2x00_set_field32(&reg, WL_COEX_CFG0, 0x7f);
+ rt2800_register_write(rt2x00dev, COEX_CFG2, reg);
+
+ rt2800_register_read(rt2x00dev, PLL_CTRL, &reg);
+ rt2x00_set_field32(&reg, PLL_CONTROL, 1);
+ rt2800_register_write(rt2x00dev, PLL_CTRL, reg);
+ }
+
if (rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT3390)) {
- rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
+
+ if (rt2x00_rt(rt2x00dev, RT3290))
+ rt2800_register_write(rt2x00dev, TX_SW_CFG0,
+ 0x00000404);
+ else
+ rt2800_register_write(rt2x00dev, TX_SW_CFG0,
+ 0x00000400);
+
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
@@ -3190,14 +3310,16 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_wait_bbp_ready(rt2x00dev)))
return -EACCES;
- if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392)) {
+ if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_bbp_read(rt2x00dev, 4, &value);
rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
rt2800_bbp_write(rt2x00dev, 4, value);
}
if (rt2800_is_305x_soc(rt2x00dev) ||
+ rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392))
@@ -3206,20 +3328,26 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 65, 0x2c);
rt2800_bbp_write(rt2x00dev, 66, 0x38);
- if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
+ if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 68, 0x0b);
if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
rt2800_bbp_write(rt2x00dev, 69, 0x16);
rt2800_bbp_write(rt2x00dev, 73, 0x12);
- } else if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392)) {
+ } else if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_bbp_write(rt2x00dev, 69, 0x12);
rt2800_bbp_write(rt2x00dev, 73, 0x13);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
rt2800_bbp_write(rt2x00dev, 76, 0x28);
- rt2800_bbp_write(rt2x00dev, 77, 0x59);
+
+ if (rt2x00_rt(rt2x00dev, RT3290))
+ rt2800_bbp_write(rt2x00dev, 77, 0x58);
+ else
+ rt2800_bbp_write(rt2x00dev, 77, 0x59);
} else {
rt2800_bbp_write(rt2x00dev, 69, 0x12);
rt2800_bbp_write(rt2x00dev, 73, 0x10);
@@ -3244,23 +3372,33 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 81, 0x37);
}
+ if (rt2x00_rt(rt2x00dev, RT3290)) {
+ rt2800_bbp_write(rt2x00dev, 74, 0x0b);
+ rt2800_bbp_write(rt2x00dev, 79, 0x18);
+ rt2800_bbp_write(rt2x00dev, 80, 0x09);
+ rt2800_bbp_write(rt2x00dev, 81, 0x33);
+ }
+
rt2800_bbp_write(rt2x00dev, 82, 0x62);
- if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
+ if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 83, 0x7a);
else
rt2800_bbp_write(rt2x00dev, 83, 0x6a);
if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
rt2800_bbp_write(rt2x00dev, 84, 0x19);
- else if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
+ else if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 84, 0x9a);
else
rt2800_bbp_write(rt2x00dev, 84, 0x99);
- if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
+ if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 86, 0x38);
else
rt2800_bbp_write(rt2x00dev, 86, 0x00);
@@ -3270,8 +3408,9 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 91, 0x04);
- if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
+ if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 92, 0x02);
else
rt2800_bbp_write(rt2x00dev, 92, 0x00);
@@ -3285,6 +3424,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
+ rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392) ||
@@ -3293,27 +3433,32 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
else
rt2800_bbp_write(rt2x00dev, 103, 0x00);
- if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
+ if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 104, 0x92);
if (rt2800_is_305x_soc(rt2x00dev))
rt2800_bbp_write(rt2x00dev, 105, 0x01);
+ else if (rt2x00_rt(rt2x00dev, RT3290))
+ rt2800_bbp_write(rt2x00dev, 105, 0x1c);
else if (rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 105, 0x3c);
else
rt2800_bbp_write(rt2x00dev, 105, 0x05);
- if (rt2x00_rt(rt2x00dev, RT5390))
+ if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 106, 0x03);
else if (rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 106, 0x12);
else
rt2800_bbp_write(rt2x00dev, 106, 0x35);
- if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
+ if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 128, 0x12);
if (rt2x00_rt(rt2x00dev, RT5392)) {
@@ -3338,6 +3483,29 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 138, value);
}
+ if (rt2x00_rt(rt2x00dev, RT3290)) {
+ rt2800_bbp_write(rt2x00dev, 67, 0x24);
+ rt2800_bbp_write(rt2x00dev, 143, 0x04);
+ rt2800_bbp_write(rt2x00dev, 142, 0x99);
+ rt2800_bbp_write(rt2x00dev, 150, 0x30);
+ rt2800_bbp_write(rt2x00dev, 151, 0x2e);
+ rt2800_bbp_write(rt2x00dev, 152, 0x20);
+ rt2800_bbp_write(rt2x00dev, 153, 0x34);
+ rt2800_bbp_write(rt2x00dev, 154, 0x40);
+ rt2800_bbp_write(rt2x00dev, 155, 0x3b);
+ rt2800_bbp_write(rt2x00dev, 253, 0x04);
+
+ rt2800_bbp_read(rt2x00dev, 47, &value);
+ rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1);
+ rt2800_bbp_write(rt2x00dev, 47, value);
+
+ /* Use 5-bit ADC for Acquisition and 8-bit ADC for data */
+ rt2800_bbp_read(rt2x00dev, 3, &value);
+ rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1);
+ rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1);
+ rt2800_bbp_write(rt2x00dev, 3, value);
+ }
+
if (rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392)) {
int ant, div_mode;
@@ -3470,6 +3638,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
if (!rt2x00_rt(rt2x00dev, RT3070) &&
!rt2x00_rt(rt2x00dev, RT3071) &&
!rt2x00_rt(rt2x00dev, RT3090) &&
+ !rt2x00_rt(rt2x00dev, RT3290) &&
!rt2x00_rt(rt2x00dev, RT3390) &&
!rt2x00_rt(rt2x00dev, RT3572) &&
!rt2x00_rt(rt2x00dev, RT5390) &&
@@ -3480,8 +3649,9 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
/*
* Init RF calibration.
*/
- if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392)) {
+ if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
@@ -3519,6 +3689,53 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
+ } else if (rt2x00_rt(rt2x00dev, RT3290)) {
+ rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
+ rt2800_rfcsr_write(rt2x00dev, 4, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0xa0);
+ rt2800_rfcsr_write(rt2x00dev, 8, 0xf3);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x46);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x83);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x82);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x05);
+ rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
+ rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
+ rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
+ rt2800_rfcsr_write(rt2x00dev, 42, 0xd5);
+ rt2800_rfcsr_write(rt2x00dev, 43, 0x7b);
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
+ rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 49, 0x98);
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x38);
+ rt2800_rfcsr_write(rt2x00dev, 53, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x43);
+ rt2800_rfcsr_write(rt2x00dev, 56, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
+ rt2800_rfcsr_write(rt2x00dev, 59, 0x09);
+ rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
+ rt2800_rfcsr_write(rt2x00dev, 61, 0xc1);
} else if (rt2x00_rt(rt2x00dev, RT3390)) {
rt2800_rfcsr_write(rt2x00dev, 0, 0xa0);
rt2800_rfcsr_write(rt2x00dev, 1, 0xe1);
@@ -3927,6 +4144,12 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
}
+ if (rt2x00_rt(rt2x00dev, RT3290)) {
+ rt2800_rfcsr_read(rt2x00dev, 29, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR29_RSSI_GAIN, 3);
+ rt2800_rfcsr_write(rt2x00dev, 29, rfcsr);
+ }
+
if (rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
@@ -4033,9 +4256,14 @@ EXPORT_SYMBOL_GPL(rt2800_disable_radio);
int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
+ u16 efuse_ctrl_reg;
- rt2800_register_read(rt2x00dev, EFUSE_CTRL, &reg);
+ if (rt2x00_rt(rt2x00dev, RT3290))
+ efuse_ctrl_reg = EFUSE_CTRL_3290;
+ else
+ efuse_ctrl_reg = EFUSE_CTRL;
+ rt2800_register_read(rt2x00dev, efuse_ctrl_reg, &reg);
return rt2x00_get_field32(reg, EFUSE_CTRL_PRESENT);
}
EXPORT_SYMBOL_GPL(rt2800_efuse_detect);
@@ -4043,27 +4271,44 @@ EXPORT_SYMBOL_GPL(rt2800_efuse_detect);
static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
{
u32 reg;
-
+ u16 efuse_ctrl_reg;
+ u16 efuse_data0_reg;
+ u16 efuse_data1_reg;
+ u16 efuse_data2_reg;
+ u16 efuse_data3_reg;
+
+ if (rt2x00_rt(rt2x00dev, RT3290)) {
+ efuse_ctrl_reg = EFUSE_CTRL_3290;
+ efuse_data0_reg = EFUSE_DATA0_3290;
+ efuse_data1_reg = EFUSE_DATA1_3290;
+ efuse_data2_reg = EFUSE_DATA2_3290;
+ efuse_data3_reg = EFUSE_DATA3_3290;
+ } else {
+ efuse_ctrl_reg = EFUSE_CTRL;
+ efuse_data0_reg = EFUSE_DATA0;
+ efuse_data1_reg = EFUSE_DATA1;
+ efuse_data2_reg = EFUSE_DATA2;
+ efuse_data3_reg = EFUSE_DATA3;
+ }
mutex_lock(&rt2x00dev->csr_mutex);
- rt2800_register_read_lock(rt2x00dev, EFUSE_CTRL, &reg);
+ rt2800_register_read_lock(rt2x00dev, efuse_ctrl_reg, &reg);
rt2x00_set_field32(&reg, EFUSE_CTRL_ADDRESS_IN, i);
rt2x00_set_field32(&reg, EFUSE_CTRL_MODE, 0);
rt2x00_set_field32(&reg, EFUSE_CTRL_KICK, 1);
- rt2800_register_write_lock(rt2x00dev, EFUSE_CTRL, reg);
+ rt2800_register_write_lock(rt2x00dev, efuse_ctrl_reg, reg);
/* Wait until the EEPROM has been loaded */
- rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg);
-
+ rt2800_regbusy_read(rt2x00dev, efuse_ctrl_reg, EFUSE_CTRL_KICK, &reg);
/* Apparently the data is read from end to start */
- rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
+ rt2800_register_read_lock(rt2x00dev, efuse_data3_reg, &reg);
/* The returned value is in CPU order, but eeprom is le */
*(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
- rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
+ rt2800_register_read_lock(rt2x00dev, efuse_data2_reg, &reg);
*(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
- rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
+ rt2800_register_read_lock(rt2x00dev, efuse_data1_reg, &reg);
*(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
- rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, &reg);
+ rt2800_register_read_lock(rt2x00dev, efuse_data0_reg, &reg);
*(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);
mutex_unlock(&rt2x00dev->csr_mutex);
@@ -4090,7 +4335,7 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
*/
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
if (!is_valid_ether_addr(mac)) {
- random_ether_addr(mac);
+ eth_random_addr(mac);
EEPROM(rt2x00dev, "MAC: %pM\n", mac);
}
@@ -4225,9 +4470,14 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
* RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field
* RT53xx: defined in "EEPROM_CHIP_ID" field
*/
- rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
- if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390 ||
- rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5392)
+ if (rt2x00_rt(rt2x00dev, RT3290))
+ rt2800_register_read(rt2x00dev, MAC_CSR0_3290, &reg);
+ else
+ rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
+
+ if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT3290 ||
+ rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390 ||
+ rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5392)
rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value);
else
value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
@@ -4242,6 +4492,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RT3070:
case RT3071:
case RT3090:
+ case RT3290:
case RT3390:
case RT3572:
case RT5390:
@@ -4262,10 +4513,13 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RF3021:
case RF3022:
case RF3052:
+ case RF3290:
case RF3320:
+ case RF5360:
case RF5370:
case RF5372:
case RF5390:
+ case RF5392:
break;
default:
ERROR(rt2x00dev, "Invalid RF chipset 0x%04x detected.\n",
@@ -4576,10 +4830,13 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
rt2x00_rf(rt2x00dev, RF2020) ||
rt2x00_rf(rt2x00dev, RF3021) ||
rt2x00_rf(rt2x00dev, RF3022) ||
+ rt2x00_rf(rt2x00dev, RF3290) ||
rt2x00_rf(rt2x00dev, RF3320) ||
+ rt2x00_rf(rt2x00dev, RF5360) ||
rt2x00_rf(rt2x00dev, RF5370) ||
rt2x00_rf(rt2x00dev, RF5372) ||
- rt2x00_rf(rt2x00dev, RF5390)) {
+ rt2x00_rf(rt2x00dev, RF5390) ||
+ rt2x00_rf(rt2x00dev, RF5392)) {
spec->num_channels = 14;
spec->channels = rf_vals_3x;
} else if (rt2x00_rf(rt2x00dev, RF3052)) {
@@ -4662,9 +4919,12 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
case RF3022:
case RF3320:
case RF3052:
+ case RF3290:
+ case RF5360:
case RF5370:
case RF5372:
case RF5390:
+ case RF5392:
__set_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags);
break;
}
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index cad25bf..235376e 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -280,7 +280,13 @@ static void rt2800pci_stop_queue(struct data_queue *queue)
*/
static char *rt2800pci_get_firmware_name(struct rt2x00_dev *rt2x00dev)
{
- return FIRMWARE_RT2860;
+ /*
+ * The rt3290 chip uses a specific 4KB firmware named rt3290.bin.
+ */
+ if (rt2x00_rt(rt2x00dev, RT3290))
+ return FIRMWARE_RT3290;
+ else
+ return FIRMWARE_RT2860;
}
static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev,
@@ -974,6 +980,66 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
return rt2800_validate_eeprom(rt2x00dev);
}
+static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
+{
+ u32 reg;
+ int i, count;
+
+ rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+ if (rt2x00_get_field32(reg, WLAN_EN))
+ return 0;
+
+ rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
+ rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
+ rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
+ rt2x00_set_field32(&reg, WLAN_EN, 1);
+ rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+
+ udelay(REGISTER_BUSY_DELAY);
+
+ count = 0;
+ do {
+ /*
+ * Check PLL_LD & XTAL_RDY.
+ */
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
+ if (rt2x00_get_field32(reg, PLL_LD) &&
+ rt2x00_get_field32(reg, XTAL_RDY))
+ break;
+ udelay(REGISTER_BUSY_DELAY);
+ }
+
+ if (i >= REGISTER_BUSY_COUNT) {
+
+ if (count >= 10)
+ return -EIO;
+
+ rt2800_register_write(rt2x00dev, 0x58, 0x018);
+ udelay(REGISTER_BUSY_DELAY);
+ rt2800_register_write(rt2x00dev, 0x58, 0x418);
+ udelay(REGISTER_BUSY_DELAY);
+ rt2800_register_write(rt2x00dev, 0x58, 0x618);
+ udelay(REGISTER_BUSY_DELAY);
+ count++;
+ } else {
+ count = 0;
+ }
+
+ rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+ rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0);
+ rt2x00_set_field32(&reg, WLAN_CLK_EN, 1);
+ rt2x00_set_field32(&reg, WLAN_RESET, 1);
+ rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+ udelay(10);
+ rt2x00_set_field32(&reg, WLAN_RESET, 0);
+ rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+ udelay(10);
+ rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff);
+ } while (count != 0);
+
+ return 0;
+}
static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
{
int retval;
@@ -997,6 +1063,17 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
return retval;
/*
+ * In the probe phase, call rt2800_enable_wlan_rt3290 to enable the wlan
+ * clk for rt3290. This avoids MCU failures in the start phase.
+ */
+ if (rt2x00_rt(rt2x00dev, RT3290)) {
+ retval = rt2800_enable_wlan_rt3290(rt2x00dev);
+
+ if (retval)
+ return retval;
+ }
+
+ /*
* This device has multiple filters for control frames
* and has a separate filter for PS Poll frames.
*/
@@ -1175,6 +1252,9 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1432, 0x7768) },
{ PCI_DEVICE(0x1462, 0x891a) },
{ PCI_DEVICE(0x1a3b, 0x1059) },
+#ifdef CONFIG_RT2800PCI_RT3290
+ { PCI_DEVICE(0x1814, 0x3290) },
+#endif
#ifdef CONFIG_RT2800PCI_RT33XX
{ PCI_DEVICE(0x1814, 0x3390) },
#endif
@@ -1188,6 +1268,7 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x3593) },
#endif
#ifdef CONFIG_RT2800PCI_RT53XX
+ { PCI_DEVICE(0x1814, 0x5360) },
{ PCI_DEVICE(0x1814, 0x5362) },
{ PCI_DEVICE(0x1814, 0x5390) },
{ PCI_DEVICE(0x1814, 0x5392) },
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
index 70e050d..ab22a08 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.h
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -47,6 +47,7 @@
* 8051 firmware image.
*/
#define FIRMWARE_RT2860 "rt2860.bin"
+#define FIRMWARE_RT3290 "rt3290.bin"
#define FIRMWARE_IMAGE_BASE 0x2000
/*
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index bf78317..6cf3365 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -971,6 +971,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0411, 0x015d) },
{ USB_DEVICE(0x0411, 0x016f) },
{ USB_DEVICE(0x0411, 0x01a2) },
+ { USB_DEVICE(0x0411, 0x01ee) },
/* Corega */
{ USB_DEVICE(0x07aa, 0x002f) },
{ USB_DEVICE(0x07aa, 0x003c) },
@@ -1137,6 +1138,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
#ifdef CONFIG_RT2800USB_RT33XX
/* Belkin */
{ USB_DEVICE(0x050d, 0x945b) },
+ /* D-Link */
+ { USB_DEVICE(0x2001, 0x3c17) },
/* Panasonic */
{ USB_DEVICE(0x083a, 0xb511) },
/* Philips */
@@ -1237,7 +1240,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* D-Link */
{ USB_DEVICE(0x07d1, 0x3c0b) },
{ USB_DEVICE(0x07d1, 0x3c17) },
- { USB_DEVICE(0x2001, 0x3c17) },
/* Encore */
{ USB_DEVICE(0x203d, 0x14a1) },
/* Gemtek */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index ca36ccc..8afb546 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -187,6 +187,7 @@ struct rt2x00_chip {
#define RT3070 0x3070
#define RT3071 0x3071
#define RT3090 0x3090 /* 2.4GHz PCIe */
+#define RT3290 0x3290
#define RT3390 0x3390
#define RT3572 0x3572
#define RT3593 0x3593
@@ -396,8 +397,7 @@ struct rt2x00_intf {
* for hardware which doesn't support hardware
* sequence counting.
*/
- spinlock_t seqlock;
- u16 seqno;
+ atomic_t seqno;
};
static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index e7361d9..49a63e9 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -102,7 +102,7 @@ void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
/* Update the AID, this is needed for dynamic PS support */
rt2x00dev->aid = bss_conf->assoc ? bss_conf->aid : 0;
- rt2x00dev->last_beacon = bss_conf->last_tsf;
+ rt2x00dev->last_beacon = bss_conf->sync_tsf;
/* Update global beacon interval time, this is needed for PS support */
rt2x00dev->beacon_int = bss_conf->beacon_int;
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index e5404e5..a6b88bd 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1161,6 +1161,8 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
BIT(NL80211_IFTYPE_MESH_POINT) |
BIT(NL80211_IFTYPE_WDS);
+ rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
/*
* Initialize work.
*/
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index b49773e..4ff26c2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -277,7 +277,6 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
else
rt2x00dev->intf_sta_count++;
- spin_lock_init(&intf->seqlock);
mutex_init(&intf->beacon_skb_mutex);
intf->beacon = entry;
@@ -507,9 +506,19 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return 0;
- else if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+
+ if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+ return -EOPNOTSUPP;
+
+ /*
+ * To support IBSS RSN, don't program group keys in IBSS; the
+ * hardware will then not attempt to decrypt the frames.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return -EOPNOTSUPP;
- else if (key->keylen > 32)
+
+ if (key->keylen > 32)
return -ENOSPC;
memset(&crypto, 0, sizeof(crypto));
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 0a4653a..a0c8cae 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -256,6 +256,7 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
struct ieee80211_hw *hw;
struct rt2x00_dev *rt2x00dev;
int retval;
+ u16 chip;
retval = pci_enable_device(pci_dev);
if (retval) {
@@ -305,6 +306,14 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
if (retval)
goto exit_free_device;
+ /*
+ * The rt3290 chip uses a different efuse offset to read efuse data,
+ * so before reading the efuse we need to indicate whether this is an
+ * rt3290 or not.
+ */
+ pci_read_config_word(pci_dev, PCI_DEVICE_ID, &chip);
+ rt2x00dev->chip.rt = chip;
+
retval = rt2x00lib_probe_dev(rt2x00dev);
if (retval)
goto exit_free_reg;
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 4c662ec..f7e74a0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -207,6 +207,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
+ u16 seqno;
if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
return;
@@ -238,15 +239,13 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
* sequence counting per-frame, since those will override the
* sequence counter given by mac80211.
*/
- spin_lock(&intf->seqlock);
-
if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
- intf->seqno += 0x10;
- hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(intf->seqno);
-
- spin_unlock(&intf->seqlock);
+ seqno = atomic_add_return(0x10, &intf->seqno);
+ else
+ seqno = atomic_read(&intf->seqno);
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seqno);
}
static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
@@ -775,9 +774,7 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
bool rt2x00queue_for_each_entry(struct data_queue *queue,
enum queue_index start,
enum queue_index end,
- void *data,
- bool (*fn)(struct queue_entry *entry,
- void *data))
+ bool (*fn)(struct queue_entry *entry))
{
unsigned long irqflags;
unsigned int index_start;
@@ -808,17 +805,17 @@ bool rt2x00queue_for_each_entry(struct data_queue *queue,
*/
if (index_start < index_end) {
for (i = index_start; i < index_end; i++) {
- if (fn(&queue->entries[i], data))
+ if (fn(&queue->entries[i]))
return true;
}
} else {
for (i = index_start; i < queue->limit; i++) {
- if (fn(&queue->entries[i], data))
+ if (fn(&queue->entries[i]))
return true;
}
for (i = 0; i < index_end; i++) {
- if (fn(&queue->entries[i], data))
+ if (fn(&queue->entries[i]))
return true;
}
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 5f1392c..9b8c10a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -584,7 +584,6 @@ struct data_queue_desc {
* @queue: Pointer to @data_queue
* @start: &enum queue_index Pointer to start index
* @end: &enum queue_index Pointer to end index
- * @data: Data to pass to the callback function
* @fn: The function to call for each &struct queue_entry
*
* This will walk through all entries in the queue, in chronological
@@ -597,9 +596,7 @@ struct data_queue_desc {
bool rt2x00queue_for_each_entry(struct data_queue *queue,
enum queue_index start,
enum queue_index end,
- void *data,
- bool (*fn)(struct queue_entry *entry,
- void *data));
+ bool (*fn)(struct queue_entry *entry));
/**
* rt2x00queue_empty - Check if the queue is empty.
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index d357d1e..40ea807 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -285,7 +285,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
}
-static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void* data)
+static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
@@ -390,7 +390,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
}
-static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void* data)
+static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
@@ -427,18 +427,12 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
case QID_AC_BE:
case QID_AC_BK:
if (!rt2x00queue_empty(queue))
- rt2x00queue_for_each_entry(queue,
- Q_INDEX_DONE,
- Q_INDEX,
- NULL,
+ rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
rt2x00usb_kick_tx_entry);
break;
case QID_RX:
if (!rt2x00queue_full(queue))
- rt2x00queue_for_each_entry(queue,
- Q_INDEX_DONE,
- Q_INDEX,
- NULL,
+ rt2x00queue_for_each_entry(queue, Q_INDEX, Q_INDEX_DONE,
rt2x00usb_kick_rx_entry);
break;
default:
@@ -447,7 +441,7 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
}
EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue);
-static bool rt2x00usb_flush_entry(struct queue_entry *entry, void* data)
+static bool rt2x00usb_flush_entry(struct queue_entry *entry)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct queue_entry_priv_usb *entry_priv = entry->priv_data;
@@ -474,7 +468,7 @@ void rt2x00usb_flush_queue(struct data_queue *queue, bool drop)
unsigned int i;
if (drop)
- rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, NULL,
+ rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
rt2x00usb_flush_entry);
/*
@@ -565,7 +559,7 @@ void rt2x00usb_clear_entry(struct queue_entry *entry)
entry->flags = 0;
if (entry->queue->qid == QID_RX)
- rt2x00usb_kick_rx_entry(entry, NULL);
+ rt2x00usb_kick_rx_entry(entry);
}
EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index ee22bd7..f322596 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2415,7 +2415,7 @@ static int rt61pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
*/
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
if (!is_valid_ether_addr(mac)) {
- random_ether_addr(mac);
+ eth_random_addr(mac);
EEPROM(rt2x00dev, "MAC: %pM\n", mac);
}
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 77ccbbc..ba6e434 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -1770,7 +1770,7 @@ static int rt73usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
*/
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
if (!is_valid_ether_addr(mac)) {
- random_ether_addr(mac);
+ eth_random_addr(mac);
EEPROM(rt2x00dev, "MAC: %pM\n", mac);
}
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 2bebcb7..aceaf68 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -47,6 +47,8 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8180_table) = {
{ PCI_DEVICE(0x1799, 0x6001) },
{ PCI_DEVICE(0x1799, 0x6020) },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x3300) },
+ { PCI_DEVICE(0x1186, 0x3301) },
+ { PCI_DEVICE(0x1432, 0x7106) },
{ }
};
@@ -1076,7 +1078,7 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
if (!is_valid_ether_addr(mac_addr)) {
printk(KERN_WARNING "%s (rtl8180): Invalid hwaddr! Using"
" randomly generated MAC addr\n", pci_name(pdev));
- random_ether_addr(mac_addr);
+ eth_random_addr(mac_addr);
}
SET_IEEE80211_PERM_ADDR(dev, mac_addr);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 4fb1ca1..71a30b0 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -1486,7 +1486,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
if (!is_valid_ether_addr(mac_addr)) {
printk(KERN_WARNING "rtl8187: Invalid hwaddr! Using randomly "
"generated MAC address\n");
- random_ether_addr(mac_addr);
+ eth_random_addr(mac_addr);
}
SET_IEEE80211_PERM_ADDR(dev, mac_addr);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.c b/drivers/net/wireless/rtl818x/rtl8187/leds.c
index 2e0de2f..c2d5b49 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/leds.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/leds.c
@@ -117,7 +117,7 @@ static void rtl8187_led_brightness_set(struct led_classdev *led_dev,
radio_on = true;
} else if (radio_on) {
radio_on = false;
- cancel_delayed_work_sync(&priv->led_on);
+ cancel_delayed_work(&priv->led_on);
ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
}
} else if (radio_on) {
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index f4c852c..942e56b 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -167,7 +167,7 @@ static const u8 tid_to_ac[] = {
0, /* IEEE80211_AC_VO */
};
-u8 rtl_tid_to_ac(struct ieee80211_hw *hw, u8 tid)
+u8 rtl_tid_to_ac(u8 tid)
{
return tid_to_ac[tid];
}
@@ -907,7 +907,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
struct rtl_priv *rtlpriv = rtl_priv(hw);
__le16 fc = hdr->frame_control;
- u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN));
+ u8 *act = (u8 *)skb->data + MAC80211_3ADDR_LEN;
u8 category;
if (!ieee80211_is_action(fc))
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 5a23a6d..f35af0f 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -138,7 +138,7 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
enum ieee80211_smps_mode smps);
u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie);
void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
-u8 rtl_tid_to_ac(struct ieee80211_hw *hw, u8 tid);
+u8 rtl_tid_to_ac(u8 tid);
extern struct attribute_group rtl_attribute_group;
int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
bool isht, u8 desc_rate, bool first_ampdu);
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
index 3d8cc4a..5b4b4d4 100644
--- a/drivers/net/wireless/rtlwifi/cam.c
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -128,7 +128,7 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
u32 us_config;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
"EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, ulUseDK=%x MacAddr %pM\n",
ul_entry_idx, ul_key_id, ul_enc_alg,
ul_default_key, mac_addr);
@@ -146,7 +146,7 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
}
rtl_cam_program_entry(hw, ul_entry_idx, mac_addr,
- (u8 *) key_content, us_config);
+ key_content, us_config);
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "<===\n");
@@ -342,7 +342,8 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
/* Remove from HW Security CAM */
memset(rtlpriv->sec.hwsec_cam_sta_addr[i], 0, ETH_ALEN);
rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
- pr_info("&&&&&&&&&del entry %d\n", i);
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+ "del CAM entry %d\n", i);
}
}
return;
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 278e9f9..a18ad2a 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -680,7 +680,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->short_preamble = bss_conf->use_short_preamble;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACK_PREAMBLE,
- (u8 *) (&mac->short_preamble));
+ &mac->short_preamble);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -693,7 +693,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->slot_time = RTL_SLOT_TIME_20;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *) (&mac->slot_time));
+ &mac->slot_time);
}
if (changed & BSS_CHANGED_HT) {
@@ -713,7 +713,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
rcu_read_unlock();
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY,
- (u8 *) (&mac->max_mss_density));
+ &mac->max_mss_density);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_FACTOR,
&mac->current_ampdu_factor);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_MIN_SPACE,
@@ -801,7 +801,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
u8 mstatus = RT_MEDIA_CONNECT;
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_JOINBSSRPT,
- (u8 *) (&mstatus));
+ &mstatus);
ppsc->report_linked = true;
}
} else {
@@ -809,7 +809,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
u8 mstatus = RT_MEDIA_DISCONNECT;
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_JOINBSSRPT,
- (u8 *)(&mstatus));
+ &mstatus);
ppsc->report_linked = false;
}
}
@@ -836,7 +836,7 @@ static void rtl_op_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
mac->tsf = tsf;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&bibss));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, &bibss);
}
static void rtl_op_reset_tsf(struct ieee80211_hw *hw,
@@ -845,7 +845,7 @@ static void rtl_op_reset_tsf(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 tmp = 0;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DUAL_TSF_RST, (u8 *) (&tmp));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DUAL_TSF_RST, &tmp);
}
static void rtl_op_sta_notify(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 1f14380..8e2f9afb 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -352,7 +352,7 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_BYTES,
(u8 *)&efuse_utilized);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_USAGE,
- (u8 *)&efuse_usage);
+ &efuse_usage);
done:
for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++)
kfree(efuse_word[i]);
@@ -409,7 +409,7 @@ void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
else if (type == 2)
efuse_shadow_read_2byte(hw, offset, (u16 *) value);
else if (type == 4)
- efuse_shadow_read_4byte(hw, offset, (u32 *) value);
+ efuse_shadow_read_4byte(hw, offset, value);
}
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 2062ea1..80f75d3 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -480,7 +480,7 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
/* we juse use em for BE/BK/VI/VO */
for (tid = 7; tid >= 0; tid--) {
- u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(hw, tid)];
+ u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(tid)];
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
while (!mac->act_scanning &&
rtlpriv->psc.rfpwr_state == ERFON) {
@@ -756,10 +756,10 @@ done:
if (index == rtlpci->rxringcount - 1)
rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
HW_DESC_RXERO,
- (u8 *)&tmp_one);
+ &tmp_one);
rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
- (u8 *)&tmp_one);
+ &tmp_one);
index = (index + 1) % rtlpci->rxringcount;
}
@@ -934,7 +934,7 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
__skb_queue_tail(&ring->queue, pskb);
rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, HW_DESC_OWN,
- (u8 *)&temp_one);
+ &temp_one);
return;
}
@@ -1126,11 +1126,11 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
rxbuffersize);
rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
HW_DESC_RXOWN,
- (u8 *)&tmp_one);
+ &tmp_one);
}
rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
- HW_DESC_RXERO, (u8 *)&tmp_one);
+ HW_DESC_RXERO, &tmp_one);
}
return 0;
}
@@ -1263,7 +1263,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->set_desc((u8 *) entry,
false,
HW_DESC_RXOWN,
- (u8 *)&tmp_one);
+ &tmp_one);
}
rtlpci->rx_ring[rx_queue_idx].idx = 0;
}
@@ -1273,17 +1273,18 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
*after reset, release previous pending packet,
*and force the tx idx to the first one
*/
- spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
if (rtlpci->tx_ring[i].desc) {
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];
while (skb_queue_len(&ring->queue)) {
- struct rtl_tx_desc *entry =
- &ring->desc[ring->idx];
- struct sk_buff *skb =
- __skb_dequeue(&ring->queue);
+ struct rtl_tx_desc *entry;
+ struct sk_buff *skb;
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock,
+ flags);
+ entry = &ring->desc[ring->idx];
+ skb = __skb_dequeue(&ring->queue);
pci_unmap_single(rtlpci->pdev,
rtlpriv->cfg->ops->
get_desc((u8 *)
@@ -1291,15 +1292,15 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
true,
HW_DESC_TXBUFF_ADDR),
skb->len, PCI_DMA_TODEVICE);
- kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
+ flags);
+ kfree_skb(skb);
}
ring->idx = 0;
}
}
- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
-
return 0;
}
@@ -1422,7 +1423,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
__skb_queue_tail(&ring->queue, skb);
rtlpriv->cfg->ops->set_desc((u8 *)pdesc, true,
- HW_DESC_OWN, (u8 *)&temp_one);
+ HW_DESC_OWN, &temp_one);
if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 5ae2664..13ad33e 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -333,10 +333,10 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
rpwm_val = 0x0C; /* RF on */
fw_pwrmode = FW_PS_ACTIVE_MODE;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
- (u8 *) (&rpwm_val));
+ &rpwm_val);
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_PWRMODE,
- (u8 *) (&fw_pwrmode));
+ &fw_pwrmode);
fw_current_inps = false;
rtlpriv->cfg->ops->set_hw_reg(hw,
@@ -356,11 +356,11 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
(u8 *) (&fw_current_inps));
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_PWRMODE,
- (u8 *) (&ppsc->fwctrl_psmode));
+ &ppsc->fwctrl_psmode);
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_SET_RPWM,
- (u8 *) (&rpwm_val));
+ &rpwm_val);
} else {
/* Reset the power save related parameters. */
ppsc->dot11_psmode = EACTIVE;
@@ -446,7 +446,7 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct ieee80211_hdr *hdr = (void *) data;
+ struct ieee80211_hdr *hdr = data;
struct ieee80211_tim_ie *tim_ie;
u8 *tim;
u8 tim_len;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index f7f48c7..a45afda 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -656,9 +656,8 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
} else {
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_AC_PARAM,
- (u8 *) (&tmp));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+ &tmp);
rtlpriv->dm.current_turbo_edca = false;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 692c8ef..44febfd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -168,7 +168,7 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 *bufferPtr = (u8 *) buffer;
+ u8 *bufferPtr = buffer;
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes\n", size);
@@ -262,7 +262,7 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
return 1;
pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
- pfwdata = (u8 *) rtlhal->pfirmware;
+ pfwdata = rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
if (IS_FW_HEADER_EXIST(pfwheader)) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 5c4d9bc..bd0da7e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -214,13 +214,13 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_AC_PARAM,
- (u8 *) (&e_aci));
+ &e_aci);
}
break;
}
case HW_VAR_ACK_PREAMBLE:{
u8 reg_tmp;
- u8 short_preamble = (bool) (*(u8 *) val);
+ u8 short_preamble = (bool)*val;
reg_tmp = (mac->cur_40_prime_sc) << 5;
if (short_preamble)
reg_tmp |= 0x80;
@@ -232,7 +232,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 min_spacing_to_set;
u8 sec_min_space;
- min_spacing_to_set = *((u8 *) val);
+ min_spacing_to_set = *val;
if (min_spacing_to_set <= 7) {
sec_min_space = 0;
@@ -257,7 +257,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SHORTGI_DENSITY:{
u8 density_to_set;
- density_to_set = *((u8 *) val);
+ density_to_set = *val;
mac->min_space_cfg |= (density_to_set << 3);
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
@@ -284,7 +284,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
else
p_regtoset = regtoset_normal;
- factor_toset = *((u8 *) val);
+ factor_toset = *(val);
if (factor_toset <= 3) {
factor_toset = (1 << (factor_toset + 2));
if (factor_toset > 0xf)
@@ -316,17 +316,17 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_AC_PARAM:{
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *(val);
rtl92c_dm_init_edca_turbo(hw);
if (rtlpci->acm_method != eAcmWay2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_ACM_CTRL,
- (u8 *) (&e_aci));
+ (&e_aci));
break;
}
case HW_VAR_ACM_CTRL:{
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *(val);
union aci_aifsn *p_aci_aifsn =
(union aci_aifsn *)(&(mac->ac[0].aifs));
u8 acm = p_aci_aifsn->f.acm;
@@ -382,7 +382,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_RETRY_LIMIT:{
- u8 retry_limit = ((u8 *) (val))[0];
+ u8 retry_limit = val[0];
rtl_write_word(rtlpriv, REG_RL,
retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -396,13 +396,13 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtlefuse->efuse_usedbytes = *((u16 *) val);
break;
case HW_VAR_EFUSE_USAGE:
- rtlefuse->efuse_usedpercentage = *((u8 *) val);
+ rtlefuse->efuse_usedpercentage = *val;
break;
case HW_VAR_IO_CMD:
rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val));
break;
case HW_VAR_WPA_CONFIG:
- rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+ rtl_write_byte(rtlpriv, REG_SECCFG, *val);
break;
case HW_VAR_SET_RPWM:{
u8 rpwm_val;
@@ -411,31 +411,30 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
udelay(1);
if (rpwm_val & BIT(7)) {
- rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
- (*(u8 *) val));
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val);
} else {
rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
- ((*(u8 *) val) | BIT(7)));
+ *val | BIT(7));
}
break;
}
case HW_VAR_H2C_FW_PWRMODE:{
- u8 psmode = (*(u8 *) val);
+ u8 psmode = *val;
if ((psmode != FW_PS_ACTIVE_MODE) &&
(!IS_92C_SERIAL(rtlhal->version))) {
rtl92c_dm_rf_saving(hw, true);
}
- rtl92c_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
+ rtl92c_set_fw_pwrmode_cmd(hw, *val);
break;
}
case HW_VAR_FW_PSMODE_STATUS:
ppsc->fw_current_inpsmode = *((bool *) val);
break;
case HW_VAR_H2C_FW_JOINBSSRPT:{
- u8 mstatus = (*(u8 *) val);
+ u8 mstatus = *val;
u8 tmp_regcr, tmp_reg422;
bool recover = false;
@@ -472,7 +471,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_byte(rtlpriv, REG_CR + 1,
(tmp_regcr & ~(BIT(0))));
}
- rtl92c_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+ rtl92c_set_fw_joinbss_report_cmd(hw, *val);
break;
}
@@ -486,7 +485,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_CORRECT_TSF:{
- u8 btype_ibss = ((u8 *) (val))[0];
+ u8 btype_ibss = val[0];
if (btype_ibss)
_rtl92ce_stop_tx_beacon(hw);
@@ -1589,10 +1588,10 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->autoload_failflag,
hwinfo);
- rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+ rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
rtlefuse->txpwr_fromeprom = true;
- rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+ rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
@@ -1939,7 +1938,7 @@ void rtl92ce_update_channel_access_setting(struct ieee80211_hw *hw)
u16 sifs_timer;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *)&mac->slot_time);
+ &mac->slot_time);
if (!mac->ht_enable)
sifs_timer = 0x0a0a;
else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 3af874e..5216664 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -605,7 +605,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
bool defaultadapter = true;
struct ieee80211_sta *sta;
- u8 *pdesc = (u8 *) pdesc_tx;
+ u8 *pdesc = pdesc_tx;
u16 seq_number;
__le16 fc = hdr->frame_control;
u8 fw_qsel = _rtl92ce_map_hwqueue_to_fwqueue(skb, hw_queue);
@@ -806,7 +806,7 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
SET_TX_DESC_OWN(pdesc, 1);
- SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len));
SET_TX_DESC_FIRST_SEG(pdesc, 1);
SET_TX_DESC_LAST_SEG(pdesc, 1);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 0c74d4f..4bbb711 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -381,11 +381,11 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->eeprom_did = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_DID]);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, " VID = 0x%02x PID = 0x%02x\n",
rtlefuse->eeprom_vid, rtlefuse->eeprom_did);
- rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+ rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
rtlefuse->eeprom_version =
le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VERSION]);
rtlefuse->txpwr_fromeprom = true;
- rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+ rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n",
rtlefuse->eeprom_oemid);
if (rtlhal->oem_id == RT_CID_DEFAULT) {
@@ -1660,7 +1660,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
for (e_aci = 0; e_aci < AC_MAX; e_aci++)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_AC_PARAM,
- (u8 *)(&e_aci));
+ &e_aci);
} else {
u8 sifstime = 0;
u8 u1bAIFS;
@@ -1685,7 +1685,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
case HW_VAR_ACK_PREAMBLE:{
u8 reg_tmp;
- u8 short_preamble = (bool) (*(u8 *) val);
+ u8 short_preamble = (bool)*val;
reg_tmp = 0;
if (short_preamble)
reg_tmp |= 0x80;
@@ -1696,7 +1696,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 min_spacing_to_set;
u8 sec_min_space;
- min_spacing_to_set = *((u8 *) val);
+ min_spacing_to_set = *val;
if (min_spacing_to_set <= 7) {
switch (rtlpriv->sec.pairwise_enc_algorithm) {
case NO_ENCRYPTION:
@@ -1729,7 +1729,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SHORTGI_DENSITY:{
u8 density_to_set;
- density_to_set = *((u8 *) val);
+ density_to_set = *val;
density_to_set &= 0x1f;
mac->min_space_cfg &= 0x07;
mac->min_space_cfg |= (density_to_set << 3);
@@ -1747,7 +1747,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 index = 0;
p_regtoset = regtoset_normal;
- factor_toset = *((u8 *) val);
+ factor_toset = *val;
if (factor_toset <= 3) {
factor_toset = (1 << (factor_toset + 2));
if (factor_toset > 0xf)
@@ -1774,7 +1774,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_AC_PARAM:{
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *val;
u32 u4b_ac_param;
u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min);
u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max);
@@ -1814,11 +1814,11 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
if (rtlusb->acm_method != eAcmWay2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_ACM_CTRL, (u8 *)(&e_aci));
+ HW_VAR_ACM_CTRL, &e_aci);
break;
}
case HW_VAR_ACM_CTRL:{
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *val;
union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)
(&(mac->ac[0].aifs));
u8 acm = p_aci_aifsn->f.acm;
@@ -1874,7 +1874,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_RETRY_LIMIT:{
- u8 retry_limit = ((u8 *) (val))[0];
+ u8 retry_limit = val[0];
rtl_write_word(rtlpriv, REG_RL,
retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -1891,39 +1891,38 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtlefuse->efuse_usedbytes = *((u16 *) val);
break;
case HW_VAR_EFUSE_USAGE:
- rtlefuse->efuse_usedpercentage = *((u8 *) val);
+ rtlefuse->efuse_usedpercentage = *val;
break;
case HW_VAR_IO_CMD:
rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val));
break;
case HW_VAR_WPA_CONFIG:
- rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+ rtl_write_byte(rtlpriv, REG_SECCFG, *val);
break;
case HW_VAR_SET_RPWM:{
u8 rpwm_val = rtl_read_byte(rtlpriv, REG_USB_HRPWM);
if (rpwm_val & BIT(7))
- rtl_write_byte(rtlpriv, REG_USB_HRPWM,
- (*(u8 *)val));
+ rtl_write_byte(rtlpriv, REG_USB_HRPWM, *val);
else
rtl_write_byte(rtlpriv, REG_USB_HRPWM,
- ((*(u8 *)val) | BIT(7)));
+ *val | BIT(7));
break;
}
case HW_VAR_H2C_FW_PWRMODE:{
- u8 psmode = (*(u8 *) val);
+ u8 psmode = *val;
if ((psmode != FW_PS_ACTIVE_MODE) &&
(!IS_92C_SERIAL(rtlhal->version)))
rtl92c_dm_rf_saving(hw, true);
- rtl92c_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
+ rtl92c_set_fw_pwrmode_cmd(hw, (*val));
break;
}
case HW_VAR_FW_PSMODE_STATUS:
ppsc->fw_current_inpsmode = *((bool *) val);
break;
case HW_VAR_H2C_FW_JOINBSSRPT:{
- u8 mstatus = (*(u8 *) val);
+ u8 mstatus = *val;
u8 tmp_reg422;
bool recover = false;
@@ -1948,7 +1947,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
tmp_reg422 | BIT(6));
rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
}
- rtl92c_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+ rtl92c_set_fw_joinbss_report_cmd(hw, (*val));
break;
}
case HW_VAR_AID:{
@@ -1961,7 +1960,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_CORRECT_TSF:{
- u8 btype_ibss = ((u8 *) (val))[0];
+ u8 btype_ibss = val[0];
if (btype_ibss)
_rtl92cu_stop_tx_beacon(hw);
@@ -2184,7 +2183,7 @@ void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw)
u16 sifs_timer;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *)&mac->slot_time);
+ &mac->slot_time);
if (!mac->ht_enable)
sifs_timer = 0x0a0a;
else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index d228358..9970c2b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -301,9 +301,11 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+ {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
+ {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
/* HP - Lite-On, 8188CUS Slim Combo */
{RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
{RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
@@ -346,6 +348,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
{RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+ {RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/
{RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
{RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
{RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 21bc827..2e6eb35 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -668,7 +668,7 @@ void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
SET_TX_DESC_RATE_ID(pdesc, 7);
SET_TX_DESC_MACID(pdesc, 0);
SET_TX_DESC_OWN(pdesc, 1);
- SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16)skb->len);
SET_TX_DESC_FIRST_SEG(pdesc, 1);
SET_TX_DESC_LAST_SEG(pdesc, 1);
SET_TX_DESC_OFFSET(pdesc, 0x20);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index a7d63a8..c0201ed 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -696,7 +696,7 @@ static void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw)
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
- (u8 *) (&tmp));
+ &tmp);
rtlpriv->dm.current_turbo_edca = false;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
index f548a8d..895ae6c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
@@ -120,7 +120,7 @@ static void _rtl92d_write_fw(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 *bufferPtr = (u8 *) buffer;
+ u8 *bufferPtr = buffer;
u32 pagenums, remainSize;
u32 page, offset;
@@ -256,8 +256,8 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware)
return 1;
fwsize = rtlhal->fwsize;
- pfwheader = (u8 *) rtlhal->pfirmware;
- pfwdata = (u8 *) rtlhal->pfirmware;
+ pfwheader = rtlhal->pfirmware;
+ pfwdata = rtlhal->pfirmware;
rtlhal->fw_version = (u16) GET_FIRMWARE_HDR_VERSION(pfwheader);
rtlhal->fw_subversion = (u16) GET_FIRMWARE_HDR_SUB_VER(pfwheader);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index b338d52..f4051f4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -235,12 +235,12 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
for (e_aci = 0; e_aci < AC_MAX; e_aci++)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_AC_PARAM,
- (u8 *) (&e_aci));
+ (&e_aci));
break;
}
case HW_VAR_ACK_PREAMBLE: {
u8 reg_tmp;
- u8 short_preamble = (bool) (*(u8 *) val);
+ u8 short_preamble = (bool) (*val);
reg_tmp = (mac->cur_40_prime_sc) << 5;
if (short_preamble)
@@ -252,7 +252,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 min_spacing_to_set;
u8 sec_min_space;
- min_spacing_to_set = *((u8 *) val);
+ min_spacing_to_set = *val;
if (min_spacing_to_set <= 7) {
sec_min_space = 0;
if (min_spacing_to_set < sec_min_space)
@@ -271,7 +271,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SHORTGI_DENSITY: {
u8 density_to_set;
- density_to_set = *((u8 *) val);
+ density_to_set = *val;
mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
mac->min_space_cfg |= (density_to_set << 3);
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
@@ -293,7 +293,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
regtoSet = 0x66626641;
else
regtoSet = 0xb972a841;
- factor_toset = *((u8 *) val);
+ factor_toset = *val;
if (factor_toset <= 3) {
factor_toset = (1 << (factor_toset + 2));
if (factor_toset > 0xf)
@@ -316,15 +316,15 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_AC_PARAM: {
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *val;
rtl92d_dm_init_edca_turbo(hw);
if (rtlpci->acm_method != eAcmWay2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
- (u8 *) (&e_aci));
+ &e_aci);
break;
}
case HW_VAR_ACM_CTRL: {
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *val;
union aci_aifsn *p_aci_aifsn =
(union aci_aifsn *)(&(mac->ac[0].aifs));
u8 acm = p_aci_aifsn->f.acm;
@@ -376,7 +376,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtlpci->receive_config = ((u32 *) (val))[0];
break;
case HW_VAR_RETRY_LIMIT: {
- u8 retry_limit = ((u8 *) (val))[0];
+ u8 retry_limit = val[0];
rtl_write_word(rtlpriv, REG_RL,
retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -390,16 +390,16 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtlefuse->efuse_usedbytes = *((u16 *) val);
break;
case HW_VAR_EFUSE_USAGE:
- rtlefuse->efuse_usedpercentage = *((u8 *) val);
+ rtlefuse->efuse_usedpercentage = *val;
break;
case HW_VAR_IO_CMD:
rtl92d_phy_set_io_cmd(hw, (*(enum io_type *)val));
break;
case HW_VAR_WPA_CONFIG:
- rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+ rtl_write_byte(rtlpriv, REG_SECCFG, *val);
break;
case HW_VAR_SET_RPWM:
- rtl92d_fill_h2c_cmd(hw, H2C_PWRM, 1, (u8 *) (val));
+ rtl92d_fill_h2c_cmd(hw, H2C_PWRM, 1, (val));
break;
case HW_VAR_H2C_FW_PWRMODE:
break;
@@ -407,7 +407,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
ppsc->fw_current_inpsmode = *((bool *) val);
break;
case HW_VAR_H2C_FW_JOINBSSRPT: {
- u8 mstatus = (*(u8 *) val);
+ u8 mstatus = (*val);
u8 tmp_regcr, tmp_reg422;
bool recover = false;
@@ -435,7 +435,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_byte(rtlpriv, REG_CR + 1,
(tmp_regcr & ~(BIT(0))));
}
- rtl92d_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+ rtl92d_set_fw_joinbss_report_cmd(hw, (*val));
break;
}
case HW_VAR_AID: {
@@ -447,7 +447,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_CORRECT_TSF: {
- u8 btype_ibss = ((u8 *) (val))[0];
+ u8 btype_ibss = val[0];
if (btype_ibss)
_rtl92de_stop_tx_beacon(hw);
@@ -1794,7 +1794,7 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
"RTL819X Not boot from eeprom, check it !!\n");
return;
}
- rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+ rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
_rtl92de_read_macphymode_and_bandtype(hw, hwinfo);
/* VID, DID SE 0xA-D */
@@ -2115,7 +2115,7 @@ void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw)
u16 sifs_timer;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *)&mac->slot_time);
+ &mac->slot_time);
if (!mac->ht_enable)
sifs_timer = 0x0a0a;
else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 18380a7..4420312 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3345,21 +3345,21 @@ void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw)
switch (rtlhal->macphymode) {
case DUALMAC_SINGLEPHY:
rtlphy->rf_type = RF_2T2R;
- rtlhal->version |= CHIP_92D_SINGLEPHY;
+ rtlhal->version |= RF_TYPE_2T2R;
rtlhal->bandset = BAND_ON_BOTH;
rtlhal->current_bandtype = BAND_ON_2_4G;
break;
case SINGLEMAC_SINGLEPHY:
rtlphy->rf_type = RF_2T2R;
- rtlhal->version |= CHIP_92D_SINGLEPHY;
+ rtlhal->version |= RF_TYPE_2T2R;
rtlhal->bandset = BAND_ON_BOTH;
rtlhal->current_bandtype = BAND_ON_2_4G;
break;
case DUALMAC_DUALPHY:
rtlphy->rf_type = RF_1T1R;
- rtlhal->version &= (~CHIP_92D_SINGLEPHY);
+ rtlhal->version &= RF_TYPE_1T1R;
/* Now we let MAC0 run on 5G band. */
if (rtlhal->interfaceindex == 0) {
rtlhal->bandset = BAND_ON_5G;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index 1666ef7..f80690d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -560,7 +560,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct ieee80211_sta *sta = info->control.sta;
- u8 *pdesc = (u8 *) pdesc_tx;
+ u8 *pdesc = pdesc_tx;
u16 seq_number;
__le16 fc = hdr->frame_control;
unsigned int buf_len = 0;
@@ -761,11 +761,11 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
SET_TX_DESC_FIRST_SEG(pdesc, 1);
SET_TX_DESC_LAST_SEG(pdesc, 1);
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) (skb->len));
+ SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)skb->len);
SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
SET_TX_DESC_RATE_ID(pdesc, 7);
SET_TX_DESC_MACID(pdesc, 0);
- SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len));
SET_TX_DESC_FIRST_SEG(pdesc, 1);
SET_TX_DESC_LAST_SEG(pdesc, 1);
SET_TX_DESC_OFFSET(pdesc, 0x20);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
index 2e11580..465f581 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
@@ -146,7 +146,7 @@ static void _rtl92s_dm_check_edca_turbo(struct ieee80211_hw *hw)
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
- (u8 *)(&tmp));
+ &tmp);
rtlpriv->dm.current_turbo_edca = false;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index b141c35..4542e69 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -145,13 +145,13 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_AC_PARAM,
- (u8 *)(&e_aci));
+ (&e_aci));
}
break;
}
case HW_VAR_ACK_PREAMBLE:{
u8 reg_tmp;
- u8 short_preamble = (bool) (*(u8 *) val);
+ u8 short_preamble = (bool) (*val);
reg_tmp = (mac->cur_40_prime_sc) << 5;
if (short_preamble)
reg_tmp |= 0x80;
@@ -163,7 +163,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 min_spacing_to_set;
u8 sec_min_space;
- min_spacing_to_set = *((u8 *)val);
+ min_spacing_to_set = *val;
if (min_spacing_to_set <= 7) {
if (rtlpriv->sec.pairwise_enc_algorithm ==
NO_ENCRYPTION)
@@ -194,7 +194,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SHORTGI_DENSITY:{
u8 density_to_set;
- density_to_set = *((u8 *) val);
+ density_to_set = *val;
mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
mac->min_space_cfg |= (density_to_set << 3);
@@ -216,7 +216,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
15, 15, 15, 15, 0};
u8 index = 0;
- factor_toset = *((u8 *) val);
+ factor_toset = *val;
if (factor_toset <= 3) {
factor_toset = (1 << (factor_toset + 2));
if (factor_toset > 0xf)
@@ -248,17 +248,17 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_AC_PARAM:{
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *val;
rtl92s_dm_init_edca_turbo(hw);
if (rtlpci->acm_method != eAcmWay2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_ACM_CTRL,
- (u8 *)(&e_aci));
+ &e_aci);
break;
}
case HW_VAR_ACM_CTRL:{
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *val;
union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)(&(
mac->ac[0].aifs));
u8 acm = p_aci_aifsn->f.acm;
@@ -313,7 +313,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_RETRY_LIMIT:{
- u8 retry_limit = ((u8 *) (val))[0];
+ u8 retry_limit = val[0];
rtl_write_word(rtlpriv, RETRY_LIMIT,
retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -328,14 +328,14 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_EFUSE_USAGE: {
- rtlefuse->efuse_usedpercentage = *((u8 *) val);
+ rtlefuse->efuse_usedpercentage = *val;
break;
}
case HW_VAR_IO_CMD: {
break;
}
case HW_VAR_WPA_CONFIG: {
- rtl_write_byte(rtlpriv, REG_SECR, *((u8 *) val));
+ rtl_write_byte(rtlpriv, REG_SECR, *val);
break;
}
case HW_VAR_SET_RPWM:{
@@ -1813,8 +1813,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
else
index = 2;
- tempval = (*(u8 *)&hwinfo[EEPROM_TX_PWR_HT20_DIFF +
- index]) & 0xff;
+ tempval = hwinfo[EEPROM_TX_PWR_HT20_DIFF + index] & 0xff;
rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] = (tempval & 0xF);
rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] =
((tempval >> 4) & 0xF);
@@ -1830,14 +1829,13 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
else
index = 1;
- tempval = (*(u8 *)&hwinfo[EEPROM_TX_PWR_OFDM_DIFF + index])
- & 0xff;
+ tempval = hwinfo[EEPROM_TX_PWR_OFDM_DIFF + index] & 0xff;
rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i] =
(tempval & 0xF);
rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i] =
((tempval >> 4) & 0xF);
- tempval = (*(u8 *)&hwinfo[TX_PWR_SAFETY_CHK]);
+ tempval = hwinfo[TX_PWR_SAFETY_CHK];
rtlefuse->txpwr_safetyflag = (tempval & 0x01);
}
@@ -1876,7 +1874,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
/* Read RF-indication and Tx Power gain
* index diff of legacy to HT OFDM rate. */
- tempval = (*(u8 *)&hwinfo[EEPROM_RFIND_POWERDIFF]) & 0xff;
+ tempval = hwinfo[EEPROM_RFIND_POWERDIFF] & 0xff;
rtlefuse->eeprom_txpowerdiff = tempval;
rtlefuse->legacy_httxpowerdiff =
rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][0];
@@ -1887,7 +1885,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
/* Get TSSI value for each path. */
usvalue = *(u16 *)&hwinfo[EEPROM_TSSI_A];
rtlefuse->eeprom_tssi[RF90_PATH_A] = (u8)((usvalue & 0xff00) >> 8);
- usvalue = *(u8 *)&hwinfo[EEPROM_TSSI_B];
+ usvalue = hwinfo[EEPROM_TSSI_B];
rtlefuse->eeprom_tssi[RF90_PATH_B] = (u8)(usvalue & 0xff);
RTPRINT(rtlpriv, FINIT, INIT_TxPower, "TSSI_A = 0x%x, TSSI_B = 0x%x\n",
@@ -1896,7 +1894,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
/* Read antenna tx power offset of B/C/D to A from EEPROM */
/* and read ThermalMeter from EEPROM */
- tempval = *(u8 *)&hwinfo[EEPROM_THERMALMETER];
+ tempval = hwinfo[EEPROM_THERMALMETER];
rtlefuse->eeprom_thermalmeter = tempval;
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
"thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
@@ -1906,20 +1904,20 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->tssi_13dbm = rtlefuse->eeprom_thermalmeter * 100;
/* Read CrystalCap from EEPROM */
- tempval = (*(u8 *)&hwinfo[EEPROM_CRYSTALCAP]) >> 4;
+ tempval = hwinfo[EEPROM_CRYSTALCAP] >> 4;
rtlefuse->eeprom_crystalcap = tempval;
/* CrystalCap, BIT(12)~15 */
rtlefuse->crystalcap = rtlefuse->eeprom_crystalcap;
/* Read IC Version && Channel Plan */
/* Version ID, Channel plan */
- rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+ rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
rtlefuse->txpwr_fromeprom = true;
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
"EEPROM ChannelPlan = 0x%4x\n", rtlefuse->eeprom_channelplan);
/* Read Customer ID or Board Type!!! */
- tempval = *(u8 *)&hwinfo[EEPROM_BOARDTYPE];
+ tempval = hwinfo[EEPROM_BOARDTYPE];
/* Change RF type definition */
if (tempval == 0)
rtlphy->rf_type = RF_2T2R;
@@ -1941,7 +1939,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
}
}
rtlefuse->b1ss_support = rtlefuse->b1x1_recvcombine;
- rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMID];
+ rtlefuse->eeprom_oemid = *&hwinfo[EEPROM_CUSTOMID];
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x",
rtlefuse->eeprom_oemid);
@@ -2251,7 +2249,7 @@ void rtl92se_update_channel_access_setting(struct ieee80211_hw *hw)
u16 sifs_timer;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *)&mac->slot_time);
+ &mac->slot_time);
sifs_timer = 0x0e0e;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
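Many of the hunks in this file just drop redundant casts: hwinfo is already a u8 array, so *(u8 *)&hwinfo[i] reads the same byte as hwinfo[i]. A trivial illustration under that assumption (read_eeprom_byte is a made-up helper, not driver code):

#include <linux/types.h>

static u8 read_eeprom_byte(const u8 *hwinfo, unsigned int offset)
{
	return hwinfo[offset];	/* same byte as *(u8 *)&hwinfo[offset] */
}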
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index 8d7099b..b917a2a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -1247,6 +1247,9 @@ static void _rtl92s_phy_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
/* Read HT 40 OFDM TX power */
ofdmpowerLevel[0] = rtlefuse->txpwrlevel_ht40_2s[0][index];
ofdmpowerLevel[1] = rtlefuse->txpwrlevel_ht40_2s[1][index];
+ } else {
+ ofdmpowerLevel[0] = 0;
+ ofdmpowerLevel[1] = 0;
}
}
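The new else branch above ensures ofdmpowerLevel[] is assigned on every path instead of being left uninitialized when no table applies. A minimal sketch of the same rule, with illustrative names only (lookup_power, tbl, level are not driver symbols):

#include <linux/types.h>

static void lookup_power(bool ht40_2s, u8 index,
			 const u8 tbl[2][14], u8 level[2])
{
	if (ht40_2s) {
		level[0] = tbl[0][index];
		level[1] = tbl[1][index];
	} else {
		/* no table entry: fall back to a defined default */
		level[0] = 0;
		level[1] = 0;
	}
}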
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 730bcc9..ad4b480 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -29,7 +29,6 @@
#include "../wifi.h"
#include "../core.h"
-#include "../pci.h"
#include "../base.h"
#include "../pci.h"
#include "reg.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 812b585..36d1cb3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -599,7 +599,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct ieee80211_sta *sta = info->control.sta;
- u8 *pdesc = (u8 *) pdesc_tx;
+ u8 *pdesc = pdesc_tx;
u16 seq_number;
__le16 fc = hdr->frame_control;
u8 reserved_macid = 0;
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index a6049d7..aa970fc 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -131,15 +131,19 @@ static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len)
u8 request;
u16 wvalue;
u16 index;
- __le32 *data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
+ __le32 *data;
+ unsigned long flags;
+ spin_lock_irqsave(&rtlpriv->locks.usb_lock, flags);
+ if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
+ rtlpriv->usb_data_index = 0;
+ data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
+ spin_unlock_irqrestore(&rtlpriv->locks.usb_lock, flags);
request = REALTEK_USB_VENQT_CMD_REQ;
index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
wvalue = (u16)addr;
_usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
- if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
- rtlpriv->usb_data_index = 0;
return le32_to_cpu(*data);
}
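The change above advances usb_data_index and picks the corresponding slot of the shared usb_data[] array while holding the new usb_lock, so concurrent synchronous reads no longer race on the index. A sketch of that claim-under-lock pattern, using made-up names (demo_ctx, DEMO_BUF_COUNT) rather than rtlwifi ones:

#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_BUF_COUNT	32

struct demo_ctx {
	spinlock_t lock;		/* protects index */
	unsigned int index;
	__le32 data[DEMO_BUF_COUNT];
};

static __le32 *demo_claim_slot(struct demo_ctx *ctx)
{
	unsigned long flags;
	__le32 *slot;

	spin_lock_irqsave(&ctx->lock, flags);
	if (++ctx->index >= DEMO_BUF_COUNT)
		ctx->index = 0;
	slot = &ctx->data[ctx->index];
	spin_unlock_irqrestore(&ctx->lock, flags);

	return slot;	/* exclusive to this caller until the index wraps */
}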
@@ -951,6 +955,10 @@ int __devinit rtl_usb_probe(struct usb_interface *intf,
GFP_KERNEL);
if (!rtlpriv->usb_data)
return -ENOMEM;
+
+ /* this spin lock must be initialized early */
+ spin_lock_init(&rtlpriv->locks.usb_lock);
+
rtlpriv->usb_data_index = 0;
init_completion(&rtlpriv->firmware_loading_complete);
SET_IEEE80211_DEV(hw, &intf->dev);
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index bd816ae..cdaa21f 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1555,6 +1555,7 @@ struct rtl_locks {
spinlock_t rf_ps_lock;
spinlock_t rf_lock;
spinlock_t waitq_lock;
+ spinlock_t usb_lock;
/*Dual mac*/
spinlock_t cck_and_rw_pagea_lock;
diff --git a/drivers/net/wireless/ti/Kconfig b/drivers/net/wireless/ti/Kconfig
index 1a72932..be80011 100644
--- a/drivers/net/wireless/ti/Kconfig
+++ b/drivers/net/wireless/ti/Kconfig
@@ -8,6 +8,7 @@ menuconfig WL_TI
if WL_TI
source "drivers/net/wireless/ti/wl1251/Kconfig"
source "drivers/net/wireless/ti/wl12xx/Kconfig"
+source "drivers/net/wireless/ti/wl18xx/Kconfig"
# keep last for automatic dependencies
source "drivers/net/wireless/ti/wlcore/Kconfig"
diff --git a/drivers/net/wireless/ti/Makefile b/drivers/net/wireless/ti/Makefile
index 0a56562..4d68239 100644
--- a/drivers/net/wireless/ti/Makefile
+++ b/drivers/net/wireless/ti/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_WLCORE) += wlcore/
obj-$(CONFIG_WL12XX) += wl12xx/
obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wlcore/
obj-$(CONFIG_WL1251) += wl1251/
+obj-$(CONFIG_WL18XX) += wl18xx/
diff --git a/drivers/net/wireless/ti/wl1251/acx.c b/drivers/net/wireless/ti/wl1251/acx.c
index ad87a1a..db6430c 100644
--- a/drivers/net/wireless/ti/wl1251/acx.c
+++ b/drivers/net/wireless/ti/wl1251/acx.c
@@ -869,7 +869,7 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime)
}
*mactime = tsf_info->current_tsf_lsb |
- (tsf_info->current_tsf_msb << 31);
+ ((u64)tsf_info->current_tsf_msb << 32);
out:
kfree(tsf_info);
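The fix above widens the MSB half before shifting: the old shift by 31 was off by one and, applied to a 32-bit operand, would have lost the high word anyway. A one-line helper showing the intended arithmetic (combine_tsf is hypothetical, not a wl1251 function):

#include <linux/types.h>

static inline u64 combine_tsf(u32 lsb, u32 msb)
{
	return (u64)lsb | ((u64)msb << 32);
}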
diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index d14d69d..6822b84 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
@@ -277,15 +277,6 @@ int wl1251_cmd_join(struct wl1251 *wl, u8 bss_type, u8 channel,
join->rx_config_options = wl->rx_config;
join->rx_filter_options = wl->rx_filter;
- /*
- * FIXME: disable temporarily all filters because after commit
- * 9cef8737 "mac80211: fix managed mode BSSID handling" broke
- * association. The filter logic needs to be implemented properly
- * and once that is done, this hack can be removed.
- */
- join->rx_config_options = 0;
- join->rx_filter_options = WL1251_DEFAULT_RX_FILTER;
-
join->basic_rate_set = RATE_MASK_1MBPS | RATE_MASK_2MBPS |
RATE_MASK_5_5MBPS | RATE_MASK_11MBPS;
diff --git a/drivers/net/wireless/ti/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index 9f15cca..5ec50a4 100644
--- a/drivers/net/wireless/ti/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
@@ -76,8 +76,7 @@ static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
}
}
- if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID &&
- wl->station_mode != STATION_ACTIVE_MODE) {
+ if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");
/* indicate to the stack that beacons have been lost */
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index d1afb8e..3118c42 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -334,6 +334,12 @@ static int wl1251_join(struct wl1251 *wl, u8 bss_type, u8 channel,
if (ret < 0)
goto out;
+ /*
+ * Join command applies filters, and if we are not associated,
+ * BSSID filter must be disabled for association to work.
+ */
+ if (is_zero_ether_addr(wl->bssid))
+ wl->rx_config &= ~CFG_BSSID_FILTER_EN;
ret = wl1251_cmd_join(wl, bss_type, channel, beacon_interval,
dtim_period);
@@ -348,33 +354,6 @@ out:
return ret;
}
-static void wl1251_filter_work(struct work_struct *work)
-{
- struct wl1251 *wl =
- container_of(work, struct wl1251, filter_work);
- int ret;
-
- mutex_lock(&wl->mutex);
-
- if (wl->state == WL1251_STATE_OFF)
- goto out;
-
- ret = wl1251_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
-
- ret = wl1251_join(wl, wl->bss_type, wl->channel, wl->beacon_int,
- wl->dtim_period);
- if (ret < 0)
- goto out_sleep;
-
-out_sleep:
- wl1251_ps_elp_sleep(wl);
-
-out:
- mutex_unlock(&wl->mutex);
-}
-
static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct wl1251 *wl = hw->priv;
@@ -478,7 +457,6 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
cancel_work_sync(&wl->irq_work);
cancel_work_sync(&wl->tx_work);
- cancel_work_sync(&wl->filter_work);
cancel_delayed_work_sync(&wl->elp_work);
mutex_lock(&wl->mutex);
@@ -681,13 +659,15 @@ out:
FIF_FCSFAIL | \
FIF_BCN_PRBRESP_PROMISC | \
FIF_CONTROL | \
- FIF_OTHER_BSS)
+ FIF_OTHER_BSS | \
+ FIF_PROBE_REQ)
static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
unsigned int changed,
unsigned int *total, u64 multicast)
{
struct wl1251 *wl = hw->priv;
+ int ret;
wl1251_debug(DEBUG_MAC80211, "mac80211 configure filter");
@@ -698,7 +678,7 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
/* no filters which we support changed */
return;
- /* FIXME: wl->rx_config and wl->rx_filter are not protected */
+ mutex_lock(&wl->mutex);
wl->rx_config = WL1251_DEFAULT_RX_CONFIG;
wl->rx_filter = WL1251_DEFAULT_RX_FILTER;
@@ -721,15 +701,25 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
}
if (*total & FIF_CONTROL)
wl->rx_filter |= CFG_RX_CTL_EN;
- if (*total & FIF_OTHER_BSS)
- wl->rx_filter &= ~CFG_BSSID_FILTER_EN;
+ if (*total & FIF_OTHER_BSS || is_zero_ether_addr(wl->bssid))
+ wl->rx_config &= ~CFG_BSSID_FILTER_EN;
+ if (*total & FIF_PROBE_REQ)
+ wl->rx_filter |= CFG_RX_PREQ_EN;
- /*
- * FIXME: workqueues need to be properly cancelled on stop(), for
- * now let's just disable changing the filter settings. They will
- * be updated any on config().
- */
- /* schedule_work(&wl->filter_work); */
+ if (wl->state == WL1251_STATE_OFF)
+ goto out;
+
+ ret = wl1251_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ /* send filters to firmware */
+ wl1251_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
+
+ wl1251_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
}
/* HW encryption */
@@ -1390,7 +1380,6 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
skb_queue_head_init(&wl->tx_queue);
- INIT_WORK(&wl->filter_work, wl1251_filter_work);
INIT_DELAYED_WORK(&wl->elp_work, wl1251_elp_work);
wl->channel = WL1251_DEFAULT_CHANNEL;
wl->scanning = false;
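The reworked configure_filter above updates rx_config/rx_filter under wl->mutex and pushes them to the firmware immediately, instead of deferring to the now-removed filter_work. A condensed sketch of that flow, reusing the driver calls visible in the hunks (apply_filters itself and the header choice are illustrative):

#include "wl1251.h"
#include "acx.h"
#include "ps.h"

static void apply_filters(struct wl1251 *wl, u32 config, u32 filter)
{
	mutex_lock(&wl->mutex);

	wl->rx_config = config;
	wl->rx_filter = filter;

	if (wl->state == WL1251_STATE_OFF)
		goto out;

	if (wl1251_ps_elp_wakeup(wl) < 0)
		goto out;

	/* chip is awake: write the filters, then let it sleep again */
	wl1251_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
	wl1251_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
}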
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 1b851f6..e2750a1 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -260,6 +260,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
}
if (wl->irq) {
+ irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
if (ret < 0) {
wl1251_error("request_irq() failed: %d", ret);
@@ -267,7 +268,6 @@ static int wl1251_sdio_probe(struct sdio_func *func,
}
irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
- disable_irq(wl->irq);
wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
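The SDIO probe above (the SPI probe below makes the same change) marks the line IRQ_NOAUTOEN before request_irq() instead of enabling the interrupt and racing to disable_irq() afterwards, which closes the window in which the IRQ could fire before the driver is ready. A minimal sketch of that ordering (demo_setup_irq and the "demo" name are placeholders):

#include <linux/interrupt.h>
#include <linux/irq.h>

static int demo_setup_irq(unsigned int irq, irq_handler_t handler, void *dev)
{
	int ret;

	/* keep the line masked until we explicitly enable it */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);

	ret = request_irq(irq, handler, 0, "demo", dev);
	if (ret < 0)
		return ret;

	irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
	/* call enable_irq(irq) later, once initialization is complete */
	return 0;
}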
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 6248c35..567660c 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -73,6 +73,8 @@ static void wl1251_spi_reset(struct wl1251 *wl)
spi_sync(wl_to_spi(wl), &m);
wl1251_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
+
+ kfree(cmd);
}
static void wl1251_spi_wake(struct wl1251 *wl)
@@ -127,6 +129,8 @@ static void wl1251_spi_wake(struct wl1251 *wl)
spi_sync(wl_to_spi(wl), &m);
wl1251_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
+
+ kfree(cmd);
}
static void wl1251_spi_reset_wake(struct wl1251 *wl)
@@ -281,6 +285,7 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
wl->use_eeprom = pdata->use_eeprom;
+ irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl);
if (ret < 0) {
wl1251_error("request_irq() failed: %d", ret);
@@ -289,8 +294,6 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
- disable_irq(wl->irq);
-
ret = wl1251_init_ieee80211(wl);
if (ret)
goto out_irq;
diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
index 9d8f581..fd02060 100644
--- a/drivers/net/wireless/ti/wl1251/wl1251.h
+++ b/drivers/net/wireless/ti/wl1251/wl1251.h
@@ -315,7 +315,6 @@ struct wl1251 {
bool tx_queue_stopped;
struct work_struct tx_work;
- struct work_struct filter_work;
/* Pending TX frames */
struct sk_buff *tx_frames[16];
diff --git a/drivers/net/wireless/ti/wl12xx/Makefile b/drivers/net/wireless/ti/wl12xx/Makefile
index 87f64b1..da509aa 100644
--- a/drivers/net/wireless/ti/wl12xx/Makefile
+++ b/drivers/net/wireless/ti/wl12xx/Makefile
@@ -1,3 +1,3 @@
-wl12xx-objs = main.o cmd.o acx.o
+wl12xx-objs = main.o cmd.o acx.o debugfs.o
obj-$(CONFIG_WL12XX) += wl12xx.o
diff --git a/drivers/net/wireless/ti/wl12xx/acx.h b/drivers/net/wireless/ti/wl12xx/acx.h
index d1f5aba..2a26868 100644
--- a/drivers/net/wireless/ti/wl12xx/acx.h
+++ b/drivers/net/wireless/ti/wl12xx/acx.h
@@ -24,6 +24,21 @@
#define __WL12XX_ACX_H__
#include "../wlcore/wlcore.h"
+#include "../wlcore/acx.h"
+
+#define WL12XX_ACX_ALL_EVENTS_VECTOR (WL1271_ACX_INTR_WATCHDOG | \
+ WL1271_ACX_INTR_INIT_COMPLETE | \
+ WL1271_ACX_INTR_EVENT_A | \
+ WL1271_ACX_INTR_EVENT_B | \
+ WL1271_ACX_INTR_CMD_COMPLETE | \
+ WL1271_ACX_INTR_HW_AVAILABLE | \
+ WL1271_ACX_INTR_DATA)
+
+#define WL12XX_INTR_MASK (WL1271_ACX_INTR_WATCHDOG | \
+ WL1271_ACX_INTR_EVENT_A | \
+ WL1271_ACX_INTR_EVENT_B | \
+ WL1271_ACX_INTR_HW_AVAILABLE | \
+ WL1271_ACX_INTR_DATA)
struct wl1271_acx_host_config_bitmap {
struct acx_header header;
@@ -31,6 +46,228 @@ struct wl1271_acx_host_config_bitmap {
__le32 host_cfg_bitmap;
} __packed;
+struct wl12xx_acx_tx_statistics {
+ __le32 internal_desc_overflow;
+} __packed;
+
+struct wl12xx_acx_rx_statistics {
+ __le32 out_of_mem;
+ __le32 hdr_overflow;
+ __le32 hw_stuck;
+ __le32 dropped;
+ __le32 fcs_err;
+ __le32 xfr_hint_trig;
+ __le32 path_reset;
+ __le32 reset_counter;
+} __packed;
+
+struct wl12xx_acx_dma_statistics {
+ __le32 rx_requested;
+ __le32 rx_errors;
+ __le32 tx_requested;
+ __le32 tx_errors;
+} __packed;
+
+struct wl12xx_acx_isr_statistics {
+ /* host command complete */
+ __le32 cmd_cmplt;
+
+ /* fiqisr() */
+ __le32 fiqs;
+
+ /* (INT_STS_ND & INT_TRIG_RX_HEADER) */
+ __le32 rx_headers;
+
+ /* (INT_STS_ND & INT_TRIG_RX_CMPLT) */
+ __le32 rx_completes;
+
+ /* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */
+ __le32 rx_mem_overflow;
+
+ /* (INT_STS_ND & INT_TRIG_S_RX_RDY) */
+ __le32 rx_rdys;
+
+ /* irqisr() */
+ __le32 irqs;
+
+ /* (INT_STS_ND & INT_TRIG_TX_PROC) */
+ __le32 tx_procs;
+
+ /* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */
+ __le32 decrypt_done;
+
+ /* (INT_STS_ND & INT_TRIG_DMA0) */
+ __le32 dma0_done;
+
+ /* (INT_STS_ND & INT_TRIG_DMA1) */
+ __le32 dma1_done;
+
+ /* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */
+ __le32 tx_exch_complete;
+
+ /* (INT_STS_ND & INT_TRIG_COMMAND) */
+ __le32 commands;
+
+ /* (INT_STS_ND & INT_TRIG_RX_PROC) */
+ __le32 rx_procs;
+
+ /* (INT_STS_ND & INT_TRIG_PM_802) */
+ __le32 hw_pm_mode_changes;
+
+ /* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */
+ __le32 host_acknowledges;
+
+ /* (INT_STS_ND & INT_TRIG_PM_PCI) */
+ __le32 pci_pm;
+
+ /* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */
+ __le32 wakeups;
+
+ /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
+ __le32 low_rssi;
+} __packed;
+
+struct wl12xx_acx_wep_statistics {
+ /* WEP address keys configured */
+ __le32 addr_key_count;
+
+ /* default keys configured */
+ __le32 default_key_count;
+
+ __le32 reserved;
+
+ /* number of times that the WEP key was not found on lookup */
+ __le32 key_not_found;
+
+ /* number of times that WEP key decryption failed */
+ __le32 decrypt_fail;
+
+ /* WEP packets decrypted */
+ __le32 packets;
+
+ /* WEP decrypt interrupts */
+ __le32 interrupt;
+} __packed;
+
+#define ACX_MISSED_BEACONS_SPREAD 10
+
+struct wl12xx_acx_pwr_statistics {
+ /* the amount of enters into power save mode (both PD & ELP) */
+ __le32 ps_enter;
+
+ /* the amount of enters into ELP mode */
+ __le32 elp_enter;
+
+ /* the amount of missing beacon interrupts to the host */
+ __le32 missing_bcns;
+
+ /* the amount of wake on host-access times */
+ __le32 wake_on_host;
+
+ /* the amount of wake on timer-expire */
+ __le32 wake_on_timer_exp;
+
+ /* the number of packets that were transmitted with PS bit set */
+ __le32 tx_with_ps;
+
+ /* the number of packets that were transmitted with PS bit clear */
+ __le32 tx_without_ps;
+
+ /* the number of received beacons */
+ __le32 rcvd_beacons;
+
+ /* the number of entering into PowerOn (power save off) */
+ __le32 power_save_off;
+
+ /* the number of entries into power save mode */
+ __le16 enable_ps;
+
+ /*
+ * the number of exits from power save, not including failed PS
+ * transitions
+ */
+ __le16 disable_ps;
+
+ /*
+ * the number of times the TSF counter was adjusted because
+ * of drift
+ */
+ __le32 fix_tsf_ps;
+
+ /* Gives statistics about the spread continuous missed beacons.
+ * The 16 LSB are dedicated for the PS mode.
+ * The 16 MSB are dedicated for the PS mode.
+ * cont_miss_bcns_spread[0] - single missed beacon.
+ * cont_miss_bcns_spread[1] - two continuous missed beacons.
+ * cont_miss_bcns_spread[2] - three continuous missed beacons.
+ * ...
+ * cont_miss_bcns_spread[9] - ten and more continuous missed beacons.
+ */
+ __le32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD];
+
+ /* the number of beacons in awake mode */
+ __le32 rcvd_awake_beacons;
+} __packed;
+
+struct wl12xx_acx_mic_statistics {
+ __le32 rx_pkts;
+ __le32 calc_failure;
+} __packed;
+
+struct wl12xx_acx_aes_statistics {
+ __le32 encrypt_fail;
+ __le32 decrypt_fail;
+ __le32 encrypt_packets;
+ __le32 decrypt_packets;
+ __le32 encrypt_interrupt;
+ __le32 decrypt_interrupt;
+} __packed;
+
+struct wl12xx_acx_event_statistics {
+ __le32 heart_beat;
+ __le32 calibration;
+ __le32 rx_mismatch;
+ __le32 rx_mem_empty;
+ __le32 rx_pool;
+ __le32 oom_late;
+ __le32 phy_transmit_error;
+ __le32 tx_stuck;
+} __packed;
+
+struct wl12xx_acx_ps_statistics {
+ __le32 pspoll_timeouts;
+ __le32 upsd_timeouts;
+ __le32 upsd_max_sptime;
+ __le32 upsd_max_apturn;
+ __le32 pspoll_max_apturn;
+ __le32 pspoll_utilization;
+ __le32 upsd_utilization;
+} __packed;
+
+struct wl12xx_acx_rxpipe_statistics {
+ __le32 rx_prep_beacon_drop;
+ __le32 descr_host_int_trig_rx_data;
+ __le32 beacon_buffer_thres_host_int_trig_rx_data;
+ __le32 missed_beacon_host_int_trig_rx_data;
+ __le32 tx_xfr_host_int_trig_rx_data;
+} __packed;
+
+struct wl12xx_acx_statistics {
+ struct acx_header header;
+
+ struct wl12xx_acx_tx_statistics tx;
+ struct wl12xx_acx_rx_statistics rx;
+ struct wl12xx_acx_dma_statistics dma;
+ struct wl12xx_acx_isr_statistics isr;
+ struct wl12xx_acx_wep_statistics wep;
+ struct wl12xx_acx_pwr_statistics pwr;
+ struct wl12xx_acx_aes_statistics aes;
+ struct wl12xx_acx_mic_statistics mic;
+ struct wl12xx_acx_event_statistics event;
+ struct wl12xx_acx_ps_statistics ps;
+ struct wl12xx_acx_rxpipe_statistics rxpipe;
+} __packed;
+
int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
#endif /* __WL12XX_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wl12xx/cmd.c b/drivers/net/wireless/ti/wl12xx/cmd.c
index 8ffaeb5..6222062 100644
--- a/drivers/net/wireless/ti/wl12xx/cmd.c
+++ b/drivers/net/wireless/ti/wl12xx/cmd.c
@@ -65,6 +65,7 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
struct wl1271_general_parms_cmd *gen_parms;
struct wl1271_ini_general_params *gp =
&((struct wl1271_nvs_file *)wl->nvs)->general_params;
+ struct wl12xx_priv *priv = wl->priv;
bool answer = false;
int ret;
@@ -84,11 +85,15 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
memcpy(&gen_parms->general_params, gp, sizeof(*gp));
- if (gp->tx_bip_fem_auto_detect)
+ /* If we started in PLT FEM_DETECT mode, force auto detect */
+ if (wl->plt_mode == PLT_FEM_DETECT)
+ gen_parms->general_params.tx_bip_fem_auto_detect = true;
+
+ if (gen_parms->general_params.tx_bip_fem_auto_detect)
answer = true;
/* Override the REF CLK from the NVS with the one from platform data */
- gen_parms->general_params.ref_clock = wl->ref_clock;
+ gen_parms->general_params.ref_clock = priv->ref_clock;
ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
if (ret < 0) {
@@ -105,8 +110,17 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
goto out;
}
+ /* If we are in calibrator based fem auto detect - save fem nr */
+ if (wl->plt_mode == PLT_FEM_DETECT)
+ wl->fem_manuf = gp->tx_bip_fem_manufacturer;
+
wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
- answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
+ answer == false ?
+ "manual" :
+ wl->plt_mode == PLT_FEM_DETECT ?
+ "calibrator_fem_detect" :
+ "auto",
+ gp->tx_bip_fem_manufacturer);
out:
kfree(gen_parms);
@@ -118,6 +132,7 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
struct wl128x_general_parms_cmd *gen_parms;
struct wl128x_ini_general_params *gp =
&((struct wl128x_nvs_file *)wl->nvs)->general_params;
+ struct wl12xx_priv *priv = wl->priv;
bool answer = false;
int ret;
@@ -137,12 +152,16 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
memcpy(&gen_parms->general_params, gp, sizeof(*gp));
- if (gp->tx_bip_fem_auto_detect)
+ /* If we started in PLT FEM_DETECT mode, force auto detect */
+ if (wl->plt_mode == PLT_FEM_DETECT)
+ gen_parms->general_params.tx_bip_fem_auto_detect = true;
+
+ if (gen_parms->general_params.tx_bip_fem_auto_detect)
answer = true;
/* Replace REF and TCXO CLKs with the ones from platform data */
- gen_parms->general_params.ref_clock = wl->ref_clock;
- gen_parms->general_params.tcxo_ref_clock = wl->tcxo_clock;
+ gen_parms->general_params.ref_clock = priv->ref_clock;
+ gen_parms->general_params.tcxo_ref_clock = priv->tcxo_clock;
ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
if (ret < 0) {
@@ -159,8 +178,17 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
goto out;
}
+ /* If we are in calibrator based fem auto detect - save fem nr */
+ if (wl->plt_mode == PLT_FEM_DETECT)
+ wl->fem_manuf = gp->tx_bip_fem_manufacturer;
+
wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
- answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
+ answer == false ?
+ "manual" :
+ wl->plt_mode == PLT_FEM_DETECT ?
+ "calibrator_fem_detect" :
+ "auto",
+ gp->tx_bip_fem_manufacturer);
out:
kfree(gen_parms);
@@ -172,7 +200,7 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
struct wl1271_nvs_file *nvs = (struct wl1271_nvs_file *)wl->nvs;
struct wl1271_radio_parms_cmd *radio_parms;
struct wl1271_ini_general_params *gp = &nvs->general_params;
- int ret;
+ int ret, fem_idx;
if (!wl->nvs)
return -ENODEV;
@@ -183,11 +211,13 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
+ fem_idx = WL12XX_FEM_TO_NVS_ENTRY(gp->tx_bip_fem_manufacturer);
+
/* 2.4GHz parameters */
memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
sizeof(struct wl1271_ini_band_params_2));
memcpy(&radio_parms->dyn_params_2,
- &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
+ &nvs->dyn_radio_params_2[fem_idx].params,
sizeof(struct wl1271_ini_fem_params_2));
/* 5GHz parameters */
@@ -195,7 +225,7 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
&nvs->stat_radio_params_5,
sizeof(struct wl1271_ini_band_params_5));
memcpy(&radio_parms->dyn_params_5,
- &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
+ &nvs->dyn_radio_params_5[fem_idx].params,
sizeof(struct wl1271_ini_fem_params_5));
wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
@@ -214,7 +244,7 @@ int wl128x_cmd_radio_parms(struct wl1271 *wl)
struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
struct wl128x_radio_parms_cmd *radio_parms;
struct wl128x_ini_general_params *gp = &nvs->general_params;
- int ret;
+ int ret, fem_idx;
if (!wl->nvs)
return -ENODEV;
@@ -225,11 +255,13 @@ int wl128x_cmd_radio_parms(struct wl1271 *wl)
radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
+ fem_idx = WL12XX_FEM_TO_NVS_ENTRY(gp->tx_bip_fem_manufacturer);
+
/* 2.4GHz parameters */
memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
sizeof(struct wl128x_ini_band_params_2));
memcpy(&radio_parms->dyn_params_2,
- &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
+ &nvs->dyn_radio_params_2[fem_idx].params,
sizeof(struct wl128x_ini_fem_params_2));
/* 5GHz parameters */
@@ -237,7 +269,7 @@ int wl128x_cmd_radio_parms(struct wl1271 *wl)
&nvs->stat_radio_params_5,
sizeof(struct wl128x_ini_band_params_5));
memcpy(&radio_parms->dyn_params_5,
- &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
+ &nvs->dyn_radio_params_5[fem_idx].params,
sizeof(struct wl128x_ini_fem_params_5));
radio_parms->fem_vendor_and_options = nvs->fem_vendor_and_options;
diff --git a/drivers/net/wireless/ti/wl12xx/debugfs.c b/drivers/net/wireless/ti/wl12xx/debugfs.c
new file mode 100644
index 0000000..0521cbf
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/debugfs.c
@@ -0,0 +1,243 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "../wlcore/debugfs.h"
+#include "../wlcore/wlcore.h"
+
+#include "wl12xx.h"
+#include "acx.h"
+#include "debugfs.h"
+
+#define WL12XX_DEBUGFS_FWSTATS_FILE(a, b, c) \
+ DEBUGFS_FWSTATS_FILE(a, b, c, wl12xx_acx_statistics)
+
+WL12XX_DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, "%u");
+
+WL12XX_DEBUGFS_FWSTATS_FILE(rx, out_of_mem, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rx, hw_stuck, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rx, dropped, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rx, fcs_err, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rx, path_reset, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rx, reset_counter, "%u");
+
+WL12XX_DEBUGFS_FWSTATS_FILE(dma, rx_requested, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(dma, rx_errors, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(dma, tx_requested, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(dma, tx_errors, "%u");
+
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, fiqs, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, rx_headers, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, rx_rdys, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, tx_procs, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, decrypt_done, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, dma0_done, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, dma1_done, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, commands, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, rx_procs, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, pci_pm, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, wakeups, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, low_rssi, "%u");
+
+WL12XX_DEBUGFS_FWSTATS_FILE(wep, addr_key_count, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(wep, default_key_count, "%u");
+/* skipping wep.reserved */
+WL12XX_DEBUGFS_FWSTATS_FILE(wep, key_not_found, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(wep, packets, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(wep, interrupt, "%u");
+
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, ps_enter, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, elp_enter, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, power_save_off, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, enable_ps, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, disable_ps, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, "%u");
+/* skipping cont_miss_bcns_spread for now */
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, "%u");
+
+WL12XX_DEBUGFS_FWSTATS_FILE(mic, rx_pkts, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(mic, calc_failure, "%u");
+
+WL12XX_DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, "%u");
+
+WL12XX_DEBUGFS_FWSTATS_FILE(event, heart_beat, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(event, calibration, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(event, rx_mismatch, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(event, rx_pool, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(event, oom_late, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(event, tx_stuck, "%u");
+
+WL12XX_DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, "%u");
+
+WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data,
+ "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, "%u");
+
+int wl12xx_debugfs_add_files(struct wl1271 *wl,
+ struct dentry *rootdir)
+{
+ int ret = 0;
+ struct dentry *entry, *stats, *moddir;
+
+ moddir = debugfs_create_dir(KBUILD_MODNAME, rootdir);
+ if (!moddir || IS_ERR(moddir)) {
+ entry = moddir;
+ goto err;
+ }
+
+ stats = debugfs_create_dir("fw_stats", moddir);
+ if (!stats || IS_ERR(stats)) {
+ entry = stats;
+ goto err;
+ }
+
+ DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow);
+
+ DEBUGFS_FWSTATS_ADD(rx, out_of_mem);
+ DEBUGFS_FWSTATS_ADD(rx, hdr_overflow);
+ DEBUGFS_FWSTATS_ADD(rx, hw_stuck);
+ DEBUGFS_FWSTATS_ADD(rx, dropped);
+ DEBUGFS_FWSTATS_ADD(rx, fcs_err);
+ DEBUGFS_FWSTATS_ADD(rx, xfr_hint_trig);
+ DEBUGFS_FWSTATS_ADD(rx, path_reset);
+ DEBUGFS_FWSTATS_ADD(rx, reset_counter);
+
+ DEBUGFS_FWSTATS_ADD(dma, rx_requested);
+ DEBUGFS_FWSTATS_ADD(dma, rx_errors);
+ DEBUGFS_FWSTATS_ADD(dma, tx_requested);
+ DEBUGFS_FWSTATS_ADD(dma, tx_errors);
+
+ DEBUGFS_FWSTATS_ADD(isr, cmd_cmplt);
+ DEBUGFS_FWSTATS_ADD(isr, fiqs);
+ DEBUGFS_FWSTATS_ADD(isr, rx_headers);
+ DEBUGFS_FWSTATS_ADD(isr, rx_mem_overflow);
+ DEBUGFS_FWSTATS_ADD(isr, rx_rdys);
+ DEBUGFS_FWSTATS_ADD(isr, irqs);
+ DEBUGFS_FWSTATS_ADD(isr, tx_procs);
+ DEBUGFS_FWSTATS_ADD(isr, decrypt_done);
+ DEBUGFS_FWSTATS_ADD(isr, dma0_done);
+ DEBUGFS_FWSTATS_ADD(isr, dma1_done);
+ DEBUGFS_FWSTATS_ADD(isr, tx_exch_complete);
+ DEBUGFS_FWSTATS_ADD(isr, commands);
+ DEBUGFS_FWSTATS_ADD(isr, rx_procs);
+ DEBUGFS_FWSTATS_ADD(isr, hw_pm_mode_changes);
+ DEBUGFS_FWSTATS_ADD(isr, host_acknowledges);
+ DEBUGFS_FWSTATS_ADD(isr, pci_pm);
+ DEBUGFS_FWSTATS_ADD(isr, wakeups);
+ DEBUGFS_FWSTATS_ADD(isr, low_rssi);
+
+ DEBUGFS_FWSTATS_ADD(wep, addr_key_count);
+ DEBUGFS_FWSTATS_ADD(wep, default_key_count);
+ /* skipping wep.reserved */
+ DEBUGFS_FWSTATS_ADD(wep, key_not_found);
+ DEBUGFS_FWSTATS_ADD(wep, decrypt_fail);
+ DEBUGFS_FWSTATS_ADD(wep, packets);
+ DEBUGFS_FWSTATS_ADD(wep, interrupt);
+
+ DEBUGFS_FWSTATS_ADD(pwr, ps_enter);
+ DEBUGFS_FWSTATS_ADD(pwr, elp_enter);
+ DEBUGFS_FWSTATS_ADD(pwr, missing_bcns);
+ DEBUGFS_FWSTATS_ADD(pwr, wake_on_host);
+ DEBUGFS_FWSTATS_ADD(pwr, wake_on_timer_exp);
+ DEBUGFS_FWSTATS_ADD(pwr, tx_with_ps);
+ DEBUGFS_FWSTATS_ADD(pwr, tx_without_ps);
+ DEBUGFS_FWSTATS_ADD(pwr, rcvd_beacons);
+ DEBUGFS_FWSTATS_ADD(pwr, power_save_off);
+ DEBUGFS_FWSTATS_ADD(pwr, enable_ps);
+ DEBUGFS_FWSTATS_ADD(pwr, disable_ps);
+ DEBUGFS_FWSTATS_ADD(pwr, fix_tsf_ps);
+ /* skipping cont_miss_bcns_spread for now */
+ DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_beacons);
+
+ DEBUGFS_FWSTATS_ADD(mic, rx_pkts);
+ DEBUGFS_FWSTATS_ADD(mic, calc_failure);
+
+ DEBUGFS_FWSTATS_ADD(aes, encrypt_fail);
+ DEBUGFS_FWSTATS_ADD(aes, decrypt_fail);
+ DEBUGFS_FWSTATS_ADD(aes, encrypt_packets);
+ DEBUGFS_FWSTATS_ADD(aes, decrypt_packets);
+ DEBUGFS_FWSTATS_ADD(aes, encrypt_interrupt);
+ DEBUGFS_FWSTATS_ADD(aes, decrypt_interrupt);
+
+ DEBUGFS_FWSTATS_ADD(event, heart_beat);
+ DEBUGFS_FWSTATS_ADD(event, calibration);
+ DEBUGFS_FWSTATS_ADD(event, rx_mismatch);
+ DEBUGFS_FWSTATS_ADD(event, rx_mem_empty);
+ DEBUGFS_FWSTATS_ADD(event, rx_pool);
+ DEBUGFS_FWSTATS_ADD(event, oom_late);
+ DEBUGFS_FWSTATS_ADD(event, phy_transmit_error);
+ DEBUGFS_FWSTATS_ADD(event, tx_stuck);
+
+ DEBUGFS_FWSTATS_ADD(ps, pspoll_timeouts);
+ DEBUGFS_FWSTATS_ADD(ps, upsd_timeouts);
+ DEBUGFS_FWSTATS_ADD(ps, upsd_max_sptime);
+ DEBUGFS_FWSTATS_ADD(ps, upsd_max_apturn);
+ DEBUGFS_FWSTATS_ADD(ps, pspoll_max_apturn);
+ DEBUGFS_FWSTATS_ADD(ps, pspoll_utilization);
+ DEBUGFS_FWSTATS_ADD(ps, upsd_utilization);
+
+ DEBUGFS_FWSTATS_ADD(rxpipe, rx_prep_beacon_drop);
+ DEBUGFS_FWSTATS_ADD(rxpipe, descr_host_int_trig_rx_data);
+ DEBUGFS_FWSTATS_ADD(rxpipe, beacon_buffer_thres_host_int_trig_rx_data);
+ DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data);
+ DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
+
+ return 0;
+
+err:
+ if (IS_ERR(entry))
+ ret = PTR_ERR(entry);
+ else
+ ret = -ENOMEM;
+
+ return ret;
+}
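The error handling above funnels every failure into the shared err label: if debugfs_create_dir() fails, the dentry (or ERR_PTR) is kept in entry, and each DEBUGFS_FWSTATS_ADD() invocation is expected to do the same for its statistics file. The macro itself comes from wlcore and is not part of this hunk; as a rough, hedged sketch (names and layout assumed, not the actual wlcore definition), each invocation behaves roughly like:

#define DEBUGFS_FWSTATS_ADD_SKETCH(sub, name)				\
	do {								\
		/* one read-only file per firmware counter */		\
		entry = debugfs_create_file(#name, 0400, stats, wl,	\
					    &sub##_##name##_ops);	\
		if (!entry || IS_ERR(entry))				\
			goto err;					\
	} while (0)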
diff --git a/drivers/net/wireless/ti/wl12xx/debugfs.h b/drivers/net/wireless/ti/wl12xx/debugfs.h
new file mode 100644
index 0000000..96898e2
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/debugfs.h
@@ -0,0 +1,28 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL12XX_DEBUGFS_H__
+#define __WL12XX_DEBUGFS_H__
+
+int wl12xx_debugfs_add_files(struct wl1271 *wl,
+ struct dentry *rootdir);
+
+#endif /* __WL12XX_DEBUGFS_H__ */
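This header only exports the single entry point; the patch wires it into the core further below through the .debugfs_init member of wl12xx_ops. A minimal sketch of the assumed wlcore-side call, purely for orientation (the real call site lives in wlcore and is not shown here):

static int example_chip_debugfs_init(struct wl1271 *wl, struct dentry *rootdir)
{
	/* optional hook; chip drivers may leave it NULL */
	if (!wl->ops->debugfs_init)
		return 0;

	return wl->ops->debugfs_init(wl, rootdir); /* wl12xx_debugfs_add_files() */
}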
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index d7dd3de..f429fc1 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -39,6 +39,10 @@
#include "reg.h"
#include "cmd.h"
#include "acx.h"
+#include "debugfs.h"
+
+static char *fref_param;
+static char *tcxo_param;
static struct wlcore_conf wl12xx_conf = {
.sg = {
@@ -212,7 +216,7 @@ static struct wlcore_conf wl12xx_conf = {
.suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM,
.suspend_listen_interval = 3,
.bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
- .bcn_filt_ie_count = 2,
+ .bcn_filt_ie_count = 3,
.bcn_filt_ie = {
[0] = {
.ie = WLAN_EID_CHANNEL_SWITCH,
@@ -222,9 +226,13 @@ static struct wlcore_conf wl12xx_conf = {
.ie = WLAN_EID_HT_OPERATION,
.rule = CONF_BCN_RULE_PASS_ON_CHANGE,
},
+ [2] = {
+ .ie = WLAN_EID_ERP_INFO,
+ .rule = CONF_BCN_RULE_PASS_ON_CHANGE,
+ },
},
- .synch_fail_thold = 10,
- .bss_lose_timeout = 100,
+ .synch_fail_thold = 12,
+ .bss_lose_timeout = 400,
.beacon_rx_timeout = 10000,
.broadcast_timeout = 20000,
.rx_broadcast_in_ps = 1,
@@ -234,10 +242,11 @@ static struct wlcore_conf wl12xx_conf = {
.psm_entry_retries = 8,
.psm_exit_retries = 16,
.psm_entry_nullfunc_retries = 3,
- .dynamic_ps_timeout = 40,
+ .dynamic_ps_timeout = 1500,
.forced_ps = false,
.keep_alive_interval = 55000,
.max_listen_interval = 20,
+ .sta_sleep_auth = WL1271_PSM_ILLEGAL,
},
.itrim = {
.enable = false,
@@ -245,7 +254,7 @@ static struct wlcore_conf wl12xx_conf = {
},
.pm_config = {
.host_clk_settling_time = 5000,
- .host_fast_wakeup_support = false
+ .host_fast_wakeup_support = CONF_FAST_WAKEUP_DISABLE,
},
.roam_trigger = {
.trigger_pacing = 1,
@@ -305,8 +314,8 @@ static struct wlcore_conf wl12xx_conf = {
.swallow_period = 5,
.n_divider_fref_set_1 = 0xff, /* default */
.n_divider_fref_set_2 = 12,
- .m_divider_fref_set_1 = 148,
- .m_divider_fref_set_2 = 0xffff, /* default */
+ .m_divider_fref_set_1 = 0xffff,
+ .m_divider_fref_set_2 = 148, /* default */
.coex_pll_stabilization_time = 0xffffffff, /* default */
.ldo_stabilization_time = 0xffff, /* default */
.fm_disturbed_band_margin = 0xff, /* default */
@@ -581,19 +590,21 @@ static const int wl12xx_rtable[REG_TABLE_LEN] = {
};
/* TODO: maybe move to a new header file? */
-#define WL127X_FW_NAME_MULTI "ti-connectivity/wl127x-fw-4-mr.bin"
-#define WL127X_FW_NAME_SINGLE "ti-connectivity/wl127x-fw-4-sr.bin"
-#define WL127X_PLT_FW_NAME "ti-connectivity/wl127x-fw-4-plt.bin"
+#define WL127X_FW_NAME_MULTI "ti-connectivity/wl127x-fw-5-mr.bin"
+#define WL127X_FW_NAME_SINGLE "ti-connectivity/wl127x-fw-5-sr.bin"
+#define WL127X_PLT_FW_NAME "ti-connectivity/wl127x-fw-5-plt.bin"
-#define WL128X_FW_NAME_MULTI "ti-connectivity/wl128x-fw-4-mr.bin"
-#define WL128X_FW_NAME_SINGLE "ti-connectivity/wl128x-fw-4-sr.bin"
-#define WL128X_PLT_FW_NAME "ti-connectivity/wl128x-fw-4-plt.bin"
+#define WL128X_FW_NAME_MULTI "ti-connectivity/wl128x-fw-5-mr.bin"
+#define WL128X_FW_NAME_SINGLE "ti-connectivity/wl128x-fw-5-sr.bin"
+#define WL128X_PLT_FW_NAME "ti-connectivity/wl128x-fw-5-plt.bin"
-static void wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
+static int wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
{
+ int ret;
+
if (wl->chip.id != CHIP_ID_1283_PG20) {
struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
- struct wl1271_rx_mem_pool_addr rx_mem_addr;
+ struct wl127x_rx_mem_pool_addr rx_mem_addr;
/*
* Choose the block we want to read
@@ -607,9 +618,13 @@ static void wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
rx_mem_addr.addr_extra = rx_mem_addr.addr + 4;
- wl1271_write(wl, WL1271_SLV_REG_DATA,
- &rx_mem_addr, sizeof(rx_mem_addr), false);
+ ret = wlcore_write(wl, WL1271_SLV_REG_DATA, &rx_mem_addr,
+ sizeof(rx_mem_addr), false);
+ if (ret < 0)
+ return ret;
}
+
+ return 0;
}
static int wl12xx_identify_chip(struct wl1271 *wl)
@@ -621,10 +636,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
wl->chip.id);
- /* clear the alignment quirk, since we don't support it */
- wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
-
- wl->quirks |= WLCORE_QUIRK_LEGACY_NVS;
+ wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
+ WLCORE_QUIRK_DUAL_PROBE_TMPL |
+ WLCORE_QUIRK_TKIP_HEADER_SPACE;
wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
wl->mr_fw_name = WL127X_FW_NAME_MULTI;
memcpy(&wl->conf.mem, &wl12xx_default_priv_conf.mem_wl127x,
@@ -633,16 +647,18 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
/* read data preparation is only needed by wl127x */
wl->ops->prepare_read = wl127x_prepare_read;
+ wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
+ WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
+ WL127X_MINOR_VER);
break;
case CHIP_ID_1271_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
wl->chip.id);
- /* clear the alignment quirk, since we don't support it */
- wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
-
- wl->quirks |= WLCORE_QUIRK_LEGACY_NVS;
+ wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
+ WLCORE_QUIRK_DUAL_PROBE_TMPL |
+ WLCORE_QUIRK_TKIP_HEADER_SPACE;
wl->plt_fw_name = WL127X_PLT_FW_NAME;
wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
wl->mr_fw_name = WL127X_FW_NAME_MULTI;
@@ -652,6 +668,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
/* read data preparation is only needed by wl127x */
wl->ops->prepare_read = wl127x_prepare_read;
+ wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
+ WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
+ WL127X_MINOR_VER);
break;
case CHIP_ID_1283_PG20:
@@ -660,6 +679,15 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
wl->plt_fw_name = WL128X_PLT_FW_NAME;
wl->sr_fw_name = WL128X_FW_NAME_SINGLE;
wl->mr_fw_name = WL128X_FW_NAME_MULTI;
+
+ /* wl128x requires TX blocksize alignment */
+ wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN |
+ WLCORE_QUIRK_DUAL_PROBE_TMPL |
+ WLCORE_QUIRK_TKIP_HEADER_SPACE;
+
+ wlcore_set_min_fw_ver(wl, WL128X_CHIP_VER, WL128X_IFTYPE_VER,
+ WL128X_MAJOR_VER, WL128X_SUBTYPE_VER,
+ WL128X_MINOR_VER);
break;
case CHIP_ID_1283_PG10:
default:
@@ -672,64 +700,95 @@ out:
return ret;
}
-static void wl12xx_top_reg_write(struct wl1271 *wl, int addr, u16 val)
+static int __must_check wl12xx_top_reg_write(struct wl1271 *wl, int addr,
+ u16 val)
{
+ int ret;
+
/* write address >> 1 + 0x30000 to OCP_POR_CTR */
addr = (addr >> 1) + 0x30000;
- wl1271_write32(wl, WL12XX_OCP_POR_CTR, addr);
+ ret = wlcore_write32(wl, WL12XX_OCP_POR_CTR, addr);
+ if (ret < 0)
+ goto out;
/* write value to OCP_POR_WDATA */
- wl1271_write32(wl, WL12XX_OCP_DATA_WRITE, val);
+ ret = wlcore_write32(wl, WL12XX_OCP_DATA_WRITE, val);
+ if (ret < 0)
+ goto out;
/* write 1 to OCP_CMD */
- wl1271_write32(wl, WL12XX_OCP_CMD, OCP_CMD_WRITE);
+ ret = wlcore_write32(wl, WL12XX_OCP_CMD, OCP_CMD_WRITE);
+ if (ret < 0)
+ goto out;
+
+out:
+ return ret;
}
-static u16 wl12xx_top_reg_read(struct wl1271 *wl, int addr)
+static int __must_check wl12xx_top_reg_read(struct wl1271 *wl, int addr,
+ u16 *out)
{
u32 val;
int timeout = OCP_CMD_LOOP;
+ int ret;
/* write address >> 1 + 0x30000 to OCP_POR_CTR */
addr = (addr >> 1) + 0x30000;
- wl1271_write32(wl, WL12XX_OCP_POR_CTR, addr);
+ ret = wlcore_write32(wl, WL12XX_OCP_POR_CTR, addr);
+ if (ret < 0)
+ return ret;
/* write 2 to OCP_CMD */
- wl1271_write32(wl, WL12XX_OCP_CMD, OCP_CMD_READ);
+ ret = wlcore_write32(wl, WL12XX_OCP_CMD, OCP_CMD_READ);
+ if (ret < 0)
+ return ret;
/* poll for data ready */
do {
- val = wl1271_read32(wl, WL12XX_OCP_DATA_READ);
+ ret = wlcore_read32(wl, WL12XX_OCP_DATA_READ, &val);
+ if (ret < 0)
+ return ret;
} while (!(val & OCP_READY_MASK) && --timeout);
if (!timeout) {
wl1271_warning("Top register access timed out.");
- return 0xffff;
+ return -ETIMEDOUT;
}
/* check data status and return if OK */
- if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK)
- return val & 0xffff;
- else {
+ if ((val & OCP_STATUS_MASK) != OCP_STATUS_OK) {
wl1271_warning("Top register access returned error.");
- return 0xffff;
+ return -EIO;
}
+
+ if (out)
+ *out = val & 0xffff;
+
+ return 0;
}
static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
{
u16 spare_reg;
+ int ret;
/* Mask bits [2] & [8:4] in the sys_clk_cfg register */
- spare_reg = wl12xx_top_reg_read(wl, WL_SPARE_REG);
+ ret = wl12xx_top_reg_read(wl, WL_SPARE_REG, &spare_reg);
+ if (ret < 0)
+ return ret;
+
if (spare_reg == 0xFFFF)
return -EFAULT;
spare_reg |= (BIT(3) | BIT(5) | BIT(6));
- wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg);
+ ret = wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg);
+ if (ret < 0)
+ return ret;
/* Enable FREF_CLK_REQ & mux MCS and coex PLLs to FREF */
- wl12xx_top_reg_write(wl, SYS_CLK_CFG_REG,
- WL_CLK_REQ_TYPE_PG2 | MCS_PLL_CLK_SEL_FREF);
+ ret = wl12xx_top_reg_write(wl, SYS_CLK_CFG_REG,
+ WL_CLK_REQ_TYPE_PG2 | MCS_PLL_CLK_SEL_FREF);
+ if (ret < 0)
+ return ret;
/* Delay execution for 15msec, to let the HW settle */
mdelay(15);
@@ -740,8 +799,12 @@ static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
static bool wl128x_is_tcxo_valid(struct wl1271 *wl)
{
u16 tcxo_detection;
+ int ret;
+
+ ret = wl12xx_top_reg_read(wl, TCXO_CLK_DETECT_REG, &tcxo_detection);
+ if (ret < 0)
+ return false;
- tcxo_detection = wl12xx_top_reg_read(wl, TCXO_CLK_DETECT_REG);
if (tcxo_detection & TCXO_DET_FAILED)
return false;
@@ -751,8 +814,12 @@ static bool wl128x_is_tcxo_valid(struct wl1271 *wl)
static bool wl128x_is_fref_valid(struct wl1271 *wl)
{
u16 fref_detection;
+ int ret;
+
+ ret = wl12xx_top_reg_read(wl, FREF_CLK_DETECT_REG, &fref_detection);
+ if (ret < 0)
+ return false;
- fref_detection = wl12xx_top_reg_read(wl, FREF_CLK_DETECT_REG);
if (fref_detection & FREF_CLK_DETECT_FAIL)
return false;
@@ -761,11 +828,21 @@ static bool wl128x_is_fref_valid(struct wl1271 *wl)
static int wl128x_manually_configure_mcs_pll(struct wl1271 *wl)
{
- wl12xx_top_reg_write(wl, MCS_PLL_M_REG, MCS_PLL_M_REG_VAL);
- wl12xx_top_reg_write(wl, MCS_PLL_N_REG, MCS_PLL_N_REG_VAL);
- wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, MCS_PLL_CONFIG_REG_VAL);
+ int ret;
- return 0;
+ ret = wl12xx_top_reg_write(wl, MCS_PLL_M_REG, MCS_PLL_M_REG_VAL);
+ if (ret < 0)
+ goto out;
+
+ ret = wl12xx_top_reg_write(wl, MCS_PLL_N_REG, MCS_PLL_N_REG_VAL);
+ if (ret < 0)
+ goto out;
+
+ ret = wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG,
+ MCS_PLL_CONFIG_REG_VAL);
+
+out:
+ return ret;
}
static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
@@ -773,30 +850,40 @@ static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
u16 spare_reg;
u16 pll_config;
u8 input_freq;
+ struct wl12xx_priv *priv = wl->priv;
+ int ret;
/* Mask bits [3:1] in the sys_clk_cfg register */
- spare_reg = wl12xx_top_reg_read(wl, WL_SPARE_REG);
+ ret = wl12xx_top_reg_read(wl, WL_SPARE_REG, &spare_reg);
+ if (ret < 0)
+ return ret;
+
if (spare_reg == 0xFFFF)
return -EFAULT;
spare_reg |= BIT(2);
- wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg);
+ ret = wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg);
+ if (ret < 0)
+ return ret;
/* Handle special cases of the TCXO clock */
- if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_8 ||
- wl->tcxo_clock == WL12XX_TCXOCLOCK_33_6)
+ if (priv->tcxo_clock == WL12XX_TCXOCLOCK_16_8 ||
+ priv->tcxo_clock == WL12XX_TCXOCLOCK_33_6)
return wl128x_manually_configure_mcs_pll(wl);
/* Set the input frequency according to the selected clock source */
input_freq = (clk & 1) + 1;
- pll_config = wl12xx_top_reg_read(wl, MCS_PLL_CONFIG_REG);
+ ret = wl12xx_top_reg_read(wl, MCS_PLL_CONFIG_REG, &pll_config);
+ if (ret < 0)
+ return ret;
+
if (pll_config == 0xFFFF)
return -EFAULT;
pll_config |= (input_freq << MCS_SEL_IN_FREQ_SHIFT);
pll_config |= MCS_PLL_ENABLE_HP;
- wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, pll_config);
+ ret = wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, pll_config);
- return 0;
+ return ret;
}
/*
@@ -808,26 +895,31 @@ static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
*/
static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock)
{
+ struct wl12xx_priv *priv = wl->priv;
u16 sys_clk_cfg;
+ int ret;
/* For XTAL-only modes, FREF will be used after switching from TCXO */
- if (wl->ref_clock == WL12XX_REFCLOCK_26_XTAL ||
- wl->ref_clock == WL12XX_REFCLOCK_38_XTAL) {
+ if (priv->ref_clock == WL12XX_REFCLOCK_26_XTAL ||
+ priv->ref_clock == WL12XX_REFCLOCK_38_XTAL) {
if (!wl128x_switch_tcxo_to_fref(wl))
return -EINVAL;
goto fref_clk;
}
/* Query the HW, to determine which clock source we should use */
- sys_clk_cfg = wl12xx_top_reg_read(wl, SYS_CLK_CFG_REG);
+ ret = wl12xx_top_reg_read(wl, SYS_CLK_CFG_REG, &sys_clk_cfg);
+ if (ret < 0)
+ return ret;
+
if (sys_clk_cfg == 0xFFFF)
return -EINVAL;
if (sys_clk_cfg & PRCM_CM_EN_MUX_WLAN_FREF)
goto fref_clk;
/* If TCXO is either 32.736MHz or 16.368MHz, switch to FREF */
- if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_368 ||
- wl->tcxo_clock == WL12XX_TCXOCLOCK_32_736) {
+ if (priv->tcxo_clock == WL12XX_TCXOCLOCK_16_368 ||
+ priv->tcxo_clock == WL12XX_TCXOCLOCK_32_736) {
if (!wl128x_switch_tcxo_to_fref(wl))
return -EINVAL;
goto fref_clk;
@@ -836,14 +928,14 @@ static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock)
/* TCXO clock is selected */
if (!wl128x_is_tcxo_valid(wl))
return -EINVAL;
- *selected_clock = wl->tcxo_clock;
+ *selected_clock = priv->tcxo_clock;
goto config_mcs_pll;
fref_clk:
/* FREF clock is selected */
if (!wl128x_is_fref_valid(wl))
return -EINVAL;
- *selected_clock = wl->ref_clock;
+ *selected_clock = priv->ref_clock;
config_mcs_pll:
return wl128x_configure_mcs_pll(wl, *selected_clock);
@@ -851,69 +943,98 @@ config_mcs_pll:
static int wl127x_boot_clk(struct wl1271 *wl)
{
+ struct wl12xx_priv *priv = wl->priv;
u32 pause;
u32 clk;
+ int ret;
if (WL127X_PG_GET_MAJOR(wl->hw_pg_ver) < 3)
wl->quirks |= WLCORE_QUIRK_END_OF_TRANSACTION;
- if (wl->ref_clock == CONF_REF_CLK_19_2_E ||
- wl->ref_clock == CONF_REF_CLK_38_4_E ||
- wl->ref_clock == CONF_REF_CLK_38_4_M_XTAL)
+ if (priv->ref_clock == CONF_REF_CLK_19_2_E ||
+ priv->ref_clock == CONF_REF_CLK_38_4_E ||
+ priv->ref_clock == CONF_REF_CLK_38_4_M_XTAL)
/* ref clk: 19.2/38.4/38.4-XTAL */
clk = 0x3;
- else if (wl->ref_clock == CONF_REF_CLK_26_E ||
- wl->ref_clock == CONF_REF_CLK_52_E)
+ else if (priv->ref_clock == CONF_REF_CLK_26_E ||
+ priv->ref_clock == CONF_REF_CLK_26_M_XTAL ||
+ priv->ref_clock == CONF_REF_CLK_52_E)
/* ref clk: 26/52 */
clk = 0x5;
else
return -EINVAL;
- if (wl->ref_clock != CONF_REF_CLK_19_2_E) {
+ if (priv->ref_clock != CONF_REF_CLK_19_2_E) {
u16 val;
/* Set clock type (open drain) */
- val = wl12xx_top_reg_read(wl, OCP_REG_CLK_TYPE);
+ ret = wl12xx_top_reg_read(wl, OCP_REG_CLK_TYPE, &val);
+ if (ret < 0)
+ goto out;
+
val &= FREF_CLK_TYPE_BITS;
- wl12xx_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
+ ret = wl12xx_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
+ if (ret < 0)
+ goto out;
/* Set clock pull mode (no pull) */
- val = wl12xx_top_reg_read(wl, OCP_REG_CLK_PULL);
+ ret = wl12xx_top_reg_read(wl, OCP_REG_CLK_PULL, &val);
+ if (ret < 0)
+ goto out;
+
val |= NO_PULL;
- wl12xx_top_reg_write(wl, OCP_REG_CLK_PULL, val);
+ ret = wl12xx_top_reg_write(wl, OCP_REG_CLK_PULL, val);
+ if (ret < 0)
+ goto out;
} else {
u16 val;
/* Set clock polarity */
- val = wl12xx_top_reg_read(wl, OCP_REG_CLK_POLARITY);
+ ret = wl12xx_top_reg_read(wl, OCP_REG_CLK_POLARITY, &val);
+ if (ret < 0)
+ goto out;
+
val &= FREF_CLK_POLARITY_BITS;
val |= CLK_REQ_OUTN_SEL;
- wl12xx_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
+ ret = wl12xx_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
+ if (ret < 0)
+ goto out;
}
- wl1271_write32(wl, WL12XX_PLL_PARAMETERS, clk);
+ ret = wlcore_write32(wl, WL12XX_PLL_PARAMETERS, clk);
+ if (ret < 0)
+ goto out;
- pause = wl1271_read32(wl, WL12XX_PLL_PARAMETERS);
+ ret = wlcore_read32(wl, WL12XX_PLL_PARAMETERS, &pause);
+ if (ret < 0)
+ goto out;
wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause);
pause &= ~(WU_COUNTER_PAUSE_VAL);
pause |= WU_COUNTER_PAUSE_VAL;
- wl1271_write32(wl, WL12XX_WU_COUNTER_PAUSE, pause);
+ ret = wlcore_write32(wl, WL12XX_WU_COUNTER_PAUSE, pause);
- return 0;
+out:
+ return ret;
}
static int wl1271_boot_soft_reset(struct wl1271 *wl)
{
unsigned long timeout;
u32 boot_data;
+ int ret = 0;
/* perform soft reset */
- wl1271_write32(wl, WL12XX_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);
+ ret = wlcore_write32(wl, WL12XX_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);
+ if (ret < 0)
+ goto out;
/* SOFT_RESET is self clearing */
timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
while (1) {
- boot_data = wl1271_read32(wl, WL12XX_SLV_SOFT_RESET);
+ ret = wlcore_read32(wl, WL12XX_SLV_SOFT_RESET, &boot_data);
+ if (ret < 0)
+ goto out;
+
wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
break;
@@ -929,16 +1050,20 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
}
/* disable Rx/Tx */
- wl1271_write32(wl, WL12XX_ENABLE, 0x0);
+ ret = wlcore_write32(wl, WL12XX_ENABLE, 0x0);
+ if (ret < 0)
+ goto out;
/* disable auto calibration on start*/
- wl1271_write32(wl, WL12XX_SPARE_A2, 0xffff);
+ ret = wlcore_write32(wl, WL12XX_SPARE_A2, 0xffff);
- return 0;
+out:
+ return ret;
}
static int wl12xx_pre_boot(struct wl1271 *wl)
{
+ struct wl12xx_priv *priv = wl->priv;
int ret = 0;
u32 clk;
int selected_clock = -1;
@@ -954,30 +1079,43 @@ static int wl12xx_pre_boot(struct wl1271 *wl)
}
/* Continue the ELP wake up sequence */
- wl1271_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
+ ret = wlcore_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
+ if (ret < 0)
+ goto out;
+
udelay(500);
- wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
+ if (ret < 0)
+ goto out;
/* Read-modify-write DRPW_SCRATCH_START register (see next state)
to be used by DRPw FW. The RTRIM value will be added by the FW
before taking DRPw out of reset */
- clk = wl1271_read32(wl, WL12XX_DRPW_SCRATCH_START);
+ ret = wlcore_read32(wl, WL12XX_DRPW_SCRATCH_START, &clk);
+ if (ret < 0)
+ goto out;
wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
if (wl->chip.id == CHIP_ID_1283_PG20)
clk |= ((selected_clock & 0x3) << 1) << 4;
else
- clk |= (wl->ref_clock << 1) << 4;
+ clk |= (priv->ref_clock << 1) << 4;
- wl1271_write32(wl, WL12XX_DRPW_SCRATCH_START, clk);
+ ret = wlcore_write32(wl, WL12XX_DRPW_SCRATCH_START, clk);
+ if (ret < 0)
+ goto out;
- wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
+ if (ret < 0)
+ goto out;
/* Disable interrupts */
- wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
+ ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
+ if (ret < 0)
+ goto out;
ret = wl1271_boot_soft_reset(wl);
if (ret < 0)
@@ -987,47 +1125,72 @@ out:
return ret;
}
-static void wl12xx_pre_upload(struct wl1271 *wl)
+static int wl12xx_pre_upload(struct wl1271 *wl)
{
u32 tmp;
+ u16 polarity;
+ int ret;
 /* write firmware's last address (i.e. its length) to
 * ACX_EEPROMLESS_IND_REG */
wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG");
- wl1271_write32(wl, WL12XX_EEPROMLESS_IND, WL12XX_EEPROMLESS_IND);
+ ret = wlcore_write32(wl, WL12XX_EEPROMLESS_IND, WL12XX_EEPROMLESS_IND);
+ if (ret < 0)
+ goto out;
- tmp = wlcore_read_reg(wl, REG_CHIP_ID_B);
+ ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &tmp);
+ if (ret < 0)
+ goto out;
wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
/* 6. read the EEPROM parameters */
- tmp = wl1271_read32(wl, WL12XX_SCR_PAD2);
+ ret = wlcore_read32(wl, WL12XX_SCR_PAD2, &tmp);
+ if (ret < 0)
+ goto out;
/* WL1271: The reference driver skips steps 7 to 10 (jumps directly
* to upload_fw) */
- if (wl->chip.id == CHIP_ID_1283_PG20)
- wl12xx_top_reg_write(wl, SDIO_IO_DS, HCI_IO_DS_6MA);
-}
-
-static void wl12xx_enable_interrupts(struct wl1271 *wl)
-{
- u32 polarity;
+ if (wl->chip.id == CHIP_ID_1283_PG20) {
+ ret = wl12xx_top_reg_write(wl, SDIO_IO_DS, HCI_IO_DS_6MA);
+ if (ret < 0)
+ goto out;
+ }
- polarity = wl12xx_top_reg_read(wl, OCP_REG_POLARITY);
+ /* polarity must be set before the firmware is loaded */
+ ret = wl12xx_top_reg_read(wl, OCP_REG_POLARITY, &polarity);
+ if (ret < 0)
+ goto out;
/* We use HIGH polarity, so unset the LOW bit */
polarity &= ~POLARITY_LOW;
- wl12xx_top_reg_write(wl, OCP_REG_POLARITY, polarity);
+ ret = wl12xx_top_reg_write(wl, OCP_REG_POLARITY, polarity);
+
+out:
+ return ret;
+}
- wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_ALL_EVENTS_VECTOR);
+static int wl12xx_enable_interrupts(struct wl1271 *wl)
+{
+ int ret;
+
+ ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
+ WL12XX_ACX_ALL_EVENTS_VECTOR);
+ if (ret < 0)
+ goto out;
wlcore_enable_interrupts(wl);
- wlcore_write_reg(wl, REG_INTERRUPT_MASK,
- WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
+ ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
+ WL1271_ACX_INTR_ALL & ~(WL12XX_INTR_MASK));
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_write32(wl, WL12XX_HI_CFG, HI_CFG_DEF_VAL);
- wl1271_write32(wl, WL12XX_HI_CFG, HI_CFG_DEF_VAL);
+out:
+ return ret;
}
static int wl12xx_boot(struct wl1271 *wl)
@@ -1042,7 +1205,9 @@ static int wl12xx_boot(struct wl1271 *wl)
if (ret < 0)
goto out;
- wl12xx_pre_upload(wl);
+ ret = wl12xx_pre_upload(wl);
+ if (ret < 0)
+ goto out;
ret = wlcore_boot_upload_firmware(wl);
if (ret < 0)
@@ -1052,22 +1217,30 @@ static int wl12xx_boot(struct wl1271 *wl)
if (ret < 0)
goto out;
- wl12xx_enable_interrupts(wl);
+ ret = wl12xx_enable_interrupts(wl);
out:
return ret;
}
-static void wl12xx_trigger_cmd(struct wl1271 *wl, int cmd_box_addr,
+static int wl12xx_trigger_cmd(struct wl1271 *wl, int cmd_box_addr,
void *buf, size_t len)
{
- wl1271_write(wl, cmd_box_addr, buf, len, false);
- wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_CMD);
+ int ret;
+
+ ret = wlcore_write(wl, cmd_box_addr, buf, len, false);
+ if (ret < 0)
+ return ret;
+
+ ret = wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_CMD);
+
+ return ret;
}
-static void wl12xx_ack_event(struct wl1271 *wl)
+static int wl12xx_ack_event(struct wl1271 *wl)
{
- wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_EVENT_ACK);
+ return wlcore_write_reg(wl, REG_INTERRUPT_TRIG,
+ WL12XX_INTR_TRIG_EVENT_ACK);
}
static u32 wl12xx_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks)
@@ -1147,12 +1320,13 @@ static u32 wl12xx_get_rx_packet_len(struct wl1271 *wl, void *rx_data,
return data_len - sizeof(*desc) - desc->pad_len;
}
-static void wl12xx_tx_delayed_compl(struct wl1271 *wl)
+static int wl12xx_tx_delayed_compl(struct wl1271 *wl)
{
- if (wl->fw_status->tx_results_counter == (wl->tx_results_count & 0xff))
- return;
+ if (wl->fw_status_1->tx_results_counter ==
+ (wl->tx_results_count & 0xff))
+ return 0;
- wl1271_tx_complete(wl);
+ return wlcore_tx_complete(wl);
}
static int wl12xx_hw_init(struct wl1271 *wl)
@@ -1165,6 +1339,14 @@ static int wl12xx_hw_init(struct wl1271 *wl)
ret = wl128x_cmd_general_parms(wl);
if (ret < 0)
goto out;
+
+ /*
+ * If we are in calibrator-based auto-detect, we already have the
+ * FEM number in wl->fem_manuf. No need to continue further.
+ */
+ if (wl->plt_mode == PLT_FEM_DETECT)
+ goto out;
+
ret = wl128x_cmd_radio_parms(wl);
if (ret < 0)
goto out;
@@ -1181,6 +1363,14 @@ static int wl12xx_hw_init(struct wl1271 *wl)
ret = wl1271_cmd_general_parms(wl);
if (ret < 0)
goto out;
+
+ /*
+ * If we are in calibrator-based auto-detect, we already have the
+ * FEM number in wl->fem_manuf. No need to continue further.
+ */
+ if (wl->plt_mode == PLT_FEM_DETECT)
+ goto out;
+
ret = wl1271_cmd_radio_parms(wl);
if (ret < 0)
goto out;
@@ -1253,45 +1443,151 @@ static bool wl12xx_mac_in_fuse(struct wl1271 *wl)
return supported;
}
-static void wl12xx_get_fuse_mac(struct wl1271 *wl)
+static int wl12xx_get_fuse_mac(struct wl1271 *wl)
{
u32 mac1, mac2;
+ int ret;
+
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
+ if (ret < 0)
+ goto out;
- wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
+ ret = wlcore_read32(wl, WL12XX_REG_FUSE_BD_ADDR_1, &mac1);
+ if (ret < 0)
+ goto out;
- mac1 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_1);
- mac2 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_2);
+ ret = wlcore_read32(wl, WL12XX_REG_FUSE_BD_ADDR_2, &mac2);
+ if (ret < 0)
+ goto out;
/* these are the two parts of the BD_ADDR */
wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) +
((mac1 & 0xff000000) >> 24);
wl->fuse_nic_addr = mac1 & 0xffffff;
- wlcore_set_partition(wl, &wl->ptable[PART_DOWN]);
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_DOWN]);
+
+out:
+ return ret;
}
-static s8 wl12xx_get_pg_ver(struct wl1271 *wl)
+static int wl12xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
{
- u32 die_info;
+ u16 die_info;
+ int ret;
if (wl->chip.id == CHIP_ID_1283_PG20)
- die_info = wl12xx_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1);
+ ret = wl12xx_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1,
+ &die_info);
else
- die_info = wl12xx_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1);
+ ret = wl12xx_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1,
+ &die_info);
- return (s8) (die_info & PG_VER_MASK) >> PG_VER_OFFSET;
+ if (ret >= 0 && ver)
+ *ver = (s8)((die_info & PG_VER_MASK) >> PG_VER_OFFSET);
+
+ return ret;
}
-static void wl12xx_get_mac(struct wl1271 *wl)
+static int wl12xx_get_mac(struct wl1271 *wl)
{
if (wl12xx_mac_in_fuse(wl))
- wl12xx_get_fuse_mac(wl);
+ return wl12xx_get_fuse_mac(wl);
+
+ return 0;
+}
+
+static void wl12xx_set_tx_desc_csum(struct wl1271 *wl,
+ struct wl1271_tx_hw_descr *desc,
+ struct sk_buff *skb)
+{
+ desc->wl12xx_reserved = 0;
+}
+
+static int wl12xx_plt_init(struct wl1271 *wl)
+{
+ int ret;
+
+ ret = wl->ops->boot(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl->ops->hw_init(wl);
+ if (ret < 0)
+ goto out_irq_disable;
+
+ /*
+ * If we are in calibrator-based auto-detect, we already have the
+ * FEM number in wl->fem_manuf. No need to continue further.
+ */
+ if (wl->plt_mode == PLT_FEM_DETECT)
+ goto out;
+
+ ret = wl1271_acx_init_mem_config(wl);
+ if (ret < 0)
+ goto out_irq_disable;
+
+ ret = wl12xx_acx_mem_cfg(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* Enable data path */
+ ret = wl1271_cmd_data_path(wl, 1);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* Configure for CAM power saving (i.e. always active) */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* configure PM */
+ ret = wl1271_acx_pm_config(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ goto out;
+
+out_free_memmap:
+ kfree(wl->target_mem_map);
+ wl->target_mem_map = NULL;
+
+out_irq_disable:
+ mutex_unlock(&wl->mutex);
+ /*
+ * Unlocking the mutex in the middle of handling is inherently
+ * unsafe. In this case we deem it safe to do, because we need to
+ * let any possibly pending IRQ out of the system (and while we
+ * are WL1271_STATE_OFF the IRQ work function will not do
+ * anything). Also, any other possible concurrent operations
+ * will fail due to the current state, hence the wl1271 struct
+ * should be safe.
+ */
+ wlcore_disable_interrupts(wl);
+ mutex_lock(&wl->mutex);
+out:
+ return ret;
+}
+
+static int wl12xx_get_spare_blocks(struct wl1271 *wl, bool is_gem)
+{
+ if (is_gem)
+ return WL12XX_TX_HW_BLOCK_GEM_SPARE;
+
+ return WL12XX_TX_HW_BLOCK_SPARE_DEFAULT;
+}
+
+static int wl12xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key_conf)
+{
+ return wlcore_set_key(wl, cmd, vif, sta, key_conf);
}
static struct wlcore_ops wl12xx_ops = {
.identify_chip = wl12xx_identify_chip,
.identify_fw = wl12xx_identify_fw,
.boot = wl12xx_boot,
+ .plt_init = wl12xx_plt_init,
.trigger_cmd = wl12xx_trigger_cmd,
.ack_event = wl12xx_ack_event,
.calc_tx_blocks = wl12xx_calc_tx_blocks,
@@ -1306,6 +1602,13 @@ static struct wlcore_ops wl12xx_ops = {
.sta_get_ap_rate_mask = wl12xx_sta_get_ap_rate_mask,
.get_pg_ver = wl12xx_get_pg_ver,
.get_mac = wl12xx_get_mac,
+ .set_tx_desc_csum = wl12xx_set_tx_desc_csum,
+ .set_rx_csum = NULL,
+ .ap_get_mimo_wide_rate_mask = NULL,
+ .debugfs_init = wl12xx_debugfs_add_files,
+ .get_spare_blocks = wl12xx_get_spare_blocks,
+ .set_key = wl12xx_set_key,
+ .pre_pkt_send = NULL,
};
static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
@@ -1323,6 +1626,7 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
static int __devinit wl12xx_probe(struct platform_device *pdev)
{
+ struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
struct wl1271 *wl;
struct ieee80211_hw *hw;
struct wl12xx_priv *priv;
@@ -1334,19 +1638,63 @@ static int __devinit wl12xx_probe(struct platform_device *pdev)
}
wl = hw->priv;
+ priv = wl->priv;
wl->ops = &wl12xx_ops;
wl->ptable = wl12xx_ptable;
wl->rtable = wl12xx_rtable;
wl->num_tx_desc = 16;
- wl->normal_tx_spare = WL12XX_TX_HW_BLOCK_SPARE_DEFAULT;
- wl->gem_tx_spare = WL12XX_TX_HW_BLOCK_GEM_SPARE;
+ wl->num_rx_desc = 8;
wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0;
wl->fw_status_priv_len = 0;
- memcpy(&wl->ht_cap, &wl12xx_ht_cap, sizeof(wl12xx_ht_cap));
+ wl->stats.fw_stats_len = sizeof(struct wl12xx_acx_statistics);
+ wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, &wl12xx_ht_cap);
+ wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, &wl12xx_ht_cap);
wl12xx_conf_init(wl);
+ if (!fref_param) {
+ priv->ref_clock = pdata->board_ref_clock;
+ } else {
+ if (!strcmp(fref_param, "19.2"))
+ priv->ref_clock = WL12XX_REFCLOCK_19;
+ else if (!strcmp(fref_param, "26"))
+ priv->ref_clock = WL12XX_REFCLOCK_26;
+ else if (!strcmp(fref_param, "26x"))
+ priv->ref_clock = WL12XX_REFCLOCK_26_XTAL;
+ else if (!strcmp(fref_param, "38.4"))
+ priv->ref_clock = WL12XX_REFCLOCK_38;
+ else if (!strcmp(fref_param, "38.4x"))
+ priv->ref_clock = WL12XX_REFCLOCK_38_XTAL;
+ else if (!strcmp(fref_param, "52"))
+ priv->ref_clock = WL12XX_REFCLOCK_52;
+ else
+ wl1271_error("Invalid fref parameter %s", fref_param);
+ }
+
+ if (!tcxo_param) {
+ priv->tcxo_clock = pdata->board_tcxo_clock;
+ } else {
+ if (!strcmp(tcxo_param, "19.2"))
+ priv->tcxo_clock = WL12XX_TCXOCLOCK_19_2;
+ else if (!strcmp(tcxo_param, "26"))
+ priv->tcxo_clock = WL12XX_TCXOCLOCK_26;
+ else if (!strcmp(tcxo_param, "38.4"))
+ priv->tcxo_clock = WL12XX_TCXOCLOCK_38_4;
+ else if (!strcmp(tcxo_param, "52"))
+ priv->tcxo_clock = WL12XX_TCXOCLOCK_52;
+ else if (!strcmp(tcxo_param, "16.368"))
+ priv->tcxo_clock = WL12XX_TCXOCLOCK_16_368;
+ else if (!strcmp(tcxo_param, "32.736"))
+ priv->tcxo_clock = WL12XX_TCXOCLOCK_32_736;
+ else if (!strcmp(tcxo_param, "16.8"))
+ priv->tcxo_clock = WL12XX_TCXOCLOCK_16_8;
+ else if (!strcmp(tcxo_param, "33.6"))
+ priv->tcxo_clock = WL12XX_TCXOCLOCK_33_6;
+ else
+ wl1271_error("Invalid tcxo parameter %s", tcxo_param);
+ }
+
return wlcore_probe(wl, pdev);
}
@@ -1378,6 +1726,13 @@ static void __exit wl12xx_exit(void)
}
module_exit(wl12xx_exit);
+module_param_named(fref, fref_param, charp, 0);
+MODULE_PARM_DESC(fref, "FREF clock: 19.2, 26, 26x, 38.4, 38.4x, 52");
+
+module_param_named(tcxo, tcxo_param, charp, 0);
+MODULE_PARM_DESC(tcxo,
+ "TCXO clock: 19.2, 26, 38.4, 52, 16.368, 32.736, 16.8, 33.6");
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
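For reference, the clock handling added to the probe path above prefers the new fref=/tcxo= module parameters (e.g. "modprobe wl12xx fref=38.4x tcxo=26") and falls back to the platform data otherwise. A hypothetical board-file snippet, only to illustrate where the fallback values come from; the field names match the probe code above, everything else is assumed:

static struct wl12xx_platform_data example_board_wlan_data = {
	/* used when no fref= module parameter is given */
	.board_ref_clock = WL12XX_REFCLOCK_38_XTAL,	/* "38.4x" */
	/* used when no tcxo= module parameter is given */
	.board_tcxo_clock = WL12XX_TCXOCLOCK_26,	/* "26" */
};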
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index 74cd332..26990fb 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -24,8 +24,30 @@
#include "conf.h"
+/* minimum FW required for driver for wl127x */
+#define WL127X_CHIP_VER 6
+#define WL127X_IFTYPE_VER 3
+#define WL127X_MAJOR_VER 10
+#define WL127X_SUBTYPE_VER 2
+#define WL127X_MINOR_VER 115
+
+/* minimum FW required for driver for wl128x */
+#define WL128X_CHIP_VER 7
+#define WL128X_IFTYPE_VER 3
+#define WL128X_MAJOR_VER 10
+#define WL128X_SUBTYPE_VER 2
+#define WL128X_MINOR_VER 115
+
+struct wl127x_rx_mem_pool_addr {
+ u32 addr;
+ u32 addr_extra;
+};
+
struct wl12xx_priv {
struct wl12xx_priv_conf conf;
+
+ int ref_clock;
+ int tcxo_clock;
};
#endif /* __WL12XX_PRIV_H__ */
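The two clock fields added here are private to wl12xx and are reached only through the opaque wl->priv pointer, exactly as the boot and probe code in main.c does. A minimal sketch of the access pattern:

static int example_read_ref_clock(struct wl1271 *wl)
{
	/* wl->priv points at the chip-specific wl12xx_priv area */
	struct wl12xx_priv *priv = wl->priv;

	return priv->ref_clock;
}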
diff --git a/drivers/net/wireless/ti/wl18xx/Kconfig b/drivers/net/wireless/ti/wl18xx/Kconfig
new file mode 100644
index 0000000..1cfdb25
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/Kconfig
@@ -0,0 +1,7 @@
+config WL18XX
+ tristate "TI wl18xx support"
+ depends on MAC80211
+ select WLCORE
+ ---help---
+ This module adds support for wireless adapters based on TI
+ WiLink 8 chipsets.
diff --git a/drivers/net/wireless/ti/wl18xx/Makefile b/drivers/net/wireless/ti/wl18xx/Makefile
new file mode 100644
index 0000000..67c0987
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/Makefile
@@ -0,0 +1,3 @@
+wl18xx-objs = main.o acx.o tx.o io.o debugfs.o
+
+obj-$(CONFIG_WL18XX) += wl18xx.o
diff --git a/drivers/net/wireless/ti/wl18xx/acx.c b/drivers/net/wireless/ti/wl18xx/acx.c
new file mode 100644
index 0000000..72840e2
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/acx.c
@@ -0,0 +1,111 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "../wlcore/cmd.h"
+#include "../wlcore/debug.h"
+#include "../wlcore/acx.h"
+
+#include "acx.h"
+
+int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
+ u32 sdio_blk_size, u32 extra_mem_blks,
+ u32 len_field_size)
+{
+ struct wl18xx_acx_host_config_bitmap *bitmap_conf;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx cfg bitmap %d blk %d spare %d field %d",
+ host_cfg_bitmap, sdio_blk_size, extra_mem_blks,
+ len_field_size);
+
+ bitmap_conf = kzalloc(sizeof(*bitmap_conf), GFP_KERNEL);
+ if (!bitmap_conf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ bitmap_conf->host_cfg_bitmap = cpu_to_le32(host_cfg_bitmap);
+ bitmap_conf->host_sdio_block_size = cpu_to_le32(sdio_blk_size);
+ bitmap_conf->extra_mem_blocks = cpu_to_le32(extra_mem_blks);
+ bitmap_conf->length_field_size = cpu_to_le32(len_field_size);
+
+ ret = wl1271_cmd_configure(wl, ACX_HOST_IF_CFG_BITMAP,
+ bitmap_conf, sizeof(*bitmap_conf));
+ if (ret < 0) {
+ wl1271_warning("wl1271 bitmap config opt failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(bitmap_conf);
+
+ return ret;
+}
+
+int wl18xx_acx_set_checksum_state(struct wl1271 *wl)
+{
+ struct wl18xx_acx_checksum_state *acx;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx checksum state");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->checksum_state = CHECKSUM_OFFLOAD_ENABLED;
+
+ ret = wl1271_cmd_configure(wl, ACX_CHECKSUM_CONFIG, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("failed to set Tx checksum state: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl18xx_acx_clear_statistics(struct wl1271 *wl)
+{
+ struct wl18xx_acx_clear_statistics *acx;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx clear statistics");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = wl1271_cmd_configure(wl, ACX_CLEAR_STATISTICS, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("failed to clear firmware statistics: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
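All three helpers follow the same wlcore ACX pattern: allocate a zeroed command structure, send it with wl1271_cmd_configure(), and free it on every exit path. A hedged sketch of a caller; the function name is hypothetical, the real call sites belong to the wl18xx init/debugfs code elsewhere in this series:

static int example_wl18xx_post_boot(struct wl1271 *wl)
{
	int ret;

	/* enable TX checksum offload in the firmware */
	ret = wl18xx_acx_set_checksum_state(wl);
	if (ret < 0)
		return ret;

	/* start from a clean statistics baseline */
	return wl18xx_acx_clear_statistics(wl);
}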
diff --git a/drivers/net/wireless/ti/wl18xx/acx.h b/drivers/net/wireless/ti/wl18xx/acx.h
new file mode 100644
index 0000000..e2609a6
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/acx.h
@@ -0,0 +1,287 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_ACX_H__
+#define __WL18XX_ACX_H__
+
+#include "../wlcore/wlcore.h"
+#include "../wlcore/acx.h"
+
+enum {
+ ACX_CLEAR_STATISTICS = 0x0047,
+};
+
+/* number of bits the length field takes (add 1 for the actual number) */
+#define WL18XX_HOST_IF_LEN_SIZE_FIELD 15
+
+#define WL18XX_ACX_EVENTS_VECTOR (WL1271_ACX_INTR_WATCHDOG | \
+ WL1271_ACX_INTR_INIT_COMPLETE | \
+ WL1271_ACX_INTR_EVENT_A | \
+ WL1271_ACX_INTR_EVENT_B | \
+ WL1271_ACX_INTR_CMD_COMPLETE | \
+ WL1271_ACX_INTR_HW_AVAILABLE | \
+ WL1271_ACX_INTR_DATA | \
+ WL1271_ACX_SW_INTR_WATCHDOG)
+
+#define WL18XX_INTR_MASK (WL1271_ACX_INTR_WATCHDOG | \
+ WL1271_ACX_INTR_EVENT_A | \
+ WL1271_ACX_INTR_EVENT_B | \
+ WL1271_ACX_INTR_HW_AVAILABLE | \
+ WL1271_ACX_INTR_DATA | \
+ WL1271_ACX_SW_INTR_WATCHDOG)
+
+struct wl18xx_acx_host_config_bitmap {
+ struct acx_header header;
+
+ __le32 host_cfg_bitmap;
+
+ __le32 host_sdio_block_size;
+
+ /* extra mem blocks per frame in TX. */
+ __le32 extra_mem_blocks;
+
+ /*
+ * number of bits of the length field in the first TX word
+ * (up to 15; a value of 15 means the entire 16 bits are used).
+ */
+ __le32 length_field_size;
+
+} __packed;
+
+enum {
+ CHECKSUM_OFFLOAD_DISABLED = 0,
+ CHECKSUM_OFFLOAD_ENABLED = 1,
+ CHECKSUM_OFFLOAD_FAKE_RX = 2,
+ CHECKSUM_OFFLOAD_INVALID = 0xFF
+};
+
+struct wl18xx_acx_checksum_state {
+ struct acx_header header;
+
+ /* enum acx_checksum_state */
+ u8 checksum_state;
+ u8 pad[3];
+} __packed;
+
+
+struct wl18xx_acx_error_stats {
+ u32 error_frame;
+ u32 error_null_Frame_tx_start;
+ u32 error_numll_frame_cts_start;
+ u32 error_bar_retry;
+ u32 error_frame_cts_nul_flid;
+} __packed;
+
+struct wl18xx_acx_debug_stats {
+ u32 debug1;
+ u32 debug2;
+ u32 debug3;
+ u32 debug4;
+ u32 debug5;
+ u32 debug6;
+} __packed;
+
+struct wl18xx_acx_ring_stats {
+ u32 prepared_descs;
+ u32 tx_cmplt;
+} __packed;
+
+struct wl18xx_acx_tx_stats {
+ u32 tx_prepared_descs;
+ u32 tx_cmplt;
+ u32 tx_template_prepared;
+ u32 tx_data_prepared;
+ u32 tx_template_programmed;
+ u32 tx_data_programmed;
+ u32 tx_burst_programmed;
+ u32 tx_starts;
+ u32 tx_imm_resp;
+ u32 tx_start_templates;
+ u32 tx_start_int_templates;
+ u32 tx_start_fw_gen;
+ u32 tx_start_data;
+ u32 tx_start_null_frame;
+ u32 tx_exch;
+ u32 tx_retry_template;
+ u32 tx_retry_data;
+ u32 tx_exch_pending;
+ u32 tx_exch_expiry;
+ u32 tx_done_template;
+ u32 tx_done_data;
+ u32 tx_done_int_template;
+ u32 tx_frame_checksum;
+ u32 tx_checksum_result;
+ u32 frag_called;
+ u32 frag_mpdu_alloc_failed;
+ u32 frag_init_called;
+ u32 frag_in_process_called;
+ u32 frag_tkip_called;
+ u32 frag_key_not_found;
+ u32 frag_need_fragmentation;
+ u32 frag_bad_mblk_num;
+ u32 frag_failed;
+ u32 frag_cache_hit;
+ u32 frag_cache_miss;
+} __packed;
+
+struct wl18xx_acx_rx_stats {
+ u32 rx_beacon_early_term;
+ u32 rx_out_of_mpdu_nodes;
+ u32 rx_hdr_overflow;
+ u32 rx_dropped_frame;
+ u32 rx_done_stage;
+ u32 rx_done;
+ u32 rx_defrag;
+ u32 rx_defrag_end;
+ u32 rx_cmplt;
+ u32 rx_pre_complt;
+ u32 rx_cmplt_task;
+ u32 rx_phy_hdr;
+ u32 rx_timeout;
+ u32 rx_timeout_wa;
+ u32 rx_wa_density_dropped_frame;
+ u32 rx_wa_ba_not_expected;
+ u32 rx_frame_checksum;
+ u32 rx_checksum_result;
+ u32 defrag_called;
+ u32 defrag_init_called;
+ u32 defrag_in_process_called;
+ u32 defrag_tkip_called;
+ u32 defrag_need_defrag;
+ u32 defrag_decrypt_failed;
+ u32 decrypt_key_not_found;
+ u32 defrag_need_decrypt;
+ u32 rx_tkip_replays;
+} __packed;
+
+struct wl18xx_acx_isr_stats {
+ u32 irqs;
+} __packed;
+
+#define PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD 10
+
+struct wl18xx_acx_pwr_stats {
+ u32 missing_bcns_cnt;
+ u32 rcvd_bcns_cnt;
+ u32 connection_out_of_sync;
+ u32 cont_miss_bcns_spread[PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD];
+ u32 rcvd_awake_bcns_cnt;
+} __packed;
+
+struct wl18xx_acx_event_stats {
+ u32 calibration;
+ u32 rx_mismatch;
+ u32 rx_mem_empty;
+} __packed;
+
+struct wl18xx_acx_ps_poll_stats {
+ u32 ps_poll_timeouts;
+ u32 upsd_timeouts;
+ u32 upsd_max_ap_turn;
+ u32 ps_poll_max_ap_turn;
+ u32 ps_poll_utilization;
+ u32 upsd_utilization;
+} __packed;
+
+struct wl18xx_acx_rx_filter_stats {
+ u32 beacon_filter;
+ u32 arp_filter;
+ u32 mc_filter;
+ u32 dup_filter;
+ u32 data_filter;
+ u32 ibss_filter;
+ u32 protection_filter;
+ u32 accum_arp_pend_requests;
+ u32 max_arp_queue_dep;
+} __packed;
+
+struct wl18xx_acx_rx_rate_stats {
+ u32 rx_frames_per_rates[50];
+} __packed;
+
+#define AGGR_STATS_TX_AGG 16
+#define AGGR_STATS_TX_RATE 16
+#define AGGR_STATS_RX_SIZE_LEN 16
+
+struct wl18xx_acx_aggr_stats {
+ u32 tx_agg_vs_rate[AGGR_STATS_TX_AGG * AGGR_STATS_TX_RATE];
+ u32 rx_size[AGGR_STATS_RX_SIZE_LEN];
+} __packed;
+
+#define PIPE_STATS_HW_FIFO 11
+
+struct wl18xx_acx_pipeline_stats {
+ u32 hs_tx_stat_fifo_int;
+ u32 hs_rx_stat_fifo_int;
+ u32 tcp_tx_stat_fifo_int;
+ u32 tcp_rx_stat_fifo_int;
+ u32 enc_tx_stat_fifo_int;
+ u32 enc_rx_stat_fifo_int;
+ u32 rx_complete_stat_fifo_int;
+ u32 pre_proc_swi;
+ u32 post_proc_swi;
+ u32 sec_frag_swi;
+ u32 pre_to_defrag_swi;
+ u32 defrag_to_csum_swi;
+ u32 csum_to_rx_xfer_swi;
+ u32 dec_packet_in;
+ u32 dec_packet_in_fifo_full;
+ u32 dec_packet_out;
+ u32 cs_rx_packet_in;
+ u32 cs_rx_packet_out;
+ u16 pipeline_fifo_full[PIPE_STATS_HW_FIFO];
+} __packed;
+
+struct wl18xx_acx_mem_stats {
+ u32 rx_free_mem_blks;
+ u32 tx_free_mem_blks;
+ u32 fwlog_free_mem_blks;
+ u32 fw_gen_free_mem_blks;
+} __packed;
+
+struct wl18xx_acx_statistics {
+ struct acx_header header;
+
+ struct wl18xx_acx_error_stats error;
+ struct wl18xx_acx_debug_stats debug;
+ struct wl18xx_acx_tx_stats tx;
+ struct wl18xx_acx_rx_stats rx;
+ struct wl18xx_acx_isr_stats isr;
+ struct wl18xx_acx_pwr_stats pwr;
+ struct wl18xx_acx_ps_poll_stats ps_poll;
+ struct wl18xx_acx_rx_filter_stats rx_filter;
+ struct wl18xx_acx_rx_rate_stats rx_rate;
+ struct wl18xx_acx_aggr_stats aggr_size;
+ struct wl18xx_acx_pipeline_stats pipeline;
+ struct wl18xx_acx_mem_stats mem;
+} __packed;
+
+struct wl18xx_acx_clear_statistics {
+ struct acx_header header;
+};
+
+int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
+ u32 sdio_blk_size, u32 extra_mem_blks,
+ u32 len_field_size);
+int wl18xx_acx_set_checksum_state(struct wl1271 *wl);
+int wl18xx_acx_clear_statistics(struct wl1271 *wl);
+
+#endif /* __WL18XX_ACX_H__ */
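The wl18xx_acx_statistics layout above is what the firmware returns for the fw_stats debugfs files; wlcore only needs to know its total size, in the same way the wl12xx probe earlier in this patch sets wl->stats.fw_stats_len. A minimal sketch; the corresponding wl18xx assignment is assumed to live in its probe path, which is not part of this section:

static void example_set_fw_stats_len(struct wl1271 *wl)
{
	/* mirrors the wl12xx assignment shown earlier in this patch */
	wl->stats.fw_stats_len = sizeof(struct wl18xx_acx_statistics);
}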
diff --git a/drivers/net/wireless/ti/wl18xx/conf.h b/drivers/net/wireless/ti/wl18xx/conf.h
new file mode 100644
index 0000000..4d426cc
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/conf.h
@@ -0,0 +1,111 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_CONF_H__
+#define __WL18XX_CONF_H__
+
+#define WL18XX_CONF_MAGIC 0x10e100ca
+#define WL18XX_CONF_VERSION (WLCORE_CONF_VERSION | 0x0003)
+#define WL18XX_CONF_MASK 0x0000ffff
+#define WL18XX_CONF_SIZE (WLCORE_CONF_SIZE + \
+ sizeof(struct wl18xx_priv_conf))
+
+#define NUM_OF_CHANNELS_11_ABG 150
+#define NUM_OF_CHANNELS_11_P 7
+#define WL18XX_NUM_OF_SUB_BANDS 9
+#define SRF_TABLE_LEN 16
+#define PIN_MUXING_SIZE 2
+
+struct wl18xx_mac_and_phy_params {
+ u8 phy_standalone;
+ u8 rdl;
+ u8 enable_clpc;
+ u8 enable_tx_low_pwr_on_siso_rdl;
+ u8 auto_detect;
+ u8 dedicated_fem;
+
+ u8 low_band_component;
+
+ /* Bit 0: One Hot, Bit 1: Control Enable, Bit 2: 1.8V, Bit 3: 3V */
+ u8 low_band_component_type;
+
+ u8 high_band_component;
+
+ /* Bit 0: One Hot, Bit 1: Control Enable, Bit 2: 1.8V, Bit 3: 3V */
+ u8 high_band_component_type;
+ u8 number_of_assembled_ant2_4;
+ u8 number_of_assembled_ant5;
+ u8 pin_muxing_platform_options[PIN_MUXING_SIZE];
+ u8 external_pa_dc2dc;
+ u8 tcxo_ldo_voltage;
+ u8 xtal_itrim_val;
+ u8 srf_state;
+ u8 srf1[SRF_TABLE_LEN];
+ u8 srf2[SRF_TABLE_LEN];
+ u8 srf3[SRF_TABLE_LEN];
+ u8 io_configuration;
+ u8 sdio_configuration;
+ u8 settings;
+ u8 rx_profile;
+ u8 per_chan_pwr_limit_arr_11abg[NUM_OF_CHANNELS_11_ABG];
+ u8 pwr_limit_reference_11_abg;
+ u8 per_chan_pwr_limit_arr_11p[NUM_OF_CHANNELS_11_P];
+ u8 pwr_limit_reference_11p;
+ u8 per_sub_band_tx_trace_loss[WL18XX_NUM_OF_SUB_BANDS];
+ u8 per_sub_band_rx_trace_loss[WL18XX_NUM_OF_SUB_BANDS];
+ u8 primary_clock_setting_time;
+ u8 clock_valid_on_wake_up;
+ u8 secondary_clock_setting_time;
+ u8 board_type;
+ /* enable point saturation */
+ u8 psat;
+ /* low/medium/high Tx power in dBm */
+ s8 low_power_val;
+ s8 med_power_val;
+ s8 high_power_val;
+ u8 padding[1];
+} __packed;
+
+enum wl18xx_ht_mode {
+ /* Default - use MIMO, fallback to SISO20 */
+ HT_MODE_DEFAULT = 0,
+
+ /* Wide - use SISO40 */
+ HT_MODE_WIDE = 1,
+
+ /* Use SISO20 */
+ HT_MODE_SISO20 = 2,
+};
+
+struct wl18xx_ht_settings {
+ /* DEFAULT / WIDE / SISO20 */
+ u8 mode;
+} __packed;
+
+struct wl18xx_priv_conf {
+ /* Module params structures */
+ struct wl18xx_ht_settings ht;
+
+ /* this structure is copied wholesale to FW */
+ struct wl18xx_mac_and_phy_params phy;
+} __packed;
+
+#endif /* __WL18XX_CONF_H__ */
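WL18XX_CONF_SIZE above encodes the expected layout of a wl18xx configuration blob: the generic wlcore block followed immediately by the chip-specific block defined in this header. A rough sketch of that layout, assuming WLCORE_CONF_SIZE covers the whole generic part (which is defined in wlcore, not in this section):

struct example_wl18xx_conf_blob {
	/* generic wlcore configuration, WLCORE_CONF_SIZE bytes in total */
	u8 core[WLCORE_CONF_SIZE];

	/* chip-specific part defined in this header */
	struct wl18xx_priv_conf priv;
} __packed;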
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
new file mode 100644
index 0000000..3ce6f10
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -0,0 +1,403 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "../wlcore/debugfs.h"
+#include "../wlcore/wlcore.h"
+
+#include "wl18xx.h"
+#include "acx.h"
+#include "debugfs.h"
+
+#define WL18XX_DEBUGFS_FWSTATS_FILE(a, b, c) \
+ DEBUGFS_FWSTATS_FILE(a, b, c, wl18xx_acx_statistics)
+#define WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(a, b, c) \
+ DEBUGFS_FWSTATS_FILE_ARRAY(a, b, c, wl18xx_acx_statistics)
+
+
+WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug1, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug2, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug3, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug4, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug5, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug6, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_null_Frame_tx_start, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_numll_frame_cts_start, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_bar_retry, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_cts_nul_flid, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_prepared_descs, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cmplt, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_template_prepared, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_prepared, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_template_programmed, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_programmed, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_burst_programmed, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_starts, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_imm_resp, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_templates, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_int_templates, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_fw_gen, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_data, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_null_frame, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_template, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_data, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_pending, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_expiry, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_template, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_data, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_int_template, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_frame_checksum, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_checksum_result, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_called, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_mpdu_alloc_failed, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_init_called, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_in_process_called, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_tkip_called, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_key_not_found, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_need_fragmentation, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_bad_mblk_num, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_failed, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_cache_hit, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_cache_miss, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_beacon_early_term, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_out_of_mpdu_nodes, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_hdr_overflow, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_dropped_frame, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_done, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_defrag, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_defrag_end, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_cmplt, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_pre_complt, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_cmplt_task, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_phy_hdr, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout_wa, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_density_dropped_frame, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_ba_not_expected, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_frame_checksum, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_checksum_result, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_called, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_init_called, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_in_process_called, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_tkip_called, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_need_defrag, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_decrypt_failed, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, decrypt_key_not_found, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_need_decrypt, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_tkip_replays, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, missing_bcns_cnt, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_bcns_cnt, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, connection_out_of_sync, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pwr, cont_miss_bcns_spread,
+ PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD);
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_bcns_cnt, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_timeouts, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_timeouts, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_max_ap_turn, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_max_ap_turn, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_utilization, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_utilization, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, beacon_filter, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, arp_filter, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, mc_filter, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, dup_filter, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, data_filter, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, ibss_filter, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
+ AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, rx_size,
+ AGGR_STATS_RX_SIZE_LEN);
+
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, hs_tx_stat_fifo_int, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_tx_stat_fifo_int, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_rx_stat_fifo_int, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_tx_stat_fifo_int, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_rx_stat_fifo_int, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, rx_complete_stat_fifo_int, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_proc_swi, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, post_proc_swi, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, sec_frag_swi, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_to_defrag_swi, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_csum_swi, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, csum_to_rx_xfer_swi, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in_fifo_full, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_out, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_in, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_out, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pipeline, pipeline_fifo_full,
+ PIPE_STATS_HW_FIFO);
+
+WL18XX_DEBUGFS_FWSTATS_FILE(mem, rx_free_mem_blks, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(mem, tx_free_mem_blks, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(mem, fwlog_free_mem_blks, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(mem, fw_gen_free_mem_blks, "%u");
+
+static ssize_t conf_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ struct wl18xx_priv *priv = wl->priv;
+ struct wlcore_conf_header header;
+ char *buf, *pos;
+ size_t len;
+ int ret;
+
+ len = WL18XX_CONF_SIZE;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ header.magic = cpu_to_le32(WL18XX_CONF_MAGIC);
+ header.version = cpu_to_le32(WL18XX_CONF_VERSION);
+ header.checksum = 0;
+
+ mutex_lock(&wl->mutex);
+
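+ /* layout: conf header, then the core wlcore conf, then the 18xx priv conf */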
+ pos = buf;
+ memcpy(pos, &header, sizeof(header));
+ pos += sizeof(header);
+ memcpy(pos, &wl->conf, sizeof(wl->conf));
+ pos += sizeof(wl->conf);
+ memcpy(pos, &priv->conf, sizeof(priv->conf));
+
+ mutex_unlock(&wl->mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations conf_ops = {
+ .read = conf_read,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static ssize_t clear_fw_stats_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ int ret;
+
+ mutex_lock(&wl->mutex);
+
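+ /* statistics can only be cleared while the firmware is running */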
+ if (wl->state == WL1271_STATE_OFF)
+ goto out;
+
+ ret = wl18xx_acx_clear_statistics(wl);
+ if (ret < 0) {
+ count = ret;
+ goto out;
+ }
+out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static const struct file_operations clear_fw_stats_ops = {
+ .write = clear_fw_stats_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+int wl18xx_debugfs_add_files(struct wl1271 *wl,
+ struct dentry *rootdir)
+{
+ int ret = 0;
+ struct dentry *entry, *stats, *moddir;
+
+ moddir = debugfs_create_dir(KBUILD_MODNAME, rootdir);
+ if (!moddir || IS_ERR(moddir)) {
+ entry = moddir;
+ goto err;
+ }
+
+ stats = debugfs_create_dir("fw_stats", moddir);
+ if (!stats || IS_ERR(stats)) {
+ entry = stats;
+ goto err;
+ }
+
+ DEBUGFS_ADD(clear_fw_stats, stats);
+
+ DEBUGFS_FWSTATS_ADD(debug, debug1);
+ DEBUGFS_FWSTATS_ADD(debug, debug2);
+ DEBUGFS_FWSTATS_ADD(debug, debug3);
+ DEBUGFS_FWSTATS_ADD(debug, debug4);
+ DEBUGFS_FWSTATS_ADD(debug, debug5);
+ DEBUGFS_FWSTATS_ADD(debug, debug6);
+
+ DEBUGFS_FWSTATS_ADD(error, error_frame);
+ DEBUGFS_FWSTATS_ADD(error, error_null_Frame_tx_start);
+ DEBUGFS_FWSTATS_ADD(error, error_numll_frame_cts_start);
+ DEBUGFS_FWSTATS_ADD(error, error_bar_retry);
+ DEBUGFS_FWSTATS_ADD(error, error_frame_cts_nul_flid);
+
+ DEBUGFS_FWSTATS_ADD(tx, tx_prepared_descs);
+ DEBUGFS_FWSTATS_ADD(tx, tx_cmplt);
+ DEBUGFS_FWSTATS_ADD(tx, tx_template_prepared);
+ DEBUGFS_FWSTATS_ADD(tx, tx_data_prepared);
+ DEBUGFS_FWSTATS_ADD(tx, tx_template_programmed);
+ DEBUGFS_FWSTATS_ADD(tx, tx_data_programmed);
+ DEBUGFS_FWSTATS_ADD(tx, tx_burst_programmed);
+ DEBUGFS_FWSTATS_ADD(tx, tx_starts);
+ DEBUGFS_FWSTATS_ADD(tx, tx_imm_resp);
+ DEBUGFS_FWSTATS_ADD(tx, tx_start_templates);
+ DEBUGFS_FWSTATS_ADD(tx, tx_start_int_templates);
+ DEBUGFS_FWSTATS_ADD(tx, tx_start_fw_gen);
+ DEBUGFS_FWSTATS_ADD(tx, tx_start_data);
+ DEBUGFS_FWSTATS_ADD(tx, tx_start_null_frame);
+ DEBUGFS_FWSTATS_ADD(tx, tx_exch);
+ DEBUGFS_FWSTATS_ADD(tx, tx_retry_template);
+ DEBUGFS_FWSTATS_ADD(tx, tx_retry_data);
+ DEBUGFS_FWSTATS_ADD(tx, tx_exch_pending);
+ DEBUGFS_FWSTATS_ADD(tx, tx_exch_expiry);
+ DEBUGFS_FWSTATS_ADD(tx, tx_done_template);
+ DEBUGFS_FWSTATS_ADD(tx, tx_done_data);
+ DEBUGFS_FWSTATS_ADD(tx, tx_done_int_template);
+ DEBUGFS_FWSTATS_ADD(tx, tx_frame_checksum);
+ DEBUGFS_FWSTATS_ADD(tx, tx_checksum_result);
+ DEBUGFS_FWSTATS_ADD(tx, frag_called);
+ DEBUGFS_FWSTATS_ADD(tx, frag_mpdu_alloc_failed);
+ DEBUGFS_FWSTATS_ADD(tx, frag_init_called);
+ DEBUGFS_FWSTATS_ADD(tx, frag_in_process_called);
+ DEBUGFS_FWSTATS_ADD(tx, frag_tkip_called);
+ DEBUGFS_FWSTATS_ADD(tx, frag_key_not_found);
+ DEBUGFS_FWSTATS_ADD(tx, frag_need_fragmentation);
+ DEBUGFS_FWSTATS_ADD(tx, frag_bad_mblk_num);
+ DEBUGFS_FWSTATS_ADD(tx, frag_failed);
+ DEBUGFS_FWSTATS_ADD(tx, frag_cache_hit);
+ DEBUGFS_FWSTATS_ADD(tx, frag_cache_miss);
+
+ DEBUGFS_FWSTATS_ADD(rx, rx_beacon_early_term);
+ DEBUGFS_FWSTATS_ADD(rx, rx_out_of_mpdu_nodes);
+ DEBUGFS_FWSTATS_ADD(rx, rx_hdr_overflow);
+ DEBUGFS_FWSTATS_ADD(rx, rx_dropped_frame);
+ DEBUGFS_FWSTATS_ADD(rx, rx_done);
+ DEBUGFS_FWSTATS_ADD(rx, rx_defrag);
+ DEBUGFS_FWSTATS_ADD(rx, rx_defrag_end);
+ DEBUGFS_FWSTATS_ADD(rx, rx_cmplt);
+ DEBUGFS_FWSTATS_ADD(rx, rx_pre_complt);
+ DEBUGFS_FWSTATS_ADD(rx, rx_cmplt_task);
+ DEBUGFS_FWSTATS_ADD(rx, rx_phy_hdr);
+ DEBUGFS_FWSTATS_ADD(rx, rx_timeout);
+ DEBUGFS_FWSTATS_ADD(rx, rx_timeout_wa);
+ DEBUGFS_FWSTATS_ADD(rx, rx_wa_density_dropped_frame);
+ DEBUGFS_FWSTATS_ADD(rx, rx_wa_ba_not_expected);
+ DEBUGFS_FWSTATS_ADD(rx, rx_frame_checksum);
+ DEBUGFS_FWSTATS_ADD(rx, rx_checksum_result);
+ DEBUGFS_FWSTATS_ADD(rx, defrag_called);
+ DEBUGFS_FWSTATS_ADD(rx, defrag_init_called);
+ DEBUGFS_FWSTATS_ADD(rx, defrag_in_process_called);
+ DEBUGFS_FWSTATS_ADD(rx, defrag_tkip_called);
+ DEBUGFS_FWSTATS_ADD(rx, defrag_need_defrag);
+ DEBUGFS_FWSTATS_ADD(rx, defrag_decrypt_failed);
+ DEBUGFS_FWSTATS_ADD(rx, decrypt_key_not_found);
+ DEBUGFS_FWSTATS_ADD(rx, defrag_need_decrypt);
+ DEBUGFS_FWSTATS_ADD(rx, rx_tkip_replays);
+
+ DEBUGFS_FWSTATS_ADD(isr, irqs);
+
+ DEBUGFS_FWSTATS_ADD(pwr, missing_bcns_cnt);
+ DEBUGFS_FWSTATS_ADD(pwr, rcvd_bcns_cnt);
+ DEBUGFS_FWSTATS_ADD(pwr, connection_out_of_sync);
+ DEBUGFS_FWSTATS_ADD(pwr, cont_miss_bcns_spread);
+ DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_bcns_cnt);
+
+ DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_timeouts);
+ DEBUGFS_FWSTATS_ADD(ps_poll, upsd_timeouts);
+ DEBUGFS_FWSTATS_ADD(ps_poll, upsd_max_ap_turn);
+ DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_max_ap_turn);
+ DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_utilization);
+ DEBUGFS_FWSTATS_ADD(ps_poll, upsd_utilization);
+
+ DEBUGFS_FWSTATS_ADD(rx_filter, beacon_filter);
+ DEBUGFS_FWSTATS_ADD(rx_filter, arp_filter);
+ DEBUGFS_FWSTATS_ADD(rx_filter, mc_filter);
+ DEBUGFS_FWSTATS_ADD(rx_filter, dup_filter);
+ DEBUGFS_FWSTATS_ADD(rx_filter, data_filter);
+ DEBUGFS_FWSTATS_ADD(rx_filter, ibss_filter);
+ DEBUGFS_FWSTATS_ADD(rx_filter, protection_filter);
+ DEBUGFS_FWSTATS_ADD(rx_filter, accum_arp_pend_requests);
+ DEBUGFS_FWSTATS_ADD(rx_filter, max_arp_queue_dep);
+
+ DEBUGFS_FWSTATS_ADD(rx_rate, rx_frames_per_rates);
+
+ DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_vs_rate);
+ DEBUGFS_FWSTATS_ADD(aggr_size, rx_size);
+
+ DEBUGFS_FWSTATS_ADD(pipeline, hs_tx_stat_fifo_int);
+ DEBUGFS_FWSTATS_ADD(pipeline, tcp_tx_stat_fifo_int);
+ DEBUGFS_FWSTATS_ADD(pipeline, tcp_rx_stat_fifo_int);
+ DEBUGFS_FWSTATS_ADD(pipeline, enc_tx_stat_fifo_int);
+ DEBUGFS_FWSTATS_ADD(pipeline, enc_rx_stat_fifo_int);
+ DEBUGFS_FWSTATS_ADD(pipeline, rx_complete_stat_fifo_int);
+ DEBUGFS_FWSTATS_ADD(pipeline, pre_proc_swi);
+ DEBUGFS_FWSTATS_ADD(pipeline, post_proc_swi);
+ DEBUGFS_FWSTATS_ADD(pipeline, sec_frag_swi);
+ DEBUGFS_FWSTATS_ADD(pipeline, pre_to_defrag_swi);
+ DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_csum_swi);
+ DEBUGFS_FWSTATS_ADD(pipeline, csum_to_rx_xfer_swi);
+ DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in);
+ DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in_fifo_full);
+ DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_out);
+ DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_in);
+ DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_out);
+ DEBUGFS_FWSTATS_ADD(pipeline, pipeline_fifo_full);
+
+ DEBUGFS_FWSTATS_ADD(mem, rx_free_mem_blks);
+ DEBUGFS_FWSTATS_ADD(mem, tx_free_mem_blks);
+ DEBUGFS_FWSTATS_ADD(mem, fwlog_free_mem_blks);
+ DEBUGFS_FWSTATS_ADD(mem, fw_gen_free_mem_blks);
+
+ DEBUGFS_ADD(conf, moddir);
+
+ return 0;
+
+err:
+ if (IS_ERR(entry))
+ ret = PTR_ERR(entry);
+ else
+ ret = -ENOMEM;
+
+ return ret;
+}
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.h b/drivers/net/wireless/ti/wl18xx/debugfs.h
new file mode 100644
index 0000000..ed679be
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.h
@@ -0,0 +1,28 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_DEBUGFS_H__
+#define __WL18XX_DEBUGFS_H__
+
+int wl18xx_debugfs_add_files(struct wl1271 *wl,
+ struct dentry *rootdir);
+
+#endif /* __WL18XX_DEBUGFS_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/io.c b/drivers/net/wireless/ti/wl18xx/io.c
new file mode 100644
index 0000000..f0abf3e
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/io.c
@@ -0,0 +1,75 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "../wlcore/wlcore.h"
+#include "../wlcore/io.h"
+
+#include "io.h"
+
+int wl18xx_top_reg_write(struct wl1271 *wl, int addr, u16 val)
+{
+ u32 tmp;
+ int ret;
+
+ if (WARN_ON(addr % 2))
+ return -EINVAL;
+
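+ /* 16-bit access is done as a read-modify-write of the aligned 32-bit word */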
+ if ((addr % 4) == 0) {
+ ret = wlcore_read32(wl, addr, &tmp);
+ if (ret < 0)
+ goto out;
+
+ tmp = (tmp & 0xffff0000) | val;
+ ret = wlcore_write32(wl, addr, tmp);
+ } else {
+ ret = wlcore_read32(wl, addr - 2, &tmp);
+ if (ret < 0)
+ goto out;
+
+ tmp = (tmp & 0xffff) | (val << 16);
+ ret = wlcore_write32(wl, addr - 2, tmp);
+ }
+
+out:
+ return ret;
+}
+
+int wl18xx_top_reg_read(struct wl1271 *wl, int addr, u16 *out)
+{
+ u32 val = 0;
+ int ret;
+
+ if (WARN_ON(addr % 2))
+ return -EINVAL;
+
+ if ((addr % 4) == 0) {
+ /* address is 4-byte aligned */
+ ret = wlcore_read32(wl, addr, &val);
+ if (ret >= 0 && out)
+ *out = val & 0xffff;
+ } else {
+ ret = wlcore_read32(wl, addr - 2, &val);
+ if (ret >= 0 && out)
+ *out = (val & 0xffff0000) >> 16;
+ }
+
+ return ret;
+}
diff --git a/drivers/net/wireless/ti/wl18xx/io.h b/drivers/net/wireless/ti/wl18xx/io.h
new file mode 100644
index 0000000..c32ae30
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/io.h
@@ -0,0 +1,28 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_IO_H__
+#define __WL18XX_IO_H__
+
+int __must_check wl18xx_top_reg_write(struct wl1271 *wl, int addr, u16 val);
+int __must_check wl18xx_top_reg_read(struct wl1271 *wl, int addr, u16 *out);
+
+#endif /* __WL18XX_IO_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
new file mode 100644
index 0000000..69042bb
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -0,0 +1,1610 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/ip.h>
+#include <linux/firmware.h>
+
+#include "../wlcore/wlcore.h"
+#include "../wlcore/debug.h"
+#include "../wlcore/io.h"
+#include "../wlcore/acx.h"
+#include "../wlcore/tx.h"
+#include "../wlcore/rx.h"
+#include "../wlcore/boot.h"
+
+#include "reg.h"
+#include "conf.h"
+#include "acx.h"
+#include "tx.h"
+#include "wl18xx.h"
+#include "io.h"
+#include "debugfs.h"
+
+#define WL18XX_RX_CHECKSUM_MASK 0x40
+
+static char *ht_mode_param = NULL;
+static char *board_type_param = NULL;
+static bool checksum_param = false;
+static bool enable_11a_param = true;
+static int num_rx_desc_param = -1;
+
+/* PHY parameters */
+static int dc2dc_param = -1;
+static int n_antennas_2_param = -1;
+static int n_antennas_5_param = -1;
+static int low_band_component_param = -1;
+static int low_band_component_type_param = -1;
+static int high_band_component_param = -1;
+static int high_band_component_type_param = -1;
+static int pwr_limit_reference_11_abg_param = -1;
+
+static const u8 wl18xx_rate_to_idx_2ghz[] = {
+ /* MCS rates are used only with 11n */
+ 15, /* WL18XX_CONF_HW_RXTX_RATE_MCS15 */
+ 14, /* WL18XX_CONF_HW_RXTX_RATE_MCS14 */
+ 13, /* WL18XX_CONF_HW_RXTX_RATE_MCS13 */
+ 12, /* WL18XX_CONF_HW_RXTX_RATE_MCS12 */
+ 11, /* WL18XX_CONF_HW_RXTX_RATE_MCS11 */
+ 10, /* WL18XX_CONF_HW_RXTX_RATE_MCS10 */
+ 9, /* WL18XX_CONF_HW_RXTX_RATE_MCS9 */
+ 8, /* WL18XX_CONF_HW_RXTX_RATE_MCS8 */
+ 7, /* WL18XX_CONF_HW_RXTX_RATE_MCS7 */
+ 6, /* WL18XX_CONF_HW_RXTX_RATE_MCS6 */
+ 5, /* WL18XX_CONF_HW_RXTX_RATE_MCS5 */
+ 4, /* WL18XX_CONF_HW_RXTX_RATE_MCS4 */
+ 3, /* WL18XX_CONF_HW_RXTX_RATE_MCS3 */
+ 2, /* WL18XX_CONF_HW_RXTX_RATE_MCS2 */
+ 1, /* WL18XX_CONF_HW_RXTX_RATE_MCS1 */
+ 0, /* WL18XX_CONF_HW_RXTX_RATE_MCS0 */
+
+ 11, /* WL18XX_CONF_HW_RXTX_RATE_54 */
+ 10, /* WL18XX_CONF_HW_RXTX_RATE_48 */
+ 9, /* WL18XX_CONF_HW_RXTX_RATE_36 */
+ 8, /* WL18XX_CONF_HW_RXTX_RATE_24 */
+
+ /* TI-specific rate */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_22 */
+
+ 7, /* WL18XX_CONF_HW_RXTX_RATE_18 */
+ 6, /* WL18XX_CONF_HW_RXTX_RATE_12 */
+ 3, /* WL18XX_CONF_HW_RXTX_RATE_11 */
+ 5, /* WL18XX_CONF_HW_RXTX_RATE_9 */
+ 4, /* WL18XX_CONF_HW_RXTX_RATE_6 */
+ 2, /* WL18XX_CONF_HW_RXTX_RATE_5_5 */
+ 1, /* WL18XX_CONF_HW_RXTX_RATE_2 */
+ 0 /* WL18XX_CONF_HW_RXTX_RATE_1 */
+};
+
+static const u8 wl18xx_rate_to_idx_5ghz[] = {
+ /* MCS rates are used only with 11n */
+ 15, /* WL18XX_CONF_HW_RXTX_RATE_MCS15 */
+ 14, /* WL18XX_CONF_HW_RXTX_RATE_MCS14 */
+ 13, /* WL18XX_CONF_HW_RXTX_RATE_MCS13 */
+ 12, /* WL18XX_CONF_HW_RXTX_RATE_MCS12 */
+ 11, /* WL18XX_CONF_HW_RXTX_RATE_MCS11 */
+ 10, /* WL18XX_CONF_HW_RXTX_RATE_MCS10 */
+ 9, /* WL18XX_CONF_HW_RXTX_RATE_MCS9 */
+ 8, /* WL18XX_CONF_HW_RXTX_RATE_MCS8 */
+ 7, /* WL18XX_CONF_HW_RXTX_RATE_MCS7 */
+ 6, /* WL18XX_CONF_HW_RXTX_RATE_MCS6 */
+ 5, /* WL18XX_CONF_HW_RXTX_RATE_MCS5 */
+ 4, /* WL18XX_CONF_HW_RXTX_RATE_MCS4 */
+ 3, /* WL18XX_CONF_HW_RXTX_RATE_MCS3 */
+ 2, /* WL18XX_CONF_HW_RXTX_RATE_MCS2 */
+ 1, /* WL18XX_CONF_HW_RXTX_RATE_MCS1 */
+ 0, /* WL18XX_CONF_HW_RXTX_RATE_MCS0 */
+
+ 7, /* WL18XX_CONF_HW_RXTX_RATE_54 */
+ 6, /* WL18XX_CONF_HW_RXTX_RATE_48 */
+ 5, /* WL18XX_CONF_HW_RXTX_RATE_36 */
+ 4, /* WL18XX_CONF_HW_RXTX_RATE_24 */
+
+ /* TI-specific rate */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_22 */
+
+ 3, /* WL18XX_CONF_HW_RXTX_RATE_18 */
+ 2, /* WL18XX_CONF_HW_RXTX_RATE_12 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_11 */
+ 1, /* WL18XX_CONF_HW_RXTX_RATE_9 */
+ 0, /* WL18XX_CONF_HW_RXTX_RATE_6 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_5_5 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_2 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_1 */
+};
+
+static const u8 *wl18xx_band_rate_to_idx[] = {
+ [IEEE80211_BAND_2GHZ] = wl18xx_rate_to_idx_2ghz,
+ [IEEE80211_BAND_5GHZ] = wl18xx_rate_to_idx_5ghz
+};
+
+enum wl18xx_hw_rates {
+ WL18XX_CONF_HW_RXTX_RATE_MCS15 = 0,
+ WL18XX_CONF_HW_RXTX_RATE_MCS14,
+ WL18XX_CONF_HW_RXTX_RATE_MCS13,
+ WL18XX_CONF_HW_RXTX_RATE_MCS12,
+ WL18XX_CONF_HW_RXTX_RATE_MCS11,
+ WL18XX_CONF_HW_RXTX_RATE_MCS10,
+ WL18XX_CONF_HW_RXTX_RATE_MCS9,
+ WL18XX_CONF_HW_RXTX_RATE_MCS8,
+ WL18XX_CONF_HW_RXTX_RATE_MCS7,
+ WL18XX_CONF_HW_RXTX_RATE_MCS6,
+ WL18XX_CONF_HW_RXTX_RATE_MCS5,
+ WL18XX_CONF_HW_RXTX_RATE_MCS4,
+ WL18XX_CONF_HW_RXTX_RATE_MCS3,
+ WL18XX_CONF_HW_RXTX_RATE_MCS2,
+ WL18XX_CONF_HW_RXTX_RATE_MCS1,
+ WL18XX_CONF_HW_RXTX_RATE_MCS0,
+ WL18XX_CONF_HW_RXTX_RATE_54,
+ WL18XX_CONF_HW_RXTX_RATE_48,
+ WL18XX_CONF_HW_RXTX_RATE_36,
+ WL18XX_CONF_HW_RXTX_RATE_24,
+ WL18XX_CONF_HW_RXTX_RATE_22,
+ WL18XX_CONF_HW_RXTX_RATE_18,
+ WL18XX_CONF_HW_RXTX_RATE_12,
+ WL18XX_CONF_HW_RXTX_RATE_11,
+ WL18XX_CONF_HW_RXTX_RATE_9,
+ WL18XX_CONF_HW_RXTX_RATE_6,
+ WL18XX_CONF_HW_RXTX_RATE_5_5,
+ WL18XX_CONF_HW_RXTX_RATE_2,
+ WL18XX_CONF_HW_RXTX_RATE_1,
+ WL18XX_CONF_HW_RXTX_RATE_MAX,
+};
+
+static struct wlcore_conf wl18xx_conf = {
+ .sg = {
+ .params = {
+ [CONF_SG_ACL_BT_MASTER_MIN_BR] = 10,
+ [CONF_SG_ACL_BT_MASTER_MAX_BR] = 180,
+ [CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10,
+ [CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180,
+ [CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10,
+ [CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80,
+ [CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10,
+ [CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80,
+ [CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8,
+ [CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8,
+ [CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20,
+ [CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20,
+ [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20,
+ [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35,
+ [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16,
+ [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35,
+ [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32,
+ [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50,
+ [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28,
+ [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50,
+ [CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10,
+ [CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20,
+ [CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75,
+ [CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15,
+ [CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27,
+ [CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17,
+ /* active scan params */
+ [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
+ [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
+ [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
+ /* passive scan params */
+ [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR] = 800,
+ [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR] = 200,
+ [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
+ /* passive scan in dual antenna params */
+ [CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0,
+ [CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN] = 0,
+ [CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN] = 0,
+ /* general params */
+ [CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1,
+ [CONF_SG_ANTENNA_CONFIGURATION] = 0,
+ [CONF_SG_BEACON_MISS_PERCENT] = 60,
+ [CONF_SG_DHCP_TIME] = 5000,
+ [CONF_SG_RXT] = 1200,
+ [CONF_SG_TXT] = 1000,
+ [CONF_SG_ADAPTIVE_RXT_TXT] = 1,
+ [CONF_SG_GENERAL_USAGE_BIT_MAP] = 3,
+ [CONF_SG_HV3_MAX_SERVED] = 6,
+ [CONF_SG_PS_POLL_TIMEOUT] = 10,
+ [CONF_SG_UPSD_TIMEOUT] = 10,
+ [CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2,
+ [CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5,
+ [CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30,
+ /* AP params */
+ [CONF_AP_BEACON_MISS_TX] = 3,
+ [CONF_AP_RX_WINDOW_AFTER_BEACON] = 10,
+ [CONF_AP_BEACON_WINDOW_INTERVAL] = 2,
+ [CONF_AP_CONNECTION_PROTECTION_TIME] = 0,
+ [CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25,
+ [CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25,
+ /* CTS Diluting params */
+ [CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0,
+ [CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0,
+ },
+ .state = CONF_SG_PROTECTIVE,
+ },
+ .rx = {
+ .rx_msdu_life_time = 512000,
+ .packet_detection_threshold = 0,
+ .ps_poll_timeout = 15,
+ .upsd_timeout = 15,
+ .rts_threshold = IEEE80211_MAX_RTS_THRESHOLD,
+ .rx_cca_threshold = 0,
+ .irq_blk_threshold = 0xFFFF,
+ .irq_pkt_threshold = 0,
+ .irq_timeout = 600,
+ .queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
+ },
+ .tx = {
+ .tx_energy_detection = 0,
+ .sta_rc_conf = {
+ .enabled_rates = 0,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ .ac_conf_count = 4,
+ .ac_conf = {
+ [CONF_TX_AC_BE] = {
+ .ac = CONF_TX_AC_BE,
+ .cw_min = 15,
+ .cw_max = 63,
+ .aifsn = 3,
+ .tx_op_limit = 0,
+ },
+ [CONF_TX_AC_BK] = {
+ .ac = CONF_TX_AC_BK,
+ .cw_min = 15,
+ .cw_max = 63,
+ .aifsn = 7,
+ .tx_op_limit = 0,
+ },
+ [CONF_TX_AC_VI] = {
+ .ac = CONF_TX_AC_VI,
+ .cw_min = 15,
+ .cw_max = 63,
+ .aifsn = CONF_TX_AIFS_PIFS,
+ .tx_op_limit = 3008,
+ },
+ [CONF_TX_AC_VO] = {
+ .ac = CONF_TX_AC_VO,
+ .cw_min = 15,
+ .cw_max = 63,
+ .aifsn = CONF_TX_AIFS_PIFS,
+ .tx_op_limit = 1504,
+ },
+ },
+ .max_tx_retries = 100,
+ .ap_aging_period = 300,
+ .tid_conf_count = 4,
+ .tid_conf = {
+ [CONF_TX_AC_BE] = {
+ .queue_id = CONF_TX_AC_BE,
+ .channel_type = CONF_CHANNEL_TYPE_EDCF,
+ .tsid = CONF_TX_AC_BE,
+ .ps_scheme = CONF_PS_SCHEME_LEGACY,
+ .ack_policy = CONF_ACK_POLICY_LEGACY,
+ .apsd_conf = {0, 0},
+ },
+ [CONF_TX_AC_BK] = {
+ .queue_id = CONF_TX_AC_BK,
+ .channel_type = CONF_CHANNEL_TYPE_EDCF,
+ .tsid = CONF_TX_AC_BK,
+ .ps_scheme = CONF_PS_SCHEME_LEGACY,
+ .ack_policy = CONF_ACK_POLICY_LEGACY,
+ .apsd_conf = {0, 0},
+ },
+ [CONF_TX_AC_VI] = {
+ .queue_id = CONF_TX_AC_VI,
+ .channel_type = CONF_CHANNEL_TYPE_EDCF,
+ .tsid = CONF_TX_AC_VI,
+ .ps_scheme = CONF_PS_SCHEME_LEGACY,
+ .ack_policy = CONF_ACK_POLICY_LEGACY,
+ .apsd_conf = {0, 0},
+ },
+ [CONF_TX_AC_VO] = {
+ .queue_id = CONF_TX_AC_VO,
+ .channel_type = CONF_CHANNEL_TYPE_EDCF,
+ .tsid = CONF_TX_AC_VO,
+ .ps_scheme = CONF_PS_SCHEME_LEGACY,
+ .ack_policy = CONF_ACK_POLICY_LEGACY,
+ .apsd_conf = {0, 0},
+ },
+ },
+ .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
+ .tx_compl_timeout = 350,
+ .tx_compl_threshold = 10,
+ .basic_rate = CONF_HW_BIT_RATE_1MBPS,
+ .basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
+ .tmpl_short_retry_limit = 10,
+ .tmpl_long_retry_limit = 10,
+ .tx_watchdog_timeout = 5000,
+ },
+ .conn = {
+ .wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
+ .listen_interval = 1,
+ .suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM,
+ .suspend_listen_interval = 3,
+ .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
+ .bcn_filt_ie_count = 3,
+ .bcn_filt_ie = {
+ [0] = {
+ .ie = WLAN_EID_CHANNEL_SWITCH,
+ .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
+ },
+ [1] = {
+ .ie = WLAN_EID_HT_OPERATION,
+ .rule = CONF_BCN_RULE_PASS_ON_CHANGE,
+ },
+ [2] = {
+ .ie = WLAN_EID_ERP_INFO,
+ .rule = CONF_BCN_RULE_PASS_ON_CHANGE,
+ },
+ },
+ .synch_fail_thold = 12,
+ .bss_lose_timeout = 400,
+ .beacon_rx_timeout = 10000,
+ .broadcast_timeout = 20000,
+ .rx_broadcast_in_ps = 1,
+ .ps_poll_threshold = 10,
+ .bet_enable = CONF_BET_MODE_ENABLE,
+ .bet_max_consecutive = 50,
+ .psm_entry_retries = 8,
+ .psm_exit_retries = 16,
+ .psm_entry_nullfunc_retries = 3,
+ .dynamic_ps_timeout = 1500,
+ .forced_ps = false,
+ .keep_alive_interval = 55000,
+ .max_listen_interval = 20,
+ .sta_sleep_auth = WL1271_PSM_ILLEGAL,
+ },
+ .itrim = {
+ .enable = false,
+ .timeout = 50000,
+ },
+ .pm_config = {
+ .host_clk_settling_time = 5000,
+ .host_fast_wakeup_support = CONF_FAST_WAKEUP_DISABLE,
+ },
+ .roam_trigger = {
+ .trigger_pacing = 1,
+ .avg_weight_rssi_beacon = 20,
+ .avg_weight_rssi_data = 10,
+ .avg_weight_snr_beacon = 20,
+ .avg_weight_snr_data = 10,
+ },
+ .scan = {
+ .min_dwell_time_active = 7500,
+ .max_dwell_time_active = 30000,
+ .min_dwell_time_passive = 100000,
+ .max_dwell_time_passive = 100000,
+ .num_probe_reqs = 2,
+ .split_scan_timeout = 50000,
+ },
+ .sched_scan = {
+ /*
+ * Values are in TU/1000, but since the sched scan FW command
+ * params are in TUs, rounding up may occur.
+ */
+ .base_dwell_time = 7500,
+ .max_dwell_time_delta = 22500,
+ /* based on 250 bits per probe @1Mbps */
+ .dwell_time_delta_per_probe = 2000,
+ /* based on 250 bits per probe @6Mbps (plus a bit more) */
+ .dwell_time_delta_per_probe_5 = 350,
+ .dwell_time_passive = 100000,
+ .dwell_time_dfs = 150000,
+ .num_probe_reqs = 2,
+ .rssi_threshold = -90,
+ .snr_threshold = 0,
+ },
+ .ht = {
+ .rx_ba_win_size = 10,
+ .tx_ba_win_size = 64,
+ .inactivity_timeout = 10000,
+ .tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
+ },
+ .mem = {
+ .num_stations = 1,
+ .ssid_profiles = 1,
+ .rx_block_num = 40,
+ .tx_min_block_num = 40,
+ .dynamic_memory = 1,
+ .min_req_tx_blocks = 45,
+ .min_req_rx_blocks = 22,
+ .tx_min = 27,
+ },
+ .fm_coex = {
+ .enable = true,
+ .swallow_period = 5,
+ .n_divider_fref_set_1 = 0xff, /* default */
+ .n_divider_fref_set_2 = 12,
+ .m_divider_fref_set_1 = 0xffff,
+ .m_divider_fref_set_2 = 148, /* default */
+ .coex_pll_stabilization_time = 0xffffffff, /* default */
+ .ldo_stabilization_time = 0xffff, /* default */
+ .fm_disturbed_band_margin = 0xff, /* default */
+ .swallow_clk_diff = 0xff, /* default */
+ },
+ .rx_streaming = {
+ .duration = 150,
+ .queues = 0x1,
+ .interval = 20,
+ .always = 0,
+ },
+ .fwlog = {
+ .mode = WL12XX_FWLOG_ON_DEMAND,
+ .mem_blocks = 2,
+ .severity = 0,
+ .timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED,
+ .output = WL12XX_FWLOG_OUTPUT_HOST,
+ .threshold = 0,
+ },
+ .rate = {
+ .rate_retry_score = 32000,
+ .per_add = 8192,
+ .per_th1 = 2048,
+ .per_th2 = 4096,
+ .max_per = 8100,
+ .inverse_curiosity_factor = 5,
+ .tx_fail_low_th = 4,
+ .tx_fail_high_th = 10,
+ .per_alpha_shift = 4,
+ .per_add_shift = 13,
+ .per_beta1_shift = 10,
+ .per_beta2_shift = 8,
+ .rate_check_up = 2,
+ .rate_check_down = 12,
+ .rate_retry_policy = {
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00,
+ },
+ },
+ .hangover = {
+ .recover_time = 0,
+ .hangover_period = 20,
+ .dynamic_mode = 1,
+ .early_termination_mode = 1,
+ .max_period = 20,
+ .min_period = 1,
+ .increase_delta = 1,
+ .decrease_delta = 2,
+ .quiet_time = 4,
+ .increase_time = 1,
+ .window_size = 16,
+ },
+};
+
+static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
+ .ht = {
+ .mode = HT_MODE_DEFAULT,
+ },
+ .phy = {
+ .phy_standalone = 0x00,
+ .primary_clock_setting_time = 0x05,
+ .clock_valid_on_wake_up = 0x00,
+ .secondary_clock_setting_time = 0x05,
+ .board_type = BOARD_TYPE_HDK_18XX,
+ .rdl = 0x01,
+ .auto_detect = 0x00,
+ .dedicated_fem = FEM_NONE,
+ .low_band_component = COMPONENT_2_WAY_SWITCH,
+ .low_band_component_type = 0x06,
+ .high_band_component = COMPONENT_2_WAY_SWITCH,
+ .high_band_component_type = 0x09,
+ .tcxo_ldo_voltage = 0x00,
+ .xtal_itrim_val = 0x04,
+ .srf_state = 0x00,
+ .io_configuration = 0x01,
+ .sdio_configuration = 0x00,
+ .settings = 0x00,
+ .enable_clpc = 0x00,
+ .enable_tx_low_pwr_on_siso_rdl = 0x00,
+ .rx_profile = 0x00,
+ .pwr_limit_reference_11_abg = 0xc8,
+ .psat = 0,
+ .low_power_val = 0x00,
+ .med_power_val = 0x0a,
+ .high_power_val = 0x1e,
+ .external_pa_dc2dc = 0,
+ .number_of_assembled_ant2_4 = 1,
+ .number_of_assembled_ant5 = 1,
+ },
+};
+
+static const struct wlcore_partition_set wl18xx_ptable[PART_TABLE_LEN] = {
+ [PART_TOP_PRCM_ELP_SOC] = {
+ .mem = { .start = 0x00A02000, .size = 0x00010000 },
+ .reg = { .start = 0x00807000, .size = 0x00005000 },
+ .mem2 = { .start = 0x00800000, .size = 0x0000B000 },
+ .mem3 = { .start = 0x00000000, .size = 0x00000000 },
+ },
+ [PART_DOWN] = {
+ .mem = { .start = 0x00000000, .size = 0x00014000 },
+ .reg = { .start = 0x00810000, .size = 0x0000BFFF },
+ .mem2 = { .start = 0x00000000, .size = 0x00000000 },
+ .mem3 = { .start = 0x00000000, .size = 0x00000000 },
+ },
+ [PART_BOOT] = {
+ .mem = { .start = 0x00700000, .size = 0x0000030c },
+ .reg = { .start = 0x00802000, .size = 0x00014578 },
+ .mem2 = { .start = 0x00B00404, .size = 0x00001000 },
+ .mem3 = { .start = 0x00C00000, .size = 0x00000400 },
+ },
+ [PART_WORK] = {
+ .mem = { .start = 0x00800000, .size = 0x000050FC },
+ .reg = { .start = 0x00B00404, .size = 0x00001000 },
+ .mem2 = { .start = 0x00C00000, .size = 0x00000400 },
+ .mem3 = { .start = 0x00000000, .size = 0x00000000 },
+ },
+ [PART_PHY_INIT] = {
+ .mem = { .start = 0x80926000,
+ .size = sizeof(struct wl18xx_mac_and_phy_params) },
+ .reg = { .start = 0x00000000, .size = 0x00000000 },
+ .mem2 = { .start = 0x00000000, .size = 0x00000000 },
+ .mem3 = { .start = 0x00000000, .size = 0x00000000 },
+ },
+};
+
+static const int wl18xx_rtable[REG_TABLE_LEN] = {
+ [REG_ECPU_CONTROL] = WL18XX_REG_ECPU_CONTROL,
+ [REG_INTERRUPT_NO_CLEAR] = WL18XX_REG_INTERRUPT_NO_CLEAR,
+ [REG_INTERRUPT_ACK] = WL18XX_REG_INTERRUPT_ACK,
+ [REG_COMMAND_MAILBOX_PTR] = WL18XX_REG_COMMAND_MAILBOX_PTR,
+ [REG_EVENT_MAILBOX_PTR] = WL18XX_REG_EVENT_MAILBOX_PTR,
+ [REG_INTERRUPT_TRIG] = WL18XX_REG_INTERRUPT_TRIG_H,
+ [REG_INTERRUPT_MASK] = WL18XX_REG_INTERRUPT_MASK,
+ [REG_PC_ON_RECOVERY] = WL18XX_SCR_PAD4,
+ [REG_CHIP_ID_B] = WL18XX_REG_CHIP_ID_B,
+ [REG_CMD_MBOX_ADDRESS] = WL18XX_CMD_MBOX_ADDRESS,
+
+ /* data access memory addresses, used with partition translation */
+ [REG_SLV_MEM_DATA] = WL18XX_SLV_MEM_DATA,
+ [REG_SLV_REG_DATA] = WL18XX_SLV_REG_DATA,
+
+ /* raw data access memory addresses */
+ [REG_RAW_FW_STATUS_ADDR] = WL18XX_FW_STATUS_ADDR,
+};
+
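+/* PLL configuration per reference clock, indexed by the detected clock type */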
+static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
+ [CLOCK_CONFIG_16_2_M] = { 7, 104, 801, 4, true },
+ [CLOCK_CONFIG_16_368_M] = { 9, 132, 3751, 4, true },
+ [CLOCK_CONFIG_16_8_M] = { 7, 100, 0, 0, false },
+ [CLOCK_CONFIG_19_2_M] = { 8, 100, 0, 0, false },
+ [CLOCK_CONFIG_26_M] = { 13, 120, 0, 0, false },
+ [CLOCK_CONFIG_32_736_M] = { 9, 132, 3751, 4, true },
+ [CLOCK_CONFIG_33_6_M] = { 7, 100, 0, 0, false },
+ [CLOCK_CONFIG_38_468_M] = { 8, 100, 0, 0, false },
+ [CLOCK_CONFIG_52_M] = { 13, 120, 0, 0, false },
+};
+
+/* TODO: maybe move to a new header file? */
+#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw.bin"
+
+static int wl18xx_identify_chip(struct wl1271 *wl)
+{
+ int ret = 0;
+
+ switch (wl->chip.id) {
+ case CHIP_ID_185x_PG20:
+ wl1271_debug(DEBUG_BOOT, "chip id 0x%x (185x PG20)",
+ wl->chip.id);
+ wl->sr_fw_name = WL18XX_FW_NAME;
+ /* wl18xx uses the same firmware for PLT */
+ wl->plt_fw_name = WL18XX_FW_NAME;
+ wl->quirks |= WLCORE_QUIRK_NO_ELP |
+ WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN |
+ WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN |
+ WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN |
+ WLCORE_QUIRK_TX_PAD_LAST_FRAME;
+
+ wlcore_set_min_fw_ver(wl, WL18XX_CHIP_VER, WL18XX_IFTYPE_VER,
+ WL18XX_MAJOR_VER, WL18XX_SUBTYPE_VER,
+ WL18XX_MINOR_VER);
+ break;
+ case CHIP_ID_185x_PG10:
+ wl1271_warning("chip id 0x%x (185x PG10) is deprecated",
+ wl->chip.id);
+ ret = -ENODEV;
+ goto out;
+
+ default:
+ wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
+ ret = -ENODEV;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int wl18xx_set_clk(struct wl1271 *wl)
+{
+ u16 clk_freq;
+ int ret;
+
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
+ if (ret < 0)
+ goto out;
+
+ /* TODO: PG2: apparently we need to read the clk type */
+
+ ret = wl18xx_top_reg_read(wl, PRIMARY_CLK_DETECT, &clk_freq);
+ if (ret < 0)
+ goto out;
+
+ wl1271_debug(DEBUG_BOOT, "clock freq %d (%d, %d, %d, %d, %s)", clk_freq,
+ wl18xx_clk_table[clk_freq].n, wl18xx_clk_table[clk_freq].m,
+ wl18xx_clk_table[clk_freq].p, wl18xx_clk_table[clk_freq].q,
+ wl18xx_clk_table[clk_freq].swallow ? "swallow" : "spit");
+
+ ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_N,
+ wl18xx_clk_table[clk_freq].n);
+ if (ret < 0)
+ goto out;
+
+ ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_M,
+ wl18xx_clk_table[clk_freq].m);
+ if (ret < 0)
+ goto out;
+
+ if (wl18xx_clk_table[clk_freq].swallow) {
+ /* first the 16 lower bits */
+ ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_Q_FACTOR_CFG_1,
+ wl18xx_clk_table[clk_freq].q &
+ PLLSH_WCS_PLL_Q_FACTOR_CFG_1_MASK);
+ if (ret < 0)
+ goto out;
+
+ /* then the 16 higher bits, masked out */
+ ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_Q_FACTOR_CFG_2,
+ (wl18xx_clk_table[clk_freq].q >> 16) &
+ PLLSH_WCS_PLL_Q_FACTOR_CFG_2_MASK);
+ if (ret < 0)
+ goto out;
+
+ /* first the 16 lower bits */
+ ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_P_FACTOR_CFG_1,
+ wl18xx_clk_table[clk_freq].p &
+ PLLSH_WCS_PLL_P_FACTOR_CFG_1_MASK);
+ if (ret < 0)
+ goto out;
+
+ /* then the 16 higher bits, masked out */
+ ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_P_FACTOR_CFG_2,
+ (wl18xx_clk_table[clk_freq].p >> 16) &
+ PLLSH_WCS_PLL_P_FACTOR_CFG_2_MASK);
+ } else {
+ ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_SWALLOW_EN,
+ PLLSH_WCS_PLL_SWALLOW_EN_VAL2);
+ }
+
+out:
+ return ret;
+}
+
+static int wl18xx_boot_soft_reset(struct wl1271 *wl)
+{
+ int ret;
+
+ /* disable Rx/Tx */
+ ret = wlcore_write32(wl, WL18XX_ENABLE, 0x0);
+ if (ret < 0)
+ goto out;
+
+ /* disable auto calibration on start */
+ ret = wlcore_write32(wl, WL18XX_SPARE_A2, 0xffff);
+
+out:
+ return ret;
+}
+
+static int wl18xx_pre_boot(struct wl1271 *wl)
+{
+ int ret;
+
+ ret = wl18xx_set_clk(wl);
+ if (ret < 0)
+ goto out;
+
+ /* Continue the ELP wake up sequence */
+ ret = wlcore_write32(wl, WL18XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
+ if (ret < 0)
+ goto out;
+
+ udelay(500);
+
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
+ if (ret < 0)
+ goto out;
+
+ /* Disable interrupts */
+ ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
+ if (ret < 0)
+ goto out;
+
+ ret = wl18xx_boot_soft_reset(wl);
+
+out:
+ return ret;
+}
+
+static int wl18xx_pre_upload(struct wl1271 *wl)
+{
+ u32 tmp;
+ int ret;
+
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
+ if (ret < 0)
+ goto out;
+
+ /* TODO: check if this is all needed */
+ ret = wlcore_write32(wl, WL18XX_EEPROMLESS_IND, WL18XX_EEPROMLESS_IND);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &tmp);
+ if (ret < 0)
+ goto out;
+
+ wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
+
+ ret = wlcore_read32(wl, WL18XX_SCR_PAD2, &tmp);
+
+out:
+ return ret;
+}
+
+static int wl18xx_set_mac_and_phy(struct wl1271 *wl)
+{
+ struct wl18xx_priv *priv = wl->priv;
+ struct wl18xx_mac_and_phy_params *params;
+ int ret;
+
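+ /* write a copy of the PHY configuration to the PHY init memory area */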
+ params = kmemdup(&priv->conf.phy, sizeof(*params), GFP_KERNEL);
+ if (!params) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_PHY_INIT]);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_write(wl, WL18XX_PHY_INIT_MEM_ADDR, params,
+ sizeof(*params), false);
+
+out:
+ kfree(params);
+ return ret;
+}
+
+static int wl18xx_enable_interrupts(struct wl1271 *wl)
+{
+ u32 event_mask, intr_mask;
+ int ret;
+
+ event_mask = WL18XX_ACX_EVENTS_VECTOR;
+ intr_mask = WL18XX_INTR_MASK;
+
+ ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK, event_mask);
+ if (ret < 0)
+ goto out;
+
+ wlcore_enable_interrupts(wl);
+
+ ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
+ WL1271_ACX_INTR_ALL & ~intr_mask);
+
+out:
+ return ret;
+}
+
+static int wl18xx_boot(struct wl1271 *wl)
+{
+ int ret;
+
+ ret = wl18xx_pre_boot(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl18xx_pre_upload(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_boot_upload_firmware(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl18xx_set_mac_and_phy(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_boot_run_firmware(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl18xx_enable_interrupts(wl);
+
+out:
+ return ret;
+}
+
+static int wl18xx_trigger_cmd(struct wl1271 *wl, int cmd_box_addr,
+ void *buf, size_t len)
+{
+ struct wl18xx_priv *priv = wl->priv;
+
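+ /* the command is always written as a fixed-size, zero-padded buffer */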
+ memcpy(priv->cmd_buf, buf, len);
+ memset(priv->cmd_buf + len, 0, WL18XX_CMD_MAX_SIZE - len);
+
+ return wlcore_write(wl, cmd_box_addr, priv->cmd_buf,
+ WL18XX_CMD_MAX_SIZE, false);
+}
+
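+/* acknowledge a firmware event by writing the event-ack trigger bit */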
+static int wl18xx_ack_event(struct wl1271 *wl)
+{
+ return wlcore_write_reg(wl, REG_INTERRUPT_TRIG,
+ WL18XX_INTR_TRIG_EVENT_ACK);
+}
+
+static u32 wl18xx_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks)
+{
+ u32 blk_size = WL18XX_TX_HW_BLOCK_SIZE;
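+ /* round the length up to whole HW blocks and add the spare blocks */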
+ return (len + blk_size - 1) / blk_size + spare_blks;
+}
+
+static void
+wl18xx_set_tx_desc_blocks(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
+ u32 blks, u32 spare_blks)
+{
+ desc->wl18xx_mem.total_mem_blocks = blks;
+}
+
+static void
+wl18xx_set_tx_desc_data_len(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
+ struct sk_buff *skb)
+{
+ desc->length = cpu_to_le16(skb->len);
+
+ /* if only the last frame is to be padded, we unset this bit on Tx */
+ if (wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME)
+ desc->wl18xx_mem.ctrl = WL18XX_TX_CTRL_NOT_PADDED;
+ else
+ desc->wl18xx_mem.ctrl = 0;
+
+ wl1271_debug(DEBUG_TX, "tx_fill_hdr: hlid: %d "
+ "len: %d life: %d mem: %d", desc->hlid,
+ le16_to_cpu(desc->length),
+ le16_to_cpu(desc->life_time),
+ desc->wl18xx_mem.total_mem_blocks);
+}
+
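+/* report whether the HW padded the payload of this RX descriptor */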
+static enum wl_rx_buf_align
+wl18xx_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc)
+{
+ if (rx_desc & RX_BUF_PADDED_PAYLOAD)
+ return WLCORE_RX_BUF_PADDED;
+
+ return WLCORE_RX_BUF_ALIGNED;
+}
+
+static u32 wl18xx_get_rx_packet_len(struct wl1271 *wl, void *rx_data,
+ u32 data_len)
+{
+ struct wl1271_rx_descriptor *desc = rx_data;
+
+ /* invalid packet */
+ if (data_len < sizeof(*desc))
+ return 0;
+
+ return data_len - sizeof(*desc);
+}
+
+static void wl18xx_tx_immediate_completion(struct wl1271 *wl)
+{
+ wl18xx_tx_immediate_complete(wl);
+}
+
+static int wl18xx_set_host_cfg_bitmap(struct wl1271 *wl, u32 extra_mem_blk)
+{
+ int ret;
+ u32 sdio_align_size = 0;
+ u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE |
+ HOST_IF_CFG_ADD_RX_ALIGNMENT;
+
+ /* Enable Tx SDIO padding */
+ if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN) {
+ host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
+ sdio_align_size = WL12XX_BUS_BLOCK_SIZE;
+ }
+
+ /* Enable Rx SDIO padding */
+ if (wl->quirks & WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN) {
+ host_cfg_bitmap |= HOST_IF_CFG_RX_PAD_TO_SDIO_BLK;
+ sdio_align_size = WL12XX_BUS_BLOCK_SIZE;
+ }
+
+ ret = wl18xx_acx_host_if_cfg_bitmap(wl, host_cfg_bitmap,
+ sdio_align_size, extra_mem_blk,
+ WL18XX_HOST_IF_LEN_SIZE_FIELD);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int wl18xx_hw_init(struct wl1271 *wl)
+{
+ int ret;
+ struct wl18xx_priv *priv = wl->priv;
+
+ /* (re)init private structures. Relevant on recovery as well. */
+ priv->last_fw_rls_idx = 0;
+ priv->extra_spare_vif_count = 0;
+
+ /* set the default number of spare blocks in the host cfg bitmap */
+ ret = wl18xx_set_host_cfg_bitmap(wl, WL18XX_TX_HW_BLOCK_SPARE);
+ if (ret < 0)
+ return ret;
+
+ if (checksum_param) {
+ ret = wl18xx_acx_set_checksum_state(wl);
+ if (ret != 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static void wl18xx_set_tx_desc_csum(struct wl1271 *wl,
+ struct wl1271_tx_hw_descr *desc,
+ struct sk_buff *skb)
+{
+ u32 ip_hdr_offset;
+ struct iphdr *ip_hdr;
+
+ if (!checksum_param) {
+ desc->wl18xx_checksum_data = 0;
+ return;
+ }
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ desc->wl18xx_checksum_data = 0;
+ return;
+ }
+
+ ip_hdr_offset = skb_network_header(skb) - skb_mac_header(skb);
+ if (WARN_ON(ip_hdr_offset >= (1<<7))) {
+ desc->wl18xx_checksum_data = 0;
+ return;
+ }
+
+ desc->wl18xx_checksum_data = ip_hdr_offset << 1;
+
+ /* FW is interested only in the LSB of the protocol TCP=0 UDP=1 */
+ ip_hdr = (void *)skb_network_header(skb);
+ desc->wl18xx_checksum_data |= (ip_hdr->protocol & 0x01);
+}
+
+static void wl18xx_set_rx_csum(struct wl1271 *wl,
+ struct wl1271_rx_descriptor *desc,
+ struct sk_buff *skb)
+{
+ if (desc->status & WL18XX_RX_CHECKSUM_MASK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static bool wl18xx_is_mimo_supported(struct wl1271 *wl)
+{
+ struct wl18xx_priv *priv = wl->priv;
+
+ return priv->conf.phy.number_of_assembled_ant2_4 >= 2;
+}
+
+/*
+ * TODO: instead of having these two functions to get the rate mask,
+ * we should modify the wlvif->rate_set instead
+ */
+static u32 wl18xx_sta_get_ap_rate_mask(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
+{
+ u32 hw_rate_set = wlvif->rate_set;
+
+ if (wlvif->channel_type == NL80211_CHAN_HT40MINUS ||
+ wlvif->channel_type == NL80211_CHAN_HT40PLUS) {
+ wl1271_debug(DEBUG_ACX, "using wide channel rate mask");
+ hw_rate_set |= CONF_TX_RATE_USE_WIDE_CHAN;
+
+ /* we don't support MIMO in wide-channel mode */
+ hw_rate_set &= ~CONF_TX_MIMO_RATES;
+ } else if (wl18xx_is_mimo_supported(wl)) {
+ wl1271_debug(DEBUG_ACX, "using MIMO channel rate mask");
+ hw_rate_set |= CONF_TX_MIMO_RATES;
+ }
+
+ return hw_rate_set;
+}
+
+static u32 wl18xx_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
+{
+ if (wlvif->channel_type == NL80211_CHAN_HT40MINUS ||
+ wlvif->channel_type == NL80211_CHAN_HT40PLUS) {
+ wl1271_debug(DEBUG_ACX, "using wide channel rate mask");
+
+ /* sanity check - we don't support this */
+ if (WARN_ON(wlvif->band != IEEE80211_BAND_5GHZ))
+ return 0;
+
+ return CONF_TX_RATE_USE_WIDE_CHAN;
+ } else if (wl18xx_is_mimo_supported(wl) &&
+ wlvif->band == IEEE80211_BAND_2GHZ) {
+ wl1271_debug(DEBUG_ACX, "using MIMO rate mask");
+ /*
+ * we don't care about HT channel here - if a peer doesn't
+ * support MIMO, we won't enable it in its rates
+ */
+ return CONF_TX_MIMO_RATES;
+ } else {
+ return 0;
+ }
+}
+
+static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
+{
+ u32 fuse;
+ int ret;
+
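+ /* the PG version is encoded in the chip's fuse registers */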
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_1_3, &fuse);
+ if (ret < 0)
+ goto out;
+
+ if (ver)
+ *ver = (fuse & WL18XX_PG_VER_MASK) >> WL18XX_PG_VER_OFFSET;
+
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
+
+out:
+ return ret;
+}
+
+#define WL18XX_CONF_FILE_NAME "ti-connectivity/wl18xx-conf.bin"
+static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
+{
+ struct wl18xx_priv *priv = wl->priv;
+ struct wlcore_conf_file *conf_file;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, WL18XX_CONF_FILE_NAME, dev);
+ if (ret < 0) {
+ wl1271_error("could not get configuration binary %s: %d",
+ WL18XX_CONF_FILE_NAME, ret);
+ goto out_fallback;
+ }
+
+ if (fw->size != WL18XX_CONF_SIZE) {
+ wl1271_error("configuration binary file size is wrong, expected %zu got %zu",
+ WL18XX_CONF_SIZE, fw->size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ conf_file = (struct wlcore_conf_file *) fw->data;
+
+ if (conf_file->header.magic != cpu_to_le32(WL18XX_CONF_MAGIC)) {
+ wl1271_error("configuration binary file magic number mismatch, "
+ "expected 0x%0x got 0x%0x", WL18XX_CONF_MAGIC,
+ conf_file->header.magic);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (conf_file->header.version != cpu_to_le32(WL18XX_CONF_VERSION)) {
+ wl1271_error("configuration binary file version not supported, "
+ "expected 0x%08x got 0x%08x",
+ WL18XX_CONF_VERSION, conf_file->header.version);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&wl->conf, &conf_file->core, sizeof(wl18xx_conf));
+ memcpy(&priv->conf, &conf_file->priv, sizeof(priv->conf));
+
+ goto out;
+
+out_fallback:
+ wl1271_warning("falling back to default config");
+
+ /* apply driver default configuration */
+ memcpy(&wl->conf, &wl18xx_conf, sizeof(wl18xx_conf));
+ /* apply default private configuration */
+ memcpy(&priv->conf, &wl18xx_default_priv_conf, sizeof(priv->conf));
+
+ /* For now we just fall back */
+ return 0;
+
+out:
+ release_firmware(fw);
+ return ret;
+}
+
+static int wl18xx_plt_init(struct wl1271 *wl)
+{
+ int ret;
+
+ /* calibrator-based auto/FEM detect is not supported on 18xx */
+ if (wl->plt_mode == PLT_FEM_DETECT) {
+ wl1271_error("wl18xx_plt_init: PLT FEM_DETECT not supported");
+ return -EINVAL;
+ }
+
+ ret = wlcore_write32(wl, WL18XX_SCR_PAD8, WL18XX_SCR_PAD8_PLT);
+ if (ret < 0)
+ return ret;
+
+ return wl->ops->boot(wl);
+}
+
+static int wl18xx_get_mac(struct wl1271 *wl)
+{
+ u32 mac1, mac2;
+ int ret;
+
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_read32(wl, WL18XX_REG_FUSE_BD_ADDR_1, &mac1);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_read32(wl, WL18XX_REG_FUSE_BD_ADDR_2, &mac2);
+ if (ret < 0)
+ goto out;
+
+ /* these are the two parts of the BD_ADDR */
+ wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) +
+ ((mac1 & 0xff000000) >> 24);
+ wl->fuse_nic_addr = (mac1 & 0xffffff);
+
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_DOWN]);
+
+out:
+ return ret;
+}
+
+static int wl18xx_handle_static_data(struct wl1271 *wl,
+ struct wl1271_static_data *static_data)
+{
+ struct wl18xx_static_data_priv *static_data_priv =
+ (struct wl18xx_static_data_priv *) static_data->priv;
+
+ wl1271_info("PHY firmware version: %s", static_data_priv->phy_version);
+
+ return 0;
+}
+
+static int wl18xx_get_spare_blocks(struct wl1271 *wl, bool is_gem)
+{
+ struct wl18xx_priv *priv = wl->priv;
+
+ /* If we have VIFs requiring extra spare, indulge them */
+ if (priv->extra_spare_vif_count)
+ return WL18XX_TX_HW_EXTRA_BLOCK_SPARE;
+
+ return WL18XX_TX_HW_BLOCK_SPARE;
+}
+
+static int wl18xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key_conf)
+{
+ struct wl18xx_priv *priv = wl->priv;
+ bool change_spare = false;
+ int ret;
+
+ /*
+ * when adding the first or removing the last GEM/TKIP interface,
+ * we have to adjust the number of spare blocks.
+ */
+ change_spare = (key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
+ key_conf->cipher == WLAN_CIPHER_SUITE_TKIP) &&
+ ((priv->extra_spare_vif_count == 0 && cmd == SET_KEY) ||
+ (priv->extra_spare_vif_count == 1 && cmd == DISABLE_KEY));
+
+ /* no need to change spare - just regular set_key */
+ if (!change_spare)
+ return wlcore_set_key(wl, cmd, vif, sta, key_conf);
+
+ /*
+ * stop the queues and flush to ensure the next packets are
+ * in sync with FW spare block accounting
+ */
+ wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
+ wl1271_tx_flush(wl);
+
+ ret = wlcore_set_key(wl, cmd, vif, sta, key_conf);
+ if (ret < 0)
+ goto out;
+
+ /* key is now set, change the spare blocks */
+ if (cmd == SET_KEY) {
+ ret = wl18xx_set_host_cfg_bitmap(wl,
+ WL18XX_TX_HW_EXTRA_BLOCK_SPARE);
+ if (ret < 0)
+ goto out;
+
+ priv->extra_spare_vif_count++;
+ } else {
+ ret = wl18xx_set_host_cfg_bitmap(wl,
+ WL18XX_TX_HW_BLOCK_SPARE);
+ if (ret < 0)
+ goto out;
+
+ priv->extra_spare_vif_count--;
+ }
+
+out:
+ wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
+ return ret;
+}
+
+static u32 wl18xx_pre_pkt_send(struct wl1271 *wl,
+ u32 buf_offset, u32 last_len)
+{
+ if (wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) {
+ struct wl1271_tx_hw_descr *last_desc;
+
+ /* get the last TX HW descriptor written to the aggr buf */
+ last_desc = (struct wl1271_tx_hw_descr *)(wl->aggr_buf +
+ buf_offset - last_len);
+
+ /* the last frame is padded up to an SDIO block */
+ last_desc->wl18xx_mem.ctrl &= ~WL18XX_TX_CTRL_NOT_PADDED;
+ return ALIGN(buf_offset, WL12XX_BUS_BLOCK_SIZE);
+ }
+
+ /* no modifications */
+ return buf_offset;
+}
+
+static struct wlcore_ops wl18xx_ops = {
+ .identify_chip = wl18xx_identify_chip,
+ .boot = wl18xx_boot,
+ .plt_init = wl18xx_plt_init,
+ .trigger_cmd = wl18xx_trigger_cmd,
+ .ack_event = wl18xx_ack_event,
+ .calc_tx_blocks = wl18xx_calc_tx_blocks,
+ .set_tx_desc_blocks = wl18xx_set_tx_desc_blocks,
+ .set_tx_desc_data_len = wl18xx_set_tx_desc_data_len,
+ .get_rx_buf_align = wl18xx_get_rx_buf_align,
+ .get_rx_packet_len = wl18xx_get_rx_packet_len,
+ .tx_immediate_compl = wl18xx_tx_immediate_completion,
+ .tx_delayed_compl = NULL,
+ .hw_init = wl18xx_hw_init,
+ .set_tx_desc_csum = wl18xx_set_tx_desc_csum,
+ .get_pg_ver = wl18xx_get_pg_ver,
+ .set_rx_csum = wl18xx_set_rx_csum,
+ .sta_get_ap_rate_mask = wl18xx_sta_get_ap_rate_mask,
+ .ap_get_mimo_wide_rate_mask = wl18xx_ap_get_mimo_wide_rate_mask,
+ .get_mac = wl18xx_get_mac,
+ .debugfs_init = wl18xx_debugfs_add_files,
+ .handle_static_data = wl18xx_handle_static_data,
+ .get_spare_blocks = wl18xx_get_spare_blocks,
+ .set_key = wl18xx_set_key,
+ .pre_pkt_send = wl18xx_pre_pkt_send,
+};
+
+/* HT cap appropriate for wide channels in 2GHz */
+static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_2ghz = {
+ .cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_DSSSCCK40,
+ .ht_supported = true,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .mcs = {
+ .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ .rx_highest = cpu_to_le16(150),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ },
+};
+
+/* HT cap appropriate for wide channels in 5GHz */
+static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_5ghz = {
+ .cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40,
+ .ht_supported = true,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .mcs = {
+ .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ .rx_highest = cpu_to_le16(150),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ },
+};
+
+/* HT cap appropriate for SISO 20 */
+static struct ieee80211_sta_ht_cap wl18xx_siso20_ht_cap = {
+ .cap = IEEE80211_HT_CAP_SGI_20,
+ .ht_supported = true,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .mcs = {
+ .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ .rx_highest = cpu_to_le16(72),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ },
+};
+
+/* HT cap appropriate for MIMO rates in a 20MHz channel */
+static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
+ .cap = IEEE80211_HT_CAP_SGI_20,
+ .ht_supported = true,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .mcs = {
+ .rx_mask = { 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, },
+ .rx_highest = cpu_to_le16(144),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ },
+};
+
+static int __devinit wl18xx_probe(struct platform_device *pdev)
+{
+ struct wl1271 *wl;
+ struct ieee80211_hw *hw;
+ struct wl18xx_priv *priv;
+ int ret;
+
+ hw = wlcore_alloc_hw(sizeof(*priv));
+ if (IS_ERR(hw)) {
+ wl1271_error("can't allocate hw");
+ ret = PTR_ERR(hw);
+ goto out;
+ }
+
+ wl = hw->priv;
+ priv = wl->priv;
+ wl->ops = &wl18xx_ops;
+ wl->ptable = wl18xx_ptable;
+ wl->rtable = wl18xx_rtable;
+ wl->num_tx_desc = 32;
+ wl->num_rx_desc = 32;
+ wl->band_rate_to_idx = wl18xx_band_rate_to_idx;
+ wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX;
+ wl->hw_min_ht_rate = WL18XX_CONF_HW_RXTX_RATE_MCS0;
+ wl->fw_status_priv_len = sizeof(struct wl18xx_fw_status_priv);
+ wl->stats.fw_stats_len = sizeof(struct wl18xx_acx_statistics);
+ wl->static_data_priv_len = sizeof(struct wl18xx_static_data_priv);
+
+ if (num_rx_desc_param != -1)
+ wl->num_rx_desc = num_rx_desc_param;
+
+ ret = wl18xx_conf_init(wl, &pdev->dev);
+ if (ret < 0)
+ goto out_free;
+
+ /* If the module param is set, update it in conf */
+ if (board_type_param) {
+ if (!strcmp(board_type_param, "fpga")) {
+ priv->conf.phy.board_type = BOARD_TYPE_FPGA_18XX;
+ } else if (!strcmp(board_type_param, "hdk")) {
+ priv->conf.phy.board_type = BOARD_TYPE_HDK_18XX;
+ } else if (!strcmp(board_type_param, "dvp")) {
+ priv->conf.phy.board_type = BOARD_TYPE_DVP_18XX;
+ } else if (!strcmp(board_type_param, "evb")) {
+ priv->conf.phy.board_type = BOARD_TYPE_EVB_18XX;
+ } else if (!strcmp(board_type_param, "com8")) {
+ priv->conf.phy.board_type = BOARD_TYPE_COM8_18XX;
+ } else {
+ wl1271_error("invalid board type '%s'",
+ board_type_param);
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+
+ /* HACK! Just for now we hardcode COM8 and HDK to 0x06 */
+ switch (priv->conf.phy.board_type) {
+ case BOARD_TYPE_HDK_18XX:
+ case BOARD_TYPE_COM8_18XX:
+ priv->conf.phy.low_band_component_type = 0x06;
+ break;
+ case BOARD_TYPE_FPGA_18XX:
+ case BOARD_TYPE_DVP_18XX:
+ case BOARD_TYPE_EVB_18XX:
+ priv->conf.phy.low_band_component_type = 0x05;
+ break;
+ default:
+ wl1271_error("invalid board type '%d'",
+ priv->conf.phy.board_type);
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ if (low_band_component_param != -1)
+ priv->conf.phy.low_band_component = low_band_component_param;
+ if (low_band_component_type_param != -1)
+ priv->conf.phy.low_band_component_type =
+ low_band_component_type_param;
+ if (high_band_component_param != -1)
+ priv->conf.phy.high_band_component = high_band_component_param;
+ if (high_band_component_type_param != -1)
+ priv->conf.phy.high_band_component_type =
+ high_band_component_type_param;
+ if (pwr_limit_reference_11_abg_param != -1)
+ priv->conf.phy.pwr_limit_reference_11_abg =
+ pwr_limit_reference_11_abg_param;
+ if (n_antennas_2_param != -1)
+ priv->conf.phy.number_of_assembled_ant2_4 = n_antennas_2_param;
+ if (n_antennas_5_param != -1)
+ priv->conf.phy.number_of_assembled_ant5 = n_antennas_5_param;
+ if (dc2dc_param != -1)
+ priv->conf.phy.external_pa_dc2dc = dc2dc_param;
+
+ if (ht_mode_param) {
+ if (!strcmp(ht_mode_param, "default"))
+ priv->conf.ht.mode = HT_MODE_DEFAULT;
+ else if (!strcmp(ht_mode_param, "wide"))
+ priv->conf.ht.mode = HT_MODE_WIDE;
+ else if (!strcmp(ht_mode_param, "siso20"))
+ priv->conf.ht.mode = HT_MODE_SISO20;
+ else {
+ wl1271_error("invalid ht_mode '%s'", ht_mode_param);
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+
+ if (priv->conf.ht.mode == HT_MODE_DEFAULT) {
+ /*
+	 * Only support MIMO when multiple antennas are installed. Fall
+	 * back to siso20 otherwise.
+ */
+ if (wl18xx_is_mimo_supported(wl))
+ wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+ &wl18xx_mimo_ht_cap_2ghz);
+ else
+ wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+ &wl18xx_siso20_ht_cap);
+
+		/* 5GHz is always wide */
+ wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+ &wl18xx_siso40_ht_cap_5ghz);
+ } else if (priv->conf.ht.mode == HT_MODE_WIDE) {
+ wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+ &wl18xx_siso40_ht_cap_2ghz);
+ wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+ &wl18xx_siso40_ht_cap_5ghz);
+ } else if (priv->conf.ht.mode == HT_MODE_SISO20) {
+ wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+ &wl18xx_siso20_ht_cap);
+ wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+ &wl18xx_siso20_ht_cap);
+ }
+
+ if (!checksum_param) {
+ wl18xx_ops.set_rx_csum = NULL;
+ wl18xx_ops.init_vif = NULL;
+ }
+
+ wl->enable_11a = enable_11a_param;
+
+ return wlcore_probe(wl, pdev);
+
+out_free:
+ wlcore_free_hw(wl);
+out:
+ return ret;
+}
+
+static const struct platform_device_id wl18xx_id_table[] __devinitconst = {
+ { "wl18xx", 0 },
+ { } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(platform, wl18xx_id_table);
+
+static struct platform_driver wl18xx_driver = {
+ .probe = wl18xx_probe,
+ .remove = __devexit_p(wlcore_remove),
+ .id_table = wl18xx_id_table,
+ .driver = {
+ .name = "wl18xx_driver",
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init wl18xx_init(void)
+{
+ return platform_driver_register(&wl18xx_driver);
+}
+module_init(wl18xx_init);
+
+static void __exit wl18xx_exit(void)
+{
+ platform_driver_unregister(&wl18xx_driver);
+}
+module_exit(wl18xx_exit);
+
+module_param_named(ht_mode, ht_mode_param, charp, S_IRUSR);
+MODULE_PARM_DESC(ht_mode, "Force HT mode: wide or siso20");
+
+module_param_named(board_type, board_type_param, charp, S_IRUSR);
+MODULE_PARM_DESC(board_type, "Board type: fpga, hdk (default), evb, com8 or "
+ "dvp");
+
+module_param_named(checksum, checksum_param, bool, S_IRUSR);
+MODULE_PARM_DESC(checksum, "Enable TCP checksum: boolean (defaults to false)");
+
+module_param_named(enable_11a, enable_11a_param, bool, S_IRUSR);
+MODULE_PARM_DESC(enable_11a, "Enable 11a (5GHz): boolean (defaults to true)");
+
+module_param_named(dc2dc, dc2dc_param, int, S_IRUSR);
+MODULE_PARM_DESC(dc2dc, "External DC2DC: u8 (defaults to 0)");
+
+module_param_named(n_antennas_2, n_antennas_2_param, int, S_IRUSR);
+MODULE_PARM_DESC(n_antennas_2,
+ "Number of installed 2.4GHz antennas: 1 (default) or 2");
+
+module_param_named(n_antennas_5, n_antennas_5_param, int, S_IRUSR);
+MODULE_PARM_DESC(n_antennas_5,
+ "Number of installed 5GHz antennas: 1 (default) or 2");
+
+module_param_named(low_band_component, low_band_component_param, int,
+ S_IRUSR);
+MODULE_PARM_DESC(low_band_component, "Low band component: u8 "
+ "(default is 0x01)");
+
+module_param_named(low_band_component_type, low_band_component_type_param,
+ int, S_IRUSR);
+MODULE_PARM_DESC(low_band_component_type, "Low band component type: u8 "
+ "(default is 0x05 or 0x06 depending on the board_type)");
+
+module_param_named(high_band_component, high_band_component_param, int,
+ S_IRUSR);
+MODULE_PARM_DESC(high_band_component, "High band component: u8 "
+ "(default is 0x01)");
+
+module_param_named(high_band_component_type, high_band_component_type_param,
+ int, S_IRUSR);
+MODULE_PARM_DESC(high_band_component_type, "High band component type: u8 "
+ "(default is 0x09)");
+
+module_param_named(pwr_limit_reference_11_abg,
+ pwr_limit_reference_11_abg_param, int, S_IRUSR);
+MODULE_PARM_DESC(pwr_limit_reference_11_abg, "Power limit reference: u8 "
+ "(default is 0xc8)");
+
+module_param_named(num_rx_desc,
+ num_rx_desc_param, int, S_IRUSR);
+MODULE_PARM_DESC(num_rx_desc,
+ "Number of Rx descriptors: u8 (default is 32)");
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
+MODULE_FIRMWARE(WL18XX_FW_NAME);
diff --git a/drivers/net/wireless/ti/wl18xx/reg.h b/drivers/net/wireless/ti/wl18xx/reg.h
new file mode 100644
index 0000000..937b71d
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/reg.h
@@ -0,0 +1,191 @@
+/*
+ * This file is part of wlcore
+ *
+ * Copyright (C) 2011 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __REG_H__
+#define __REG_H__
+
+#define WL18XX_REGISTERS_BASE 0x00800000
+#define WL18XX_CODE_BASE 0x00000000
+#define WL18XX_DATA_BASE 0x00400000
+#define WL18XX_DOUBLE_BUFFER_BASE 0x00600000
+#define WL18XX_MCU_KEY_SEARCH_BASE 0x00700000
+#define WL18XX_PHY_BASE 0x00900000
+#define WL18XX_TOP_OCP_BASE 0x00A00000
+#define WL18XX_PACKET_RAM_BASE 0x00B00000
+#define WL18XX_HOST_BASE 0x00C00000
+
+#define WL18XX_REGISTERS_DOWN_SIZE 0x0000B000
+
+#define WL18XX_REG_BOOT_PART_START 0x00802000
+#define WL18XX_REG_BOOT_PART_SIZE 0x00014578
+
+#define WL18XX_PHY_INIT_MEM_ADDR 0x80926000
+
+#define WL18XX_SDIO_WSPI_BASE (WL18XX_REGISTERS_BASE)
+#define WL18XX_REG_CONFIG_BASE (WL18XX_REGISTERS_BASE + 0x02000)
+#define WL18XX_WGCM_REGS_BASE (WL18XX_REGISTERS_BASE + 0x03000)
+#define WL18XX_ENC_BASE (WL18XX_REGISTERS_BASE + 0x04000)
+#define WL18XX_INTERRUPT_BASE (WL18XX_REGISTERS_BASE + 0x05000)
+#define WL18XX_UART_BASE (WL18XX_REGISTERS_BASE + 0x06000)
+#define WL18XX_WELP_BASE (WL18XX_REGISTERS_BASE + 0x07000)
+#define WL18XX_TCP_CKSM_BASE (WL18XX_REGISTERS_BASE + 0x08000)
+#define WL18XX_FIFO_BASE (WL18XX_REGISTERS_BASE + 0x09000)
+#define WL18XX_OCP_BRIDGE_BASE (WL18XX_REGISTERS_BASE + 0x0A000)
+#define WL18XX_PMAC_RX_BASE (WL18XX_REGISTERS_BASE + 0x14800)
+#define WL18XX_PMAC_ACM_BASE (WL18XX_REGISTERS_BASE + 0x14C00)
+#define WL18XX_PMAC_TX_BASE (WL18XX_REGISTERS_BASE + 0x15000)
+#define WL18XX_PMAC_CSR_BASE (WL18XX_REGISTERS_BASE + 0x15400)
+
+#define WL18XX_REG_ECPU_CONTROL (WL18XX_REGISTERS_BASE + 0x02004)
+#define WL18XX_REG_INTERRUPT_NO_CLEAR (WL18XX_REGISTERS_BASE + 0x050E8)
+#define WL18XX_REG_INTERRUPT_ACK (WL18XX_REGISTERS_BASE + 0x050F0)
+#define WL18XX_REG_INTERRUPT_TRIG (WL18XX_REGISTERS_BASE + 0x5074)
+#define WL18XX_REG_INTERRUPT_TRIG_H (WL18XX_REGISTERS_BASE + 0x5078)
+#define WL18XX_REG_INTERRUPT_MASK (WL18XX_REGISTERS_BASE + 0x0050DC)
+
+#define WL18XX_REG_CHIP_ID_B (WL18XX_REGISTERS_BASE + 0x01542C)
+
+#define WL18XX_SLV_MEM_DATA (WL18XX_HOST_BASE + 0x0018)
+#define WL18XX_SLV_REG_DATA (WL18XX_HOST_BASE + 0x0008)
+
+/* Scratch Pad registers */
+#define WL18XX_SCR_PAD0 (WL18XX_REGISTERS_BASE + 0x0154EC)
+#define WL18XX_SCR_PAD1 (WL18XX_REGISTERS_BASE + 0x0154F0)
+#define WL18XX_SCR_PAD2 (WL18XX_REGISTERS_BASE + 0x0154F4)
+#define WL18XX_SCR_PAD3 (WL18XX_REGISTERS_BASE + 0x0154F8)
+#define WL18XX_SCR_PAD4 (WL18XX_REGISTERS_BASE + 0x0154FC)
+#define WL18XX_SCR_PAD4_SET (WL18XX_REGISTERS_BASE + 0x015504)
+#define WL18XX_SCR_PAD4_CLR (WL18XX_REGISTERS_BASE + 0x015500)
+#define WL18XX_SCR_PAD5 (WL18XX_REGISTERS_BASE + 0x015508)
+#define WL18XX_SCR_PAD5_SET (WL18XX_REGISTERS_BASE + 0x015510)
+#define WL18XX_SCR_PAD5_CLR (WL18XX_REGISTERS_BASE + 0x01550C)
+#define WL18XX_SCR_PAD6 (WL18XX_REGISTERS_BASE + 0x015514)
+#define WL18XX_SCR_PAD7 (WL18XX_REGISTERS_BASE + 0x015518)
+#define WL18XX_SCR_PAD8 (WL18XX_REGISTERS_BASE + 0x01551C)
+#define WL18XX_SCR_PAD9 (WL18XX_REGISTERS_BASE + 0x015520)
+
+/* Spare registers */
+#define WL18XX_SPARE_A1 (WL18XX_REGISTERS_BASE + 0x002194)
+#define WL18XX_SPARE_A2 (WL18XX_REGISTERS_BASE + 0x002198)
+#define WL18XX_SPARE_A3 (WL18XX_REGISTERS_BASE + 0x00219C)
+#define WL18XX_SPARE_A4 (WL18XX_REGISTERS_BASE + 0x0021A0)
+#define WL18XX_SPARE_A5 (WL18XX_REGISTERS_BASE + 0x0021A4)
+#define WL18XX_SPARE_A6 (WL18XX_REGISTERS_BASE + 0x0021A8)
+#define WL18XX_SPARE_A7 (WL18XX_REGISTERS_BASE + 0x0021AC)
+#define WL18XX_SPARE_A8 (WL18XX_REGISTERS_BASE + 0x0021B0)
+#define WL18XX_SPARE_B1 (WL18XX_REGISTERS_BASE + 0x015524)
+#define WL18XX_SPARE_B2 (WL18XX_REGISTERS_BASE + 0x015528)
+#define WL18XX_SPARE_B3 (WL18XX_REGISTERS_BASE + 0x01552C)
+#define WL18XX_SPARE_B4 (WL18XX_REGISTERS_BASE + 0x015530)
+#define WL18XX_SPARE_B5 (WL18XX_REGISTERS_BASE + 0x015534)
+#define WL18XX_SPARE_B6 (WL18XX_REGISTERS_BASE + 0x015538)
+#define WL18XX_SPARE_B7 (WL18XX_REGISTERS_BASE + 0x01553C)
+#define WL18XX_SPARE_B8 (WL18XX_REGISTERS_BASE + 0x015540)
+
+#define WL18XX_REG_COMMAND_MAILBOX_PTR (WL18XX_SCR_PAD0)
+#define WL18XX_REG_EVENT_MAILBOX_PTR (WL18XX_SCR_PAD1)
+#define WL18XX_EEPROMLESS_IND (WL18XX_SCR_PAD4)
+
+#define WL18XX_WELP_ARM_COMMAND (WL18XX_REGISTERS_BASE + 0x7100)
+#define WL18XX_ENABLE (WL18XX_REGISTERS_BASE + 0x01543C)
+
+/* PRCM registers */
+#define PLATFORM_DETECTION 0xA0E3E0
+#define OCS_EN 0xA02080
+#define PRIMARY_CLK_DETECT 0xA020A6
+#define PLLSH_WCS_PLL_N 0xA02362
+#define PLLSH_WCS_PLL_M 0xA02360
+#define PLLSH_WCS_PLL_Q_FACTOR_CFG_1 0xA02364
+#define PLLSH_WCS_PLL_Q_FACTOR_CFG_2 0xA02366
+#define PLLSH_WCS_PLL_P_FACTOR_CFG_1 0xA02368
+#define PLLSH_WCS_PLL_P_FACTOR_CFG_2 0xA0236A
+#define PLLSH_WCS_PLL_SWALLOW_EN 0xA0236C
+#define PLLSH_WL_PLL_EN 0xA02392
+
+#define PLLSH_WCS_PLL_Q_FACTOR_CFG_1_MASK 0xFFFF
+#define PLLSH_WCS_PLL_Q_FACTOR_CFG_2_MASK 0x007F
+#define PLLSH_WCS_PLL_P_FACTOR_CFG_1_MASK 0xFFFF
+#define PLLSH_WCS_PLL_P_FACTOR_CFG_2_MASK 0x000F
+
+#define PLLSH_WCS_PLL_SWALLOW_EN_VAL1 0x1
+#define PLLSH_WCS_PLL_SWALLOW_EN_VAL2 0x12
+
+#define WL18XX_REG_FUSE_DATA_1_3 0xA0260C
+#define WL18XX_PG_VER_MASK 0x70
+#define WL18XX_PG_VER_OFFSET 4
+
+#define WL18XX_REG_FUSE_BD_ADDR_1 0xA02602
+#define WL18XX_REG_FUSE_BD_ADDR_2 0xA02606
+
+#define WL18XX_CMD_MBOX_ADDRESS 0xB007B4
+
+#define WL18XX_FW_STATUS_ADDR 0x50F8
+
+#define CHIP_ID_185x_PG10 (0x06030101)
+#define CHIP_ID_185x_PG20 (0x06030111)
+
+/*
+ * Host Command Interrupt. Setting this bit masks
+ * the interrupt that the host issues to inform
+ * the FW that it has sent a command
+ * to the WLAN hardware Command Mailbox.
+ */
+#define WL18XX_INTR_TRIG_CMD BIT(28)
+
+/*
+ * Host Event Acknowledge Interrupt. The host
+ * sets this bit to acknowledge that it received
+ * the unsolicited information from the event
+ * mailbox.
+ */
+#define WL18XX_INTR_TRIG_EVENT_ACK BIT(29)
+
+/*
+ * To boot the firmware in PLT mode we need to write this value in
+ * SCR_PAD8 before starting.
+ */
+#define WL18XX_SCR_PAD8_PLT 0xBABABEBE
+
+enum {
+ COMPONENT_NO_SWITCH = 0x0,
+ COMPONENT_2_WAY_SWITCH = 0x1,
+ COMPONENT_3_WAY_SWITCH = 0x2,
+ COMPONENT_MATCHING = 0x3,
+};
+
+enum {
+ FEM_NONE = 0x0,
+ FEM_VENDOR_1 = 0x1,
+ FEM_VENDOR_2 = 0x2,
+ FEM_VENDOR_3 = 0x3,
+};
+
+enum {
+ BOARD_TYPE_EVB_18XX = 0,
+ BOARD_TYPE_DVP_18XX = 1,
+ BOARD_TYPE_HDK_18XX = 2,
+ BOARD_TYPE_FPGA_18XX = 3,
+ BOARD_TYPE_COM8_18XX = 4,
+
+ NUM_BOARD_TYPES,
+};
+
+#endif /* __REG_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c
new file mode 100644
index 0000000..5b1fb10
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/tx.c
@@ -0,0 +1,127 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "../wlcore/wlcore.h"
+#include "../wlcore/cmd.h"
+#include "../wlcore/debug.h"
+#include "../wlcore/acx.h"
+#include "../wlcore/tx.h"
+
+#include "wl18xx.h"
+#include "tx.h"
+
+static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
+{
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+ int id = tx_stat_byte & WL18XX_TX_STATUS_DESC_ID_MASK;
+ bool tx_success;
+
+ /* check for id legality */
+ if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
+ wl1271_warning("illegal id in tx completion: %d", id);
+ return;
+ }
+
+ /* a zero bit indicates Tx success */
+ tx_success = !(tx_stat_byte & BIT(WL18XX_TX_STATUS_STAT_BIT_IDX));
+
+ skb = wl->tx_frames[id];
+ info = IEEE80211_SKB_CB(skb);
+
+ if (wl12xx_is_dummy_packet(wl, skb)) {
+ wl1271_free_tx_id(wl, id);
+ return;
+ }
+
+ /* update the TX status info */
+ if (tx_success && !(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ /* no real data about Tx completion */
+ info->status.rates[0].idx = -1;
+ info->status.rates[0].count = 0;
+ info->status.rates[0].flags = 0;
+ info->status.ack_signal = -1;
+
+ if (!tx_success)
+ wl->stats.retry_count++;
+
+ /*
+ * TODO: update sequence number for encryption? seems to be
+ * unsupported for now. needed for recovery with encryption.
+ */
+
+ /* remove private header from packet */
+ skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
+
+ /* remove TKIP header space if present */
+ if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
+ info->control.hw_key &&
+ info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data, hdrlen);
+ skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
+ }
+
+ wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p success %d",
+ id, skb, tx_success);
+
+ /* return the packet to the stack */
+ skb_queue_tail(&wl->deferred_tx_queue, skb);
+ queue_work(wl->freezable_wq, &wl->netstack_work);
+ wl1271_free_tx_id(wl, id);
+}
+
+void wl18xx_tx_immediate_complete(struct wl1271 *wl)
+{
+ struct wl18xx_fw_status_priv *status_priv =
+ (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+ struct wl18xx_priv *priv = wl->priv;
+ u8 i;
+
+ /* nothing to do here */
+ if (priv->last_fw_rls_idx == status_priv->fw_release_idx)
+ return;
+
+ /* freed Tx descriptors */
+ wl1271_debug(DEBUG_TX, "last released desc = %d, current idx = %d",
+ priv->last_fw_rls_idx, status_priv->fw_release_idx);
+
+ if (status_priv->fw_release_idx >= WL18XX_FW_MAX_TX_STATUS_DESC) {
+ wl1271_error("invalid desc release index %d",
+ status_priv->fw_release_idx);
+ WARN_ON(1);
+ return;
+ }
+
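+	/*
+	 * Walk the circular released_tx_desc array from the last index we
+	 * handled up to the index reported by the FW, completing one packet
+	 * per entry.
+	 */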
+ for (i = priv->last_fw_rls_idx;
+ i != status_priv->fw_release_idx;
+ i = (i + 1) % WL18XX_FW_MAX_TX_STATUS_DESC) {
+ wl18xx_tx_complete_packet(wl,
+ status_priv->released_tx_desc[i]);
+
+ wl->tx_results_count++;
+ }
+
+ priv->last_fw_rls_idx = status_priv->fw_release_idx;
+}
diff --git a/drivers/net/wireless/ti/wl18xx/tx.h b/drivers/net/wireless/ti/wl18xx/tx.h
new file mode 100644
index 0000000..ccddc54
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/tx.h
@@ -0,0 +1,46 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_TX_H__
+#define __WL18XX_TX_H__
+
+#include "../wlcore/wlcore.h"
+
+#define WL18XX_TX_HW_BLOCK_SPARE 1
+/* for special cases - namely, TKIP and GEM */
+#define WL18XX_TX_HW_EXTRA_BLOCK_SPARE 2
+#define WL18XX_TX_HW_BLOCK_SIZE 268
+
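+/*
+ * Layout of a Tx status byte: bits 0-6 hold the Tx descriptor id,
+ * bit 7 holds the completion status (0 means success).
+ */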
+#define WL18XX_TX_STATUS_DESC_ID_MASK 0x7F
+#define WL18XX_TX_STATUS_STAT_BIT_IDX 7
+
+/* Indicates this TX HW frame is not padded to SDIO block size */
+#define WL18XX_TX_CTRL_NOT_PADDED BIT(7)
+
+/*
+ * The FW uses a special bit to indicate a wide channel should be used in
+ * the rate policy.
+ */
+#define CONF_TX_RATE_USE_WIDE_CHAN BIT(31)
+
+void wl18xx_tx_immediate_complete(struct wl1271 *wl);
+
+#endif /* __WL18XX_TX_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
new file mode 100644
index 0000000..6452396
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -0,0 +1,95 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_PRIV_H__
+#define __WL18XX_PRIV_H__
+
+#include "conf.h"
+
+/* minimum FW required for driver */
+#define WL18XX_CHIP_VER 8
+#define WL18XX_IFTYPE_VER 2
+#define WL18XX_MAJOR_VER 0
+#define WL18XX_SUBTYPE_VER 0
+#define WL18XX_MINOR_VER 100
+
+#define WL18XX_CMD_MAX_SIZE 740
+
+struct wl18xx_priv {
+ /* buffer for sending commands to FW */
+ u8 cmd_buf[WL18XX_CMD_MAX_SIZE];
+
+ struct wl18xx_priv_conf conf;
+
+ /* Index of last released Tx desc in FW */
+ u8 last_fw_rls_idx;
+
+ /* number of VIFs requiring extra spare mem-blocks */
+ int extra_spare_vif_count;
+};
+
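+/* number of entries in the FW status released_tx_desc ring */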
+#define WL18XX_FW_MAX_TX_STATUS_DESC 33
+
+struct wl18xx_fw_status_priv {
+ /*
+	 * Index into released_tx_desc of the first released
+	 * Tx host descriptor
+ */
+ u8 fw_release_idx;
+
+ /*
+ * Array of host Tx descriptors, where fw_release_idx
+	 * indicates the first released idx.
+ */
+ u8 released_tx_desc[WL18XX_FW_MAX_TX_STATUS_DESC];
+
+ u8 padding[2];
+};
+
+#define WL18XX_PHY_VERSION_MAX_LEN 20
+
+struct wl18xx_static_data_priv {
+ char phy_version[WL18XX_PHY_VERSION_MAX_LEN];
+};
+
+struct wl18xx_clk_cfg {
+ u32 n;
+ u32 m;
+ u32 p;
+ u32 q;
+ bool swallow;
+};
+
+enum {
+ CLOCK_CONFIG_16_2_M = 1,
+ CLOCK_CONFIG_16_368_M,
+ CLOCK_CONFIG_16_8_M,
+ CLOCK_CONFIG_19_2_M,
+ CLOCK_CONFIG_26_M,
+ CLOCK_CONFIG_32_736_M,
+ CLOCK_CONFIG_33_6_M,
+ CLOCK_CONFIG_38_468_M,
+ CLOCK_CONFIG_52_M,
+
+ NUM_CLOCK_CONFIGS,
+};
+
+#endif /* __WL18XX_PRIV_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/Kconfig b/drivers/net/wireless/ti/wlcore/Kconfig
index 54156b0..d7b907e 100644
--- a/drivers/net/wireless/ti/wlcore/Kconfig
+++ b/drivers/net/wireless/ti/wlcore/Kconfig
@@ -1,7 +1,6 @@
config WLCORE
tristate "TI wlcore support"
depends on WL_TI && GENERIC_HARDIRQS && MAC80211
- depends on INET
select FW_LOADER
---help---
This module contains the main code for TI WLAN chips. It abstracts
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index 509aa88..ce108a7 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -70,7 +70,7 @@ int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth)
struct acx_sleep_auth *auth;
int ret;
- wl1271_debug(DEBUG_ACX, "acx sleep auth");
+ wl1271_debug(DEBUG_ACX, "acx sleep auth %d", sleep_auth);
auth = kzalloc(sizeof(*auth), GFP_KERNEL);
if (!auth) {
@@ -81,11 +81,18 @@ int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth)
auth->sleep_auth = sleep_auth;
ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
+ if (ret < 0) {
+ wl1271_error("could not configure sleep_auth to %d: %d",
+ sleep_auth, ret);
+ goto out;
+ }
+ wl->sleep_auth = sleep_auth;
out:
kfree(auth);
return ret;
}
+EXPORT_SYMBOL_GPL(wl1271_acx_sleep_auth);
int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
int power)
@@ -708,14 +715,14 @@ out:
return ret;
}
-int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
+int wl1271_acx_statistics(struct wl1271 *wl, void *stats)
{
int ret;
wl1271_debug(DEBUG_ACX, "acx statistics");
ret = wl1271_cmd_interrogate(wl, ACX_STATISTICS, stats,
- sizeof(*stats));
+ wl->stats.fw_stats_len);
if (ret < 0) {
wl1271_warning("acx statistics failed: %d", ret);
return -ENOMEM;
@@ -997,6 +1004,7 @@ out:
kfree(mem_conf);
return ret;
}
+EXPORT_SYMBOL_GPL(wl12xx_acx_mem_cfg);
int wl1271_acx_init_mem_config(struct wl1271 *wl)
{
@@ -1027,6 +1035,7 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl)
return 0;
}
+EXPORT_SYMBOL_GPL(wl1271_acx_init_mem_config);
int wl1271_acx_init_rx_interrupt(struct wl1271 *wl)
{
@@ -1150,6 +1159,7 @@ out:
kfree(acx);
return ret;
}
+EXPORT_SYMBOL_GPL(wl1271_acx_pm_config);
int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
bool enable)
@@ -1715,6 +1725,7 @@ out:
}
+#ifdef CONFIG_PM
/* Set the global behaviour of RX filters - On/Off + default action */
int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
enum rx_filter_action action)
@@ -1794,3 +1805,4 @@ out:
kfree(acx);
return ret;
}
+#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index 8106b2e..d03215d 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -51,21 +51,18 @@
#define WL1271_ACX_INTR_TRACE_A BIT(7)
/* Trace message on MBOX #B */
#define WL1271_ACX_INTR_TRACE_B BIT(8)
+/* SW interrupt initiated by the FW - watchdog timer expiration */
+#define WL1271_ACX_SW_INTR_WATCHDOG BIT(9)
-#define WL1271_ACX_INTR_ALL 0xFFFFFFFF
-#define WL1271_ACX_ALL_EVENTS_VECTOR (WL1271_ACX_INTR_WATCHDOG | \
- WL1271_ACX_INTR_INIT_COMPLETE | \
- WL1271_ACX_INTR_EVENT_A | \
- WL1271_ACX_INTR_EVENT_B | \
- WL1271_ACX_INTR_CMD_COMPLETE | \
- WL1271_ACX_INTR_HW_AVAILABLE | \
- WL1271_ACX_INTR_DATA)
-
-#define WL1271_INTR_MASK (WL1271_ACX_INTR_WATCHDOG | \
- WL1271_ACX_INTR_EVENT_A | \
- WL1271_ACX_INTR_EVENT_B | \
- WL1271_ACX_INTR_HW_AVAILABLE | \
- WL1271_ACX_INTR_DATA)
+#define WL1271_ACX_INTR_ALL 0xFFFFFFFF
+
+/* all possible interrupts - only appropriate ones will be masked in */
+#define WLCORE_ALL_INTR_MASK (WL1271_ACX_INTR_WATCHDOG | \
+ WL1271_ACX_INTR_EVENT_A | \
+ WL1271_ACX_INTR_EVENT_B | \
+ WL1271_ACX_INTR_HW_AVAILABLE | \
+ WL1271_ACX_INTR_DATA | \
+ WL1271_ACX_SW_INTR_WATCHDOG)
/* Target's information element */
struct acx_header {
@@ -121,6 +118,11 @@ enum wl1271_psm_mode {
/* Extreme low power */
WL1271_PSM_ELP = 2,
+
+ WL1271_PSM_MAX = WL1271_PSM_ELP,
+
+ /* illegal out of band value of PSM mode */
+ WL1271_PSM_ILLEGAL = 0xff
};
struct acx_sleep_auth {
@@ -417,228 +419,6 @@ struct acx_ctsprotect {
u8 padding[2];
} __packed;
-struct acx_tx_statistics {
- __le32 internal_desc_overflow;
-} __packed;
-
-struct acx_rx_statistics {
- __le32 out_of_mem;
- __le32 hdr_overflow;
- __le32 hw_stuck;
- __le32 dropped;
- __le32 fcs_err;
- __le32 xfr_hint_trig;
- __le32 path_reset;
- __le32 reset_counter;
-} __packed;
-
-struct acx_dma_statistics {
- __le32 rx_requested;
- __le32 rx_errors;
- __le32 tx_requested;
- __le32 tx_errors;
-} __packed;
-
-struct acx_isr_statistics {
- /* host command complete */
- __le32 cmd_cmplt;
-
- /* fiqisr() */
- __le32 fiqs;
-
- /* (INT_STS_ND & INT_TRIG_RX_HEADER) */
- __le32 rx_headers;
-
- /* (INT_STS_ND & INT_TRIG_RX_CMPLT) */
- __le32 rx_completes;
-
- /* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */
- __le32 rx_mem_overflow;
-
- /* (INT_STS_ND & INT_TRIG_S_RX_RDY) */
- __le32 rx_rdys;
-
- /* irqisr() */
- __le32 irqs;
-
- /* (INT_STS_ND & INT_TRIG_TX_PROC) */
- __le32 tx_procs;
-
- /* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */
- __le32 decrypt_done;
-
- /* (INT_STS_ND & INT_TRIG_DMA0) */
- __le32 dma0_done;
-
- /* (INT_STS_ND & INT_TRIG_DMA1) */
- __le32 dma1_done;
-
- /* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */
- __le32 tx_exch_complete;
-
- /* (INT_STS_ND & INT_TRIG_COMMAND) */
- __le32 commands;
-
- /* (INT_STS_ND & INT_TRIG_RX_PROC) */
- __le32 rx_procs;
-
- /* (INT_STS_ND & INT_TRIG_PM_802) */
- __le32 hw_pm_mode_changes;
-
- /* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */
- __le32 host_acknowledges;
-
- /* (INT_STS_ND & INT_TRIG_PM_PCI) */
- __le32 pci_pm;
-
- /* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */
- __le32 wakeups;
-
- /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
- __le32 low_rssi;
-} __packed;
-
-struct acx_wep_statistics {
- /* WEP address keys configured */
- __le32 addr_key_count;
-
- /* default keys configured */
- __le32 default_key_count;
-
- __le32 reserved;
-
- /* number of times that WEP key not found on lookup */
- __le32 key_not_found;
-
- /* number of times that WEP key decryption failed */
- __le32 decrypt_fail;
-
- /* WEP packets decrypted */
- __le32 packets;
-
- /* WEP decrypt interrupts */
- __le32 interrupt;
-} __packed;
-
-#define ACX_MISSED_BEACONS_SPREAD 10
-
-struct acx_pwr_statistics {
- /* the amount of enters into power save mode (both PD & ELP) */
- __le32 ps_enter;
-
- /* the amount of enters into ELP mode */
- __le32 elp_enter;
-
- /* the amount of missing beacon interrupts to the host */
- __le32 missing_bcns;
-
- /* the amount of wake on host-access times */
- __le32 wake_on_host;
-
- /* the amount of wake on timer-expire */
- __le32 wake_on_timer_exp;
-
- /* the number of packets that were transmitted with PS bit set */
- __le32 tx_with_ps;
-
- /* the number of packets that were transmitted with PS bit clear */
- __le32 tx_without_ps;
-
- /* the number of received beacons */
- __le32 rcvd_beacons;
-
- /* the number of entering into PowerOn (power save off) */
- __le32 power_save_off;
-
- /* the number of entries into power save mode */
- __le16 enable_ps;
-
- /*
- * the number of exits from power save, not including failed PS
- * transitions
- */
- __le16 disable_ps;
-
- /*
- * the number of times the TSF counter was adjusted because
- * of drift
- */
- __le32 fix_tsf_ps;
-
- /* Gives statistics about the spread continuous missed beacons.
- * The 16 LSB are dedicated for the PS mode.
- * The 16 MSB are dedicated for the PS mode.
- * cont_miss_bcns_spread[0] - single missed beacon.
- * cont_miss_bcns_spread[1] - two continuous missed beacons.
- * cont_miss_bcns_spread[2] - three continuous missed beacons.
- * ...
- * cont_miss_bcns_spread[9] - ten and more continuous missed beacons.
- */
- __le32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD];
-
- /* the number of beacons in awake mode */
- __le32 rcvd_awake_beacons;
-} __packed;
-
-struct acx_mic_statistics {
- __le32 rx_pkts;
- __le32 calc_failure;
-} __packed;
-
-struct acx_aes_statistics {
- __le32 encrypt_fail;
- __le32 decrypt_fail;
- __le32 encrypt_packets;
- __le32 decrypt_packets;
- __le32 encrypt_interrupt;
- __le32 decrypt_interrupt;
-} __packed;
-
-struct acx_event_statistics {
- __le32 heart_beat;
- __le32 calibration;
- __le32 rx_mismatch;
- __le32 rx_mem_empty;
- __le32 rx_pool;
- __le32 oom_late;
- __le32 phy_transmit_error;
- __le32 tx_stuck;
-} __packed;
-
-struct acx_ps_statistics {
- __le32 pspoll_timeouts;
- __le32 upsd_timeouts;
- __le32 upsd_max_sptime;
- __le32 upsd_max_apturn;
- __le32 pspoll_max_apturn;
- __le32 pspoll_utilization;
- __le32 upsd_utilization;
-} __packed;
-
-struct acx_rxpipe_statistics {
- __le32 rx_prep_beacon_drop;
- __le32 descr_host_int_trig_rx_data;
- __le32 beacon_buffer_thres_host_int_trig_rx_data;
- __le32 missed_beacon_host_int_trig_rx_data;
- __le32 tx_xfr_host_int_trig_rx_data;
-} __packed;
-
-struct acx_statistics {
- struct acx_header header;
-
- struct acx_tx_statistics tx;
- struct acx_rx_statistics rx;
- struct acx_dma_statistics dma;
- struct acx_isr_statistics isr;
- struct acx_wep_statistics wep;
- struct acx_pwr_statistics pwr;
- struct acx_aes_statistics aes;
- struct acx_mic_statistics mic;
- struct acx_event_statistics event;
- struct acx_ps_statistics ps;
- struct acx_rxpipe_statistics rxpipe;
-} __packed;
-
struct acx_rate_class {
__le32 enabled_rates;
u8 short_retry_limit;
@@ -828,6 +608,8 @@ struct wl1271_acx_keep_alive_config {
#define HOST_IF_CFG_RX_FIFO_ENABLE BIT(0)
#define HOST_IF_CFG_TX_EXTRA_BLKS_SWAP BIT(1)
#define HOST_IF_CFG_TX_PAD_TO_SDIO_BLK BIT(3)
+#define HOST_IF_CFG_RX_PAD_TO_SDIO_BLK BIT(4)
+#define HOST_IF_CFG_ADD_RX_ALIGNMENT BIT(6)
enum {
WL1271_ACX_TRIG_TYPE_LEVEL = 0,
@@ -946,7 +728,7 @@ struct wl1271_acx_ht_information {
u8 padding[2];
} __packed;
-#define RX_BA_MAX_SESSIONS 2
+#define RX_BA_MAX_SESSIONS 3
struct wl1271_acx_ba_initiator_policy {
struct acx_header header;
@@ -1243,6 +1025,7 @@ enum {
ACX_CONFIG_HANGOVER = 0x0042,
ACX_FEATURE_CFG = 0x0043,
ACX_PROTECTION_CFG = 0x0044,
+ ACX_CHECKSUM_CONFIG = 0x0045,
};
@@ -1281,7 +1064,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum acx_preamble_type preamble);
int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum acx_ctsprotect_type ctsprotect);
-int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
+int wl1271_acx_statistics(struct wl1271 *wl, void *stats);
int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
u8 idx);
@@ -1330,9 +1113,11 @@ int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
int wl1271_acx_fm_coex(struct wl1271 *wl);
int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
int wl12xx_acx_config_hangover(struct wl1271 *wl);
+
+#ifdef CONFIG_PM
int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
enum rx_filter_action action);
int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable,
struct wl12xx_rx_filter *filter);
-
+#endif /* CONFIG_PM */
#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
index 9b98230..375ea57 100644
--- a/drivers/net/wireless/ti/wlcore/boot.c
+++ b/drivers/net/wireless/ti/wlcore/boot.c
@@ -33,22 +33,35 @@
#include "rx.h"
#include "hw_ops.h"
-static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
+static int wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
{
u32 cpu_ctrl;
+ int ret;
/* 10.5.0 run the firmware (I) */
- cpu_ctrl = wlcore_read_reg(wl, REG_ECPU_CONTROL);
+ ret = wlcore_read_reg(wl, REG_ECPU_CONTROL, &cpu_ctrl);
+ if (ret < 0)
+ goto out;
/* 10.5.1 run the firmware (II) */
cpu_ctrl |= flag;
- wlcore_write_reg(wl, REG_ECPU_CONTROL, cpu_ctrl);
+ ret = wlcore_write_reg(wl, REG_ECPU_CONTROL, cpu_ctrl);
+
+out:
+ return ret;
}
-static int wlcore_parse_fw_ver(struct wl1271 *wl)
+static int wlcore_boot_parse_fw_ver(struct wl1271 *wl,
+ struct wl1271_static_data *static_data)
{
int ret;
+ strncpy(wl->chip.fw_ver_str, static_data->fw_version,
+ sizeof(wl->chip.fw_ver_str));
+
+ /* make sure the string is NULL-terminated */
+ wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
+
ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u",
&wl->chip.fw_ver[0], &wl->chip.fw_ver[1],
&wl->chip.fw_ver[2], &wl->chip.fw_ver[3],
@@ -57,43 +70,96 @@ static int wlcore_parse_fw_ver(struct wl1271 *wl)
if (ret != 5) {
wl1271_warning("fw version incorrect value");
memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
ret = wlcore_identify_fw(wl);
if (ret < 0)
- return ret;
+ goto out;
+out:
+ return ret;
+}
+
+static int wlcore_validate_fw_ver(struct wl1271 *wl)
+{
+ unsigned int *fw_ver = wl->chip.fw_ver;
+ unsigned int *min_ver = wl->min_fw_ver;
+ /* the chip must be exactly equal */
+ if (min_ver[FW_VER_CHIP] != fw_ver[FW_VER_CHIP])
+ goto fail;
+
+ /* always check the next digit if all previous ones are equal */
+
+ if (min_ver[FW_VER_IF_TYPE] < fw_ver[FW_VER_IF_TYPE])
+ goto out;
+ else if (min_ver[FW_VER_IF_TYPE] > fw_ver[FW_VER_IF_TYPE])
+ goto fail;
+
+ if (min_ver[FW_VER_MAJOR] < fw_ver[FW_VER_MAJOR])
+ goto out;
+ else if (min_ver[FW_VER_MAJOR] > fw_ver[FW_VER_MAJOR])
+ goto fail;
+
+ if (min_ver[FW_VER_SUBTYPE] < fw_ver[FW_VER_SUBTYPE])
+ goto out;
+ else if (min_ver[FW_VER_SUBTYPE] > fw_ver[FW_VER_SUBTYPE])
+ goto fail;
+
+ if (min_ver[FW_VER_MINOR] < fw_ver[FW_VER_MINOR])
+ goto out;
+ else if (min_ver[FW_VER_MINOR] > fw_ver[FW_VER_MINOR])
+ goto fail;
+
+out:
return 0;
+
+fail:
+ wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is outdated.\n"
+ "Please use at least FW %u.%u.%u.%u.%u.\n"
+ "You can get more information at:\n"
+ "http://wireless.kernel.org/en/users/Drivers/wl12xx",
+ fw_ver[FW_VER_CHIP], fw_ver[FW_VER_IF_TYPE],
+ fw_ver[FW_VER_MAJOR], fw_ver[FW_VER_SUBTYPE],
+ fw_ver[FW_VER_MINOR], min_ver[FW_VER_CHIP],
+ min_ver[FW_VER_IF_TYPE], min_ver[FW_VER_MAJOR],
+ min_ver[FW_VER_SUBTYPE], min_ver[FW_VER_MINOR]);
+ return -EINVAL;
}
-static int wlcore_boot_fw_version(struct wl1271 *wl)
+static int wlcore_boot_static_data(struct wl1271 *wl)
{
struct wl1271_static_data *static_data;
+ size_t len = sizeof(*static_data) + wl->static_data_priv_len;
int ret;
- static_data = kmalloc(sizeof(*static_data), GFP_KERNEL | GFP_DMA);
+ static_data = kmalloc(len, GFP_KERNEL);
if (!static_data) {
- wl1271_error("Couldn't allocate memory for static data!");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
- wl1271_read(wl, wl->cmd_box_addr, static_data, sizeof(*static_data),
- false);
-
- strncpy(wl->chip.fw_ver_str, static_data->fw_version,
- sizeof(wl->chip.fw_ver_str));
+ ret = wlcore_read(wl, wl->cmd_box_addr, static_data, len, false);
+ if (ret < 0)
+ goto out_free;
- kfree(static_data);
+ ret = wlcore_boot_parse_fw_ver(wl, static_data);
+ if (ret < 0)
+ goto out_free;
- /* make sure the string is NULL-terminated */
- wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
+ ret = wlcore_validate_fw_ver(wl);
+ if (ret < 0)
+ goto out_free;
- ret = wlcore_parse_fw_ver(wl);
+ ret = wlcore_handle_static_data(wl, static_data);
if (ret < 0)
- return ret;
+ goto out_free;
- return 0;
+out_free:
+ kfree(static_data);
+out:
+ return ret;
}
static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
@@ -102,6 +168,7 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
struct wlcore_partition_set partition;
int addr, chunk_num, partition_limit;
u8 *p, *chunk;
+ int ret;
/* whal_FwCtrl_LoadFwImageSm() */
@@ -123,7 +190,9 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
memcpy(&partition, &wl->ptable[PART_DOWN], sizeof(partition));
partition.mem.start = dest;
- wlcore_set_partition(wl, &partition);
+ ret = wlcore_set_partition(wl, &partition);
+ if (ret < 0)
+ goto out;
/* 10.1 set partition limit and chunk num */
chunk_num = 0;
@@ -137,7 +206,9 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
partition_limit = chunk_num * CHUNK_SIZE +
wl->ptable[PART_DOWN].mem.size;
partition.mem.start = addr;
- wlcore_set_partition(wl, &partition);
+ ret = wlcore_set_partition(wl, &partition);
+ if (ret < 0)
+ goto out;
}
/* 10.3 upload the chunk */
@@ -146,7 +217,9 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
memcpy(chunk, p, CHUNK_SIZE);
wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
p, addr);
- wl1271_write(wl, addr, chunk, CHUNK_SIZE, false);
+ ret = wlcore_write(wl, addr, chunk, CHUNK_SIZE, false);
+ if (ret < 0)
+ goto out;
chunk_num++;
}
@@ -157,10 +230,11 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
memcpy(chunk, p, fw_data_len % CHUNK_SIZE);
wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x",
fw_data_len % CHUNK_SIZE, p, addr);
- wl1271_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
+ ret = wlcore_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
+out:
kfree(chunk);
- return 0;
+ return ret;
}
int wlcore_boot_upload_firmware(struct wl1271 *wl)
@@ -203,9 +277,12 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
int i;
u32 dest_addr, val;
u8 *nvs_ptr, *nvs_aligned;
+ int ret;
- if (wl->nvs == NULL)
+ if (wl->nvs == NULL) {
+ wl1271_error("NVS file is needed during boot");
return -ENODEV;
+ }
if (wl->quirks & WLCORE_QUIRK_LEGACY_NVS) {
struct wl1271_nvs_file *nvs =
@@ -298,7 +375,9 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
wl1271_debug(DEBUG_BOOT,
"nvs burst write 0x%x: 0x%x",
dest_addr, val);
- wl1271_write32(wl, dest_addr, val);
+ ret = wlcore_write32(wl, dest_addr, val);
+ if (ret < 0)
+ return ret;
nvs_ptr += 4;
dest_addr += 4;
@@ -324,7 +403,9 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
nvs_len -= nvs_ptr - (u8 *)wl->nvs;
/* Now we must set the partition correctly */
- wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
+ if (ret < 0)
+ return ret;
/* Copy the NVS tables to a new block to ensure alignment */
nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
@@ -332,11 +413,11 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
return -ENOMEM;
/* And finally we upload the NVS tables */
- wlcore_write_data(wl, REG_CMD_MBOX_ADDRESS,
- nvs_aligned, nvs_len, false);
+ ret = wlcore_write_data(wl, REG_CMD_MBOX_ADDRESS, nvs_aligned, nvs_len,
+ false);
kfree(nvs_aligned);
- return 0;
+ return ret;
out_badnvs:
wl1271_error("nvs data is malformed");
@@ -350,11 +431,17 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
u32 chip_id, intr;
/* Make sure we have the boot partition */
- wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
+ if (ret < 0)
+ return ret;
- wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
+ ret = wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
+ if (ret < 0)
+ return ret;
- chip_id = wlcore_read_reg(wl, REG_CHIP_ID_B);
+ ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &chip_id);
+ if (ret < 0)
+ return ret;
wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
@@ -367,7 +454,9 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
loop = 0;
while (loop++ < INIT_LOOP) {
udelay(INIT_LOOP_DELAY);
- intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR);
+ ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
+ if (ret < 0)
+ return ret;
if (intr == 0xffffffff) {
wl1271_error("error reading hardware complete "
@@ -376,8 +465,10 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
}
/* check that ACX_INTR_INIT_COMPLETE is enabled */
else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) {
- wlcore_write_reg(wl, REG_INTERRUPT_ACK,
- WL1271_ACX_INTR_INIT_COMPLETE);
+ ret = wlcore_write_reg(wl, REG_INTERRUPT_ACK,
+ WL1271_ACX_INTR_INIT_COMPLETE);
+ if (ret < 0)
+ return ret;
break;
}
}
@@ -389,20 +480,25 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
}
/* get hardware config command mail box */
- wl->cmd_box_addr = wlcore_read_reg(wl, REG_COMMAND_MAILBOX_PTR);
+ ret = wlcore_read_reg(wl, REG_COMMAND_MAILBOX_PTR, &wl->cmd_box_addr);
+ if (ret < 0)
+ return ret;
wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x", wl->cmd_box_addr);
/* get hardware config event mail box */
- wl->mbox_ptr[0] = wlcore_read_reg(wl, REG_EVENT_MAILBOX_PTR);
+ ret = wlcore_read_reg(wl, REG_EVENT_MAILBOX_PTR, &wl->mbox_ptr[0]);
+ if (ret < 0)
+ return ret;
+
wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
wl1271_debug(DEBUG_MAILBOX, "MBOX ptrs: 0x%x 0x%x",
wl->mbox_ptr[0], wl->mbox_ptr[1]);
- ret = wlcore_boot_fw_version(wl);
+ ret = wlcore_boot_static_data(wl);
if (ret < 0) {
- wl1271_error("couldn't boot firmware");
+ wl1271_error("error getting static data");
return ret;
}
@@ -436,9 +532,9 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
}
/* set the working partition to its "running" mode offset */
- wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/* firmware startup completed */
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(wlcore_boot_run_firmware);
diff --git a/drivers/net/wireless/ti/wlcore/boot.h b/drivers/net/wireless/ti/wlcore/boot.h
index 094981d..a525225 100644
--- a/drivers/net/wireless/ti/wlcore/boot.h
+++ b/drivers/net/wireless/ti/wlcore/boot.h
@@ -40,6 +40,7 @@ struct wl1271_static_data {
u8 fw_version[WL1271_FW_VERSION_MAX_LEN];
u32 hw_version;
u8 tx_power_table[WL1271_NO_SUBBANDS][WL1271_NO_POWER_LEVELS];
+ u8 priv[0];
};
/* number of times we try to read the INIT interrupt */
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 5b128a9..20e1bd9 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -36,8 +36,10 @@
#include "cmd.h"
#include "event.h"
#include "tx.h"
+#include "hw_ops.h"
#define WL1271_CMD_FAST_POLL_COUNT 50
+#define WL1271_WAIT_EVENT_FAST_POLL_COUNT 20
/*
* send command to firmware
@@ -64,17 +66,24 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
WARN_ON(len % 4 != 0);
WARN_ON(test_bit(WL1271_FLAG_IN_ELP, &wl->flags));
- wl1271_write(wl, wl->cmd_box_addr, buf, len, false);
+ ret = wlcore_write(wl, wl->cmd_box_addr, buf, len, false);
+ if (ret < 0)
+ goto fail;
/*
* TODO: we just need this because one bit is in a different
* place. Is there any better way?
*/
- wl->ops->trigger_cmd(wl, wl->cmd_box_addr, buf, len);
+ ret = wl->ops->trigger_cmd(wl, wl->cmd_box_addr, buf, len);
+ if (ret < 0)
+ goto fail;
timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);
- intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR);
+ ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
+ if (ret < 0)
+ goto fail;
+
while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
if (time_after(jiffies, timeout)) {
wl1271_error("command complete timeout");
@@ -88,13 +97,18 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
else
msleep(1);
- intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR);
+ ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
+ if (ret < 0)
+ goto fail;
}
/* read back the status code of the command */
if (res_len == 0)
res_len = sizeof(struct wl1271_cmd_header);
- wl1271_read(wl, wl->cmd_box_addr, cmd, res_len, false);
+
+ ret = wlcore_read(wl, wl->cmd_box_addr, cmd, res_len, false);
+ if (ret < 0)
+ goto fail;
status = le16_to_cpu(cmd->status);
if (status != CMD_STATUS_SUCCESS) {
@@ -103,11 +117,14 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
goto fail;
}
- wlcore_write_reg(wl, REG_INTERRUPT_ACK, WL1271_ACX_INTR_CMD_COMPLETE);
+ ret = wlcore_write_reg(wl, REG_INTERRUPT_ACK,
+ WL1271_ACX_INTR_CMD_COMPLETE);
+ if (ret < 0)
+ goto fail;
+
return 0;
fail:
- WARN_ON(1);
wl12xx_queue_recovery_work(wl);
return ret;
}
@@ -116,35 +133,50 @@ fail:
* Poll the mailbox event field until any of the bits in the mask is set or a
* timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
*/
-static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask)
+static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
+ u32 mask, bool *timeout)
{
u32 *events_vector;
u32 event;
- unsigned long timeout;
+ unsigned long timeout_time;
+ u16 poll_count = 0;
int ret = 0;
+ *timeout = false;
+
events_vector = kmalloc(sizeof(*events_vector), GFP_KERNEL | GFP_DMA);
if (!events_vector)
return -ENOMEM;
- timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
+ timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
do {
- if (time_after(jiffies, timeout)) {
+ if (time_after(jiffies, timeout_time)) {
wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
(int)mask);
- ret = -ETIMEDOUT;
+ *timeout = true;
goto out;
}
- msleep(1);
+ poll_count++;
+ if (poll_count < WL1271_WAIT_EVENT_FAST_POLL_COUNT)
+ usleep_range(50, 51);
+ else
+ usleep_range(1000, 5000);
/* read from both event fields */
- wl1271_read(wl, wl->mbox_ptr[0], events_vector,
- sizeof(*events_vector), false);
+ ret = wlcore_read(wl, wl->mbox_ptr[0], events_vector,
+ sizeof(*events_vector), false);
+ if (ret < 0)
+ goto out;
+
event = *events_vector & mask;
- wl1271_read(wl, wl->mbox_ptr[1], events_vector,
- sizeof(*events_vector), false);
+
+ ret = wlcore_read(wl, wl->mbox_ptr[1], events_vector,
+ sizeof(*events_vector), false);
+ if (ret < 0)
+ goto out;
+
event |= *events_vector & mask;
} while (!event);
@@ -156,9 +188,10 @@ out:
static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
{
int ret;
+ bool timeout = false;
- ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask);
- if (ret != 0) {
+ ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask, &timeout);
+ if (ret != 0 || timeout) {
wl12xx_queue_recovery_work(wl);
return ret;
}
@@ -291,6 +324,23 @@ static int wl12xx_get_new_session_id(struct wl1271 *wl,
return wlvif->session_counter;
}
+static u8 wlcore_get_native_channel_type(u8 nl_channel_type)
+{
+ switch (nl_channel_type) {
+ case NL80211_CHAN_NO_HT:
+ return WLCORE_CHAN_NO_HT;
+ case NL80211_CHAN_HT20:
+ return WLCORE_CHAN_HT20;
+ case NL80211_CHAN_HT40MINUS:
+ return WLCORE_CHAN_HT40MINUS;
+ case NL80211_CHAN_HT40PLUS:
+ return WLCORE_CHAN_HT40PLUS;
+ default:
+ WARN_ON(1);
+ return WLCORE_CHAN_NO_HT;
+ }
+}
+
static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
@@ -407,6 +457,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len);
memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN);
cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
+ cmd->channel_type = wlcore_get_native_channel_type(wlvif->channel_type);
if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
@@ -446,6 +497,7 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_stop *cmd;
int ret;
+ bool timeout = false;
if (WARN_ON(wlvif->sta.hlid == WL12XX_INVALID_LINK_ID))
return -EINVAL;
@@ -468,6 +520,17 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
goto out_free;
}
+ /*
+ * Sometimes the firmware doesn't send this event, so we just
+ * time out without failing. Queue recovery for other
+ * failures.
+ */
+ ret = wl1271_cmd_wait_for_event_or_timeout(wl,
+ ROLE_STOP_COMPLETE_EVENT_ID,
+ &timeout);
+ if (ret)
+ wl12xx_queue_recovery_work(wl);
+
wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
out_free:
@@ -482,6 +545,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
struct wl12xx_cmd_role_start *cmd;
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ u32 supported_rates;
int ret;
wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id);
@@ -519,6 +583,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
/* FIXME: Change when adding DFS */
cmd->ap.reset_tsf = 1; /* By default reset AP TSF */
cmd->channel = wlvif->channel;
+ cmd->channel_type = wlcore_get_native_channel_type(wlvif->channel_type);
if (!bss_conf->hidden_ssid) {
/* take the SSID from the beacon for backward compatibility */
@@ -531,7 +596,13 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
memcpy(cmd->ap.ssid, bss_conf->ssid, bss_conf->ssid_len);
}
- cmd->ap.local_rates = cpu_to_le32(0xffffffff);
+ supported_rates = CONF_TX_AP_ENABLED_RATES | CONF_TX_MCS_RATES |
+ wlcore_hw_ap_get_mimo_wide_rate_mask(wl, wlvif);
+
+ wl1271_debug(DEBUG_CMD, "cmd role start ap with supported_rates 0x%08x",
+ supported_rates);
+
+ cmd->ap.local_rates = cpu_to_le32(supported_rates);
switch (wlvif->band) {
case IEEE80211_BAND_2GHZ:
@@ -797,6 +868,7 @@ out:
kfree(cmd);
return ret;
}
+EXPORT_SYMBOL_GPL(wl1271_cmd_data_path);
int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 ps_mode, u16 auto_ps_timeout)
@@ -953,12 +1025,14 @@ out:
int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 role_id, u8 band,
const u8 *ssid, size_t ssid_len,
- const u8 *ie, size_t ie_len)
+ const u8 *ie, size_t ie_len, bool sched_scan)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb;
int ret;
u32 rate;
+ u16 template_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4;
+ u16 template_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5;
skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
ie, ie_len);
@@ -969,14 +1043,20 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
+ if (!sched_scan &&
+ (wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL)) {
+ template_id_2_4 = CMD_TEMPL_APP_PROBE_REQ_2_4;
+ template_id_5 = CMD_TEMPL_APP_PROBE_REQ_5;
+ }
+
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
if (band == IEEE80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, role_id,
- CMD_TEMPL_CFG_PROBE_REQ_2_4,
+ template_id_2_4,
skb->data, skb->len, 0, rate);
else
ret = wl1271_cmd_template_set(wl, role_id,
- CMD_TEMPL_CFG_PROBE_REQ_5,
+ template_id_5,
skb->data, skb->len, 0, rate);
out:
@@ -1018,7 +1098,7 @@ out:
int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
- int ret, extra;
+ int ret, extra = 0;
u16 fc;
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb;
@@ -1057,7 +1137,8 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
/* encryption space */
switch (wlvif->encryption_type) {
case KEY_TKIP:
- extra = WL1271_EXTRA_SPACE_TKIP;
+ if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
+ extra = WL1271_EXTRA_SPACE_TKIP;
break;
case KEY_AES:
extra = WL1271_EXTRA_SPACE_AES;
@@ -1346,13 +1427,18 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
for (i = 0; i < NUM_ACCESS_CATEGORIES_COPY; i++)
if (sta->wme && (sta->uapsd_queues & BIT(i)))
- cmd->psd_type[i] = WL1271_PSD_UPSD_TRIGGER;
+ cmd->psd_type[NUM_ACCESS_CATEGORIES_COPY-1-i] =
+ WL1271_PSD_UPSD_TRIGGER;
else
- cmd->psd_type[i] = WL1271_PSD_LEGACY;
+ cmd->psd_type[NUM_ACCESS_CATEGORIES_COPY-1-i] =
+ WL1271_PSD_LEGACY;
+
sta_rates = sta->supp_rates[wlvif->band];
if (sta->ht_cap.ht_supported)
- sta_rates |= sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET;
+ sta_rates |=
+ (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
+ (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
cmd->supported_rates =
cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates,
@@ -1378,6 +1464,7 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
{
struct wl12xx_cmd_remove_peer *cmd;
int ret;
+ bool timeout = false;
wl1271_debug(DEBUG_CMD, "cmd remove peer %d", (int)hlid);
@@ -1398,12 +1485,16 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
goto out_free;
}
+ ret = wl1271_cmd_wait_for_event_or_timeout(wl,
+ PEER_REMOVE_COMPLETE_EVENT_ID,
+ &timeout);
/*
* We are ok with a timeout here. The event is sometimes not sent
- * due to a firmware bug.
+ * due to a firmware bug. In case of another error (like SDIO timeout)
+ * queue a recovery.
*/
- wl1271_cmd_wait_for_event_or_timeout(wl,
- PEER_REMOVE_COMPLETE_EVENT_ID);
+ if (ret)
+ wl12xx_queue_recovery_work(wl);
out_free:
kfree(cmd);
@@ -1573,19 +1664,25 @@ out:
int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id)
{
int ret = 0;
+ bool is_first_roc;
if (WARN_ON(test_bit(role_id, wl->roc_map)))
return 0;
+ is_first_roc = (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) >=
+ WL12XX_MAX_ROLES);
+
ret = wl12xx_cmd_roc(wl, wlvif, role_id);
if (ret < 0)
goto out;
- ret = wl1271_cmd_wait_for_event(wl,
- REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID);
- if (ret < 0) {
- wl1271_error("cmd roc event completion error");
- goto out;
+ if (is_first_roc) {
+ ret = wl1271_cmd_wait_for_event(wl,
+ REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID);
+ if (ret < 0) {
+ wl1271_error("cmd roc event completion error");
+ goto out;
+ }
}
__set_bit(role_id, wl->roc_map);
@@ -1714,7 +1811,9 @@ int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
return -EINVAL;
/* flush all pending packets */
- wl1271_tx_work_locked(wl);
+ ret = wlcore_tx_work_locked(wl);
+ if (ret < 0)
+ goto out;
if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
ret = wl12xx_croc(wl, wlvif->dev_role_id);
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index a46ae07..4ef0b09 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -58,7 +58,7 @@ int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 role_id, u8 band,
const u8 *ssid, size_t ssid_len,
- const u8 *ie, size_t ie_len);
+ const u8 *ie, size_t ie_len, bool sched_scan);
struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct sk_buff *skb);
@@ -172,8 +172,8 @@ enum cmd_templ {
CMD_TEMPL_PS_POLL,
CMD_TEMPL_KLV,
CMD_TEMPL_DISCONNECT,
- CMD_TEMPL_PROBE_REQ_2_4, /* for firmware internal use only */
- CMD_TEMPL_PROBE_REQ_5, /* for firmware internal use only */
+ CMD_TEMPL_APP_PROBE_REQ_2_4,
+ CMD_TEMPL_APP_PROBE_REQ_5,
CMD_TEMPL_BAR, /* for firmware internal use only */
CMD_TEMPL_CTS, /*
* For CTS-to-self (FastCTS) mechanism
@@ -192,7 +192,7 @@ enum cmd_templ {
#define WL1271_COMMAND_TIMEOUT 2000
#define WL1271_CMD_TEMPL_DFLT_SIZE 252
#define WL1271_CMD_TEMPL_MAX_SIZE 512
-#define WL1271_EVENT_TIMEOUT 750
+#define WL1271_EVENT_TIMEOUT 1500
struct wl1271_cmd_header {
__le16 id;
@@ -266,13 +266,22 @@ enum wlcore_band {
WLCORE_BAND_MAX_RADIO = 0x7F,
};
+enum wlcore_channel_type {
+ WLCORE_CHAN_NO_HT,
+ WLCORE_CHAN_HT20,
+ WLCORE_CHAN_HT40MINUS,
+ WLCORE_CHAN_HT40PLUS
+};
+
struct wl12xx_cmd_role_start {
struct wl1271_cmd_header header;
u8 role_id;
u8 band;
u8 channel;
- u8 padding;
+
+ /* enum wlcore_channel_type */
+ u8 channel_type;
union {
struct {
@@ -643,4 +652,25 @@ struct wl12xx_cmd_stop_channel_switch {
struct wl1271_cmd_header header;
} __packed;
+/* Used to check radio status after calibration */
+#define MAX_TLV_LENGTH 500
+#define TEST_CMD_P2G_CAL 2 /* TX BiP */
+
+struct wl1271_cmd_cal_p2g {
+ struct wl1271_cmd_header header;
+
+ struct wl1271_cmd_test_header test;
+
+ __le32 ver;
+ __le16 len;
+ u8 buf[MAX_TLV_LENGTH];
+ u8 type;
+ u8 padding;
+
+ __le16 radio_status;
+
+ u8 sub_band_mask;
+ u8 padding2;
+} __packed;
+
#endif /* __WL1271_CMD_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index fef0db4..d77224f 100644
--- a/drivers/net/wireless/ti/wlcore/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -45,7 +45,15 @@ enum {
CONF_HW_BIT_RATE_MCS_4 = BIT(17),
CONF_HW_BIT_RATE_MCS_5 = BIT(18),
CONF_HW_BIT_RATE_MCS_6 = BIT(19),
- CONF_HW_BIT_RATE_MCS_7 = BIT(20)
+ CONF_HW_BIT_RATE_MCS_7 = BIT(20),
+ CONF_HW_BIT_RATE_MCS_8 = BIT(21),
+ CONF_HW_BIT_RATE_MCS_9 = BIT(22),
+ CONF_HW_BIT_RATE_MCS_10 = BIT(23),
+ CONF_HW_BIT_RATE_MCS_11 = BIT(24),
+ CONF_HW_BIT_RATE_MCS_12 = BIT(25),
+ CONF_HW_BIT_RATE_MCS_13 = BIT(26),
+ CONF_HW_BIT_RATE_MCS_14 = BIT(27),
+ CONF_HW_BIT_RATE_MCS_15 = BIT(28),
};
enum {
@@ -310,7 +318,7 @@ enum {
struct conf_sg_settings {
u32 params[CONF_SG_PARAMS_MAX];
u8 state;
-};
+} __packed;
enum conf_rx_queue_type {
CONF_RX_QUEUE_TYPE_LOW_PRIORITY, /* All except the high priority */
@@ -394,7 +402,7 @@ struct conf_rx_settings {
* Range: RX_QUEUE_TYPE_RX_LOW_PRIORITY, RX_QUEUE_TYPE_RX_HIGH_PRIORITY,
*/
u8 queue_type;
-};
+} __packed;
#define CONF_TX_MAX_RATE_CLASSES 10
@@ -435,6 +443,12 @@ struct conf_rx_settings {
CONF_HW_BIT_RATE_MCS_5 | CONF_HW_BIT_RATE_MCS_6 | \
CONF_HW_BIT_RATE_MCS_7)
+#define CONF_TX_MIMO_RATES (CONF_HW_BIT_RATE_MCS_8 | \
+ CONF_HW_BIT_RATE_MCS_9 | CONF_HW_BIT_RATE_MCS_10 | \
+ CONF_HW_BIT_RATE_MCS_11 | CONF_HW_BIT_RATE_MCS_12 | \
+ CONF_HW_BIT_RATE_MCS_13 | CONF_HW_BIT_RATE_MCS_14 | \
+ CONF_HW_BIT_RATE_MCS_15)
+
/*
* Default rates for management traffic when operating in AP mode. This
* should be configured according to the basic rate set of the AP
@@ -487,7 +501,7 @@ struct conf_tx_rate_class {
* the policy (0 - long preamble, 1 - short preamble.
*/
u8 aflags;
-};
+} __packed;
#define CONF_TX_MAX_AC_COUNT 4
@@ -504,7 +518,7 @@ enum conf_tx_ac {
CONF_TX_AC_VI = 2, /* video */
CONF_TX_AC_VO = 3, /* voice */
CONF_TX_AC_CTS2SELF = 4, /* fictitious AC, follows AC_VO */
- CONF_TX_AC_ANY_TID = 0x1f
+ CONF_TX_AC_ANY_TID = 0xff
};
struct conf_tx_ac_category {
@@ -544,7 +558,7 @@ struct conf_tx_ac_category {
* Range: u16
*/
u16 tx_op_limit;
-};
+} __packed;
#define CONF_TX_MAX_TID_COUNT 8
@@ -578,7 +592,7 @@ struct conf_tx_tid {
u8 ps_scheme;
u8 ack_policy;
u32 apsd_conf[2];
-};
+} __packed;
struct conf_tx_settings {
/*
@@ -664,7 +678,7 @@ struct conf_tx_settings {
/* Time in ms for Tx watchdog timer to expire */
u32 tx_watchdog_timeout;
-};
+} __packed;
enum {
CONF_WAKE_UP_EVENT_BEACON = 0x01, /* Wake on every Beacon*/
@@ -711,7 +725,7 @@ struct conf_bcn_filt_rule {
	 * Version for the vendor specific IE (221)
*/
u8 version[CONF_BCN_IE_VER_LEN];
-};
+} __packed;
#define CONF_MAX_RSSI_SNR_TRIGGERS 8
@@ -762,7 +776,7 @@ struct conf_sig_weights {
* Range: u8
*/
u8 snr_pkt_avg_weight;
-};
+} __packed;
enum conf_bcn_filt_mode {
CONF_BCN_FILT_MODE_DISABLED = 0,
@@ -810,7 +824,7 @@ struct conf_conn_settings {
*
* Range: CONF_BCN_FILT_MODE_*
*/
- enum conf_bcn_filt_mode bcn_filt_mode;
+ u8 bcn_filt_mode;
/*
* Configure Beacon filter pass-thru rules.
@@ -937,7 +951,13 @@ struct conf_conn_settings {
* Range: u16
*/
u8 max_listen_interval;
-};
+
+ /*
+ * Default sleep authorization for a new STA interface. This determines
+ * whether we can go to ELP.
+ */
+ u8 sta_sleep_auth;
+} __packed;
enum {
CONF_REF_CLK_19_2_E,
@@ -965,6 +985,11 @@ struct conf_itrim_settings {
/* moderation timeout in microsecs from the last TX */
u32 timeout;
+} __packed;
+
+enum conf_fast_wakeup {
+ CONF_FAST_WAKEUP_ENABLE,
+ CONF_FAST_WAKEUP_DISABLE,
};
struct conf_pm_config_settings {
@@ -978,10 +1003,10 @@ struct conf_pm_config_settings {
/*
* Host fast wakeup support
*
- * Range: true, false
+ * Range: enum conf_fast_wakeup
*/
- bool host_fast_wakeup_support;
-};
+ u8 host_fast_wakeup_support;
+} __packed;
struct conf_roam_trigger_settings {
/*
@@ -1018,7 +1043,7 @@ struct conf_roam_trigger_settings {
* Range: 0 - 255
*/
u8 avg_weight_snr_data;
-};
+} __packed;
struct conf_scan_settings {
/*
@@ -1064,7 +1089,7 @@ struct conf_scan_settings {
* Range: u32 Microsecs
*/
u32 split_scan_timeout;
-};
+} __packed;
struct conf_sched_scan_settings {
/*
@@ -1102,7 +1127,7 @@ struct conf_sched_scan_settings {
/* SNR threshold to be used for filtering */
s8 snr_threshold;
-};
+} __packed;
struct conf_ht_setting {
u8 rx_ba_win_size;
@@ -1111,7 +1136,7 @@ struct conf_ht_setting {
/* bitmap of enabled TIDs for TX BA sessions */
u8 tx_ba_tid_bitmap;
-};
+} __packed;
struct conf_memory_settings {
/* Number of stations supported in IBSS mode */
@@ -1151,7 +1176,7 @@ struct conf_memory_settings {
* Range: 0-120
*/
u8 tx_min;
-};
+} __packed;
struct conf_fm_coex {
u8 enable;
@@ -1164,7 +1189,7 @@ struct conf_fm_coex {
u16 ldo_stabilization_time;
u8 fm_disturbed_band_margin;
u8 swallow_clk_diff;
-};
+} __packed;
struct conf_rx_streaming_settings {
/*
@@ -1193,7 +1218,7 @@ struct conf_rx_streaming_settings {
* enable rx streaming also when there is no coex activity
*/
u8 always;
-};
+} __packed;
struct conf_fwlog {
/* Continuous or on-demand */
@@ -1217,7 +1242,7 @@ struct conf_fwlog {
/* Regulates the frequency of log messages */
u8 threshold;
-};
+} __packed;
#define ACX_RATE_MGMT_NUM_OF_RATES 13
struct conf_rate_policy_settings {
@@ -1236,7 +1261,7 @@ struct conf_rate_policy_settings {
u8 rate_check_up;
u8 rate_check_down;
u8 rate_retry_policy[ACX_RATE_MGMT_NUM_OF_RATES];
-};
+} __packed;
struct conf_hangover_settings {
u32 recover_time;
@@ -1250,7 +1275,23 @@ struct conf_hangover_settings {
u8 quiet_time;
u8 increase_time;
u8 window_size;
-};
+} __packed;
+
+/*
+ * The conf version consists of 4 bytes. The two MSB are the wlcore
+ * version, the two LSB are the lower driver's private conf
+ * version.
+ */
+#define WLCORE_CONF_VERSION (0x0002 << 16)
+#define WLCORE_CONF_MASK 0xffff0000
+#define WLCORE_CONF_SIZE (sizeof(struct wlcore_conf_header) + \
+ sizeof(struct wlcore_conf))
+
+struct wlcore_conf_header {
+ __le32 magic;
+ __le32 version;
+ __le32 checksum;
+} __packed;
struct wlcore_conf {
struct conf_sg_settings sg;
@@ -1269,6 +1310,12 @@ struct wlcore_conf {
struct conf_fwlog fwlog;
struct conf_rate_policy_settings rate;
struct conf_hangover_settings hangover;
-};
+} __packed;
+
+struct wlcore_conf_file {
+ struct wlcore_conf_header header;
+ struct wlcore_conf core;
+ u8 priv[0];
+} __packed;
#endif
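The wlcore_conf_file layout above implies a straightforward sanity check when a binary configuration blob is loaded: the magic must match and the wlcore half of the version word must agree with WLCORE_CONF_VERSION. A minimal sketch, assuming a WLCORE_CONF_MAGIC constant defined elsewhere in the driver; the function name is illustrative:

static int wlcore_validate_conf_header(const struct wlcore_conf_file *conf_fw,
				       size_t len)
{
	if (len < WLCORE_CONF_SIZE)
		return -EINVAL;

	if (conf_fw->header.magic != cpu_to_le32(WLCORE_CONF_MAGIC))
		return -EINVAL;

	/* Only the two MSBs (the wlcore part) need to match exactly. */
	if ((le32_to_cpu(conf_fw->header.version) & WLCORE_CONF_MASK) !=
	    (WLCORE_CONF_VERSION & WLCORE_CONF_MASK))
		return -EINVAL;

	return 0;
}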
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index d5aea1f..80dbc53 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -25,6 +25,7 @@
#include <linux/skbuff.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "wlcore.h"
#include "debug.h"
@@ -32,14 +33,16 @@
#include "ps.h"
#include "io.h"
#include "tx.h"
+#include "hw_ops.h"
/* ms */
#define WL1271_DEBUGFS_STATS_LIFETIME 1000
+#define WLCORE_MAX_BLOCK_SIZE ((size_t)(4*PAGE_SIZE))
+
/* debugfs macros idea from mac80211 */
-#define DEBUGFS_FORMAT_BUFFER_SIZE 100
-static int wl1271_format_buffer(char __user *userbuf, size_t count,
- loff_t *ppos, char *fmt, ...)
+int wl1271_format_buffer(char __user *userbuf, size_t count,
+ loff_t *ppos, char *fmt, ...)
{
va_list args;
char buf[DEBUGFS_FORMAT_BUFFER_SIZE];
@@ -51,59 +54,9 @@ static int wl1271_format_buffer(char __user *userbuf, size_t count,
return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}
+EXPORT_SYMBOL_GPL(wl1271_format_buffer);
-#define DEBUGFS_READONLY_FILE(name, fmt, value...) \
-static ssize_t name## _read(struct file *file, char __user *userbuf, \
- size_t count, loff_t *ppos) \
-{ \
- struct wl1271 *wl = file->private_data; \
- return wl1271_format_buffer(userbuf, count, ppos, \
- fmt "\n", ##value); \
-} \
- \
-static const struct file_operations name## _ops = { \
- .read = name## _read, \
- .open = simple_open, \
- .llseek = generic_file_llseek, \
-};
-
-#define DEBUGFS_ADD(name, parent) \
- entry = debugfs_create_file(#name, 0400, parent, \
- wl, &name## _ops); \
- if (!entry || IS_ERR(entry)) \
- goto err; \
-
-#define DEBUGFS_ADD_PREFIX(prefix, name, parent) \
- do { \
- entry = debugfs_create_file(#name, 0400, parent, \
- wl, &prefix## _## name## _ops); \
- if (!entry || IS_ERR(entry)) \
- goto err; \
- } while (0);
-
-#define DEBUGFS_FWSTATS_FILE(sub, name, fmt) \
-static ssize_t sub## _ ##name## _read(struct file *file, \
- char __user *userbuf, \
- size_t count, loff_t *ppos) \
-{ \
- struct wl1271 *wl = file->private_data; \
- \
- wl1271_debugfs_update_stats(wl); \
- \
- return wl1271_format_buffer(userbuf, count, ppos, fmt "\n", \
- wl->stats.fw_stats->sub.name); \
-} \
- \
-static const struct file_operations sub## _ ##name## _ops = { \
- .read = sub## _ ##name## _read, \
- .open = simple_open, \
- .llseek = generic_file_llseek, \
-};
-
-#define DEBUGFS_FWSTATS_ADD(sub, name) \
- DEBUGFS_ADD(sub## _ ##name, stats)
-
-static void wl1271_debugfs_update_stats(struct wl1271 *wl)
+void wl1271_debugfs_update_stats(struct wl1271 *wl)
{
int ret;
@@ -125,97 +78,7 @@ static void wl1271_debugfs_update_stats(struct wl1271 *wl)
out:
mutex_unlock(&wl->mutex);
}
-
-DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, "%u");
-
-DEBUGFS_FWSTATS_FILE(rx, out_of_mem, "%u");
-DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, "%u");
-DEBUGFS_FWSTATS_FILE(rx, hw_stuck, "%u");
-DEBUGFS_FWSTATS_FILE(rx, dropped, "%u");
-DEBUGFS_FWSTATS_FILE(rx, fcs_err, "%u");
-DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, "%u");
-DEBUGFS_FWSTATS_FILE(rx, path_reset, "%u");
-DEBUGFS_FWSTATS_FILE(rx, reset_counter, "%u");
-
-DEBUGFS_FWSTATS_FILE(dma, rx_requested, "%u");
-DEBUGFS_FWSTATS_FILE(dma, rx_errors, "%u");
-DEBUGFS_FWSTATS_FILE(dma, tx_requested, "%u");
-DEBUGFS_FWSTATS_FILE(dma, tx_errors, "%u");
-
-DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, "%u");
-DEBUGFS_FWSTATS_FILE(isr, fiqs, "%u");
-DEBUGFS_FWSTATS_FILE(isr, rx_headers, "%u");
-DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, "%u");
-DEBUGFS_FWSTATS_FILE(isr, rx_rdys, "%u");
-DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
-DEBUGFS_FWSTATS_FILE(isr, tx_procs, "%u");
-DEBUGFS_FWSTATS_FILE(isr, decrypt_done, "%u");
-DEBUGFS_FWSTATS_FILE(isr, dma0_done, "%u");
-DEBUGFS_FWSTATS_FILE(isr, dma1_done, "%u");
-DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, "%u");
-DEBUGFS_FWSTATS_FILE(isr, commands, "%u");
-DEBUGFS_FWSTATS_FILE(isr, rx_procs, "%u");
-DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, "%u");
-DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, "%u");
-DEBUGFS_FWSTATS_FILE(isr, pci_pm, "%u");
-DEBUGFS_FWSTATS_FILE(isr, wakeups, "%u");
-DEBUGFS_FWSTATS_FILE(isr, low_rssi, "%u");
-
-DEBUGFS_FWSTATS_FILE(wep, addr_key_count, "%u");
-DEBUGFS_FWSTATS_FILE(wep, default_key_count, "%u");
-/* skipping wep.reserved */
-DEBUGFS_FWSTATS_FILE(wep, key_not_found, "%u");
-DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, "%u");
-DEBUGFS_FWSTATS_FILE(wep, packets, "%u");
-DEBUGFS_FWSTATS_FILE(wep, interrupt, "%u");
-
-DEBUGFS_FWSTATS_FILE(pwr, ps_enter, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, elp_enter, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, power_save_off, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, enable_ps, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, disable_ps, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, "%u");
-/* skipping cont_miss_bcns_spread for now */
-DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, "%u");
-
-DEBUGFS_FWSTATS_FILE(mic, rx_pkts, "%u");
-DEBUGFS_FWSTATS_FILE(mic, calc_failure, "%u");
-
-DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, "%u");
-DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, "%u");
-DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, "%u");
-DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, "%u");
-DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, "%u");
-DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, "%u");
-
-DEBUGFS_FWSTATS_FILE(event, heart_beat, "%u");
-DEBUGFS_FWSTATS_FILE(event, calibration, "%u");
-DEBUGFS_FWSTATS_FILE(event, rx_mismatch, "%u");
-DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, "%u");
-DEBUGFS_FWSTATS_FILE(event, rx_pool, "%u");
-DEBUGFS_FWSTATS_FILE(event, oom_late, "%u");
-DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, "%u");
-DEBUGFS_FWSTATS_FILE(event, tx_stuck, "%u");
-
-DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, "%u");
-DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, "%u");
-DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, "%u");
-DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, "%u");
-DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, "%u");
-DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, "%u");
-DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, "%u");
-
-DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, "%u");
-DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, "%u");
-DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data, "%u");
-DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, "%u");
-DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, "%u");
+EXPORT_SYMBOL_GPL(wl1271_debugfs_update_stats);
DEBUGFS_READONLY_FILE(retry_count, "%u", wl->stats.retry_count);
DEBUGFS_READONLY_FILE(excessive_retries, "%u",
@@ -241,6 +104,89 @@ static const struct file_operations tx_queue_len_ops = {
.llseek = default_llseek,
};
+static void chip_op_handler(struct wl1271 *wl, unsigned long value,
+ void *arg)
+{
+ int ret;
+ int (*chip_op) (struct wl1271 *wl);
+
+ if (!arg) {
+ wl1271_warning("debugfs chip_op_handler with no callback");
+ return;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ return;
+
+ chip_op = arg;
+ chip_op(wl);
+
+ wl1271_ps_elp_sleep(wl);
+}
+
+
+static inline void no_write_handler(struct wl1271 *wl,
+ unsigned long value,
+ unsigned long param)
+{
+}
+
+#define WL12XX_CONF_DEBUGFS(param, conf_sub_struct, \
+ min_val, max_val, write_handler_locked, \
+ write_handler_arg) \
+ static ssize_t param##_read(struct file *file, \
+ char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+ { \
+ struct wl1271 *wl = file->private_data; \
+ return wl1271_format_buffer(user_buf, count, \
+ ppos, "%d\n", \
+ wl->conf.conf_sub_struct.param); \
+ } \
+ \
+ static ssize_t param##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+ { \
+ struct wl1271 *wl = file->private_data; \
+ unsigned long value; \
+ int ret; \
+ \
+ ret = kstrtoul_from_user(user_buf, count, 10, &value); \
+ if (ret < 0) { \
+ wl1271_warning("illegal value for " #param); \
+ return -EINVAL; \
+ } \
+ \
+ if (value < min_val || value > max_val) { \
+ wl1271_warning(#param " is not in valid range"); \
+ return -ERANGE; \
+ } \
+ \
+ mutex_lock(&wl->mutex); \
+ wl->conf.conf_sub_struct.param = value; \
+ \
+ write_handler_locked(wl, value, write_handler_arg); \
+ \
+ mutex_unlock(&wl->mutex); \
+ return count; \
+ } \
+ \
+ static const struct file_operations param##_ops = { \
+ .read = param##_read, \
+ .write = param##_write, \
+ .open = simple_open, \
+ .llseek = default_llseek, \
+ };
+
+WL12XX_CONF_DEBUGFS(irq_pkt_threshold, rx, 0, 65535,
+ chip_op_handler, wl1271_acx_init_rx_interrupt)
+WL12XX_CONF_DEBUGFS(irq_blk_threshold, rx, 0, 65535,
+ chip_op_handler, wl1271_acx_init_rx_interrupt)
+WL12XX_CONF_DEBUGFS(irq_timeout, rx, 0, 100,
+ chip_op_handler, wl1271_acx_init_rx_interrupt)
+
static ssize_t gpio_power_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -535,8 +481,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
DRIVER_STATE_PRINT_LHEX(ap_ps_map);
DRIVER_STATE_PRINT_HEX(quirks);
DRIVER_STATE_PRINT_HEX(irq);
- DRIVER_STATE_PRINT_HEX(ref_clock);
- DRIVER_STATE_PRINT_HEX(tcxo_clock);
+ /* TODO: ref_clock and tcxo_clock were moved to wl12xx priv */
DRIVER_STATE_PRINT_HEX(hw_pg_ver);
DRIVER_STATE_PRINT_HEX(platform_quirks);
DRIVER_STATE_PRINT_HEX(chip.id);
@@ -647,7 +592,6 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
VIF_STATE_PRINT_INT(last_rssi_event);
VIF_STATE_PRINT_INT(ba_support);
VIF_STATE_PRINT_INT(ba_allowed);
- VIF_STATE_PRINT_INT(is_gem);
VIF_STATE_PRINT_LLHEX(tx_security_seq);
VIF_STATE_PRINT_INT(tx_security_last_seq_lsb);
}
@@ -1002,108 +946,281 @@ static const struct file_operations beacon_filtering_ops = {
.llseek = default_llseek,
};
-static int wl1271_debugfs_add_files(struct wl1271 *wl,
- struct dentry *rootdir)
+static ssize_t fw_stats_raw_read(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
{
- int ret = 0;
- struct dentry *entry, *stats, *streaming;
+ struct wl1271 *wl = file->private_data;
- stats = debugfs_create_dir("fw-statistics", rootdir);
- if (!stats || IS_ERR(stats)) {
- entry = stats;
- goto err;
+ wl1271_debugfs_update_stats(wl);
+
+ return simple_read_from_buffer(userbuf, count, ppos,
+ wl->stats.fw_stats,
+ wl->stats.fw_stats_len);
+}
+
+static const struct file_operations fw_stats_raw_ops = {
+ .read = fw_stats_raw_read,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static ssize_t sleep_auth_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+
+ return wl1271_format_buffer(user_buf, count,
+ ppos, "%d\n",
+ wl->sleep_auth);
+}
+
+static ssize_t sleep_auth_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &value);
+ if (ret < 0) {
+ wl1271_warning("illegal value in sleep_auth");
+ return -EINVAL;
+ }
+
+ if (value < 0 || value > WL1271_PSM_MAX) {
+ wl1271_warning("sleep_auth must be between 0 and %d",
+ WL1271_PSM_MAX);
+ return -ERANGE;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ wl->conf.conn.sta_sleep_auth = value;
+
+ if (wl->state == WL1271_STATE_OFF) {
+ /* this will show up on "read" in case we are off */
+ wl->sleep_auth = value;
+ goto out;
}
- DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow);
-
- DEBUGFS_FWSTATS_ADD(rx, out_of_mem);
- DEBUGFS_FWSTATS_ADD(rx, hdr_overflow);
- DEBUGFS_FWSTATS_ADD(rx, hw_stuck);
- DEBUGFS_FWSTATS_ADD(rx, dropped);
- DEBUGFS_FWSTATS_ADD(rx, fcs_err);
- DEBUGFS_FWSTATS_ADD(rx, xfr_hint_trig);
- DEBUGFS_FWSTATS_ADD(rx, path_reset);
- DEBUGFS_FWSTATS_ADD(rx, reset_counter);
-
- DEBUGFS_FWSTATS_ADD(dma, rx_requested);
- DEBUGFS_FWSTATS_ADD(dma, rx_errors);
- DEBUGFS_FWSTATS_ADD(dma, tx_requested);
- DEBUGFS_FWSTATS_ADD(dma, tx_errors);
-
- DEBUGFS_FWSTATS_ADD(isr, cmd_cmplt);
- DEBUGFS_FWSTATS_ADD(isr, fiqs);
- DEBUGFS_FWSTATS_ADD(isr, rx_headers);
- DEBUGFS_FWSTATS_ADD(isr, rx_mem_overflow);
- DEBUGFS_FWSTATS_ADD(isr, rx_rdys);
- DEBUGFS_FWSTATS_ADD(isr, irqs);
- DEBUGFS_FWSTATS_ADD(isr, tx_procs);
- DEBUGFS_FWSTATS_ADD(isr, decrypt_done);
- DEBUGFS_FWSTATS_ADD(isr, dma0_done);
- DEBUGFS_FWSTATS_ADD(isr, dma1_done);
- DEBUGFS_FWSTATS_ADD(isr, tx_exch_complete);
- DEBUGFS_FWSTATS_ADD(isr, commands);
- DEBUGFS_FWSTATS_ADD(isr, rx_procs);
- DEBUGFS_FWSTATS_ADD(isr, hw_pm_mode_changes);
- DEBUGFS_FWSTATS_ADD(isr, host_acknowledges);
- DEBUGFS_FWSTATS_ADD(isr, pci_pm);
- DEBUGFS_FWSTATS_ADD(isr, wakeups);
- DEBUGFS_FWSTATS_ADD(isr, low_rssi);
-
- DEBUGFS_FWSTATS_ADD(wep, addr_key_count);
- DEBUGFS_FWSTATS_ADD(wep, default_key_count);
- /* skipping wep.reserved */
- DEBUGFS_FWSTATS_ADD(wep, key_not_found);
- DEBUGFS_FWSTATS_ADD(wep, decrypt_fail);
- DEBUGFS_FWSTATS_ADD(wep, packets);
- DEBUGFS_FWSTATS_ADD(wep, interrupt);
-
- DEBUGFS_FWSTATS_ADD(pwr, ps_enter);
- DEBUGFS_FWSTATS_ADD(pwr, elp_enter);
- DEBUGFS_FWSTATS_ADD(pwr, missing_bcns);
- DEBUGFS_FWSTATS_ADD(pwr, wake_on_host);
- DEBUGFS_FWSTATS_ADD(pwr, wake_on_timer_exp);
- DEBUGFS_FWSTATS_ADD(pwr, tx_with_ps);
- DEBUGFS_FWSTATS_ADD(pwr, tx_without_ps);
- DEBUGFS_FWSTATS_ADD(pwr, rcvd_beacons);
- DEBUGFS_FWSTATS_ADD(pwr, power_save_off);
- DEBUGFS_FWSTATS_ADD(pwr, enable_ps);
- DEBUGFS_FWSTATS_ADD(pwr, disable_ps);
- DEBUGFS_FWSTATS_ADD(pwr, fix_tsf_ps);
- /* skipping cont_miss_bcns_spread for now */
- DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_beacons);
-
- DEBUGFS_FWSTATS_ADD(mic, rx_pkts);
- DEBUGFS_FWSTATS_ADD(mic, calc_failure);
-
- DEBUGFS_FWSTATS_ADD(aes, encrypt_fail);
- DEBUGFS_FWSTATS_ADD(aes, decrypt_fail);
- DEBUGFS_FWSTATS_ADD(aes, encrypt_packets);
- DEBUGFS_FWSTATS_ADD(aes, decrypt_packets);
- DEBUGFS_FWSTATS_ADD(aes, encrypt_interrupt);
- DEBUGFS_FWSTATS_ADD(aes, decrypt_interrupt);
-
- DEBUGFS_FWSTATS_ADD(event, heart_beat);
- DEBUGFS_FWSTATS_ADD(event, calibration);
- DEBUGFS_FWSTATS_ADD(event, rx_mismatch);
- DEBUGFS_FWSTATS_ADD(event, rx_mem_empty);
- DEBUGFS_FWSTATS_ADD(event, rx_pool);
- DEBUGFS_FWSTATS_ADD(event, oom_late);
- DEBUGFS_FWSTATS_ADD(event, phy_transmit_error);
- DEBUGFS_FWSTATS_ADD(event, tx_stuck);
-
- DEBUGFS_FWSTATS_ADD(ps, pspoll_timeouts);
- DEBUGFS_FWSTATS_ADD(ps, upsd_timeouts);
- DEBUGFS_FWSTATS_ADD(ps, upsd_max_sptime);
- DEBUGFS_FWSTATS_ADD(ps, upsd_max_apturn);
- DEBUGFS_FWSTATS_ADD(ps, pspoll_max_apturn);
- DEBUGFS_FWSTATS_ADD(ps, pspoll_utilization);
- DEBUGFS_FWSTATS_ADD(ps, upsd_utilization);
-
- DEBUGFS_FWSTATS_ADD(rxpipe, rx_prep_beacon_drop);
- DEBUGFS_FWSTATS_ADD(rxpipe, descr_host_int_trig_rx_data);
- DEBUGFS_FWSTATS_ADD(rxpipe, beacon_buffer_thres_host_int_trig_rx_data);
- DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data);
- DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_acx_sleep_auth(wl, value);
+ if (ret < 0)
+ goto out_sleep;
+
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static const struct file_operations sleep_auth_ops = {
+ .read = sleep_auth_read,
+ .write = sleep_auth_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static ssize_t dev_mem_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ struct wlcore_partition_set part, old_part;
+ size_t bytes = count;
+ int ret;
+ char *buf;
+
+ /* only requests of dword-aligned size and offset are supported */
+ if (bytes % 4)
+ return -EINVAL;
+
+ if (*ppos % 4)
+ return -EINVAL;
+
+ /* function should return in reasonable time */
+ bytes = min(bytes, WLCORE_MAX_BLOCK_SIZE);
+
+ if (bytes == 0)
+ return -EINVAL;
+
+ memset(&part, 0, sizeof(part));
+ part.mem.start = file->f_pos;
+ part.mem.size = bytes;
+
+ buf = kmalloc(bytes, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&wl->mutex);
+
+ if (wl->state == WL1271_STATE_OFF) {
+ ret = -EFAULT;
+ goto skip_read;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto skip_read;
+
+ /* store current partition and switch partition */
+ memcpy(&old_part, &wl->curr_part, sizeof(old_part));
+ ret = wlcore_set_partition(wl, &part);
+ if (ret < 0)
+ goto part_err;
+
+ ret = wlcore_raw_read(wl, 0, buf, bytes, false);
+ if (ret < 0)
+ goto read_err;
+
+read_err:
+ /* recover partition */
+ ret = wlcore_set_partition(wl, &old_part);
+ if (ret < 0)
+ goto part_err;
+
+part_err:
+ wl1271_ps_elp_sleep(wl);
+
+skip_read:
+ mutex_unlock(&wl->mutex);
+
+ if (ret == 0) {
+ ret = copy_to_user(user_buf, buf, bytes);
+ if (ret < bytes) {
+ bytes -= ret;
+ *ppos += bytes;
+ ret = 0;
+ } else {
+ ret = -EFAULT;
+ }
+ }
+
+ kfree(buf);
+
+ return ((ret == 0) ? bytes : ret);
+}
+
+static ssize_t dev_mem_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ struct wlcore_partition_set part, old_part;
+ size_t bytes = count;
+ int ret;
+ char *buf;
+
+ /* only requests of dword-aligned size and offset are supported */
+ if (bytes % 4)
+ return -EINVAL;
+
+ if (*ppos % 4)
+ return -EINVAL;
+
+ /* function should return in reasonable time */
+ bytes = min(bytes, WLCORE_MAX_BLOCK_SIZE);
+
+ if (bytes == 0)
+ return -EINVAL;
+
+ memset(&part, 0, sizeof(part));
+ part.mem.start = file->f_pos;
+ part.mem.size = bytes;
+
+ buf = kmalloc(bytes, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = copy_from_user(buf, user_buf, bytes);
+ if (ret) {
+ ret = -EFAULT;
+ goto err_out;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ if (wl->state == WL1271_STATE_OFF) {
+ ret = -EFAULT;
+ goto skip_write;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto skip_write;
+
+ /* store current partition and switch partition */
+ memcpy(&old_part, &wl->curr_part, sizeof(old_part));
+ ret = wlcore_set_partition(wl, &part);
+ if (ret < 0)
+ goto part_err;
+
+ ret = wlcore_raw_write(wl, 0, buf, bytes, false);
+ if (ret < 0)
+ goto write_err;
+
+write_err:
+ /* recover partition */
+ ret = wlcore_set_partition(wl, &old_part);
+ if (ret < 0)
+ goto part_err;
+
+part_err:
+ wl1271_ps_elp_sleep(wl);
+
+skip_write:
+ mutex_unlock(&wl->mutex);
+
+ if (ret == 0)
+ *ppos += bytes;
+
+err_out:
+ kfree(buf);
+
+ return ((ret == 0) ? bytes : ret);
+}
+
+static loff_t dev_mem_seek(struct file *file, loff_t offset, int orig)
+{
+ loff_t ret;
+
+ /* only requests of dword-aligned size and offset are supported */
+ if (offset % 4)
+ return -EINVAL;
+
+ switch (orig) {
+ case SEEK_SET:
+ file->f_pos = offset;
+ ret = file->f_pos;
+ break;
+ case SEEK_CUR:
+ file->f_pos += offset;
+ ret = file->f_pos;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct file_operations dev_mem_ops = {
+ .open = simple_open,
+ .read = dev_mem_read,
+ .write = dev_mem_write,
+ .llseek = dev_mem_seek,
+};
+
+static int wl1271_debugfs_add_files(struct wl1271 *wl,
+ struct dentry *rootdir)
+{
+ int ret = 0;
+ struct dentry *entry, *streaming;
DEBUGFS_ADD(tx_queue_len, rootdir);
DEBUGFS_ADD(retry_count, rootdir);
@@ -1120,6 +1237,11 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_ADD(dynamic_ps_timeout, rootdir);
DEBUGFS_ADD(forced_ps, rootdir);
DEBUGFS_ADD(split_scan_timeout, rootdir);
+ DEBUGFS_ADD(irq_pkt_threshold, rootdir);
+ DEBUGFS_ADD(irq_blk_threshold, rootdir);
+ DEBUGFS_ADD(irq_timeout, rootdir);
+ DEBUGFS_ADD(fw_stats_raw, rootdir);
+ DEBUGFS_ADD(sleep_auth, rootdir);
streaming = debugfs_create_dir("rx_streaming", rootdir);
if (!streaming || IS_ERR(streaming))
@@ -1128,6 +1250,7 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_ADD_PREFIX(rx_streaming, interval, streaming);
DEBUGFS_ADD_PREFIX(rx_streaming, always, streaming);
+ DEBUGFS_ADD_PREFIX(dev, mem, rootdir);
return 0;
@@ -1145,7 +1268,7 @@ void wl1271_debugfs_reset(struct wl1271 *wl)
if (!wl->stats.fw_stats)
return;
- memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
+ memset(wl->stats.fw_stats, 0, wl->stats.fw_stats_len);
wl->stats.retry_count = 0;
wl->stats.excessive_retries = 0;
}
@@ -1160,34 +1283,34 @@ int wl1271_debugfs_init(struct wl1271 *wl)
if (IS_ERR(rootdir)) {
ret = PTR_ERR(rootdir);
- goto err;
+ goto out;
}
- wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats),
- GFP_KERNEL);
-
+ wl->stats.fw_stats = kzalloc(wl->stats.fw_stats_len, GFP_KERNEL);
if (!wl->stats.fw_stats) {
ret = -ENOMEM;
- goto err_fw;
+ goto out_remove;
}
wl->stats.fw_stats_update = jiffies;
ret = wl1271_debugfs_add_files(wl, rootdir);
+ if (ret < 0)
+ goto out_exit;
+ ret = wlcore_debugfs_init(wl, rootdir);
if (ret < 0)
- goto err_file;
+ goto out_exit;
- return 0;
+ goto out;
-err_file:
- kfree(wl->stats.fw_stats);
- wl->stats.fw_stats = NULL;
+out_exit:
+ wl1271_debugfs_exit(wl);
-err_fw:
+out_remove:
debugfs_remove_recursive(rootdir);
-err:
+out:
return ret;
}
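For reference, this is roughly what WL12XX_CONF_DEBUGFS(irq_timeout, rx, 0, 100, chip_op_handler, wl1271_acx_init_rx_interrupt) generates for the write path, expanded by hand as a sketch rather than literal preprocessor output:

static ssize_t irq_timeout_write(struct file *file,
				 const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct wl1271 *wl = file->private_data;
	unsigned long value;
	int ret;

	ret = kstrtoul_from_user(user_buf, count, 10, &value);
	if (ret < 0) {
		wl1271_warning("illegal value for irq_timeout");
		return -EINVAL;
	}

	if (value < 0 || value > 100) {
		wl1271_warning("irq_timeout is not in valid range");
		return -ERANGE;
	}

	mutex_lock(&wl->mutex);
	wl->conf.rx.irq_timeout = value;

	/* Wake the chip and re-apply the RX interrupt configuration. */
	chip_op_handler(wl, value, wl1271_acx_init_rx_interrupt);

	mutex_unlock(&wl->mutex);
	return count;
}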
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
index a8d3aef..f7381dd 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.h
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
@@ -26,8 +26,95 @@
#include "wlcore.h"
+int wl1271_format_buffer(char __user *userbuf, size_t count,
+ loff_t *ppos, char *fmt, ...);
+
int wl1271_debugfs_init(struct wl1271 *wl);
void wl1271_debugfs_exit(struct wl1271 *wl);
void wl1271_debugfs_reset(struct wl1271 *wl);
+void wl1271_debugfs_update_stats(struct wl1271 *wl);
+
+#define DEBUGFS_FORMAT_BUFFER_SIZE 256
+
+#define DEBUGFS_READONLY_FILE(name, fmt, value...) \
+static ssize_t name## _read(struct file *file, char __user *userbuf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct wl1271 *wl = file->private_data; \
+ return wl1271_format_buffer(userbuf, count, ppos, \
+ fmt "\n", ##value); \
+} \
+ \
+static const struct file_operations name## _ops = { \
+ .read = name## _read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_ADD(name, parent) \
+ do { \
+ entry = debugfs_create_file(#name, 0400, parent, \
+ wl, &name## _ops); \
+ if (!entry || IS_ERR(entry)) \
+ goto err; \
+ } while (0);
+
+
+#define DEBUGFS_ADD_PREFIX(prefix, name, parent) \
+ do { \
+ entry = debugfs_create_file(#name, 0400, parent, \
+ wl, &prefix## _## name## _ops); \
+ if (!entry || IS_ERR(entry)) \
+ goto err; \
+ } while (0);
+
+#define DEBUGFS_FWSTATS_FILE(sub, name, fmt, struct_type) \
+static ssize_t sub## _ ##name## _read(struct file *file, \
+ char __user *userbuf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct wl1271 *wl = file->private_data; \
+ struct struct_type *stats = wl->stats.fw_stats; \
+ \
+ wl1271_debugfs_update_stats(wl); \
+ \
+ return wl1271_format_buffer(userbuf, count, ppos, fmt "\n", \
+ stats->sub.name); \
+} \
+ \
+static const struct file_operations sub## _ ##name## _ops = { \
+ .read = sub## _ ##name## _read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_FWSTATS_FILE_ARRAY(sub, name, len, struct_type) \
+static ssize_t sub## _ ##name## _read(struct file *file, \
+ char __user *userbuf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct wl1271 *wl = file->private_data; \
+ struct struct_type *stats = wl->stats.fw_stats; \
+ char buf[DEBUGFS_FORMAT_BUFFER_SIZE] = ""; \
+ int res, i; \
+ \
+ wl1271_debugfs_update_stats(wl); \
+ \
+ for (i = 0; i < len; i++) \
+ res = snprintf(buf, sizeof(buf), "%s[%d] = %d\n", \
+ buf, i, stats->sub.name[i]); \
+ \
+ return wl1271_format_buffer(userbuf, count, ppos, "%s", buf); \
+} \
+ \
+static const struct file_operations sub## _ ##name## _ops = { \
+ .read = sub## _ ##name## _read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_FWSTATS_ADD(sub, name) \
+ DEBUGFS_ADD(sub## _ ##name, stats)
+
#endif /* WL1271_DEBUGFS_H */
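With the statistics macros moved into this header and parametrized by struct_type, a lower driver instantiates them against its own firmware statistics layout, roughly as below. The wl12xx_acx_statistics struct name and the "fw_stats" directory name are assumptions for illustration; neither appears in this diff:

/* Illustrative only: the lower driver's statistics struct is assumed. */
DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, "%u", wl12xx_acx_statistics);
DEBUGFS_FWSTATS_FILE(rx, out_of_mem, "%u", wl12xx_acx_statistics);

static int wl12xx_debugfs_add_files(struct wl1271 *wl, struct dentry *rootdir)
{
	struct dentry *entry, *stats;

	stats = debugfs_create_dir("fw_stats", rootdir);
	if (!stats || IS_ERR(stats)) {
		entry = stats;
		goto err;
	}

	DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow);
	DEBUGFS_FWSTATS_ADD(rx, out_of_mem);

	return 0;

err:
	if (IS_ERR(entry))
		return PTR_ERR(entry);

	return -ENOMEM;
}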
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 28e2a63..4890705 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -105,6 +105,7 @@ static int wl1271_event_process(struct wl1271 *wl)
u32 vector;
bool disconnect_sta = false;
unsigned long sta_bitmap = 0;
+ int ret;
wl1271_event_mbox_dump(mbox);
@@ -148,15 +149,33 @@ static int wl1271_event_process(struct wl1271 *wl)
int delay = wl->conf.conn.synch_fail_thold *
wl->conf.conn.bss_lose_timeout;
wl1271_info("Beacon loss detected.");
- cancel_delayed_work_sync(&wl->connection_loss_work);
+
+ /*
+ * if the work is already queued, it should take place. We
+ * don't want to delay the connection loss indication
+ * any more.
+ */
ieee80211_queue_delayed_work(wl->hw, &wl->connection_loss_work,
- msecs_to_jiffies(delay));
+ msecs_to_jiffies(delay));
+
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ vif = wl12xx_wlvif_to_vif(wlvif);
+
+ ieee80211_cqm_rssi_notify(
+ vif,
+ NL80211_CQM_RSSI_BEACON_LOSS_EVENT,
+ GFP_KERNEL);
+ }
}
if (vector & REGAINED_BSS_EVENT_ID) {
/* TODO: check for multi-role */
wl1271_info("Beacon regained.");
- cancel_delayed_work_sync(&wl->connection_loss_work);
+ cancel_delayed_work(&wl->connection_loss_work);
+
+ /* sanity check - we can't lose and gain the beacon together */
+ WARN(vector & BSS_LOSE_EVENT_ID,
+ "Concurrent beacon loss and gain from FW");
}
if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
@@ -210,7 +229,9 @@ static int wl1271_event_process(struct wl1271 *wl)
if ((vector & DUMMY_PACKET_EVENT_ID)) {
wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
- wl1271_tx_dummy_packet(wl);
+ ret = wl1271_tx_dummy_packet(wl);
+ if (ret < 0)
+ return ret;
}
/*
@@ -283,8 +304,10 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
return -EINVAL;
/* first we read the mbox descriptor */
- wl1271_read(wl, wl->mbox_ptr[mbox_num], wl->mbox,
- sizeof(*wl->mbox), false);
+ ret = wlcore_read(wl, wl->mbox_ptr[mbox_num], wl->mbox,
+ sizeof(*wl->mbox), false);
+ if (ret < 0)
+ return ret;
/* process the descriptor */
ret = wl1271_event_process(wl);
@@ -295,7 +318,7 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
* TODO: we just need this because one bit is in a different
* place. Is there any better way?
*/
- wl->ops->ack_event(wl);
+ ret = wl->ops->ack_event(wl);
- return 0;
+ return ret;
}
diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h
index 9384b4d..2673d78 100644
--- a/drivers/net/wireless/ti/wlcore/hw_ops.h
+++ b/drivers/net/wireless/ti/wlcore/hw_ops.h
@@ -65,11 +65,13 @@ wlcore_hw_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc)
return wl->ops->get_rx_buf_align(wl, rx_desc);
}
-static inline void
+static inline int
wlcore_hw_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
{
if (wl->ops->prepare_read)
- wl->ops->prepare_read(wl, rx_desc, len);
+ return wl->ops->prepare_read(wl, rx_desc, len);
+
+ return 0;
}
static inline u32
@@ -81,10 +83,12 @@ wlcore_hw_get_rx_packet_len(struct wl1271 *wl, void *rx_data, u32 data_len)
return wl->ops->get_rx_packet_len(wl, rx_data, data_len);
}
-static inline void wlcore_hw_tx_delayed_compl(struct wl1271 *wl)
+static inline int wlcore_hw_tx_delayed_compl(struct wl1271 *wl)
{
if (wl->ops->tx_delayed_compl)
- wl->ops->tx_delayed_compl(wl);
+ return wl->ops->tx_delayed_compl(wl);
+
+ return 0;
}
static inline void wlcore_hw_tx_immediate_compl(struct wl1271 *wl)
@@ -119,4 +123,82 @@ static inline int wlcore_identify_fw(struct wl1271 *wl)
return 0;
}
+static inline void
+wlcore_hw_set_tx_desc_csum(struct wl1271 *wl,
+ struct wl1271_tx_hw_descr *desc,
+ struct sk_buff *skb)
+{
+ if (!wl->ops->set_tx_desc_csum)
+ BUG_ON(1);
+
+ wl->ops->set_tx_desc_csum(wl, desc, skb);
+}
+
+static inline void
+wlcore_hw_set_rx_csum(struct wl1271 *wl,
+ struct wl1271_rx_descriptor *desc,
+ struct sk_buff *skb)
+{
+ if (wl->ops->set_rx_csum)
+ wl->ops->set_rx_csum(wl, desc, skb);
+}
+
+static inline u32
+wlcore_hw_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
+{
+ if (wl->ops->ap_get_mimo_wide_rate_mask)
+ return wl->ops->ap_get_mimo_wide_rate_mask(wl, wlvif);
+
+ return 0;
+}
+
+static inline int
+wlcore_debugfs_init(struct wl1271 *wl, struct dentry *rootdir)
+{
+ if (wl->ops->debugfs_init)
+ return wl->ops->debugfs_init(wl, rootdir);
+
+ return 0;
+}
+
+static inline int
+wlcore_handle_static_data(struct wl1271 *wl, void *static_data)
+{
+ if (wl->ops->handle_static_data)
+ return wl->ops->handle_static_data(wl, static_data);
+
+ return 0;
+}
+
+static inline int
+wlcore_hw_get_spare_blocks(struct wl1271 *wl, bool is_gem)
+{
+ if (!wl->ops->get_spare_blocks)
+ BUG_ON(1);
+
+ return wl->ops->get_spare_blocks(wl, is_gem);
+}
+
+static inline int
+wlcore_hw_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key_conf)
+{
+ if (!wl->ops->set_key)
+ BUG_ON(1);
+
+ return wl->ops->set_key(wl, cmd, vif, sta, key_conf);
+}
+
+static inline u32
+wlcore_hw_pre_pkt_send(struct wl1271 *wl, u32 buf_offset, u32 last_len)
+{
+ if (wl->ops->pre_pkt_send)
+ return wl->ops->pre_pkt_send(wl, buf_offset, last_len);
+
+ return buf_offset;
+}
+
#endif
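The wrappers above distinguish mandatory hooks (set_tx_desc_csum, get_spare_blocks, set_key, which BUG when missing) from optional ones that fall back to a default. A lower driver would wire its implementation in along these lines; the wl12xx_* handler names are placeholders assumed to exist in the lower driver, not functions shown in this diff:

static struct wlcore_ops wl12xx_ops = {
	/* mandatory - wlcore BUGs if these are left NULL */
	.set_tx_desc_csum	= wl12xx_set_tx_desc_csum,
	.get_spare_blocks	= wl12xx_get_spare_blocks,
	.set_key		= wl12xx_set_key,

	/* optional - the inline wrappers fall back to a default when NULL */
	.prepare_read		= wl12xx_prepare_read,
	.ap_get_mimo_wide_rate_mask = wl12xx_ap_get_mimo_wide_rate_mask,
	.debugfs_init		= wl12xx_debugfs_init,
	.handle_static_data	= NULL,
	.pre_pkt_send		= NULL,
};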
diff --git a/drivers/net/wireless/ti/wlcore/ini.h b/drivers/net/wireless/ti/wlcore/ini.h
index 4cf9ecc..d24fe3b 100644
--- a/drivers/net/wireless/ti/wlcore/ini.h
+++ b/drivers/net/wireless/ti/wlcore/ini.h
@@ -172,7 +172,19 @@ struct wl128x_ini_fem_params_5 {
/* NVS data structure */
#define WL1271_INI_NVS_SECTION_SIZE 468
-#define WL1271_INI_FEM_MODULE_COUNT 2
+
+/* We have four FEM module types: 0-RFMD, 1-TQS, 2-SKW, 3-TQS_HP */
+#define WL1271_INI_FEM_MODULE_COUNT 4
+
+/*
+ * In NVS we only store two FEM module entries -
+ * FEM modules 0,2,3 are stored in entry 0
+ * FEM module 1 is stored in entry 1
+ */
+#define WL12XX_NVS_FEM_MODULE_COUNT 2
+
+#define WL12XX_FEM_TO_NVS_ENTRY(ini_fem_module) \
+ ((ini_fem_module) == 1 ? 1 : 0)
#define WL1271_INI_LEGACY_NVS_FILE_SIZE 800
@@ -188,13 +200,13 @@ struct wl1271_nvs_file {
struct {
struct wl1271_ini_fem_params_2 params;
u8 padding;
- } dyn_radio_params_2[WL1271_INI_FEM_MODULE_COUNT];
+ } dyn_radio_params_2[WL12XX_NVS_FEM_MODULE_COUNT];
struct wl1271_ini_band_params_5 stat_radio_params_5;
u8 padding3;
struct {
struct wl1271_ini_fem_params_5 params;
u8 padding;
- } dyn_radio_params_5[WL1271_INI_FEM_MODULE_COUNT];
+ } dyn_radio_params_5[WL12XX_NVS_FEM_MODULE_COUNT];
} __packed;
struct wl128x_nvs_file {
@@ -209,12 +221,12 @@ struct wl128x_nvs_file {
struct {
struct wl128x_ini_fem_params_2 params;
u8 padding;
- } dyn_radio_params_2[WL1271_INI_FEM_MODULE_COUNT];
+ } dyn_radio_params_2[WL12XX_NVS_FEM_MODULE_COUNT];
struct wl128x_ini_band_params_5 stat_radio_params_5;
u8 padding3;
struct {
struct wl128x_ini_fem_params_5 params;
u8 padding;
- } dyn_radio_params_5[WL1271_INI_FEM_MODULE_COUNT];
+ } dyn_radio_params_5[WL12XX_NVS_FEM_MODULE_COUNT];
} __packed;
#endif
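A short sketch of how the WL12XX_FEM_TO_NVS_ENTRY() mapping is meant to be used when looking up the dynamic radio parameters of a given FEM module; the helper is illustrative and not part of this patch:

static const struct wl1271_ini_fem_params_2 *
wl1271_get_fem_params_2(const struct wl1271_nvs_file *nvs, u8 fem)
{
	u8 entry;

	if (fem >= WL1271_INI_FEM_MODULE_COUNT)
		return NULL;

	/* FEM modules 0, 2 and 3 share NVS entry 0; FEM module 1 uses entry 1 */
	entry = WL12XX_FEM_TO_NVS_ENTRY(fem);

	return &nvs->dyn_radio_params_2[entry].params;
}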
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 9f89255..a3c8677 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -54,6 +54,22 @@ int wl1271_init_templates_config(struct wl1271 *wl)
if (ret < 0)
return ret;
+ if (wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL) {
+ ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
+ CMD_TEMPL_APP_PROBE_REQ_2_4, NULL,
+ WL1271_CMD_TEMPL_MAX_SIZE,
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
+ CMD_TEMPL_APP_PROBE_REQ_5, NULL,
+ WL1271_CMD_TEMPL_MAX_SIZE,
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+ }
+
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
CMD_TEMPL_NULL_DATA, NULL,
sizeof(struct wl12xx_null_data_template),
@@ -460,6 +476,9 @@ int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
/* unconditionally enable HT rates */
supported_rates |= CONF_TX_MCS_RATES;
+ /* get extra MIMO or wide-chan rates where the HW supports it */
+ supported_rates |= wlcore_hw_ap_get_mimo_wide_rate_mask(wl, wlvif);
+
/* configure unicast TX rate classes */
for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
rc.enabled_rates = supported_rates;
@@ -551,29 +570,28 @@ int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret, i;
- /*
- * consider all existing roles before configuring psm.
- * TODO: reconfigure on interface removal.
- */
- if (!wl->ap_count) {
- if (is_ap) {
- /* Configure for power always on */
+ /* consider all existing roles before configuring psm. */
+
+ if (wl->ap_count == 0 && is_ap) { /* first AP */
+ /* Configure for power always on */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+ if (ret < 0)
+ return ret;
+ /* first STA, no APs */
+ } else if (wl->sta_count == 0 && wl->ap_count == 0 && !is_ap) {
+ u8 sta_auth = wl->conf.conn.sta_sleep_auth;
+ /* Configure for power according to debugfs */
+ if (sta_auth != WL1271_PSM_ILLEGAL)
+ ret = wl1271_acx_sleep_auth(wl, sta_auth);
+ /* Configure for power always on */
+ else if (wl->quirks & WLCORE_QUIRK_NO_ELP)
ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
- if (ret < 0)
- return ret;
- } else if (!wl->sta_count) {
- if (wl->quirks & WLCORE_QUIRK_NO_ELP) {
- /* Configure for power always on */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
- if (ret < 0)
- return ret;
- } else {
- /* Configure for ELP power saving */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
- if (ret < 0)
- return ret;
- }
- }
+ /* Configure for ELP power saving */
+ else
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
+
+ if (ret < 0)
+ return ret;
}
/* Mode specific init */
diff --git a/drivers/net/wireless/ti/wlcore/io.c b/drivers/net/wireless/ti/wlcore/io.c
index 7cd0081..68e74ee 100644
--- a/drivers/net/wireless/ti/wlcore/io.c
+++ b/drivers/net/wireless/ti/wlcore/io.c
@@ -48,12 +48,24 @@ void wlcore_disable_interrupts(struct wl1271 *wl)
}
EXPORT_SYMBOL_GPL(wlcore_disable_interrupts);
+void wlcore_disable_interrupts_nosync(struct wl1271 *wl)
+{
+ disable_irq_nosync(wl->irq);
+}
+EXPORT_SYMBOL_GPL(wlcore_disable_interrupts_nosync);
+
void wlcore_enable_interrupts(struct wl1271 *wl)
{
enable_irq(wl->irq);
}
EXPORT_SYMBOL_GPL(wlcore_enable_interrupts);
+void wlcore_synchronize_interrupts(struct wl1271 *wl)
+{
+ synchronize_irq(wl->irq);
+}
+EXPORT_SYMBOL_GPL(wlcore_synchronize_interrupts);
+
int wlcore_translate_addr(struct wl1271 *wl, int addr)
{
struct wlcore_partition_set *part = &wl->curr_part;
@@ -122,9 +134,11 @@ EXPORT_SYMBOL_GPL(wlcore_translate_addr);
* | |
*
*/
-void wlcore_set_partition(struct wl1271 *wl,
- const struct wlcore_partition_set *p)
+int wlcore_set_partition(struct wl1271 *wl,
+ const struct wlcore_partition_set *p)
{
+ int ret;
+
/* copy partition info */
memcpy(&wl->curr_part, p, sizeof(*p));
@@ -137,28 +151,41 @@ void wlcore_set_partition(struct wl1271 *wl,
wl1271_debug(DEBUG_IO, "mem3_start %08X mem3_size %08X",
p->mem3.start, p->mem3.size);
- wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
- wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
- wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
- wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
- wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
- wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
+ ret = wlcore_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
+ if (ret < 0)
+ goto out;
+
/*
* We don't need the size of the last partition, as it is
* automatically calculated based on the total memory size and
* the sizes of the previous partitions.
*/
- wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
-}
-EXPORT_SYMBOL_GPL(wlcore_set_partition);
-
-void wlcore_select_partition(struct wl1271 *wl, u8 part)
-{
- wl1271_debug(DEBUG_IO, "setting partition %d", part);
+ ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
- wlcore_set_partition(wl, &wl->ptable[part]);
+out:
+ return ret;
}
-EXPORT_SYMBOL_GPL(wlcore_select_partition);
+EXPORT_SYMBOL_GPL(wlcore_set_partition);
void wl1271_io_reset(struct wl1271 *wl)
{
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 8942954..259149f 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -45,86 +45,122 @@
struct wl1271;
void wlcore_disable_interrupts(struct wl1271 *wl);
+void wlcore_disable_interrupts_nosync(struct wl1271 *wl);
void wlcore_enable_interrupts(struct wl1271 *wl);
+void wlcore_synchronize_interrupts(struct wl1271 *wl);
void wl1271_io_reset(struct wl1271 *wl);
void wl1271_io_init(struct wl1271 *wl);
int wlcore_translate_addr(struct wl1271 *wl, int addr);
/* Raw target IO, address is not translated */
-static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
+static inline int __must_check wlcore_raw_write(struct wl1271 *wl, int addr,
+ void *buf, size_t len,
+ bool fixed)
{
- wl->if_ops->write(wl->dev, addr, buf, len, fixed);
+ int ret;
+
+ if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags))
+ return -EIO;
+
+ ret = wl->if_ops->write(wl->dev, addr, buf, len, fixed);
+ if (ret && wl->state != WL1271_STATE_OFF)
+ set_bit(WL1271_FLAG_IO_FAILED, &wl->flags);
+
+ return ret;
}
-static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
+static inline int __must_check wlcore_raw_read(struct wl1271 *wl, int addr,
+ void *buf, size_t len,
+ bool fixed)
{
- wl->if_ops->read(wl->dev, addr, buf, len, fixed);
+ int ret;
+
+ if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags))
+ return -EIO;
+
+ ret = wl->if_ops->read(wl->dev, addr, buf, len, fixed);
+ if (ret && wl->state != WL1271_STATE_OFF)
+ set_bit(WL1271_FLAG_IO_FAILED, &wl->flags);
+
+ return ret;
}
-static inline void wlcore_raw_read_data(struct wl1271 *wl, int reg, void *buf,
- size_t len, bool fixed)
+static inline int __must_check wlcore_raw_read_data(struct wl1271 *wl, int reg,
+ void *buf, size_t len,
+ bool fixed)
{
- wl1271_raw_read(wl, wl->rtable[reg], buf, len, fixed);
+ return wlcore_raw_read(wl, wl->rtable[reg], buf, len, fixed);
}
-static inline void wlcore_raw_write_data(struct wl1271 *wl, int reg, void *buf,
- size_t len, bool fixed)
+static inline int __must_check wlcore_raw_write_data(struct wl1271 *wl, int reg,
+ void *buf, size_t len,
+ bool fixed)
{
- wl1271_raw_write(wl, wl->rtable[reg], buf, len, fixed);
+ return wlcore_raw_write(wl, wl->rtable[reg], buf, len, fixed);
}
-static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
+static inline int __must_check wlcore_raw_read32(struct wl1271 *wl, int addr,
+ u32 *val)
{
- wl1271_raw_read(wl, addr, &wl->buffer_32,
- sizeof(wl->buffer_32), false);
+ int ret;
+
+ ret = wlcore_raw_read(wl, addr, &wl->buffer_32,
+ sizeof(wl->buffer_32), false);
+ if (ret < 0)
+ return ret;
+
+ if (val)
+ *val = le32_to_cpu(wl->buffer_32);
- return le32_to_cpu(wl->buffer_32);
+ return 0;
}
-static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val)
+static inline int __must_check wlcore_raw_write32(struct wl1271 *wl, int addr,
+ u32 val)
{
wl->buffer_32 = cpu_to_le32(val);
- wl1271_raw_write(wl, addr, &wl->buffer_32,
- sizeof(wl->buffer_32), false);
+ return wlcore_raw_write(wl, addr, &wl->buffer_32,
+ sizeof(wl->buffer_32), false);
}
-static inline void wl1271_read(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
+static inline int __must_check wlcore_read(struct wl1271 *wl, int addr,
+ void *buf, size_t len, bool fixed)
{
int physical;
physical = wlcore_translate_addr(wl, addr);
- wl1271_raw_read(wl, physical, buf, len, fixed);
+ return wlcore_raw_read(wl, physical, buf, len, fixed);
}
-static inline void wl1271_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
+static inline int __must_check wlcore_write(struct wl1271 *wl, int addr,
+ void *buf, size_t len, bool fixed)
{
int physical;
physical = wlcore_translate_addr(wl, addr);
- wl1271_raw_write(wl, physical, buf, len, fixed);
+ return wlcore_raw_write(wl, physical, buf, len, fixed);
}
-static inline void wlcore_write_data(struct wl1271 *wl, int reg, void *buf,
- size_t len, bool fixed)
+static inline int __must_check wlcore_write_data(struct wl1271 *wl, int reg,
+ void *buf, size_t len,
+ bool fixed)
{
- wl1271_write(wl, wl->rtable[reg], buf, len, fixed);
+ return wlcore_write(wl, wl->rtable[reg], buf, len, fixed);
}
-static inline void wlcore_read_data(struct wl1271 *wl, int reg, void *buf,
- size_t len, bool fixed)
+static inline int __must_check wlcore_read_data(struct wl1271 *wl, int reg,
+ void *buf, size_t len,
+ bool fixed)
{
- wl1271_read(wl, wl->rtable[reg], buf, len, fixed);
+ return wlcore_read(wl, wl->rtable[reg], buf, len, fixed);
}
-static inline void wl1271_read_hwaddr(struct wl1271 *wl, int hwaddr,
- void *buf, size_t len, bool fixed)
+static inline int __must_check wlcore_read_hwaddr(struct wl1271 *wl, int hwaddr,
+ void *buf, size_t len,
+ bool fixed)
{
int physical;
int addr;
@@ -134,34 +170,47 @@ static inline void wl1271_read_hwaddr(struct wl1271 *wl, int hwaddr,
physical = wlcore_translate_addr(wl, addr);
- wl1271_raw_read(wl, physical, buf, len, fixed);
+ return wlcore_raw_read(wl, physical, buf, len, fixed);
}
-static inline u32 wl1271_read32(struct wl1271 *wl, int addr)
+static inline int __must_check wlcore_read32(struct wl1271 *wl, int addr,
+ u32 *val)
{
- return wl1271_raw_read32(wl, wlcore_translate_addr(wl, addr));
+ return wlcore_raw_read32(wl, wlcore_translate_addr(wl, addr), val);
}
-static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
+static inline int __must_check wlcore_write32(struct wl1271 *wl, int addr,
+ u32 val)
{
- wl1271_raw_write32(wl, wlcore_translate_addr(wl, addr), val);
+ return wlcore_raw_write32(wl, wlcore_translate_addr(wl, addr), val);
}
-static inline u32 wlcore_read_reg(struct wl1271 *wl, int reg)
+static inline int __must_check wlcore_read_reg(struct wl1271 *wl, int reg,
+ u32 *val)
{
- return wl1271_raw_read32(wl,
- wlcore_translate_addr(wl, wl->rtable[reg]));
+ return wlcore_raw_read32(wl,
+ wlcore_translate_addr(wl, wl->rtable[reg]),
+ val);
}
-static inline void wlcore_write_reg(struct wl1271 *wl, int reg, u32 val)
+static inline int __must_check wlcore_write_reg(struct wl1271 *wl, int reg,
+ u32 val)
{
- wl1271_raw_write32(wl, wlcore_translate_addr(wl, wl->rtable[reg]), val);
+ return wlcore_raw_write32(wl,
+ wlcore_translate_addr(wl, wl->rtable[reg]),
+ val);
}
static inline void wl1271_power_off(struct wl1271 *wl)
{
- wl->if_ops->power(wl->dev, false);
- clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+ int ret;
+
+ if (!test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags))
+ return;
+
+ ret = wl->if_ops->power(wl->dev, false);
+ if (!ret)
+ clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
}
static inline int wl1271_power_on(struct wl1271 *wl)
@@ -173,8 +222,8 @@ static inline int wl1271_power_on(struct wl1271 *wl)
return ret;
}
-void wlcore_set_partition(struct wl1271 *wl,
- const struct wlcore_partition_set *p);
+int wlcore_set_partition(struct wl1271 *wl,
+ const struct wlcore_partition_set *p);
bool wl1271_set_block_size(struct wl1271 *wl);
@@ -182,6 +231,4 @@ bool wl1271_set_block_size(struct wl1271 *wl);
int wl1271_tx_dummy_packet(struct wl1271 *wl);
-void wlcore_select_partition(struct wl1271 *wl, u8 part);
-
#endif
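The net effect of these io.h changes is that every bus access can now fail and callers must propagate the error instead of consuming a possibly bogus value. A before/after sketch of a typical caller; ACX_EXAMPLE_REG is a made-up register name used only for illustration:

/*
 * Old style (no way to report a bus error):
 *
 *	u32 val = wl1271_read32(wl, ACX_EXAMPLE_REG);
 *	wl1271_write32(wl, ACX_EXAMPLE_REG, val | BIT(0));
 *
 * New style - check and propagate the return value:
 */
static int wlcore_example_set_bit(struct wl1271 *wl)
{
	u32 val;
	int ret;

	ret = wlcore_read32(wl, ACX_EXAMPLE_REG, &val);
	if (ret < 0)
		return ret;	/* the IO-failed flag is set; recovery follows */

	return wlcore_write32(wl, ACX_EXAMPLE_REG, val | BIT(0));
}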
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index acef933..7254860 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -62,7 +62,7 @@ static bool no_recovery;
static void __wl1271_op_remove_interface(struct wl1271 *wl,
struct ieee80211_vif *vif,
bool reset_tx_queues);
-static void wl1271_op_stop(struct ieee80211_hw *hw);
+static void wlcore_op_stop_locked(struct wl1271 *wl);
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
static int wl12xx_set_authorized(struct wl1271 *wl,
@@ -320,46 +320,6 @@ static void wlcore_adjust_conf(struct wl1271 *wl)
}
}
-static int wl1271_plt_init(struct wl1271 *wl)
-{
- int ret;
-
- ret = wl->ops->hw_init(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_acx_init_mem_config(wl);
- if (ret < 0)
- return ret;
-
- ret = wl12xx_acx_mem_cfg(wl);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Enable data path */
- ret = wl1271_cmd_data_path(wl, 1);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Configure for CAM power saving (ie. always active) */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
- if (ret < 0)
- goto out_free_memmap;
-
- /* configure PM */
- ret = wl1271_acx_pm_config(wl);
- if (ret < 0)
- goto out_free_memmap;
-
- return 0;
-
- out_free_memmap:
- kfree(wl->target_mem_map);
- wl->target_mem_map = NULL;
-
- return ret;
-}
-
static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
u8 hlid, u8 tx_pkts)
@@ -387,7 +347,7 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
- struct wl_fw_status *status)
+ struct wl_fw_status_2 *status)
{
struct wl1271_link *lnk;
u32 cur_fw_ps_map;
@@ -418,8 +378,9 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
}
}
-static void wl12xx_fw_status(struct wl1271 *wl,
- struct wl_fw_status *status)
+static int wlcore_fw_status(struct wl1271 *wl,
+ struct wl_fw_status_1 *status_1,
+ struct wl_fw_status_2 *status_2)
{
struct wl12xx_vif *wlvif;
struct timespec ts;
@@ -427,38 +388,42 @@ static void wl12xx_fw_status(struct wl1271 *wl,
int avail, freed_blocks;
int i;
size_t status_len;
+ int ret;
- status_len = sizeof(*status) + wl->fw_status_priv_len;
+ status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
+ sizeof(*status_2) + wl->fw_status_priv_len;
- wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status,
- status_len, false);
+ ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
+ status_len, false);
+ if (ret < 0)
+ return ret;
wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
"drv_rx_counter = %d, tx_results_counter = %d)",
- status->intr,
- status->fw_rx_counter,
- status->drv_rx_counter,
- status->tx_results_counter);
+ status_1->intr,
+ status_1->fw_rx_counter,
+ status_1->drv_rx_counter,
+ status_1->tx_results_counter);
for (i = 0; i < NUM_TX_QUEUES; i++) {
/* prevent wrap-around in freed-packets counter */
wl->tx_allocated_pkts[i] -=
- (status->counters.tx_released_pkts[i] -
+ (status_2->counters.tx_released_pkts[i] -
wl->tx_pkts_freed[i]) & 0xff;
- wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
+ wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
}
/* prevent wrap-around in total blocks counter */
if (likely(wl->tx_blocks_freed <=
- le32_to_cpu(status->total_released_blks)))
- freed_blocks = le32_to_cpu(status->total_released_blks) -
+ le32_to_cpu(status_2->total_released_blks)))
+ freed_blocks = le32_to_cpu(status_2->total_released_blks) -
wl->tx_blocks_freed;
else
freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
- le32_to_cpu(status->total_released_blks);
+ le32_to_cpu(status_2->total_released_blks);
- wl->tx_blocks_freed = le32_to_cpu(status->total_released_blks);
+ wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
wl->tx_allocated_blocks -= freed_blocks;
@@ -474,7 +439,7 @@ static void wl12xx_fw_status(struct wl1271 *wl,
cancel_delayed_work(&wl->tx_watchdog_work);
}
- avail = le32_to_cpu(status->tx_total) - wl->tx_allocated_blocks;
+ avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
/*
* The FW might change the total number of TX memblocks before
@@ -493,13 +458,15 @@ static void wl12xx_fw_status(struct wl1271 *wl,
/* for AP update num of allocated TX blocks per link and ps status */
wl12xx_for_each_wlvif_ap(wl, wlvif) {
- wl12xx_irq_update_links_status(wl, wlvif, status);
+ wl12xx_irq_update_links_status(wl, wlvif, status_2);
}
/* update the host-chipset time offset */
getnstimeofday(&ts);
wl->time_offset = (timespec_to_ns(&ts) >> 10) -
- (s64)le32_to_cpu(status->fw_localtime);
+ (s64)le32_to_cpu(status_2->fw_localtime);
+
+ return 0;
}
static void wl1271_flush_deferred_work(struct wl1271 *wl)
@@ -527,20 +494,15 @@ static void wl1271_netstack_work(struct work_struct *work)
#define WL1271_IRQ_MAX_LOOPS 256
-static irqreturn_t wl1271_irq(int irq, void *cookie)
+static int wlcore_irq_locked(struct wl1271 *wl)
{
- int ret;
+ int ret = 0;
u32 intr;
int loopcount = WL1271_IRQ_MAX_LOOPS;
- struct wl1271 *wl = (struct wl1271 *)cookie;
bool done = false;
unsigned int defer_count;
unsigned long flags;
- /* TX might be handled here, avoid redundant work */
- set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
- cancel_work_sync(&wl->tx_work);
-
/*
* In case edge triggered interrupt must be used, we cannot iterate
* more than once without introducing race conditions with the hardirq.
@@ -548,8 +510,6 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
loopcount = 1;
- mutex_lock(&wl->mutex);
-
wl1271_debug(DEBUG_IRQ, "IRQ work");
if (unlikely(wl->state == WL1271_STATE_OFF))
@@ -568,21 +528,33 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
smp_mb__after_clear_bit();
- wl12xx_fw_status(wl, wl->fw_status);
+ ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
+ if (ret < 0)
+ goto out;
wlcore_hw_tx_immediate_compl(wl);
- intr = le32_to_cpu(wl->fw_status->intr);
- intr &= WL1271_INTR_MASK;
+ intr = le32_to_cpu(wl->fw_status_1->intr);
+ intr &= WLCORE_ALL_INTR_MASK;
if (!intr) {
done = true;
continue;
}
if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
- wl1271_error("watchdog interrupt received! "
+ wl1271_error("HW watchdog interrupt received! starting recovery.");
+ wl->watchdog_recovery = true;
+ ret = -EIO;
+
+ /* restarting the chip. ignore any other interrupt. */
+ goto out;
+ }
+
+ if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
+ wl1271_error("SW watchdog interrupt received! "
"starting recovery.");
- wl12xx_queue_recovery_work(wl);
+ wl->watchdog_recovery = true;
+ ret = -EIO;
/* restarting the chip. ignore any other interrupt. */
goto out;
@@ -591,7 +563,9 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
if (likely(intr & WL1271_ACX_INTR_DATA)) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
- wl12xx_rx(wl, wl->fw_status);
+ ret = wlcore_rx(wl, wl->fw_status_1);
+ if (ret < 0)
+ goto out;
/* Check if any tx blocks were freed */
spin_lock_irqsave(&wl->wl_lock, flags);
@@ -602,13 +576,17 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
* In order to avoid starvation of the TX path,
* call the work function directly.
*/
- wl1271_tx_work_locked(wl);
+ ret = wlcore_tx_work_locked(wl);
+ if (ret < 0)
+ goto out;
} else {
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
/* check for tx results */
- wlcore_hw_tx_delayed_compl(wl);
+ ret = wlcore_hw_tx_delayed_compl(wl);
+ if (ret < 0)
+ goto out;
/* Make sure the deferred queues don't get too long */
defer_count = skb_queue_len(&wl->deferred_tx_queue) +
@@ -619,12 +597,16 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
if (intr & WL1271_ACX_INTR_EVENT_A) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
- wl1271_event_handle(wl, 0);
+ ret = wl1271_event_handle(wl, 0);
+ if (ret < 0)
+ goto out;
}
if (intr & WL1271_ACX_INTR_EVENT_B) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
- wl1271_event_handle(wl, 1);
+ ret = wl1271_event_handle(wl, 1);
+ if (ret < 0)
+ goto out;
}
if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
@@ -638,6 +620,25 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
wl1271_ps_elp_sleep(wl);
out:
+ return ret;
+}
+
+static irqreturn_t wlcore_irq(int irq, void *cookie)
+{
+ int ret;
+ unsigned long flags;
+ struct wl1271 *wl = cookie;
+
+ /* TX might be handled here, avoid redundant work */
+ set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
+ cancel_work_sync(&wl->tx_work);
+
+ mutex_lock(&wl->mutex);
+
+ ret = wlcore_irq_locked(wl);
+ if (ret)
+ wl12xx_queue_recovery_work(wl);
+
spin_lock_irqsave(&wl->wl_lock, flags);
/* In case TX was not handled here, queue TX work */
clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
@@ -743,7 +744,7 @@ out:
return ret;
}
-static int wl1271_fetch_nvs(struct wl1271 *wl)
+static void wl1271_fetch_nvs(struct wl1271 *wl)
{
const struct firmware *fw;
int ret;
@@ -751,16 +752,15 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
if (ret < 0) {
- wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME,
- ret);
- return ret;
+ wl1271_debug(DEBUG_BOOT, "could not get nvs file %s: %d",
+ WL12XX_NVS_NAME, ret);
+ return;
}
wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (!wl->nvs) {
wl1271_error("could not allocate memory for the nvs file");
- ret = -ENOMEM;
goto out;
}
@@ -768,14 +768,17 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
out:
release_firmware(fw);
-
- return ret;
}
void wl12xx_queue_recovery_work(struct wl1271 *wl)
{
- if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
+ WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
+
+ /* Avoid a recursive recovery */
+ if (!test_and_set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
+ wlcore_disable_interrupts_nosync(wl);
ieee80211_queue_work(wl->hw, &wl->recovery_work);
+ }
}
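
The queueing above hinges on test_and_set_bit(): checking and setting WL1271_FLAG_RECOVERY_IN_PROGRESS in one atomic step means that when several error paths race into wl12xx_queue_recovery_work(), only the first one disables interrupts and schedules the work. A minimal sketch of the same idiom; the example_* names and EXAMPLE_RECOVERY_BIT are hypothetical and not part of this driver:

	#include <linux/bitops.h>
	#include <linux/interrupt.h>
	#include <linux/workqueue.h>

	#define EXAMPLE_RECOVERY_BIT	0

	struct example_dev {
		unsigned long flags;
		int irq;
		struct work_struct recovery_work;
	};

	static void example_queue_recovery(struct example_dev *dev)
	{
		/* test_and_set_bit() returns the old value: only the first caller sees 0 */
		if (!test_and_set_bit(EXAMPLE_RECOVERY_BIT, &dev->flags)) {
			disable_irq_nosync(dev->irq);	/* keep further IRQs out while recovering */
			schedule_work(&dev->recovery_work);
		}
	}
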
size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
@@ -801,14 +804,17 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
return len;
}
+#define WLCORE_FW_LOG_END 0x2000000
+
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
u32 addr;
- u32 first_addr;
+ u32 offset;
+ u32 end_of_log;
u8 *block;
+ int ret;
if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
- (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) ||
(wl->conf.fwlog.mem_blocks == 0))
return;
@@ -820,34 +826,49 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
/*
* Make sure the chip is awake and the logger isn't active.
- * This might fail if the firmware hanged.
+	 * Do not send a stop fwlog command if the fw is hung.
*/
- if (!wl1271_ps_elp_wakeup(wl))
+ if (wl1271_ps_elp_wakeup(wl))
+ goto out;
+ if (!wl->watchdog_recovery)
wl12xx_cmd_stop_fwlog(wl);
/* Read the first memory block address */
- wl12xx_fw_status(wl, wl->fw_status);
- first_addr = le32_to_cpu(wl->fw_status->log_start_addr);
- if (!first_addr)
+ ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
+ if (ret < 0)
goto out;
+ addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
+ if (!addr)
+ goto out;
+
+ if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
+ offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
+ end_of_log = WLCORE_FW_LOG_END;
+ } else {
+ offset = sizeof(addr);
+ end_of_log = addr;
+ }
+
/* Traverse the memory blocks linked list */
- addr = first_addr;
do {
memset(block, 0, WL12XX_HW_BLOCK_SIZE);
- wl1271_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
- false);
+ ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
+ false);
+ if (ret < 0)
+ goto out;
/*
* Memory blocks are linked to one another. The first 4 bytes
* of each memory block hold the hardware address of the next
- * one. The last memory block points to the first one.
+ * one. The last memory block points to the first one in
+ * on-demand mode and is equal to 0x2000000 in continuous mode.
*/
addr = le32_to_cpup((__le32 *)block);
- if (!wl12xx_copy_fwlog(wl, block + sizeof(addr),
- WL12XX_HW_BLOCK_SIZE - sizeof(addr)))
+ if (!wl12xx_copy_fwlog(wl, block + offset,
+ WL12XX_HW_BLOCK_SIZE - offset))
break;
- } while (addr && (addr != first_addr));
+ } while (addr && (addr != end_of_log));
wake_up_interruptible(&wl->fwlog_waitq);
@@ -855,6 +876,34 @@ out:
kfree(block);
}
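
The loop above walks the firmware log as a linked list of WL12XX_HW_BLOCK_SIZE blocks whose first 4 bytes hold the address of the next block. In on-demand mode the list is circular, so end_of_log is the start address; in continuous mode the last block instead points at the WLCORE_FW_LOG_END sentinel (0x2000000). A host-side sketch of just the termination logic; example_walk_fwlog() and next_addr() are hypothetical names used for illustration:

	#include <linux/types.h>

	/* returns how many blocks would be copied before the walk terminates */
	static int example_walk_fwlog(u32 first_addr, bool continuous,
				      u32 (*next_addr)(u32 addr))
	{
		u32 end_of_log = continuous ? 0x2000000 : first_addr;
		u32 addr = first_addr;
		int blocks = 0;

		do {
			blocks++;		/* the real code copies the block here */
			addr = next_addr(addr);	/* first 4 bytes of the block */
		} while (addr && addr != end_of_log);

		return blocks;
	}
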
+static void wlcore_print_recovery(struct wl1271 *wl)
+{
+ u32 pc = 0;
+ u32 hint_sts = 0;
+ int ret;
+
+ wl1271_info("Hardware recovery in progress. FW ver: %s",
+ wl->chip.fw_ver_str);
+
+ /* change partitions momentarily so we can read the FW pc */
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
+ if (ret < 0)
+ return;
+
+ ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
+ if (ret < 0)
+ return;
+
+ ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
+ if (ret < 0)
+ return;
+
+ wl1271_info("pc: 0x%x, hint_sts: 0x%08x", pc, hint_sts);
+
+ wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
+}
+
+
static void wl1271_recovery_work(struct work_struct *work)
{
struct wl1271 *wl =
@@ -867,26 +916,19 @@ static void wl1271_recovery_work(struct work_struct *work)
if (wl->state != WL1271_STATE_ON || wl->plt)
goto out_unlock;
- /* Avoid a recursive recovery */
- set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
-
- wl12xx_read_fwlog_panic(wl);
-
- wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
- wl->chip.fw_ver_str,
- wlcore_read_reg(wl, REG_PC_ON_RECOVERY));
+ if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
+ wl12xx_read_fwlog_panic(wl);
+ wlcore_print_recovery(wl);
+ }
BUG_ON(bug_on_recovery &&
!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
if (no_recovery) {
wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
- clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
goto out_unlock;
}
- BUG_ON(bug_on_recovery);
-
/*
* Advance security sequence number to overcome potential progress
* in the firmware during recovery. This doesn't hurt if the network is
@@ -900,7 +942,7 @@ static void wl1271_recovery_work(struct work_struct *work)
}
/* Prevent spurious TX during FW restart */
- ieee80211_stop_queues(wl->hw);
+ wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
if (wl->sched_scanning) {
ieee80211_sched_scan_stopped(wl->hw);
@@ -914,10 +956,8 @@ static void wl1271_recovery_work(struct work_struct *work)
vif = wl12xx_wlvif_to_vif(wlvif);
__wl1271_op_remove_interface(wl, vif, false);
}
- mutex_unlock(&wl->mutex);
- wl1271_op_stop(wl->hw);
- clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
+ wlcore_op_stop_locked(wl);
ieee80211_restart_hw(wl->hw);
@@ -925,26 +965,34 @@ static void wl1271_recovery_work(struct work_struct *work)
* It's safe to enable TX now - the queues are stopped after a request
* to restart the HW.
*/
- ieee80211_wake_queues(wl->hw);
- return;
+ wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
+
out_unlock:
+ wl->watchdog_recovery = false;
+ clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
mutex_unlock(&wl->mutex);
}
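
The plain ieee80211_stop_queues()/ieee80211_wake_queues() calls are replaced here by wlcore_stop_queues()/wlcore_wake_queues() taking a reason (FW_RESTART above, WATERMARK in the TX path further down). The implementation is outside this hunk, but the idea is a per-queue bitmask of stop reasons, so the mac80211 queue is only woken once the last reason clears. A minimal sketch of that bookkeeping under that assumption; the example_* names are hypothetical:

	#include <linux/bitops.h>
	#include <linux/types.h>

	struct example_queue {
		unsigned long stop_reasons;	/* one bit per WLCORE_QUEUE_STOP_REASON_* */
	};

	/* returns true if this call is the one that actually stops the queue */
	static bool example_stop_queue(struct example_queue *q, int reason)
	{
		return !test_and_set_bit(reason, &q->stop_reasons);
	}

	/* returns true if the mac80211 queue may be woken again */
	static bool example_wake_queue(struct example_queue *q, int reason)
	{
		clear_bit(reason, &q->stop_reasons);
		return !q->stop_reasons;
	}
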
-static void wl1271_fw_wakeup(struct wl1271 *wl)
+static int wlcore_fw_wakeup(struct wl1271 *wl)
{
- wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
+ return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
}
static int wl1271_setup(struct wl1271 *wl)
{
- wl->fw_status = kmalloc(sizeof(*wl->fw_status), GFP_KERNEL);
- if (!wl->fw_status)
+ wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
+ sizeof(*wl->fw_status_2) +
+ wl->fw_status_priv_len, GFP_KERNEL);
+ if (!wl->fw_status_1)
return -ENOMEM;
+ wl->fw_status_2 = (struct wl_fw_status_2 *)
+ (((u8 *) wl->fw_status_1) +
+ WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
+
wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
if (!wl->tx_res_if) {
- kfree(wl->fw_status);
+ kfree(wl->fw_status_1);
return -ENOMEM;
}
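
wl1271_setup() now does a single allocation covering the chip-dependent, variable-length first status block (WLCORE_FW_STATUS_1_LEN() scales with wl->num_rx_desc), the fixed second block and an optional lower-driver private area; fw_status_2 is simply a pointer computed inside that buffer. A reduced sketch of the same layout arithmetic, with hypothetical ex_status_* structs standing in for the real wl_fw_status_1/2:

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/types.h>

	struct ex_status_1 {
		__le32 intr;
		__le32 rx_pkt_descs[];		/* length depends on the chip */
	};

	struct ex_status_2 {
		__le32 log_start_addr;
	};

	#define EX_STATUS_1_LEN(num_rx_desc) \
		(sizeof(struct ex_status_1) + (num_rx_desc) * sizeof(__le32))

	static int ex_alloc_status(struct ex_status_1 **s1, struct ex_status_2 **s2,
				   u32 num_rx_desc, size_t priv_len)
	{
		*s1 = kzalloc(EX_STATUS_1_LEN(num_rx_desc) + sizeof(**s2) + priv_len,
			      GFP_KERNEL);
		if (!*s1)
			return -ENOMEM;

		/* the fixed part starts right after the variable-length part */
		*s2 = (struct ex_status_2 *)((u8 *)*s1 + EX_STATUS_1_LEN(num_rx_desc));
		return 0;
	}
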
@@ -963,13 +1011,21 @@ static int wl12xx_set_power_on(struct wl1271 *wl)
wl1271_io_reset(wl);
wl1271_io_init(wl);
- wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
+ if (ret < 0)
+ goto fail;
/* ELP module wake up */
- wl1271_fw_wakeup(wl);
+ ret = wlcore_fw_wakeup(wl);
+ if (ret < 0)
+ goto fail;
out:
return ret;
+
+fail:
+ wl1271_power_off(wl);
+ return ret;
}
static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
@@ -987,13 +1043,12 @@ static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
* simplify the code and since the performance impact is
* negligible, we use the same block size for all different
* chip types.
+ *
+ * Check if the bus supports blocksize alignment and, if it
+ * doesn't, make sure we don't have the quirk.
*/
- if (wl1271_set_block_size(wl))
- wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
-
- ret = wl->ops->identify_chip(wl);
- if (ret < 0)
- goto out;
+ if (!wl1271_set_block_size(wl))
+ wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
/* TODO: make sure the lower driver has set things up correctly */
@@ -1005,21 +1060,21 @@ static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
if (ret < 0)
goto out;
- /* No NVS from netlink, try to get it from the filesystem */
- if (wl->nvs == NULL) {
- ret = wl1271_fetch_nvs(wl);
- if (ret < 0)
- goto out;
- }
-
out:
return ret;
}
-int wl1271_plt_start(struct wl1271 *wl)
+int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
{
int retries = WL1271_BOOT_RETRIES;
struct wiphy *wiphy = wl->hw->wiphy;
+
+ static const char* const PLT_MODE[] = {
+ "PLT_OFF",
+ "PLT_ON",
+ "PLT_FEM_DETECT"
+ };
+
int ret;
mutex_lock(&wl->mutex);
@@ -1033,23 +1088,23 @@ int wl1271_plt_start(struct wl1271 *wl)
goto out;
}
+ /* Indicate to lower levels that we are now in PLT mode */
+ wl->plt = true;
+ wl->plt_mode = plt_mode;
+
while (retries) {
retries--;
ret = wl12xx_chip_wakeup(wl, true);
if (ret < 0)
goto power_off;
- ret = wl->ops->boot(wl);
+ ret = wl->ops->plt_init(wl);
if (ret < 0)
goto power_off;
- ret = wl1271_plt_init(wl);
- if (ret < 0)
- goto irq_disable;
-
- wl->plt = true;
wl->state = WL1271_STATE_ON;
- wl1271_notice("firmware booted in PLT mode (%s)",
+ wl1271_notice("firmware booted in PLT mode %s (%s)",
+ PLT_MODE[plt_mode],
wl->chip.fw_ver_str);
/* update hw/fw version info in wiphy struct */
@@ -1059,23 +1114,13 @@ int wl1271_plt_start(struct wl1271 *wl)
goto out;
-irq_disable:
- mutex_unlock(&wl->mutex);
- /* Unlocking the mutex in the middle of handling is
- inherently unsafe. In this case we deem it safe to do,
- because we need to let any possibly pending IRQ out of
- the system (and while we are WL1271_STATE_OFF the IRQ
- work function will not do anything.) Also, any other
- possible concurrent operations will fail due to the
- current state, hence the wl1271 struct should be safe. */
- wlcore_disable_interrupts(wl);
- wl1271_flush_deferred_work(wl);
- cancel_work_sync(&wl->netstack_work);
- mutex_lock(&wl->mutex);
power_off:
wl1271_power_off(wl);
}
+ wl->plt = false;
+ wl->plt_mode = PLT_OFF;
+
wl1271_error("firmware boot in PLT mode failed despite %d retries",
WL1271_BOOT_RETRIES);
out:
@@ -1125,8 +1170,10 @@ int wl1271_plt_stop(struct wl1271 *wl)
mutex_lock(&wl->mutex);
wl1271_power_off(wl);
wl->flags = 0;
+ wl->sleep_auth = WL1271_PSM_ILLEGAL;
wl->state = WL1271_STATE_OFF;
wl->plt = false;
+ wl->plt_mode = PLT_OFF;
wl->rx_counter = 0;
mutex_unlock(&wl->mutex);
@@ -1154,9 +1201,16 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
spin_lock_irqsave(&wl->wl_lock, flags);
- /* queue the packet */
+ /*
+ * drop the packet if the link is invalid or the queue is stopped
+ * for any reason but watermark. Watermark is a "soft"-stop so we
+ * allow these packets through.
+ */
if (hlid == WL12XX_INVALID_LINK_ID ||
- (wlvif && !test_bit(hlid, wlvif->links_map))) {
+ (wlvif && !test_bit(hlid, wlvif->links_map)) ||
+ (wlcore_is_queue_stopped(wl, q) &&
+ !wlcore_is_queue_stopped_by_reason(wl, q,
+ WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
ieee80211_free_txskb(hw, skb);
goto out;
@@ -1172,10 +1226,12 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* The workqueue is slow to process the tx_queue and we need to stop
* the queue here, otherwise the queue will get too long.
*/
- if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
+ if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
+ !wlcore_is_queue_stopped_by_reason(wl, q,
+ WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
- ieee80211_stop_queue(wl->hw, mapping);
- set_bit(q, &wl->stopped_queues_map);
+ wlcore_stop_queue_locked(wl, q,
+ WLCORE_QUEUE_STOP_REASON_WATERMARK);
}
/*
@@ -1209,7 +1265,7 @@ int wl1271_tx_dummy_packet(struct wl1271 *wl)
/* The FW is low on RX memory blocks, so send the dummy packet asap */
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
- wl1271_tx_work_locked(wl);
+ return wlcore_tx_work_locked(wl);
/*
* If the FW TX is busy, TX work will be scheduled by the threaded
@@ -1476,8 +1532,15 @@ static int wl1271_configure_wowlan(struct wl1271 *wl,
int i, ret;
if (!wow || wow->any || !wow->n_patterns) {
- wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
- wl1271_rx_filter_clear_all(wl);
+ ret = wl1271_acx_default_rx_filter_enable(wl, 0,
+ FILTER_SIGNAL);
+ if (ret)
+ goto out;
+
+ ret = wl1271_rx_filter_clear_all(wl);
+ if (ret)
+ goto out;
+
return 0;
}
@@ -1493,8 +1556,13 @@ static int wl1271_configure_wowlan(struct wl1271 *wl,
}
}
- wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
- wl1271_rx_filter_clear_all(wl);
+ ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
+ if (ret)
+ goto out;
+
+ ret = wl1271_rx_filter_clear_all(wl);
+ if (ret)
+ goto out;
/* Translate WoWLAN patterns into filters */
for (i = 0; i < wow->n_patterns; i++) {
@@ -1532,11 +1600,20 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
goto out;
+ if ((wl->conf.conn.suspend_wake_up_event ==
+ wl->conf.conn.wake_up_event) &&
+ (wl->conf.conn.suspend_listen_interval ==
+ wl->conf.conn.listen_interval))
+ goto out;
+
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- wl1271_configure_wowlan(wl, wow);
+ ret = wl1271_configure_wowlan(wl, wow);
+ if (ret < 0)
+ goto out_sleep;
+
ret = wl1271_acx_wake_up_conditions(wl, wlvif,
wl->conf.conn.suspend_wake_up_event,
wl->conf.conn.suspend_listen_interval);
@@ -1544,8 +1621,8 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
if (ret < 0)
wl1271_error("suspend: set wake up conditions failed: %d", ret);
+out_sleep:
wl1271_ps_elp_sleep(wl);
-
out:
return ret;
@@ -1592,6 +1669,13 @@ static void wl1271_configure_resume(struct wl1271 *wl,
if ((!is_ap) && (!is_sta))
return;
+ if (is_sta &&
+ ((wl->conf.conn.suspend_wake_up_event ==
+ wl->conf.conn.wake_up_event) &&
+ (wl->conf.conn.suspend_listen_interval ==
+ wl->conf.conn.listen_interval)))
+ return;
+
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
return;
@@ -1624,6 +1708,12 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
WARN_ON(!wow);
+ /* we want to perform the recovery before suspending */
+ if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
+ wl1271_warning("postponing suspend to perform recovery");
+ return -EBUSY;
+ }
+
wl1271_tx_flush(wl);
mutex_lock(&wl->mutex);
@@ -1664,7 +1754,8 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif;
unsigned long flags;
- bool run_irq_work = false;
+ bool run_irq_work = false, pending_recovery;
+ int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
wl->wow_enabled);
@@ -1680,17 +1771,37 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
run_irq_work = true;
spin_unlock_irqrestore(&wl->wl_lock, flags);
+ mutex_lock(&wl->mutex);
+
+ /* test the recovery flag before calling any SDIO functions */
+ pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
+ &wl->flags);
+
if (run_irq_work) {
wl1271_debug(DEBUG_MAC80211,
"run postponed irq_work directly");
- wl1271_irq(0, wl);
+
+ /* don't talk to the HW if recovery is pending */
+ if (!pending_recovery) {
+ ret = wlcore_irq_locked(wl);
+ if (ret)
+ wl12xx_queue_recovery_work(wl);
+ }
+
wlcore_enable_interrupts(wl);
}
- mutex_lock(&wl->mutex);
+ if (pending_recovery) {
+ wl1271_warning("queuing forgotten recovery on resume");
+ ieee80211_queue_work(wl->hw, &wl->recovery_work);
+ goto out;
+ }
+
wl12xx_for_each_wlvif(wl, wlvif) {
wl1271_configure_resume(wl, wlvif);
}
+
+out:
wl->wow_enabled = false;
mutex_unlock(&wl->mutex);
@@ -1716,29 +1827,15 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
return 0;
}
-static void wl1271_op_stop(struct ieee80211_hw *hw)
+static void wlcore_op_stop_locked(struct wl1271 *wl)
{
- struct wl1271 *wl = hw->priv;
int i;
- wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
-
- /*
- * Interrupts must be disabled before setting the state to OFF.
- * Otherwise, the interrupt handler might be called and exit without
- * reading the interrupt status.
- */
- wlcore_disable_interrupts(wl);
- mutex_lock(&wl->mutex);
if (wl->state == WL1271_STATE_OFF) {
- mutex_unlock(&wl->mutex);
+ if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
+ &wl->flags))
+ wlcore_enable_interrupts(wl);
- /*
- * This will not necessarily enable interrupts as interrupts
- * may have been disabled when op_stop was called. It will,
- * however, balance the above call to disable_interrupts().
- */
- wlcore_enable_interrupts(wl);
return;
}
@@ -1747,8 +1844,16 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
* functions don't perform further work.
*/
wl->state = WL1271_STATE_OFF;
+
+ /*
+	 * Use the nosync variant to disable interrupts, so the mutex can be
+ * held while doing so without deadlocking.
+ */
+ wlcore_disable_interrupts_nosync(wl);
+
mutex_unlock(&wl->mutex);
+ wlcore_synchronize_interrupts(wl);
wl1271_flush_deferred_work(wl);
cancel_delayed_work_sync(&wl->scan_complete_work);
cancel_work_sync(&wl->netstack_work);
@@ -1758,15 +1863,23 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&wl->connection_loss_work);
/* let's notify MAC80211 about the remaining pending TX frames */
- wl12xx_tx_reset(wl, true);
+ wl12xx_tx_reset(wl);
mutex_lock(&wl->mutex);
wl1271_power_off(wl);
+ /*
+ * In case a recovery was scheduled, interrupts were disabled to avoid
+ * an interrupt storm. Now that the power is down, it is safe to
+ * re-enable interrupts to balance the disable depth
+ */
+ if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
+ wlcore_enable_interrupts(wl);
wl->band = IEEE80211_BAND_2GHZ;
wl->rx_counter = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
+ wl->channel_type = NL80211_CHAN_NO_HT;
wl->tx_blocks_available = 0;
wl->tx_allocated_blocks = 0;
wl->tx_results_count = 0;
@@ -1775,6 +1888,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
wl->ap_fw_ps_map = 0;
wl->ap_ps_map = 0;
wl->sched_scanning = false;
+ wl->sleep_auth = WL1271_PSM_ILLEGAL;
memset(wl->roles_map, 0, sizeof(wl->roles_map));
memset(wl->links_map, 0, sizeof(wl->links_map));
memset(wl->roc_map, 0, sizeof(wl->roc_map));
@@ -1799,12 +1913,24 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
wl1271_debugfs_reset(wl);
- kfree(wl->fw_status);
- wl->fw_status = NULL;
+ kfree(wl->fw_status_1);
+ wl->fw_status_1 = NULL;
+ wl->fw_status_2 = NULL;
kfree(wl->tx_res_if);
wl->tx_res_if = NULL;
kfree(wl->target_mem_map);
wl->target_mem_map = NULL;
+}
+
+static void wlcore_op_stop(struct ieee80211_hw *hw)
+{
+ struct wl1271 *wl = hw->priv;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
+
+ mutex_lock(&wl->mutex);
+
+ wlcore_op_stop_locked(wl);
mutex_unlock(&wl->mutex);
}
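
wlcore_op_stop_locked() sets the state to OFF and disables the interrupt with the nosync variant while wl->mutex is still held, then drops the mutex before synchronizing; a synchronous disable would deadlock if the threaded IRQ handler were already blocked on that same mutex. Assuming wlcore_disable_interrupts_nosync()/wlcore_synchronize_interrupts() wrap disable_irq_nosync()/synchronize_irq() (not shown in this patch), the ordering looks like this sketch with hypothetical names:

	#include <linux/interrupt.h>
	#include <linux/mutex.h>

	static void example_stop(struct mutex *lock, int irq, int *state)
	{
		mutex_lock(lock);
		*state = 0;			/* IRQ/work handlers bail out once OFF */
		disable_irq_nosync(irq);	/* does not wait for a running handler */
		mutex_unlock(lock);

		synchronize_irq(irq);		/* now wait, with the lock dropped */
		/* ... flush deferred work, cancel delayed works ... */

		mutex_lock(lock);
		/* ... power off and reset the driver state ... */
		mutex_unlock(lock);
	}
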
@@ -1894,6 +2020,9 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+ wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
+ wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
+ wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
} else {
/* init ap data */
wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
@@ -1903,13 +2032,19 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
wl12xx_allocate_rate_policy(wl,
&wlvif->ap.ucast_rate_idx[i]);
+ wlvif->basic_rate_set = CONF_TX_AP_ENABLED_RATES;
+ /*
+ * TODO: check if basic_rate shouldn't be
+ * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+ * instead (the same thing for STA above).
+ */
+ wlvif->basic_rate = CONF_TX_AP_ENABLED_RATES;
+ /* TODO: this seems to be used only for STA, check it */
+ wlvif->rate_set = CONF_TX_AP_ENABLED_RATES;
}
wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
- wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
- wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
- wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
/*
@@ -1919,6 +2054,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
wlvif->band = wl->band;
wlvif->channel = wl->channel;
wlvif->power_level = wl->power_level;
+ wlvif->channel_type = wl->channel_type;
INIT_WORK(&wlvif->rx_streaming_enable_work,
wl1271_rx_streaming_enable_work);
@@ -2170,6 +2306,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int i, ret;
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
@@ -2250,11 +2387,33 @@ deinit:
wlvif->role_id = WL12XX_INVALID_ROLE_ID;
wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
- if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ if (is_ap)
wl->ap_count--;
else
wl->sta_count--;
+ /*
+	 * Last AP, have more stations. Configure sleep auth according to STA.
+	 * Don't do this on unintended recovery.
+ */
+ if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
+ !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
+ goto unlock;
+
+ if (wl->ap_count == 0 && is_ap && wl->sta_count) {
+ u8 sta_auth = wl->conf.conn.sta_sleep_auth;
+ /* Configure for power according to debugfs */
+ if (sta_auth != WL1271_PSM_ILLEGAL)
+ wl1271_acx_sleep_auth(wl, sta_auth);
+ /* Configure for power always on */
+ else if (wl->quirks & WLCORE_QUIRK_NO_ELP)
+ wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+ /* Configure for ELP power saving */
+ else
+ wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
+ }
+
+unlock:
mutex_unlock(&wl->mutex);
del_timer_sync(&wlvif->rx_streaming_timer);
@@ -2444,7 +2603,7 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
} else {
/* The current firmware only supports sched_scan in idle */
if (wl->sched_scanning) {
- wl1271_scan_sched_scan_stop(wl);
+ wl1271_scan_sched_scan_stop(wl, wlvif);
ieee80211_sched_scan_stopped(wl->hw);
}
@@ -2469,13 +2628,24 @@ static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
/* if the channel changes while joined, join again */
if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
((wlvif->band != conf->channel->band) ||
- (wlvif->channel != channel))) {
+ (wlvif->channel != channel) ||
+ (wlvif->channel_type != conf->channel_type))) {
/* send all pending packets */
- wl1271_tx_work_locked(wl);
+ ret = wlcore_tx_work_locked(wl);
+ if (ret < 0)
+ return ret;
+
wlvif->band = conf->channel->band;
wlvif->channel = channel;
+ wlvif->channel_type = conf->channel_type;
- if (!is_ap) {
+ if (is_ap) {
+ wl1271_set_band_rate(wl, wlvif);
+ ret = wl1271_init_ap_rates(wl, wlvif);
+ if (ret < 0)
+ wl1271_error("AP rate policy change failed %d",
+ ret);
+ } else {
/*
* FIXME: the mac80211 should really provide a fixed
* rate to use here. for now, just use the smallest
@@ -2583,8 +2753,9 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
* frames, such as the deauth. To make sure those frames reach the air,
* wait here until the TX queue is fully flushed.
*/
- if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
- (conf->flags & IEEE80211_CONF_IDLE))
+ if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) ||
+ ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
+ (conf->flags & IEEE80211_CONF_IDLE)))
wl1271_tx_flush(wl);
mutex_lock(&wl->mutex);
@@ -2593,6 +2764,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
wl->band = conf->channel->band;
wl->channel = channel;
+ wl->channel_type = conf->channel_type;
}
if (changed & IEEE80211_CONF_CHANGE_POWER)
@@ -2825,17 +2997,6 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
int ret;
bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
- /*
- * A role set to GEM cipher requires different Tx settings (namely
- * spare blocks). Note when we are in this mode so the HW can adjust.
- */
- if (key_type == KEY_GEM) {
- if (action == KEY_ADD_OR_REPLACE)
- wlvif->is_gem = true;
- else if (action == KEY_REMOVE)
- wlvif->is_gem = false;
- }
-
if (is_ap) {
struct wl1271_station *wl_sta;
u8 hlid;
@@ -2913,12 +3074,21 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
return 0;
}
-static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key_conf)
{
struct wl1271 *wl = hw->priv;
+
+ return wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
+}
+
+int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key_conf)
+{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
u32 tx_seq_32 = 0;
@@ -3029,6 +3199,7 @@ out_unlock:
return ret;
}
+EXPORT_SYMBOL_GPL(wlcore_set_key);
static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -3167,6 +3338,7 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
@@ -3180,7 +3352,7 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- wl1271_scan_sched_scan_stop(wl);
+ wl1271_scan_sched_scan_stop(wl, wlvif);
wl1271_ps_elp_sleep(wl);
out:
@@ -3316,8 +3488,15 @@ static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
skb->data,
skb->len, 0,
rates);
-
dev_kfree_skb(skb);
+
+ if (ret < 0)
+ goto out;
+
+ wl1271_debug(DEBUG_AP, "probe response updated");
+ set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
+
+out:
return ret;
}
@@ -3422,6 +3601,87 @@ out:
return ret;
}
+static int wlcore_set_beacon_template(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
+ bool is_ap)
+{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct ieee80211_hdr *hdr;
+ u32 min_rate;
+ int ret;
+ int ieoffset = offsetof(struct ieee80211_mgmt,
+ u.beacon.variable);
+ struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
+ u16 tmpl_id;
+
+ if (!beacon) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ wl1271_debug(DEBUG_MASTER, "beacon updated");
+
+ ret = wl1271_ssid_set(vif, beacon, ieoffset);
+ if (ret < 0) {
+ dev_kfree_skb(beacon);
+ goto out;
+ }
+ min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+ tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
+ CMD_TEMPL_BEACON;
+ ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
+ beacon->data,
+ beacon->len, 0,
+ min_rate);
+ if (ret < 0) {
+ dev_kfree_skb(beacon);
+ goto out;
+ }
+
+ /*
+ * In case we already have a probe-resp beacon set explicitly
+ * by usermode, don't use the beacon data.
+ */
+ if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
+ goto end_bcn;
+
+ /* remove TIM ie from probe response */
+ wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
+
+ /*
+ * remove p2p ie from probe response.
+	 * the fw responds to probe requests that don't include
+ * the p2p ie. probe requests with p2p ie will be passed,
+ * and will be responded by the supplicant (the spec
+ * forbids including the p2p ie when responding to probe
+ * requests that didn't include it).
+ */
+ wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
+ WLAN_OUI_TYPE_WFA_P2P, ieoffset);
+
+ hdr = (struct ieee80211_hdr *) beacon->data;
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_PROBE_RESP);
+ if (is_ap)
+ ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
+ beacon->data,
+ beacon->len,
+ min_rate);
+ else
+ ret = wl1271_cmd_template_set(wl, wlvif->role_id,
+ CMD_TEMPL_PROBE_RESPONSE,
+ beacon->data,
+ beacon->len, 0,
+ min_rate);
+end_bcn:
+ dev_kfree_skb(beacon);
+ if (ret < 0)
+ goto out;
+
+out:
+ return ret;
+}
+
static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -3440,81 +3700,12 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
- if (!wl1271_ap_set_probe_resp_tmpl(wl, rate, vif)) {
- wl1271_debug(DEBUG_AP, "probe response updated");
- set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
- }
+
+ wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
}
if ((changed & BSS_CHANGED_BEACON)) {
- struct ieee80211_hdr *hdr;
- u32 min_rate;
- int ieoffset = offsetof(struct ieee80211_mgmt,
- u.beacon.variable);
- struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
- u16 tmpl_id;
-
- if (!beacon) {
- ret = -EINVAL;
- goto out;
- }
-
- wl1271_debug(DEBUG_MASTER, "beacon updated");
-
- ret = wl1271_ssid_set(vif, beacon, ieoffset);
- if (ret < 0) {
- dev_kfree_skb(beacon);
- goto out;
- }
- min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
- tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
- CMD_TEMPL_BEACON;
- ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
- beacon->data,
- beacon->len, 0,
- min_rate);
- if (ret < 0) {
- dev_kfree_skb(beacon);
- goto out;
- }
-
- /*
- * In case we already have a probe-resp beacon set explicitly
- * by usermode, don't use the beacon data.
- */
- if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
- goto end_bcn;
-
- /* remove TIM ie from probe response */
- wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
-
- /*
- * remove p2p ie from probe response.
- * the fw reponds to probe requests that don't include
- * the p2p ie. probe requests with p2p ie will be passed,
- * and will be responded by the supplicant (the spec
- * forbids including the p2p ie when responding to probe
- * requests that didn't include it).
- */
- wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
- WLAN_OUI_TYPE_WFA_P2P, ieoffset);
-
- hdr = (struct ieee80211_hdr *) beacon->data;
- hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_PROBE_RESP);
- if (is_ap)
- ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
- beacon->data,
- beacon->len,
- min_rate);
- else
- ret = wl1271_cmd_template_set(wl, wlvif->role_id,
- CMD_TEMPL_PROBE_RESPONSE,
- beacon->data,
- beacon->len, 0,
- min_rate);
-end_bcn:
- dev_kfree_skb(beacon);
+ ret = wlcore_set_beacon_template(wl, vif, is_ap);
if (ret < 0)
goto out;
}
@@ -3551,6 +3742,14 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
ret = wl1271_ap_init_templates(wl, vif);
if (ret < 0)
goto out;
+
+ ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_set_beacon_template(wl, vif, true);
+ if (ret < 0)
+ goto out;
}
ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
@@ -3691,7 +3890,8 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
if (sta->ht_cap.ht_supported)
sta_rate_set |=
- (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
+ (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
+ (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
sta_ht_cap = sta->ht_cap;
sta_exists = true;
@@ -3704,13 +3904,11 @@ sta_not_found:
u32 rates;
int ieoffset;
wlvif->aid = bss_conf->aid;
+ wlvif->channel_type = bss_conf->channel_type;
wlvif->beacon_int = bss_conf->beacon_int;
do_join = true;
set_assoc = true;
- /* Cancel connection_loss_work */
- cancel_delayed_work_sync(&wl->connection_loss_work);
-
/*
* use basic rates from AP, and determine lowest rate
* to use with control frames.
@@ -3960,6 +4158,17 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
(int)changed);
+ /*
+ * make sure to cancel pending disconnections if our association
+ * state changed
+ */
+ if (!is_ap && (changed & BSS_CHANGED_ASSOC))
+ cancel_delayed_work_sync(&wl->connection_loss_work);
+
+ if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
+ !bss_conf->enable_beacon)
+ wl1271_tx_flush(wl);
+
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WL1271_STATE_OFF))
@@ -4068,16 +4277,13 @@ out:
static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
- struct wl1271 *wl = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
if (idx != 0)
return -ENOENT;
survey->channel = conf->channel;
- survey->filled = SURVEY_INFO_NOISE_DBM;
- survey->noise = wl->noise;
-
+ survey->filled = 0;
return 0;
}
@@ -4343,9 +4549,14 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_RX_STOP:
if (!(*ba_bitmap & BIT(tid))) {
- ret = -EINVAL;
- wl1271_error("no active RX BA session on tid: %d",
+ /*
+ * this happens on reconfig - so only output a debug
+ * message for now, and don't fail the function.
+ */
+ wl1271_debug(DEBUG_MAC80211,
+ "no active RX BA session on tid: %d",
tid);
+ ret = 0;
break;
}
@@ -4394,7 +4605,7 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+ for (i = 0; i < WLCORE_NUM_BANDS; i++)
wlvif->bitrate_masks[i] =
wl1271_tx_enabled_rates_get(wl,
mask->control[i].legacy,
@@ -4462,6 +4673,13 @@ out:
mutex_unlock(&wl->mutex);
}
+static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
+{
+ struct wl1271 *wl = hw->priv;
+
+ wl1271_tx_flush(wl);
+}
+
static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
{
struct wl1271 *wl = hw->priv;
@@ -4624,7 +4842,7 @@ static struct ieee80211_supported_band wl1271_band_5ghz = {
static const struct ieee80211_ops wl1271_ops = {
.start = wl1271_op_start,
- .stop = wl1271_op_stop,
+ .stop = wlcore_op_stop,
.add_interface = wl1271_op_add_interface,
.remove_interface = wl1271_op_remove_interface,
.change_interface = wl12xx_op_change_interface,
@@ -4636,7 +4854,7 @@ static const struct ieee80211_ops wl1271_ops = {
.prepare_multicast = wl1271_op_prepare_multicast,
.configure_filter = wl1271_op_configure_filter,
.tx = wl1271_op_tx,
- .set_key = wl1271_op_set_key,
+ .set_key = wlcore_op_set_key,
.hw_scan = wl1271_op_hw_scan,
.cancel_hw_scan = wl1271_op_cancel_hw_scan,
.sched_scan_start = wl1271_op_sched_scan_start,
@@ -4652,6 +4870,7 @@ static const struct ieee80211_ops wl1271_ops = {
.tx_frames_pending = wl1271_tx_frames_pending,
.set_bitrate_mask = wl12xx_set_bitrate_mask,
.channel_switch = wl12xx_op_channel_switch,
+ .flush = wlcore_op_flush,
CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
@@ -4882,18 +5101,22 @@ static int wl12xx_get_hw_info(struct wl1271 *wl)
if (ret < 0)
goto out;
- wl->chip.id = wlcore_read_reg(wl, REG_CHIP_ID_B);
+ ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
+ if (ret < 0)
+ goto out;
wl->fuse_oui_addr = 0;
wl->fuse_nic_addr = 0;
- wl->hw_pg_ver = wl->ops->get_pg_ver(wl);
+ ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
+ if (ret < 0)
+ goto out;
if (wl->ops->get_mac)
- wl->ops->get_mac(wl);
+ ret = wl->ops->get_mac(wl);
- wl1271_power_off(wl);
out:
+ wl1271_power_off(wl);
return ret;
}
@@ -4905,14 +5128,8 @@ static int wl1271_register_hw(struct wl1271 *wl)
if (wl->mac80211_registered)
return 0;
- ret = wl12xx_get_hw_info(wl);
- if (ret < 0) {
- wl1271_error("couldn't get hw info");
- goto out;
- }
-
- ret = wl1271_fetch_nvs(wl);
- if (ret == 0) {
+ wl1271_fetch_nvs(wl);
+ if (wl->nvs != NULL) {
/* NOTE: The wl->nvs->nvs element must be first, in
* order to simplify the casting, we assume it is at
* the beginning of the wl->nvs structure.
@@ -4960,6 +5177,29 @@ static void wl1271_unregister_hw(struct wl1271 *wl)
}
+static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+};
+
+static const struct ieee80211_iface_combination
+wlcore_iface_combinations[] = {
+ {
+ .num_different_channels = 1,
+ .max_interfaces = 2,
+ .limits = wlcore_iface_limits,
+ .n_limits = ARRAY_SIZE(wlcore_iface_limits),
+ },
+};
+
static int wl1271_init_ieee80211(struct wl1271 *wl)
{
static const u32 cipher_suites[] = {
@@ -4970,9 +5210,11 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
WL1271_CIPHER_SUITE_GEM,
};
- /* The tx descriptor buffer and the TKIP space. */
- wl->hw->extra_tx_headroom = WL1271_EXTRA_SPACE_TKIP +
- sizeof(struct wl1271_tx_hw_descr);
+ /* The tx descriptor buffer */
+ wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
+
+ if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
+ wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
/* unit us */
/* FIXME: find a proper value */
@@ -5025,12 +5267,14 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
*/
memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
sizeof(wl1271_band_2ghz));
- memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap, &wl->ht_cap,
- sizeof(wl->ht_cap));
+ memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
+ &wl->ht_cap[IEEE80211_BAND_2GHZ],
+ sizeof(*wl->ht_cap));
memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
sizeof(wl1271_band_5ghz));
- memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap, &wl->ht_cap,
- sizeof(wl->ht_cap));
+ memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
+ &wl->ht_cap[IEEE80211_BAND_5GHZ],
+ sizeof(*wl->ht_cap));
wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
&wl->bands[IEEE80211_BAND_2GHZ];
@@ -5049,6 +5293,11 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+ /* allowed interface combinations */
+ wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
+ wl->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(wlcore_iface_combinations);
+
SET_IEEE80211_DEV(wl->hw, wl->dev);
wl->hw->sta_data_size = sizeof(struct wl1271_station);
@@ -5117,8 +5366,10 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
wl->rx_counter = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->band = IEEE80211_BAND_2GHZ;
+ wl->channel_type = NL80211_CHAN_NO_HT;
wl->flags = 0;
wl->sg_enabled = true;
+ wl->sleep_auth = WL1271_PSM_ILLEGAL;
wl->hw_pg_ver = -1;
wl->ap_ps_map = 0;
wl->ap_fw_ps_map = 0;
@@ -5142,6 +5393,7 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
wl->state = WL1271_STATE_OFF;
wl->fw_type = WL12XX_FW_TYPE_NONE;
mutex_init(&wl->mutex);
+ mutex_init(&wl->flush_mutex);
order = get_order(WL1271_AGGR_BUFFER_SIZE);
wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
@@ -5222,7 +5474,7 @@ int wlcore_free_hw(struct wl1271 *wl)
kfree(wl->nvs);
wl->nvs = NULL;
- kfree(wl->fw_status);
+ kfree(wl->fw_status_1);
kfree(wl->tx_res_if);
destroy_workqueue(wl->freezable_wq);
@@ -5279,8 +5531,6 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
wlcore_adjust_conf(wl);
wl->irq = platform_get_irq(pdev, 0);
- wl->ref_clock = pdata->board_ref_clock;
- wl->tcxo_clock = pdata->board_tcxo_clock;
wl->platform_quirks = pdata->platform_quirks;
wl->set_power = pdata->set_power;
wl->dev = &pdev->dev;
@@ -5293,7 +5543,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
else
irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
- ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq,
+ ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wlcore_irq,
irqflags,
pdev->name, wl);
if (ret < 0) {
@@ -5301,6 +5551,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
goto out_free_hw;
}
+#ifdef CONFIG_PM
ret = enable_irq_wake(wl->irq);
if (!ret) {
wl->irq_wake_enabled = true;
@@ -5314,8 +5565,19 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
WL1271_RX_FILTER_MAX_PATTERN_SIZE;
}
}
+#endif
disable_irq(wl->irq);
+ ret = wl12xx_get_hw_info(wl);
+ if (ret < 0) {
+ wl1271_error("couldn't get hw info");
+ goto out_irq;
+ }
+
+ ret = wl->ops->identify_chip(wl);
+ if (ret < 0)
+ goto out_irq;
+
ret = wl1271_init_ieee80211(wl);
if (ret)
goto out_irq;
@@ -5328,7 +5590,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
if (ret < 0) {
wl1271_error("failed to create sysfs file bt_coex_state");
- goto out_irq;
+ goto out_unreg;
}
/* Create sysfs file to get HW PG version */
@@ -5353,6 +5615,9 @@ out_hw_pg_ver:
out_bt_coex_state:
device_remove_file(wl->dev, &dev_attr_bt_coex_state);
+out_unreg:
+ wl1271_unregister_hw(wl);
+
out_irq:
free_irq(wl->irq, wl);
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 756eee2..46d36fd 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -28,11 +28,14 @@
#define WL1271_WAKEUP_TIMEOUT 500
+#define ELP_ENTRY_DELAY 5
+
void wl1271_elp_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1271 *wl;
struct wl12xx_vif *wlvif;
+ int ret;
dwork = container_of(work, struct delayed_work, work);
wl = container_of(dwork, struct wl1271, elp_work);
@@ -61,7 +64,12 @@ void wl1271_elp_work(struct work_struct *work)
}
wl1271_debug(DEBUG_PSM, "chip to elp");
- wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
+ ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
+ if (ret < 0) {
+ wl12xx_queue_recovery_work(wl);
+ goto out;
+ }
+
set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
out:
@@ -72,8 +80,9 @@ out:
void wl1271_ps_elp_sleep(struct wl1271 *wl)
{
struct wl12xx_vif *wlvif;
+ u32 timeout;
- if (wl->quirks & WLCORE_QUIRK_NO_ELP)
+ if (wl->sleep_auth != WL1271_PSM_ELP)
return;
/* we shouldn't get consecutive sleep requests */
@@ -89,8 +98,13 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
return;
}
+ if (wl->conf.conn.forced_ps)
+ timeout = ELP_ENTRY_DELAY;
+ else
+ timeout = wl->conf.conn.dynamic_ps_timeout;
+
ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
- msecs_to_jiffies(wl->conf.conn.dynamic_ps_timeout));
+ msecs_to_jiffies(timeout));
}
int wl1271_ps_elp_wakeup(struct wl1271 *wl)
@@ -127,7 +141,11 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl)
wl->elp_compl = &compl;
spin_unlock_irqrestore(&wl->wl_lock, flags);
- wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
+ ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
+ if (ret < 0) {
+ wl12xx_queue_recovery_work(wl);
+ goto err;
+ }
if (!pending) {
ret = wait_for_completion_timeout(
@@ -185,8 +203,12 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
set_bit(WLVIF_FLAG_IN_PS, &wlvif->flags);
- /* enable beacon early termination. Not relevant for 5GHz */
- if (wlvif->band == IEEE80211_BAND_2GHZ) {
+ /*
+ * enable beacon early termination.
+ * Not relevant for 5GHz and for high rates.
+ */
+ if ((wlvif->band == IEEE80211_BAND_2GHZ) &&
+ (wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
ret = wl1271_acx_bet_enable(wl, wlvif, true);
if (ret < 0)
return ret;
@@ -196,7 +218,8 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl1271_debug(DEBUG_PSM, "leaving psm");
/* disable beacon early termination */
- if (wlvif->band == IEEE80211_BAND_2GHZ) {
+ if ((wlvif->band == IEEE80211_BAND_2GHZ) &&
+ (wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
ret = wl1271_acx_bet_enable(wl, wlvif, false);
if (ret < 0)
return ret;
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 1f1d948..f55e2f9 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -127,7 +127,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
}
if (rx_align == WLCORE_RX_BUF_UNALIGNED)
- reserved = NET_IP_ALIGN;
+ reserved = RX_BUF_ALIGN;
/* the data read starts with the descriptor */
desc = (struct wl1271_rx_descriptor *) data;
@@ -175,7 +175,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
*/
memcpy(buf, data + sizeof(*desc), pkt_data_len);
if (rx_align == WLCORE_RX_BUF_PADDED)
- skb_pull(skb, NET_IP_ALIGN);
+ skb_pull(skb, RX_BUF_ALIGN);
*hlid = desc->hlid;
@@ -186,6 +186,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
is_data = 1;
wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
+ wlcore_hw_set_rx_csum(wl, desc, skb);
seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb,
@@ -199,17 +200,18 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
return is_data;
}
-void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
+int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status)
{
unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
u32 buf_size;
- u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
- u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
+ u32 fw_rx_counter = status->fw_rx_counter % wl->num_rx_desc;
+ u32 drv_rx_counter = wl->rx_counter % wl->num_rx_desc;
u32 rx_counter;
u32 pkt_len, align_pkt_len;
u32 pkt_offset, des;
u8 hlid;
enum wl_rx_buf_align rx_align;
+ int ret = 0;
while (drv_rx_counter != fw_rx_counter) {
buf_size = 0;
@@ -223,7 +225,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
break;
buf_size += align_pkt_len;
rx_counter++;
- rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
+ rx_counter %= wl->num_rx_desc;
}
if (buf_size == 0) {
@@ -233,9 +235,14 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
/* Read all available packets at once */
des = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]);
- wlcore_hw_prepare_read(wl, des, buf_size);
- wlcore_read_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
- buf_size, true);
+ ret = wlcore_hw_prepare_read(wl, des, buf_size);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_read_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
+ buf_size, true);
+ if (ret < 0)
+ goto out;
/* Split data into separate packets */
pkt_offset = 0;
@@ -263,7 +270,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
wl->rx_counter++;
drv_rx_counter++;
- drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
+ drv_rx_counter %= wl->num_rx_desc;
pkt_offset += wlcore_rx_get_align_buf_size(wl, pkt_len);
}
}
@@ -272,13 +279,20 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
* Write the driver's packet counter to the FW. This is only required
* for older hardware revisions
*/
- if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION)
- wl1271_write32(wl, WL12XX_REG_RX_DRIVER_COUNTER,
- wl->rx_counter);
+ if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
+ ret = wlcore_write32(wl, WL12XX_REG_RX_DRIVER_COUNTER,
+ wl->rx_counter);
+ if (ret < 0)
+ goto out;
+ }
wl12xx_rearm_rx_streaming(wl, active_hlids);
+
+out:
+ return ret;
}
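
The descriptor counters used to be wrapped with NUM_RX_PKT_DESC_MOD_MASK (7), which is only equivalent to a modulo because the older chips expose exactly 8 RX descriptors; with the count now taken from wl->num_rx_desc, the explicit % is required, since x & (n - 1) matches x % n only for power-of-two n. A tiny host-side illustration (plain userspace C, not driver code):

	#include <assert.h>

	static unsigned int wrap_mask(unsigned int x, unsigned int n) { return x & (n - 1); }
	static unsigned int wrap_mod(unsigned int x, unsigned int n)  { return x % n; }

	int main(void)
	{
		assert(wrap_mask(13, 8) == wrap_mod(13, 8));	/* 5 == 5: n is a power of two */
		assert(wrap_mask(13, 12) != wrap_mod(13, 12));	/* 9 != 1: n is not */
		return 0;
	}
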
+#ifdef CONFIG_PM
int wl1271_rx_filter_enable(struct wl1271 *wl,
int index, bool enable,
struct wl12xx_rx_filter *filter)
@@ -304,13 +318,19 @@ int wl1271_rx_filter_enable(struct wl1271 *wl,
return 0;
}
-void wl1271_rx_filter_clear_all(struct wl1271 *wl)
+int wl1271_rx_filter_clear_all(struct wl1271 *wl)
{
- int i;
+ int i, ret = 0;
for (i = 0; i < WL1271_MAX_RX_FILTERS; i++) {
if (!wl->rx_filter_enabled[i])
continue;
- wl1271_rx_filter_enable(wl, i, 0, NULL);
+ ret = wl1271_rx_filter_enable(wl, i, 0, NULL);
+ if (ret)
+ goto out;
}
+
+out:
+ return ret;
}
+#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index e9a162a..71eba18 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -38,8 +38,6 @@
#define RX_DESC_PACKETID_SHIFT 11
#define RX_MAX_PACKET_ID 3
-#define NUM_RX_PKT_DESC_MOD_MASK 7
-
#define RX_DESC_VALID_FCS 0x0001
#define RX_DESC_MATCH_RXADDR1 0x0002
#define RX_DESC_MCAST 0x0004
@@ -102,6 +100,15 @@
/* If set, the start of IP payload is not 4 bytes aligned */
#define RX_BUF_UNALIGNED_PAYLOAD BIT(20)
+/* If set, the buffer was padded by the FW to be 4 bytes aligned */
+#define RX_BUF_PADDED_PAYLOAD BIT(30)
+
+/*
+ * Account for the padding inserted by the FW in case of RX_ALIGNMENT
+ * or for fixing alignment in case the packet wasn't aligned.
+ */
+#define RX_BUF_ALIGN 2
+
/* Describes the alignment state of a Rx buffer */
enum wl_rx_buf_align {
WLCORE_RX_BUF_ALIGNED,
@@ -136,11 +143,11 @@ struct wl1271_rx_descriptor {
u8 reserved;
} __packed;
-void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status);
+int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
int wl1271_rx_filter_enable(struct wl1271 *wl,
int index, bool enable,
struct wl12xx_rx_filter *filter);
-void wl1271_rx_filter_clear_all(struct wl1271 *wl);
+int wl1271_rx_filter_clear_all(struct wl1271 *wl);
#endif
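
RX_BUF_ALIGN is 2 because the 802.11 header in front of the payload typically leaves the IP header 2 bytes short of a 4-byte boundary. In the rx.c hunk above, the driver either reserves those 2 bytes of headroom before copying (WLCORE_RX_BUF_UNALIGNED) or pulls the 2 pad bytes the firmware already inserted (WLCORE_RX_BUF_PADDED). A reduced sketch of the two cases; example_rx_skb() is hypothetical, not a driver function:

	#include <linux/skbuff.h>
	#include <linux/string.h>
	#include <linux/types.h>

	static struct sk_buff *example_rx_skb(const u8 *data, unsigned int len,
					      bool fw_padded)
	{
		unsigned int reserved = fw_padded ? 0 : 2;	/* 2 == RX_BUF_ALIGN */
		struct sk_buff *skb = dev_alloc_skb(len + reserved);

		if (!skb)
			return NULL;

		skb_reserve(skb, reserved);		/* unaligned case: add headroom */
		memcpy(skb_put(skb, len), data, len);
		if (fw_padded)
			skb_pull(skb, 2);		/* padded case: drop the FW pad bytes */

		return skb;
	}
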
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index ade21a0..dbeca1b 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -226,7 +226,7 @@ static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
cmd->params.role_id, band,
wl->scan.ssid, wl->scan.ssid_len,
wl->scan.req->ie,
- wl->scan.req->ie_len);
+ wl->scan.req->ie_len, false);
if (ret < 0) {
wl1271_error("PROBE request template failed");
goto out;
@@ -411,7 +411,8 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
struct cfg80211_sched_scan_request *req,
struct conn_scan_ch_params *channels,
u32 band, bool radar, bool passive,
- int start, int max_channels)
+ int start, int max_channels,
+ u8 *n_pactive_ch)
{
struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
int i, j;
@@ -479,6 +480,23 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
channels[j].tx_power_att = req->channels[i]->max_power;
channels[j].channel = req->channels[i]->hw_value;
+ if ((band == IEEE80211_BAND_2GHZ) &&
+ (channels[j].channel >= 12) &&
+ (channels[j].channel <= 14) &&
+ (flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+ !force_passive) {
+ /* pactive channels treated as DFS */
+ channels[j].flags = SCAN_CHANNEL_FLAGS_DFS;
+
+ /*
+ * n_pactive_ch is counted down from the end of
+ * the passive channel list
+ */
+ (*n_pactive_ch)++;
+ wl1271_debug(DEBUG_SCAN, "n_pactive_ch = %d",
+ *n_pactive_ch);
+ }
+
j++;
}
}
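
As a concrete reading of the counting convention above (channel numbers purely illustrative):

	/*
	 * Suppose the 2.4GHz pass built above produced:
	 *     channels_2[] = { ch 1, ch 11, ch 12, ch 13 }, passive[0] = 4
	 * and channels 12 and 13 hit the pactive test, so n_pactive_ch = 2.
	 * The firmware then treats the last two entries of the passive list
	 * (12 and 13) as SCAN_CHANNEL_FLAGS_DFS "passive until activity"
	 * channels, while 1 and 11 remain plain passive.
	 */
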
@@ -491,38 +509,47 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
struct cfg80211_sched_scan_request *req,
struct wl1271_cmd_sched_scan_config *cfg)
{
+ u8 n_pactive_ch = 0;
+
cfg->passive[0] =
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2,
IEEE80211_BAND_2GHZ,
false, true, 0,
- MAX_CHANNELS_2GHZ);
+ MAX_CHANNELS_2GHZ,
+ &n_pactive_ch);
cfg->active[0] =
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2,
IEEE80211_BAND_2GHZ,
false, false,
cfg->passive[0],
- MAX_CHANNELS_2GHZ);
+ MAX_CHANNELS_2GHZ,
+ &n_pactive_ch);
cfg->passive[1] =
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
IEEE80211_BAND_5GHZ,
false, true, 0,
- MAX_CHANNELS_5GHZ);
+ MAX_CHANNELS_5GHZ,
+ &n_pactive_ch);
cfg->dfs =
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
IEEE80211_BAND_5GHZ,
true, true,
cfg->passive[1],
- MAX_CHANNELS_5GHZ);
+ MAX_CHANNELS_5GHZ,
+ &n_pactive_ch);
cfg->active[1] =
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
IEEE80211_BAND_5GHZ,
false, false,
cfg->passive[1] + cfg->dfs,
- MAX_CHANNELS_5GHZ);
+ MAX_CHANNELS_5GHZ,
+ &n_pactive_ch);
/* 802.11j channels are not supported yet */
cfg->passive[2] = 0;
cfg->active[2] = 0;
+ cfg->n_pactive_ch = n_pactive_ch;
+
wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d",
cfg->active[0], cfg->passive[0]);
wl1271_debug(DEBUG_SCAN, " 5GHz: active %d passive %d",
@@ -537,6 +564,7 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
/* Returns the scan type to be used or a negative value on error */
static int
wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req)
{
struct wl1271_cmd_sched_scan_ssid_list *cmd = NULL;
@@ -565,6 +593,7 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
goto out;
}
+ cmd->role_id = wlvif->dev_role_id;
if (!n_match_ssids) {
/* No filter, with ssids */
type = SCAN_SSID_FILTER_DISABLED;
@@ -603,7 +632,9 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
continue;
for (j = 0; j < cmd->n_ssids; j++)
- if (!memcmp(req->ssids[i].ssid,
+ if ((req->ssids[i].ssid_len ==
+ cmd->ssids[j].len) &&
+ !memcmp(req->ssids[i].ssid,
cmd->ssids[j].ssid,
req->ssids[i].ssid_len)) {
cmd->ssids[j].type =
@@ -652,6 +683,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
if (!cfg)
return -ENOMEM;
+ cfg->role_id = wlvif->dev_role_id;
cfg->rssi_threshold = c->rssi_threshold;
cfg->snr_threshold = c->snr_threshold;
cfg->n_probe_reqs = c->num_probe_reqs;
@@ -669,7 +701,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
cfg->intervals[i] = cpu_to_le32(req->interval);
cfg->ssid_len = 0;
- ret = wl12xx_scan_sched_scan_ssid_list(wl, req);
+ ret = wl12xx_scan_sched_scan_ssid_list(wl, wlvif, req);
if (ret < 0)
goto out;
@@ -690,7 +722,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ie[band],
- ies->len[band]);
+ ies->len[band], true);
if (ret < 0) {
wl1271_error("2.4GHz PROBE request template failed");
goto out;
@@ -704,7 +736,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ie[band],
- ies->len[band]);
+ ies->len[band], true);
if (ret < 0) {
wl1271_error("5GHz PROBE request template failed");
goto out;
@@ -734,13 +766,15 @@ int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
if (wlvif->bss_type != BSS_TYPE_STA_BSS)
return -EOPNOTSUPP;
- if (test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
+ if ((wl->quirks & WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN) &&
+ test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
return -EBUSY;
start = kzalloc(sizeof(*start), GFP_KERNEL);
if (!start)
return -ENOMEM;
+ start->role_id = wlvif->dev_role_id;
start->tag = WL1271_SCAN_DEFAULT_TAG;
ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start,
@@ -762,7 +796,7 @@ void wl1271_scan_sched_scan_results(struct wl1271 *wl)
ieee80211_sched_scan_results(wl->hw);
}
-void wl1271_scan_sched_scan_stop(struct wl1271 *wl)
+void wl1271_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_cmd_sched_scan_stop *stop;
int ret = 0;
@@ -776,6 +810,7 @@ void wl1271_scan_sched_scan_stop(struct wl1271 *wl)
return;
}
+ stop->role_id = wlvif->dev_role_id;
stop->tag = WL1271_SCAN_DEFAULT_TAG;
ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop,
diff --git a/drivers/net/wireless/ti/wlcore/scan.h b/drivers/net/wireless/ti/wlcore/scan.h
index 81ee36a..29f3c8d6 100644
--- a/drivers/net/wireless/ti/wlcore/scan.h
+++ b/drivers/net/wireless/ti/wlcore/scan.h
@@ -40,7 +40,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
struct cfg80211_sched_scan_request *req,
struct ieee80211_sched_scan_ies *ies);
int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-void wl1271_scan_sched_scan_stop(struct wl1271 *wl);
+void wl1271_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl1271_scan_sched_scan_results(struct wl1271 *wl);
#define WL1271_SCAN_MAX_CHANNELS 24
@@ -142,7 +142,8 @@ enum {
SCAN_BSS_TYPE_ANY,
};
-#define SCAN_CHANNEL_FLAGS_DFS BIT(0)
+#define SCAN_CHANNEL_FLAGS_DFS BIT(0) /* channel is passive until
+ activity is detected on it */
#define SCAN_CHANNEL_FLAGS_DFS_ENABLED BIT(1)
struct conn_scan_ch_params {
@@ -185,7 +186,10 @@ struct wl1271_cmd_sched_scan_config {
u8 dfs;
- u8 padding[3];
+ u8 n_pactive_ch; /* number of pactive (passive until fw detects energy)
+ channels in BG band */
+ u8 role_id;
+ u8 padding[1];
struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ];
struct conn_scan_ch_params channels_5[MAX_CHANNELS_5GHZ];
@@ -212,21 +216,24 @@ struct wl1271_cmd_sched_scan_ssid_list {
u8 n_ssids;
struct wl1271_ssid ssids[SCHED_SCAN_MAX_SSIDS];
- u8 padding[3];
+ u8 role_id;
+ u8 padding[2];
} __packed;
struct wl1271_cmd_sched_scan_start {
struct wl1271_cmd_header header;
u8 tag;
- u8 padding[3];
+ u8 role_id;
+ u8 padding[2];
} __packed;
struct wl1271_cmd_sched_scan_stop {
struct wl1271_cmd_header header;
u8 tag;
- u8 padding[3];
+ u8 role_id;
+ u8 padding[2];
} __packed;
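
Each new role_id field above takes one byte out of the existing padding, so the wire size of these __packed commands is unchanged (tag + role_id + padding[2] still totals 4 bytes after the header, like the old tag + padding[3]). A compile-time check along these lines could make that explicit; the helper name is hypothetical and assumes the scan.h/cmd.h definitions are visible:

	#include <linux/bug.h>
	#include "scan.h"	/* wl1271_cmd_sched_scan_* / wl1271_cmd_header */

	static inline void example_check_sched_scan_cmd_sizes(void)
	{
		BUILD_BUG_ON(sizeof(struct wl1271_cmd_sched_scan_stop) !=
			     sizeof(struct wl1271_cmd_header) + 4);
		BUILD_BUG_ON(sizeof(struct wl1271_cmd_sched_scan_start) !=
			     sizeof(struct wl1271_cmd_header) + 4);
	}
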
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 0a72347..73ace4b 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/platform_device.h>
+#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
@@ -32,6 +33,7 @@
#include <linux/gpio.h>
#include <linux/wl12xx.h>
#include <linux/pm_runtime.h>
+#include <linux/printk.h>
#include "wlcore.h"
#include "wl12xx_80211.h"
@@ -45,6 +47,8 @@
#define SDIO_DEVICE_ID_TI_WL1271 0x4076
#endif
+static bool dump = false;
+
struct wl12xx_sdio_glue {
struct device *dev;
struct platform_device *core;
@@ -67,8 +71,8 @@ static void wl1271_sdio_set_block_size(struct device *child,
sdio_release_host(func);
}
-static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
- size_t len, bool fixed)
+static int __must_check wl12xx_sdio_raw_read(struct device *child, int addr,
+ void *buf, size_t len, bool fixed)
{
int ret;
struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
@@ -76,6 +80,13 @@ static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
sdio_claim_host(func);
+ if (unlikely(dump)) {
+ printk(KERN_DEBUG "wlcore_sdio: READ from 0x%04x\n", addr);
+ print_hex_dump(KERN_DEBUG, "wlcore_sdio: READ ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buf, len, false);
+ }
+
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) {
((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
@@ -92,12 +103,14 @@ static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
sdio_release_host(func);
- if (ret)
+ if (WARN_ON(ret))
dev_err(child->parent, "sdio read failed (%d)\n", ret);
+
+ return ret;
}
-static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
- size_t len, bool fixed)
+static int __must_check wl12xx_sdio_raw_write(struct device *child, int addr,
+ void *buf, size_t len, bool fixed)
{
int ret;
struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
@@ -105,6 +118,13 @@ static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
sdio_claim_host(func);
+ if (unlikely(dump)) {
+ printk(KERN_DEBUG "wlcore_sdio: WRITE to 0x%04x\n", addr);
+ print_hex_dump(KERN_DEBUG, "wlcore_sdio: WRITE ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buf, len, false);
+ }
+
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) {
sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n",
@@ -121,25 +141,30 @@ static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
sdio_release_host(func);
- if (ret)
+ if (WARN_ON(ret))
dev_err(child->parent, "sdio write failed (%d)\n", ret);
+
+ return ret;
}
static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
{
int ret;
struct sdio_func *func = dev_to_sdio_func(glue->dev);
+ struct mmc_card *card = func->card;
- /* If enabled, tell runtime PM not to power off the card */
- if (pm_runtime_enabled(&func->dev)) {
- ret = pm_runtime_get_sync(&func->dev);
- if (ret < 0)
- goto out;
- } else {
- /* Runtime PM is disabled: power up the card manually */
- ret = mmc_power_restore_host(func->card->host);
- if (ret < 0)
+ ret = pm_runtime_get_sync(&card->dev);
+ if (ret) {
+ /*
+ * Runtime PM might be temporarily disabled, or the device
+ * might have a positive reference counter. Make sure it is
+ * really powered on.
+ */
+ ret = mmc_power_restore_host(card->host);
+ if (ret < 0) {
+ pm_runtime_put_sync(&card->dev);
goto out;
+ }
}
sdio_claim_host(func);
@@ -154,20 +179,21 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
{
int ret;
struct sdio_func *func = dev_to_sdio_func(glue->dev);
+ struct mmc_card *card = func->card;
sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
- /* Power off the card manually, even if runtime PM is enabled. */
- ret = mmc_power_save_host(func->card->host);
+ /* Power off the card manually in case it wasn't powered off above */
+ ret = mmc_power_save_host(card->host);
if (ret < 0)
- return ret;
+ goto out;
- /* If enabled, let runtime PM know the card is powered off */
- if (pm_runtime_enabled(&func->dev))
- ret = pm_runtime_put_sync(&func->dev);
+ /* Let runtime PM know the card is powered off */
+ pm_runtime_put_sync(&card->dev);
+out:
return ret;
}
@@ -196,6 +222,7 @@ static int __devinit wl1271_probe(struct sdio_func *func,
struct resource res[1];
mmc_pm_flag_t mmcflags;
int ret = -ENOMEM;
+ const char *chip_family;
/* We are only able to handle the wlan function */
if (func->num != 0x02)
@@ -236,7 +263,18 @@ static int __devinit wl1271_probe(struct sdio_func *func,
/* Tell PM core that we don't need the card to be powered now */
pm_runtime_put_noidle(&func->dev);
- glue->core = platform_device_alloc("wl12xx", -1);
+ /*
+ * Due to a hardware bug, we can't differentiate wl18xx from
+ * wl12xx, because both report the same device ID. The only
+ * way to differentiate is by checking the SDIO revision,
+ * which is 3.00 on the wl18xx chips.
+ */
+ if (func->card->cccr.sdio_vsn == SDIO_SDIO_REV_3_00)
+ chip_family = "wl18xx";
+ else
+ chip_family = "wl12xx";
+
+ glue->core = platform_device_alloc(chip_family, -1);
if (!glue->core) {
dev_err(glue->dev, "can't allocate platform_device");
ret = -ENOMEM;
@@ -367,12 +405,9 @@ static void __exit wl1271_exit(void)
module_init(wl1271_init);
module_exit(wl1271_exit);
+module_param(dump, bool, S_IRUSR | S_IWUSR);
+MODULE_PARM_DESC(dump, "Enable sdio read/write dumps.");
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
-MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
-MODULE_FIRMWARE(WL127X_FW_NAME_MULTI);
-MODULE_FIRMWARE(WL127X_PLT_FW_NAME);
-MODULE_FIRMWARE(WL128X_FW_NAME_SINGLE);
-MODULE_FIRMWARE(WL128X_FW_NAME_MULTI);
-MODULE_FIRMWARE(WL128X_PLT_FW_NAME);
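
The sdio.c changes above add a "dump" module parameter that hex-dumps every raw SDIO transfer with print_hex_dump(). A minimal, self-contained sketch of the same module-parameter plus hex-dump pattern; this is a standalone demo module with made-up names, not part of wlcore:

#include <linux/module.h>
#include <linux/printk.h>

static bool dump;
module_param(dump, bool, 0600);
MODULE_PARM_DESC(dump, "Enable buffer dumps.");

static int __init dump_demo_init(void)
{
    u8 buf[16] = { 0xde, 0xad, 0xbe, 0xef };

    /* same helper and arguments as used in the wlcore_sdio hunk above */
    if (dump)
        print_hex_dump(KERN_DEBUG, "demo: ", DUMP_PREFIX_OFFSET,
                       16, 1, buf, sizeof(buf), false);
    return 0;
}

static void __exit dump_demo_exit(void)
{
}

module_init(dump_demo_init);
module_exit(dump_demo_exit);
MODULE_LICENSE("GPL");
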
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 553cd3c..8da4ed2 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -193,8 +193,8 @@ static int wl12xx_spi_read_busy(struct device *child)
return -ETIMEDOUT;
}
-static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf,
- size_t len, bool fixed)
+static int __must_check wl12xx_spi_raw_read(struct device *child, int addr,
+ void *buf, size_t len, bool fixed)
{
struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
struct wl1271 *wl = dev_get_drvdata(child);
@@ -238,7 +238,7 @@ static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf,
if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
wl12xx_spi_read_busy(child)) {
memset(buf, 0, chunk_len);
- return;
+ return 0;
}
spi_message_init(&m);
@@ -256,10 +256,12 @@ static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf,
buf += chunk_len;
len -= chunk_len;
}
+
+ return 0;
}
-static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf,
- size_t len, bool fixed)
+static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
+ void *buf, size_t len, bool fixed)
{
struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
@@ -304,6 +306,8 @@ static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf,
}
spi_sync(to_spi_device(glue->dev), &m);
+
+ return 0;
}
static struct wl1271_if_operations spi_ops = {
@@ -431,10 +435,4 @@ module_exit(wl1271_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
-MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
-MODULE_FIRMWARE(WL127X_FW_NAME_MULTI);
-MODULE_FIRMWARE(WL127X_PLT_FW_NAME);
-MODULE_FIRMWARE(WL128X_FW_NAME_SINGLE);
-MODULE_FIRMWARE(WL128X_FW_NAME_MULTI);
-MODULE_FIRMWARE(WL128X_PLT_FW_NAME);
MODULE_ALIAS("spi:wl1271");
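
Both bus glue layers (sdio.c above and spi.c here) now mark their raw read/write ops int __must_check so callers cannot silently drop I/O errors. A plain userspace C sketch of what the annotation buys; the function names are hypothetical:

#include <stddef.h>
#include <errno.h>

#define __must_check __attribute__((warn_unused_result))

static int __must_check demo_bus_read(void *buf, size_t len)
{
    return (buf && len) ? 0 : -EINVAL;  /* a real bus transfer would go here */
}

int demo_caller(void *buf, size_t len)
{
    int ret = demo_bus_read(buf, len);  /* ignoring the return value now warns */

    if (ret < 0)
        return ret;                     /* propagate the I/O error upwards */
    return 0;
}
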
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 0e59ea2..49e5ee1 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -40,7 +40,7 @@ enum wl1271_tm_commands {
WL1271_TM_CMD_CONFIGURE,
WL1271_TM_CMD_NVS_PUSH, /* Not in use. Keep to not break ABI */
WL1271_TM_CMD_SET_PLT_MODE,
- WL1271_TM_CMD_RECOVER,
+ WL1271_TM_CMD_RECOVER, /* Not in use. Keep to not break ABI */
WL1271_TM_CMD_GET_MAC,
__WL1271_TM_CMD_AFTER_LAST
@@ -108,6 +108,20 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
}
if (answer) {
+ /* If we got a BiP calibration answer, print the radio status */
+ struct wl1271_cmd_cal_p2g *params =
+ (struct wl1271_cmd_cal_p2g *) buf;
+
+ s16 radio_status = (s16) le16_to_cpu(params->radio_status);
+
+ if (params->test.id == TEST_CMD_P2G_CAL &&
+ radio_status < 0)
+ wl1271_warning("testmode cmd: radio status=%d",
+ radio_status);
+ else
+ wl1271_info("testmode cmd: radio status=%d",
+ radio_status);
+
len = nla_total_size(buf_len);
skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
if (!skb) {
@@ -115,8 +129,12 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
goto out_sleep;
}
- if (nla_put(skb, WL1271_TM_ATTR_DATA, buf_len, buf))
- goto nla_put_failure;
+ if (nla_put(skb, WL1271_TM_ATTR_DATA, buf_len, buf)) {
+ kfree_skb(skb);
+ ret = -EMSGSIZE;
+ goto out_sleep;
+ }
+
ret = cfg80211_testmode_reply(skb);
if (ret < 0)
goto out_sleep;
@@ -128,11 +146,6 @@ out:
mutex_unlock(&wl->mutex);
return ret;
-
-nla_put_failure:
- kfree_skb(skb);
- ret = -EMSGSIZE;
- goto out_sleep;
}
static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
@@ -178,8 +191,12 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
goto out_free;
}
- if (nla_put(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd))
- goto nla_put_failure;
+ if (nla_put(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd)) {
+ kfree_skb(skb);
+ ret = -EMSGSIZE;
+ goto out_free;
+ }
+
ret = cfg80211_testmode_reply(skb);
if (ret < 0)
goto out_free;
@@ -192,11 +209,6 @@ out:
mutex_unlock(&wl->mutex);
return ret;
-
-nla_put_failure:
- kfree_skb(skb);
- ret = -EMSGSIZE;
- goto out_free;
}
static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
@@ -231,6 +243,43 @@ static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
return 0;
}
+static int wl1271_tm_detect_fem(struct wl1271 *wl, struct nlattr *tb[])
+{
+ /* return FEM type */
+ int ret, len;
+ struct sk_buff *skb;
+
+ ret = wl1271_plt_start(wl, PLT_FEM_DETECT);
+ if (ret < 0)
+ goto out;
+
+ mutex_lock(&wl->mutex);
+
+ len = nla_total_size(sizeof(wl->fem_manuf));
+ skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out_mutex;
+ }
+
+ if (nla_put(skb, WL1271_TM_ATTR_DATA, sizeof(wl->fem_manuf),
+ &wl->fem_manuf)) {
+ kfree_skb(skb);
+ ret = -EMSGSIZE;
+ goto out_mutex;
+ }
+
+ ret = cfg80211_testmode_reply(skb);
+
+out_mutex:
+ mutex_unlock(&wl->mutex);
+
+ /* We always stop plt after DETECT mode */
+ wl1271_plt_stop(wl);
+out:
+ return ret;
+}
+
static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
{
u32 val;
@@ -244,11 +293,14 @@ static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
val = nla_get_u32(tb[WL1271_TM_ATTR_PLT_MODE]);
switch (val) {
- case 0:
+ case PLT_OFF:
ret = wl1271_plt_stop(wl);
break;
- case 1:
- ret = wl1271_plt_start(wl);
+ case PLT_ON:
+ ret = wl1271_plt_start(wl, PLT_ON);
+ break;
+ case PLT_FEM_DETECT:
+ ret = wl1271_tm_detect_fem(wl, tb);
break;
default:
ret = -EINVAL;
@@ -258,15 +310,6 @@ static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
return ret;
}
-static int wl1271_tm_cmd_recover(struct wl1271 *wl, struct nlattr *tb[])
-{
- wl1271_debug(DEBUG_TESTMODE, "testmode cmd recover");
-
- wl12xx_queue_recovery_work(wl);
-
- return 0;
-}
-
static int wl12xx_tm_cmd_get_mac(struct wl1271 *wl, struct nlattr *tb[])
{
struct sk_buff *skb;
@@ -298,8 +341,12 @@ static int wl12xx_tm_cmd_get_mac(struct wl1271 *wl, struct nlattr *tb[])
goto out;
}
- if (nla_put(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr))
- goto nla_put_failure;
+ if (nla_put(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr)) {
+ kfree_skb(skb);
+ ret = -EMSGSIZE;
+ goto out;
+ }
+
ret = cfg80211_testmode_reply(skb);
if (ret < 0)
goto out;
@@ -307,11 +354,6 @@ static int wl12xx_tm_cmd_get_mac(struct wl1271 *wl, struct nlattr *tb[])
out:
mutex_unlock(&wl->mutex);
return ret;
-
-nla_put_failure:
- kfree_skb(skb);
- ret = -EMSGSIZE;
- goto out;
}
int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
@@ -336,8 +378,6 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
return wl1271_tm_cmd_configure(wl, tb);
case WL1271_TM_CMD_SET_PLT_MODE:
return wl1271_tm_cmd_set_plt_mode(wl, tb);
- case WL1271_TM_CMD_RECOVER:
- return wl1271_tm_cmd_recover(wl, tb);
case WL1271_TM_CMD_GET_MAC:
return wl12xx_tm_cmd_get_mac(wl, tb);
default:
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 6893bc2..f0081f7 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -72,7 +72,7 @@ static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
return id;
}
-static void wl1271_free_tx_id(struct wl1271 *wl, int id)
+void wl1271_free_tx_id(struct wl1271 *wl, int id)
{
if (__test_and_clear_bit(id, wl->tx_frames_map)) {
if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
@@ -82,6 +82,7 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id)
wl->tx_frames_cnt--;
}
}
+EXPORT_SYMBOL(wl1271_free_tx_id);
static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
struct sk_buff *skb)
@@ -127,6 +128,7 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
{
return wl->dummy_packet == skb;
}
+EXPORT_SYMBOL(wl12xx_is_dummy_packet);
u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb)
@@ -146,10 +148,10 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
return wl->system_hlid;
hdr = (struct ieee80211_hdr *)skb->data;
- if (ieee80211_is_mgmt(hdr->frame_control))
- return wlvif->ap.global_hlid;
- else
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
return wlvif->ap.bcast_hlid;
+ else
+ return wlvif->ap.global_hlid;
}
}
@@ -176,37 +178,34 @@ u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
unsigned int packet_length)
{
- if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN)
- return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
- else
+ if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) ||
+ !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN))
return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
+ else
+ return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
}
EXPORT_SYMBOL(wlcore_calc_packet_alignment);
static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb, u32 extra, u32 buf_offset,
- u8 hlid)
+ u8 hlid, bool is_gem)
{
struct wl1271_tx_hw_descr *desc;
u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
u32 total_blocks;
int id, ret = -EBUSY, ac;
- u32 spare_blocks = wl->normal_tx_spare;
- bool is_dummy = false;
+ u32 spare_blocks;
if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
return -EAGAIN;
+ spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);
+
/* allocate free identifier for the packet */
id = wl1271_alloc_tx_id(wl, skb);
if (id < 0)
return id;
- if (unlikely(wl12xx_is_dummy_packet(wl, skb)))
- is_dummy = true;
- else if (wlvif->is_gem)
- spare_blocks = wl->gem_tx_spare;
-
total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);
if (total_blocks <= wl->tx_blocks_available) {
@@ -228,7 +227,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
wl->tx_allocated_pkts[ac]++;
- if (!is_dummy && wlvif &&
+ if (!wl12xx_is_dummy_packet(wl, skb) && wlvif &&
wlvif->bss_type == BSS_TYPE_AP_BSS &&
test_bit(hlid, wlvif->ap.sta_hlid_map))
wl->links[hlid].allocated_pkts++;
@@ -268,6 +267,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
if (extra) {
int hdrlen = ieee80211_hdrlen(frame_control);
memmove(frame_start, hdr, hdrlen);
+ skb_set_network_header(skb, skb_network_offset(skb) + extra);
}
/* configure packet life time */
@@ -305,19 +305,25 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
if (is_dummy || !wlvif)
rate_idx = 0;
else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
- /* if the packets are destined for AP (have a STA entry)
- send them with AP rate policies, otherwise use default
- basic rates */
- if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+ /*
+ * if the packets are data packets, send them with AP rate
+ * policies (EAPOLs are an exception); otherwise use the
+ * default basic rates
+ */
+ if (skb->protocol == cpu_to_be16(ETH_P_PAE))
+ rate_idx = wlvif->sta.basic_rate_idx;
+ else if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
rate_idx = wlvif->sta.p2p_rate_idx;
- else if (control->control.sta)
+ else if (ieee80211_is_data(frame_control))
rate_idx = wlvif->sta.ap_rate_idx;
else
rate_idx = wlvif->sta.basic_rate_idx;
} else {
if (hlid == wlvif->ap.global_hlid)
rate_idx = wlvif->ap.mgmt_rate_idx;
- else if (hlid == wlvif->ap.bcast_hlid)
+ else if (hlid == wlvif->ap.bcast_hlid ||
+ skb->protocol == cpu_to_be16(ETH_P_PAE))
+ /* send AP bcast and EAPOLs using the min basic rate */
rate_idx = wlvif->ap.bcast_rate_idx;
else
rate_idx = wlvif->ap.ucast_rate_idx[ac];
@@ -330,9 +336,9 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
ieee80211_has_protected(frame_control))
tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;
- desc->reserved = 0;
desc->tx_attr = cpu_to_le16(tx_attr);
+ wlcore_hw_set_tx_desc_csum(wl, desc, skb);
wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
}
@@ -346,16 +352,20 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u32 total_len;
u8 hlid;
bool is_dummy;
+ bool is_gem = false;
- if (!skb)
+ if (!skb) {
+ wl1271_error("discarding null skb");
return -EINVAL;
+ }
info = IEEE80211_SKB_CB(skb);
/* TODO: handle dummy packets on multi-vifs */
is_dummy = wl12xx_is_dummy_packet(wl, skb);
- if (info->control.hw_key &&
+ if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
+ info->control.hw_key &&
info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
extra = WL1271_EXTRA_SPACE_TKIP;
@@ -373,6 +383,8 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
return ret;
wlvif->default_key = idx;
}
+
+ is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
}
hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
if (hlid == WL12XX_INVALID_LINK_ID) {
@@ -380,7 +392,8 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
return -EINVAL;
}
- ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid);
+ ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
+ is_gem);
if (ret < 0)
return ret;
@@ -425,10 +438,10 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
rate_set >>= 1;
}
- /* MCS rates indication are on bits 16 - 23 */
+ /* MCS rate indications are on bits 16 - 31 */
rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;
- for (bit = 0; bit < 8; bit++) {
+ for (bit = 0; bit < 16; bit++) {
if (rate_set & 0x1)
enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
rate_set >>= 1;
@@ -439,18 +452,15 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
- unsigned long flags;
int i;
for (i = 0; i < NUM_TX_QUEUES; i++) {
- if (test_bit(i, &wl->stopped_queues_map) &&
+ if (wlcore_is_queue_stopped_by_reason(wl, i,
+ WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) {
/* firmware buffer has space, restart queues */
- spin_lock_irqsave(&wl->wl_lock, flags);
- ieee80211_wake_queue(wl->hw,
- wl1271_tx_get_mac80211_queue(i));
- clear_bit(i, &wl->stopped_queues_map);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
+ wlcore_wake_queue(wl, i,
+ WLCORE_QUEUE_STOP_REASON_WATERMARK);
}
}
}
@@ -656,18 +666,29 @@ void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
}
}
-void wl1271_tx_work_locked(struct wl1271 *wl)
+/*
+ * Returns failure values only in case of failed bus ops within this function.
+ * wl1271_prepare_tx_frame retvals won't be returned in order to avoid
+ * triggering recovery by higher layers when not necessary.
+ * In case a FW command fails within wl1271_prepare_tx_frame, a recovery
+ * will be queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame
+ * can occur and are legitimate, so don't propagate them. -EINVAL will emit
+ * a WARNING within the prepare_tx_frame code, but there's nothing we should
+ * do about those either.
+ */
+int wlcore_tx_work_locked(struct wl1271 *wl)
{
struct wl12xx_vif *wlvif;
struct sk_buff *skb;
struct wl1271_tx_hw_descr *desc;
- u32 buf_offset = 0;
+ u32 buf_offset = 0, last_len = 0;
bool sent_packets = false;
unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
- int ret;
+ int ret = 0;
+ int bus_ret = 0;
if (unlikely(wl->state == WL1271_STATE_OFF))
- return;
+ return 0;
while ((skb = wl1271_skb_dequeue(wl))) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -685,8 +706,14 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
* Flush buffer and try again.
*/
wl1271_skb_queue_head(wl, wlvif, skb);
- wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
- buf_offset, true);
+
+ buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
+ last_len);
+ bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
+ wl->aggr_buf, buf_offset, true);
+ if (bus_ret < 0)
+ goto out;
+
sent_packets = true;
buf_offset = 0;
continue;
@@ -710,7 +737,8 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
ieee80211_free_txskb(wl->hw, skb);
goto out_ack;
}
- buf_offset += ret;
+ last_len = ret;
+ buf_offset += last_len;
wl->tx_packets_count++;
if (has_data) {
desc = (struct wl1271_tx_hw_descr *) skb->data;
@@ -720,8 +748,12 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
out_ack:
if (buf_offset) {
- wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
- buf_offset, true);
+ buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
+ bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
+ buf_offset, true);
+ if (bus_ret < 0)
+ goto out;
+
sent_packets = true;
}
if (sent_packets) {
@@ -729,13 +761,19 @@ out_ack:
* Interrupt the firmware with the new packets. This is only
* required for older hardware revisions
*/
- if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION)
- wl1271_write32(wl, WL12XX_HOST_WR_ACCESS,
- wl->tx_packets_count);
+ if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
+ bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
+ wl->tx_packets_count);
+ if (bus_ret < 0)
+ goto out;
+ }
wl1271_handle_tx_low_watermark(wl);
}
wl12xx_rearm_rx_streaming(wl, active_hlids);
+
+out:
+ return bus_ret;
}
void wl1271_tx_work(struct work_struct *work)
@@ -748,7 +786,11 @@ void wl1271_tx_work(struct work_struct *work)
if (ret < 0)
goto out;
- wl1271_tx_work_locked(wl);
+ ret = wlcore_tx_work_locked(wl);
+ if (ret < 0) {
+ wl12xx_queue_recovery_work(wl);
+ goto out;
+ }
wl1271_ps_elp_sleep(wl);
out:
@@ -849,7 +891,8 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
/* remove TKIP header space if present */
- if (info->control.hw_key &&
+ if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
+ info->control.hw_key &&
info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
@@ -869,22 +912,27 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
}
/* Called upon reception of a TX complete interrupt */
-void wl1271_tx_complete(struct wl1271 *wl)
+int wlcore_tx_complete(struct wl1271 *wl)
{
- struct wl1271_acx_mem_map *memmap =
- (struct wl1271_acx_mem_map *)wl->target_mem_map;
+ struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
u32 count, fw_counter;
u32 i;
+ int ret;
/* read the tx results from the chipset */
- wl1271_read(wl, le32_to_cpu(memmap->tx_result),
- wl->tx_res_if, sizeof(*wl->tx_res_if), false);
+ ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
+ wl->tx_res_if, sizeof(*wl->tx_res_if), false);
+ if (ret < 0)
+ goto out;
+
fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);
/* write host counter to chipset (to ack) */
- wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
- offsetof(struct wl1271_tx_hw_res_if,
- tx_result_host_counter), fw_counter);
+ ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
+ offsetof(struct wl1271_tx_hw_res_if,
+ tx_result_host_counter), fw_counter);
+ if (ret < 0)
+ goto out;
count = fw_counter - wl->tx_results_count;
wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
@@ -904,8 +952,11 @@ void wl1271_tx_complete(struct wl1271 *wl)
wl->tx_results_count++;
}
+
+out:
+ return ret;
}
-EXPORT_SYMBOL(wl1271_tx_complete);
+EXPORT_SYMBOL(wlcore_tx_complete);
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
@@ -958,7 +1009,7 @@ void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
}
/* caller must hold wl->mutex and TX must be stopped */
-void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+void wl12xx_tx_reset(struct wl1271 *wl)
{
int i;
struct sk_buff *skb;
@@ -973,15 +1024,12 @@ void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
wl->tx_queue_count[i] = 0;
}
- wl->stopped_queues_map = 0;
-
/*
* Make sure the driver is at a consistent state, in case this
* function is called from a context other than interface removal.
* This call will always wake the TX queues.
*/
- if (reset_tx_queues)
- wl1271_handle_tx_low_watermark(wl);
+ wl1271_handle_tx_low_watermark(wl);
for (i = 0; i < wl->num_tx_desc; i++) {
if (wl->tx_frames[i] == NULL)
@@ -998,7 +1046,8 @@ void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
*/
info = IEEE80211_SKB_CB(skb);
skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
- if (info->control.hw_key &&
+ if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
+ info->control.hw_key &&
info->control.hw_key->cipher ==
WLAN_CIPHER_SUITE_TKIP) {
int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
@@ -1024,6 +1073,11 @@ void wl1271_tx_flush(struct wl1271 *wl)
int i;
timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
+ /* only one flush should be in progress, for consistent queue state */
+ mutex_lock(&wl->flush_mutex);
+
+ wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
+
while (!time_after(jiffies, timeout)) {
mutex_lock(&wl->mutex);
wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
@@ -1032,7 +1086,7 @@ void wl1271_tx_flush(struct wl1271 *wl)
if ((wl->tx_frames_cnt == 0) &&
(wl1271_tx_total_queue_count(wl) == 0)) {
mutex_unlock(&wl->mutex);
- return;
+ goto out;
}
mutex_unlock(&wl->mutex);
msleep(1);
@@ -1045,7 +1099,12 @@ void wl1271_tx_flush(struct wl1271 *wl)
for (i = 0; i < WL12XX_MAX_LINKS; i++)
wl1271_tx_reset_link_queues(wl, i);
mutex_unlock(&wl->mutex);
+
+out:
+ wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
+ mutex_unlock(&wl->flush_mutex);
}
+EXPORT_SYMBOL_GPL(wl1271_tx_flush);
u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
{
@@ -1054,3 +1113,96 @@ u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
return BIT(__ffs(rate_set));
}
+
+void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue,
+ enum wlcore_queue_stop_reason reason)
+{
+ bool stopped = !!wl->queue_stop_reasons[queue];
+
+ /* the queue should not already be stopped for this reason */
+ WARN_ON(test_and_set_bit(reason, &wl->queue_stop_reasons[queue]));
+
+ if (stopped)
+ return;
+
+ ieee80211_stop_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));
+}
+
+void wlcore_stop_queue(struct wl1271 *wl, u8 queue,
+ enum wlcore_queue_stop_reason reason)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ wlcore_stop_queue_locked(wl, queue, reason);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+}
+
+void wlcore_wake_queue(struct wl1271 *wl, u8 queue,
+ enum wlcore_queue_stop_reason reason)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wl->wl_lock, flags);
+
+ /* the queue should already have been stopped for this reason */
+ WARN_ON(!test_and_clear_bit(reason, &wl->queue_stop_reasons[queue]));
+
+ if (wl->queue_stop_reasons[queue])
+ goto out;
+
+ ieee80211_wake_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));
+
+out:
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+}
+
+void wlcore_stop_queues(struct wl1271 *wl,
+ enum wlcore_queue_stop_reason reason)
+{
+ int i;
+
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ wlcore_stop_queue(wl, i, reason);
+}
+EXPORT_SYMBOL_GPL(wlcore_stop_queues);
+
+void wlcore_wake_queues(struct wl1271 *wl,
+ enum wlcore_queue_stop_reason reason)
+{
+ int i;
+
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ wlcore_wake_queue(wl, i, reason);
+}
+EXPORT_SYMBOL_GPL(wlcore_wake_queues);
+
+void wlcore_reset_stopped_queues(struct wl1271 *wl)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wl->wl_lock, flags);
+
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ if (!wl->queue_stop_reasons[i])
+ continue;
+
+ wl->queue_stop_reasons[i] = 0;
+ ieee80211_wake_queue(wl->hw,
+ wl1271_tx_get_mac80211_queue(i));
+ }
+
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+}
+
+bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue,
+ enum wlcore_queue_stop_reason reason)
+{
+ return test_bit(reason, &wl->queue_stop_reasons[queue]);
+}
+
+bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue)
+{
+ return !!wl->queue_stop_reasons[queue];
+}
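
The tx.c rework above replaces the single stopped_queues_map with a per-queue bitmask of stop reasons: the mac80211 queue is stopped only when the first reason is set and woken only when the last reason is cleared. A plain C sketch of that pattern for a single queue; queue_hw_stop()/queue_hw_wake() are hypothetical stand-ins for the mac80211 calls:

#include <stdio.h>

enum stop_reason {
    STOP_REASON_WATERMARK,
    STOP_REASON_FW_RESTART,
    STOP_REASON_FLUSH,
};

static unsigned long stop_reasons;   /* one bit per active stop reason */

static void queue_hw_stop(void) { puts("hw queue stopped"); }
static void queue_hw_wake(void) { puts("hw queue woken"); }

static void stop_queue(enum stop_reason reason)
{
    int was_stopped = stop_reasons != 0;

    stop_reasons |= 1UL << reason;
    if (!was_stopped)                 /* only the first reason stops the queue */
        queue_hw_stop();
}

static void wake_queue(enum stop_reason reason)
{
    stop_reasons &= ~(1UL << reason);
    if (!stop_reasons)                /* wake only once every reason is cleared */
        queue_hw_wake();
}

int main(void)
{
    stop_queue(STOP_REASON_FLUSH);
    stop_queue(STOP_REASON_WATERMARK);   /* already stopped, no extra HW call */
    wake_queue(STOP_REASON_FLUSH);       /* still held by WATERMARK */
    wake_queue(STOP_REASON_WATERMARK);   /* last reason gone, queue wakes */
    return 0;
}
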
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 2fd6e5d..1e939b0 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -85,6 +85,19 @@ struct wl128x_tx_mem {
u8 extra_bytes;
} __packed;
+struct wl18xx_tx_mem {
+ /*
+ * Total number of memory blocks allocated by the host for
+ * this packet.
+ */
+ u8 total_mem_blocks;
+
+ /*
+ * control bits
+ */
+ u8 ctrl;
+} __packed;
+
/*
* On wl128x based devices, when TX packets are aggregated, each packet
* size must be aligned to the SDIO block size. The maximum block size
@@ -100,6 +113,7 @@ struct wl1271_tx_hw_descr {
union {
struct wl127x_tx_mem wl127x_mem;
struct wl128x_tx_mem wl128x_mem;
+ struct wl18xx_tx_mem wl18xx_mem;
} __packed;
/* Device time (in us) when the packet arrived to the driver */
__le32 start_time;
@@ -116,7 +130,16 @@ struct wl1271_tx_hw_descr {
u8 tid;
/* host link ID (HLID) */
u8 hlid;
- u8 reserved;
+
+ union {
+ u8 wl12xx_reserved;
+
+ /*
+ * bit 0 -> 0 = udp, 1 = tcp
+ * bit 1:7 -> IP header offset
+ */
+ u8 wl18xx_checksum_data;
+ } __packed;
} __packed;
enum wl1271_tx_hw_res_status {
@@ -161,6 +184,13 @@ struct wl1271_tx_hw_res_if {
struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN];
} __packed;
+enum wlcore_queue_stop_reason {
+ WLCORE_QUEUE_STOP_REASON_WATERMARK,
+ WLCORE_QUEUE_STOP_REASON_FW_RESTART,
+ WLCORE_QUEUE_STOP_REASON_FLUSH,
+ WLCORE_QUEUE_STOP_REASON_SPARE_BLK, /* 18xx specific */
+};
+
static inline int wl1271_tx_get_queue(int queue)
{
switch (queue) {
@@ -204,10 +234,10 @@ static inline int wl1271_tx_total_queue_count(struct wl1271 *wl)
}
void wl1271_tx_work(struct work_struct *work);
-void wl1271_tx_work_locked(struct wl1271 *wl);
-void wl1271_tx_complete(struct wl1271 *wl);
+int wlcore_tx_work_locked(struct wl1271 *wl);
+int wlcore_tx_complete(struct wl1271 *wl);
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
+void wl12xx_tx_reset(struct wl1271 *wl);
void wl1271_tx_flush(struct wl1271 *wl);
u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
@@ -223,6 +253,21 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
unsigned int packet_length);
+void wl1271_free_tx_id(struct wl1271 *wl, int id);
+void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue,
+ enum wlcore_queue_stop_reason reason);
+void wlcore_stop_queue(struct wl1271 *wl, u8 queue,
+ enum wlcore_queue_stop_reason reason);
+void wlcore_wake_queue(struct wl1271 *wl, u8 queue,
+ enum wlcore_queue_stop_reason reason);
+void wlcore_stop_queues(struct wl1271 *wl,
+ enum wlcore_queue_stop_reason reason);
+void wlcore_wake_queues(struct wl1271 *wl,
+ enum wlcore_queue_stop_reason reason);
+void wlcore_reset_stopped_queues(struct wl1271 *wl);
+bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue,
+ enum wlcore_queue_stop_reason reason);
+bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue);
/* from main.c */
void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
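
The wl18xx_checksum_data byte documented in the tx.h hunk above encodes the transport protocol in bit 0 (0 = UDP, 1 = TCP) and the IP header offset in bits 1-7. A small sketch of how such a byte could be packed and unpacked; the helper names are hypothetical:

#include <stdint.h>
#include <stdbool.h>

static inline uint8_t csum_data_pack(bool is_tcp, unsigned int ip_hdr_offset)
{
    /* bit 0: 0 = UDP, 1 = TCP; bits 1-7: IP header offset */
    return (is_tcp ? 1 : 0) | ((ip_hdr_offset & 0x7f) << 1);
}

static inline bool csum_data_is_tcp(uint8_t v)
{
    return v & 0x01;
}

static inline unsigned int csum_data_ip_hdr_offset(uint8_t v)
{
    return v >> 1;
}
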
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 0b3f0b5..0ce7a8e 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -24,8 +24,9 @@
#include <linux/platform_device.h>
-#include "wl12xx.h"
+#include "wlcore_i.h"
#include "event.h"
+#include "boot.h"
/* The maximum number of Tx descriptors in all chip families */
#define WLCORE_MAX_TX_DESCRIPTORS 32
@@ -33,14 +34,16 @@
/* forward declaration */
struct wl1271_tx_hw_descr;
enum wl_rx_buf_align;
+struct wl1271_rx_descriptor;
struct wlcore_ops {
int (*identify_chip)(struct wl1271 *wl);
int (*identify_fw)(struct wl1271 *wl);
int (*boot)(struct wl1271 *wl);
- void (*trigger_cmd)(struct wl1271 *wl, int cmd_box_addr,
- void *buf, size_t len);
- void (*ack_event)(struct wl1271 *wl);
+ int (*plt_init)(struct wl1271 *wl);
+ int (*trigger_cmd)(struct wl1271 *wl, int cmd_box_addr,
+ void *buf, size_t len);
+ int (*ack_event)(struct wl1271 *wl);
u32 (*calc_tx_blocks)(struct wl1271 *wl, u32 len, u32 spare_blks);
void (*set_tx_desc_blocks)(struct wl1271 *wl,
struct wl1271_tx_hw_descr *desc,
@@ -50,17 +53,34 @@ struct wlcore_ops {
struct sk_buff *skb);
enum wl_rx_buf_align (*get_rx_buf_align)(struct wl1271 *wl,
u32 rx_desc);
- void (*prepare_read)(struct wl1271 *wl, u32 rx_desc, u32 len);
+ int (*prepare_read)(struct wl1271 *wl, u32 rx_desc, u32 len);
u32 (*get_rx_packet_len)(struct wl1271 *wl, void *rx_data,
u32 data_len);
- void (*tx_delayed_compl)(struct wl1271 *wl);
+ int (*tx_delayed_compl)(struct wl1271 *wl);
void (*tx_immediate_compl)(struct wl1271 *wl);
int (*hw_init)(struct wl1271 *wl);
int (*init_vif)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
u32 (*sta_get_ap_rate_mask)(struct wl1271 *wl,
struct wl12xx_vif *wlvif);
- s8 (*get_pg_ver)(struct wl1271 *wl);
- void (*get_mac)(struct wl1271 *wl);
+ int (*get_pg_ver)(struct wl1271 *wl, s8 *ver);
+ int (*get_mac)(struct wl1271 *wl);
+ void (*set_tx_desc_csum)(struct wl1271 *wl,
+ struct wl1271_tx_hw_descr *desc,
+ struct sk_buff *skb);
+ void (*set_rx_csum)(struct wl1271 *wl,
+ struct wl1271_rx_descriptor *desc,
+ struct sk_buff *skb);
+ u32 (*ap_get_mimo_wide_rate_mask)(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
+ int (*debugfs_init)(struct wl1271 *wl, struct dentry *rootdir);
+ int (*handle_static_data)(struct wl1271 *wl,
+ struct wl1271_static_data *static_data);
+ int (*get_spare_blocks)(struct wl1271 *wl, bool is_gem);
+ int (*set_key)(struct wl1271 *wl, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key_conf);
+ u32 (*pre_pkt_send)(struct wl1271 *wl, u32 buf_offset, u32 last_len);
};
enum wlcore_partitions {
@@ -109,6 +129,15 @@ enum wlcore_registers {
REG_TABLE_LEN,
};
+struct wl1271_stats {
+ void *fw_stats;
+ unsigned long fw_stats_update;
+ size_t fw_stats_len;
+
+ unsigned int retry_count;
+ unsigned int excessive_retries;
+};
+
struct wl1271 {
struct ieee80211_hw *hw;
bool mac80211_registered;
@@ -121,13 +150,14 @@ struct wl1271 {
void (*set_power)(bool enable);
int irq;
- int ref_clock;
spinlock_t wl_lock;
enum wl1271_state state;
enum wl12xx_fw_type fw_type;
bool plt;
+ enum plt_mode plt_mode;
+ u8 fem_manuf;
u8 last_vif_count;
struct mutex mutex;
@@ -186,7 +216,7 @@ struct wl1271 {
/* Frames scheduled for transmission, not handled yet */
int tx_queue_count[NUM_TX_QUEUES];
- long stopped_queues_map;
+ unsigned long queue_stop_reasons[NUM_TX_QUEUES];
/* Frames received, not handled yet by mac80211 */
struct sk_buff_head deferred_rx_queue;
@@ -205,9 +235,6 @@ struct wl1271 {
/* FW Rx counter */
u32 rx_counter;
- /* Rx memory pool address */
- struct wl1271_rx_mem_pool_addr rx_mem_pool_addr;
-
/* Intermediate buffer, used for packet aggregation */
u8 *aggr_buf;
@@ -228,6 +255,7 @@ struct wl1271 {
/* Hardware recovery work */
struct work_struct recovery_work;
+ bool watchdog_recovery;
/* Pointer that holds DMA-friendly block for the mailbox */
struct event_mailbox *mbox;
@@ -263,7 +291,8 @@ struct wl1271 {
u32 buffer_cmd;
u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
- struct wl_fw_status *fw_status;
+ struct wl_fw_status_1 *fw_status_1;
+ struct wl_fw_status_2 *fw_status_2;
struct wl1271_tx_hw_res_if *tx_res_if;
/* Current chipset configuration */
@@ -277,9 +306,7 @@ struct wl1271 {
s8 noise;
/* bands supported by this instance of wl12xx */
- struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
-
- int tcxo_clock;
+ struct ieee80211_supported_band bands[WLCORE_NUM_BANDS];
/*
* wowlan trigger was configured during suspend.
@@ -333,10 +360,8 @@ struct wl1271 {
/* number of TX descriptors the HW supports. */
u32 num_tx_desc;
-
- /* spare Tx blocks for normal/GEM operating modes */
- u32 normal_tx_spare;
- u32 gem_tx_spare;
+ /* number of RX descriptors the HW supports. */
+ u32 num_rx_desc;
/* translate HW Tx rates to standard rate-indices */
const u8 **band_rate_to_idx;
@@ -348,19 +373,57 @@ struct wl1271 {
u8 hw_min_ht_rate;
/* HW HT (11n) capabilities */
- struct ieee80211_sta_ht_cap ht_cap;
+ struct ieee80211_sta_ht_cap ht_cap[WLCORE_NUM_BANDS];
/* size of the private FW status data */
size_t fw_status_priv_len;
/* RX Data filter rule state - enabled/disabled */
bool rx_filter_enabled[WL1271_MAX_RX_FILTERS];
+
+ /* size of the private static data */
+ size_t static_data_priv_len;
+
+ /* the current channel type */
+ enum nl80211_channel_type channel_type;
+
+ /* mutex for protecting the tx_flush function */
+ struct mutex flush_mutex;
+
+ /* sleep auth value currently configured to FW */
+ int sleep_auth;
+
+ /* the minimum FW version required for the driver to work */
+ unsigned int min_fw_ver[NUM_FW_VER];
};
int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
int __devexit wlcore_remove(struct platform_device *pdev);
struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size);
int wlcore_free_hw(struct wl1271 *wl);
+int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key_conf);
+
+static inline void
+wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
+ struct ieee80211_sta_ht_cap *ht_cap)
+{
+ memcpy(&wl->ht_cap[band], ht_cap, sizeof(*ht_cap));
+}
+
+static inline void
+wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip,
+ unsigned int iftype, unsigned int major,
+ unsigned int subtype, unsigned int minor)
+{
+ wl->min_fw_ver[FW_VER_CHIP] = chip;
+ wl->min_fw_ver[FW_VER_IF_TYPE] = iftype;
+ wl->min_fw_ver[FW_VER_MAJOR] = major;
+ wl->min_fw_ver[FW_VER_SUBTYPE] = subtype;
+ wl->min_fw_ver[FW_VER_MINOR] = minor;
+}
/* Firmware image load chunk size */
#define CHUNK_SIZE 16384
@@ -385,6 +448,18 @@ int wlcore_free_hw(struct wl1271 *wl);
/* Some firmwares may not support ELP */
#define WLCORE_QUIRK_NO_ELP BIT(6)
+/* pad only the last frame in the aggregate buffer */
+#define WLCORE_QUIRK_TX_PAD_LAST_FRAME BIT(7)
+
+/* extra header space is required for TKIP */
+#define WLCORE_QUIRK_TKIP_HEADER_SPACE BIT(8)
+
+/* Some firmwares do not support sched scans while connected */
+#define WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN BIT(9)
+
+/* separate probe response templates for one-shot and sched scans */
+#define WLCORE_QUIRK_DUAL_PROBE_TMPL BIT(10)
+
/* TODO: move to the lower drivers when all usages are abstracted */
#define CHIP_ID_1271_PG10 (0x4030101)
#define CHIP_ID_1271_PG20 (0x4030111)
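
The quirk bits added above (for example WLCORE_QUIRK_TKIP_HEADER_SPACE and WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN) let the lower chip driver describe per-family behaviour that the core then tests at runtime. A minimal sketch of that flag-check pattern with illustrative names and values:

#include <stdint.h>
#include <stdbool.h>

#define DEMO_QUIRK_TX_PAD_LAST_FRAME    (1U << 0)
#define DEMO_QUIRK_TKIP_HEADER_SPACE    (1U << 1)

struct demo_core {
    uint32_t quirks;                  /* set once by the lower driver */
};

static inline bool demo_needs_tkip_space(const struct demo_core *core)
{
    /* only chips that set the quirk reserve extra TKIP header room */
    return core->quirks & DEMO_QUIRK_TKIP_HEADER_SPACE;
}
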
diff --git a/drivers/net/wireless/ti/wlcore/wl12xx.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index f12bdf7..c050563 100644
--- a/drivers/net/wireless/ti/wlcore/wl12xx.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -22,8 +22,8 @@
*
*/
-#ifndef __WL12XX_H__
-#define __WL12XX_H__
+#ifndef __WLCORE_I_H__
+#define __WLCORE_I_H__
#include <linux/mutex.h>
#include <linux/completion.h>
@@ -35,15 +35,6 @@
#include "conf.h"
#include "ini.h"
-#define WL127X_FW_NAME_MULTI "ti-connectivity/wl127x-fw-4-mr.bin"
-#define WL127X_FW_NAME_SINGLE "ti-connectivity/wl127x-fw-4-sr.bin"
-
-#define WL128X_FW_NAME_MULTI "ti-connectivity/wl128x-fw-4-mr.bin"
-#define WL128X_FW_NAME_SINGLE "ti-connectivity/wl128x-fw-4-sr.bin"
-
-#define WL127X_PLT_FW_NAME "ti-connectivity/wl127x-fw-4-plt.bin"
-#define WL128X_PLT_FW_NAME "ti-connectivity/wl128x-fw-4-plt.bin"
-
/*
* wl127x and wl128x are using the same NVS file name. However, the
* ini parameters between them are different. The driver validates
@@ -71,6 +62,9 @@
#define WL12XX_INVALID_ROLE_ID 0xff
#define WL12XX_INVALID_LINK_ID 0xff
+/* the driver supports the 2.4GHz and 5GHz bands */
+#define WLCORE_NUM_BANDS 2
+
#define WL12XX_MAX_RATE_POLICIES 16
/* Defined by FW as 0. Will not be freed or allocated. */
@@ -89,7 +83,7 @@
#define WL1271_AP_BSS_INDEX 0
#define WL1271_AP_DEF_BEACON_EXP 20
-#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
+#define WL1271_AGGR_BUFFER_SIZE (5 * PAGE_SIZE)
enum wl1271_state {
WL1271_STATE_OFF,
@@ -132,16 +126,7 @@ struct wl1271_chip {
unsigned int fw_ver[NUM_FW_VER];
};
-struct wl1271_stats {
- struct acx_statistics *fw_stats;
- unsigned long fw_stats_update;
-
- unsigned int retry_count;
- unsigned int excessive_retries;
-};
-
#define NUM_TX_QUEUES 4
-#define NUM_RX_PKT_DESC 8
#define AP_MAX_STATIONS 8
@@ -159,13 +144,26 @@ struct wl_fw_packet_counters {
} __packed;
/* FW status registers */
-struct wl_fw_status {
+struct wl_fw_status_1 {
__le32 intr;
u8 fw_rx_counter;
u8 drv_rx_counter;
u8 reserved;
u8 tx_results_counter;
- __le32 rx_pkt_descs[NUM_RX_PKT_DESC];
+ __le32 rx_pkt_descs[0];
+} __packed;
+
+/*
+ * Each HW arch has a different number of Rx descriptors.
+ * The length of the status depends on it, since it holds an array
+ * of descriptors.
+ */
+#define WLCORE_FW_STATUS_1_LEN(num_rx_desc) \
+ (sizeof(struct wl_fw_status_1) + \
+ (sizeof(((struct wl_fw_status_1 *)0)->rx_pkt_descs[0])) * \
+ num_rx_desc)
+
+struct wl_fw_status_2 {
__le32 fw_localtime;
/*
@@ -194,11 +192,6 @@ struct wl_fw_status {
u8 priv[0];
} __packed;
-struct wl1271_rx_mem_pool_addr {
- u32 addr;
- u32 addr_extra;
-};
-
#define WL1271_MAX_CHANNELS 64
struct wl1271_scan {
struct cfg80211_scan_request *req;
@@ -210,10 +203,10 @@ struct wl1271_scan {
};
struct wl1271_if_operations {
- void (*read)(struct device *child, int addr, void *buf, size_t len,
- bool fixed);
- void (*write)(struct device *child, int addr, void *buf, size_t len,
- bool fixed);
+ int __must_check (*read)(struct device *child, int addr, void *buf,
+ size_t len, bool fixed);
+ int __must_check (*write)(struct device *child, int addr, void *buf,
+ size_t len, bool fixed);
void (*reset)(struct device *child);
void (*init)(struct device *child);
int (*power)(struct device *child, bool enable);
@@ -248,6 +241,7 @@ enum wl12xx_flags {
WL1271_FLAG_RECOVERY_IN_PROGRESS,
WL1271_FLAG_VIF_CHANGE_IN_PROGRESS,
WL1271_FLAG_INTENDED_FW_RECOVERY,
+ WL1271_FLAG_IO_FAILED,
};
enum wl12xx_vif_flags {
@@ -299,6 +293,12 @@ enum rx_filter_action {
FILTER_FW_HANDLE = 2
};
+enum plt_mode {
+ PLT_OFF = 0,
+ PLT_ON = 1,
+ PLT_FEM_DETECT = 2,
+};
+
struct wl12xx_rx_filter_field {
__le16 offset;
u8 len;
@@ -367,8 +367,9 @@ struct wl12xx_vif {
/* The current band */
enum ieee80211_band band;
int channel;
+ enum nl80211_channel_type channel_type;
- u32 bitrate_masks[IEEE80211_NUM_BANDS];
+ u32 bitrate_masks[WLCORE_NUM_BANDS];
u32 basic_rate_set;
/*
@@ -417,9 +418,6 @@ struct wl12xx_vif {
struct work_struct rx_streaming_disable_work;
struct timer_list rx_streaming_timer;
- /* does the current role use GEM for encryption (AP or STA) */
- bool is_gem;
-
/*
* This struct must be last!
* data that has to be saved acrossed reconfigs (e.g. recovery)
@@ -467,7 +465,7 @@ struct ieee80211_vif *wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif)
#define wl12xx_for_each_wlvif_ap(wl, wlvif) \
wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_AP_BSS)
-int wl1271_plt_start(struct wl1271 *wl);
+int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode);
int wl1271_plt_stop(struct wl1271 *wl);
int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl12xx_queue_recovery_work(struct wl1271 *wl);
@@ -501,7 +499,8 @@ void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
/* Macros to handle wl1271.sta_rate_set */
#define HW_BG_RATES_MASK 0xffff
#define HW_HT_RATES_OFFSET 16
+#define HW_MIMO_RATES_OFFSET 24
#define WL12XX_HW_BLOCK_SIZE 256
-#endif
+#endif /* __WLCORE_I_H__ */
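
The wl_fw_status split in the hunk above ends the first part with a zero-length rx_pkt_descs[] array and computes its real, per-chip size with WLCORE_FW_STATUS_1_LEN(). A plain C sketch of that variable-length layout, using hypothetical names:

#include <stdint.h>
#include <stdlib.h>

struct fw_status_head {
    uint32_t intr;
    uint8_t  fw_rx_counter;
    uint8_t  drv_rx_counter;
    uint8_t  reserved;
    uint8_t  tx_results_counter;
    uint32_t rx_pkt_descs[];          /* length depends on the chip family */
};

#define FW_STATUS_HEAD_LEN(num_rx_desc) \
    (sizeof(struct fw_status_head) + \
     sizeof(((struct fw_status_head *)0)->rx_pkt_descs[0]) * (num_rx_desc))

int main(void)
{
    unsigned int num_rx_desc = 8;     /* e.g. wl12xx; wl18xx uses a different count */
    struct fw_status_head *st = malloc(FW_STATUS_HEAD_LEN(num_rx_desc));

    free(st);
    return 0;
}
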
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 117c412..7ab9222 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -827,7 +827,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values,
static inline int zd_ioread32_locked(struct zd_chip *chip, u32 *value,
const zd_addr_t addr)
{
- return zd_ioread32v_locked(chip, value, (const zd_addr_t *)&addr, 1);
+ return zd_ioread32v_locked(chip, value, &addr, 1);
}
static inline int zd_iowrite16_locked(struct zd_chip *chip, u16 value,
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 99193b4..45e3bb2 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -274,7 +274,7 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
static inline int zd_usb_ioread16(struct zd_usb *usb, u16 *value,
const zd_addr_t addr)
{
- return zd_usb_ioread16v(usb, value, (const zd_addr_t *)&addr, 1);
+ return zd_usb_ioread16v(usb, value, &addr, 1);
}
void zd_usb_iowrite16v_async_start(struct zd_usb *usb);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 2596401..682633b 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -325,8 +325,7 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
unsigned int count;
int i, copy_off;
- count = DIV_ROUND_UP(
- offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE);
+ count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
copy_off = skb_headlen(skb) % PAGE_SIZE;
@@ -1364,8 +1363,6 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
INVALID_PENDING_IDX);
}
- __skb_queue_tail(&netbk->tx_queue, skb);
-
netbk->pending_cons++;
request_gop = xen_netbk_get_requests(netbk, vif,
@@ -1377,6 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
}
gop = request_gop;
+ __skb_queue_tail(&netbk->tx_queue, skb);
+
vif->tx.req_cons = idx;
xen_netbk_check_rx_xenvif(vif);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 2027afe..3089990 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1935,14 +1935,14 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
dev_dbg(&dev->dev, "%s\n", dev->nodename);
- unregister_netdev(info->netdev);
-
xennet_disconnect_backend(info);
- del_timer_sync(&info->rx_refill_timer);
-
xennet_sysfs_delif(info->netdev);
+ unregister_netdev(info->netdev);
+
+ del_timer_sync(&info->rx_refill_timer);
+
free_percpu(info->stats);
free_netdev(info->netdev);
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
index 1f74a77..e7fd493 100644
--- a/drivers/nfc/nfcwilink.c
+++ b/drivers/nfc/nfcwilink.c
@@ -535,9 +535,10 @@ static int nfcwilink_probe(struct platform_device *pdev)
drv->pdev = pdev;
protocols = NFC_PROTO_JEWEL_MASK
- | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK
- | NFC_PROTO_ISO14443_MASK
- | NFC_PROTO_NFC_DEP_MASK;
+ | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK
+ | NFC_PROTO_ISO14443_MASK
+ | NFC_PROTO_ISO14443_B_MASK
+ | NFC_PROTO_NFC_DEP_MASK;
drv->ndev = nci_allocate_device(&nfcwilink_ops,
protocols,
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index 19110f0..d606f52 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -38,13 +38,51 @@
#define SCM_VENDOR_ID 0x4E6
#define SCL3711_PRODUCT_ID 0x5591
+#define SONY_VENDOR_ID 0x054c
+#define PASORI_PRODUCT_ID 0x02e1
+
+#define PN533_QUIRKS_TYPE_A BIT(0)
+#define PN533_QUIRKS_TYPE_F BIT(1)
+#define PN533_QUIRKS_DEP BIT(2)
+#define PN533_QUIRKS_RAW_EXCHANGE BIT(3)
+
+#define PN533_DEVICE_STD 0x1
+#define PN533_DEVICE_PASORI 0x2
+
+#define PN533_ALL_PROTOCOLS (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK |\
+ NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK |\
+ NFC_PROTO_NFC_DEP_MASK |\
+ NFC_PROTO_ISO14443_B_MASK)
+
+#define PN533_NO_TYPE_B_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \
+ NFC_PROTO_MIFARE_MASK | \
+ NFC_PROTO_FELICA_MASK | \
+ NFC_PROTO_ISO14443_MASK | \
+ NFC_PROTO_NFC_DEP_MASK)
+
static const struct usb_device_id pn533_table[] = {
- { USB_DEVICE(PN533_VENDOR_ID, PN533_PRODUCT_ID) },
- { USB_DEVICE(SCM_VENDOR_ID, SCL3711_PRODUCT_ID) },
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = PN533_VENDOR_ID,
+ .idProduct = PN533_PRODUCT_ID,
+ .driver_info = PN533_DEVICE_STD,
+ },
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = SCM_VENDOR_ID,
+ .idProduct = SCL3711_PRODUCT_ID,
+ .driver_info = PN533_DEVICE_STD,
+ },
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = SONY_VENDOR_ID,
+ .idProduct = PASORI_PRODUCT_ID,
+ .driver_info = PN533_DEVICE_PASORI,
+ },
{ }
};
MODULE_DEVICE_TABLE(usb, pn533_table);
+/* How much time we spend listening for initiators */
+#define PN533_LISTEN_TIME 2
+
/* frame definitions */
#define PN533_FRAME_TAIL_SIZE 2
#define PN533_FRAME_SIZE(f) (sizeof(struct pn533_frame) + f->datalen + \
@@ -69,11 +107,16 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
#define PN533_CMD_GET_FIRMWARE_VERSION 0x02
#define PN533_CMD_RF_CONFIGURATION 0x32
#define PN533_CMD_IN_DATA_EXCHANGE 0x40
+#define PN533_CMD_IN_COMM_THRU 0x42
#define PN533_CMD_IN_LIST_PASSIVE_TARGET 0x4A
#define PN533_CMD_IN_ATR 0x50
#define PN533_CMD_IN_RELEASE 0x52
#define PN533_CMD_IN_JUMP_FOR_DEP 0x56
+#define PN533_CMD_TG_INIT_AS_TARGET 0x8c
+#define PN533_CMD_TG_GET_DATA 0x86
+#define PN533_CMD_TG_SET_DATA 0x8e
+
#define PN533_CMD_RESPONSE(cmd) (cmd + 1)
/* PN533 Return codes */
@@ -81,6 +124,9 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
#define PN533_CMD_MI_MASK 0x40
#define PN533_CMD_RET_SUCCESS 0x00
+/* PN533 status codes */
+#define PN533_STATUS_TARGET_RELEASED 0x29
+
struct pn533;
typedef int (*pn533_cmd_complete_t) (struct pn533 *dev, void *arg,
@@ -97,7 +143,14 @@ struct pn533_fw_version {
};
/* PN533_CMD_RF_CONFIGURATION */
+#define PN533_CFGITEM_TIMING 0x02
#define PN533_CFGITEM_MAX_RETRIES 0x05
+#define PN533_CFGITEM_PASORI 0x82
+
+#define PN533_CONFIG_TIMING_102 0xb
+#define PN533_CONFIG_TIMING_204 0xc
+#define PN533_CONFIG_TIMING_409 0xd
+#define PN533_CONFIG_TIMING_819 0xe
#define PN533_CONFIG_MAX_RETRIES_NO_RETRY 0x00
#define PN533_CONFIG_MAX_RETRIES_ENDLESS 0xFF
@@ -108,6 +161,12 @@ struct pn533_config_max_retries {
u8 mx_rty_passive_act;
} __packed;
+struct pn533_config_timing {
+ u8 rfu;
+ u8 atr_res_timeout;
+ u8 dep_timeout;
+} __packed;
+
/* PN533_CMD_IN_LIST_PASSIVE_TARGET */
/* felica commands opcode */
@@ -144,6 +203,7 @@ enum {
PN533_POLL_MOD_424KBPS_FELICA,
PN533_POLL_MOD_106KBPS_JEWEL,
PN533_POLL_MOD_847KBPS_B,
+ PN533_LISTEN_MOD,
__PN533_POLL_MOD_AFTER_LAST,
};
@@ -211,6 +271,9 @@ const struct pn533_poll_modulations poll_mod[] = {
},
.len = 3,
},
+ [PN533_LISTEN_MOD] = {
+ .len = 0,
+ },
};
/* PN533_CMD_IN_ATR */
@@ -237,7 +300,7 @@ struct pn533_cmd_jump_dep {
u8 active;
u8 baud;
u8 next;
- u8 gt[];
+ u8 data[];
} __packed;
struct pn533_cmd_jump_dep_response {
@@ -253,6 +316,29 @@ struct pn533_cmd_jump_dep_response {
u8 gt[];
} __packed;
+
+/* PN533_TG_INIT_AS_TARGET */
+#define PN533_INIT_TARGET_PASSIVE 0x1
+#define PN533_INIT_TARGET_DEP 0x2
+
+#define PN533_INIT_TARGET_RESP_FRAME_MASK 0x3
+#define PN533_INIT_TARGET_RESP_ACTIVE 0x1
+#define PN533_INIT_TARGET_RESP_DEP 0x4
+
+struct pn533_cmd_init_target {
+ u8 mode;
+ u8 mifare[6];
+ u8 felica[18];
+ u8 nfcid3[10];
+ u8 gb_len;
+ u8 gb[];
+} __packed;
+
+struct pn533_cmd_init_target_response {
+ u8 mode;
+ u8 cmd[];
+} __packed;
+
struct pn533 {
struct usb_device *udev;
struct usb_interface *interface;
@@ -270,22 +356,33 @@ struct pn533 {
struct workqueue_struct *wq;
struct work_struct cmd_work;
+ struct work_struct poll_work;
struct work_struct mi_work;
+ struct work_struct tg_work;
+ struct timer_list listen_timer;
struct pn533_frame *wq_in_frame;
int wq_in_error;
+ int cancel_listen;
pn533_cmd_complete_t cmd_complete;
void *cmd_complete_arg;
- struct semaphore cmd_lock;
+ struct mutex cmd_lock;
u8 cmd;
struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1];
u8 poll_mod_count;
u8 poll_mod_curr;
u32 poll_protocols;
+ u32 listen_protocols;
+
+ u8 *gb;
+ size_t gb_len;
u8 tgt_available_prots;
u8 tgt_active_prot;
+ u8 tgt_mode;
+
+ u32 device_type;
};
struct pn533_frame {
@@ -405,7 +502,7 @@ static void pn533_wq_cmd_complete(struct work_struct *work)
PN533_FRAME_CMD_PARAMS_LEN(in_frame));
if (rc != -EINPROGRESS)
- up(&dev->cmd_lock);
+ mutex_unlock(&dev->cmd_lock);
}
static void pn533_recv_response(struct urb *urb)
@@ -583,7 +680,7 @@ static int pn533_send_cmd_frame_async(struct pn533 *dev,
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- if (down_trylock(&dev->cmd_lock))
+ if (!mutex_trylock(&dev->cmd_lock))
return -EBUSY;
rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame,
@@ -593,7 +690,7 @@ static int pn533_send_cmd_frame_async(struct pn533 *dev,
return 0;
error:
- up(&dev->cmd_lock);
+ mutex_unlock(&dev->cmd_lock);
return rc;
}
@@ -892,7 +989,7 @@ static int pn533_target_found_type_b(struct nfc_target *nfc_tgt, u8 *tgt_data,
if (!pn533_target_type_b_is_valid(tgt_type_b, tgt_data_len))
return -EPROTO;
- nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_MASK;
+ nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_B_MASK;
return 0;
}
@@ -963,6 +1060,11 @@ static int pn533_target_found(struct pn533 *dev,
return 0;
}
+static inline void pn533_poll_next_mod(struct pn533 *dev)
+{
+ dev->poll_mod_curr = (dev->poll_mod_curr + 1) % dev->poll_mod_count;
+}
+
static void pn533_poll_reset_mod_list(struct pn533 *dev)
{
dev->poll_mod_count = 0;
@@ -975,102 +1077,283 @@ static void pn533_poll_add_mod(struct pn533 *dev, u8 mod_index)
dev->poll_mod_count++;
}
-static void pn533_poll_create_mod_list(struct pn533 *dev, u32 protocols)
+static void pn533_poll_create_mod_list(struct pn533 *dev,
+ u32 im_protocols, u32 tm_protocols)
{
pn533_poll_reset_mod_list(dev);
- if (protocols & NFC_PROTO_MIFARE_MASK
- || protocols & NFC_PROTO_ISO14443_MASK
- || protocols & NFC_PROTO_NFC_DEP_MASK)
+ if (im_protocols & NFC_PROTO_MIFARE_MASK
+ || im_protocols & NFC_PROTO_ISO14443_MASK
+ || im_protocols & NFC_PROTO_NFC_DEP_MASK)
pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_A);
- if (protocols & NFC_PROTO_FELICA_MASK
- || protocols & NFC_PROTO_NFC_DEP_MASK) {
+ if (im_protocols & NFC_PROTO_FELICA_MASK
+ || im_protocols & NFC_PROTO_NFC_DEP_MASK) {
pn533_poll_add_mod(dev, PN533_POLL_MOD_212KBPS_FELICA);
pn533_poll_add_mod(dev, PN533_POLL_MOD_424KBPS_FELICA);
}
- if (protocols & NFC_PROTO_JEWEL_MASK)
+ if (im_protocols & NFC_PROTO_JEWEL_MASK)
pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_JEWEL);
- if (protocols & NFC_PROTO_ISO14443_MASK)
+ if (im_protocols & NFC_PROTO_ISO14443_B_MASK)
pn533_poll_add_mod(dev, PN533_POLL_MOD_847KBPS_B);
+
+ if (tm_protocols)
+ pn533_poll_add_mod(dev, PN533_LISTEN_MOD);
}
-static void pn533_start_poll_frame(struct pn533_frame *frame,
- struct pn533_poll_modulations *mod)
+static int pn533_start_poll_complete(struct pn533 *dev, void *arg,
+ u8 *params, int params_len)
{
+ struct pn533_poll_response *resp;
+ int rc;
- pn533_tx_frame_init(frame, PN533_CMD_IN_LIST_PASSIVE_TARGET);
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- memcpy(PN533_FRAME_CMD_PARAMS_PTR(frame), &mod->data, mod->len);
- frame->datalen += mod->len;
+ resp = (struct pn533_poll_response *) params;
+ if (resp->nbtg) {
+ rc = pn533_target_found(dev, resp, params_len);
+
+ /* We must stop the poll after a valid target found */
+ if (rc == 0) {
+ pn533_poll_reset_mod_list(dev);
+ return 0;
+ }
+ }
+
+ return -EAGAIN;
+}
+
+static int pn533_init_target_frame(struct pn533_frame *frame,
+ u8 *gb, size_t gb_len)
+{
+ struct pn533_cmd_init_target *cmd;
+ size_t cmd_len;
+ u8 felica_params[18] = {0x1, 0xfe, /* DEP */
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, /* random */
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0xff, 0xff}; /* System code */
+ u8 mifare_params[6] = {0x1, 0x1, /* SENS_RES */
+ 0x0, 0x0, 0x0,
+ 0x40}; /* SEL_RES for DEP */
+
+ cmd_len = sizeof(struct pn533_cmd_init_target) + gb_len + 1;
+ cmd = kzalloc(cmd_len, GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ pn533_tx_frame_init(frame, PN533_CMD_TG_INIT_AS_TARGET);
+
+ /* DEP support only */
+ cmd->mode |= PN533_INIT_TARGET_DEP;
+
+ /* Felica params */
+ memcpy(cmd->felica, felica_params, 18);
+ get_random_bytes(cmd->felica + 2, 6);
+
+ /* NFCID3 */
+ memset(cmd->nfcid3, 0, 10);
+ memcpy(cmd->nfcid3, cmd->felica, 8);
+
+ /* MIFARE params */
+ memcpy(cmd->mifare, mifare_params, 6);
+
+ /* General bytes */
+ cmd->gb_len = gb_len;
+ memcpy(cmd->gb, gb, gb_len);
+
+ /* Len Tk */
+ cmd->gb[gb_len] = 0;
+
+ memcpy(PN533_FRAME_CMD_PARAMS_PTR(frame), cmd, cmd_len);
+
+ frame->datalen += cmd_len;
pn533_tx_frame_finish(frame);
+
+ kfree(cmd);
+
+ return 0;
}
-static int pn533_start_poll_complete(struct pn533 *dev, void *arg,
- u8 *params, int params_len)
+#define PN533_CMD_DATAEXCH_HEAD_LEN (sizeof(struct pn533_frame) + 3)
+#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
+static int pn533_tm_get_data_complete(struct pn533 *dev, void *arg,
+ u8 *params, int params_len)
{
- struct pn533_poll_response *resp;
- struct pn533_poll_modulations *next_mod;
- int rc;
+ struct sk_buff *skb_resp = arg;
+ struct pn533_frame *in_frame = (struct pn533_frame *) skb_resp->data;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- if (params_len == -ENOENT) {
- nfc_dev_dbg(&dev->interface->dev, "Polling operation has been"
- " stopped");
- goto stop_poll;
+ if (params_len < 0) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error %d when starting as a target",
+ params_len);
+
+ return params_len;
+ }
+
+ if (params_len > 0 && params[0] != 0) {
+ nfc_tm_deactivated(dev->nfc_dev);
+
+ dev->tgt_mode = 0;
+
+ kfree_skb(skb_resp);
+ return 0;
}
+ skb_put(skb_resp, PN533_FRAME_SIZE(in_frame));
+ skb_pull(skb_resp, PN533_CMD_DATAEXCH_HEAD_LEN);
+ skb_trim(skb_resp, skb_resp->len - PN533_FRAME_TAIL_SIZE);
+
+ return nfc_tm_data_received(dev->nfc_dev, skb_resp);
+}
+
+static void pn533_wq_tg_get_data(struct work_struct *work)
+{
+ struct pn533 *dev = container_of(work, struct pn533, tg_work);
+ struct pn533_frame *in_frame;
+ struct sk_buff *skb_resp;
+ size_t skb_resp_len;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ skb_resp_len = PN533_CMD_DATAEXCH_HEAD_LEN +
+ PN533_CMD_DATAEXCH_DATA_MAXLEN +
+ PN533_FRAME_TAIL_SIZE;
+
+ skb_resp = nfc_alloc_recv_skb(skb_resp_len, GFP_KERNEL);
+ if (!skb_resp)
+ return;
+
+ in_frame = (struct pn533_frame *)skb_resp->data;
+
+ pn533_tx_frame_init(dev->out_frame, PN533_CMD_TG_GET_DATA);
+ pn533_tx_frame_finish(dev->out_frame);
+
+ pn533_send_cmd_frame_async(dev, dev->out_frame, in_frame,
+ skb_resp_len,
+ pn533_tm_get_data_complete,
+ skb_resp, GFP_KERNEL);
+
+ return;
+}
+
+#define ATR_REQ_GB_OFFSET 17
+static int pn533_init_target_complete(struct pn533 *dev, void *arg,
+ u8 *params, int params_len)
+{
+ struct pn533_cmd_init_target_response *resp;
+ u8 frame, comm_mode = NFC_COMM_PASSIVE, *gb;
+ size_t gb_len;
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
if (params_len < 0) {
- nfc_dev_err(&dev->interface->dev, "Error %d when running poll",
- params_len);
- goto stop_poll;
+ nfc_dev_err(&dev->interface->dev,
+ "Error %d when starting as a target",
+ params_len);
+
+ return params_len;
}
- resp = (struct pn533_poll_response *) params;
- if (resp->nbtg) {
- rc = pn533_target_found(dev, resp, params_len);
+ if (params_len < ATR_REQ_GB_OFFSET + 1)
+ return -EINVAL;
- /* We must stop the poll after a valid target found */
- if (rc == 0)
- goto stop_poll;
+ resp = (struct pn533_cmd_init_target_response *) params;
+
+ nfc_dev_dbg(&dev->interface->dev, "Target mode 0x%x param len %d\n",
+ resp->mode, params_len);
+
+ frame = resp->mode & PN533_INIT_TARGET_RESP_FRAME_MASK;
+ if (frame == PN533_INIT_TARGET_RESP_ACTIVE)
+ comm_mode = NFC_COMM_ACTIVE;
- if (rc != -EAGAIN)
- nfc_dev_err(&dev->interface->dev, "The target found is"
- " not valid - continuing to poll");
+ /* Again, only DEP */
+ if ((resp->mode & PN533_INIT_TARGET_RESP_DEP) == 0)
+ return -EOPNOTSUPP;
+
+ gb = resp->cmd + ATR_REQ_GB_OFFSET;
+ gb_len = params_len - (ATR_REQ_GB_OFFSET + 1);
+
+ rc = nfc_tm_activated(dev->nfc_dev, NFC_PROTO_NFC_DEP_MASK,
+ comm_mode, gb, gb_len);
+ if (rc < 0) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error when signaling target activation");
+ return rc;
}
- dev->poll_mod_curr = (dev->poll_mod_curr + 1) % dev->poll_mod_count;
+ dev->tgt_mode = 1;
- next_mod = dev->poll_mod_active[dev->poll_mod_curr];
+ queue_work(dev->wq, &dev->tg_work);
- nfc_dev_dbg(&dev->interface->dev, "Polling next modulation (0x%x)",
- dev->poll_mod_curr);
+ return 0;
+}
- pn533_start_poll_frame(dev->out_frame, next_mod);
+static void pn533_listen_mode_timer(unsigned long data)
+{
+ struct pn533 *dev = (struct pn533 *) data;
- /* Don't need to down the semaphore again */
- rc = __pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
- dev->in_maxlen, pn533_start_poll_complete,
- NULL, GFP_ATOMIC);
+ nfc_dev_dbg(&dev->interface->dev, "Listen mode timeout");
+
+ /* An ack will cancel the last issued command (poll) */
+ pn533_send_ack(dev, GFP_ATOMIC);
+
+ dev->cancel_listen = 1;
+
+ mutex_unlock(&dev->cmd_lock);
+
+ pn533_poll_next_mod(dev);
+
+ queue_work(dev->wq, &dev->poll_work);
+}
+
+static int pn533_poll_complete(struct pn533 *dev, void *arg,
+ u8 *params, int params_len)
+{
+ struct pn533_poll_modulations *cur_mod;
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ if (params_len == -ENOENT) {
+ if (dev->poll_mod_count != 0)
+ return 0;
+
+ nfc_dev_err(&dev->interface->dev,
+ "Polling operation has been stopped");
- if (rc == -EPERM) {
- nfc_dev_dbg(&dev->interface->dev, "Cannot poll next modulation"
- " because poll has been stopped");
goto stop_poll;
}
- if (rc) {
- nfc_dev_err(&dev->interface->dev, "Error %d when trying to poll"
- " next modulation", rc);
+ if (params_len < 0) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error %d when running poll", params_len);
+
goto stop_poll;
}
- /* Inform caller function to do not up the semaphore */
- return -EINPROGRESS;
+ cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
+
+ if (cur_mod->len == 0) {
+ del_timer(&dev->listen_timer);
+
+ return pn533_init_target_complete(dev, arg, params, params_len);
+ } else {
+ rc = pn533_start_poll_complete(dev, arg, params, params_len);
+ if (!rc)
+ return rc;
+ }
+
+ pn533_poll_next_mod(dev);
+
+ queue_work(dev->wq, &dev->poll_work);
+
+ return 0;
stop_poll:
pn533_poll_reset_mod_list(dev);
@@ -1078,61 +1361,104 @@ stop_poll:
return 0;
}
-static int pn533_start_poll(struct nfc_dev *nfc_dev, u32 protocols)
+static void pn533_build_poll_frame(struct pn533 *dev,
+ struct pn533_frame *frame,
+ struct pn533_poll_modulations *mod)
{
- struct pn533 *dev = nfc_get_drvdata(nfc_dev);
- struct pn533_poll_modulations *start_mod;
- int rc;
+ nfc_dev_dbg(&dev->interface->dev, "mod len %d\n", mod->len);
- nfc_dev_dbg(&dev->interface->dev, "%s - protocols=0x%x", __func__,
- protocols);
+ if (mod->len == 0) {
+ /* Listen mode */
+ pn533_init_target_frame(frame, dev->gb, dev->gb_len);
+ } else {
+ /* Polling mode */
+ pn533_tx_frame_init(frame, PN533_CMD_IN_LIST_PASSIVE_TARGET);
- if (dev->poll_mod_count) {
- nfc_dev_err(&dev->interface->dev, "Polling operation already"
- " active");
- return -EBUSY;
- }
+ memcpy(PN533_FRAME_CMD_PARAMS_PTR(frame), &mod->data, mod->len);
+ frame->datalen += mod->len;
- if (dev->tgt_active_prot) {
- nfc_dev_err(&dev->interface->dev, "Cannot poll with a target"
- " already activated");
- return -EBUSY;
+ pn533_tx_frame_finish(frame);
}
+}
+
+static int pn533_send_poll_frame(struct pn533 *dev)
+{
+ struct pn533_poll_modulations *cur_mod;
+ int rc;
- pn533_poll_create_mod_list(dev, protocols);
+ cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
- if (!dev->poll_mod_count) {
- nfc_dev_err(&dev->interface->dev, "No valid protocols"
- " specified");
- rc = -EINVAL;
- goto error;
+ pn533_build_poll_frame(dev, dev->out_frame, cur_mod);
+
+ rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
+ dev->in_maxlen, pn533_poll_complete,
+ NULL, GFP_KERNEL);
+ if (rc)
+ nfc_dev_err(&dev->interface->dev, "Polling loop error %d", rc);
+
+ return rc;
+}
+
+static void pn533_wq_poll(struct work_struct *work)
+{
+ struct pn533 *dev = container_of(work, struct pn533, poll_work);
+ struct pn533_poll_modulations *cur_mod;
+ int rc;
+
+ cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
+
+ nfc_dev_dbg(&dev->interface->dev,
+ "%s cancel_listen %d modulation len %d",
+ __func__, dev->cancel_listen, cur_mod->len);
+
+ if (dev->cancel_listen == 1) {
+ dev->cancel_listen = 0;
+ usb_kill_urb(dev->in_urb);
}
- nfc_dev_dbg(&dev->interface->dev, "It will poll %d modulations types",
- dev->poll_mod_count);
+ rc = pn533_send_poll_frame(dev);
+ if (rc)
+ return;
- dev->poll_mod_curr = 0;
- start_mod = dev->poll_mod_active[dev->poll_mod_curr];
+ if (cur_mod->len == 0 && dev->poll_mod_count > 1)
+ mod_timer(&dev->listen_timer, jiffies + PN533_LISTEN_TIME * HZ);
- pn533_start_poll_frame(dev->out_frame, start_mod);
+ return;
+}
- rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
- dev->in_maxlen, pn533_start_poll_complete,
- NULL, GFP_KERNEL);
+static int pn533_start_poll(struct nfc_dev *nfc_dev,
+ u32 im_protocols, u32 tm_protocols)
+{
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
- if (rc) {
- nfc_dev_err(&dev->interface->dev, "Error %d when trying to"
- " start poll", rc);
- goto error;
+ nfc_dev_dbg(&dev->interface->dev,
+ "%s: im protocols 0x%x tm protocols 0x%x",
+ __func__, im_protocols, tm_protocols);
+
+ if (dev->tgt_active_prot) {
+ nfc_dev_err(&dev->interface->dev,
+ "Cannot poll with a target already activated");
+ return -EBUSY;
}
- dev->poll_protocols = protocols;
+ if (dev->tgt_mode) {
+ nfc_dev_err(&dev->interface->dev,
+ "Cannot poll while already being activated");
+ return -EBUSY;
+ }
- return 0;
+ if (tm_protocols) {
+ dev->gb = nfc_get_local_general_bytes(nfc_dev, &dev->gb_len);
+ if (dev->gb == NULL)
+ tm_protocols = 0;
+ }
-error:
- pn533_poll_reset_mod_list(dev);
- return rc;
+ dev->poll_mod_curr = 0;
+ pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
+ dev->poll_protocols = im_protocols;
+ dev->listen_protocols = tm_protocols;
+
+ return pn533_send_poll_frame(dev);
}
static void pn533_stop_poll(struct nfc_dev *nfc_dev)
@@ -1141,6 +1467,8 @@ static void pn533_stop_poll(struct nfc_dev *nfc_dev)
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ del_timer(&dev->listen_timer);
+
if (!dev->poll_mod_count) {
nfc_dev_dbg(&dev->interface->dev, "Polling operation was not"
" running");
@@ -1152,6 +1480,8 @@ static void pn533_stop_poll(struct nfc_dev *nfc_dev)
/* prevent pn533_start_poll_complete to issue a new poll meanwhile */
usb_kill_urb(dev->in_urb);
+
+ pn533_poll_reset_mod_list(dev);
}
static int pn533_activate_target_nfcdep(struct pn533 *dev)
@@ -1349,13 +1679,29 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
return 0;
}
+static int pn533_mod_to_baud(struct pn533 *dev)
+{
+ switch (dev->poll_mod_curr) {
+ case PN533_POLL_MOD_106KBPS_A:
+ return 0;
+ case PN533_POLL_MOD_212KBPS_FELICA:
+ return 1;
+ case PN533_POLL_MOD_424KBPS_FELICA:
+ return 2;
+ default:
+ return -EINVAL;
+ }
+}
+
+#define PASSIVE_DATA_LEN 5
static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
u8 comm_mode, u8* gb, size_t gb_len)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
struct pn533_cmd_jump_dep *cmd;
- u8 cmd_len;
- int rc;
+ u8 cmd_len, *data_ptr;
+ u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
+ int rc, baud;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
@@ -1371,7 +1717,17 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
return -EBUSY;
}
+ baud = pn533_mod_to_baud(dev);
+ if (baud < 0) {
+ nfc_dev_err(&dev->interface->dev,
+ "Invalid curr modulation %d", dev->poll_mod_curr);
+ return baud;
+ }
+
cmd_len = sizeof(struct pn533_cmd_jump_dep) + gb_len;
+ if (comm_mode == NFC_COMM_PASSIVE)
+ cmd_len += PASSIVE_DATA_LEN;
+
cmd = kzalloc(cmd_len, GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
@@ -1379,10 +1735,18 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_JUMP_FOR_DEP);
cmd->active = !comm_mode;
- cmd->baud = 0;
+ cmd->next = 0;
+ cmd->baud = baud;
+ data_ptr = cmd->data;
+ if (comm_mode == NFC_COMM_PASSIVE && cmd->baud > 0) {
+ memcpy(data_ptr, passive_data, PASSIVE_DATA_LEN);
+ cmd->next |= 1;
+ data_ptr += PASSIVE_DATA_LEN;
+ }
+
if (gb != NULL && gb_len > 0) {
- cmd->next = 4; /* We have some Gi */
- memcpy(cmd->gt, gb, gb_len);
+ cmd->next |= 4; /* We have some Gi */
+ memcpy(data_ptr, gb, gb_len);
} else {
cmd->next = 0;
}
@@ -1407,15 +1771,25 @@ out:
static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
{
- pn533_deactivate_target(nfc_dev, 0);
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+
+ pn533_poll_reset_mod_list(dev);
+
+ if (dev->tgt_mode || dev->tgt_active_prot) {
+ pn533_send_ack(dev, GFP_KERNEL);
+ usb_kill_urb(dev->in_urb);
+ }
+
+ dev->tgt_active_prot = 0;
+ dev->tgt_mode = 0;
+
+ skb_queue_purge(&dev->resp_q);
return 0;
}
-#define PN533_CMD_DATAEXCH_HEAD_LEN (sizeof(struct pn533_frame) + 3)
-#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
-
-static int pn533_data_exchange_tx_frame(struct pn533 *dev, struct sk_buff *skb)
+static int pn533_build_tx_frame(struct pn533 *dev, struct sk_buff *skb,
+ bool target)
{
int payload_len = skb->len;
struct pn533_frame *out_frame;
@@ -1432,14 +1806,37 @@ static int pn533_data_exchange_tx_frame(struct pn533 *dev, struct sk_buff *skb)
return -ENOSYS;
}
- skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN);
- out_frame = (struct pn533_frame *) skb->data;
+ if (target == true) {
+ switch (dev->device_type) {
+ case PN533_DEVICE_PASORI:
+ if (dev->tgt_active_prot == NFC_PROTO_FELICA) {
+ skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN - 1);
+ out_frame = (struct pn533_frame *) skb->data;
+ pn533_tx_frame_init(out_frame,
+ PN533_CMD_IN_COMM_THRU);
+
+ break;
+ }
+
+ default:
+ skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN);
+ out_frame = (struct pn533_frame *) skb->data;
+ pn533_tx_frame_init(out_frame,
+ PN533_CMD_IN_DATA_EXCHANGE);
+ tg = 1;
+ memcpy(PN533_FRAME_CMD_PARAMS_PTR(out_frame),
+ &tg, sizeof(u8));
+ out_frame->datalen += sizeof(u8);
+
+ break;
+ }
- pn533_tx_frame_init(out_frame, PN533_CMD_IN_DATA_EXCHANGE);
+ } else {
+ skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN - 1);
+ out_frame = (struct pn533_frame *) skb->data;
+ pn533_tx_frame_init(out_frame, PN533_CMD_TG_SET_DATA);
+ }
- tg = 1;
- memcpy(PN533_FRAME_CMD_PARAMS_PTR(out_frame), &tg, sizeof(u8));
- out_frame->datalen += sizeof(u8);
/* The data is already in the out_frame, just update the datalen */
out_frame->datalen += payload_len;
@@ -1550,9 +1947,9 @@ error:
return 0;
}
-static int pn533_data_exchange(struct nfc_dev *nfc_dev,
- struct nfc_target *target, struct sk_buff *skb,
- data_exchange_cb_t cb, void *cb_context)
+static int pn533_transceive(struct nfc_dev *nfc_dev,
+ struct nfc_target *target, struct sk_buff *skb,
+ data_exchange_cb_t cb, void *cb_context)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
struct pn533_frame *out_frame, *in_frame;
@@ -1570,7 +1967,7 @@ static int pn533_data_exchange(struct nfc_dev *nfc_dev,
goto error;
}
- rc = pn533_data_exchange_tx_frame(dev, skb);
+ rc = pn533_build_tx_frame(dev, skb, true);
if (rc)
goto error;
@@ -1618,6 +2015,63 @@ error:
return rc;
}
+static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
+ u8 *params, int params_len)
+{
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ if (params_len < 0) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error %d when sending data",
+ params_len);
+
+ return params_len;
+ }
+
+ if (params_len > 0 && params[0] != 0) {
+ nfc_tm_deactivated(dev->nfc_dev);
+
+ dev->tgt_mode = 0;
+
+ return 0;
+ }
+
+ queue_work(dev->wq, &dev->tg_work);
+
+ return 0;
+}
+
+static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
+{
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ struct pn533_frame *out_frame;
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ rc = pn533_build_tx_frame(dev, skb, false);
+ if (rc)
+ goto error;
+
+ out_frame = (struct pn533_frame *) skb->data;
+
+ rc = pn533_send_cmd_frame_async(dev, out_frame, dev->in_frame,
+ dev->in_maxlen, pn533_tm_send_complete,
+ NULL, GFP_KERNEL);
+ if (rc) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error %d when trying to send data", rc);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ kfree_skb(skb);
+
+ return rc;
+}
+
static void pn533_wq_mi_recv(struct work_struct *work)
{
struct pn533 *dev = container_of(work, struct pn533, mi_work);
@@ -1638,7 +2092,7 @@ static void pn533_wq_mi_recv(struct work_struct *work)
skb_reserve(skb_cmd, PN533_CMD_DATAEXCH_HEAD_LEN);
- rc = pn533_data_exchange_tx_frame(dev, skb_cmd);
+ rc = pn533_build_tx_frame(dev, skb_cmd, true);
if (rc)
goto error_frame;
@@ -1677,7 +2131,7 @@ error_cmd:
kfree(arg);
- up(&dev->cmd_lock);
+ mutex_unlock(&dev->cmd_lock);
}
static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
@@ -1703,7 +2157,28 @@ static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
return rc;
}
-struct nfc_ops pn533_nfc_ops = {
+static int pn533_fw_reset(struct pn533 *dev)
+{
+ int rc;
+ u8 *params;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ pn533_tx_frame_init(dev->out_frame, 0x18);
+
+ params = PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame);
+ params[0] = 0x1;
+ dev->out_frame->datalen += 1;
+
+ pn533_tx_frame_finish(dev->out_frame);
+
+ rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame,
+ dev->in_maxlen);
+
+ return rc;
+}
+
+static struct nfc_ops pn533_nfc_ops = {
.dev_up = NULL,
.dev_down = NULL,
.dep_link_up = pn533_dep_link_up,
@@ -1712,9 +2187,88 @@ struct nfc_ops pn533_nfc_ops = {
.stop_poll = pn533_stop_poll,
.activate_target = pn533_activate_target,
.deactivate_target = pn533_deactivate_target,
- .data_exchange = pn533_data_exchange,
+ .im_transceive = pn533_transceive,
+ .tm_send = pn533_tm_send,
};
+static int pn533_setup(struct pn533 *dev)
+{
+ struct pn533_config_max_retries max_retries;
+ struct pn533_config_timing timing;
+ u8 pasori_cfg[3] = {0x08, 0x01, 0x08};
+ int rc;
+
+ switch (dev->device_type) {
+ case PN533_DEVICE_STD:
+ max_retries.mx_rty_atr = PN533_CONFIG_MAX_RETRIES_ENDLESS;
+ max_retries.mx_rty_psl = 2;
+ max_retries.mx_rty_passive_act =
+ PN533_CONFIG_MAX_RETRIES_NO_RETRY;
+
+ timing.rfu = PN533_CONFIG_TIMING_102;
+ timing.atr_res_timeout = PN533_CONFIG_TIMING_204;
+ timing.dep_timeout = PN533_CONFIG_TIMING_409;
+
+ break;
+
+ case PN533_DEVICE_PASORI:
+ max_retries.mx_rty_atr = 0x2;
+ max_retries.mx_rty_psl = 0x1;
+ max_retries.mx_rty_passive_act =
+ PN533_CONFIG_MAX_RETRIES_NO_RETRY;
+
+ timing.rfu = PN533_CONFIG_TIMING_102;
+ timing.atr_res_timeout = PN533_CONFIG_TIMING_102;
+ timing.dep_timeout = PN533_CONFIG_TIMING_204;
+
+ break;
+
+ default:
+ nfc_dev_err(&dev->interface->dev, "Unknown device type %d\n",
+ dev->device_type);
+ return -EINVAL;
+ }
+
+ rc = pn533_set_configuration(dev, PN533_CFGITEM_MAX_RETRIES,
+ (u8 *)&max_retries, sizeof(max_retries));
+ if (rc) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error on setting MAX_RETRIES config");
+ return rc;
+ }
+
+ rc = pn533_set_configuration(dev, PN533_CFGITEM_TIMING,
+ (u8 *)&timing, sizeof(timing));
+ if (rc) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error on setting RF timings");
+ return rc;
+ }
+
+ switch (dev->device_type) {
+ case PN533_DEVICE_STD:
+ break;
+
+ case PN533_DEVICE_PASORI:
+ pn533_fw_reset(dev);
+
+ rc = pn533_set_configuration(dev, PN533_CFGITEM_PASORI,
+ pasori_cfg, 3);
+ if (rc) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error while settings PASORI config");
+ return rc;
+ }
+
+ pn533_fw_reset(dev);
+
+ break;
+ }
+
+ return 0;
+}
+
static int pn533_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
@@ -1722,7 +2276,6 @@ static int pn533_probe(struct usb_interface *interface,
struct pn533 *dev;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
- struct pn533_config_max_retries max_retries;
int in_endpoint = 0;
int out_endpoint = 0;
int rc = -ENOMEM;
@@ -1735,7 +2288,7 @@ static int pn533_probe(struct usb_interface *interface,
dev->udev = usb_get_dev(interface_to_usbdev(interface));
dev->interface = interface;
- sema_init(&dev->cmd_lock, 1);
+ mutex_init(&dev->cmd_lock);
iface_desc = interface->cur_altsetting;
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
@@ -1779,12 +2332,18 @@ static int pn533_probe(struct usb_interface *interface,
INIT_WORK(&dev->cmd_work, pn533_wq_cmd_complete);
INIT_WORK(&dev->mi_work, pn533_wq_mi_recv);
+ INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data);
+ INIT_WORK(&dev->poll_work, pn533_wq_poll);
dev->wq = alloc_workqueue("pn533",
WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
1);
if (dev->wq == NULL)
goto error;
+ init_timer(&dev->listen_timer);
+ dev->listen_timer.data = (unsigned long) dev;
+ dev->listen_timer.function = pn533_listen_mode_timer;
+
skb_queue_head_init(&dev->resp_q);
usb_set_intfdata(interface, dev);
@@ -1802,10 +2361,22 @@ static int pn533_probe(struct usb_interface *interface,
nfc_dev_info(&dev->interface->dev, "NXP PN533 firmware ver %d.%d now"
" attached", fw_ver->ver, fw_ver->rev);
- protocols = NFC_PROTO_JEWEL_MASK
- | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK
- | NFC_PROTO_ISO14443_MASK
- | NFC_PROTO_NFC_DEP_MASK;
+ dev->device_type = id->driver_info;
+ switch (dev->device_type) {
+ case PN533_DEVICE_STD:
+ protocols = PN533_ALL_PROTOCOLS;
+ break;
+
+ case PN533_DEVICE_PASORI:
+ protocols = PN533_NO_TYPE_B_PROTOCOLS;
+ break;
+
+ default:
+ nfc_dev_err(&dev->interface->dev, "Unknown device type %d\n",
+ dev->device_type);
+ rc = -EINVAL;
+ goto destroy_wq;
+ }
dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
PN533_CMD_DATAEXCH_HEAD_LEN,
@@ -1820,23 +2391,18 @@ static int pn533_probe(struct usb_interface *interface,
if (rc)
goto free_nfc_dev;
- max_retries.mx_rty_atr = PN533_CONFIG_MAX_RETRIES_ENDLESS;
- max_retries.mx_rty_psl = 2;
- max_retries.mx_rty_passive_act = PN533_CONFIG_MAX_RETRIES_NO_RETRY;
-
- rc = pn533_set_configuration(dev, PN533_CFGITEM_MAX_RETRIES,
- (u8 *) &max_retries, sizeof(max_retries));
-
- if (rc) {
- nfc_dev_err(&dev->interface->dev, "Error on setting MAX_RETRIES"
- " config");
- goto free_nfc_dev;
- }
+ rc = pn533_setup(dev);
+ if (rc)
+ goto unregister_nfc_dev;
return 0;
+unregister_nfc_dev:
+ nfc_unregister_device(dev->nfc_dev);
+
free_nfc_dev:
nfc_free_device(dev->nfc_dev);
+
destroy_wq:
destroy_workqueue(dev->wq);
error:
@@ -1865,6 +2431,8 @@ static void pn533_disconnect(struct usb_interface *interface)
skb_queue_purge(&dev->resp_q);
+ del_timer(&dev->listen_timer);
+
kfree(dev->in_frame);
usb_free_urb(dev->in_urb);
kfree(dev->out_frame);
diff --git a/drivers/nfc/pn544_hci.c b/drivers/nfc/pn544_hci.c
index 46f4a9f..aa71807 100644
--- a/drivers/nfc/pn544_hci.c
+++ b/drivers/nfc/pn544_hci.c
@@ -108,16 +108,22 @@ enum pn544_state {
#define PN544_NFC_WI_MGMT_GATE 0xA1
-static u8 pn544_custom_gates[] = {
- PN544_SYS_MGMT_GATE,
- PN544_SWP_MGMT_GATE,
- PN544_POLLING_LOOP_MGMT_GATE,
- PN544_NFC_WI_MGMT_GATE,
- PN544_RF_READER_F_GATE,
- PN544_RF_READER_JEWEL_GATE,
- PN544_RF_READER_ISO15693_GATE,
- PN544_RF_READER_NFCIP1_INITIATOR_GATE,
- PN544_RF_READER_NFCIP1_TARGET_GATE
+static struct nfc_hci_gate pn544_gates[] = {
+ {NFC_HCI_ADMIN_GATE, NFC_HCI_INVALID_PIPE},
+ {NFC_HCI_LOOPBACK_GATE, NFC_HCI_INVALID_PIPE},
+ {NFC_HCI_ID_MGMT_GATE, NFC_HCI_INVALID_PIPE},
+ {NFC_HCI_LINK_MGMT_GATE, NFC_HCI_INVALID_PIPE},
+ {NFC_HCI_RF_READER_B_GATE, NFC_HCI_INVALID_PIPE},
+ {NFC_HCI_RF_READER_A_GATE, NFC_HCI_INVALID_PIPE},
+ {PN544_SYS_MGMT_GATE, NFC_HCI_INVALID_PIPE},
+ {PN544_SWP_MGMT_GATE, NFC_HCI_INVALID_PIPE},
+ {PN544_POLLING_LOOP_MGMT_GATE, NFC_HCI_INVALID_PIPE},
+ {PN544_NFC_WI_MGMT_GATE, NFC_HCI_INVALID_PIPE},
+ {PN544_RF_READER_F_GATE, NFC_HCI_INVALID_PIPE},
+ {PN544_RF_READER_JEWEL_GATE, NFC_HCI_INVALID_PIPE},
+ {PN544_RF_READER_ISO15693_GATE, NFC_HCI_INVALID_PIPE},
+ {PN544_RF_READER_NFCIP1_INITIATOR_GATE, NFC_HCI_INVALID_PIPE},
+ {PN544_RF_READER_NFCIP1_TARGET_GATE, NFC_HCI_INVALID_PIPE}
};
/* Largest headroom needed for outgoing custom commands */
@@ -232,7 +238,7 @@ static int pn544_hci_i2c_write(struct i2c_client *client, u8 *buf, int len)
static int check_crc(u8 *buf, int buflen)
{
- u8 len;
+ int len;
u16 crc;
len = buf[0] + 1;
@@ -377,6 +383,9 @@ static int pn544_hci_open(struct nfc_shdlc *shdlc)
r = pn544_hci_enable(info, HCI_MODE);
+ if (r == 0)
+ info->state = PN544_ST_READY;
+
out:
mutex_unlock(&info->info_lock);
return r;
@@ -393,6 +402,8 @@ static void pn544_hci_close(struct nfc_shdlc *shdlc)
pn544_hci_disable(info);
+ info->state = PN544_ST_COLD;
+
out:
mutex_unlock(&info->info_lock);
}
@@ -576,7 +587,8 @@ static int pn544_hci_xmit(struct nfc_shdlc *shdlc, struct sk_buff *skb)
return pn544_hci_i2c_write(client, skb->data, skb->len);
}
-static int pn544_hci_start_poll(struct nfc_shdlc *shdlc, u32 protocols)
+static int pn544_hci_start_poll(struct nfc_shdlc *shdlc,
+ u32 im_protocols, u32 tm_protocols)
{
struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
u8 phases = 0;
@@ -584,7 +596,8 @@ static int pn544_hci_start_poll(struct nfc_shdlc *shdlc, u32 protocols)
u8 duration[2];
u8 activated;
- pr_info(DRIVER_DESC ": %s protocols = %d\n", __func__, protocols);
+ pr_info(DRIVER_DESC ": %s protocols 0x%x 0x%x\n",
+ __func__, im_protocols, tm_protocols);
r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
NFC_HCI_EVT_END_OPERATION, NULL, 0);
@@ -604,10 +617,10 @@ static int pn544_hci_start_poll(struct nfc_shdlc *shdlc, u32 protocols)
if (r < 0)
return r;
- if (protocols & (NFC_PROTO_ISO14443_MASK | NFC_PROTO_MIFARE_MASK |
+ if (im_protocols & (NFC_PROTO_ISO14443_MASK | NFC_PROTO_MIFARE_MASK |
NFC_PROTO_JEWEL_MASK))
phases |= 1; /* Type A */
- if (protocols & NFC_PROTO_FELICA_MASK) {
+ if (im_protocols & NFC_PROTO_FELICA_MASK) {
phases |= (1 << 2); /* Type F 212 */
phases |= (1 << 3); /* Type F 424 */
}
@@ -842,10 +855,9 @@ static int __devinit pn544_hci_probe(struct i2c_client *client,
goto err_rti;
}
- init_data.gate_count = ARRAY_SIZE(pn544_custom_gates);
+ init_data.gate_count = ARRAY_SIZE(pn544_gates);
- memcpy(init_data.gates, pn544_custom_gates,
- ARRAY_SIZE(pn544_custom_gates));
+ memcpy(init_data.gates, pn544_gates, sizeof(pn544_gates));
/*
* TODO: Session id must include the driver name + some bus addr
@@ -857,6 +869,7 @@ static int __devinit pn544_hci_probe(struct i2c_client *client,
NFC_PROTO_MIFARE_MASK |
NFC_PROTO_FELICA_MASK |
NFC_PROTO_ISO14443_MASK |
+ NFC_PROTO_ISO14443_B_MASK |
NFC_PROTO_NFC_DEP_MASK;
info->shdlc = nfc_shdlc_allocate(&pn544_shdlc_ops,
diff --git a/drivers/of/base.c b/drivers/of/base.c
index d9bfd49..a14f109 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1051,7 +1051,8 @@ int prom_remove_property(struct device_node *np, struct property *prop)
}
/*
- * prom_update_property - Update a property in a node.
+ * prom_update_property - Update a property in a node; if the property does
+ * not exist, add it.
*
* Note that we don't actually remove it, since we have given out
* who-knows-how-many pointers to the data using get-property.
@@ -1059,13 +1060,19 @@ int prom_remove_property(struct device_node *np, struct property *prop)
* and add the new property to the property list
*/
int prom_update_property(struct device_node *np,
- struct property *newprop,
- struct property *oldprop)
+ struct property *newprop)
{
- struct property **next;
+ struct property **next, *oldprop;
unsigned long flags;
int found = 0;
+ if (!newprop->name)
+ return -EINVAL;
+
+ oldprop = of_find_property(np, newprop->name, NULL);
+ if (!oldprop)
+ return prom_add_property(np, newprop);
+
write_lock_irqsave(&devtree_lock, flags);
next = &np->properties;
while (*next) {
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 2574abd..8e6c25f 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -57,6 +57,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
const __be32 *paddr;
u32 addr;
int len;
+ bool is_c45;
/* A PHY must have a reg property in the range [0-31] */
paddr = of_get_property(child, "reg", &len);
@@ -79,11 +80,18 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
mdio->irq[addr] = PHY_POLL;
}
- phy = get_phy_device(mdio, addr);
+ is_c45 = of_device_is_compatible(child,
+ "ethernet-phy-ieee802.3-c45");
+ phy = get_phy_device(mdio, addr, is_c45);
+
if (!phy || IS_ERR(phy)) {
- dev_err(&mdio->dev, "error probing PHY at address %i\n",
- addr);
- continue;
+ phy = phy_device_create(mdio, addr, 0, false, NULL);
+ if (!phy || IS_ERR(phy)) {
+ dev_err(&mdio->dev,
+ "error creating PHY at address %i\n",
+ addr);
+ continue;
+ }
}
/* Associate the OF node with the device structure so it
diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c
index 9312516..6770538 100644
--- a/drivers/of/of_pci_irq.c
+++ b/drivers/of/of_pci_irq.c
@@ -15,7 +15,7 @@
* PCI tree until an device-node is found, at which point it will finish
* resolving using the OF tree walking.
*/
-int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
+int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq)
{
struct device_node *dn, *ppnode;
struct pci_dev *ppdev;
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 343ad29..e44f8c2 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -317,10 +317,9 @@ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *l
for(; lookup->compatible != NULL; lookup++) {
if (!of_device_is_compatible(np, lookup->compatible))
continue;
- if (of_address_to_resource(np, 0, &res))
- continue;
- if (res.start != lookup->phys_addr)
- continue;
+ if (!of_address_to_resource(np, 0, &res))
+ if (res.start != lookup->phys_addr)
+ continue;
pr_debug("%s: devname=%s\n", np->full_name, lookup->name);
return lookup;
}
@@ -462,4 +461,5 @@ int of_platform_populate(struct device_node *root,
of_node_put(root);
return rc;
}
+EXPORT_SYMBOL_GPL(of_platform_populate);
#endif /* CONFIG_OF_ADDRESS */
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
index da14432..f3cfa0b 100644
--- a/drivers/oprofile/oprofile_perf.c
+++ b/drivers/oprofile/oprofile_perf.c
@@ -1,5 +1,6 @@
/*
* Copyright 2010 ARM Ltd.
+ * Copyright 2012 Advanced Micro Devices, Inc., Robert Richter
*
* Perf-events backend for OProfile.
*/
@@ -25,7 +26,7 @@ static int oprofile_perf_enabled;
static DEFINE_MUTEX(oprofile_perf_mutex);
static struct op_counter_config *counter_config;
-static struct perf_event **perf_events[nr_cpumask_bits];
+static DEFINE_PER_CPU(struct perf_event **, perf_events);
static int num_counters;
/*
@@ -38,7 +39,7 @@ static void op_overflow_handler(struct perf_event *event,
u32 cpu = smp_processor_id();
for (id = 0; id < num_counters; ++id)
- if (perf_events[cpu][id] == event)
+ if (per_cpu(perf_events, cpu)[id] == event)
break;
if (id != num_counters)
@@ -74,7 +75,7 @@ static int op_create_counter(int cpu, int event)
{
struct perf_event *pevent;
- if (!counter_config[event].enabled || perf_events[cpu][event])
+ if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event])
return 0;
pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
@@ -91,18 +92,18 @@ static int op_create_counter(int cpu, int event)
return -EBUSY;
}
- perf_events[cpu][event] = pevent;
+ per_cpu(perf_events, cpu)[event] = pevent;
return 0;
}
static void op_destroy_counter(int cpu, int event)
{
- struct perf_event *pevent = perf_events[cpu][event];
+ struct perf_event *pevent = per_cpu(perf_events, cpu)[event];
if (pevent) {
perf_event_release_kernel(pevent);
- perf_events[cpu][event] = NULL;
+ per_cpu(perf_events, cpu)[event] = NULL;
}
}
@@ -257,12 +258,12 @@ void oprofile_perf_exit(void)
for_each_possible_cpu(cpu) {
for (id = 0; id < num_counters; ++id) {
- event = perf_events[cpu][id];
+ event = per_cpu(perf_events, cpu)[id];
if (event)
perf_event_release_kernel(event);
}
- kfree(perf_events[cpu]);
+ kfree(per_cpu(perf_events, cpu));
}
kfree(counter_config);
@@ -277,8 +278,6 @@ int __init oprofile_perf_init(struct oprofile_operations *ops)
if (ret)
return ret;
- memset(&perf_events, 0, sizeof(perf_events));
-
num_counters = perf_num_counters();
if (num_counters <= 0) {
pr_info("oprofile: no performance counters\n");
@@ -298,9 +297,9 @@ int __init oprofile_perf_init(struct oprofile_operations *ops)
}
for_each_possible_cpu(cpu) {
- perf_events[cpu] = kcalloc(num_counters,
+ per_cpu(perf_events, cpu) = kcalloc(num_counters,
sizeof(struct perf_event *), GFP_KERNEL);
- if (!perf_events[cpu]) {
+ if (!per_cpu(perf_events, cpu)) {
pr_info("oprofile: failed to allocate %d perf events "
"for cpu %d\n", num_counters, cpu);
ret = -ENOMEM;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index bf0cee6..099f46c 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -748,6 +748,18 @@ static int pci_pm_suspend_noirq(struct device *dev)
pci_pm_set_unknown_state(pci_dev);
+ /*
+ * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
+ * PCI COMMAND register isn't 0, the BIOS assumes that the controller
+ * hasn't been quiesced and tries to turn it off. If the controller
+ * is already in D3, this can hang or cause memory corruption.
+ *
+ * Since the value of the COMMAND register doesn't matter once the
+ * device has been suspended, we can safely set it to 0 here.
+ */
+ if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
+ pci_write_config_word(pci_dev, PCI_COMMAND, 0);
+
return 0;
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 8f16900..447e834 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2370,7 +2370,7 @@ void pci_enable_acs(struct pci_dev *dev)
* number is always 0 (see the Implementation Note in section 2.2.8.1 of
* the PCI Express Base Specification, Revision 2.1)
*/
-u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
+u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
{
int slot;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 2a75216..d41d4c5 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2143,9 +2143,9 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
quirk_unhide_mch_dev6);
-#ifdef CONFIG_TILE
+#ifdef CONFIG_TILEPRO
/*
- * The Tilera TILEmpower platform needs to set the link speed
+ * The Tilera TILEmpower tilepro platform needs to set the link speed
* to 2.5GT(Giga-Transfers)/s (Gen 1). The default link speed
* setting is 5GT/s (Gen 2). 0x98 is the Link Control2 PCIe
* capability register of the PEX8624 PCIe switch. The switch
@@ -2160,7 +2160,7 @@ static void __devinit quirk_tile_plx_gen1(struct pci_dev *dev)
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1);
-#endif /* CONFIG_TILE */
+#endif /* CONFIG_TILEPRO */
#ifdef CONFIG_PCI_MSI
/* Some chipsets do not support MSI. We cannot easily rely on setting
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index c3b331b..0cc053a 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -61,7 +61,7 @@ static LIST_HEAD(pinctrl_maps);
list_for_each_entry(_maps_node_, &pinctrl_maps, node) \
for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \
_i_ < _maps_node_->num_maps; \
- i++, _map_ = &_maps_node_->maps[_i_])
+ _i_++, _map_ = &_maps_node_->maps[_i_])
/**
* pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support
diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/pinctrl-imx.c
index f6e7c67..90c837f 100644
--- a/drivers/pinctrl/pinctrl-imx.c
+++ b/drivers/pinctrl/pinctrl-imx.c
@@ -27,16 +27,16 @@
#include "core.h"
#include "pinctrl-imx.h"
-#define IMX_PMX_DUMP(info, p, m, c, n) \
-{ \
- int i, j; \
- printk("Format: Pin Mux Config\n"); \
- for (i = 0; i < n; i++) { \
- j = p[i]; \
- printk("%s %d 0x%lx\n", \
- info->pins[j].name, \
- m[i], c[i]); \
- } \
+#define IMX_PMX_DUMP(info, p, m, c, n) \
+{ \
+ int i, j; \
+ printk(KERN_DEBUG "Format: Pin Mux Config\n"); \
+ for (i = 0; i < n; i++) { \
+ j = p[i]; \
+ printk(KERN_DEBUG "%s %d 0x%lx\n", \
+ info->pins[j].name, \
+ m[i], c[i]); \
+ } \
}
/* The bits in CONFIG cell defined in binding doc*/
@@ -173,8 +173,10 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
/* create mux map */
parent = of_get_parent(np);
- if (!parent)
+ if (!parent) {
+ kfree(new_map);
return -EINVAL;
+ }
new_map[0].type = PIN_MAP_TYPE_MUX_GROUP;
new_map[0].data.mux.function = parent->name;
new_map[0].data.mux.group = np->name;
@@ -193,7 +195,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
}
dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n",
- new_map->data.mux.function, new_map->data.mux.group, map_num);
+ (*map)->data.mux.function, (*map)->data.mux.group, map_num);
return 0;
}
@@ -201,10 +203,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
static void imx_dt_free_map(struct pinctrl_dev *pctldev,
struct pinctrl_map *map, unsigned num_maps)
{
- int i;
-
- for (i = 0; i < num_maps; i++)
- kfree(map);
+ kfree(map);
}
static struct pinctrl_ops imx_pctrl_ops = {
@@ -478,6 +477,7 @@ static int __devinit imx_pinctrl_parse_groups(struct device_node *np,
#ifdef DEBUG
IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins);
#endif
+
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-imx6q.c b/drivers/pinctrl/pinctrl-imx6q.c
index 7737d4d..e9bf71f 100644
--- a/drivers/pinctrl/pinctrl-imx6q.c
+++ b/drivers/pinctrl/pinctrl-imx6q.c
@@ -1950,6 +1950,8 @@ static struct imx_pin_reg imx6q_pin_regs[] = {
IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 5, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__GPIO_1_12 */
IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 6, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__SJC_DONE */
IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 7, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__ANATOP_TESTO_3 */
+ IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 0, 0x0000, 0), /* MX6Q_PAD_ENET_RX_ER__ANATOP_USBOTG_ID */
+ IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_1__ANATOP_USBOTG_ID */
};
/* Pad names for the pinmux subsystem */
diff --git a/drivers/pinctrl/pinctrl-mxs.c b/drivers/pinctrl/pinctrl-mxs.c
index 556e45a..4ba4636 100644
--- a/drivers/pinctrl/pinctrl-mxs.c
+++ b/drivers/pinctrl/pinctrl-mxs.c
@@ -107,8 +107,10 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
/* Compose group name */
group = kzalloc(length, GFP_KERNEL);
- if (!group)
- return -ENOMEM;
+ if (!group) {
+ ret = -ENOMEM;
+ goto free;
+ }
snprintf(group, length, "%s.%d", np->name, reg);
new_map[i].data.mux.group = group;
i++;
@@ -118,7 +120,7 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL);
if (!pconfig) {
ret = -ENOMEM;
- goto free;
+ goto free_group;
}
new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP;
@@ -133,6 +135,9 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
return 0;
+free_group:
+ if (!purecfg)
+ kfree(group);
free:
kfree(new_map);
return ret;
@@ -511,6 +516,7 @@ int __devinit mxs_pinctrl_probe(struct platform_device *pdev,
return 0;
err:
+ platform_set_drvdata(pdev, NULL);
iounmap(d->base);
return ret;
}
@@ -520,6 +526,7 @@ int __devexit mxs_pinctrl_remove(struct platform_device *pdev)
{
struct mxs_pinctrl_data *d = platform_get_drvdata(pdev);
+ platform_set_drvdata(pdev, NULL);
pinctrl_unregister(d->pctl);
iounmap(d->base);
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index b8e01c3..dd9e6f2 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -24,6 +24,7 @@
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
+#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf.h>
@@ -672,7 +673,7 @@ static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
* wakeup is anyhow controlled by the RIMSC and FIMSC registers.
*/
if (nmk_chip->sleepmode && on) {
- __nmk_gpio_set_slpm(nmk_chip, gpio % nmk_chip->chip.base,
+ __nmk_gpio_set_slpm(nmk_chip, gpio % NMK_GPIO_PER_CHIP,
NMK_GPIO_SLPM_WAKEUP_ENABLE);
}
@@ -1197,7 +1198,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
if (!pdata)
return -ENOMEM;
- if (of_get_property(np, "supports-sleepmode", NULL))
+ if (of_get_property(np, "st,supports-sleepmode", NULL))
pdata->supports_sleepmode = true;
if (of_property_read_u32(np, "gpio-bank", &dev->id)) {
@@ -1245,6 +1246,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
ret = PTR_ERR(clk);
goto out_unmap;
}
+ clk_prepare(clk);
nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL);
if (!nmk_chip) {
@@ -1436,7 +1438,27 @@ static int nmk_pmx_enable(struct pinctrl_dev *pctldev, unsigned function,
dev_dbg(npct->dev, "enable group %s, %u pins\n", g->name, g->npins);
- /* Handle this special glitch on altfunction C */
+ /*
+ * If we're setting altfunc C by setting both AFSLA and AFSLB to 1,
+ * we may pass through an undesired state. In this case we take
+ * some extra care.
+ *
+ * Safe sequence used to switch IOs between GPIO and Alternate-C mode:
+ * - Save SLPM registers (since we have a shadow register in the
+ * nmk_chip, we use that as a backup)
+ * - Set SLPM=0 for the IOs you want to switch and others to 1
+ * - Configure the GPIO registers for the IOs that are being switched
+ * - Set IOFORCE=1
+ * - Modify the AFLSA/B registers for the IOs that are being switched
+ * - Set IOFORCE=0
+ * - Restore SLPM registers
+ * - Any spurious wake up event during switch sequence to be ignored
+ * and cleared
+ *
+ * We REALLY need to save ALL slpm registers, because the external
+ * IOFORCE will switch *all* ports to their sleepmode setting so as
+ * to avoid glitches. (Not just one port!)
+ */
glitch = (g->altsetting == NMK_GPIO_ALT_C);
if (glitch) {
@@ -1688,18 +1710,34 @@ static struct pinctrl_desc nmk_pinctrl_desc = {
.owner = THIS_MODULE,
};
+static const struct of_device_id nmk_pinctrl_match[] = {
+ {
+ .compatible = "stericsson,nmk_pinctrl",
+ .data = (void *)PINCTRL_NMK_DB8500,
+ },
+ {},
+};
+
static int __devinit nmk_pinctrl_probe(struct platform_device *pdev)
{
const struct platform_device_id *platid = platform_get_device_id(pdev);
+ struct device_node *np = pdev->dev.of_node;
struct nmk_pinctrl *npct;
+ unsigned int version = 0;
int i;
npct = devm_kzalloc(&pdev->dev, sizeof(*npct), GFP_KERNEL);
if (!npct)
return -ENOMEM;
+ if (platid)
+ version = platid->driver_data;
+ else if (np)
+ version = (unsigned int)
+ of_match_device(nmk_pinctrl_match, &pdev->dev)->data;
+
/* Poke in other ASIC variants here */
- if (platid->driver_data == PINCTRL_NMK_DB8500)
+ if (version == PINCTRL_NMK_DB8500)
nmk_pinctrl_db8500_init(&npct->soc);
/*
@@ -1758,6 +1796,7 @@ static struct platform_driver nmk_pinctrl_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "pinctrl-nomadik",
+ .of_match_table = nmk_pinctrl_match,
},
.probe = nmk_pinctrl_probe,
.id_table = nmk_pinctrl_id,
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index ba15b1a..2aae8a8 100644
--- a/drivers/pinctrl/pinctrl-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -8,24 +8,61 @@
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/irqdomain.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#define DRIVER_NAME "pinmux-sirf"
#define SIRFSOC_NUM_PADS 622
-#define SIRFSOC_GPIO_PAD_EN(g) ((g)*0x100 + 0x84)
#define SIRFSOC_RSC_PIN_MUX 0x4
+#define SIRFSOC_GPIO_PAD_EN(g) ((g)*0x100 + 0x84)
+#define SIRFSOC_GPIO_CTRL(g, i) ((g)*0x100 + (i)*4)
+#define SIRFSOC_GPIO_DSP_EN0 (0x80)
+#define SIRFSOC_GPIO_PAD_EN(g) ((g)*0x100 + 0x84)
+#define SIRFSOC_GPIO_INT_STATUS(g) ((g)*0x100 + 0x8C)
+
+#define SIRFSOC_GPIO_CTL_INTR_LOW_MASK 0x1
+#define SIRFSOC_GPIO_CTL_INTR_HIGH_MASK 0x2
+#define SIRFSOC_GPIO_CTL_INTR_TYPE_MASK 0x4
+#define SIRFSOC_GPIO_CTL_INTR_EN_MASK 0x8
+#define SIRFSOC_GPIO_CTL_INTR_STS_MASK 0x10
+#define SIRFSOC_GPIO_CTL_OUT_EN_MASK 0x20
+#define SIRFSOC_GPIO_CTL_DATAOUT_MASK 0x40
+#define SIRFSOC_GPIO_CTL_DATAIN_MASK 0x80
+#define SIRFSOC_GPIO_CTL_PULL_MASK 0x100
+#define SIRFSOC_GPIO_CTL_PULL_HIGH 0x200
+#define SIRFSOC_GPIO_CTL_DSP_INT 0x400
+
+#define SIRFSOC_GPIO_NO_OF_BANKS 5
+#define SIRFSOC_GPIO_BANK_SIZE 32
+#define SIRFSOC_GPIO_NUM(bank, index) (((bank)*(32)) + (index))
+
+struct sirfsoc_gpio_bank {
+ struct of_mm_gpio_chip chip;
+ struct irq_domain *domain;
+ int id;
+ int parent_irq;
+ spinlock_t lock;
+};
+
+static struct sirfsoc_gpio_bank sgpio_bank[SIRFSOC_GPIO_NO_OF_BANKS];
+static DEFINE_SPINLOCK(sgpio_lock);
+
/*
* pad list for the pinmux subsystem
* refer to CS-131858-DC-6A.xls
@@ -1184,7 +1221,7 @@ out_no_gpio_remap:
return ret;
}
-static const struct of_device_id pinmux_ids[] = {
+static const struct of_device_id pinmux_ids[] __devinitconst = {
{ .compatible = "sirf,prima2-gpio-pinmux" },
{}
};
@@ -1204,7 +1241,457 @@ static int __init sirfsoc_pinmux_init(void)
}
arch_initcall(sirfsoc_pinmux_init);
+static inline int sirfsoc_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct sirfsoc_gpio_bank *bank = container_of(to_of_mm_gpio_chip(chip),
+ struct sirfsoc_gpio_bank, chip);
+
+ return irq_find_mapping(bank->domain, offset);
+}
+
+static inline int sirfsoc_gpio_to_offset(unsigned int gpio)
+{
+ return gpio % SIRFSOC_GPIO_BANK_SIZE;
+}
+
+static inline struct sirfsoc_gpio_bank *sirfsoc_gpio_to_bank(unsigned int gpio)
+{
+ return &sgpio_bank[gpio / SIRFSOC_GPIO_BANK_SIZE];
+}
+
+void sirfsoc_gpio_set_pull(unsigned gpio, unsigned mode)
+{
+ struct sirfsoc_gpio_bank *bank = sirfsoc_gpio_to_bank(gpio);
+ int idx = sirfsoc_gpio_to_offset(gpio);
+ u32 val, offset;
+ unsigned long flags;
+
+ offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
+
+ spin_lock_irqsave(&sgpio_lock, flags);
+
+ val = readl(bank->chip.regs + offset);
+
+ switch (mode) {
+ case SIRFSOC_GPIO_PULL_NONE:
+ val &= ~SIRFSOC_GPIO_CTL_PULL_MASK;
+ break;
+ case SIRFSOC_GPIO_PULL_UP:
+ val |= SIRFSOC_GPIO_CTL_PULL_MASK;
+ val |= SIRFSOC_GPIO_CTL_PULL_HIGH;
+ break;
+ case SIRFSOC_GPIO_PULL_DOWN:
+ val |= SIRFSOC_GPIO_CTL_PULL_MASK;
+ val &= ~SIRFSOC_GPIO_CTL_PULL_HIGH;
+ break;
+ default:
+ break;
+ }
+
+ writel(val, bank->chip.regs + offset);
+
+ spin_unlock_irqrestore(&sgpio_lock, flags);
+}
+EXPORT_SYMBOL(sirfsoc_gpio_set_pull);
+
+static inline struct sirfsoc_gpio_bank *sirfsoc_irqchip_to_bank(struct gpio_chip *chip)
+{
+ return container_of(to_of_mm_gpio_chip(chip), struct sirfsoc_gpio_bank, chip);
+}
+
+static void sirfsoc_gpio_irq_ack(struct irq_data *d)
+{
+ struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ int idx = d->hwirq % SIRFSOC_GPIO_BANK_SIZE;
+ u32 val, offset;
+ unsigned long flags;
+
+ offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
+
+ spin_lock_irqsave(&sgpio_lock, flags);
+
+ val = readl(bank->chip.regs + offset);
+
+ writel(val, bank->chip.regs + offset);
+
+ spin_unlock_irqrestore(&sgpio_lock, flags);
+}
+
+static void __sirfsoc_gpio_irq_mask(struct sirfsoc_gpio_bank *bank, int idx)
+{
+ u32 val, offset;
+ unsigned long flags;
+
+ offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
+
+ spin_lock_irqsave(&sgpio_lock, flags);
+
+ val = readl(bank->chip.regs + offset);
+ val &= ~SIRFSOC_GPIO_CTL_INTR_EN_MASK;
+ val &= ~SIRFSOC_GPIO_CTL_INTR_STS_MASK;
+ writel(val, bank->chip.regs + offset);
+
+ spin_unlock_irqrestore(&sgpio_lock, flags);
+}
+
+static void sirfsoc_gpio_irq_mask(struct irq_data *d)
+{
+ struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+ __sirfsoc_gpio_irq_mask(bank, d->hwirq % SIRFSOC_GPIO_BANK_SIZE);
+}
+
+static void sirfsoc_gpio_irq_unmask(struct irq_data *d)
+{
+ struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ int idx = d->hwirq % SIRFSOC_GPIO_BANK_SIZE;
+ u32 val, offset;
+ unsigned long flags;
+
+ offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
+
+ spin_lock_irqsave(&sgpio_lock, flags);
+
+ val = readl(bank->chip.regs + offset);
+ val &= ~SIRFSOC_GPIO_CTL_INTR_STS_MASK;
+ val |= SIRFSOC_GPIO_CTL_INTR_EN_MASK;
+ writel(val, bank->chip.regs + offset);
+
+ spin_unlock_irqrestore(&sgpio_lock, flags);
+}
+
+static int sirfsoc_gpio_irq_type(struct irq_data *d, unsigned type)
+{
+ struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ int idx = d->hwirq % SIRFSOC_GPIO_BANK_SIZE;
+ u32 val, offset;
+ unsigned long flags;
+
+ offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
+
+ spin_lock_irqsave(&sgpio_lock, flags);
+
+ val = readl(bank->chip.regs + offset);
+ val &= ~SIRFSOC_GPIO_CTL_INTR_STS_MASK;
+
+ switch (type) {
+ case IRQ_TYPE_NONE:
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK;
+ val &= ~SIRFSOC_GPIO_CTL_INTR_LOW_MASK;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val &= ~SIRFSOC_GPIO_CTL_INTR_HIGH_MASK;
+ val |= SIRFSOC_GPIO_CTL_INTR_LOW_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK | SIRFSOC_GPIO_CTL_INTR_LOW_MASK |
+ SIRFSOC_GPIO_CTL_INTR_TYPE_MASK;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ val &= ~(SIRFSOC_GPIO_CTL_INTR_HIGH_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK);
+ val |= SIRFSOC_GPIO_CTL_INTR_LOW_MASK;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK;
+ val &= ~(SIRFSOC_GPIO_CTL_INTR_LOW_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK);
+ break;
+ }
+
+ writel(val, bank->chip.regs + offset);
+
+ spin_unlock_irqrestore(&sgpio_lock, flags);
+
+ return 0;
+}
+
+static struct irq_chip sirfsoc_irq_chip = {
+ .name = "sirf-gpio-irq",
+ .irq_ack = sirfsoc_gpio_irq_ack,
+ .irq_mask = sirfsoc_gpio_irq_mask,
+ .irq_unmask = sirfsoc_gpio_irq_unmask,
+ .irq_set_type = sirfsoc_gpio_irq_type,
+};
+
+static void sirfsoc_gpio_handle_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct sirfsoc_gpio_bank *bank = irq_get_handler_data(irq);
+ u32 status, ctrl;
+ int idx = 0;
+ unsigned int first_irq;
+
+ status = readl(bank->chip.regs + SIRFSOC_GPIO_INT_STATUS(bank->id));
+ if (!status) {
+ printk(KERN_WARNING
+ "%s: gpio id %d status %#x no interrupt is flaged\n",
+ __func__, bank->id, status);
+ handle_bad_irq(irq, desc);
+ return;
+ }
+
+ first_irq = bank->domain->revmap_data.legacy.first_irq;
+
+ while (status) {
+ ctrl = readl(bank->chip.regs + SIRFSOC_GPIO_CTRL(bank->id, idx));
+
+ /*
+ * Here we must check whether the corresponding GPIO's interrupt
+ * has been enabled, otherwise just skip it
+ */
+ if ((status & 0x1) && (ctrl & SIRFSOC_GPIO_CTL_INTR_EN_MASK)) {
+ pr_debug("%s: gpio id %d idx %d happens\n",
+ __func__, bank->id, idx);
+ generic_handle_irq(first_irq + idx);
+ }
+
+ idx++;
+ status = status >> 1;
+ }
+}
+
+static inline void sirfsoc_gpio_set_input(struct sirfsoc_gpio_bank *bank, unsigned ctrl_offset)
+{
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+
+ val = readl(bank->chip.regs + ctrl_offset);
+ val &= ~SIRFSOC_GPIO_CTL_OUT_EN_MASK;
+ writel(val, bank->chip.regs + ctrl_offset);
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+}
+
+static int sirfsoc_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ struct sirfsoc_gpio_bank *bank = sirfsoc_irqchip_to_bank(chip);
+ unsigned long flags;
+
+ if (pinctrl_request_gpio(chip->base + offset))
+ return -ENODEV;
+
+ spin_lock_irqsave(&bank->lock, flags);
+
+ /*
+ * default status:
+ * set direction as input and mask irq
+ */
+ sirfsoc_gpio_set_input(bank, SIRFSOC_GPIO_CTRL(bank->id, offset));
+ __sirfsoc_gpio_irq_mask(bank, offset);
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+}
+
+static void sirfsoc_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ struct sirfsoc_gpio_bank *bank = sirfsoc_irqchip_to_bank(chip);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+
+ __sirfsoc_gpio_irq_mask(bank, offset);
+ sirfsoc_gpio_set_input(bank, SIRFSOC_GPIO_CTRL(bank->id, offset));
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ pinctrl_free_gpio(chip->base + offset);
+}
+
+static int sirfsoc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ struct sirfsoc_gpio_bank *bank = sirfsoc_irqchip_to_bank(chip);
+ int idx = sirfsoc_gpio_to_offset(gpio);
+ unsigned long flags;
+ unsigned offset;
+
+ offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
+
+ spin_lock_irqsave(&bank->lock, flags);
+
+ sirfsoc_gpio_set_input(bank, offset);
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+}
+
+static inline void sirfsoc_gpio_set_output(struct sirfsoc_gpio_bank *bank, unsigned offset,
+ int value)
+{
+ u32 out_ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+
+ out_ctrl = readl(bank->chip.regs + offset);
+ if (value)
+ out_ctrl |= SIRFSOC_GPIO_CTL_DATAOUT_MASK;
+ else
+ out_ctrl &= ~SIRFSOC_GPIO_CTL_DATAOUT_MASK;
+
+ out_ctrl &= ~SIRFSOC_GPIO_CTL_INTR_EN_MASK;
+ out_ctrl |= SIRFSOC_GPIO_CTL_OUT_EN_MASK;
+ writel(out_ctrl, bank->chip.regs + offset);
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+}
+
+static int sirfsoc_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int value)
+{
+ struct sirfsoc_gpio_bank *bank = sirfsoc_irqchip_to_bank(chip);
+ int idx = sirfsoc_gpio_to_offset(gpio);
+ u32 offset;
+ unsigned long flags;
+
+ offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
+
+ spin_lock_irqsave(&sgpio_lock, flags);
+
+ sirfsoc_gpio_set_output(bank, offset, value);
+
+ spin_unlock_irqrestore(&sgpio_lock, flags);
+
+ return 0;
+}
+
+static int sirfsoc_gpio_get_value(struct gpio_chip *chip, unsigned offset)
+{
+ struct sirfsoc_gpio_bank *bank = sirfsoc_irqchip_to_bank(chip);
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+
+ val = readl(bank->chip.regs + SIRFSOC_GPIO_CTRL(bank->id, offset));
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ return !!(val & SIRFSOC_GPIO_CTL_DATAIN_MASK);
+}
+
+static void sirfsoc_gpio_set_value(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct sirfsoc_gpio_bank *bank = sirfsoc_irqchip_to_bank(chip);
+ u32 ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+
+ ctrl = readl(bank->chip.regs + SIRFSOC_GPIO_CTRL(bank->id, offset));
+ if (value)
+ ctrl |= SIRFSOC_GPIO_CTL_DATAOUT_MASK;
+ else
+ ctrl &= ~SIRFSOC_GPIO_CTL_DATAOUT_MASK;
+ writel(ctrl, bank->chip.regs + SIRFSOC_GPIO_CTRL(bank->id, offset));
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+}
+
+int sirfsoc_gpio_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct sirfsoc_gpio_bank *bank = d->host_data;
+
+ if (!bank)
+ return -EINVAL;
+
+ irq_set_chip(irq, &sirfsoc_irq_chip);
+ irq_set_handler(irq, handle_level_irq);
+ irq_set_chip_data(irq, bank);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+const struct irq_domain_ops sirfsoc_gpio_irq_simple_ops = {
+ .map = sirfsoc_gpio_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int __devinit sirfsoc_gpio_probe(struct device_node *np)
+{
+ int i, err = 0;
+ struct sirfsoc_gpio_bank *bank;
+ void *regs;
+ struct platform_device *pdev;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ return -ENODEV;
+
+ regs = of_iomap(np, 0);
+ if (!regs)
+ return -ENOMEM;
+
+ for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
+ bank = &sgpio_bank[i];
+ spin_lock_init(&bank->lock);
+ bank->chip.gc.request = sirfsoc_gpio_request;
+ bank->chip.gc.free = sirfsoc_gpio_free;
+ bank->chip.gc.direction_input = sirfsoc_gpio_direction_input;
+ bank->chip.gc.get = sirfsoc_gpio_get_value;
+ bank->chip.gc.direction_output = sirfsoc_gpio_direction_output;
+ bank->chip.gc.set = sirfsoc_gpio_set_value;
+ bank->chip.gc.to_irq = sirfsoc_gpio_to_irq;
+ bank->chip.gc.base = i * SIRFSOC_GPIO_BANK_SIZE;
+ bank->chip.gc.ngpio = SIRFSOC_GPIO_BANK_SIZE;
+ bank->chip.gc.label = kstrdup(np->full_name, GFP_KERNEL);
+ bank->chip.gc.of_node = np;
+ bank->chip.regs = regs;
+ bank->id = i;
+ bank->parent_irq = platform_get_irq(pdev, i);
+ if (bank->parent_irq < 0) {
+ err = bank->parent_irq;
+ goto out;
+ }
+
+ err = gpiochip_add(&bank->chip.gc);
+ if (err) {
+ pr_err("%s: error in probe function with status %d\n",
+ np->full_name, err);
+ goto out;
+ }
+
+ bank->domain = irq_domain_add_legacy(np, SIRFSOC_GPIO_BANK_SIZE,
+ SIRFSOC_GPIO_IRQ_START + i * SIRFSOC_GPIO_BANK_SIZE, 0,
+ &sirfsoc_gpio_irq_simple_ops, bank);
+
+ if (!bank->domain) {
+ pr_err("%s: Failed to create irqdomain\n", np->full_name);
+ err = -ENOSYS;
+ goto out;
+ }
+
+ irq_set_chained_handler(bank->parent_irq, sirfsoc_gpio_handle_irq);
+ irq_set_handler_data(bank->parent_irq, bank);
+ }
+
+out:
+ iounmap(regs);
+ return err;
+}
+
+static int __init sirfsoc_gpio_init(void)
+{
+
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, pinmux_ids);
+
+ if (!np)
+ return -ENODEV;
+
+ return sirfsoc_gpio_probe(np);
+}
+subsys_initcall(sirfsoc_gpio_init);
+
MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
+ "Yuping Luo <yuping.luo@csr.com>, "
"Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC pin control driver");
MODULE_LICENSE("GPL");
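The sirfsoc probe above pairs each GPIO bank's gpio_chip with a legacy irq_domain, so bank-local hardware IRQ numbers land on a fixed Linux IRQ range and gpio_to_irq() keeps working. A minimal sketch of that pairing under the 3.x-era APIs used here; my_bank_register, MY_NGPIO and MY_IRQ_BASE are hypothetical names, not part of the driver:

#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

#define MY_NGPIO	32
#define MY_IRQ_BASE	160

static int my_bank_register(struct device_node *np, struct gpio_chip *gc,
			    const struct irq_domain_ops *ops, void *host_data)
{
	struct irq_domain *domain;
	int err;

	err = gpiochip_add(gc);			/* expose the bank to gpiolib */
	if (err)
		return err;

	/* hwirqs 0..MY_NGPIO-1 map 1:1 onto MY_IRQ_BASE.. */
	domain = irq_domain_add_legacy(np, MY_NGPIO, MY_IRQ_BASE, 0,
				       ops, host_data);
	if (!domain) {
		gpiochip_remove(gc);		/* best-effort unwind */
		return -ENODEV;
	}

	return 0;
}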
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index 5ae50aa..b3f6b28 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* Inspired from:
* - U300 Pinctl drivers
diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h
index 9155783..d950eb7 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.h
+++ b/drivers/pinctrl/spear/pinctrl-spear.h
@@ -2,7 +2,7 @@
* Driver header file for the ST Microelectronics SPEAr pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
index fff168b..d6cca8c 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1310.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr1310 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -2192,7 +2192,7 @@ static void __exit spear1310_pinctrl_exit(void)
}
module_exit(spear1310_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
index a8ab2a6..a0eb057 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1340.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1340.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr1340 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -1983,7 +1983,7 @@ static void __exit spear1340_pinctrl_exit(void)
}
module_exit(spear1340_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c
index 9c82a35..4dfc284 100644
--- a/drivers/pinctrl/spear/pinctrl-spear300.c
+++ b/drivers/pinctrl/spear/pinctrl-spear300.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr300 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -702,7 +702,7 @@ static void __exit spear300_pinctrl_exit(void)
}
module_exit(spear300_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c
index 1a97076..9688369 100644
--- a/drivers/pinctrl/spear/pinctrl-spear310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear310.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr310 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -425,7 +425,7 @@ static void __exit spear310_pinctrl_exit(void)
}
module_exit(spear310_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, SPEAr310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c
index de726e6..020b1e0 100644
--- a/drivers/pinctrl/spear/pinctrl-spear320.c
+++ b/drivers/pinctrl/spear/pinctrl-spear320.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr320 pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -3462,7 +3462,7 @@ static void __exit spear320_pinctrl_exit(void)
}
module_exit(spear320_pinctrl_exit);
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.c b/drivers/pinctrl/spear/pinctrl-spear3xx.c
index 91c883b..0242378 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.c
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.c
@@ -2,7 +2,7 @@
* Driver for the ST Microelectronics SPEAr3xx pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.h b/drivers/pinctrl/spear/pinctrl-spear3xx.h
index 5d5fdd8..31f4434 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.h
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.h
@@ -2,7 +2,7 @@
* Header file for the ST Microelectronics SPEAr3xx pinmux
*
* Copyright (C) 2012 ST Microelectronics
- * Viresh Kumar <viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index c1a3fd8..c8f40c9 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -523,6 +523,30 @@ static const struct dmi_system_id video_vendor_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"),
},
},
+ {
+ .callback = video_set_backlight_video_vendor,
+ .ident = "Acer Extensa 5235",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5235"),
+ },
+ },
+ {
+ .callback = video_set_backlight_video_vendor,
+ .ident = "Acer TravelMate 5760",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5760"),
+ },
+ },
+ {
+ .callback = video_set_backlight_video_vendor,
+ .ident = "Acer Aspire 5750",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"),
+ },
+ },
{}
};
@@ -1853,8 +1877,7 @@ static int acer_platform_remove(struct platform_device *device)
return 0;
}
-static int acer_platform_suspend(struct platform_device *dev,
-pm_message_t state)
+static int acer_suspend(struct device *dev)
{
u32 value;
struct acer_data *data = &interface->data;
@@ -1876,7 +1899,7 @@ pm_message_t state)
return 0;
}
-static int acer_platform_resume(struct platform_device *device)
+static int acer_resume(struct device *dev)
{
struct acer_data *data = &interface->data;
@@ -1892,6 +1915,8 @@ static int acer_platform_resume(struct platform_device *device)
return 0;
}
+static SIMPLE_DEV_PM_OPS(acer_pm, acer_suspend, acer_resume);
+
static void acer_platform_shutdown(struct platform_device *device)
{
struct acer_data *data = &interface->data;
@@ -1907,11 +1932,10 @@ static struct platform_driver acer_platform_driver = {
.driver = {
.name = "acer-wmi",
.owner = THIS_MODULE,
+ .pm = &acer_pm,
},
.probe = acer_platform_probe,
.remove = acer_platform_remove,
- .suspend = acer_platform_suspend,
- .resume = acer_platform_resume,
.shutdown = acer_platform_shutdown,
};
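The acer-wmi hunks above follow the tree-wide move from the legacy platform_driver .suspend/.resume callbacks (which took a platform_device and a pm_message_t) to dev_pm_ops callbacks that take a struct device. A minimal sketch of the pattern, with hypothetical foo_* names:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware, cache registers that reset over sleep */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* reprogram the hardware from the cached state */
	return 0;
}

/* Declares a struct dev_pm_ops; the callbacks are only wired into the
 * system-sleep slots when CONFIG_PM_SLEEP is enabled. */
static SIMPLE_DEV_PM_OPS(foo_pm, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm,	/* replaces legacy .suspend/.resume */
	},
};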
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 639db4d..2fd9d36 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -5,7 +5,7 @@
*
* (C) 2009 - Peter Feuerer peter (a) piie.net
* http://piie.net
- * 2009 Borislav Petkov <petkovbb@gmail.com>
+ * 2009 Borislav Petkov bp (a) alien8.de
*
* Inspired by and many thanks to:
* o acerfand - Rachel Greenham
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 8a582bd..694a15a 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -87,6 +87,9 @@ static int gmux_update_status(struct backlight_device *bd)
struct apple_gmux_data *gmux_data = bl_get_data(bd);
u32 brightness = bd->props.brightness;
+ if (bd->props.state & BL_CORE_SUSPENDED)
+ return 0;
+
/*
* Older gmux versions require writing out lower bytes first then
* setting the upper byte to 0 to flush the values. Newer versions
@@ -102,6 +105,7 @@ static int gmux_update_status(struct backlight_device *bd)
}
static const struct backlight_ops gmux_bl_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
.get_brightness = gmux_get_brightness,
.update_status = gmux_update_status,
};
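apple-gmux relies on the backlight core's BL_CORE_SUSPENDRESUME option: the core marks the device suspended across system sleep, and update_status only has to skip hardware writes while that state bit is set. A minimal sketch of the same pattern with hypothetical foo_bl_* names:

#include <linux/backlight.h>

static int foo_bl_update_status(struct backlight_device *bd)
{
	if (bd->props.state & BL_CORE_SUSPENDED)
		return 0;	/* the core calls update_status again on resume */

	/* program bd->props.brightness into the hardware here */
	return 0;
}

static int foo_bl_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static const struct backlight_ops foo_bl_ops = {
	.options	= BL_CORE_SUSPENDRESUME,
	.get_brightness	= foo_bl_get_brightness,
	.update_status	= foo_bl_update_status,
};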
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 94f93b6..e2230a2 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -362,15 +362,18 @@ static int cmpc_tablet_remove(struct acpi_device *acpi, int type)
return cmpc_remove_acpi_notify_device(acpi);
}
-static int cmpc_tablet_resume(struct acpi_device *acpi)
+static int cmpc_tablet_resume(struct device *dev)
{
- struct input_dev *inputdev = dev_get_drvdata(&acpi->dev);
+ struct input_dev *inputdev = dev_get_drvdata(dev);
+
unsigned long long val = 0;
- if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val)))
+ if (ACPI_SUCCESS(cmpc_get_tablet(to_acpi_device(dev)->handle, &val)))
input_report_switch(inputdev, SW_TABLET_MODE, !val);
return 0;
}
+static SIMPLE_DEV_PM_OPS(cmpc_tablet_pm, NULL, cmpc_tablet_resume);
+
static const struct acpi_device_id cmpc_tablet_device_ids[] = {
{CMPC_TABLET_HID, 0},
{"", 0}
@@ -384,9 +387,9 @@ static struct acpi_driver cmpc_tablet_acpi_driver = {
.ops = {
.add = cmpc_tablet_add,
.remove = cmpc_tablet_remove,
- .resume = cmpc_tablet_resume,
.notify = cmpc_tablet_handler,
- }
+ },
+ .drv.pm = &cmpc_tablet_pm,
};
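classmate-laptop shows the equivalent conversion for ACPI drivers: the resume callback now takes a struct device, to_acpi_device() recovers the acpi_device when its handle is needed, and the dev_pm_ops structure hangs off .drv.pm instead of .ops.resume. A minimal sketch, with hypothetical bar_* names:

#include <linux/acpi.h>
#include <linux/pm.h>

static int bar_resume(struct device *dev)
{
	acpi_handle handle = to_acpi_device(dev)->handle;

	/* re-evaluate whatever ACPI state the device loses over sleep */
	(void)handle;
	return 0;
}

static SIMPLE_DEV_PM_OPS(bar_pm, NULL, bar_resume);

static struct acpi_driver bar_driver = {
	.name	= "bar",
	.class	= "bar",
	/* .ids and .ops (.add/.remove/.notify) set as in a real driver */
	.drv.pm	= &bar_pm,
};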
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index e6c08ee..5f78aac 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -21,7 +21,6 @@
#include <linux/err.h>
#include <linux/dmi.h>
#include <linux/io.h>
-#include <linux/rfkill.h>
#include <linux/power_supply.h>
#include <linux/acpi.h>
#include <linux/mm.h>
@@ -90,11 +89,8 @@ static struct platform_driver platform_driver = {
static struct platform_device *platform_device;
static struct backlight_device *dell_backlight_device;
-static struct rfkill *wifi_rfkill;
-static struct rfkill *bluetooth_rfkill;
-static struct rfkill *wwan_rfkill;
-static const struct dmi_system_id __initdata dell_device_table[] = {
+static const struct dmi_system_id dell_device_table[] __initconst = {
{
.ident = "Dell laptop",
.matches = {
@@ -119,96 +115,94 @@ static const struct dmi_system_id __initdata dell_device_table[] = {
};
MODULE_DEVICE_TABLE(dmi, dell_device_table);
-static struct dmi_system_id __devinitdata dell_blacklist[] = {
- /* Supported by compal-laptop */
- {
- .ident = "Dell Mini 9",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 910"),
- },
- },
+static struct dmi_system_id __devinitdata dell_quirks[] = {
{
- .ident = "Dell Mini 10",
+ .callback = dmi_matched,
+ .ident = "Dell Vostro V130",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1010"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V130"),
},
+ .driver_data = &quirk_dell_vostro_v130,
},
{
- .ident = "Dell Mini 10v",
+ .callback = dmi_matched,
+ .ident = "Dell Vostro V131",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1011"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
},
+ .driver_data = &quirk_dell_vostro_v130,
},
{
- .ident = "Dell Mini 1012",
+ .callback = dmi_matched,
+ .ident = "Dell Vostro 3350",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3350"),
},
+ .driver_data = &quirk_dell_vostro_v130,
},
{
- .ident = "Dell Inspiron 11z",
+ .callback = dmi_matched,
+ .ident = "Dell Vostro 3555",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1110"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3555"),
},
+ .driver_data = &quirk_dell_vostro_v130,
},
{
- .ident = "Dell Mini 12",
+ .callback = dmi_matched,
+ .ident = "Dell Inspiron N311z",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1210"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N311z"),
},
+ .driver_data = &quirk_dell_vostro_v130,
},
- {}
-};
-
-static struct dmi_system_id __devinitdata dell_quirks[] = {
{
.callback = dmi_matched,
- .ident = "Dell Vostro V130",
+ .ident = "Dell Inspiron M5110",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V130"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
- .ident = "Dell Vostro V131",
+ .ident = "Dell Vostro 3360",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
- .ident = "Dell Vostro 3555",
+ .ident = "Dell Vostro 3460",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3555"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3460"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
- .ident = "Dell Inspiron N311z",
+ .ident = "Dell Vostro 3560",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N311z"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3560"),
},
.driver_data = &quirk_dell_vostro_v130,
},
{
.callback = dmi_matched,
- .ident = "Dell Inspiron M5110",
+ .ident = "Dell Vostro 3450",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell System Vostro 3450"),
},
.driver_data = &quirk_dell_vostro_v130,
},
@@ -305,94 +299,6 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
return buffer;
}
-/* Derived from information in DellWirelessCtl.cpp:
- Class 17, select 11 is radio control. It returns an array of 32-bit values.
-
- Input byte 0 = 0: Wireless information
-
- result[0]: return code
- result[1]:
- Bit 0: Hardware switch supported
- Bit 1: Wifi locator supported
- Bit 2: Wifi is supported
- Bit 3: Bluetooth is supported
- Bit 4: WWAN is supported
- Bit 5: Wireless keyboard supported
- Bits 6-7: Reserved
- Bit 8: Wifi is installed
- Bit 9: Bluetooth is installed
- Bit 10: WWAN is installed
- Bits 11-15: Reserved
- Bit 16: Hardware switch is on
- Bit 17: Wifi is blocked
- Bit 18: Bluetooth is blocked
- Bit 19: WWAN is blocked
- Bits 20-31: Reserved
- result[2]: NVRAM size in bytes
- result[3]: NVRAM format version number
-
- Input byte 0 = 2: Wireless switch configuration
- result[0]: return code
- result[1]:
- Bit 0: Wifi controlled by switch
- Bit 1: Bluetooth controlled by switch
- Bit 2: WWAN controlled by switch
- Bits 3-6: Reserved
- Bit 7: Wireless switch config locked
- Bit 8: Wifi locator enabled
- Bits 9-14: Reserved
- Bit 15: Wifi locator setting locked
- Bits 16-31: Reserved
-*/
-
-static int dell_rfkill_set(void *data, bool blocked)
-{
- int disable = blocked ? 1 : 0;
- unsigned long radio = (unsigned long)data;
- int hwswitch_bit = (unsigned long)data - 1;
- int ret = 0;
-
- get_buffer();
- dell_send_request(buffer, 17, 11);
-
- /* If the hardware switch controls this radio, and the hardware
- switch is disabled, don't allow changing the software state */
- if ((hwswitch_state & BIT(hwswitch_bit)) &&
- !(buffer->output[1] & BIT(16))) {
- ret = -EINVAL;
- goto out;
- }
-
- buffer->input[0] = (1 | (radio<<8) | (disable << 16));
- dell_send_request(buffer, 17, 11);
-
-out:
- release_buffer();
- return ret;
-}
-
-static void dell_rfkill_query(struct rfkill *rfkill, void *data)
-{
- int status;
- int bit = (unsigned long)data + 16;
- int hwswitch_bit = (unsigned long)data - 1;
-
- get_buffer();
- dell_send_request(buffer, 17, 11);
- status = buffer->output[1];
- release_buffer();
-
- rfkill_set_sw_state(rfkill, !!(status & BIT(bit)));
-
- if (hwswitch_state & (BIT(hwswitch_bit)))
- rfkill_set_hw_state(rfkill, !(status & BIT(16)));
-}
-
-static const struct rfkill_ops dell_rfkill_ops = {
- .set_block = dell_rfkill_set,
- .query = dell_rfkill_query,
-};
-
static struct dentry *dell_laptop_dir;
static int dell_debugfs_show(struct seq_file *s, void *data)
@@ -462,108 +368,6 @@ static const struct file_operations dell_debugfs_fops = {
.release = single_release,
};
-static void dell_update_rfkill(struct work_struct *ignored)
-{
- if (wifi_rfkill)
- dell_rfkill_query(wifi_rfkill, (void *)1);
- if (bluetooth_rfkill)
- dell_rfkill_query(bluetooth_rfkill, (void *)2);
- if (wwan_rfkill)
- dell_rfkill_query(wwan_rfkill, (void *)3);
-}
-static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
-
-
-static int __init dell_setup_rfkill(void)
-{
- int status;
- int ret;
-
- if (dmi_check_system(dell_blacklist)) {
- pr_info("Blacklisted hardware detected - not enabling rfkill\n");
- return 0;
- }
-
- get_buffer();
- dell_send_request(buffer, 17, 11);
- status = buffer->output[1];
- buffer->input[0] = 0x2;
- dell_send_request(buffer, 17, 11);
- hwswitch_state = buffer->output[1];
- release_buffer();
-
- if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
- wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev,
- RFKILL_TYPE_WLAN,
- &dell_rfkill_ops, (void *) 1);
- if (!wifi_rfkill) {
- ret = -ENOMEM;
- goto err_wifi;
- }
- ret = rfkill_register(wifi_rfkill);
- if (ret)
- goto err_wifi;
- }
-
- if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) {
- bluetooth_rfkill = rfkill_alloc("dell-bluetooth",
- &platform_device->dev,
- RFKILL_TYPE_BLUETOOTH,
- &dell_rfkill_ops, (void *) 2);
- if (!bluetooth_rfkill) {
- ret = -ENOMEM;
- goto err_bluetooth;
- }
- ret = rfkill_register(bluetooth_rfkill);
- if (ret)
- goto err_bluetooth;
- }
-
- if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) {
- wwan_rfkill = rfkill_alloc("dell-wwan",
- &platform_device->dev,
- RFKILL_TYPE_WWAN,
- &dell_rfkill_ops, (void *) 3);
- if (!wwan_rfkill) {
- ret = -ENOMEM;
- goto err_wwan;
- }
- ret = rfkill_register(wwan_rfkill);
- if (ret)
- goto err_wwan;
- }
-
- return 0;
-err_wwan:
- rfkill_destroy(wwan_rfkill);
- if (bluetooth_rfkill)
- rfkill_unregister(bluetooth_rfkill);
-err_bluetooth:
- rfkill_destroy(bluetooth_rfkill);
- if (wifi_rfkill)
- rfkill_unregister(wifi_rfkill);
-err_wifi:
- rfkill_destroy(wifi_rfkill);
-
- return ret;
-}
-
-static void dell_cleanup_rfkill(void)
-{
- if (wifi_rfkill) {
- rfkill_unregister(wifi_rfkill);
- rfkill_destroy(wifi_rfkill);
- }
- if (bluetooth_rfkill) {
- rfkill_unregister(bluetooth_rfkill);
- rfkill_destroy(bluetooth_rfkill);
- }
- if (wwan_rfkill) {
- rfkill_unregister(wwan_rfkill);
- rfkill_destroy(wwan_rfkill);
- }
-}
-
static int dell_send_intensity(struct backlight_device *bd)
{
int ret = 0;
@@ -655,30 +459,6 @@ static void touchpad_led_exit(void)
led_classdev_unregister(&touchpad_led);
}
-static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
- struct serio *port)
-{
- static bool extended;
-
- if (str & 0x20)
- return false;
-
- if (unlikely(data == 0xe0)) {
- extended = true;
- return false;
- } else if (unlikely(extended)) {
- switch (data) {
- case 0x8:
- schedule_delayed_work(&dell_rfkill_work,
- round_jiffies_relative(HZ));
- break;
- }
- extended = false;
- }
-
- return false;
-}
-
static int __init dell_init(void)
{
int max_intensity = 0;
@@ -720,26 +500,10 @@ static int __init dell_init(void)
goto fail_buffer;
buffer = page_address(bufferpage);
- ret = dell_setup_rfkill();
-
- if (ret) {
- pr_warn("Unable to setup rfkill\n");
- goto fail_rfkill;
- }
-
- ret = i8042_install_filter(dell_laptop_i8042_filter);
- if (ret) {
- pr_warn("Unable to install key filter\n");
- goto fail_filter;
- }
-
if (quirks && quirks->touchpad_led)
touchpad_led_init(&platform_device->dev);
dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
- if (dell_laptop_dir != NULL)
- debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
- &dell_debugfs_fops);
#ifdef CONFIG_ACPI
/* In the event of an ACPI backlight being available, don't
@@ -782,11 +546,6 @@ static int __init dell_init(void)
return 0;
fail_backlight:
- i8042_remove_filter(dell_laptop_i8042_filter);
- cancel_delayed_work_sync(&dell_rfkill_work);
-fail_filter:
- dell_cleanup_rfkill();
-fail_rfkill:
free_page((unsigned long)bufferpage);
fail_buffer:
platform_device_del(platform_device);
@@ -804,10 +563,7 @@ static void __exit dell_exit(void)
debugfs_remove_recursive(dell_laptop_dir);
if (quirks && quirks->touchpad_led)
touchpad_led_exit();
- i8042_remove_filter(dell_laptop_i8042_filter);
- cancel_delayed_work_sync(&dell_rfkill_work);
backlight_device_unregister(dell_backlight_device);
- dell_cleanup_rfkill();
if (platform_device) {
platform_device_unregister(platform_device);
platform_driver_unregister(&platform_driver);
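dell-laptop's quirk table above uses the common DMI pattern: each entry carries .driver_data, and a shared callback latches the matching entry's data when dmi_check_system() finds the running machine. A minimal sketch of that pattern; the my_* names and the touchpad_led field are hypothetical:

#include <linux/dmi.h>

struct my_quirk_data {
	bool touchpad_led;
};

static struct my_quirk_data quirk_example = {
	.touchpad_led = true,
};

static struct my_quirk_data *my_quirks;

static int my_dmi_matched(const struct dmi_system_id *dmi)
{
	my_quirks = dmi->driver_data;
	return 1;
}

static const struct dmi_system_id my_quirk_table[] = {
	{
		.callback = my_dmi_matched,
		.ident = "Example Vostro",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V130"),
		},
		.driver_data = &quirk_example,
	},
	{ }
};

/* In the driver's init path:
 *	dmi_check_system(my_quirk_table);
 *	if (my_quirks && my_quirks->touchpad_led)
 *		... enable the quirk ...
 */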
diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c
index 580d80a..d2e4173 100644
--- a/drivers/platform/x86/fujitsu-tablet.c
+++ b/drivers/platform/x86/fujitsu-tablet.c
@@ -16,6 +16,8 @@
* 59 Temple Place Suite 330, Boston, MA 02111-1307, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -34,7 +36,8 @@
#define ACPI_FUJITSU_CLASS "fujitsu"
#define INVERT_TABLET_MODE_BIT 0x01
-#define FORCE_TABLET_MODE_IF_UNDOCK 0x02
+#define INVERT_DOCK_STATE_BIT 0x02
+#define FORCE_TABLET_MODE_IF_UNDOCK 0x04
#define KEYMAP_LEN 16
@@ -161,6 +164,8 @@ static void fujitsu_send_state(void)
state = fujitsu_read_register(0xdd);
dock = state & 0x02;
+ if (fujitsu.config.quirks & INVERT_DOCK_STATE_BIT)
+ dock = !dock;
if ((fujitsu.config.quirks & FORCE_TABLET_MODE_IF_UNDOCK) && (!dock)) {
tablet_mode = 1;
@@ -221,9 +226,6 @@ static int __devinit input_fujitsu_setup(struct device *parent,
input_set_capability(idev, EV_SW, SW_DOCK);
input_set_capability(idev, EV_SW, SW_TABLET_MODE);
- input_set_capability(idev, EV_SW, SW_DOCK);
- input_set_capability(idev, EV_SW, SW_TABLET_MODE);
-
error = input_register_device(idev);
if (error) {
input_free_device(idev);
@@ -275,25 +277,31 @@ static irqreturn_t fujitsu_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __devinit fujitsu_dmi_default(const struct dmi_system_id *dmi)
+static void __devinit fujitsu_dmi_common(const struct dmi_system_id *dmi)
{
- printk(KERN_INFO MODULENAME ": %s\n", dmi->ident);
+ pr_info("%s\n", dmi->ident);
memcpy(fujitsu.config.keymap, dmi->driver_data,
sizeof(fujitsu.config.keymap));
+}
+
+static int __devinit fujitsu_dmi_lifebook(const struct dmi_system_id *dmi)
+{
+ fujitsu_dmi_common(dmi);
+ fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT;
return 1;
}
static int __devinit fujitsu_dmi_stylistic(const struct dmi_system_id *dmi)
{
- fujitsu_dmi_default(dmi);
+ fujitsu_dmi_common(dmi);
fujitsu.config.quirks |= FORCE_TABLET_MODE_IF_UNDOCK;
- fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT;
+ fujitsu.config.quirks |= INVERT_DOCK_STATE_BIT;
return 1;
}
static struct dmi_system_id dmi_ids[] __initconst = {
{
- .callback = fujitsu_dmi_default,
+ .callback = fujitsu_dmi_lifebook,
.ident = "Fujitsu Siemens P/T Series",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -302,7 +310,7 @@ static struct dmi_system_id dmi_ids[] __initconst = {
.driver_data = keymap_Lifebook_Tseries
},
{
- .callback = fujitsu_dmi_default,
+ .callback = fujitsu_dmi_lifebook,
.ident = "Fujitsu Lifebook T Series",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -320,7 +328,7 @@ static struct dmi_system_id dmi_ids[] __initconst = {
.driver_data = keymap_Stylistic_Tseries
},
{
- .callback = fujitsu_dmi_default,
+ .callback = fujitsu_dmi_lifebook,
.ident = "Fujitsu LifeBook U810",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -347,7 +355,7 @@ static struct dmi_system_id dmi_ids[] __initconst = {
.driver_data = keymap_Stylistic_ST5xxx
},
{
- .callback = fujitsu_dmi_default,
+ .callback = fujitsu_dmi_lifebook,
.ident = "Unknown (using defaults)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, ""),
@@ -432,12 +440,14 @@ static int __devexit acpi_fujitsu_remove(struct acpi_device *adev, int type)
return 0;
}
-static int acpi_fujitsu_resume(struct acpi_device *adev)
+static int acpi_fujitsu_resume(struct device *dev)
{
fujitsu_reset();
return 0;
}
+static SIMPLE_DEV_PM_OPS(acpi_fujitsu_pm, NULL, acpi_fujitsu_resume);
+
static struct acpi_driver acpi_fujitsu_driver = {
.name = MODULENAME,
.class = "hotkey",
@@ -445,8 +455,8 @@ static struct acpi_driver acpi_fujitsu_driver = {
.ops = {
.add = acpi_fujitsu_add,
.remove = acpi_fujitsu_remove,
- .resume = acpi_fujitsu_resume,
- }
+ },
+ .drv.pm = &acpi_fujitsu_pm,
};
static int __init fujitsu_module_init(void)
@@ -473,6 +483,6 @@ module_exit(fujitsu_module_exit);
MODULE_AUTHOR("Robert Gerlach <khnz@gmx.de>");
MODULE_DESCRIPTION("Fujitsu tablet pc extras driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION("2.4");
+MODULE_VERSION("2.5");
MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index 7387f97..d9ab6f6 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -2,7 +2,7 @@
* hdaps.c - driver for IBM's Hard Drive Active Protection System
*
* Copyright (C) 2005 Robert Love <rml@novell.com>
- * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com>
+ * Copyright (C) 2005 Jesper Juhl <jj@chaosbits.net>
*
* The HardDisk Active Protection System (hdaps) is present in IBM ThinkPads
* starting with the R40, T41, and X40. It provides a basic two-axis
@@ -305,17 +305,19 @@ static int hdaps_probe(struct platform_device *dev)
return 0;
}
-static int hdaps_resume(struct platform_device *dev)
+static int hdaps_resume(struct device *dev)
{
return hdaps_device_init();
}
+static SIMPLE_DEV_PM_OPS(hdaps_pm, NULL, hdaps_resume);
+
static struct platform_driver hdaps_driver = {
.probe = hdaps_probe,
- .resume = hdaps_resume,
.driver = {
.name = "hdaps",
.owner = THIS_MODULE,
+ .pm = &hdaps_pm,
},
};
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index e2faa3c..387183a 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -634,6 +634,8 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
RFKILL_TYPE_WLAN,
&hp_wmi_rfkill_ops,
(void *) HPWMI_WIFI);
+ if (!wifi_rfkill)
+ return -ENOMEM;
rfkill_init_sw_state(wifi_rfkill,
hp_wmi_get_sw_state(HPWMI_WIFI));
rfkill_set_hw_state(wifi_rfkill,
@@ -648,6 +650,10 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
RFKILL_TYPE_BLUETOOTH,
&hp_wmi_rfkill_ops,
(void *) HPWMI_BLUETOOTH);
+ if (!bluetooth_rfkill) {
+ err = -ENOMEM;
+ goto register_wifi_error;
+ }
rfkill_init_sw_state(bluetooth_rfkill,
hp_wmi_get_sw_state(HPWMI_BLUETOOTH));
rfkill_set_hw_state(bluetooth_rfkill,
@@ -662,6 +668,10 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
RFKILL_TYPE_WWAN,
&hp_wmi_rfkill_ops,
(void *) HPWMI_WWAN);
+ if (!wwan_rfkill) {
+ err = -ENOMEM;
+ goto register_bluetooth_error;
+ }
rfkill_init_sw_state(wwan_rfkill,
hp_wmi_get_sw_state(HPWMI_WWAN));
rfkill_set_hw_state(wwan_rfkill,
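The hp-wmi hunks add the missing NULL checks: rfkill_alloc() returns NULL on allocation failure, and anything already registered has to be unwound before bailing out. A minimal sketch of that allocate/register/unwind sequence, with hypothetical names and placeholder ops/data arguments:

#include <linux/errno.h>
#include <linux/rfkill.h>

static int my_rfkill_setup(struct device *dev, const struct rfkill_ops *ops)
{
	struct rfkill *wifi, *bt;
	int err;

	wifi = rfkill_alloc("my-wifi", dev, RFKILL_TYPE_WLAN, ops, NULL);
	if (!wifi)
		return -ENOMEM;
	err = rfkill_register(wifi);
	if (err)
		goto err_wifi_alloc;

	bt = rfkill_alloc("my-bluetooth", dev, RFKILL_TYPE_BLUETOOTH, ops, NULL);
	if (!bt) {
		err = -ENOMEM;
		goto err_wifi_reg;
	}
	err = rfkill_register(bt);
	if (err)
		goto err_bt_alloc;

	return 0;

err_bt_alloc:
	rfkill_destroy(bt);
err_wifi_reg:
	rfkill_unregister(wifi);
err_wifi_alloc:
	rfkill_destroy(wifi);
	return err;
}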
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 22b2dfa..f4d9115 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -353,20 +353,22 @@ static int lis3lv02d_remove(struct acpi_device *device, int type)
#ifdef CONFIG_PM
-static int lis3lv02d_suspend(struct acpi_device *device, pm_message_t state)
+static int lis3lv02d_suspend(struct device *dev)
{
/* make sure the device is off when we suspend */
lis3lv02d_poweroff(&lis3_dev);
return 0;
}
-static int lis3lv02d_resume(struct acpi_device *device)
+static int lis3lv02d_resume(struct device *dev)
{
return lis3lv02d_poweron(&lis3_dev);
}
+
+static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume);
+#define HP_ACCEL_PM (&hp_accel_pm)
#else
-#define lis3lv02d_suspend NULL
-#define lis3lv02d_resume NULL
+#define HP_ACCEL_PM NULL
#endif
/* For the HP MDPS aka 3D Driveguard */
@@ -377,9 +379,8 @@ static struct acpi_driver lis3lv02d_driver = {
.ops = {
.add = lis3lv02d_add,
.remove = lis3lv02d_remove,
- .suspend = lis3lv02d_suspend,
- .resume = lis3lv02d_resume,
- }
+ },
+ .drv.pm = HP_ACCEL_PM,
};
static int __init lis3lv02d_init_module(void)
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index ac902f7..17f6dfd 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -194,7 +194,6 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data)
/*
* debugfs
*/
-#define DEBUGFS_EVENT_LEN (4096)
static int debugfs_status_show(struct seq_file *s, void *data)
{
unsigned long value;
@@ -315,7 +314,7 @@ static int __devinit ideapad_debugfs_init(struct ideapad_private *priv)
node = debugfs_create_file("status", S_IRUGO, priv->debug, NULL,
&debugfs_status_fops);
if (!node) {
- pr_err("failed to create event in debugfs");
+ pr_err("failed to create status in debugfs");
goto errout;
}
@@ -695,10 +694,10 @@ MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
{
int ret, i;
- unsigned long cfg;
+ int cfg;
struct ideapad_private *priv;
- if (read_method_int(adevice->handle, "_CFG", (int *)&cfg))
+ if (read_method_int(adevice->handle, "_CFG", &cfg))
return -ENODEV;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -722,7 +721,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
goto input_failed;
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
- if (test_bit(ideapad_rfk_data[i].cfgbit, &cfg))
+ if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
ideapad_register_rfkill(adevice, i);
else
priv->rfk[i] = NULL;
@@ -785,6 +784,10 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
case 9:
ideapad_sync_rfk_state(priv);
break;
+ case 13:
+ case 6:
+ ideapad_input_report(priv, vpc_bit);
+ break;
case 4:
ideapad_backlight_notify_brightness(priv);
break;
@@ -795,7 +798,7 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
ideapad_backlight_notify_power(priv);
break;
default:
- ideapad_input_report(priv, vpc_bit);
+ pr_info("Unknown event: %lu\n", vpc_bit);
}
}
}
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 0ffdb3c..5051aa9 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -72,6 +72,7 @@
#include <linux/string.h>
#include <linux/tick.h>
#include <linux/timer.h>
+#include <linux/dmi.h>
#include <drm/i915_drm.h>
#include <asm/msr.h>
#include <asm/processor.h>
@@ -1485,6 +1486,24 @@ static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = {
MODULE_DEVICE_TABLE(pci, ips_id_table);
+static int ips_blacklist_callback(const struct dmi_system_id *id)
+{
+ pr_info("Blacklisted intel_ips for %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id ips_blacklist[] = {
+ {
+ .callback = ips_blacklist_callback,
+ .ident = "HP ProBook",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"),
+ },
+ },
+ { } /* terminating entry */
+};
+
static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
u64 platform_info;
@@ -1494,6 +1513,9 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
u16 htshi, trc, trc_required_mask;
u8 tse;
+ if (dmi_check_system(ips_blacklist))
+ return -ENODEV;
+
ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL);
if (!ips)
return -ENOMEM;
@@ -1697,21 +1719,6 @@ static void ips_remove(struct pci_dev *dev)
dev_dbg(&dev->dev, "IPS driver removed\n");
}
-#ifdef CONFIG_PM
-static int ips_suspend(struct pci_dev *dev, pm_message_t state)
-{
- return 0;
-}
-
-static int ips_resume(struct pci_dev *dev)
-{
- return 0;
-}
-#else
-#define ips_suspend NULL
-#define ips_resume NULL
-#endif /* CONFIG_PM */
-
static void ips_shutdown(struct pci_dev *dev)
{
}
@@ -1721,8 +1728,6 @@ static struct pci_driver ips_pci_driver = {
.id_table = ips_id_table,
.probe = ips_probe,
.remove = ips_remove,
- .suspend = ips_suspend,
- .resume = ips_resume,
.shutdown = ips_shutdown,
};
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 5ae9cd9..ea7422f 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -418,23 +418,23 @@ static struct thermal_device_info *initialize_sensor(int index)
/**
* mid_thermal_resume - resume routine
- * @pdev: platform device structure
+ * @dev: device structure
*
* mid thermal resume: re-initializes the adc. Can sleep.
*/
-static int mid_thermal_resume(struct platform_device *pdev)
+static int mid_thermal_resume(struct device *dev)
{
- return mid_initialize_adc(&pdev->dev);
+ return mid_initialize_adc(dev);
}
/**
* mid_thermal_suspend - suspend routine
- * @pdev: platform device structure
+ * @dev: device structure
*
* mid thermal suspend implements the suspend functionality
* by stopping the ADC. Can sleep.
*/
-static int mid_thermal_suspend(struct platform_device *pdev, pm_message_t mesg)
+static int mid_thermal_suspend(struct device *dev)
{
/*
* This just stops the ADC and does not disable it.
@@ -444,6 +444,9 @@ static int mid_thermal_suspend(struct platform_device *pdev, pm_message_t mesg)
return configure_adc(0);
}
+static SIMPLE_DEV_PM_OPS(mid_thermal_pm,
+ mid_thermal_suspend, mid_thermal_resume);
+
/**
* read_curr_temp - reads the current temperature and stores in temp
* @temp: holds the current temperature value after reading
@@ -557,10 +560,9 @@ static struct platform_driver mid_thermal_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .pm = &mid_thermal_pm,
},
.probe = mid_thermal_probe,
- .suspend = mid_thermal_suspend,
- .resume = mid_thermal_resume,
.remove = __devexit_p(mid_thermal_remove),
.id_table = therm_id_table,
};
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index bb51321..f644418 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -85,7 +85,8 @@
#define MSI_STANDARD_EC_TOUCHPAD_ADDRESS 0xe4
#define MSI_STANDARD_EC_TOUCHPAD_MASK (1 << 4)
-static int msi_laptop_resume(struct platform_device *device);
+static int msi_laptop_resume(struct device *device);
+static SIMPLE_DEV_PM_OPS(msi_laptop_pm, NULL, msi_laptop_resume);
#define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f
@@ -437,8 +438,8 @@ static struct platform_driver msipf_driver = {
.driver = {
.name = "msi-laptop-pf",
.owner = THIS_MODULE,
+ .pm = &msi_laptop_pm,
},
- .resume = msi_laptop_resume,
};
static struct platform_device *msipf_device;
@@ -752,7 +753,7 @@ err_bluetooth:
return retval;
}
-static int msi_laptop_resume(struct platform_device *device)
+static int msi_laptop_resume(struct device *device)
{
u8 data;
int result;
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index ffff8b4..2448007 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -177,7 +177,6 @@ enum SINF_BITS { SINF_NUM_BATTERIES = 0,
static int acpi_pcc_hotkey_add(struct acpi_device *device);
static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type);
-static int acpi_pcc_hotkey_resume(struct acpi_device *device);
static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id pcc_device_ids[] = {
@@ -189,6 +188,9 @@ static const struct acpi_device_id pcc_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, pcc_device_ids);
+static int acpi_pcc_hotkey_resume(struct device *dev);
+static SIMPLE_DEV_PM_OPS(acpi_pcc_hotkey_pm, NULL, acpi_pcc_hotkey_resume);
+
static struct acpi_driver acpi_pcc_driver = {
.name = ACPI_PCC_DRIVER_NAME,
.class = ACPI_PCC_CLASS,
@@ -196,9 +198,9 @@ static struct acpi_driver acpi_pcc_driver = {
.ops = {
.add = acpi_pcc_hotkey_add,
.remove = acpi_pcc_hotkey_remove,
- .resume = acpi_pcc_hotkey_resume,
.notify = acpi_pcc_hotkey_notify,
},
+ .drv.pm = &acpi_pcc_hotkey_pm,
};
static const struct key_entry panasonic_keymap[] = {
@@ -538,11 +540,15 @@ static void acpi_pcc_destroy_input(struct pcc_acpi *pcc)
/* kernel module interface */
-static int acpi_pcc_hotkey_resume(struct acpi_device *device)
+static int acpi_pcc_hotkey_resume(struct device *dev)
{
- struct pcc_acpi *pcc = acpi_driver_data(device);
+ struct pcc_acpi *pcc;
+
+ if (!dev)
+ return -EINVAL;
- if (device == NULL || pcc == NULL)
+ pcc = acpi_driver_data(to_acpi_device(dev));
+ if (!pcc)
return -EINVAL;
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Sticky mode restore: %d\n",
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 8a51795..9363969 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -141,6 +141,27 @@ MODULE_PARM_DESC(kbd_backlight_timeout,
"(default: 0)");
static void sony_nc_kbd_backlight_resume(void);
+static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
+ unsigned int handle);
+static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd);
+
+static int sony_nc_battery_care_setup(struct platform_device *pd,
+ unsigned int handle);
+static void sony_nc_battery_care_cleanup(struct platform_device *pd);
+
+static int sony_nc_thermal_setup(struct platform_device *pd);
+static void sony_nc_thermal_cleanup(struct platform_device *pd);
+static void sony_nc_thermal_resume(void);
+
+static int sony_nc_lid_resume_setup(struct platform_device *pd);
+static void sony_nc_lid_resume_cleanup(struct platform_device *pd);
+
+static int sony_nc_highspeed_charging_setup(struct platform_device *pd);
+static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd);
+
+static int sony_nc_touchpad_setup(struct platform_device *pd,
+ unsigned int handle);
+static void sony_nc_touchpad_cleanup(struct platform_device *pd);
enum sony_nc_rfkill {
SONY_WIFI,
@@ -153,6 +174,9 @@ enum sony_nc_rfkill {
static int sony_rfkill_handle;
static struct rfkill *sony_rfkill_devices[N_SONY_RFKILL];
static int sony_rfkill_address[N_SONY_RFKILL] = {0x300, 0x500, 0x700, 0x900};
+static int sony_nc_rfkill_setup(struct acpi_device *device,
+ unsigned int handle);
+static void sony_nc_rfkill_cleanup(void);
static void sony_nc_rfkill_update(void);
/*********** Input Devices ***********/
@@ -691,59 +715,97 @@ static struct acpi_device *sony_nc_acpi_device = NULL;
/*
* acpi_evaluate_object wrappers
+ * all useful calls into SNC methods take one or zero parameters and return
+ * integers or arrays.
*/
-static int acpi_callgetfunc(acpi_handle handle, char *name, int *result)
+static union acpi_object *__call_snc_method(acpi_handle handle, char *method,
+ u64 *value)
{
- struct acpi_buffer output;
- union acpi_object out_obj;
+ union acpi_object *result = NULL;
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status;
- output.length = sizeof(out_obj);
- output.pointer = &out_obj;
+ if (value) {
+ struct acpi_object_list params;
+ union acpi_object in;
+ in.type = ACPI_TYPE_INTEGER;
+ in.integer.value = *value;
+ params.count = 1;
+ params.pointer = &in;
+ status = acpi_evaluate_object(handle, method, &params, &output);
+ dprintk("__call_snc_method: [%s:0x%.8x%.8x]\n", method,
+ (unsigned int)(*value >> 32),
+ (unsigned int)*value & 0xffffffff);
+ } else {
+ status = acpi_evaluate_object(handle, method, NULL, &output);
+ dprintk("__call_snc_method: [%s]\n", method);
+ }
- status = acpi_evaluate_object(handle, name, NULL, &output);
- if ((status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER)) {
- *result = out_obj.integer.value;
- return 0;
+ if (ACPI_FAILURE(status)) {
+ pr_err("Failed to evaluate [%s]\n", method);
+ return NULL;
}
- pr_warn("acpi_callreadfunc failed\n");
+ result = (union acpi_object *) output.pointer;
+ if (!result)
+ dprintk("No return object [%s]\n", method);
- return -1;
+ return result;
}
-static int acpi_callsetfunc(acpi_handle handle, char *name, int value,
- int *result)
+static int sony_nc_int_call(acpi_handle handle, char *name, int *value,
+ int *result)
{
- struct acpi_object_list params;
- union acpi_object in_obj;
- struct acpi_buffer output;
- union acpi_object out_obj;
- acpi_status status;
-
- params.count = 1;
- params.pointer = &in_obj;
- in_obj.type = ACPI_TYPE_INTEGER;
- in_obj.integer.value = value;
+ union acpi_object *object = NULL;
+ if (value) {
+ u64 v = *value;
+ object = __call_snc_method(handle, name, &v);
+ } else
+ object = __call_snc_method(handle, name, NULL);
- output.length = sizeof(out_obj);
- output.pointer = &out_obj;
+ if (!object)
+ return -EINVAL;
- status = acpi_evaluate_object(handle, name, &params, &output);
- if (status == AE_OK) {
- if (result != NULL) {
- if (out_obj.type != ACPI_TYPE_INTEGER) {
- pr_warn("acpi_evaluate_object bad return type\n");
- return -1;
- }
- *result = out_obj.integer.value;
- }
- return 0;
+ if (object->type != ACPI_TYPE_INTEGER) {
+ pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
+ ACPI_TYPE_INTEGER, object->type);
+ kfree(object);
+ return -EINVAL;
}
- pr_warn("acpi_evaluate_object failed\n");
+ if (result)
+ *result = object->integer.value;
- return -1;
+ kfree(object);
+ return 0;
+}
+
+#define MIN(a, b) (a > b ? b : a)
+static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
+ void *buffer, size_t buflen)
+{
+ size_t len = len;
+ union acpi_object *object = __call_snc_method(handle, name, value);
+
+ if (!object)
+ return -EINVAL;
+
+ if (object->type == ACPI_TYPE_BUFFER)
+ len = MIN(buflen, object->buffer.length);
+
+ else if (object->type == ACPI_TYPE_INTEGER)
+ len = MIN(buflen, sizeof(object->integer.value));
+
+ else {
+ pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
+ ACPI_TYPE_BUFFER, object->type);
+ kfree(object);
+ return -EINVAL;
+ }
+
+ memcpy(buffer, object->buffer.pointer, len);
+ kfree(object);
+ return 0;
}
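The SNC wrappers above all reduce to the same acpi_evaluate_object() calling convention: at most one integer argument in, an ACPI_ALLOCATE_BUFFER out, and a caller-freed union acpi_object back. A minimal sketch of that convention; eval_int_method is a hypothetical name, not part of the driver:

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int eval_int_method(acpi_handle handle, char *method, u64 arg,
			   u64 *result)
{
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list params;
	union acpi_object in, *obj;
	acpi_status status;
	int ret = 0;

	in.type = ACPI_TYPE_INTEGER;
	in.integer.value = arg;
	params.count = 1;
	params.pointer = &in;

	status = acpi_evaluate_object(handle, method, &params, &output);
	if (ACPI_FAILURE(status))
		return -EIO;

	obj = output.pointer;
	if (obj && obj->type == ACPI_TYPE_INTEGER)
		*result = obj->integer.value;
	else
		ret = -EINVAL;

	kfree(obj);	/* ACPI_ALLOCATE_BUFFER output is caller-freed */
	return ret;
}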
struct sony_nc_handles {
@@ -770,16 +832,17 @@ static ssize_t sony_nc_handles_show(struct device *dev,
static int sony_nc_handles_setup(struct platform_device *pd)
{
- int i;
- int result;
+ int i, r, result, arg;
handles = kzalloc(sizeof(*handles), GFP_KERNEL);
if (!handles)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
- if (!acpi_callsetfunc(sony_nc_acpi_handle,
- "SN00", i + 0x20, &result)) {
+ arg = i + 0x20;
+ r = sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg,
+ &result);
+ if (!r) {
dprintk("caching handle 0x%.4x (offset: 0x%.2x)\n",
result, i);
handles->cap[i] = result;
@@ -819,8 +882,8 @@ static int sony_find_snc_handle(int handle)
int i;
/* not initialized yet, return early */
- if (!handles)
- return -1;
+ if (!handles || !handle)
+ return -EINVAL;
for (i = 0; i < 0x10; i++) {
if (handles->cap[i] == handle) {
@@ -830,21 +893,20 @@ static int sony_find_snc_handle(int handle)
}
}
dprintk("handle 0x%.4x not found\n", handle);
- return -1;
+ return -EINVAL;
}
static int sony_call_snc_handle(int handle, int argument, int *result)
{
- int ret = 0;
+ int arg, ret = 0;
int offset = sony_find_snc_handle(handle);
if (offset < 0)
- return -1;
+ return offset;
- ret = acpi_callsetfunc(sony_nc_acpi_handle, "SN07", offset | argument,
- result);
- dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", offset | argument,
- *result);
+ arg = offset | argument;
+ ret = sony_nc_int_call(sony_nc_acpi_handle, "SN07", &arg, result);
+ dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", arg, *result);
return ret;
}
@@ -889,14 +951,16 @@ static int boolean_validate(const int direction, const int value)
static ssize_t sony_nc_sysfs_show(struct device *dev, struct device_attribute *attr,
char *buffer)
{
- int value;
+ int value, ret = 0;
struct sony_nc_value *item =
container_of(attr, struct sony_nc_value, devattr);
if (!*item->acpiget)
return -EIO;
- if (acpi_callgetfunc(sony_nc_acpi_handle, *item->acpiget, &value) < 0)
+ ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiget, NULL,
+ &value);
+ if (ret < 0)
return -EIO;
if (item->validate)
@@ -910,6 +974,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
const char *buffer, size_t count)
{
int value;
+ int ret = 0;
struct sony_nc_value *item =
container_of(attr, struct sony_nc_value, devattr);
@@ -919,7 +984,8 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
if (count > 31)
return -EINVAL;
- value = simple_strtoul(buffer, NULL, 10);
+ if (kstrtoint(buffer, 10, &value))
+ return -EINVAL;
if (item->validate)
value = item->validate(SNC_VALIDATE_IN, value);
@@ -927,8 +993,11 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
if (value < 0)
return value;
- if (acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, value, NULL) < 0)
+ ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset,
+ &value, NULL);
+ if (ret < 0)
return -EIO;
+
item->value = value;
item->valid = 1;
return count;
@@ -941,6 +1010,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
struct sony_backlight_props {
struct backlight_device *dev;
int handle;
+ int cmd_base;
u8 offset;
u8 maxlvl;
};
@@ -948,15 +1018,15 @@ struct sony_backlight_props sony_bl_props;
static int sony_backlight_update_status(struct backlight_device *bd)
{
- return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT",
- bd->props.brightness + 1, NULL);
+ int arg = bd->props.brightness + 1;
+ return sony_nc_int_call(sony_nc_acpi_handle, "SBRT", &arg, NULL);
}
static int sony_backlight_get_brightness(struct backlight_device *bd)
{
int value;
- if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value))
+ if (sony_nc_int_call(sony_nc_acpi_handle, "GBRT", NULL, &value))
return 0;
/* brightness levels are 1-based, while backlight ones are 0-based */
return value - 1;
@@ -968,7 +1038,7 @@ static int sony_nc_get_brightness_ng(struct backlight_device *bd)
struct sony_backlight_props *sdev =
(struct sony_backlight_props *)bl_get_data(bd);
- sony_call_snc_handle(sdev->handle, 0x0200, &result);
+ sony_call_snc_handle(sdev->handle, sdev->cmd_base + 0x100, &result);
return (result & 0xff) - sdev->offset;
}
@@ -980,7 +1050,8 @@ static int sony_nc_update_status_ng(struct backlight_device *bd)
(struct sony_backlight_props *)bl_get_data(bd);
value = bd->props.brightness + sdev->offset;
- if (sony_call_snc_handle(sdev->handle, 0x0100 | (value << 16), &result))
+ if (sony_call_snc_handle(sdev->handle, sdev->cmd_base | (value << 0x10),
+ &result))
return -EIO;
return value;
@@ -1024,10 +1095,14 @@ static struct sony_nc_event sony_100_events[] = {
{ 0x06, SONYPI_EVENT_FNKEY_RELEASED },
{ 0x87, SONYPI_EVENT_FNKEY_F7 },
{ 0x07, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x88, SONYPI_EVENT_FNKEY_F8 },
+ { 0x08, SONYPI_EVENT_FNKEY_RELEASED },
{ 0x89, SONYPI_EVENT_FNKEY_F9 },
{ 0x09, SONYPI_EVENT_FNKEY_RELEASED },
{ 0x8A, SONYPI_EVENT_FNKEY_F10 },
{ 0x0A, SONYPI_EVENT_FNKEY_RELEASED },
+ { 0x8B, SONYPI_EVENT_FNKEY_F11 },
+ { 0x0B, SONYPI_EVENT_FNKEY_RELEASED },
{ 0x8C, SONYPI_EVENT_FNKEY_F12 },
{ 0x0C, SONYPI_EVENT_FNKEY_RELEASED },
{ 0x9d, SONYPI_EVENT_ZOOM_PRESSED },
@@ -1063,63 +1138,139 @@ static struct sony_nc_event sony_127_events[] = {
{ 0, 0 },
};
+static int sony_nc_hotkeys_decode(u32 event, unsigned int handle)
+{
+ int ret = -EINVAL;
+ unsigned int result = 0;
+ struct sony_nc_event *key_event;
+
+ if (sony_call_snc_handle(handle, 0x200, &result)) {
+ dprintk("Unable to decode event 0x%.2x 0x%.2x\n", handle,
+ event);
+ return -EINVAL;
+ }
+
+ result &= 0xFF;
+
+ if (handle == 0x0100)
+ key_event = sony_100_events;
+ else
+ key_event = sony_127_events;
+
+ for (; key_event->data; key_event++) {
+ if (key_event->data == result) {
+ ret = key_event->event;
+ break;
+ }
+ }
+
+ if (!key_event->data)
+ pr_info("Unknown hotkey 0x%.2x/0x%.2x (handle 0x%.2x)\n",
+ event, result, handle);
+
+ return ret;
+}
+
/*
* ACPI callbacks
*/
+enum event_types {
+ HOTKEY = 1,
+ KILLSWITCH,
+ GFX_SWITCH
+};
static void sony_nc_notify(struct acpi_device *device, u32 event)
{
- u32 ev = event;
+ u32 real_ev = event;
+ u8 ev_type = 0;
+ dprintk("sony_nc_notify, event: 0x%.2x\n", event);
+
+ if (event >= 0x90) {
+ unsigned int result = 0;
+ unsigned int arg = 0;
+ unsigned int handle = 0;
+ unsigned int offset = event - 0x90;
+
+ if (offset >= ARRAY_SIZE(handles->cap)) {
+ pr_err("Event 0x%x outside of capabilities list\n",
+ event);
+ return;
+ }
+ handle = handles->cap[offset];
+
+ /* list of handles known for generating events */
+ switch (handle) {
+ /* hotkey event */
+ case 0x0100:
+ case 0x0127:
+ ev_type = HOTKEY;
+ real_ev = sony_nc_hotkeys_decode(event, handle);
+
+ if (real_ev > 0)
+ sony_laptop_report_input_event(real_ev);
+ else
+ /* restore the original event for reporting */
+ real_ev = event;
- if (ev >= 0x90) {
- /* New-style event */
- int result;
- int key_handle = 0;
- ev -= 0x90;
-
- if (sony_find_snc_handle(0x100) == ev)
- key_handle = 0x100;
- if (sony_find_snc_handle(0x127) == ev)
- key_handle = 0x127;
-
- if (key_handle) {
- struct sony_nc_event *key_event;
-
- if (sony_call_snc_handle(key_handle, 0x200, &result)) {
- dprintk("sony_nc_notify, unable to decode"
- " event 0x%.2x 0x%.2x\n", key_handle,
- ev);
- /* restore the original event */
- ev = event;
- } else {
- ev = result & 0xFF;
-
- if (key_handle == 0x100)
- key_event = sony_100_events;
- else
- key_event = sony_127_events;
-
- for (; key_event->data; key_event++) {
- if (key_event->data == ev) {
- ev = key_event->event;
- break;
- }
- }
+ break;
- if (!key_event->data)
- pr_info("Unknown event: 0x%x 0x%x\n",
- key_handle, ev);
- else
- sony_laptop_report_input_event(ev);
- }
- } else if (sony_find_snc_handle(sony_rfkill_handle) == ev) {
- sony_nc_rfkill_update();
- return;
+ /* wlan switch */
+ case 0x0124:
+ case 0x0135:
+ /* events on this handle are reported when the
+ * switch changes position or for battery
+ * events. We'll notify both of them but only
+ * update the rfkill device status when the
+ * switch is moved.
+ */
+ ev_type = KILLSWITCH;
+ sony_call_snc_handle(handle, 0x0100, &result);
+ real_ev = result & 0x03;
+
+ /* hw switch event */
+ if (real_ev == 1)
+ sony_nc_rfkill_update();
+
+ break;
+
+ case 0x0128:
+ case 0x0146:
+ /* Hybrid GFX switching */
+ sony_call_snc_handle(handle, 0x0000, &result);
+ dprintk("GFX switch event received (reason: %s)\n",
+ (result & 0x01) ?
+ "switch change" : "unknown");
+
+ /* verify the switch state
+ * 1: discrete GFX
+ * 0: integrated GFX
+ */
+ sony_call_snc_handle(handle, 0x0100, &result);
+
+ ev_type = GFX_SWITCH;
+ real_ev = result & 0xff;
+ break;
+
+ default:
+ dprintk("Unknown event 0x%x for handle 0x%x\n",
+ event, handle);
+ break;
}
- } else
- sony_laptop_report_input_event(ev);
- dprintk("sony_nc_notify, event: 0x%.2x\n", ev);
- acpi_bus_generate_proc_event(sony_nc_acpi_device, 1, ev);
+ /* clear the event (and the event reason when present) */
+ arg = 1 << offset;
+ sony_nc_int_call(sony_nc_acpi_handle, "SN05", &arg, &result);
+
+ } else {
+ /* old style event */
+ ev_type = HOTKEY;
+ sony_laptop_report_input_event(real_ev);
+ }
+
+ acpi_bus_generate_proc_event(sony_nc_acpi_device, ev_type, real_ev);
+
+ acpi_bus_generate_netlink_event(sony_nc_acpi_device->pnp.device_class,
+ dev_name(&sony_nc_acpi_device->dev), ev_type, real_ev);
}
static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
@@ -1140,23 +1291,193 @@ static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
/*
* ACPI device
*/
-static int sony_nc_function_setup(struct acpi_device *device)
+static void sony_nc_function_setup(struct acpi_device *device,
+ struct platform_device *pf_device)
{
- int result;
+ unsigned int i, result, bitmask, arg;
+
+ if (!handles)
+ return;
+
+ /* setup found handles here */
+ for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
+ unsigned int handle = handles->cap[i];
+
+ if (!handle)
+ continue;
+
+ dprintk("setting up handle 0x%.4x\n", handle);
+
+ switch (handle) {
+ case 0x0100:
+ case 0x0101:
+ case 0x0127:
+ /* setup hotkeys */
+ sony_call_snc_handle(handle, 0, &result);
+ break;
+ case 0x0102:
+ /* setup hotkeys */
+ sony_call_snc_handle(handle, 0x100, &result);
+ break;
+ case 0x0105:
+ case 0x0148:
+ /* touchpad enable/disable */
+ result = sony_nc_touchpad_setup(pf_device, handle);
+ if (result)
+ pr_err("couldn't set up touchpad control function (%d)\n",
+ result);
+ break;
+ case 0x0115:
+ case 0x0136:
+ case 0x013f:
+ result = sony_nc_battery_care_setup(pf_device, handle);
+ if (result)
+ pr_err("couldn't set up battery care function (%d)\n",
+ result);
+ break;
+ case 0x0119:
+ result = sony_nc_lid_resume_setup(pf_device);
+ if (result)
+ pr_err("couldn't set up lid resume function (%d)\n",
+ result);
+ break;
+ case 0x0122:
+ result = sony_nc_thermal_setup(pf_device);
+ if (result)
+ pr_err("couldn't set up thermal profile function (%d)\n",
+ result);
+ break;
+ case 0x0131:
+ result = sony_nc_highspeed_charging_setup(pf_device);
+ if (result)
+ pr_err("couldn't set up high speed charging function (%d)\n",
+ result);
+ break;
+ case 0x0124:
+ case 0x0135:
+ result = sony_nc_rfkill_setup(device, handle);
+ if (result)
+ pr_err("couldn't set up rfkill support (%d)\n",
+ result);
+ break;
+ case 0x0137:
+ case 0x0143:
+ result = sony_nc_kbd_backlight_setup(pf_device, handle);
+ if (result)
+ pr_err("couldn't set up keyboard backlight function (%d)\n",
+ result);
+ break;
+ default:
+ continue;
+ }
+ }
/* Enable all events */
- acpi_callsetfunc(sony_nc_acpi_handle, "SN02", 0xffff, &result);
+ arg = 0x10;
+ if (!sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &bitmask))
+ sony_nc_int_call(sony_nc_acpi_handle, "SN02", &bitmask,
+ &result);
+}
+
+static void sony_nc_function_cleanup(struct platform_device *pd)
+{
+ unsigned int i, result, bitmask, handle;
- /* Setup hotkeys */
- sony_call_snc_handle(0x0100, 0, &result);
- sony_call_snc_handle(0x0101, 0, &result);
- sony_call_snc_handle(0x0102, 0x100, &result);
- sony_call_snc_handle(0x0127, 0, &result);
+ /* get enabled events and disable them */
+ sony_nc_int_call(sony_nc_acpi_handle, "SN01", NULL, &bitmask);
+ sony_nc_int_call(sony_nc_acpi_handle, "SN03", &bitmask, &result);
- return 0;
+ /* cleanup handles here */
+ for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
+
+ handle = handles->cap[i];
+
+ if (!handle)
+ continue;
+
+ switch (handle) {
+ case 0x0105:
+ case 0x0148:
+ sony_nc_touchpad_cleanup(pd);
+ break;
+ case 0x0115:
+ case 0x0136:
+ case 0x013f:
+ sony_nc_battery_care_cleanup(pd);
+ break;
+ case 0x0119:
+ sony_nc_lid_resume_cleanup(pd);
+ break;
+ case 0x0122:
+ sony_nc_thermal_cleanup(pd);
+ break;
+ case 0x0131:
+ sony_nc_highspeed_charging_cleanup(pd);
+ break;
+ case 0x0124:
+ case 0x0135:
+ sony_nc_rfkill_cleanup();
+ break;
+ case 0x0137:
+ case 0x0143:
+ sony_nc_kbd_backlight_cleanup(pd);
+ break;
+ default:
+ continue;
+ }
+ }
+
+ /* finally cleanup the handles list */
+ sony_nc_handles_cleanup(pd);
}
-static int sony_nc_resume(struct acpi_device *device)
+static void sony_nc_function_resume(void)
+{
+ unsigned int i, result, bitmask, arg;
+
+ dprintk("Resuming SNC device\n");
+
+ for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
+ unsigned int handle = handles->cap[i];
+
+ if (!handle)
+ continue;
+
+ switch (handle) {
+ case 0x0100:
+ case 0x0101:
+ case 0x0127:
+ /* re-enable hotkeys */
+ sony_call_snc_handle(handle, 0, &result);
+ break;
+ case 0x0102:
+ /* re-enable hotkeys */
+ sony_call_snc_handle(handle, 0x100, &result);
+ break;
+ case 0x0122:
+ sony_nc_thermal_resume();
+ break;
+ case 0x0124:
+ case 0x0135:
+ sony_nc_rfkill_update();
+ break;
+ case 0x0137:
+ case 0x0143:
+ sony_nc_kbd_backlight_resume();
+ break;
+ default:
+ continue;
+ }
+ }
+
+ /* Enable all events */
+ arg = 0x10;
+ if (!sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &bitmask))
+ sony_nc_int_call(sony_nc_acpi_handle, "SN02", &bitmask,
+ &result);
+}
+
+static int sony_nc_resume(struct device *dev)
{
struct sony_nc_value *item;
acpi_handle handle;
@@ -1166,8 +1487,8 @@ static int sony_nc_resume(struct acpi_device *device)
if (!item->valid)
continue;
- ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset,
- item->value, NULL);
+ ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset,
+ &item->value, NULL);
if (ret < 0) {
pr_err("%s: %d\n", __func__, ret);
break;
@@ -1176,25 +1497,20 @@ static int sony_nc_resume(struct acpi_device *device)
if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
&handle))) {
- if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL))
+ int arg = 1;
+ if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
dprintk("ECON Method failed\n");
}
if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
- &handle))) {
- dprintk("Doing SNC setup\n");
- sony_nc_function_setup(device);
- }
-
- /* re-read rfkill state */
- sony_nc_rfkill_update();
-
- /* restore kbd backlight states */
- sony_nc_kbd_backlight_resume();
+ &handle)))
+ sony_nc_function_resume();
return 0;
}
+static SIMPLE_DEV_PM_OPS(sony_nc_pm, NULL, sony_nc_resume);
+
static void sony_nc_rfkill_cleanup(void)
{
int i;
@@ -1213,7 +1529,7 @@ static int sony_nc_rfkill_set(void *data, bool blocked)
int argument = sony_rfkill_address[(long) data] + 0x100;
if (!blocked)
- argument |= 0xff0000;
+ argument |= 0x030000;
return sony_call_snc_handle(sony_rfkill_handle, argument, &result);
}
@@ -1230,7 +1546,7 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
enum rfkill_type type;
const char *name;
int result;
- bool hwblock;
+ bool hwblock, swblock;
switch (nc_type) {
case SONY_WIFI:
@@ -1258,8 +1574,21 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
if (!rfk)
return -ENOMEM;
- sony_call_snc_handle(sony_rfkill_handle, 0x200, &result);
+ if (sony_call_snc_handle(sony_rfkill_handle, 0x200, &result) < 0) {
+ rfkill_destroy(rfk);
+ return -1;
+ }
hwblock = !(result & 0x1);
+
+ if (sony_call_snc_handle(sony_rfkill_handle,
+ sony_rfkill_address[nc_type],
+ &result) < 0) {
+ rfkill_destroy(rfk);
+ return -1;
+ }
+ swblock = !(result & 0x2);
+
+ rfkill_init_sw_state(rfk, swblock);
rfkill_set_hw_state(rfk, hwblock);
err = rfkill_register(rfk);
@@ -1295,101 +1624,79 @@ static void sony_nc_rfkill_update(void)
sony_call_snc_handle(sony_rfkill_handle, argument, &result);
rfkill_set_states(sony_rfkill_devices[i],
- !(result & 0xf), false);
+ !(result & 0x2), false);
}
}
-static void sony_nc_rfkill_setup(struct acpi_device *device)
+static int sony_nc_rfkill_setup(struct acpi_device *device,
+ unsigned int handle)
{
- int offset;
- u8 dev_code, i;
- acpi_status status;
- struct acpi_object_list params;
- union acpi_object in_obj;
- union acpi_object *device_enum;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-
- offset = sony_find_snc_handle(0x124);
- if (offset == -1) {
- offset = sony_find_snc_handle(0x135);
- if (offset == -1)
- return;
- else
- sony_rfkill_handle = 0x135;
- } else
- sony_rfkill_handle = 0x124;
- dprintk("Found rkfill handle: 0x%.4x\n", sony_rfkill_handle);
-
- /* need to read the whole buffer returned by the acpi call to SN06
- * here otherwise we may miss some features
- */
- params.count = 1;
- params.pointer = &in_obj;
- in_obj.type = ACPI_TYPE_INTEGER;
- in_obj.integer.value = offset;
- status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
- &buffer);
- if (ACPI_FAILURE(status)) {
- dprintk("Radio device enumeration failed\n");
- return;
- }
-
- device_enum = (union acpi_object *) buffer.pointer;
- if (!device_enum) {
- pr_err("No SN06 return object\n");
- goto out_no_enum;
- }
- if (device_enum->type != ACPI_TYPE_BUFFER) {
- pr_err("Invalid SN06 return object 0x%.2x\n",
- device_enum->type);
- goto out_no_enum;
- }
+ u64 offset;
+ int i;
+ unsigned char buffer[32] = { 0 };
- /* the buffer is filled with magic numbers describing the devices
- * available, 0xff terminates the enumeration
+ offset = sony_find_snc_handle(handle);
+ sony_rfkill_handle = handle;
+
+ i = sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buffer,
+ 32);
+ if (i < 0)
+ return i;
+
+ /* The buffer is filled with magic numbers describing the devices
+ * available, 0xff terminates the enumeration.
+ * Known codes:
+ * 0x00 WLAN
+ * 0x10 BLUETOOTH
+ * 0x20 WWAN GPRS-EDGE
+ * 0x21 WWAN HSDPA
+ * 0x22 WWAN EV-DO
+ * 0x23 WWAN GPS
+ * 0x25 Gobi WWAN no GPS
+ * 0x26 Gobi WWAN + GPS
+ * 0x28 Gobi WWAN no GPS
+ * 0x29 Gobi WWAN + GPS
+ * 0x30 WIMAX
+ * 0x50 Gobi WWAN no GPS
+ * 0x51 Gobi WWAN + GPS
+ * 0x70 no SIM card slot
+ * 0x71 SIM card slot
*/
- for (i = 0; i < device_enum->buffer.length; i++) {
+ for (i = 0; i < ARRAY_SIZE(buffer); i++) {
- dev_code = *(device_enum->buffer.pointer + i);
- if (dev_code == 0xff)
+ if (buffer[i] == 0xff)
break;
- dprintk("Radio devices, looking at 0x%.2x\n", dev_code);
+ dprintk("Radio devices, found 0x%.2x\n", buffer[i]);
- if (dev_code == 0 && !sony_rfkill_devices[SONY_WIFI])
+ if (buffer[i] == 0 && !sony_rfkill_devices[SONY_WIFI])
sony_nc_setup_rfkill(device, SONY_WIFI);
- if (dev_code == 0x10 && !sony_rfkill_devices[SONY_BLUETOOTH])
+ if (buffer[i] == 0x10 && !sony_rfkill_devices[SONY_BLUETOOTH])
sony_nc_setup_rfkill(device, SONY_BLUETOOTH);
- if ((0xf0 & dev_code) == 0x20 &&
+ if (((0xf0 & buffer[i]) == 0x20 ||
+ (0xf0 & buffer[i]) == 0x50) &&
!sony_rfkill_devices[SONY_WWAN])
sony_nc_setup_rfkill(device, SONY_WWAN);
- if (dev_code == 0x30 && !sony_rfkill_devices[SONY_WIMAX])
+ if (buffer[i] == 0x30 && !sony_rfkill_devices[SONY_WIMAX])
sony_nc_setup_rfkill(device, SONY_WIMAX);
}
-
-out_no_enum:
- kfree(buffer.pointer);
- return;
+ return 0;
}
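
Illustrative aside, not part of the patch: the mapping from SN06 magic bytes to rfkill device types in sony_nc_rfkill_setup() above can be sketched as a small standalone userspace C program; snc_radio_name() is a hypothetical helper that exists only for this example.

#include <stdio.h>

static const char *snc_radio_name(unsigned char code)
{
	if (code == 0x00)
		return "WLAN";
	if (code == 0x10)
		return "Bluetooth";
	if ((code & 0xf0) == 0x20 || (code & 0xf0) == 0x50)
		return "WWAN";
	if (code == 0x30)
		return "WiMAX";
	return "unknown";
}

int main(void)
{
	/* example enumeration: WLAN, Bluetooth, Gobi WWAN + GPS, terminator */
	unsigned char buf[] = { 0x00, 0x10, 0x26, 0xff };
	unsigned int i;

	for (i = 0; i < sizeof(buf) && buf[i] != 0xff; i++)
		printf("0x%.2x -> %s\n", buf[i], snc_radio_name(buf[i]));
	return 0;
}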
/* Keyboard backlight feature */
-#define KBDBL_HANDLER 0x137
-#define KBDBL_PRESENT 0xB00
-#define SET_MODE 0xC00
-#define SET_STATE 0xD00
-#define SET_TIMEOUT 0xE00
-
struct kbd_backlight {
- int mode;
- int timeout;
+ unsigned int handle;
+ unsigned int base;
+ unsigned int mode;
+ unsigned int timeout;
struct device_attribute mode_attr;
struct device_attribute timeout_attr;
};
-static struct kbd_backlight *kbdbl_handle;
+static struct kbd_backlight *kbdbl_ctl;
static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
{
@@ -1398,15 +1705,15 @@ static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
if (value > 1)
return -EINVAL;
- if (sony_call_snc_handle(KBDBL_HANDLER,
- (value << 0x10) | SET_MODE, &result))
+ if (sony_call_snc_handle(kbdbl_ctl->handle,
+ (value << 0x10) | (kbdbl_ctl->base), &result))
return -EIO;
/* Try to turn the light on/off immediately */
- sony_call_snc_handle(KBDBL_HANDLER, (value << 0x10) | SET_STATE,
- &result);
+ sony_call_snc_handle(kbdbl_ctl->handle,
+ (value << 0x10) | (kbdbl_ctl->base + 0x100), &result);
- kbdbl_handle->mode = value;
+ kbdbl_ctl->mode = value;
return 0;
}
@@ -1421,7 +1728,7 @@ static ssize_t sony_nc_kbd_backlight_mode_store(struct device *dev,
if (count > 31)
return -EINVAL;
- if (strict_strtoul(buffer, 10, &value))
+ if (kstrtoul(buffer, 10, &value))
return -EINVAL;
ret = __sony_nc_kbd_backlight_mode_set(value);
@@ -1435,7 +1742,7 @@ static ssize_t sony_nc_kbd_backlight_mode_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
ssize_t count = 0;
- count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->mode);
+ count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_ctl->mode);
return count;
}
@@ -1446,11 +1753,11 @@ static int __sony_nc_kbd_backlight_timeout_set(u8 value)
if (value > 3)
return -EINVAL;
- if (sony_call_snc_handle(KBDBL_HANDLER,
- (value << 0x10) | SET_TIMEOUT, &result))
+ if (sony_call_snc_handle(kbdbl_ctl->handle, (value << 0x10) |
+ (kbdbl_ctl->base + 0x200), &result))
return -EIO;
- kbdbl_handle->timeout = value;
+ kbdbl_ctl->timeout = value;
return 0;
}
@@ -1465,7 +1772,7 @@ static ssize_t sony_nc_kbd_backlight_timeout_store(struct device *dev,
if (count > 31)
return -EINVAL;
- if (strict_strtoul(buffer, 10, &value))
+ if (kstrtoul(buffer, 10, &value))
return -EINVAL;
ret = __sony_nc_kbd_backlight_timeout_set(value);
@@ -1479,39 +1786,58 @@ static ssize_t sony_nc_kbd_backlight_timeout_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
ssize_t count = 0;
- count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->timeout);
+ count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_ctl->timeout);
return count;
}
-static int sony_nc_kbd_backlight_setup(struct platform_device *pd)
+static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
+ unsigned int handle)
{
int result;
+ int ret = 0;
- if (sony_call_snc_handle(KBDBL_HANDLER, KBDBL_PRESENT, &result))
- return 0;
- if (!(result & 0x02))
+	/* verify the keyboard backlight presence; these handles are not
+	 * used exclusively for the keyboard backlight
+	 */
+ ret = sony_call_snc_handle(handle, handle == 0x0137 ? 0x0B00 : 0x0100,
+ &result);
+ if (ret)
+ return ret;
+
+ if ((handle == 0x0137 && !(result & 0x02)) ||
+ !(result & 0x01)) {
+		dprintk("no keyboard backlight found\n");
return 0;
+ }
- kbdbl_handle = kzalloc(sizeof(*kbdbl_handle), GFP_KERNEL);
- if (!kbdbl_handle)
+ kbdbl_ctl = kzalloc(sizeof(*kbdbl_ctl), GFP_KERNEL);
+ if (!kbdbl_ctl)
return -ENOMEM;
- sysfs_attr_init(&kbdbl_handle->mode_attr.attr);
- kbdbl_handle->mode_attr.attr.name = "kbd_backlight";
- kbdbl_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
- kbdbl_handle->mode_attr.show = sony_nc_kbd_backlight_mode_show;
- kbdbl_handle->mode_attr.store = sony_nc_kbd_backlight_mode_store;
+ kbdbl_ctl->handle = handle;
+ if (handle == 0x0137)
+ kbdbl_ctl->base = 0x0C00;
+ else
+ kbdbl_ctl->base = 0x4000;
+
+ sysfs_attr_init(&kbdbl_ctl->mode_attr.attr);
+ kbdbl_ctl->mode_attr.attr.name = "kbd_backlight";
+ kbdbl_ctl->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
+ kbdbl_ctl->mode_attr.show = sony_nc_kbd_backlight_mode_show;
+ kbdbl_ctl->mode_attr.store = sony_nc_kbd_backlight_mode_store;
- sysfs_attr_init(&kbdbl_handle->timeout_attr.attr);
- kbdbl_handle->timeout_attr.attr.name = "kbd_backlight_timeout";
- kbdbl_handle->timeout_attr.attr.mode = S_IRUGO | S_IWUSR;
- kbdbl_handle->timeout_attr.show = sony_nc_kbd_backlight_timeout_show;
- kbdbl_handle->timeout_attr.store = sony_nc_kbd_backlight_timeout_store;
+ sysfs_attr_init(&kbdbl_ctl->timeout_attr.attr);
+ kbdbl_ctl->timeout_attr.attr.name = "kbd_backlight_timeout";
+ kbdbl_ctl->timeout_attr.attr.mode = S_IRUGO | S_IWUSR;
+ kbdbl_ctl->timeout_attr.show = sony_nc_kbd_backlight_timeout_show;
+ kbdbl_ctl->timeout_attr.store = sony_nc_kbd_backlight_timeout_store;
- if (device_create_file(&pd->dev, &kbdbl_handle->mode_attr))
+ ret = device_create_file(&pd->dev, &kbdbl_ctl->mode_attr);
+ if (ret)
goto outkzalloc;
- if (device_create_file(&pd->dev, &kbdbl_handle->timeout_attr))
+ ret = device_create_file(&pd->dev, &kbdbl_ctl->timeout_attr);
+ if (ret)
goto outmode;
__sony_nc_kbd_backlight_mode_set(kbd_backlight);
@@ -1520,113 +1846,708 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd)
return 0;
outmode:
- device_remove_file(&pd->dev, &kbdbl_handle->mode_attr);
+ device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
outkzalloc:
- kfree(kbdbl_handle);
- kbdbl_handle = NULL;
- return -1;
+ kfree(kbdbl_ctl);
+ kbdbl_ctl = NULL;
+ return ret;
}
-static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
+static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
{
- if (kbdbl_handle) {
+ if (kbdbl_ctl) {
int result;
- device_remove_file(&pd->dev, &kbdbl_handle->mode_attr);
- device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr);
+ device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
+ device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr);
/* restore the default hw behaviour */
- sony_call_snc_handle(KBDBL_HANDLER, 0x1000 | SET_MODE, &result);
- sony_call_snc_handle(KBDBL_HANDLER, SET_TIMEOUT, &result);
+ sony_call_snc_handle(kbdbl_ctl->handle,
+ kbdbl_ctl->base | 0x10000, &result);
+ sony_call_snc_handle(kbdbl_ctl->handle,
+ kbdbl_ctl->base + 0x200, &result);
- kfree(kbdbl_handle);
+ kfree(kbdbl_ctl);
+ kbdbl_ctl = NULL;
}
- return 0;
}
static void sony_nc_kbd_backlight_resume(void)
{
int ignore = 0;
- if (!kbdbl_handle)
+ if (!kbdbl_ctl)
return;
- if (kbdbl_handle->mode == 0)
- sony_call_snc_handle(KBDBL_HANDLER, SET_MODE, &ignore);
-
- if (kbdbl_handle->timeout != 0)
- sony_call_snc_handle(KBDBL_HANDLER,
- (kbdbl_handle->timeout << 0x10) | SET_TIMEOUT,
+ if (kbdbl_ctl->mode == 0)
+ sony_call_snc_handle(kbdbl_ctl->handle, kbdbl_ctl->base,
&ignore);
+
+ if (kbdbl_ctl->timeout != 0)
+ sony_call_snc_handle(kbdbl_ctl->handle,
+ (kbdbl_ctl->base + 0x200) |
+ (kbdbl_ctl->timeout << 0x10), &ignore);
+}
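
Illustrative aside, not part of the patch: a minimal userspace sketch of the keyboard backlight command encoding used above. The handle selects the command base (0x0C00 for handle 0x0137, 0x4000 otherwise), the mode/state/timeout sub-commands sit at base, base + 0x100 and base + 0x200, and the value travels in bits 16 and up; kbdbl_arg() is a hypothetical helper.

#include <stdio.h>

static unsigned int kbdbl_arg(unsigned int base, unsigned int off,
			      unsigned int value)
{
	return (value << 0x10) | (base + off);
}

int main(void)
{
	unsigned int base137 = 0x0c00;	/* handle 0x0137 */
	unsigned int base143 = 0x4000;	/* other handles, e.g. 0x0143 */

	printf("0x0137 set mode=1:    0x%.8x\n", kbdbl_arg(base137, 0x000, 1));
	printf("0x0137 set state=1:   0x%.8x\n", kbdbl_arg(base137, 0x100, 1));
	printf("0x0143 set timeout=3: 0x%.8x\n", kbdbl_arg(base143, 0x200, 3));
	return 0;
}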
+
+struct battery_care_control {
+ struct device_attribute attrs[2];
+ unsigned int handle;
+};
+static struct battery_care_control *bcare_ctl;
+
+static ssize_t sony_nc_battery_care_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned int result, cmd;
+ unsigned long value;
+
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value))
+ return -EINVAL;
+
+ /* limit values (2 bits):
+ * 00 - none
+ * 01 - 80%
+ * 10 - 50%
+ * 11 - 100%
+ *
+ * bit 0: 0 disable BCL, 1 enable BCL
+	 * bit 1: 1 to also store the battery limit (see bits 6,7)
+ * bits 2,3: reserved
+ * bits 4,5: store the limit into the EC
+ * bits 6,7: store the limit into the battery
+ */
+ cmd = 0;
+
+ if (value > 0) {
+ if (value <= 50)
+ cmd = 0x20;
+
+ else if (value <= 80)
+ cmd = 0x10;
+
+ else if (value <= 100)
+ cmd = 0x30;
+
+ else
+ return -EINVAL;
+
+ /*
+ * handle 0x0115 should allow storing on battery too;
+ * handle 0x0136 same as 0x0115 + health status;
+ * handle 0x013f, same as 0x0136 but no storing on the battery
+ */
+ if (bcare_ctl->handle != 0x013f)
+ cmd = cmd | (cmd << 2);
+
+ cmd = (cmd | 0x1) << 0x10;
+ }
+
+ if (sony_call_snc_handle(bcare_ctl->handle, cmd | 0x0100, &result))
+ return -EIO;
+
+ return count;
+}
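
Illustrative aside, not part of the patch: a minimal userspace sketch of the battery-care command encoding described in the comment above; bcare_cmd() is hypothetical and omits the driver's input validation.

#include <stdio.h>

static unsigned int bcare_cmd(unsigned int handle, unsigned long limit)
{
	unsigned int cmd = 0;

	if (limit > 0) {
		if (limit <= 50)
			cmd = 0x20;
		else if (limit <= 80)
			cmd = 0x10;
		else
			cmd = 0x30;	/* treat anything else as 100% here */

		if (handle != 0x013f)	/* also store the limit in the battery */
			cmd |= cmd << 2;

		cmd = (cmd | 0x1) << 0x10;
	}

	return cmd | 0x0100;	/* the "set limit" sub-command */
}

int main(void)
{
	printf("0x0136, 80%%: 0x%.8x\n", bcare_cmd(0x0136, 80));
	printf("0x013f, 50%%: 0x%.8x\n", bcare_cmd(0x013f, 50));
	printf("disable:     0x%.8x\n", bcare_cmd(0x0136, 0));
	return 0;
}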
+
+static ssize_t sony_nc_battery_care_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int result, status;
+
+ if (sony_call_snc_handle(bcare_ctl->handle, 0x0000, &result))
+ return -EIO;
+
+ status = (result & 0x01) ? ((result & 0x30) >> 0x04) : 0;
+ switch (status) {
+ case 1:
+ status = 80;
+ break;
+ case 2:
+ status = 50;
+ break;
+ case 3:
+ status = 100;
+ break;
+ default:
+ status = 0;
+ break;
+ }
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n", status);
+}
+
+static ssize_t sony_nc_battery_care_health_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ ssize_t count = 0;
+ unsigned int health;
+
+ if (sony_call_snc_handle(bcare_ctl->handle, 0x0200, &health))
+ return -EIO;
+
+ count = snprintf(buffer, PAGE_SIZE, "%d\n", health & 0xff);
+
+ return count;
+}
+
+static int sony_nc_battery_care_setup(struct platform_device *pd,
+ unsigned int handle)
+{
+ int ret = 0;
+
+ bcare_ctl = kzalloc(sizeof(struct battery_care_control), GFP_KERNEL);
+ if (!bcare_ctl)
+ return -ENOMEM;
+
+ bcare_ctl->handle = handle;
+
+ sysfs_attr_init(&bcare_ctl->attrs[0].attr);
+ bcare_ctl->attrs[0].attr.name = "battery_care_limiter";
+ bcare_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR;
+ bcare_ctl->attrs[0].show = sony_nc_battery_care_limit_show;
+ bcare_ctl->attrs[0].store = sony_nc_battery_care_limit_store;
+
+ ret = device_create_file(&pd->dev, &bcare_ctl->attrs[0]);
+ if (ret)
+ goto outkzalloc;
+
+ /* 0x0115 is for models with no health reporting capability */
+ if (handle == 0x0115)
+ return 0;
+
+ sysfs_attr_init(&bcare_ctl->attrs[1].attr);
+ bcare_ctl->attrs[1].attr.name = "battery_care_health";
+ bcare_ctl->attrs[1].attr.mode = S_IRUGO;
+ bcare_ctl->attrs[1].show = sony_nc_battery_care_health_show;
+
+ ret = device_create_file(&pd->dev, &bcare_ctl->attrs[1]);
+ if (ret)
+ goto outlimiter;
+
+ return 0;
+
+outlimiter:
+ device_remove_file(&pd->dev, &bcare_ctl->attrs[0]);
+
+outkzalloc:
+ kfree(bcare_ctl);
+ bcare_ctl = NULL;
+
+ return ret;
+}
+
+static void sony_nc_battery_care_cleanup(struct platform_device *pd)
+{
+ if (bcare_ctl) {
+ device_remove_file(&pd->dev, &bcare_ctl->attrs[0]);
+ if (bcare_ctl->handle != 0x0115)
+ device_remove_file(&pd->dev, &bcare_ctl->attrs[1]);
+
+ kfree(bcare_ctl);
+ bcare_ctl = NULL;
+ }
+}
+
+struct snc_thermal_ctrl {
+ unsigned int mode;
+ unsigned int profiles;
+ struct device_attribute mode_attr;
+ struct device_attribute profiles_attr;
+};
+static struct snc_thermal_ctrl *th_handle;
+
+#define THM_PROFILE_MAX 3
+static const char * const snc_thermal_profiles[] = {
+ "balanced",
+ "silent",
+ "performance"
+};
+
+static int sony_nc_thermal_mode_set(unsigned short mode)
+{
+ unsigned int result;
+
+ /* the thermal profile seems to be a two bit bitmask:
+ * lsb -> silent
+ * msb -> performance
+ * no bit set is the normal operation and is always valid
+	 * Some Vaio models only have "balanced" and "performance"
+ */
+ if ((mode && !(th_handle->profiles & mode)) || mode >= THM_PROFILE_MAX)
+ return -EINVAL;
+
+ if (sony_call_snc_handle(0x0122, mode << 0x10 | 0x0200, &result))
+ return -EIO;
+
+ th_handle->mode = mode;
+
+ return 0;
+}
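
Illustrative aside, not part of the patch: a minimal sketch of the profile-mask check and the argument word passed to handle 0x0122 above; the example profile mask is made up.

#include <stdio.h>

#define THM_PROFILE_MAX 3

int main(void)
{
	unsigned int profiles = 0x2;	/* example: only "performance" advertised */
	unsigned short mode = 2;	/* request "performance" */

	if ((mode && !(profiles & mode)) || mode >= THM_PROFILE_MAX) {
		printf("profile not supported\n");
		return 1;
	}

	/* argument that would be passed to sony_call_snc_handle(0x0122, ...) */
	printf("set arg: 0x%.8x\n", (unsigned int)(mode << 0x10 | 0x0200));
	return 0;
}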
+
+static int sony_nc_thermal_mode_get(void)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x0122, 0x0100, &result))
+ return -EIO;
+
+ return result & 0xff;
+}
+
+static ssize_t sony_nc_thermal_profiles_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ short cnt;
+ size_t idx = 0;
+
+ for (cnt = 0; cnt < THM_PROFILE_MAX; cnt++) {
+ if (!cnt || (th_handle->profiles & cnt))
+ idx += snprintf(buffer + idx, PAGE_SIZE - idx, "%s ",
+ snc_thermal_profiles[cnt]);
+ }
+ idx += snprintf(buffer + idx, PAGE_SIZE - idx, "\n");
+
+ return idx;
+}
+
+static ssize_t sony_nc_thermal_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned short cmd;
+ size_t len = count;
+
+ if (count == 0)
+ return -EINVAL;
+
+ /* skip the newline if present */
+ if (buffer[len - 1] == '\n')
+ len--;
+
+ for (cmd = 0; cmd < THM_PROFILE_MAX; cmd++)
+ if (strncmp(buffer, snc_thermal_profiles[cmd], len) == 0)
+ break;
+
+ if (sony_nc_thermal_mode_set(cmd))
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t sony_nc_thermal_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ ssize_t count = 0;
+ int mode = sony_nc_thermal_mode_get();
+
+ if (mode < 0)
+ return mode;
+
+ count = snprintf(buffer, PAGE_SIZE, "%s\n", snc_thermal_profiles[mode]);
+
+ return count;
+}
+
+static int sony_nc_thermal_setup(struct platform_device *pd)
+{
+ int ret = 0;
+ th_handle = kzalloc(sizeof(struct snc_thermal_ctrl), GFP_KERNEL);
+ if (!th_handle)
+ return -ENOMEM;
+
+ ret = sony_call_snc_handle(0x0122, 0x0000, &th_handle->profiles);
+ if (ret) {
+		pr_warn("couldn't read the thermal profiles\n");
+ goto outkzalloc;
+ }
+
+ ret = sony_nc_thermal_mode_get();
+ if (ret < 0) {
+		pr_warn("couldn't read the current thermal profile\n");
+ goto outkzalloc;
+ }
+ th_handle->mode = ret;
+
+ sysfs_attr_init(&th_handle->profiles_attr.attr);
+ th_handle->profiles_attr.attr.name = "thermal_profiles";
+ th_handle->profiles_attr.attr.mode = S_IRUGO;
+ th_handle->profiles_attr.show = sony_nc_thermal_profiles_show;
+
+ sysfs_attr_init(&th_handle->mode_attr.attr);
+ th_handle->mode_attr.attr.name = "thermal_control";
+ th_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
+ th_handle->mode_attr.show = sony_nc_thermal_mode_show;
+ th_handle->mode_attr.store = sony_nc_thermal_mode_store;
+
+ ret = device_create_file(&pd->dev, &th_handle->profiles_attr);
+ if (ret)
+ goto outkzalloc;
+
+ ret = device_create_file(&pd->dev, &th_handle->mode_attr);
+ if (ret)
+ goto outprofiles;
+
+ return 0;
+
+outprofiles:
+ device_remove_file(&pd->dev, &th_handle->profiles_attr);
+outkzalloc:
+ kfree(th_handle);
+ th_handle = NULL;
+ return ret;
+}
+
+static void sony_nc_thermal_cleanup(struct platform_device *pd)
+{
+ if (th_handle) {
+ device_remove_file(&pd->dev, &th_handle->profiles_attr);
+ device_remove_file(&pd->dev, &th_handle->mode_attr);
+ kfree(th_handle);
+ th_handle = NULL;
+ }
+}
+
+static void sony_nc_thermal_resume(void)
+{
+ unsigned int status = sony_nc_thermal_mode_get();
+
+ if (status != th_handle->mode)
+ sony_nc_thermal_mode_set(th_handle->mode);
+}
+
+/* resume on LID open */
+struct snc_lid_resume_control {
+ struct device_attribute attrs[3];
+ unsigned int status;
+};
+static struct snc_lid_resume_control *lid_ctl;
+
+static ssize_t sony_nc_lid_resume_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned int result, pos;
+ unsigned long value;
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value) || value > 1)
+ return -EINVAL;
+
+ /* the value we have to write to SNC is a bitmask:
+ * +--------------+
+ * | S3 | S4 | S5 |
+ * +--------------+
+ * 2 1 0
+ */
+ if (strcmp(attr->attr.name, "lid_resume_S3") == 0)
+ pos = 2;
+ else if (strcmp(attr->attr.name, "lid_resume_S4") == 0)
+ pos = 1;
+ else if (strcmp(attr->attr.name, "lid_resume_S5") == 0)
+ pos = 0;
+ else
+ return -EINVAL;
+
+ if (value)
+ value = lid_ctl->status | (1 << pos);
+ else
+ value = lid_ctl->status & ~(1 << pos);
+
+ if (sony_call_snc_handle(0x0119, value << 0x10 | 0x0100, &result))
+ return -EIO;
+
+ lid_ctl->status = value;
+
+ return count;
+}
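
Illustrative aside, not part of the patch: a minimal sketch of the lid-resume bitmask described above (S3 is bit 2, S4 is bit 1, S5 is bit 0) and of the word written through handle 0x0119 sub-command 0x0100.

#include <stdio.h>

int main(void)
{
	unsigned int status = 0x0;	/* no wake-on-lid-open source enabled */

	status |= 1 << 2;	/* enable resume from S3 */
	status |= 1 << 0;	/* enable resume from S5 */

	printf("status bits: 0x%x\n", status);
	printf("SNC arg:     0x%.8x\n", status << 0x10 | 0x0100);
	return 0;
}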
+
+static ssize_t sony_nc_lid_resume_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int pos;
+
+ if (strcmp(attr->attr.name, "lid_resume_S3") == 0)
+ pos = 2;
+ else if (strcmp(attr->attr.name, "lid_resume_S4") == 0)
+ pos = 1;
+ else if (strcmp(attr->attr.name, "lid_resume_S5") == 0)
+ pos = 0;
+ else
+ return -EINVAL;
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n",
+ (lid_ctl->status >> pos) & 0x01);
+}
+
+static int sony_nc_lid_resume_setup(struct platform_device *pd)
+{
+ unsigned int result;
+ int i;
+
+ if (sony_call_snc_handle(0x0119, 0x0000, &result))
+ return -EIO;
+
+ lid_ctl = kzalloc(sizeof(struct snc_lid_resume_control), GFP_KERNEL);
+ if (!lid_ctl)
+ return -ENOMEM;
+
+ lid_ctl->status = result & 0x7;
+
+ sysfs_attr_init(&lid_ctl->attrs[0].attr);
+ lid_ctl->attrs[0].attr.name = "lid_resume_S3";
+ lid_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR;
+ lid_ctl->attrs[0].show = sony_nc_lid_resume_show;
+ lid_ctl->attrs[0].store = sony_nc_lid_resume_store;
+
+ sysfs_attr_init(&lid_ctl->attrs[1].attr);
+ lid_ctl->attrs[1].attr.name = "lid_resume_S4";
+ lid_ctl->attrs[1].attr.mode = S_IRUGO | S_IWUSR;
+ lid_ctl->attrs[1].show = sony_nc_lid_resume_show;
+ lid_ctl->attrs[1].store = sony_nc_lid_resume_store;
+
+ sysfs_attr_init(&lid_ctl->attrs[2].attr);
+ lid_ctl->attrs[2].attr.name = "lid_resume_S5";
+ lid_ctl->attrs[2].attr.mode = S_IRUGO | S_IWUSR;
+ lid_ctl->attrs[2].show = sony_nc_lid_resume_show;
+ lid_ctl->attrs[2].store = sony_nc_lid_resume_store;
+
+ for (i = 0; i < 3; i++) {
+ result = device_create_file(&pd->dev, &lid_ctl->attrs[i]);
+ if (result)
+ goto liderror;
+ }
+
+ return 0;
+
+liderror:
+ for (; i > 0; i--)
+ device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
+
+ kfree(lid_ctl);
+ lid_ctl = NULL;
+
+ return result;
+}
+
+static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
+{
+ int i;
+
+ if (lid_ctl) {
+ for (i = 0; i < 3; i++)
+ device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
+
+ kfree(lid_ctl);
+ lid_ctl = NULL;
+ }
+}
+
+/* High speed charging function */
+static struct device_attribute *hsc_handle;
+
+static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buffer, size_t count)
+{
+ unsigned int result;
+ unsigned long value;
+
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value) || value > 1)
+ return -EINVAL;
+
+ if (sony_call_snc_handle(0x0131, value << 0x10 | 0x0200, &result))
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t sony_nc_highspeed_charging_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x0131, 0x0100, &result))
+ return -EIO;
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0x01);
+}
+
+static int sony_nc_highspeed_charging_setup(struct platform_device *pd)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(0x0131, 0x0000, &result) || !(result & 0x01)) {
+ /* some models advertise the handle but have no implementation
+ * for it
+ */
+ pr_info("No High Speed Charging capability found\n");
+ return 0;
+ }
+
+ hsc_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+ if (!hsc_handle)
+ return -ENOMEM;
+
+ sysfs_attr_init(&hsc_handle->attr);
+ hsc_handle->attr.name = "battery_highspeed_charging";
+ hsc_handle->attr.mode = S_IRUGO | S_IWUSR;
+ hsc_handle->show = sony_nc_highspeed_charging_show;
+ hsc_handle->store = sony_nc_highspeed_charging_store;
+
+ result = device_create_file(&pd->dev, hsc_handle);
+ if (result) {
+ kfree(hsc_handle);
+ hsc_handle = NULL;
+ return result;
+ }
+
+ return 0;
+}
+
+static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
+{
+ if (hsc_handle) {
+ device_remove_file(&pd->dev, hsc_handle);
+ kfree(hsc_handle);
+ hsc_handle = NULL;
+ }
+}
+
+/* Touchpad enable/disable */
+struct touchpad_control {
+ struct device_attribute attr;
+ int handle;
+};
+static struct touchpad_control *tp_ctl;
+
+static ssize_t sony_nc_touchpad_store(struct device *dev,
+ struct device_attribute *attr, const char *buffer, size_t count)
+{
+ unsigned int result;
+ unsigned long value;
+
+ if (count > 31)
+ return -EINVAL;
+
+ if (kstrtoul(buffer, 10, &value) || value > 1)
+ return -EINVAL;
+
+ /* sysfs: 0 disabled, 1 enabled
+ * EC: 0 enabled, 1 disabled
+ */
+ if (sony_call_snc_handle(tp_ctl->handle,
+ (!value << 0x10) | 0x100, &result))
+ return -EIO;
+
+ return count;
+}
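
Illustrative aside, not part of the patch: a minimal sketch of the sysfs/EC polarity inversion noted above — sysfs exposes 1 = enabled while the EC expects 0 = enabled, hence the logical negation before shifting the value into the SNC argument.

#include <stdio.h>

int main(void)
{
	unsigned long enable = 1, disable = 0;	/* values written to sysfs */

	/* arguments handed to sony_call_snc_handle(tp_ctl->handle, ...) */
	printf("enable:  0x%.8x\n", (unsigned int)((!enable << 0x10) | 0x100));
	printf("disable: 0x%.8x\n", (unsigned int)((!disable << 0x10) | 0x100));
	return 0;
}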
+
+static ssize_t sony_nc_touchpad_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(tp_ctl->handle, 0x000, &result))
+ return -EINVAL;
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n", !(result & 0x01));
+}
+
+static int sony_nc_touchpad_setup(struct platform_device *pd,
+ unsigned int handle)
+{
+ int ret = 0;
+
+ tp_ctl = kzalloc(sizeof(struct touchpad_control), GFP_KERNEL);
+ if (!tp_ctl)
+ return -ENOMEM;
+
+ tp_ctl->handle = handle;
+
+ sysfs_attr_init(&tp_ctl->attr.attr);
+ tp_ctl->attr.attr.name = "touchpad";
+ tp_ctl->attr.attr.mode = S_IRUGO | S_IWUSR;
+ tp_ctl->attr.show = sony_nc_touchpad_show;
+ tp_ctl->attr.store = sony_nc_touchpad_store;
+
+ ret = device_create_file(&pd->dev, &tp_ctl->attr);
+ if (ret) {
+ kfree(tp_ctl);
+ tp_ctl = NULL;
+ }
+
+ return ret;
+}
+
+static void sony_nc_touchpad_cleanup(struct platform_device *pd)
+{
+ if (tp_ctl) {
+ device_remove_file(&pd->dev, &tp_ctl->attr);
+ kfree(tp_ctl);
+ tp_ctl = NULL;
+ }
}
static void sony_nc_backlight_ng_read_limits(int handle,
struct sony_backlight_props *props)
{
- int offset;
- acpi_status status;
- u8 brlvl, i;
+ u64 offset;
+ int i;
+ int lvl_table_len = 0;
u8 min = 0xff, max = 0x00;
- struct acpi_object_list params;
- union acpi_object in_obj;
- union acpi_object *lvl_enum;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ unsigned char buffer[32] = { 0 };
props->handle = handle;
props->offset = 0;
props->maxlvl = 0xff;
offset = sony_find_snc_handle(handle);
- if (offset < 0)
- return;
/* try to read the boundaries from ACPI tables, if we fail the above
* defaults should be reasonable
*/
- params.count = 1;
- params.pointer = &in_obj;
- in_obj.type = ACPI_TYPE_INTEGER;
- in_obj.integer.value = offset;
- status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
- &buffer);
- if (ACPI_FAILURE(status))
+ i = sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buffer,
+ 32);
+ if (i < 0)
return;
- lvl_enum = (union acpi_object *) buffer.pointer;
- if (!lvl_enum) {
- pr_err("No SN06 return object.");
- return;
- }
- if (lvl_enum->type != ACPI_TYPE_BUFFER) {
- pr_err("Invalid SN06 return object 0x%.2x\n",
- lvl_enum->type);
- goto out_invalid;
+ switch (handle) {
+ case 0x012f:
+ case 0x0137:
+ lvl_table_len = 9;
+ break;
+ case 0x143:
+ lvl_table_len = 16;
+ break;
}
/* the buffer lists brightness levels available, brightness levels are
- * from 0 to 8 in the array, other values are used by ALS control.
+ * from position 0 to 8 in the array, other values are used by ALS
+ * control.
*/
- for (i = 0; i < 9 && i < lvl_enum->buffer.length; i++) {
+ for (i = 0; i < lvl_table_len && i < ARRAY_SIZE(buffer); i++) {
- brlvl = *(lvl_enum->buffer.pointer + i);
- dprintk("Brightness level: %d\n", brlvl);
+ dprintk("Brightness level: %d\n", buffer[i]);
- if (!brlvl)
+ if (!buffer[i])
break;
- if (brlvl > max)
- max = brlvl;
- if (brlvl < min)
- min = brlvl;
+ if (buffer[i] > max)
+ max = buffer[i];
+ if (buffer[i] < min)
+ min = buffer[i];
}
props->offset = min;
props->maxlvl = max;
dprintk("Brightness levels: min=%d max=%d\n", props->offset,
props->maxlvl);
-
-out_invalid:
- kfree(buffer.pointer);
- return;
}
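
Illustrative aside, not part of the patch: a minimal sketch of the level-table scan above. The SN06 buffer holds the usable brightness levels (9 entries for handles 0x012f/0x0137, 16 for 0x0143), zero-terminated; min and max bound the backlight range. The example table contents are made up.

#include <stdio.h>

int main(void)
{
	unsigned char levels[32] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 0 };
	int table_len = 9;	/* e.g. handles 0x012f/0x0137; 16 for 0x0143 */
	unsigned char min = 0xff, max = 0x00;
	int i;

	for (i = 0; i < table_len && i < 32; i++) {
		if (!levels[i])
			break;
		if (levels[i] > max)
			max = levels[i];
		if (levels[i] < min)
			min = levels[i];
	}
	printf("offset=%d maxlvl=%d max_brightness=%d\n", min, max, max - min);
	return 0;
}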
static void sony_nc_backlight_setup(void)
@@ -1636,16 +2557,24 @@ static void sony_nc_backlight_setup(void)
const struct backlight_ops *ops = NULL;
struct backlight_properties props;
- if (sony_find_snc_handle(0x12f) != -1) {
+ if (sony_find_snc_handle(0x12f) >= 0) {
ops = &sony_backlight_ng_ops;
+ sony_bl_props.cmd_base = 0x0100;
sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props);
max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
- } else if (sony_find_snc_handle(0x137) != -1) {
+ } else if (sony_find_snc_handle(0x137) >= 0) {
ops = &sony_backlight_ng_ops;
+ sony_bl_props.cmd_base = 0x0100;
sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props);
max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+ } else if (sony_find_snc_handle(0x143) >= 0) {
+ ops = &sony_backlight_ng_ops;
+ sony_bl_props.cmd_base = 0x3000;
+ sony_nc_backlight_ng_read_limits(0x143, &sony_bl_props);
+ max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+
} else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
&unused))) {
ops = &sony_backlight_ops;
@@ -1713,32 +2642,29 @@ static int sony_nc_add(struct acpi_device *device)
}
}
+ result = sony_laptop_setup_input(device);
+ if (result) {
+ pr_err("Unable to create input devices\n");
+ goto outplatform;
+ }
+
if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
&handle))) {
- if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL))
+ int arg = 1;
+ if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
dprintk("ECON Method failed\n");
}
if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
&handle))) {
dprintk("Doing SNC setup\n");
+ /* retrieve the available handles */
result = sony_nc_handles_setup(sony_pf_device);
- if (result)
- goto outpresent;
- result = sony_nc_kbd_backlight_setup(sony_pf_device);
- if (result)
- goto outsnc;
- sony_nc_function_setup(device);
- sony_nc_rfkill_setup(device);
+ if (!result)
+ sony_nc_function_setup(device, sony_pf_device);
}
/* setup input devices and helper fifo */
- result = sony_laptop_setup_input(device);
- if (result) {
- pr_err("Unable to create input devices\n");
- goto outkbdbacklight;
- }
-
if (acpi_video_backlight_support()) {
pr_info("brightness ignored, must be controlled by ACPI video driver\n");
} else {
@@ -1786,24 +2712,21 @@ static int sony_nc_add(struct acpi_device *device)
return 0;
- out_sysfs:
+out_sysfs:
for (item = sony_nc_values; item->name; ++item) {
device_remove_file(&sony_pf_device->dev, &item->devattr);
}
sony_nc_backlight_cleanup();
+ sony_nc_function_cleanup(sony_pf_device);
+ sony_nc_handles_cleanup(sony_pf_device);
+outplatform:
sony_laptop_remove_input();
- outkbdbacklight:
- sony_nc_kbd_backlight_cleanup(sony_pf_device);
-
- outsnc:
- sony_nc_handles_cleanup(sony_pf_device);
-
- outpresent:
+outpresent:
sony_pf_remove();
- outwalk:
+outwalk:
sony_nc_rfkill_cleanup();
return result;
}
@@ -1820,11 +2743,10 @@ static int sony_nc_remove(struct acpi_device *device, int type)
device_remove_file(&sony_pf_device->dev, &item->devattr);
}
- sony_nc_kbd_backlight_cleanup(sony_pf_device);
+ sony_nc_function_cleanup(sony_pf_device);
sony_nc_handles_cleanup(sony_pf_device);
sony_pf_remove();
sony_laptop_remove_input();
- sony_nc_rfkill_cleanup();
dprintk(SONY_NC_DRIVER_NAME " removed.\n");
return 0;
@@ -1850,9 +2772,9 @@ static struct acpi_driver sony_nc_driver = {
.ops = {
.add = sony_nc_add,
.remove = sony_nc_remove,
- .resume = sony_nc_resume,
.notify = sony_nc_notify,
},
+ .drv.pm = &sony_nc_pm,
};
/*********** SPIC (SNY6001) Device ***********/
@@ -2437,7 +3359,9 @@ static ssize_t sony_pic_wwanpower_store(struct device *dev,
if (count > 31)
return -EINVAL;
- value = simple_strtoul(buffer, NULL, 10);
+ if (kstrtoul(buffer, 10, &value))
+ return -EINVAL;
+
mutex_lock(&spic_dev.lock);
__sony_pic_set_wwanpower(value);
mutex_unlock(&spic_dev.lock);
@@ -2474,7 +3398,9 @@ static ssize_t sony_pic_bluetoothpower_store(struct device *dev,
if (count > 31)
return -EINVAL;
- value = simple_strtoul(buffer, NULL, 10);
+ if (kstrtoul(buffer, 10, &value))
+ return -EINVAL;
+
mutex_lock(&spic_dev.lock);
__sony_pic_set_bluetoothpower(value);
mutex_unlock(&spic_dev.lock);
@@ -2513,7 +3439,9 @@ static ssize_t sony_pic_fanspeed_store(struct device *dev,
if (count > 31)
return -EINVAL;
- value = simple_strtoul(buffer, NULL, 10);
+ if (kstrtoul(buffer, 10, &value))
+ return -EINVAL;
+
if (sony_pic_set_fanspeed(value))
return -EIO;
@@ -2671,7 +3599,8 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
ret = -EIO;
break;
}
- if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value)) {
+ if (sony_nc_int_call(sony_nc_acpi_handle, "GBRT", NULL,
+ &value)) {
ret = -EIO;
break;
}
@@ -2688,8 +3617,9 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
ret = -EFAULT;
break;
}
- if (acpi_callsetfunc(sony_nc_acpi_handle, "SBRT",
- (val8 >> 5) + 1, NULL)) {
+ value = (val8 >> 5) + 1;
+ if (sony_nc_int_call(sony_nc_acpi_handle, "SBRT", &value,
+ NULL)) {
ret = -EIO;
break;
}
@@ -3357,19 +4287,22 @@ err_free_resources:
return result;
}
-static int sony_pic_suspend(struct acpi_device *device, pm_message_t state)
+static int sony_pic_suspend(struct device *dev)
{
- if (sony_pic_disable(device))
+ if (sony_pic_disable(to_acpi_device(dev)))
return -ENXIO;
return 0;
}
-static int sony_pic_resume(struct acpi_device *device)
+static int sony_pic_resume(struct device *dev)
{
- sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq);
+ sony_pic_enable(to_acpi_device(dev),
+ spic_dev.cur_ioport, spic_dev.cur_irq);
return 0;
}
+static SIMPLE_DEV_PM_OPS(sony_pic_pm, sony_pic_suspend, sony_pic_resume);
+
static const struct acpi_device_id sony_pic_device_ids[] = {
{SONY_PIC_HID, 0},
{"", 0},
@@ -3383,9 +4316,8 @@ static struct acpi_driver sony_pic_driver = {
.ops = {
.add = sony_pic_add,
.remove = sony_pic_remove,
- .suspend = sony_pic_suspend,
- .resume = sony_pic_resume,
},
+ .drv.pm = &sony_pic_pm,
};
static struct dmi_system_id __initdata sonypi_dmi_table[] = {
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index d68c000..d5fd4a1 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -277,7 +277,7 @@ struct ibm_struct {
int (*write) (char *);
void (*exit) (void);
void (*resume) (void);
- void (*suspend) (pm_message_t state);
+ void (*suspend) (void);
void (*shutdown) (void);
struct list_head all_drivers;
@@ -922,8 +922,7 @@ static struct input_dev *tpacpi_inputdev;
static struct mutex tpacpi_inputdev_send_mutex;
static LIST_HEAD(tpacpi_all_drivers);
-static int tpacpi_suspend_handler(struct platform_device *pdev,
- pm_message_t state)
+static int tpacpi_suspend_handler(struct device *dev)
{
struct ibm_struct *ibm, *itmp;
@@ -931,13 +930,13 @@ static int tpacpi_suspend_handler(struct platform_device *pdev,
&tpacpi_all_drivers,
all_drivers) {
if (ibm->suspend)
- (ibm->suspend)(state);
+ (ibm->suspend)();
}
return 0;
}
-static int tpacpi_resume_handler(struct platform_device *pdev)
+static int tpacpi_resume_handler(struct device *dev)
{
struct ibm_struct *ibm, *itmp;
@@ -951,6 +950,9 @@ static int tpacpi_resume_handler(struct platform_device *pdev)
return 0;
}
+static SIMPLE_DEV_PM_OPS(tpacpi_pm,
+ tpacpi_suspend_handler, tpacpi_resume_handler);
+
static void tpacpi_shutdown_handler(struct platform_device *pdev)
{
struct ibm_struct *ibm, *itmp;
@@ -967,9 +969,8 @@ static struct platform_driver tpacpi_pdriver = {
.driver = {
.name = TPACPI_DRVR_NAME,
.owner = THIS_MODULE,
+ .pm = &tpacpi_pm,
},
- .suspend = tpacpi_suspend_handler,
- .resume = tpacpi_resume_handler,
.shutdown = tpacpi_shutdown_handler,
};
@@ -3402,7 +3403,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
/* Do not issue duplicate brightness change events to
* userspace. tpacpi_detect_brightness_capabilities() must have
* been called before this point */
- if (tp_features.bright_acpimode && acpi_video_backlight_support()) {
+ if (acpi_video_backlight_support()) {
pr_info("This ThinkPad has standard ACPI backlight "
"brightness control, supported by the ACPI "
"video driver\n");
@@ -3758,7 +3759,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
}
}
-static void hotkey_suspend(pm_message_t state)
+static void hotkey_suspend(void)
{
/* Do these on suspend, we get the events on early resume! */
hotkey_wakeup_reason = TP_ACPI_WAKEUP_NONE;
@@ -6329,7 +6330,7 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
return 0;
}
-static void brightness_suspend(pm_message_t state)
+static void brightness_suspend(void)
{
tpacpi_brightness_checkpoint_nvram();
}
@@ -6748,7 +6749,7 @@ static struct snd_kcontrol_new volume_alsa_control_mute __devinitdata = {
.get = volume_alsa_mute_get,
};
-static void volume_suspend(pm_message_t state)
+static void volume_suspend(void)
{
tpacpi_volume_checkpoint_nvram();
}
@@ -8107,7 +8108,7 @@ static void fan_exit(void)
flush_workqueue(tpacpi_wq);
}
-static void fan_suspend(pm_message_t state)
+static void fan_suspend(void)
{
int rc;
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index ee79ce6..c13ba5b 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -95,6 +95,7 @@ MODULE_LICENSE("GPL");
/* registers */
#define HCI_FAN 0x0004
+#define HCI_TR_BACKLIGHT 0x0005
#define HCI_SYSTEM_EVENT 0x0016
#define HCI_VIDEO_OUT 0x001c
#define HCI_HOTKEY_EVENT 0x001e
@@ -134,6 +135,7 @@ struct toshiba_acpi_dev {
unsigned int system_event_supported:1;
unsigned int ntfy_supported:1;
unsigned int info_supported:1;
+ unsigned int tr_backlight_supported:1;
struct mutex mutex;
};
@@ -478,34 +480,70 @@ static const struct rfkill_ops toshiba_rfk_ops = {
.poll = bt_rfkill_poll,
};
+static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, bool *enabled)
+{
+ u32 hci_result;
+ u32 status;
+
+ hci_read1(dev, HCI_TR_BACKLIGHT, &status, &hci_result);
+ *enabled = !status;
+ return hci_result == HCI_SUCCESS ? 0 : -EIO;
+}
+
+static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable)
+{
+ u32 hci_result;
+ u32 value = !enable;
+
+ hci_write1(dev, HCI_TR_BACKLIGHT, value, &hci_result);
+ return hci_result == HCI_SUCCESS ? 0 : -EIO;
+}
+
static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ;
-static int get_lcd(struct backlight_device *bd)
+static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
{
- struct toshiba_acpi_dev *dev = bl_get_data(bd);
u32 hci_result;
u32 value;
+ int brightness = 0;
+
+ if (dev->tr_backlight_supported) {
+ bool enabled;
+ int ret = get_tr_backlight_status(dev, &enabled);
+ if (ret)
+ return ret;
+ if (enabled)
+ return 0;
+ brightness++;
+ }
hci_read1(dev, HCI_LCD_BRIGHTNESS, &value, &hci_result);
if (hci_result == HCI_SUCCESS)
- return (value >> HCI_LCD_BRIGHTNESS_SHIFT);
+ return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT);
return -EIO;
}
+static int get_lcd_brightness(struct backlight_device *bd)
+{
+ struct toshiba_acpi_dev *dev = bl_get_data(bd);
+ return __get_lcd_brightness(dev);
+}
+
static int lcd_proc_show(struct seq_file *m, void *v)
{
struct toshiba_acpi_dev *dev = m->private;
int value;
+ int levels;
if (!dev->backlight_dev)
return -ENODEV;
- value = get_lcd(dev->backlight_dev);
+ levels = dev->backlight_dev->props.max_brightness + 1;
+ value = get_lcd_brightness(dev->backlight_dev);
if (value >= 0) {
seq_printf(m, "brightness: %d\n", value);
- seq_printf(m, "brightness_levels: %d\n",
- HCI_LCD_BRIGHTNESS_LEVELS);
+ seq_printf(m, "brightness_levels: %d\n", levels);
return 0;
}
@@ -518,10 +556,19 @@ static int lcd_proc_open(struct inode *inode, struct file *file)
return single_open(file, lcd_proc_show, PDE(inode)->data);
}
-static int set_lcd(struct toshiba_acpi_dev *dev, int value)
+static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
{
u32 hci_result;
+ if (dev->tr_backlight_supported) {
+ bool enable = !value;
+ int ret = set_tr_backlight_status(dev, enable);
+ if (ret)
+ return ret;
+ if (value)
+ value--;
+ }
+
value = value << HCI_LCD_BRIGHTNESS_SHIFT;
hci_write1(dev, HCI_LCD_BRIGHTNESS, value, &hci_result);
return hci_result == HCI_SUCCESS ? 0 : -EIO;
@@ -530,7 +577,7 @@ static int set_lcd(struct toshiba_acpi_dev *dev, int value)
static int set_lcd_status(struct backlight_device *bd)
{
struct toshiba_acpi_dev *dev = bl_get_data(bd);
- return set_lcd(dev, bd->props.brightness);
+ return set_lcd_brightness(dev, bd->props.brightness);
}
static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
@@ -541,6 +588,7 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
size_t len;
int value;
int ret;
+ int levels = dev->backlight_dev->props.max_brightness + 1;
len = min(count, sizeof(cmd) - 1);
if (copy_from_user(cmd, buf, len))
@@ -548,8 +596,8 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
cmd[len] = '\0';
if (sscanf(cmd, " brightness : %i", &value) == 1 &&
- value >= 0 && value < HCI_LCD_BRIGHTNESS_LEVELS) {
- ret = set_lcd(dev, value);
+ value >= 0 && value < levels) {
+ ret = set_lcd_brightness(dev, value);
if (ret == 0)
ret = count;
} else {
@@ -860,8 +908,9 @@ static void remove_toshiba_proc_entries(struct toshiba_acpi_dev *dev)
}
static const struct backlight_ops toshiba_backlight_data = {
- .get_brightness = get_lcd,
- .update_status = set_lcd_status,
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = get_lcd_brightness,
+ .update_status = set_lcd_status,
};
static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str,
@@ -1020,6 +1069,56 @@ static int __devinit toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
return error;
}
+static int __devinit toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
+{
+ struct backlight_properties props;
+ int brightness;
+ int ret;
+ bool enabled;
+
+ /*
+ * Some machines don't support the backlight methods at all, and
+ * others support it read-only. Either of these is pretty useless,
+ * so only register the backlight device if the backlight method
+ * supports both reads and writes.
+ */
+ brightness = __get_lcd_brightness(dev);
+ if (brightness < 0)
+ return 0;
+ ret = set_lcd_brightness(dev, brightness);
+ if (ret) {
+ pr_debug("Backlight method is read-only, disabling backlight support\n");
+ return 0;
+ }
+
+ /* Determine whether or not BIOS supports transflective backlight */
+ ret = get_tr_backlight_status(dev, &enabled);
+ dev->tr_backlight_supported = !ret;
+
+ memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
+
+	/* add an extra level so that value 0 switches to transflective mode */
+ if (dev->tr_backlight_supported)
+ props.max_brightness++;
+
+ dev->backlight_dev = backlight_device_register("toshiba",
+ &dev->acpi_dev->dev,
+ dev,
+ &toshiba_backlight_data,
+ &props);
+ if (IS_ERR(dev->backlight_dev)) {
+ ret = PTR_ERR(dev->backlight_dev);
+ pr_err("Could not register toshiba backlight device\n");
+ dev->backlight_dev = NULL;
+ return ret;
+ }
+
+ dev->backlight_dev->props.brightness = brightness;
+ return 0;
+}
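
Illustrative aside, not part of the patch: a minimal userspace sketch of the brightness mapping set up above, assuming the driver's 8 HCI brightness levels. When the transflective backlight is supported, the scale gains one extra level and backlight value 0 means "transflective on" rather than HCI level 0.

#include <stdio.h>

#define HCI_LCD_BRIGHTNESS_LEVELS 8

int main(void)
{
	int tr_supported = 1;
	int max = HCI_LCD_BRIGHTNESS_LEVELS - 1 + (tr_supported ? 1 : 0);
	int value;

	for (value = 0; value <= max; value++) {
		if (tr_supported && value == 0)
			printf("%d -> transflective backlight on\n", value);
		else
			printf("%d -> HCI brightness level %d\n", value,
			       tr_supported ? value - 1 : value);
	}
	return 0;
}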
+
static int toshiba_acpi_remove(struct acpi_device *acpi_dev, int type)
{
struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
@@ -1078,7 +1177,6 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
u32 dummy;
bool bt_present;
int ret = 0;
- struct backlight_properties props;
if (toshiba_acpi)
return -EBUSY;
@@ -1104,21 +1202,9 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
mutex_init(&dev->mutex);
- props.type = BACKLIGHT_PLATFORM;
- props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
- dev->backlight_dev = backlight_device_register("toshiba",
- &acpi_dev->dev,
- dev,
- &toshiba_backlight_data,
- &props);
- if (IS_ERR(dev->backlight_dev)) {
- ret = PTR_ERR(dev->backlight_dev);
-
- pr_err("Could not register toshiba backlight device\n");
- dev->backlight_dev = NULL;
+ ret = toshiba_acpi_setup_backlight(dev);
+ if (ret)
goto error;
- }
- dev->backlight_dev->props.brightness = get_lcd(dev->backlight_dev);
/* Register rfkill switch for Bluetooth */
if (hci_get_bt_present(dev, &bt_present) == HCI_SUCCESS && bt_present) {
@@ -1210,10 +1296,9 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
}
}
-static int toshiba_acpi_suspend(struct acpi_device *acpi_dev,
- pm_message_t state)
+static int toshiba_acpi_suspend(struct device *device)
{
- struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
+ struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
u32 result;
if (dev->hotkey_dev)
@@ -1222,9 +1307,9 @@ static int toshiba_acpi_suspend(struct acpi_device *acpi_dev,
return 0;
}
-static int toshiba_acpi_resume(struct acpi_device *acpi_dev)
+static int toshiba_acpi_resume(struct device *device)
{
- struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
+ struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
u32 result;
if (dev->hotkey_dev)
@@ -1233,6 +1318,9 @@ static int toshiba_acpi_resume(struct acpi_device *acpi_dev)
return 0;
}
+static SIMPLE_DEV_PM_OPS(toshiba_acpi_pm,
+ toshiba_acpi_suspend, toshiba_acpi_resume);
+
static struct acpi_driver toshiba_acpi_driver = {
.name = "Toshiba ACPI driver",
.owner = THIS_MODULE,
@@ -1242,9 +1330,8 @@ static struct acpi_driver toshiba_acpi_driver = {
.add = toshiba_acpi_add,
.remove = toshiba_acpi_remove,
.notify = toshiba_acpi_notify,
- .suspend = toshiba_acpi_suspend,
- .resume = toshiba_acpi_resume,
},
+ .drv.pm = &toshiba_acpi_pm,
};
static int __init toshiba_acpi_init(void)
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index 5fb7186..715a43c 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -34,7 +34,6 @@ MODULE_LICENSE("GPL");
static int toshiba_bt_rfkill_add(struct acpi_device *device);
static int toshiba_bt_rfkill_remove(struct acpi_device *device, int type);
static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event);
-static int toshiba_bt_resume(struct acpi_device *device);
static const struct acpi_device_id bt_device_ids[] = {
{ "TOS6205", 0},
@@ -42,6 +41,9 @@ static const struct acpi_device_id bt_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, bt_device_ids);
+static int toshiba_bt_resume(struct device *dev);
+static SIMPLE_DEV_PM_OPS(toshiba_bt_pm, NULL, toshiba_bt_resume);
+
static struct acpi_driver toshiba_bt_rfkill_driver = {
.name = "Toshiba BT",
.class = "Toshiba",
@@ -50,9 +52,9 @@ static struct acpi_driver toshiba_bt_rfkill_driver = {
.add = toshiba_bt_rfkill_add,
.remove = toshiba_bt_rfkill_remove,
.notify = toshiba_bt_rfkill_notify,
- .resume = toshiba_bt_resume,
},
.owner = THIS_MODULE,
+ .drv.pm = &toshiba_bt_pm,
};
@@ -88,9 +90,9 @@ static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event)
toshiba_bluetooth_enable(device->handle);
}
-static int toshiba_bt_resume(struct acpi_device *device)
+static int toshiba_bt_resume(struct device *dev)
{
- return toshiba_bluetooth_enable(device->handle);
+ return toshiba_bluetooth_enable(to_acpi_device(dev)->handle);
}
static int toshiba_bt_rfkill_add(struct acpi_device *device)
diff --git a/drivers/platform/x86/xo1-rfkill.c b/drivers/platform/x86/xo1-rfkill.c
index 41781ed..b57ad86 100644
--- a/drivers/platform/x86/xo1-rfkill.c
+++ b/drivers/platform/x86/xo1-rfkill.c
@@ -15,15 +15,26 @@
#include <asm/olpc.h>
+static bool card_blocked;
+
static int rfkill_set_block(void *data, bool blocked)
{
unsigned char cmd;
+ int r;
+
+ if (blocked == card_blocked)
+ return 0;
+
if (blocked)
cmd = EC_WLAN_ENTER_RESET;
else
cmd = EC_WLAN_LEAVE_RESET;
- return olpc_ec_cmd(cmd, NULL, 0, NULL, 0);
+ r = olpc_ec_cmd(cmd, NULL, 0, NULL, 0);
+ if (r == 0)
+ card_blocked = blocked;
+
+ return r;
}
static const struct rfkill_ops rfkill_ops = {
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index fad153d..849c07c 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -77,11 +77,13 @@ static void ebook_switch_notify(struct acpi_device *device, u32 event)
}
}
-static int ebook_switch_resume(struct acpi_device *device)
+static int ebook_switch_resume(struct device *dev)
{
- return ebook_send_state(device);
+ return ebook_send_state(to_acpi_device(dev));
}
+static SIMPLE_DEV_PM_OPS(ebook_switch_pm, NULL, ebook_switch_resume);
+
static int ebook_switch_add(struct acpi_device *device)
{
struct ebook_switch *button;
@@ -161,10 +163,10 @@ static struct acpi_driver xo15_ebook_driver = {
.ids = ebook_device_ids,
.ops = {
.add = ebook_switch_add,
- .resume = ebook_switch_resume,
.remove = ebook_switch_remove,
.notify = ebook_switch_notify,
},
+ .drv.pm = &ebook_switch_pm,
};
static int __init xo15_ebook_init(void)
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index f2525af..aa764ec 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -1,5 +1,5 @@
menuconfig POWER_SUPPLY
- tristate "Power supply class support"
+ bool "Power supply class support"
help
Say Y here to enable power supply class support. This allows
power supply (batteries, AC, USB) monitoring by userspace
@@ -77,7 +77,7 @@ config BATTERY_DS2780
Say Y here to enable support for batteries with ds2780 chip.
config BATTERY_DS2781
- tristate "2781 battery driver"
+ tristate "DS2781 battery driver"
depends on HAS_IOMEM
select W1
select W1_SLAVE_DS2781
@@ -181,14 +181,15 @@ config BATTERY_MAX17040
to operate with a single lithium cell
config BATTERY_MAX17042
- tristate "Maxim MAX17042/8997/8966 Fuel Gauge"
+ tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge"
depends on I2C
help
MAX17042 is fuel-gauge systems for lithium-ion (Li+) batteries
in handheld and portable equipment. The MAX17042 is configured
to operate with a single lithium cell. MAX8997 and MAX8966 are
multi-function devices that include fuel gauages that are compatible
- with MAX17042.
+	  with MAX17042. This driver also supports the max17047/50 chips, which
+	  are improved versions of the max17042.
config BATTERY_Z2
tristate "Z2 battery driver"
@@ -291,6 +292,7 @@ config CHARGER_MAX8998
config CHARGER_SMB347
tristate "Summit Microelectronics SMB347 Battery Charger"
depends on I2C
+ select REGMAP_I2C
help
Say Y to include support for Summit Microelectronics SMB347
Battery Charger.
@@ -308,3 +310,5 @@ config AB8500_BATTERY_THERM_ON_BATCTRL
Say Y to enable battery temperature measurements using
thermistor connected on BATCTRL ADC.
endif # POWER_SUPPLY
+
+source "drivers/power/avs/Kconfig"
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index b6b2434..ee58afb 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -43,4 +43,5 @@ obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
+obj-$(CONFIG_POWER_AVS) += avs/
obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
index d8bb993..bba3cca 100644
--- a/drivers/power/ab8500_btemp.c
+++ b/drivers/power/ab8500_btemp.c
@@ -964,10 +964,15 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
{
int irq, i, ret = 0;
u8 val;
- struct abx500_bm_plat_data *plat_data;
+ struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
+ struct ab8500_btemp *di;
+
+ if (!plat_data) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
- struct ab8500_btemp *di =
- kzalloc(sizeof(struct ab8500_btemp), GFP_KERNEL);
+ di = kzalloc(sizeof(*di), GFP_KERNEL);
if (!di)
return -ENOMEM;
@@ -977,7 +982,6 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
/* get btemp specific platform data */
- plat_data = pdev->dev.platform_data;
di->pdata = plat_data->btemp;
if (!di->pdata) {
dev_err(di->dev, "no btemp platform data supplied\n");
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index e2b4acc..d2303d0 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -2534,10 +2534,15 @@ static int __devexit ab8500_charger_remove(struct platform_device *pdev)
static int __devinit ab8500_charger_probe(struct platform_device *pdev)
{
int irq, i, charger_status, ret = 0;
- struct abx500_bm_plat_data *plat_data;
+ struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
+ struct ab8500_charger *di;
- struct ab8500_charger *di =
- kzalloc(sizeof(struct ab8500_charger), GFP_KERNEL);
+ if (!plat_data) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
+ di = kzalloc(sizeof(*di), GFP_KERNEL);
if (!di)
return -ENOMEM;
@@ -2550,9 +2555,7 @@ static int __devinit ab8500_charger_probe(struct platform_device *pdev)
spin_lock_init(&di->usb_state.usb_lock);
/* get charger specific platform data */
- plat_data = pdev->dev.platform_data;
di->pdata = plat_data->charger;
-
if (!di->pdata) {
dev_err(di->dev, "no charger platform data supplied\n");
ret = -EINVAL;
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index c22f2f0..bf02225 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/ab8500_fg.c
@@ -2446,10 +2446,15 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev)
{
int i, irq;
int ret = 0;
- struct abx500_bm_plat_data *plat_data;
+ struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
+ struct ab8500_fg *di;
+
+ if (!plat_data) {
+ dev_err(&pdev->dev, "No platform data\n");
+ return -EINVAL;
+ }
- struct ab8500_fg *di =
- kzalloc(sizeof(struct ab8500_fg), GFP_KERNEL);
+ di = kzalloc(sizeof(*di), GFP_KERNEL);
if (!di)
return -ENOMEM;
@@ -2461,7 +2466,6 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev)
di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
/* get fg specific platform data */
- plat_data = pdev->dev.platform_data;
di->pdata = plat_data->fg;
if (!di->pdata) {
dev_err(di->dev, "no fg platform data supplied\n");
diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
new file mode 100644
index 0000000..2a1008b
--- /dev/null
+++ b/drivers/power/avs/Kconfig
@@ -0,0 +1,12 @@
+menuconfig POWER_AVS
+ bool "Adaptive Voltage Scaling class support"
+ help
+ AVS is a power management technique which finely controls the
+ operating voltage of a device in order to optimize (i.e. reduce)
+ its power consumption.
+ At a given operating point the voltage is adapted depending on
+ static factors (chip manufacturing process) and dynamic factors
+	  (temperature-dependent performance).
+ AVS is also called SmartReflex on OMAP devices.
+
+ Say Y here to enable Adaptive Voltage Scaling class support.
diff --git a/drivers/power/avs/Makefile b/drivers/power/avs/Makefile
new file mode 100644
index 0000000..0843386
--- /dev/null
+++ b/drivers/power/avs/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o
diff --git a/drivers/power/avs/smartreflex.c b/drivers/power/avs/smartreflex.c
new file mode 100644
index 0000000..44efc6e
--- /dev/null
+++ b/drivers/power/avs/smartreflex.c
@@ -0,0 +1,1126 @@
+/*
+ * OMAP SmartReflex Voltage Control
+ *
+ * Author: Thara Gopinath <thara@ti.com>
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ * Thara Gopinath <thara@ti.com>
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Kalle Jokiniemi
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ * Lesly A M <x0080970@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/power/smartreflex.h>
+
+#define SMARTREFLEX_NAME_LEN 16
+#define NVALUE_NAME_LEN 40
+#define SR_DISABLE_TIMEOUT 200
+
+/* sr_list contains all the instances of smartreflex module */
+static LIST_HEAD(sr_list);
+
+static struct omap_sr_class_data *sr_class;
+static struct omap_sr_pmic_data *sr_pmic_data;
+static struct dentry *sr_dbg_dir;
+
+static inline void sr_write_reg(struct omap_sr *sr, unsigned offset, u32 value)
+{
+ __raw_writel(value, (sr->base + offset));
+}
+
+static inline void sr_modify_reg(struct omap_sr *sr, unsigned offset, u32 mask,
+ u32 value)
+{
+ u32 reg_val;
+
+ /*
+	 * The SmartReflex error config register is special: writing a 1 to
+	 * certain status bits clears them. To make sure no accidental write
+	 * of 1 happens to those status bits, clear them in the value read
+	 * back before merging. This means this API doesn't rewrite values
+	 * in these bits if they are currently set, but does allow the
+	 * caller to write those bits.
+ */
+ if (sr->ip_type == SR_TYPE_V1 && offset == ERRCONFIG_V1)
+ mask |= ERRCONFIG_STATUS_V1_MASK;
+ else if (sr->ip_type == SR_TYPE_V2 && offset == ERRCONFIG_V2)
+ mask |= ERRCONFIG_VPBOUNDINTST_V2;
+
+ reg_val = __raw_readl(sr->base + offset);
+ reg_val &= ~mask;
+
+ value &= mask;
+
+ reg_val |= value;
+
+ __raw_writel(reg_val, (sr->base + offset));
+}
+
+static inline u32 sr_read_reg(struct omap_sr *sr, unsigned offset)
+{
+ return __raw_readl(sr->base + offset);
+}
+
+static struct omap_sr *_sr_lookup(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr_info;
+
+ if (!voltdm) {
+ pr_err("%s: Null voltage domain passed!\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ list_for_each_entry(sr_info, &sr_list, node) {
+ if (voltdm == sr_info->voltdm)
+ return sr_info;
+ }
+
+ return ERR_PTR(-ENODATA);
+}
+
+static irqreturn_t sr_interrupt(int irq, void *data)
+{
+ struct omap_sr *sr_info = data;
+ u32 status = 0;
+
+ switch (sr_info->ip_type) {
+ case SR_TYPE_V1:
+ /* Read the status bits */
+ status = sr_read_reg(sr_info, ERRCONFIG_V1);
+
+ /* Clear them by writing back */
+ sr_write_reg(sr_info, ERRCONFIG_V1, status);
+ break;
+ case SR_TYPE_V2:
+ /* Read the status bits */
+ status = sr_read_reg(sr_info, IRQSTATUS);
+
+ /* Clear them by writing back */
+ sr_write_reg(sr_info, IRQSTATUS, status);
+ break;
+ default:
+ dev_err(&sr_info->pdev->dev, "UNKNOWN IP type %d\n",
+ sr_info->ip_type);
+ return IRQ_NONE;
+ }
+
+ if (sr_class->notify)
+ sr_class->notify(sr_info, status);
+
+ return IRQ_HANDLED;
+}
+
+static void sr_set_clk_length(struct omap_sr *sr)
+{
+ struct clk *sys_ck;
+ u32 sys_clk_speed;
+
+ if (cpu_is_omap34xx())
+ sys_ck = clk_get(NULL, "sys_ck");
+ else
+ sys_ck = clk_get(NULL, "sys_clkin_ck");
+
+ if (IS_ERR(sys_ck)) {
+ dev_err(&sr->pdev->dev, "%s: unable to get sys clk\n",
+ __func__);
+ return;
+ }
+
+ sys_clk_speed = clk_get_rate(sys_ck);
+ clk_put(sys_ck);
+
+ switch (sys_clk_speed) {
+ case 12000000:
+ sr->clk_length = SRCLKLENGTH_12MHZ_SYSCLK;
+ break;
+ case 13000000:
+ sr->clk_length = SRCLKLENGTH_13MHZ_SYSCLK;
+ break;
+ case 19200000:
+ sr->clk_length = SRCLKLENGTH_19MHZ_SYSCLK;
+ break;
+ case 26000000:
+ sr->clk_length = SRCLKLENGTH_26MHZ_SYSCLK;
+ break;
+ case 38400000:
+ sr->clk_length = SRCLKLENGTH_38MHZ_SYSCLK;
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "%s: Invalid sysclk value: %d\n",
+ __func__, sys_clk_speed);
+ break;
+ }
+}
+
+static void sr_set_regfields(struct omap_sr *sr)
+{
+ /*
+	 * For the time being these values are defined in smartreflex.h
+	 * and populated during init. Maybe they can be moved to the board
+	 * file or a PMIC-specific data structure. In that case these
+	 * structure fields will have to be populated using the pdata or
+	 * pmic structure.
+ */
+ if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
+ sr->err_weight = OMAP3430_SR_ERRWEIGHT;
+ sr->err_maxlimit = OMAP3430_SR_ERRMAXLIMIT;
+ sr->accum_data = OMAP3430_SR_ACCUMDATA;
+ if (!(strcmp(sr->name, "smartreflex_mpu_iva"))) {
+ sr->senn_avgweight = OMAP3430_SR1_SENNAVGWEIGHT;
+ sr->senp_avgweight = OMAP3430_SR1_SENPAVGWEIGHT;
+ } else {
+ sr->senn_avgweight = OMAP3430_SR2_SENNAVGWEIGHT;
+ sr->senp_avgweight = OMAP3430_SR2_SENPAVGWEIGHT;
+ }
+ }
+}
+
+static void sr_start_vddautocomp(struct omap_sr *sr)
+{
+ if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) {
+ dev_warn(&sr->pdev->dev,
+ "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ if (!sr_class->enable(sr))
+ sr->autocomp_active = true;
+}
+
+static void sr_stop_vddautocomp(struct omap_sr *sr)
+{
+ if (!sr_class || !(sr_class->disable)) {
+ dev_warn(&sr->pdev->dev,
+ "%s: smartreflex class driver not registered\n",
+ __func__);
+ return;
+ }
+
+ if (sr->autocomp_active) {
+ sr_class->disable(sr, 1);
+ sr->autocomp_active = false;
+ }
+}
+
+/*
+ * This function handles the initializations which have to be done
+ * only when both the SR device and the class driver registration have
+ * completed. It will be attempted from both the SR class driver
+ * register and the SR device initialization APIs; only one call will
+ * ultimately succeed.
+ *
+ * Currently this function registers an interrupt handler for a
+ * particular SR if the smartreflex class driver is already registered,
+ * has requested interrupts, and the SR interrupt line is present.
+ */
+static int sr_late_init(struct omap_sr *sr_info)
+{
+ struct omap_sr_data *pdata = sr_info->pdev->dev.platform_data;
+ struct resource *mem;
+ int ret = 0;
+
+ if (sr_class->notify && sr_class->notify_flags && sr_info->irq) {
+ ret = request_irq(sr_info->irq, sr_interrupt,
+ 0, sr_info->name, sr_info);
+ if (ret)
+ goto error;
+ disable_irq(sr_info->irq);
+ }
+
+ if (pdata && pdata->enable_on_init)
+ sr_start_vddautocomp(sr_info);
+
+ return ret;
+
+error:
+ iounmap(sr_info->base);
+ mem = platform_get_resource(sr_info->pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+ list_del(&sr_info->node);
+	dev_err(&sr_info->pdev->dev, "%s: ERROR in registering "
+		"interrupt handler. Smartreflex will "
+		"not function as desired\n", __func__);
+ kfree(sr_info);
+
+ return ret;
+}
+
+static void sr_v1_disable(struct omap_sr *sr)
+{
+ int timeout = 0;
+ int errconf_val = ERRCONFIG_MCUACCUMINTST | ERRCONFIG_MCUVALIDINTST |
+ ERRCONFIG_MCUBOUNDINTST;
+
+ /* Enable MCUDisableAcknowledge interrupt */
+ sr_modify_reg(sr, ERRCONFIG_V1,
+ ERRCONFIG_MCUDISACKINTEN, ERRCONFIG_MCUDISACKINTEN);
+
+ /* SRCONFIG - disable SR */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);
+
+ /* Disable all other SR interrupts and clear the status as needed */
+ if (sr_read_reg(sr, ERRCONFIG_V1) & ERRCONFIG_VPBOUNDINTST_V1)
+ errconf_val |= ERRCONFIG_VPBOUNDINTST_V1;
+ sr_modify_reg(sr, ERRCONFIG_V1,
+ (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN |
+ ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_VPBOUNDINTEN_V1),
+ errconf_val);
+
+ /*
+ * Wait for SR to be disabled.
+ * wait until ERRCONFIG.MCUDISACKINTST = 1. Typical latency is 1us.
+ */
+ sr_test_cond_timeout((sr_read_reg(sr, ERRCONFIG_V1) &
+ ERRCONFIG_MCUDISACKINTST), SR_DISABLE_TIMEOUT,
+ timeout);
+
+ if (timeout >= SR_DISABLE_TIMEOUT)
+		dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timed out\n",
+ __func__);
+
+ /* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */
+ sr_modify_reg(sr, ERRCONFIG_V1, ERRCONFIG_MCUDISACKINTEN,
+ ERRCONFIG_MCUDISACKINTST);
+}
+
+static void sr_v2_disable(struct omap_sr *sr)
+{
+ int timeout = 0;
+
+ /* Enable MCUDisableAcknowledge interrupt */
+ sr_write_reg(sr, IRQENABLE_SET, IRQENABLE_MCUDISABLEACKINT);
+
+ /* SRCONFIG - disable SR */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0);
+
+ /*
+ * Disable all other SR interrupts and clear the status
+ * write to status register ONLY on need basis - only if status
+ * is set.
+ */
+ if (sr_read_reg(sr, ERRCONFIG_V2) & ERRCONFIG_VPBOUNDINTST_V2)
+ sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2,
+ ERRCONFIG_VPBOUNDINTST_V2);
+ else
+ sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2,
+ 0x0);
+ sr_write_reg(sr, IRQENABLE_CLR, (IRQENABLE_MCUACCUMINT |
+ IRQENABLE_MCUVALIDINT |
+ IRQENABLE_MCUBOUNDSINT));
+ sr_write_reg(sr, IRQSTATUS, (IRQSTATUS_MCUACCUMINT |
+ IRQSTATUS_MCVALIDINT |
+ IRQSTATUS_MCBOUNDSINT));
+
+ /*
+ * Wait for SR to be disabled.
+ * wait until IRQSTATUS.MCUDISACKINTST = 1. Typical latency is 1us.
+ */
+ sr_test_cond_timeout((sr_read_reg(sr, IRQSTATUS) &
+ IRQSTATUS_MCUDISABLEACKINT), SR_DISABLE_TIMEOUT,
+ timeout);
+
+ if (timeout >= SR_DISABLE_TIMEOUT)
+		dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timed out\n",
+ __func__);
+
+ /* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */
+ sr_write_reg(sr, IRQENABLE_CLR, IRQENABLE_MCUDISABLEACKINT);
+ sr_write_reg(sr, IRQSTATUS, IRQSTATUS_MCUDISABLEACKINT);
+}
+
+static struct omap_sr_nvalue_table *sr_retrieve_nvalue_row(
+ struct omap_sr *sr, u32 efuse_offs)
+{
+ int i;
+
+ if (!sr->nvalue_table) {
+ dev_warn(&sr->pdev->dev, "%s: Missing ntarget value table\n",
+ __func__);
+ return NULL;
+ }
+
+ for (i = 0; i < sr->nvalue_count; i++) {
+ if (sr->nvalue_table[i].efuse_offs == efuse_offs)
+ return &sr->nvalue_table[i];
+ }
+
+ return NULL;
+}
+
+/* Public Functions */
+
+/**
+ * sr_configure_errgen() - Configures the smartreflex to perform AVS using the
+ * error generator module.
+ * @voltdm: VDD pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * configure the error generator module inside the smartreflex module.
+ * These are the SR settings used when the ERROR module inside SmartReflex
+ * is in use. SR CLASS 3 by default uses only the ERROR module, whereas
+ * SR CLASS 2 can choose between the ERROR module and the MINMAXAVG
+ * module. Returns 0 on success and an error value in case of failure.
+ */
+int sr_configure_errgen(struct voltagedomain *voltdm)
+{
+ u32 sr_config, sr_errconfig, errconfig_offs;
+ u32 vpboundint_en, vpboundint_st;
+ u32 senp_en = 0, senn_en = 0;
+ u8 senp_shift, senn_shift;
+ struct omap_sr *sr = _sr_lookup(voltdm);
+
+ if (IS_ERR(sr)) {
+ pr_warning("%s: omap_sr struct for voltdm not found\n", __func__);
+ return PTR_ERR(sr);
+ }
+
+ if (!sr->clk_length)
+ sr_set_clk_length(sr);
+
+ senp_en = sr->senp_mod;
+ senn_en = sr->senn_mod;
+
+ sr_config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) |
+ SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN;
+
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_config |= SRCONFIG_DELAYCTRL;
+ senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT;
+ senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT;
+ errconfig_offs = ERRCONFIG_V1;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1;
+ break;
+ case SR_TYPE_V2:
+ senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT;
+ senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT;
+ errconfig_offs = ERRCONFIG_V2;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2;
+ break;
+ default:
+		dev_err(&sr->pdev->dev, "%s: Trying to configure smartreflex "
+			"module without specifying the ip\n", __func__);
+ return -EINVAL;
+ }
+
+ sr_config |= ((senn_en << senn_shift) | (senp_en << senp_shift));
+ sr_write_reg(sr, SRCONFIG, sr_config);
+ sr_errconfig = (sr->err_weight << ERRCONFIG_ERRWEIGHT_SHIFT) |
+ (sr->err_maxlimit << ERRCONFIG_ERRMAXLIMIT_SHIFT) |
+ (sr->err_minlimit << ERRCONFIG_ERRMINLIMIT_SHIFT);
+ sr_modify_reg(sr, errconfig_offs, (SR_ERRWEIGHT_MASK |
+ SR_ERRMAXLIMIT_MASK | SR_ERRMINLIMIT_MASK),
+ sr_errconfig);
+
+ /* Enabling the interrupts if the ERROR module is used */
+ sr_modify_reg(sr, errconfig_offs, (vpboundint_en | vpboundint_st),
+ vpboundint_en);
+
+ return 0;
+}
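
As a usage illustration, a SmartReflex class driver's ->configure hook would typically just forward to sr_configure_errgen() for its voltage domain. A minimal sketch, assuming the hook signature used by sr_class->configure in this file; the function name is made up:

	/*
	 * Sketch only: a Class 3 style ->configure hook that programs the
	 * error generator. struct omap_sr and sr_configure_errgen() are the
	 * ones defined in this file.
	 */
	static int example_sr_class_configure(struct omap_sr *sr)
	{
		return sr_configure_errgen(sr->voltdm);
	}
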
+
+/**
+ * sr_disable_errgen() - Disables SmartReflex AVS module's errgen component
+ * @voltdm: VDD pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * disable the error generator module inside the smartreflex module.
+ *
+ * Returns 0 on success and error value in case of failure.
+ */
+int sr_disable_errgen(struct voltagedomain *voltdm)
+{
+ u32 errconfig_offs;
+ u32 vpboundint_en, vpboundint_st;
+ struct omap_sr *sr = _sr_lookup(voltdm);
+
+ if (IS_ERR(sr)) {
+ pr_warning("%s: omap_sr struct for voltdm not found\n", __func__);
+ return PTR_ERR(sr);
+ }
+
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ errconfig_offs = ERRCONFIG_V1;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1;
+ break;
+ case SR_TYPE_V2:
+ errconfig_offs = ERRCONFIG_V2;
+ vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2;
+ vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2;
+ break;
+ default:
+		dev_err(&sr->pdev->dev, "%s: Trying to configure smartreflex "
+			"module without specifying the ip\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Disable the interrupts of ERROR module */
+ sr_modify_reg(sr, errconfig_offs, vpboundint_en | vpboundint_st, 0);
+
+ /* Disable the Sensor and errorgen */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN, 0);
+
+ return 0;
+}
+
+/**
+ * sr_configure_minmax() - Configures the smartreflex to perform AVS using the
+ * minmaxavg module.
+ * @voltdm: VDD pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * configure the minmaxavg module inside the smartreflex module.
+ * These are the SR settings used when the MINMAXAVG module inside
+ * SmartReflex is in use. SR CLASS 3 by default uses only the ERROR
+ * module, whereas SR CLASS 2 can choose between the ERROR module and
+ * the MINMAXAVG module. Returns 0 on success and an error value in
+ * case of failure.
+ */
+int sr_configure_minmax(struct voltagedomain *voltdm)
+{
+ u32 sr_config, sr_avgwt;
+ u32 senp_en = 0, senn_en = 0;
+ u8 senp_shift, senn_shift;
+ struct omap_sr *sr = _sr_lookup(voltdm);
+
+ if (IS_ERR(sr)) {
+ pr_warning("%s: omap_sr struct for voltdm not found\n", __func__);
+ return PTR_ERR(sr);
+ }
+
+ if (!sr->clk_length)
+ sr_set_clk_length(sr);
+
+ senp_en = sr->senp_mod;
+ senn_en = sr->senn_mod;
+
+ sr_config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) |
+ SRCONFIG_SENENABLE |
+ (sr->accum_data << SRCONFIG_ACCUMDATA_SHIFT);
+
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_config |= SRCONFIG_DELAYCTRL;
+ senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT;
+ senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT;
+ break;
+ case SR_TYPE_V2:
+ senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT;
+ senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT;
+ break;
+ default:
+		dev_err(&sr->pdev->dev, "%s: Trying to configure smartreflex "
+			"module without specifying the ip\n", __func__);
+ return -EINVAL;
+ }
+
+ sr_config |= ((senn_en << senn_shift) | (senp_en << senp_shift));
+ sr_write_reg(sr, SRCONFIG, sr_config);
+ sr_avgwt = (sr->senp_avgweight << AVGWEIGHT_SENPAVGWEIGHT_SHIFT) |
+ (sr->senn_avgweight << AVGWEIGHT_SENNAVGWEIGHT_SHIFT);
+ sr_write_reg(sr, AVGWEIGHT, sr_avgwt);
+
+ /*
+ * Enabling the interrupts if MINMAXAVG module is used.
+ * TODO: check if all the interrupts are mandatory
+ */
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_modify_reg(sr, ERRCONFIG_V1,
+ (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN |
+ ERRCONFIG_MCUBOUNDINTEN),
+ (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUACCUMINTST |
+ ERRCONFIG_MCUVALIDINTEN | ERRCONFIG_MCUVALIDINTST |
+ ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_MCUBOUNDINTST));
+ break;
+ case SR_TYPE_V2:
+ sr_write_reg(sr, IRQSTATUS,
+ IRQSTATUS_MCUACCUMINT | IRQSTATUS_MCVALIDINT |
+ IRQSTATUS_MCBOUNDSINT | IRQSTATUS_MCUDISABLEACKINT);
+ sr_write_reg(sr, IRQENABLE_SET,
+ IRQENABLE_MCUACCUMINT | IRQENABLE_MCUVALIDINT |
+ IRQENABLE_MCUBOUNDSINT | IRQENABLE_MCUDISABLEACKINT);
+ break;
+ default:
+		dev_err(&sr->pdev->dev, "%s: Trying to configure smartreflex "
+			"module without specifying the ip\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
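
For a Class 2 style driver, the ->configure hook would call this instead of sr_configure_errgen(); a one-line sketch under the same assumptions as the errgen example above:

	/* Sketch only: Class 2 style ->configure hook using MINMAXAVG. */
	static int example_sr_class2_configure(struct omap_sr *sr)
	{
		return sr_configure_minmax(sr->voltdm);
	}
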
+
+/**
+ * sr_enable() - Enables the smartreflex module.
+ * @voltdm: VDD pointer to which the SR module to be configured belongs to.
+ * @volt: The voltage at which the Voltage domain associated with
+ *		the smartreflex module is operating.
+ * This is required only to program the correct Ntarget value.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * enable a smartreflex module. Returns 0 on success. Returns error
+ * value if the voltage passed is wrong or if ntarget value is wrong.
+ */
+int sr_enable(struct voltagedomain *voltdm, unsigned long volt)
+{
+ struct omap_volt_data *volt_data;
+ struct omap_sr *sr = _sr_lookup(voltdm);
+ struct omap_sr_nvalue_table *nvalue_row;
+ int ret;
+
+ if (IS_ERR(sr)) {
+ pr_warning("%s: omap_sr struct for voltdm not found\n", __func__);
+ return PTR_ERR(sr);
+ }
+
+ volt_data = omap_voltage_get_voltdata(sr->voltdm, volt);
+
+ if (IS_ERR(volt_data)) {
+		dev_warn(&sr->pdev->dev, "%s: Unable to get voltage table "
+			"for nominal voltage %ld\n", __func__, volt);
+ return PTR_ERR(volt_data);
+ }
+
+ nvalue_row = sr_retrieve_nvalue_row(sr, volt_data->sr_efuse_offs);
+
+ if (!nvalue_row) {
+ dev_warn(&sr->pdev->dev, "%s: failure getting SR data for this voltage %ld\n",
+ __func__, volt);
+ return -ENODATA;
+ }
+
+ /* errminlimit is opp dependent and hence linked to voltage */
+ sr->err_minlimit = nvalue_row->errminlimit;
+
+ pm_runtime_get_sync(&sr->pdev->dev);
+
+ /* Check if SR is already enabled. If yes do nothing */
+ if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE)
+ return 0;
+
+ /* Configure SR */
+ ret = sr_class->configure(sr);
+ if (ret)
+ return ret;
+
+ sr_write_reg(sr, NVALUERECIPROCAL, nvalue_row->nvalue);
+
+ /* SRCONFIG - enable SR */
+ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, SRCONFIG_SRENABLE);
+ return 0;
+}
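
A class driver's ->enable hook then only has to look up the current nominal voltage and call sr_enable(); sr_enable() itself invokes sr_class->configure() as shown above. A minimal sketch, assuming the platform's voltage layer provides a helper such as voltdm_get_voltage() (not part of this file):

	/*
	 * Sketch only: ->enable hook for a class driver. voltdm_get_voltage()
	 * is an assumed OMAP voltage-layer helper; error handling is minimal.
	 */
	static int example_sr_class_enable(struct omap_sr *sr)
	{
		unsigned long volt = voltdm_get_voltage(sr->voltdm);

		if (!volt)
			return -ENODATA;

		return sr_enable(sr->voltdm, volt);
	}
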
+
+/**
+ * sr_disable() - Disables the smartreflex module.
+ * @voltdm: VDD pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the smartreflex class driver to
+ * disable a smartreflex module.
+ */
+void sr_disable(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr = _sr_lookup(voltdm);
+
+ if (IS_ERR(sr)) {
+ pr_warning("%s: omap_sr struct for voltdm not found\n", __func__);
+ return;
+ }
+
+ /* Check if SR clocks are already disabled. If yes do nothing */
+ if (pm_runtime_suspended(&sr->pdev->dev))
+ return;
+
+ /*
+	 * Disable SR only if it is indeed enabled. Else just
+	 * disable the clocks.
+ */
+ if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE) {
+ switch (sr->ip_type) {
+ case SR_TYPE_V1:
+ sr_v1_disable(sr);
+ break;
+ case SR_TYPE_V2:
+ sr_v2_disable(sr);
+ break;
+ default:
+ dev_err(&sr->pdev->dev, "UNKNOWN IP type %d\n",
+ sr->ip_type);
+ }
+ }
+
+ pm_runtime_put_sync_suspend(&sr->pdev->dev);
+}
+
+/**
+ * sr_register_class() - API to register a smartreflex class parameters.
+ * @class_data: The structure containing various sr class specific data.
+ *
+ * This API is to be called by the smartreflex class driver to register itself
+ * with the smartreflex driver during init. Returns 0 on success else the
+ * error value.
+ */
+int sr_register_class(struct omap_sr_class_data *class_data)
+{
+ struct omap_sr *sr_info;
+
+ if (!class_data) {
+		pr_warning("%s: Smartreflex class data passed is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (sr_class) {
+ pr_warning("%s: Smartreflex class driver already registered\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ sr_class = class_data;
+
+ /*
+	 * Call into late init to do initializations that require
+	 * both the SR driver and the SR class driver to be initialized.
+ */
+ list_for_each_entry(sr_info, &sr_list, node)
+ sr_late_init(sr_info);
+
+ return 0;
+}
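
Putting the pieces together, a class driver fills an omap_sr_class_data with the hooks consumed in this file (enable, disable, configure and, optionally, notify) and registers it. A hedged sketch reusing the example hooks above; the structure field names mirror how they are dereferenced in this file:

	/*
	 * Sketch only: example_sr_class_disable() mirrors how
	 * sr_class->disable(sr, is_volt_reset) is called above; voltage
	 * reset handling is omitted.
	 */
	static int example_sr_class_disable(struct omap_sr *sr, int is_volt_reset)
	{
		sr_disable_errgen(sr->voltdm);
		sr_disable(sr->voltdm);
		return 0;
	}

	static struct omap_sr_class_data example_sr_class = {
		.enable		= example_sr_class_enable,
		.disable	= example_sr_class_disable,
		.configure	= example_sr_class_configure,
	};

	static int __init example_sr_class_init(void)
	{
		return sr_register_class(&example_sr_class);
	}
	late_initcall(example_sr_class_init);
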
+
+/**
+ * omap_sr_enable() - API to enable SR clocks and to call into the
+ * registered smartreflex class enable API.
+ * @voltdm: VDD pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the kernel in order to enable
+ * a particular smartreflex module. This API will do the initial
+ * configurations to turn on the smartreflex module and in turn call
+ * into the registered smartreflex class enable API.
+ */
+void omap_sr_enable(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr = _sr_lookup(voltdm);
+
+ if (IS_ERR(sr)) {
+ pr_warning("%s: omap_sr struct for voltdm not found\n", __func__);
+ return;
+ }
+
+ if (!sr->autocomp_active)
+ return;
+
+ if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) {
+		dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not "
+			"registered\n", __func__);
+ return;
+ }
+
+ sr_class->enable(sr);
+}
+
+/**
+ * omap_sr_disable() - API to disable SR without resetting the voltage
+ * processor voltage
+ * @voltdm: VDD pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the kernel in order to disable
+ * a particular smartreflex module. This API will in turn call
+ * into the registered smartreflex class disable API. This API will tell
+ * the smartreflex class disable not to reset the VP voltage after
+ * disabling smartreflex.
+ */
+void omap_sr_disable(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr = _sr_lookup(voltdm);
+
+ if (IS_ERR(sr)) {
+ pr_warning("%s: omap_sr struct for voltdm not found\n", __func__);
+ return;
+ }
+
+ if (!sr->autocomp_active)
+ return;
+
+ if (!sr_class || !(sr_class->disable)) {
+		dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not "
+			"registered\n", __func__);
+ return;
+ }
+
+ sr_class->disable(sr, 0);
+}
+
+/**
+ * omap_sr_disable_reset_volt() - API to disable SR and reset the
+ * voltage processor voltage
+ * @voltdm: VDD pointer to which the SR module to be configured belongs to.
+ *
+ * This API is to be called from the kernel in order to disable
+ * a particular smartreflex module. This API will in turn call
+ * into the registered smartreflex class disable API. This API will tell
+ * the smartreflex class disable to reset the VP voltage after
+ * disabling smartreflex.
+ */
+void omap_sr_disable_reset_volt(struct voltagedomain *voltdm)
+{
+ struct omap_sr *sr = _sr_lookup(voltdm);
+
+ if (IS_ERR(sr)) {
+ pr_warning("%s: omap_sr struct for voltdm not found\n", __func__);
+ return;
+ }
+
+ if (!sr->autocomp_active)
+ return;
+
+ if (!sr_class || !(sr_class->disable)) {
+		dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not "
+			"registered\n", __func__);
+ return;
+ }
+
+ sr_class->disable(sr, 1);
+}
+
+/**
+ * omap_sr_register_pmic() - API to register pmic specific info.
+ * @pmic_data: The structure containing pmic specific data.
+ *
+ * This API is to be called from the PMIC specific code to register with
+ * smartreflex driver pmic specific info. Currently the only info required
+ * is the smartreflex init on the PMIC side.
+ */
+void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data)
+{
+ if (!pmic_data) {
+		pr_warning("%s: Trying to register NULL PMIC data structure "
+			"with smartreflex\n", __func__);
+ return;
+ }
+
+ sr_pmic_data = pmic_data;
+}
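
On the PMIC side this boils down to passing a structure whose sr_pmic_init hook is invoked from sr_init() below; a minimal sketch with illustrative names:

	/*
	 * Sketch only: sr_pmic_init is the only field this driver uses
	 * (see sr_init() below); its body is PMIC/board specific.
	 */
	static void example_pmic_sr_init(void)
	{
		/* program PMIC-side SmartReflex settings here */
	}

	static struct omap_sr_pmic_data example_pmic_data = {
		.sr_pmic_init	= example_pmic_sr_init,
	};

	void example_board_sr_init(void)
	{
		omap_sr_register_pmic(&example_pmic_data);
	}
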
+
+/* PM Debug FS entries to enable and disable smartreflex. */
+static int omap_sr_autocomp_show(void *data, u64 *val)
+{
+ struct omap_sr *sr_info = data;
+
+ if (!sr_info) {
+ pr_warning("%s: omap_sr struct not found\n", __func__);
+ return -EINVAL;
+ }
+
+ *val = sr_info->autocomp_active;
+
+ return 0;
+}
+
+static int omap_sr_autocomp_store(void *data, u64 val)
+{
+ struct omap_sr *sr_info = data;
+
+ if (!sr_info) {
+ pr_warning("%s: omap_sr struct not found\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Sanity check */
+ if (val > 1) {
+ pr_warning("%s: Invalid argument %lld\n", __func__, val);
+ return -EINVAL;
+ }
+
+ /* control enable/disable only if there is a delta in value */
+ if (sr_info->autocomp_active != val) {
+ if (!val)
+ sr_stop_vddautocomp(sr_info);
+ else
+ sr_start_vddautocomp(sr_info);
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pm_sr_fops, omap_sr_autocomp_show,
+ omap_sr_autocomp_store, "%llu\n");
+
+static int __init omap_sr_probe(struct platform_device *pdev)
+{
+ struct omap_sr *sr_info;
+ struct omap_sr_data *pdata = pdev->dev.platform_data;
+ struct resource *mem, *irq;
+ struct dentry *nvalue_dir;
+ int i, ret = 0;
+
+ sr_info = kzalloc(sizeof(struct omap_sr), GFP_KERNEL);
+ if (!sr_info) {
+ dev_err(&pdev->dev, "%s: unable to allocate sr_info\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, sr_info);
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
+ ret = -EINVAL;
+ goto err_free_devinfo;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "%s: no mem resource\n", __func__);
+ ret = -ENODEV;
+ goto err_free_devinfo;
+ }
+
+ mem = request_mem_region(mem->start, resource_size(mem),
+ dev_name(&pdev->dev));
+ if (!mem) {
+ dev_err(&pdev->dev, "%s: no mem region\n", __func__);
+ ret = -EBUSY;
+ goto err_free_devinfo;
+ }
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_irq_safe(&pdev->dev);
+
+ sr_info->name = kasprintf(GFP_KERNEL, "%s", pdata->name);
+ if (!sr_info->name) {
+ dev_err(&pdev->dev, "%s: Unable to alloc SR instance name\n",
+ __func__);
+ ret = -ENOMEM;
+ goto err_release_region;
+ }
+
+ sr_info->pdev = pdev;
+ sr_info->srid = pdev->id;
+ sr_info->voltdm = pdata->voltdm;
+ sr_info->nvalue_table = pdata->nvalue_table;
+ sr_info->nvalue_count = pdata->nvalue_count;
+ sr_info->senn_mod = pdata->senn_mod;
+ sr_info->senp_mod = pdata->senp_mod;
+ sr_info->autocomp_active = false;
+ sr_info->ip_type = pdata->ip_type;
+ sr_info->base = ioremap(mem->start, resource_size(mem));
+ if (!sr_info->base) {
+ dev_err(&pdev->dev, "%s: ioremap fail\n", __func__);
+ ret = -ENOMEM;
+ goto err_release_region;
+ }
+
+ if (irq)
+ sr_info->irq = irq->start;
+
+ sr_set_clk_length(sr_info);
+ sr_set_regfields(sr_info);
+
+ list_add(&sr_info->node, &sr_list);
+
+ /*
+	 * Call into late init to do initializations that require
+	 * both the SR driver and the SR class driver to be initialized.
+ */
+ if (sr_class) {
+ ret = sr_late_init(sr_info);
+ if (ret) {
+ pr_warning("%s: Error in SR late init\n", __func__);
+ goto err_iounmap;
+ }
+ }
+
+ dev_info(&pdev->dev, "%s: SmartReflex driver initialized\n", __func__);
+ if (!sr_dbg_dir) {
+ sr_dbg_dir = debugfs_create_dir("smartreflex", NULL);
+ if (IS_ERR_OR_NULL(sr_dbg_dir)) {
+ ret = PTR_ERR(sr_dbg_dir);
+			pr_err("%s: sr debugfs dir creation failed (%d)\n",
+ __func__, ret);
+ goto err_iounmap;
+ }
+ }
+
+ sr_info->dbg_dir = debugfs_create_dir(sr_info->name, sr_dbg_dir);
+ if (IS_ERR_OR_NULL(sr_info->dbg_dir)) {
+ dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n",
+ __func__);
+ ret = PTR_ERR(sr_info->dbg_dir);
+ goto err_free_name;
+ }
+
+ (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR,
+ sr_info->dbg_dir, (void *)sr_info, &pm_sr_fops);
+ (void) debugfs_create_x32("errweight", S_IRUGO, sr_info->dbg_dir,
+ &sr_info->err_weight);
+ (void) debugfs_create_x32("errmaxlimit", S_IRUGO, sr_info->dbg_dir,
+ &sr_info->err_maxlimit);
+
+ nvalue_dir = debugfs_create_dir("nvalue", sr_info->dbg_dir);
+ if (IS_ERR_OR_NULL(nvalue_dir)) {
+		dev_err(&pdev->dev, "%s: Unable to create debugfs directory "
+			"for n-values\n", __func__);
+ ret = PTR_ERR(nvalue_dir);
+ goto err_debugfs;
+ }
+
+ if (sr_info->nvalue_count == 0 || !sr_info->nvalue_table) {
+ dev_warn(&pdev->dev, "%s: %s: No Voltage table for the corresponding vdd. Cannot create debugfs entries for n-values\n",
+ __func__, sr_info->name);
+
+ ret = -ENODATA;
+ goto err_debugfs;
+ }
+
+ for (i = 0; i < sr_info->nvalue_count; i++) {
+ char name[NVALUE_NAME_LEN + 1];
+
+ snprintf(name, sizeof(name), "volt_%lu",
+ sr_info->nvalue_table[i].volt_nominal);
+ (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
+ &(sr_info->nvalue_table[i].nvalue));
+ snprintf(name, sizeof(name), "errminlimit_%lu",
+ sr_info->nvalue_table[i].volt_nominal);
+ (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
+ &(sr_info->nvalue_table[i].errminlimit));
+
+ }
+
+ return ret;
+
+err_debugfs:
+ debugfs_remove_recursive(sr_info->dbg_dir);
+err_free_name:
+ kfree(sr_info->name);
+err_iounmap:
+ list_del(&sr_info->node);
+ iounmap(sr_info->base);
+err_release_region:
+ release_mem_region(mem->start, resource_size(mem));
+err_free_devinfo:
+ kfree(sr_info);
+
+ return ret;
+}
+
+static int __devexit omap_sr_remove(struct platform_device *pdev)
+{
+ struct omap_sr_data *pdata = pdev->dev.platform_data;
+ struct omap_sr *sr_info;
+ struct resource *mem;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
+ return -EINVAL;
+ }
+
+ sr_info = _sr_lookup(pdata->voltdm);
+ if (IS_ERR(sr_info)) {
+ dev_warn(&pdev->dev, "%s: omap_sr struct not found\n",
+ __func__);
+ return PTR_ERR(sr_info);
+ }
+
+ if (sr_info->autocomp_active)
+ sr_stop_vddautocomp(sr_info);
+ if (sr_info->dbg_dir)
+ debugfs_remove_recursive(sr_info->dbg_dir);
+
+ list_del(&sr_info->node);
+ iounmap(sr_info->base);
+ kfree(sr_info->name);
+ kfree(sr_info);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+
+ return 0;
+}
+
+static void __devexit omap_sr_shutdown(struct platform_device *pdev)
+{
+ struct omap_sr_data *pdata = pdev->dev.platform_data;
+ struct omap_sr *sr_info;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
+ return;
+ }
+
+ sr_info = _sr_lookup(pdata->voltdm);
+ if (IS_ERR(sr_info)) {
+ dev_warn(&pdev->dev, "%s: omap_sr struct not found\n",
+ __func__);
+ return;
+ }
+
+ if (sr_info->autocomp_active)
+ sr_stop_vddautocomp(sr_info);
+
+ return;
+}
+
+static struct platform_driver smartreflex_driver = {
+ .remove = __devexit_p(omap_sr_remove),
+ .shutdown = __devexit_p(omap_sr_shutdown),
+ .driver = {
+ .name = "smartreflex",
+ },
+};
+
+static int __init sr_init(void)
+{
+ int ret = 0;
+
+ /*
+	 * sr_init is a late init. If by then a PMIC-specific API is not
+	 * registered, either there is no need for anything to be done on
+ * the PMIC side or somebody has forgotten to register a PMIC
+ * handler. Warn for the second condition.
+ */
+ if (sr_pmic_data && sr_pmic_data->sr_pmic_init)
+ sr_pmic_data->sr_pmic_init();
+ else
+ pr_warning("%s: No PMIC hook to init smartreflex\n", __func__);
+
+ ret = platform_driver_probe(&smartreflex_driver, omap_sr_probe);
+ if (ret) {
+ pr_err("%s: platform driver register failed for SR\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+late_initcall(sr_init);
+
+static void __exit sr_exit(void)
+{
+ platform_driver_unregister(&smartreflex_driver);
+}
+module_exit(sr_exit);
+
+MODULE_DESCRIPTION("OMAP Smartreflex Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_AUTHOR("Texas Instruments Inc");
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 9eca9f1..86935ec 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -23,6 +23,16 @@
#include <linux/power/charger-manager.h>
#include <linux/regulator/consumer.h>
+static const char * const default_event_names[] = {
+ [CM_EVENT_UNKNOWN] = "Unknown",
+ [CM_EVENT_BATT_FULL] = "Battery Full",
+ [CM_EVENT_BATT_IN] = "Battery Inserted",
+ [CM_EVENT_BATT_OUT] = "Battery Pulled Out",
+ [CM_EVENT_EXT_PWR_IN_OUT] = "External Power Attach/Detach",
+ [CM_EVENT_CHG_START_STOP] = "Charging Start/Stop",
+ [CM_EVENT_OTHERS] = "Other battery events"
+};
+
/*
* Regard CM_JIFFIES_SMALL jiffies is small enough to ignore for
* delayed works so that we can run delayed works with CM_JIFFIES_SMALL
@@ -57,6 +67,12 @@ static bool cm_suspended;
static bool cm_rtc_set;
static unsigned long cm_suspend_duration_ms;
+/* About normal (not suspended) monitoring */
+static unsigned long polling_jiffy = ULONG_MAX; /* ULONG_MAX: no polling */
+static unsigned long next_polling; /* Next appointed polling time */
+static struct workqueue_struct *cm_wq; /* init at driver add */
+static struct delayed_work cm_monitor_work; /* init at driver add */
+
/* Global charger-manager description */
static struct charger_global_desc *g_desc; /* init with setup_charger_manager */
@@ -71,6 +87,11 @@ static bool is_batt_present(struct charger_manager *cm)
int i, ret;
switch (cm->desc->battery_present) {
+ case CM_BATTERY_PRESENT:
+ present = true;
+ break;
+ case CM_NO_BATTERY:
+ break;
case CM_FUEL_GAUGE:
ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
POWER_SUPPLY_PROP_PRESENT, &val);
@@ -279,6 +300,26 @@ static int try_charger_enable(struct charger_manager *cm, bool enable)
}
/**
+ * try_charger_restart - Restart charging.
+ * @cm: the Charger Manager representing the battery.
+ *
+ * Restart charging by turning off and on the charger.
+ */
+static int try_charger_restart(struct charger_manager *cm)
+{
+ int err;
+
+ if (cm->emergency_stop)
+ return -EAGAIN;
+
+ err = try_charger_enable(cm, false);
+ if (err)
+ return err;
+
+ return try_charger_enable(cm, true);
+}
+
+/**
* uevent_notify - Let users know something has changed.
* @cm: the Charger Manager representing the battery.
* @event: the event string.
@@ -334,6 +375,46 @@ static void uevent_notify(struct charger_manager *cm, const char *event)
}
/**
+ * fullbatt_vchk - Check the voltage drop some time after the "FULL" event.
+ * @work: the work_struct appointing the function
+ *
+ * If a user has designated "fullbatt_vchkdrop_ms/uV" values with
+ * charger_desc, Charger Manager checks voltage drop after the battery
+ * "FULL" event. It checks whether the voltage has dropped more than
+ * fullbatt_vchkdrop_uV by calling this function after fullbatt_vchkdrop_ms.
+ */
+static void fullbatt_vchk(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct charger_manager *cm = container_of(dwork,
+ struct charger_manager, fullbatt_vchk_work);
+ struct charger_desc *desc = cm->desc;
+ int batt_uV, err, diff;
+
+ /* remove the appointment for fullbatt_vchk */
+ cm->fullbatt_vchk_jiffies_at = 0;
+
+ if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms)
+ return;
+
+ err = get_batt_uV(cm, &batt_uV);
+ if (err) {
+ dev_err(cm->dev, "%s: get_batt_uV error(%d).\n", __func__, err);
+ return;
+ }
+
+ diff = cm->fullbatt_vchk_uV;
+ diff -= batt_uV;
+
+ dev_dbg(cm->dev, "VBATT dropped %duV after full-batt.\n", diff);
+
+ if (diff > desc->fullbatt_vchkdrop_uV) {
+ try_charger_restart(cm);
+ uevent_notify(cm, "Recharge");
+ }
+}
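
The two thresholds used here come from the platform's charger_desc; a minimal sketch of the relevant fields with arbitrary example values (all other mandatory charger_desc fields are omitted):

	/*
	 * Sketch only: trigger a recharge if VBATT drops by more than 50 mV
	 * within 30 s of the "FULL" event. The field names are the ones used
	 * in this file; the values are illustrative.
	 */
	static struct charger_desc example_desc = {
		.fullbatt_vchkdrop_ms	= 30000,
		.fullbatt_vchkdrop_uV	= 50000,
		/* ... regulators, fuel gauge, polling parameters, ... */
	};
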
+
+/**
* _cm_monitor - Monitor the temperature and return true for exceptions.
* @cm: the Charger Manager representing the battery.
*
@@ -392,6 +473,131 @@ static bool cm_monitor(void)
return stop;
}
+/**
+ * _setup_polling - Setup the next instance of polling.
+ * @work: work_struct of the function _setup_polling.
+ */
+static void _setup_polling(struct work_struct *work)
+{
+ unsigned long min = ULONG_MAX;
+ struct charger_manager *cm;
+ bool keep_polling = false;
+ unsigned long _next_polling;
+
+ mutex_lock(&cm_list_mtx);
+
+ list_for_each_entry(cm, &cm_list, entry) {
+ if (is_polling_required(cm) && cm->desc->polling_interval_ms) {
+ keep_polling = true;
+
+ if (min > cm->desc->polling_interval_ms)
+ min = cm->desc->polling_interval_ms;
+ }
+ }
+
+ polling_jiffy = msecs_to_jiffies(min);
+ if (polling_jiffy <= CM_JIFFIES_SMALL)
+ polling_jiffy = CM_JIFFIES_SMALL + 1;
+
+ if (!keep_polling)
+ polling_jiffy = ULONG_MAX;
+ if (polling_jiffy == ULONG_MAX)
+ goto out;
+
+	WARN(cm_wq == NULL, "charger-manager: workqueue not initialized"
+			    ". Try it later. %s\n", __func__);
+
+ _next_polling = jiffies + polling_jiffy;
+
+ if (!delayed_work_pending(&cm_monitor_work) ||
+ (delayed_work_pending(&cm_monitor_work) &&
+ time_after(next_polling, _next_polling))) {
+ cancel_delayed_work_sync(&cm_monitor_work);
+ next_polling = jiffies + polling_jiffy;
+ queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
+ }
+
+out:
+ mutex_unlock(&cm_list_mtx);
+}
+static DECLARE_WORK(setup_polling, _setup_polling);
+
+/**
+ * cm_monitor_poller - The Monitor / Poller.
+ * @work: work_struct of the function cm_monitor_poller
+ *
+ * During non-suspended state, cm_monitor_poller is used to poll and monitor
+ * the batteries.
+ */
+static void cm_monitor_poller(struct work_struct *work)
+{
+ cm_monitor();
+ schedule_work(&setup_polling);
+}
+
+/**
+ * fullbatt_handler - Event handler for CM_EVENT_BATT_FULL
+ * @cm: the Charger Manager representing the battery.
+ */
+static void fullbatt_handler(struct charger_manager *cm)
+{
+ struct charger_desc *desc = cm->desc;
+
+ if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms)
+ goto out;
+
+ if (cm_suspended)
+ device_set_wakeup_capable(cm->dev, true);
+
+ if (delayed_work_pending(&cm->fullbatt_vchk_work))
+ cancel_delayed_work(&cm->fullbatt_vchk_work);
+ queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
+ msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
+ cm->fullbatt_vchk_jiffies_at = jiffies + msecs_to_jiffies(
+ desc->fullbatt_vchkdrop_ms);
+
+ if (cm->fullbatt_vchk_jiffies_at == 0)
+ cm->fullbatt_vchk_jiffies_at = 1;
+
+out:
+ dev_info(cm->dev, "EVENT_HANDLE: Battery Fully Charged.\n");
+ uevent_notify(cm, default_event_names[CM_EVENT_BATT_FULL]);
+}
+
+/**
+ * battout_handler - Event handler for CM_EVENT_BATT_OUT
+ * @cm: the Charger Manager representing the battery.
+ */
+static void battout_handler(struct charger_manager *cm)
+{
+ if (cm_suspended)
+ device_set_wakeup_capable(cm->dev, true);
+
+ if (!is_batt_present(cm)) {
+ dev_emerg(cm->dev, "Battery Pulled Out!\n");
+ uevent_notify(cm, default_event_names[CM_EVENT_BATT_OUT]);
+ } else {
+ uevent_notify(cm, "Battery Reinserted?");
+ }
+}
+
+/**
+ * misc_event_handler - Handler for other events
+ * @cm: the Charger Manager representing the battery.
+ * @type: the type of charger event.
+ */
+static void misc_event_handler(struct charger_manager *cm,
+ enum cm_event_types type)
+{
+ if (cm_suspended)
+ device_set_wakeup_capable(cm->dev, true);
+
+ if (!delayed_work_pending(&cm_monitor_work) &&
+ is_polling_required(cm) && cm->desc->polling_interval_ms)
+ schedule_work(&setup_polling);
+ uevent_notify(cm, default_event_names[type]);
+}
+
static int charger_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -613,6 +819,21 @@ static bool cm_setup_timer(void)
mutex_lock(&cm_list_mtx);
list_for_each_entry(cm, &cm_list, entry) {
+ unsigned int fbchk_ms = 0;
+
+ /* fullbatt_vchk is required. setup timer for that */
+ if (cm->fullbatt_vchk_jiffies_at) {
+ fbchk_ms = jiffies_to_msecs(cm->fullbatt_vchk_jiffies_at
+ - jiffies);
+ if (time_is_before_eq_jiffies(
+ cm->fullbatt_vchk_jiffies_at) ||
+ msecs_to_jiffies(fbchk_ms) < CM_JIFFIES_SMALL) {
+ fullbatt_vchk(&cm->fullbatt_vchk_work.work);
+ fbchk_ms = 0;
+ }
+ }
+ CM_MIN_VALID(wakeup_ms, fbchk_ms);
+
/* Skip if polling is not required for this CM */
if (!is_polling_required(cm) && !cm->emergency_stop)
continue;
@@ -672,6 +893,23 @@ static bool cm_setup_timer(void)
return false;
}
+static void _cm_fbchk_in_suspend(struct charger_manager *cm)
+{
+ unsigned long jiffy_now = jiffies;
+
+ if (!cm->fullbatt_vchk_jiffies_at)
+ return;
+
+ if (g_desc && g_desc->assume_timer_stops_in_suspend)
+ jiffy_now += msecs_to_jiffies(cm_suspend_duration_ms);
+
+ /* Execute now if it's going to be executed not too long after */
+ jiffy_now += CM_JIFFIES_SMALL;
+
+ if (time_after_eq(jiffy_now, cm->fullbatt_vchk_jiffies_at))
+ fullbatt_vchk(&cm->fullbatt_vchk_work.work);
+}
+
/**
* cm_suspend_again - Determine whether suspend again or not
*
@@ -693,6 +931,8 @@ bool cm_suspend_again(void)
ret = true;
mutex_lock(&cm_list_mtx);
list_for_each_entry(cm, &cm_list, entry) {
+ _cm_fbchk_in_suspend(cm);
+
if (cm->status_save_ext_pwr_inserted != is_ext_pwr_online(cm) ||
cm->status_save_batt != is_batt_present(cm)) {
ret = false;
@@ -796,6 +1036,21 @@ static int charger_manager_probe(struct platform_device *pdev)
memcpy(cm->desc, desc, sizeof(struct charger_desc));
cm->last_temp_mC = INT_MIN; /* denotes "unmeasured, yet" */
+ /*
+ * The following two do not need to be errors.
+ * Users may intentionally ignore those two features.
+ */
+ if (desc->fullbatt_uV == 0) {
+ dev_info(&pdev->dev, "Ignoring full-battery voltage threshold"
+					" as it is not supplied.\n");
+ }
+ if (!desc->fullbatt_vchkdrop_ms || !desc->fullbatt_vchkdrop_uV) {
+ dev_info(&pdev->dev, "Disabling full-battery voltage drop "
+				"checking mechanism as it is not supplied.\n");
+ desc->fullbatt_vchkdrop_ms = 0;
+ desc->fullbatt_vchkdrop_uV = 0;
+ }
+
if (!desc->charger_regulators || desc->num_charger_regulators < 1) {
ret = -EINVAL;
dev_err(&pdev->dev, "charger_regulators undefined.\n");
@@ -903,6 +1158,8 @@ static int charger_manager_probe(struct platform_device *pdev)
cm->charger_psy.num_properties++;
}
+ INIT_DELAYED_WORK(&cm->fullbatt_vchk_work, fullbatt_vchk);
+
ret = power_supply_register(NULL, &cm->charger_psy);
if (ret) {
dev_err(&pdev->dev, "Cannot register charger-manager with"
@@ -928,6 +1185,15 @@ static int charger_manager_probe(struct platform_device *pdev)
list_add(&cm->entry, &cm_list);
mutex_unlock(&cm_list_mtx);
+ /*
+	 * Charger-manager is capable of waking up the system from sleep
+	 * when an event happens through cm_notify_event()
+ */
+ device_init_wakeup(&pdev->dev, true);
+ device_set_wakeup_capable(&pdev->dev, false);
+
+ schedule_work(&setup_polling);
+
return 0;
err_chg_enable:
@@ -958,9 +1224,17 @@ static int __devexit charger_manager_remove(struct platform_device *pdev)
list_del(&cm->entry);
mutex_unlock(&cm_list_mtx);
+ if (work_pending(&setup_polling))
+ cancel_work_sync(&setup_polling);
+ if (delayed_work_pending(&cm_monitor_work))
+ cancel_delayed_work_sync(&cm_monitor_work);
+
regulator_bulk_free(desc->num_charger_regulators,
desc->charger_regulators);
power_supply_unregister(&cm->charger_psy);
+
+ try_charger_enable(cm, false);
+
kfree(cm->charger_psy.properties);
kfree(cm->charger_stat);
kfree(cm->desc);
@@ -975,6 +1249,18 @@ static const struct platform_device_id charger_manager_id[] = {
};
MODULE_DEVICE_TABLE(platform, charger_manager_id);
+static int cm_suspend_noirq(struct device *dev)
+{
+ int ret = 0;
+
+ if (device_may_wakeup(dev)) {
+ device_set_wakeup_capable(dev, false);
+ ret = -EAGAIN;
+ }
+
+ return ret;
+}
+
static int cm_suspend_prepare(struct device *dev)
{
struct charger_manager *cm = dev_get_drvdata(dev);
@@ -1000,6 +1286,8 @@ static int cm_suspend_prepare(struct device *dev)
cm_suspended = true;
}
+ if (delayed_work_pending(&cm->fullbatt_vchk_work))
+ cancel_delayed_work(&cm->fullbatt_vchk_work);
cm->status_save_ext_pwr_inserted = is_ext_pwr_online(cm);
cm->status_save_batt = is_batt_present(cm);
@@ -1027,11 +1315,40 @@ static void cm_suspend_complete(struct device *dev)
cm_rtc_set = false;
}
+ /* Re-enqueue delayed work (fullbatt_vchk_work) */
+ if (cm->fullbatt_vchk_jiffies_at) {
+ unsigned long delay = 0;
+ unsigned long now = jiffies + CM_JIFFIES_SMALL;
+
+ if (time_after_eq(now, cm->fullbatt_vchk_jiffies_at)) {
+ delay = (unsigned long)((long)now
+ - (long)(cm->fullbatt_vchk_jiffies_at));
+ delay = jiffies_to_msecs(delay);
+ } else {
+ delay = 0;
+ }
+
+ /*
+ * Account for cm_suspend_duration_ms if
+ * assume_timer_stops_in_suspend is active
+ */
+ if (g_desc && g_desc->assume_timer_stops_in_suspend) {
+ if (delay > cm_suspend_duration_ms)
+ delay -= cm_suspend_duration_ms;
+ else
+ delay = 0;
+ }
+
+ queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
+ msecs_to_jiffies(delay));
+ }
+ device_set_wakeup_capable(cm->dev, false);
uevent_notify(cm, NULL);
}
static const struct dev_pm_ops charger_manager_pm = {
.prepare = cm_suspend_prepare,
+ .suspend_noirq = cm_suspend_noirq,
.complete = cm_suspend_complete,
};
@@ -1048,16 +1365,91 @@ static struct platform_driver charger_manager_driver = {
static int __init charger_manager_init(void)
{
+ cm_wq = create_freezable_workqueue("charger_manager");
+ INIT_DELAYED_WORK(&cm_monitor_work, cm_monitor_poller);
+
return platform_driver_register(&charger_manager_driver);
}
late_initcall(charger_manager_init);
static void __exit charger_manager_cleanup(void)
{
+ destroy_workqueue(cm_wq);
+ cm_wq = NULL;
+
platform_driver_unregister(&charger_manager_driver);
}
module_exit(charger_manager_cleanup);
+/**
+ * find_power_supply - find the associated power_supply of charger
+ * @cm: the Charger Manager representing the battery
+ * @psy: pointer to instance of charger's power_supply
+ */
+static bool find_power_supply(struct charger_manager *cm,
+ struct power_supply *psy)
+{
+ int i;
+ bool found = false;
+
+ for (i = 0; cm->charger_stat[i]; i++) {
+ if (psy == cm->charger_stat[i]) {
+ found = true;
+ break;
+ }
+ }
+
+ return found;
+}
+
+/**
+ * cm_notify_event - charger driver notify Charger Manager of charger event
+ * @psy: pointer to instance of charger's power_supply
+ * @type: type of charger event
+ * @msg: optional message passed to uevent_notify function
+ */
+void cm_notify_event(struct power_supply *psy, enum cm_event_types type,
+ char *msg)
+{
+ struct charger_manager *cm;
+ bool found_power_supply = false;
+
+ if (psy == NULL)
+ return;
+
+ mutex_lock(&cm_list_mtx);
+ list_for_each_entry(cm, &cm_list, entry) {
+ found_power_supply = find_power_supply(cm, psy);
+ if (found_power_supply)
+ break;
+ }
+ mutex_unlock(&cm_list_mtx);
+
+ if (!found_power_supply)
+ return;
+
+ switch (type) {
+ case CM_EVENT_BATT_FULL:
+ fullbatt_handler(cm);
+ break;
+ case CM_EVENT_BATT_OUT:
+ battout_handler(cm);
+ break;
+ case CM_EVENT_BATT_IN:
+ case CM_EVENT_EXT_PWR_IN_OUT ... CM_EVENT_CHG_START_STOP:
+ misc_event_handler(cm, type);
+ break;
+ case CM_EVENT_UNKNOWN:
+ case CM_EVENT_OTHERS:
+ uevent_notify(cm, msg ? msg : default_event_names[type]);
+ break;
+ default:
+ dev_err(cm->dev, "%s type not specified.\n", __func__);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(cm_notify_event);
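
A charger or fuel-gauge driver reports events through this hook; a minimal sketch of a caller, assuming the power_supply pointer is one of those listed in the Charger Manager's charger_stat array and the IRQ name is made up:

	/*
	 * Sketch only: forward a battery-full interrupt to charger-manager.
	 * "example_psy" stands for the charger's registered power_supply.
	 */
	static irqreturn_t example_chg_full_irq(int irq, void *data)
	{
		struct power_supply *example_psy = data;

		cm_notify_event(example_psy, CM_EVENT_BATT_FULL, NULL);

		return IRQ_HANDLED;
	}
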
+
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("Charger Manager");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/ds2781_battery.c b/drivers/power/ds2781_battery.c
index ca0d653..975684a 100644
--- a/drivers/power/ds2781_battery.c
+++ b/drivers/power/ds2781_battery.c
@@ -643,9 +643,7 @@ static ssize_t ds2781_read_param_eeprom_bin(struct file *filp,
struct power_supply *psy = to_power_supply(dev);
struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
- count = min_t(loff_t, count,
- DS2781_EEPROM_BLOCK1_END -
- DS2781_EEPROM_BLOCK1_START + 1 - off);
+ count = min_t(loff_t, count, DS2781_PARAM_EEPROM_SIZE - off);
return ds2781_read_block(dev_info, buf,
DS2781_EEPROM_BLOCK1_START + off, count);
@@ -661,9 +659,7 @@ static ssize_t ds2781_write_param_eeprom_bin(struct file *filp,
struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
int ret;
- count = min_t(loff_t, count,
- DS2781_EEPROM_BLOCK1_END -
- DS2781_EEPROM_BLOCK1_START + 1 - off);
+ count = min_t(loff_t, count, DS2781_PARAM_EEPROM_SIZE - off);
ret = ds2781_write(dev_info, buf,
DS2781_EEPROM_BLOCK1_START + off, count);
@@ -682,7 +678,7 @@ static struct bin_attribute ds2781_param_eeprom_bin_attr = {
.name = "param_eeprom",
.mode = S_IRUGO | S_IWUSR,
},
- .size = DS2781_EEPROM_BLOCK1_END - DS2781_EEPROM_BLOCK1_START + 1,
+ .size = DS2781_PARAM_EEPROM_SIZE,
.read = ds2781_read_param_eeprom_bin,
.write = ds2781_write_param_eeprom_bin,
};
@@ -696,9 +692,7 @@ static ssize_t ds2781_read_user_eeprom_bin(struct file *filp,
struct power_supply *psy = to_power_supply(dev);
struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
- count = min_t(loff_t, count,
- DS2781_EEPROM_BLOCK0_END -
- DS2781_EEPROM_BLOCK0_START + 1 - off);
+ count = min_t(loff_t, count, DS2781_USER_EEPROM_SIZE - off);
return ds2781_read_block(dev_info, buf,
DS2781_EEPROM_BLOCK0_START + off, count);
@@ -715,9 +709,7 @@ static ssize_t ds2781_write_user_eeprom_bin(struct file *filp,
struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
int ret;
- count = min_t(loff_t, count,
- DS2781_EEPROM_BLOCK0_END -
- DS2781_EEPROM_BLOCK0_START + 1 - off);
+ count = min_t(loff_t, count, DS2781_USER_EEPROM_SIZE - off);
ret = ds2781_write(dev_info, buf,
DS2781_EEPROM_BLOCK0_START + off, count);
@@ -736,7 +728,7 @@ static struct bin_attribute ds2781_user_eeprom_bin_attr = {
.name = "user_eeprom",
.mode = S_IRUGO | S_IWUSR,
},
- .size = DS2781_EEPROM_BLOCK0_END - DS2781_EEPROM_BLOCK0_START + 1,
+ .size = DS2781_USER_EEPROM_SIZE,
.read = ds2781_read_user_eeprom_bin,
.write = ds2781_write_user_eeprom_bin,
};
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 39eb50f..e5ccd29 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -474,13 +474,13 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
fail2:
power_supply_unregister(&isp->psy);
fail1:
+ isp1704_charger_set_power(isp, 0);
usb_put_transceiver(isp->phy);
fail0:
kfree(isp);
dev_err(&pdev->dev, "failed to register isp1704 with error %d\n", ret);
- isp1704_charger_set_power(isp, 0);
return ret;
}
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index 04620c2..140788b 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -28,6 +28,7 @@
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/pm.h>
#include <linux/mod_devicetable.h>
#include <linux/power_supply.h>
#include <linux/power/max17042_battery.h>
@@ -61,9 +62,13 @@
#define dP_ACC_100 0x1900
#define dP_ACC_200 0x3200
+#define MAX17042_IC_VERSION 0x0092
+#define MAX17047_IC_VERSION 0x00AC /* same for max17050 */
+
struct max17042_chip {
struct i2c_client *client;
struct power_supply battery;
+ enum max170xx_chip_type chip_type;
struct max17042_platform_data *pdata;
struct work_struct work;
int init_complete;
@@ -105,6 +110,7 @@ static enum power_supply_property max17042_battery_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_VOLTAGE_OCV,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_TEMP,
@@ -150,7 +156,10 @@ static int max17042_get_property(struct power_supply *psy,
val->intval *= 20000; /* Units of LSB = 20mV */
break;
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
- ret = max17042_read_reg(chip->client, MAX17042_V_empty);
+ if (chip->chip_type == MAX17042)
+ ret = max17042_read_reg(chip->client, MAX17042_V_empty);
+ else
+ ret = max17042_read_reg(chip->client, MAX17047_V_empty);
if (ret < 0)
return ret;
@@ -171,6 +180,13 @@ static int max17042_get_property(struct power_supply *psy,
val->intval = ret * 625 / 8;
break;
+ case POWER_SUPPLY_PROP_VOLTAGE_OCV:
+ ret = max17042_read_reg(chip->client, MAX17042_OCVInternal);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret * 625 / 8;
+ break;
case POWER_SUPPLY_PROP_CAPACITY:
ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
if (ret < 0)
@@ -325,11 +341,10 @@ static inline int max17042_model_data_compare(struct max17042_chip *chip,
static int max17042_init_model(struct max17042_chip *chip)
{
int ret;
- int table_size =
- sizeof(chip->pdata->config_data->cell_char_tbl)/sizeof(u16);
+ int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
u16 *temp_data;
- temp_data = kzalloc(table_size, GFP_KERNEL);
+ temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
if (!temp_data)
return -ENOMEM;
@@ -354,12 +369,11 @@ static int max17042_init_model(struct max17042_chip *chip)
static int max17042_verify_model_lock(struct max17042_chip *chip)
{
int i;
- int table_size =
- sizeof(chip->pdata->config_data->cell_char_tbl);
+ int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
u16 *temp_data;
int ret = 0;
- temp_data = kzalloc(table_size, GFP_KERNEL);
+ temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
if (!temp_data)
return -ENOMEM;
@@ -382,6 +396,9 @@ static void max17042_write_config_regs(struct max17042_chip *chip)
max17042_write_reg(chip->client, MAX17042_FilterCFG,
config->filter_cfg);
max17042_write_reg(chip->client, MAX17042_RelaxCFG, config->relax_cfg);
+ if (chip->chip_type == MAX17047)
+ max17042_write_reg(chip->client, MAX17047_FullSOCThr,
+ config->full_soc_thresh);
}
static void max17042_write_custom_regs(struct max17042_chip *chip)
@@ -392,12 +409,23 @@ static void max17042_write_custom_regs(struct max17042_chip *chip)
config->rcomp0);
max17042_write_verify_reg(chip->client, MAX17042_TempCo,
config->tcompc0);
- max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
- config->empty_tempco);
- max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
- config->kempty0);
max17042_write_verify_reg(chip->client, MAX17042_ICHGTerm,
config->ichgt_term);
+ if (chip->chip_type == MAX17042) {
+ max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
+ config->empty_tempco);
+ max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
+ config->kempty0);
+ } else {
+ max17042_write_verify_reg(chip->client, MAX17047_QRTbl00,
+ config->qrtbl00);
+ max17042_write_verify_reg(chip->client, MAX17047_QRTbl10,
+ config->qrtbl10);
+ max17042_write_verify_reg(chip->client, MAX17047_QRTbl20,
+ config->qrtbl20);
+ max17042_write_verify_reg(chip->client, MAX17047_QRTbl30,
+ config->qrtbl30);
+ }
}
static void max17042_update_capacity_regs(struct max17042_chip *chip)
@@ -453,6 +481,8 @@ static void max17042_load_new_capacity_params(struct max17042_chip *chip)
config->design_cap);
max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
config->fullcapnom);
+ /* Update SOC register with new SOC */
+ max17042_write_reg(chip->client, MAX17042_RepSOC, vfSoc);
}
/*
@@ -489,20 +519,28 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
max17042_override_por(client, MAX17042_FullCAP, config->fullcap);
max17042_override_por(client, MAX17042_FullCAPNom, config->fullcapnom);
- max17042_override_por(client, MAX17042_SOC_empty, config->socempty);
+ if (chip->chip_type == MAX17042)
+ max17042_override_por(client, MAX17042_SOC_empty,
+ config->socempty);
max17042_override_por(client, MAX17042_LAvg_empty, config->lavg_empty);
max17042_override_por(client, MAX17042_dQacc, config->dqacc);
max17042_override_por(client, MAX17042_dPacc, config->dpacc);
- max17042_override_por(client, MAX17042_V_empty, config->vempty);
+ if (chip->chip_type == MAX17042)
+ max17042_override_por(client, MAX17042_V_empty, config->vempty);
+ else
+ max17042_override_por(client, MAX17047_V_empty, config->vempty);
max17042_override_por(client, MAX17042_TempNom, config->temp_nom);
max17042_override_por(client, MAX17042_TempLim, config->temp_lim);
max17042_override_por(client, MAX17042_FCTC, config->fctc);
max17042_override_por(client, MAX17042_RCOMP0, config->rcomp0);
max17042_override_por(client, MAX17042_TempCo, config->tcompc0);
- max17042_override_por(client, MAX17042_EmptyTempCo,
- config->empty_tempco);
- max17042_override_por(client, MAX17042_K_empty0, config->kempty0);
+ if (chip->chip_type) {
+ max17042_override_por(client, MAX17042_EmptyTempCo,
+ config->empty_tempco);
+ max17042_override_por(client, MAX17042_K_empty0,
+ config->kempty0);
+ }
}
static int max17042_init_chip(struct max17042_chip *chip)
@@ -659,7 +697,19 @@ static int __devinit max17042_probe(struct i2c_client *client,
i2c_set_clientdata(client, chip);
- chip->battery.name = "max17042_battery";
+ ret = max17042_read_reg(chip->client, MAX17042_DevName);
+ if (ret == MAX17042_IC_VERSION) {
+ dev_dbg(&client->dev, "chip type max17042 detected\n");
+ chip->chip_type = MAX17042;
+ } else if (ret == MAX17047_IC_VERSION) {
+ dev_dbg(&client->dev, "chip type max17047/50 detected\n");
+ chip->chip_type = MAX17047;
+ } else {
+ dev_err(&client->dev, "device version mismatch: %x\n", ret);
+ return -EIO;
+ }
+
+ chip->battery.name = "max170xx_battery";
chip->battery.type = POWER_SUPPLY_TYPE_BATTERY;
chip->battery.get_property = max17042_get_property;
chip->battery.properties = max17042_battery_props;
@@ -683,6 +733,12 @@ static int __devinit max17042_probe(struct i2c_client *client,
max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
}
+ ret = power_supply_register(&client->dev, &chip->battery);
+ if (ret) {
+ dev_err(&client->dev, "failed: power supply register\n");
+ return ret;
+ }
+
if (client->irq) {
ret = request_threaded_irq(client->irq, NULL,
max17042_thread_handler,
@@ -693,13 +749,14 @@ static int __devinit max17042_probe(struct i2c_client *client,
reg |= CONFIG_ALRT_BIT_ENBL;
max17042_write_reg(client, MAX17042_CONFIG, reg);
max17042_set_soc_threshold(chip, 1);
- } else
+ } else {
+ client->irq = 0;
dev_err(&client->dev, "%s(): cannot get IRQ\n",
__func__);
+ }
}
reg = max17042_read_reg(chip->client, MAX17042_STATUS);
-
if (reg & STATUS_POR_BIT) {
INIT_WORK(&chip->work, max17042_init_worker);
schedule_work(&chip->work);
@@ -707,23 +764,65 @@ static int __devinit max17042_probe(struct i2c_client *client,
chip->init_complete = 1;
}
- ret = power_supply_register(&client->dev, &chip->battery);
- if (ret)
- dev_err(&client->dev, "failed: power supply register\n");
- return ret;
+ return 0;
}
static int __devexit max17042_remove(struct i2c_client *client)
{
struct max17042_chip *chip = i2c_get_clientdata(client);
+ if (client->irq)
+ free_irq(client->irq, chip);
power_supply_unregister(&chip->battery);
return 0;
}
+#ifdef CONFIG_PM
+static int max17042_suspend(struct device *dev)
+{
+ struct max17042_chip *chip = dev_get_drvdata(dev);
+
+ /*
+ * Disable the irq and enable irq_wake
+ * capability on the interrupt line.
+ */
+ if (chip->client->irq) {
+ disable_irq(chip->client->irq);
+ enable_irq_wake(chip->client->irq);
+ }
+
+ return 0;
+}
+
+static int max17042_resume(struct device *dev)
+{
+ struct max17042_chip *chip = dev_get_drvdata(dev);
+
+ if (chip->client->irq) {
+ disable_irq_wake(chip->client->irq);
+ enable_irq(chip->client->irq);
+ /* re-program the SOC thresholds to 1% change */
+ max17042_set_soc_threshold(chip, 1);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops max17042_pm_ops = {
+ .suspend = max17042_suspend,
+ .resume = max17042_resume,
+};
+
+#define MAX17042_PM_OPS (&max17042_pm_ops)
+#else
+#define MAX17042_PM_OPS NULL
+#endif
+
#ifdef CONFIG_OF
static const struct of_device_id max17042_dt_match[] = {
{ .compatible = "maxim,max17042" },
+ { .compatible = "maxim,max17047" },
+ { .compatible = "maxim,max17050" },
{ },
};
MODULE_DEVICE_TABLE(of, max17042_dt_match);
@@ -731,6 +830,8 @@ MODULE_DEVICE_TABLE(of, max17042_dt_match);
static const struct i2c_device_id max17042_id[] = {
{ "max17042", 0 },
+ { "max17047", 1 },
+ { "max17050", 2 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max17042_id);
@@ -739,6 +840,7 @@ static struct i2c_driver max17042_i2c_driver = {
.driver = {
.name = "max17042",
.of_match_table = of_match_ptr(max17042_dt_match),
+ .pm = MAX17042_PM_OPS,
},
.probe = max17042_probe,
.remove = __devexit_p(max17042_remove),
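
The probe hunk above keys everything downstream off a single device-ID read. The same detect-then-branch idiom, reduced to a sketch with hypothetical register offset and ID values (MY_REG_DEVNAME, MY_ID_17042, MY_ID_17047 are placeholders, not the driver's real constants):

#include <linux/i2c.h>

#define MY_REG_DEVNAME	0x20	/* hypothetical ID register offset */
#define MY_ID_17042	0x1042	/* hypothetical version codes */
#define MY_ID_17047	0x1047

enum my_chip_type { TYPE_17042, TYPE_17047 };

static int my_detect_chip(struct i2c_client *client, enum my_chip_type *type)
{
	int ret = i2c_smbus_read_word_data(client, MY_REG_DEVNAME);

	if (ret < 0)
		return ret;

	switch (ret) {
	case MY_ID_17042:
		*type = TYPE_17042;
		break;
	case MY_ID_17047:
		*type = TYPE_17047;
		break;
	default:
		dev_err(&client->dev, "unexpected device id %#x\n", ret);
		return -EIO;
	}
	return 0;
}

Every later register write can then branch on the stored type, which is what the conditional MAX17047_* writes in the hunks above do.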
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 4368e7d..4150747 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -146,6 +146,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(voltage_min_design),
POWER_SUPPLY_ATTR(voltage_now),
POWER_SUPPLY_ATTR(voltage_avg),
+ POWER_SUPPLY_ATTR(voltage_ocv),
POWER_SUPPLY_ATTR(current_max),
POWER_SUPPLY_ATTR(current_now),
POWER_SUPPLY_ATTR(current_avg),
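
The new voltage_ocv attribute only shows up once a driver reports the matching property. A minimal, illustrative get_property handler (the value below is a placeholder, not real fuel-gauge data):

#include <linux/power_supply.h>

/* Wired into the power_supply's .properties / .num_properties fields. */
static enum power_supply_property my_props[] = {
	POWER_SUPPLY_PROP_VOLTAGE_OCV,
};

static int my_get_property(struct power_supply *psy,
			   enum power_supply_property psp,
			   union power_supply_propval *val)
{
	switch (psp) {
	case POWER_SUPPLY_PROP_VOLTAGE_OCV:
		/* open-circuit voltage, reported in microvolts */
		val->intval = 3700000;	/* placeholder value */
		return 0;
	default:
		return -EINVAL;
	}
}

With that in place the value appears under /sys/class/power_supply/<name>/voltage_ocv.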
diff --git a/drivers/power/sbs-battery.c b/drivers/power/sbs-battery.c
index 06b659d..a5b6849 100644
--- a/drivers/power/sbs-battery.c
+++ b/drivers/power/sbs-battery.c
@@ -89,7 +89,7 @@ static const struct chip_data {
[REG_CURRENT] =
SBS_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768, 32767),
[REG_CAPACITY] =
- SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0E, 0, 100),
+ SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0D, 0, 100),
[REG_REMAINING_CAPACITY] =
SBS_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535),
[REG_REMAINING_CAPACITY_CHARGE] =
diff --git a/drivers/power/smb347-charger.c b/drivers/power/smb347-charger.c
index ce1694d..f8eedd8 100644
--- a/drivers/power/smb347-charger.c
+++ b/drivers/power/smb347-charger.c
@@ -11,7 +11,7 @@
* published by the Free Software Foundation.
*/
-#include <linux/debugfs.h>
+#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -21,7 +21,7 @@
#include <linux/mutex.h>
#include <linux/power_supply.h>
#include <linux/power/smb347-charger.h>
-#include <linux/seq_file.h>
+#include <linux/regmap.h>
/*
* Configuration registers. These are mirrored to volatile RAM and can be
@@ -39,6 +39,7 @@
#define CFG_CURRENT_LIMIT_DC_SHIFT 4
#define CFG_CURRENT_LIMIT_USB_MASK 0x0f
#define CFG_FLOAT_VOLTAGE 0x03
+#define CFG_FLOAT_VOLTAGE_FLOAT_MASK 0x3f
#define CFG_FLOAT_VOLTAGE_THRESHOLD_MASK 0xc0
#define CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT 6
#define CFG_STAT 0x05
@@ -113,29 +114,31 @@
#define STAT_C_CHARGER_ERROR BIT(6)
#define STAT_E 0x3f
+#define SMB347_MAX_REGISTER 0x3f
+
/**
* struct smb347_charger - smb347 charger instance
* @lock: protects concurrent access to online variables
- * @client: pointer to i2c client
+ * @dev: pointer to device
+ * @regmap: pointer to driver regmap
* @mains: power_supply instance for AC/DC power
* @usb: power_supply instance for USB power
* @battery: power_supply instance for battery
* @mains_online: is AC/DC input connected
* @usb_online: is USB input connected
* @charging_enabled: is charging enabled
- * @dentry: for debugfs
* @pdata: pointer to platform data
*/
struct smb347_charger {
struct mutex lock;
- struct i2c_client *client;
+ struct device *dev;
+ struct regmap *regmap;
struct power_supply mains;
struct power_supply usb;
struct power_supply battery;
bool mains_online;
bool usb_online;
bool charging_enabled;
- struct dentry *dentry;
const struct smb347_charger_platform_data *pdata;
};
@@ -193,14 +196,6 @@ static const unsigned int ccc_tbl[] = {
1200000,
};
-/* Convert register value to current using lookup table */
-static int hw_to_current(const unsigned int *tbl, size_t size, unsigned int val)
-{
- if (val >= size)
- return -EINVAL;
- return tbl[val];
-}
-
/* Convert current to register value using lookup table */
static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val)
{
@@ -212,43 +207,22 @@ static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val)
return i > 0 ? i - 1 : -EINVAL;
}
-static int smb347_read(struct smb347_charger *smb, u8 reg)
-{
- int ret;
-
- ret = i2c_smbus_read_byte_data(smb->client, reg);
- if (ret < 0)
- dev_warn(&smb->client->dev, "failed to read reg 0x%x: %d\n",
- reg, ret);
- return ret;
-}
-
-static int smb347_write(struct smb347_charger *smb, u8 reg, u8 val)
-{
- int ret;
-
- ret = i2c_smbus_write_byte_data(smb->client, reg, val);
- if (ret < 0)
- dev_warn(&smb->client->dev, "failed to write reg 0x%x: %d\n",
- reg, ret);
- return ret;
-}
-
/**
- * smb347_update_status - updates the charging status
+ * smb347_update_ps_status - refreshes the power source status
* @smb: pointer to smb347 charger instance
*
- * Function checks status of the charging and updates internal state
- * accordingly. Returns %0 if there is no change in status, %1 if the
- * status has changed and negative errno in case of failure.
+ * Function checks whether any power source is connected to the charger and
+ * updates internal state accordingly. If there is a change from the previous
+ * state the function returns %1, otherwise %0, or a negative errno on error.
*/
-static int smb347_update_status(struct smb347_charger *smb)
+static int smb347_update_ps_status(struct smb347_charger *smb)
{
bool usb = false;
bool dc = false;
+ unsigned int val;
int ret;
- ret = smb347_read(smb, IRQSTAT_E);
+ ret = regmap_read(smb->regmap, IRQSTAT_E, &val);
if (ret < 0)
return ret;
@@ -257,9 +231,9 @@ static int smb347_update_status(struct smb347_charger *smb)
* platform data _and_ whether corresponding undervoltage is set.
*/
if (smb->pdata->use_mains)
- dc = !(ret & IRQSTAT_E_DCIN_UV_STAT);
+ dc = !(val & IRQSTAT_E_DCIN_UV_STAT);
if (smb->pdata->use_usb)
- usb = !(ret & IRQSTAT_E_USBIN_UV_STAT);
+ usb = !(val & IRQSTAT_E_USBIN_UV_STAT);
mutex_lock(&smb->lock);
ret = smb->mains_online != dc || smb->usb_online != usb;
@@ -271,15 +245,15 @@ static int smb347_update_status(struct smb347_charger *smb)
}
/*
- * smb347_is_online - returns whether input power source is connected
+ * smb347_is_ps_online - returns whether input power source is connected
* @smb: pointer to smb347 charger instance
*
* Returns %true if input power source is connected. Note that this is
* dependent on what platform has configured for usable power sources. For
- * example if USB is disabled, this will return %false even if the USB
- * cable is connected.
+ * example if USB is disabled, this will return %false even if the USB cable
+ * is connected.
*/
-static bool smb347_is_online(struct smb347_charger *smb)
+static bool smb347_is_ps_online(struct smb347_charger *smb)
{
bool ret;
@@ -299,16 +273,17 @@ static bool smb347_is_online(struct smb347_charger *smb)
*/
static int smb347_charging_status(struct smb347_charger *smb)
{
+ unsigned int val;
int ret;
- if (!smb347_is_online(smb))
+ if (!smb347_is_ps_online(smb))
return 0;
- ret = smb347_read(smb, STAT_C);
+ ret = regmap_read(smb->regmap, STAT_C, &val);
if (ret < 0)
return 0;
- return (ret & STAT_C_CHG_MASK) >> STAT_C_CHG_SHIFT;
+ return (val & STAT_C_CHG_MASK) >> STAT_C_CHG_SHIFT;
}
static int smb347_charging_set(struct smb347_charger *smb, bool enable)
@@ -316,27 +291,17 @@ static int smb347_charging_set(struct smb347_charger *smb, bool enable)
int ret = 0;
if (smb->pdata->enable_control != SMB347_CHG_ENABLE_SW) {
- dev_dbg(&smb->client->dev,
- "charging enable/disable in SW disabled\n");
+ dev_dbg(smb->dev, "charging enable/disable in SW disabled\n");
return 0;
}
mutex_lock(&smb->lock);
if (smb->charging_enabled != enable) {
- ret = smb347_read(smb, CMD_A);
- if (ret < 0)
- goto out;
-
- smb->charging_enabled = enable;
-
- if (enable)
- ret |= CMD_A_CHG_ENABLED;
- else
- ret &= ~CMD_A_CHG_ENABLED;
-
- ret = smb347_write(smb, CMD_A, ret);
+ ret = regmap_update_bits(smb->regmap, CMD_A, CMD_A_CHG_ENABLED,
+ enable ? CMD_A_CHG_ENABLED : 0);
+ if (!ret)
+ smb->charging_enabled = enable;
}
-out:
mutex_unlock(&smb->lock);
return ret;
}
@@ -351,7 +316,7 @@ static inline int smb347_charging_disable(struct smb347_charger *smb)
return smb347_charging_set(smb, false);
}
-static int smb347_update_online(struct smb347_charger *smb)
+static int smb347_start_stop_charging(struct smb347_charger *smb)
{
int ret;
@@ -360,16 +325,14 @@ static int smb347_update_online(struct smb347_charger *smb)
* disable or enable the charging. We do it manually because it
* depends on how the platform has configured the valid inputs.
*/
- if (smb347_is_online(smb)) {
+ if (smb347_is_ps_online(smb)) {
ret = smb347_charging_enable(smb);
if (ret < 0)
- dev_err(&smb->client->dev,
- "failed to enable charging\n");
+ dev_err(smb->dev, "failed to enable charging\n");
} else {
ret = smb347_charging_disable(smb);
if (ret < 0)
- dev_err(&smb->client->dev,
- "failed to disable charging\n");
+ dev_err(smb->dev, "failed to disable charging\n");
}
return ret;
@@ -377,112 +340,120 @@ static int smb347_update_online(struct smb347_charger *smb)
static int smb347_set_charge_current(struct smb347_charger *smb)
{
- int ret, val;
-
- ret = smb347_read(smb, CFG_CHARGE_CURRENT);
- if (ret < 0)
- return ret;
+ int ret;
if (smb->pdata->max_charge_current) {
- val = current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl),
+ ret = current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl),
smb->pdata->max_charge_current);
- if (val < 0)
- return val;
+ if (ret < 0)
+ return ret;
- ret &= ~CFG_CHARGE_CURRENT_FCC_MASK;
- ret |= val << CFG_CHARGE_CURRENT_FCC_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
+ CFG_CHARGE_CURRENT_FCC_MASK,
+ ret << CFG_CHARGE_CURRENT_FCC_SHIFT);
+ if (ret < 0)
+ return ret;
}
if (smb->pdata->pre_charge_current) {
- val = current_to_hw(pcc_tbl, ARRAY_SIZE(pcc_tbl),
+ ret = current_to_hw(pcc_tbl, ARRAY_SIZE(pcc_tbl),
smb->pdata->pre_charge_current);
- if (val < 0)
- return val;
+ if (ret < 0)
+ return ret;
- ret &= ~CFG_CHARGE_CURRENT_PCC_MASK;
- ret |= val << CFG_CHARGE_CURRENT_PCC_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
+ CFG_CHARGE_CURRENT_PCC_MASK,
+ ret << CFG_CHARGE_CURRENT_PCC_SHIFT);
+ if (ret < 0)
+ return ret;
}
if (smb->pdata->termination_current) {
- val = current_to_hw(tc_tbl, ARRAY_SIZE(tc_tbl),
+ ret = current_to_hw(tc_tbl, ARRAY_SIZE(tc_tbl),
smb->pdata->termination_current);
- if (val < 0)
- return val;
+ if (ret < 0)
+ return ret;
- ret &= ~CFG_CHARGE_CURRENT_TC_MASK;
- ret |= val;
+ ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
+ CFG_CHARGE_CURRENT_TC_MASK, ret);
+ if (ret < 0)
+ return ret;
}
- return smb347_write(smb, CFG_CHARGE_CURRENT, ret);
+ return 0;
}
static int smb347_set_current_limits(struct smb347_charger *smb)
{
- int ret, val;
-
- ret = smb347_read(smb, CFG_CURRENT_LIMIT);
- if (ret < 0)
- return ret;
+ int ret;
if (smb->pdata->mains_current_limit) {
- val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
+ ret = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
smb->pdata->mains_current_limit);
- if (val < 0)
- return val;
+ if (ret < 0)
+ return ret;
- ret &= ~CFG_CURRENT_LIMIT_DC_MASK;
- ret |= val << CFG_CURRENT_LIMIT_DC_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_CURRENT_LIMIT,
+ CFG_CURRENT_LIMIT_DC_MASK,
+ ret << CFG_CURRENT_LIMIT_DC_SHIFT);
+ if (ret < 0)
+ return ret;
}
if (smb->pdata->usb_hc_current_limit) {
- val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
+ ret = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
smb->pdata->usb_hc_current_limit);
- if (val < 0)
- return val;
+ if (ret < 0)
+ return ret;
- ret &= ~CFG_CURRENT_LIMIT_USB_MASK;
- ret |= val;
+ ret = regmap_update_bits(smb->regmap, CFG_CURRENT_LIMIT,
+ CFG_CURRENT_LIMIT_USB_MASK, ret);
+ if (ret < 0)
+ return ret;
}
- return smb347_write(smb, CFG_CURRENT_LIMIT, ret);
+ return 0;
}
static int smb347_set_voltage_limits(struct smb347_charger *smb)
{
- int ret, val;
-
- ret = smb347_read(smb, CFG_FLOAT_VOLTAGE);
- if (ret < 0)
- return ret;
+ int ret;
if (smb->pdata->pre_to_fast_voltage) {
- val = smb->pdata->pre_to_fast_voltage;
+ ret = smb->pdata->pre_to_fast_voltage;
/* uV */
- val = clamp_val(val, 2400000, 3000000) - 2400000;
- val /= 200000;
+ ret = clamp_val(ret, 2400000, 3000000) - 2400000;
+ ret /= 200000;
- ret &= ~CFG_FLOAT_VOLTAGE_THRESHOLD_MASK;
- ret |= val << CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_FLOAT_VOLTAGE,
+ CFG_FLOAT_VOLTAGE_THRESHOLD_MASK,
+ ret << CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT);
+ if (ret < 0)
+ return ret;
}
if (smb->pdata->max_charge_voltage) {
- val = smb->pdata->max_charge_voltage;
+ ret = smb->pdata->max_charge_voltage;
/* uV */
- val = clamp_val(val, 3500000, 4500000) - 3500000;
- val /= 20000;
+ ret = clamp_val(ret, 3500000, 4500000) - 3500000;
+ ret /= 20000;
- ret |= val;
+ ret = regmap_update_bits(smb->regmap, CFG_FLOAT_VOLTAGE,
+ CFG_FLOAT_VOLTAGE_FLOAT_MASK, ret);
+ if (ret < 0)
+ return ret;
}
- return smb347_write(smb, CFG_FLOAT_VOLTAGE, ret);
+ return 0;
}
static int smb347_set_temp_limits(struct smb347_charger *smb)
{
bool enable_therm_monitor = false;
- int ret, val;
+ int ret = 0;
+ int val;
if (smb->pdata->chip_temp_threshold) {
val = smb->pdata->chip_temp_threshold;
@@ -491,22 +462,13 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
val = clamp_val(val, 100, 130) - 100;
val /= 10;
- ret = smb347_read(smb, CFG_OTG);
- if (ret < 0)
- return ret;
-
- ret &= ~CFG_OTG_TEMP_THRESHOLD_MASK;
- ret |= val << CFG_OTG_TEMP_THRESHOLD_SHIFT;
-
- ret = smb347_write(smb, CFG_OTG, ret);
+ ret = regmap_update_bits(smb->regmap, CFG_OTG,
+ CFG_OTG_TEMP_THRESHOLD_MASK,
+ val << CFG_OTG_TEMP_THRESHOLD_SHIFT);
if (ret < 0)
return ret;
}
- ret = smb347_read(smb, CFG_TEMP_LIMIT);
- if (ret < 0)
- return ret;
-
if (smb->pdata->soft_cold_temp_limit != SMB347_TEMP_USE_DEFAULT) {
val = smb->pdata->soft_cold_temp_limit;
@@ -515,8 +477,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
/* this goes from higher to lower so invert the value */
val = ~val & 0x3;
- ret &= ~CFG_TEMP_LIMIT_SOFT_COLD_MASK;
- ret |= val << CFG_TEMP_LIMIT_SOFT_COLD_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+ CFG_TEMP_LIMIT_SOFT_COLD_MASK,
+ val << CFG_TEMP_LIMIT_SOFT_COLD_SHIFT);
+ if (ret < 0)
+ return ret;
enable_therm_monitor = true;
}
@@ -527,8 +492,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
val = clamp_val(val, 40, 55) - 40;
val /= 5;
- ret &= ~CFG_TEMP_LIMIT_SOFT_HOT_MASK;
- ret |= val << CFG_TEMP_LIMIT_SOFT_HOT_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+ CFG_TEMP_LIMIT_SOFT_HOT_MASK,
+ val << CFG_TEMP_LIMIT_SOFT_HOT_SHIFT);
+ if (ret < 0)
+ return ret;
enable_therm_monitor = true;
}
@@ -541,8 +509,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
/* this goes from higher to lower so invert the value */
val = ~val & 0x3;
- ret &= ~CFG_TEMP_LIMIT_HARD_COLD_MASK;
- ret |= val << CFG_TEMP_LIMIT_HARD_COLD_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+ CFG_TEMP_LIMIT_HARD_COLD_MASK,
+ val << CFG_TEMP_LIMIT_HARD_COLD_SHIFT);
+ if (ret < 0)
+ return ret;
enable_therm_monitor = true;
}
@@ -553,16 +524,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
val = clamp_val(val, 50, 65) - 50;
val /= 5;
- ret &= ~CFG_TEMP_LIMIT_HARD_HOT_MASK;
- ret |= val << CFG_TEMP_LIMIT_HARD_HOT_SHIFT;
+ ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
+ CFG_TEMP_LIMIT_HARD_HOT_MASK,
+ val << CFG_TEMP_LIMIT_HARD_HOT_SHIFT);
+ if (ret < 0)
+ return ret;
enable_therm_monitor = true;
}
- ret = smb347_write(smb, CFG_TEMP_LIMIT, ret);
- if (ret < 0)
- return ret;
-
/*
* If any of the temperature limits are set, we also enable the
* thermistor monitoring.
@@ -574,25 +544,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
* depending on the configuration.
*/
if (enable_therm_monitor) {
- ret = smb347_read(smb, CFG_THERM);
- if (ret < 0)
- return ret;
-
- ret &= ~CFG_THERM_MONITOR_DISABLED;
-
- ret = smb347_write(smb, CFG_THERM, ret);
+ ret = regmap_update_bits(smb->regmap, CFG_THERM,
+ CFG_THERM_MONITOR_DISABLED, 0);
if (ret < 0)
return ret;
}
if (smb->pdata->suspend_on_hard_temp_limit) {
- ret = smb347_read(smb, CFG_SYSOK);
- if (ret < 0)
- return ret;
-
- ret &= ~CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED;
-
- ret = smb347_write(smb, CFG_SYSOK, ret);
+ ret = regmap_update_bits(smb->regmap, CFG_SYSOK,
+ CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED, 0);
if (ret < 0)
return ret;
}
@@ -601,17 +561,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
SMB347_SOFT_TEMP_COMPENSATE_DEFAULT) {
val = smb->pdata->soft_temp_limit_compensation & 0x3;
- ret = smb347_read(smb, CFG_THERM);
+ ret = regmap_update_bits(smb->regmap, CFG_THERM,
+ CFG_THERM_SOFT_HOT_COMPENSATION_MASK,
+ val << CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT);
if (ret < 0)
return ret;
- ret &= ~CFG_THERM_SOFT_HOT_COMPENSATION_MASK;
- ret |= val << CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT;
-
- ret &= ~CFG_THERM_SOFT_COLD_COMPENSATION_MASK;
- ret |= val << CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT;
-
- ret = smb347_write(smb, CFG_THERM, ret);
+ ret = regmap_update_bits(smb->regmap, CFG_THERM,
+ CFG_THERM_SOFT_COLD_COMPENSATION_MASK,
+ val << CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT);
if (ret < 0)
return ret;
}
@@ -622,14 +580,9 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
if (val < 0)
return val;
- ret = smb347_read(smb, CFG_OTG);
- if (ret < 0)
- return ret;
-
- ret &= ~CFG_OTG_CC_COMPENSATION_MASK;
- ret |= (val & 0x3) << CFG_OTG_CC_COMPENSATION_SHIFT;
-
- ret = smb347_write(smb, CFG_OTG, ret);
+ ret = regmap_update_bits(smb->regmap, CFG_OTG,
+ CFG_OTG_CC_COMPENSATION_MASK,
+ (val & 0x3) << CFG_OTG_CC_COMPENSATION_SHIFT);
if (ret < 0)
return ret;
}
@@ -648,22 +601,13 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
*/
static int smb347_set_writable(struct smb347_charger *smb, bool writable)
{
- int ret;
-
- ret = smb347_read(smb, CMD_A);
- if (ret < 0)
- return ret;
-
- if (writable)
- ret |= CMD_A_ALLOW_WRITE;
- else
- ret &= ~CMD_A_ALLOW_WRITE;
-
- return smb347_write(smb, CMD_A, ret);
+ return regmap_update_bits(smb->regmap, CMD_A, CMD_A_ALLOW_WRITE,
+ writable ? CMD_A_ALLOW_WRITE : 0);
}
static int smb347_hw_init(struct smb347_charger *smb)
{
+ unsigned int val;
int ret;
ret = smb347_set_writable(smb, true);
@@ -692,34 +636,19 @@ static int smb347_hw_init(struct smb347_charger *smb)
/* If USB charging is disabled we put the USB in suspend mode */
if (!smb->pdata->use_usb) {
- ret = smb347_read(smb, CMD_A);
- if (ret < 0)
- goto fail;
-
- ret |= CMD_A_SUSPEND_ENABLED;
-
- ret = smb347_write(smb, CMD_A, ret);
+ ret = regmap_update_bits(smb->regmap, CMD_A,
+ CMD_A_SUSPEND_ENABLED,
+ CMD_A_SUSPEND_ENABLED);
if (ret < 0)
goto fail;
}
- ret = smb347_read(smb, CFG_OTHER);
- if (ret < 0)
- goto fail;
-
/*
* If configured by platform data, we enable hardware Auto-OTG
* support for driving VBUS. Otherwise we disable it.
*/
- ret &= ~CFG_OTHER_RID_MASK;
- if (smb->pdata->use_usb_otg)
- ret |= CFG_OTHER_RID_ENABLED_AUTO_OTG;
-
- ret = smb347_write(smb, CFG_OTHER, ret);
- if (ret < 0)
- goto fail;
-
- ret = smb347_read(smb, CFG_PIN);
+ ret = regmap_update_bits(smb->regmap, CFG_OTHER, CFG_OTHER_RID_MASK,
+ smb->pdata->use_usb_otg ? CFG_OTHER_RID_ENABLED_AUTO_OTG : 0);
if (ret < 0)
goto fail;
@@ -728,32 +657,33 @@ static int smb347_hw_init(struct smb347_charger *smb)
* command register unless pin control is specified in the platform
* data.
*/
- ret &= ~CFG_PIN_EN_CTRL_MASK;
-
switch (smb->pdata->enable_control) {
- case SMB347_CHG_ENABLE_SW:
- /* Do nothing, 0 means i2c control */
- break;
case SMB347_CHG_ENABLE_PIN_ACTIVE_LOW:
- ret |= CFG_PIN_EN_CTRL_ACTIVE_LOW;
+ val = CFG_PIN_EN_CTRL_ACTIVE_LOW;
break;
case SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH:
- ret |= CFG_PIN_EN_CTRL_ACTIVE_HIGH;
+ val = CFG_PIN_EN_CTRL_ACTIVE_HIGH;
+ break;
+ default:
+ val = 0;
break;
}
- /* Disable Automatic Power Source Detection (APSD) interrupt. */
- ret &= ~CFG_PIN_EN_APSD_IRQ;
+ ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CTRL_MASK,
+ val);
+ if (ret < 0)
+ goto fail;
- ret = smb347_write(smb, CFG_PIN, ret);
+ /* Disable Automatic Power Source Detection (APSD) interrupt. */
+ ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_APSD_IRQ, 0);
if (ret < 0)
goto fail;
- ret = smb347_update_status(smb);
+ ret = smb347_update_ps_status(smb);
if (ret < 0)
goto fail;
- ret = smb347_update_online(smb);
+ ret = smb347_start_stop_charging(smb);
fail:
smb347_set_writable(smb, false);
@@ -763,24 +693,25 @@ fail:
static irqreturn_t smb347_interrupt(int irq, void *data)
{
struct smb347_charger *smb = data;
- int stat_c, irqstat_e, irqstat_c;
- irqreturn_t ret = IRQ_NONE;
+ unsigned int stat_c, irqstat_e, irqstat_c;
+ bool handled = false;
+ int ret;
- stat_c = smb347_read(smb, STAT_C);
- if (stat_c < 0) {
- dev_warn(&smb->client->dev, "reading STAT_C failed\n");
+ ret = regmap_read(smb->regmap, STAT_C, &stat_c);
+ if (ret < 0) {
+ dev_warn(smb->dev, "reading STAT_C failed\n");
return IRQ_NONE;
}
- irqstat_c = smb347_read(smb, IRQSTAT_C);
- if (irqstat_c < 0) {
- dev_warn(&smb->client->dev, "reading IRQSTAT_C failed\n");
+ ret = regmap_read(smb->regmap, IRQSTAT_C, &irqstat_c);
+ if (ret < 0) {
+ dev_warn(smb->dev, "reading IRQSTAT_C failed\n");
return IRQ_NONE;
}
- irqstat_e = smb347_read(smb, IRQSTAT_E);
- if (irqstat_e < 0) {
- dev_warn(&smb->client->dev, "reading IRQSTAT_E failed\n");
+ ret = regmap_read(smb->regmap, IRQSTAT_E, &irqstat_e);
+ if (ret < 0) {
+ dev_warn(smb->dev, "reading IRQSTAT_E failed\n");
return IRQ_NONE;
}
@@ -789,13 +720,11 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
* disable charging.
*/
if (stat_c & STAT_C_CHARGER_ERROR) {
- dev_err(&smb->client->dev,
- "error in charger, disabling charging\n");
+ dev_err(smb->dev, "error in charger, disabling charging\n");
smb347_charging_disable(smb);
power_supply_changed(&smb->battery);
-
- ret = IRQ_HANDLED;
+ handled = true;
}
/*
@@ -806,7 +735,7 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
if (irqstat_c & (IRQSTAT_C_TERMINATION_IRQ | IRQSTAT_C_TAPER_IRQ)) {
if (irqstat_c & IRQSTAT_C_TERMINATION_STAT)
power_supply_changed(&smb->battery);
- ret = IRQ_HANDLED;
+ handled = true;
}
/*
@@ -814,15 +743,17 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
* was connected or disconnected.
*/
if (irqstat_e & (IRQSTAT_E_USBIN_UV_IRQ | IRQSTAT_E_DCIN_UV_IRQ)) {
- if (smb347_update_status(smb) > 0) {
- smb347_update_online(smb);
- power_supply_changed(&smb->mains);
- power_supply_changed(&smb->usb);
+ if (smb347_update_ps_status(smb) > 0) {
+ smb347_start_stop_charging(smb);
+ if (smb->pdata->use_mains)
+ power_supply_changed(&smb->mains);
+ if (smb->pdata->use_usb)
+ power_supply_changed(&smb->usb);
}
- ret = IRQ_HANDLED;
+ handled = true;
}
- return ret;
+ return handled ? IRQ_HANDLED : IRQ_NONE;
}
static int smb347_irq_set(struct smb347_charger *smb, bool enable)
@@ -839,41 +770,18 @@ static int smb347_irq_set(struct smb347_charger *smb, bool enable)
* - termination current reached
* - charger error
*/
- if (enable) {
- ret = smb347_write(smb, CFG_FAULT_IRQ, CFG_FAULT_IRQ_DCIN_UV);
- if (ret < 0)
- goto fail;
-
- ret = smb347_write(smb, CFG_STATUS_IRQ,
- CFG_STATUS_IRQ_TERMINATION_OR_TAPER);
- if (ret < 0)
- goto fail;
-
- ret = smb347_read(smb, CFG_PIN);
- if (ret < 0)
- goto fail;
-
- ret |= CFG_PIN_EN_CHARGER_ERROR;
-
- ret = smb347_write(smb, CFG_PIN, ret);
- } else {
- ret = smb347_write(smb, CFG_FAULT_IRQ, 0);
- if (ret < 0)
- goto fail;
-
- ret = smb347_write(smb, CFG_STATUS_IRQ, 0);
- if (ret < 0)
- goto fail;
-
- ret = smb347_read(smb, CFG_PIN);
- if (ret < 0)
- goto fail;
-
- ret &= ~CFG_PIN_EN_CHARGER_ERROR;
+ ret = regmap_update_bits(smb->regmap, CFG_FAULT_IRQ, 0xff,
+ enable ? CFG_FAULT_IRQ_DCIN_UV : 0);
+ if (ret < 0)
+ goto fail;
- ret = smb347_write(smb, CFG_PIN, ret);
- }
+ ret = regmap_update_bits(smb->regmap, CFG_STATUS_IRQ, 0xff,
+ enable ? CFG_STATUS_IRQ_TERMINATION_OR_TAPER : 0);
+ if (ret < 0)
+ goto fail;
+ ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CHARGER_ERROR,
+ enable ? CFG_PIN_EN_CHARGER_ERROR : 0);
fail:
smb347_set_writable(smb, false);
return ret;
@@ -889,18 +797,18 @@ static inline int smb347_irq_disable(struct smb347_charger *smb)
return smb347_irq_set(smb, false);
}
-static int smb347_irq_init(struct smb347_charger *smb)
+static int smb347_irq_init(struct smb347_charger *smb,
+ struct i2c_client *client)
{
const struct smb347_charger_platform_data *pdata = smb->pdata;
int ret, irq = gpio_to_irq(pdata->irq_gpio);
- ret = gpio_request_one(pdata->irq_gpio, GPIOF_IN, smb->client->name);
+ ret = gpio_request_one(pdata->irq_gpio, GPIOF_IN, client->name);
if (ret < 0)
goto fail;
ret = request_threaded_irq(irq, NULL, smb347_interrupt,
- IRQF_TRIGGER_FALLING, smb->client->name,
- smb);
+ IRQF_TRIGGER_FALLING, client->name, smb);
if (ret < 0)
goto fail_gpio;
@@ -912,23 +820,14 @@ static int smb347_irq_init(struct smb347_charger *smb)
* Configure the STAT output to be suitable for interrupts: disable
* all other output (except interrupts) and make it active low.
*/
- ret = smb347_read(smb, CFG_STAT);
- if (ret < 0)
- goto fail_readonly;
-
- ret &= ~CFG_STAT_ACTIVE_HIGH;
- ret |= CFG_STAT_DISABLED;
-
- ret = smb347_write(smb, CFG_STAT, ret);
- if (ret < 0)
- goto fail_readonly;
-
- ret = smb347_irq_enable(smb);
+ ret = regmap_update_bits(smb->regmap, CFG_STAT,
+ CFG_STAT_ACTIVE_HIGH | CFG_STAT_DISABLED,
+ CFG_STAT_DISABLED);
if (ret < 0)
goto fail_readonly;
smb347_set_writable(smb, false);
- smb->client->irq = irq;
+ client->irq = irq;
return 0;
fail_readonly:
@@ -938,7 +837,7 @@ fail_irq:
fail_gpio:
gpio_free(pdata->irq_gpio);
fail:
- smb->client->irq = 0;
+ client->irq = 0;
return ret;
}
@@ -987,13 +886,13 @@ static int smb347_battery_get_property(struct power_supply *psy,
const struct smb347_charger_platform_data *pdata = smb->pdata;
int ret;
- ret = smb347_update_status(smb);
+ ret = smb347_update_ps_status(smb);
if (ret < 0)
return ret;
switch (prop) {
case POWER_SUPPLY_PROP_STATUS:
- if (!smb347_is_online(smb)) {
+ if (!smb347_is_ps_online(smb)) {
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
break;
}
@@ -1004,7 +903,7 @@ static int smb347_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CHARGE_TYPE:
- if (!smb347_is_online(smb))
+ if (!smb347_is_ps_online(smb))
return -ENODATA;
/*
@@ -1036,44 +935,6 @@ static int smb347_battery_get_property(struct power_supply *psy,
val->intval = pdata->battery_info.voltage_max_design;
break;
- case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- if (!smb347_is_online(smb))
- return -ENODATA;
- ret = smb347_read(smb, STAT_A);
- if (ret < 0)
- return ret;
-
- ret &= STAT_A_FLOAT_VOLTAGE_MASK;
- if (ret > 0x3d)
- ret = 0x3d;
-
- val->intval = 3500000 + ret * 20000;
- break;
-
- case POWER_SUPPLY_PROP_CURRENT_NOW:
- if (!smb347_is_online(smb))
- return -ENODATA;
-
- ret = smb347_read(smb, STAT_B);
- if (ret < 0)
- return ret;
-
- /*
- * The current value is composition of FCC and PCC values
- * and we can detect which table to use from bit 5.
- */
- if (ret & 0x20) {
- val->intval = hw_to_current(fcc_tbl,
- ARRAY_SIZE(fcc_tbl),
- ret & 7);
- } else {
- ret >>= 3;
- val->intval = hw_to_current(pcc_tbl,
- ARRAY_SIZE(pcc_tbl),
- ret & 7);
- }
- break;
-
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
val->intval = pdata->battery_info.charge_full_design;
break;
@@ -1095,64 +956,58 @@ static enum power_supply_property smb347_battery_properties[] = {
POWER_SUPPLY_PROP_TECHNOLOGY,
POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
- POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_MODEL_NAME,
};
-static int smb347_debugfs_show(struct seq_file *s, void *data)
+static bool smb347_volatile_reg(struct device *dev, unsigned int reg)
{
- struct smb347_charger *smb = s->private;
- int ret;
- u8 reg;
-
- seq_printf(s, "Control registers:\n");
- seq_printf(s, "==================\n");
- for (reg = CFG_CHARGE_CURRENT; reg <= CFG_ADDRESS; reg++) {
- ret = smb347_read(smb, reg);
- seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
- }
- seq_printf(s, "\n");
-
- seq_printf(s, "Command registers:\n");
- seq_printf(s, "==================\n");
- ret = smb347_read(smb, CMD_A);
- seq_printf(s, "0x%02x:\t0x%02x\n", CMD_A, ret);
- ret = smb347_read(smb, CMD_B);
- seq_printf(s, "0x%02x:\t0x%02x\n", CMD_B, ret);
- ret = smb347_read(smb, CMD_C);
- seq_printf(s, "0x%02x:\t0x%02x\n", CMD_C, ret);
- seq_printf(s, "\n");
-
- seq_printf(s, "Interrupt status registers:\n");
- seq_printf(s, "===========================\n");
- for (reg = IRQSTAT_A; reg <= IRQSTAT_F; reg++) {
- ret = smb347_read(smb, reg);
- seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
- }
- seq_printf(s, "\n");
-
- seq_printf(s, "Status registers:\n");
- seq_printf(s, "=================\n");
- for (reg = STAT_A; reg <= STAT_E; reg++) {
- ret = smb347_read(smb, reg);
- seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
+ switch (reg) {
+ case IRQSTAT_A:
+ case IRQSTAT_C:
+ case IRQSTAT_E:
+ case IRQSTAT_F:
+ case STAT_A:
+ case STAT_B:
+ case STAT_C:
+ case STAT_E:
+ return true;
}
- return 0;
+ return false;
}
-static int smb347_debugfs_open(struct inode *inode, struct file *file)
+static bool smb347_readable_reg(struct device *dev, unsigned int reg)
{
- return single_open(file, smb347_debugfs_show, inode->i_private);
+ switch (reg) {
+ case CFG_CHARGE_CURRENT:
+ case CFG_CURRENT_LIMIT:
+ case CFG_FLOAT_VOLTAGE:
+ case CFG_STAT:
+ case CFG_PIN:
+ case CFG_THERM:
+ case CFG_SYSOK:
+ case CFG_OTHER:
+ case CFG_OTG:
+ case CFG_TEMP_LIMIT:
+ case CFG_FAULT_IRQ:
+ case CFG_STATUS_IRQ:
+ case CFG_ADDRESS:
+ case CMD_A:
+ case CMD_B:
+ case CMD_C:
+ return true;
+ }
+
+ return smb347_volatile_reg(dev, reg);
}
-static const struct file_operations smb347_debugfs_fops = {
- .open = smb347_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+static const struct regmap_config smb347_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = SMB347_MAX_REGISTER,
+ .volatile_reg = smb347_volatile_reg,
+ .readable_reg = smb347_readable_reg,
};
static int smb347_probe(struct i2c_client *client,
@@ -1178,28 +1033,45 @@ static int smb347_probe(struct i2c_client *client,
i2c_set_clientdata(client, smb);
mutex_init(&smb->lock);
- smb->client = client;
+ smb->dev = &client->dev;
smb->pdata = pdata;
+ smb->regmap = devm_regmap_init_i2c(client, &smb347_regmap);
+ if (IS_ERR(smb->regmap))
+ return PTR_ERR(smb->regmap);
+
ret = smb347_hw_init(smb);
if (ret < 0)
return ret;
- smb->mains.name = "smb347-mains";
- smb->mains.type = POWER_SUPPLY_TYPE_MAINS;
- smb->mains.get_property = smb347_mains_get_property;
- smb->mains.properties = smb347_mains_properties;
- smb->mains.num_properties = ARRAY_SIZE(smb347_mains_properties);
- smb->mains.supplied_to = battery;
- smb->mains.num_supplicants = ARRAY_SIZE(battery);
-
- smb->usb.name = "smb347-usb";
- smb->usb.type = POWER_SUPPLY_TYPE_USB;
- smb->usb.get_property = smb347_usb_get_property;
- smb->usb.properties = smb347_usb_properties;
- smb->usb.num_properties = ARRAY_SIZE(smb347_usb_properties);
- smb->usb.supplied_to = battery;
- smb->usb.num_supplicants = ARRAY_SIZE(battery);
+ if (smb->pdata->use_mains) {
+ smb->mains.name = "smb347-mains";
+ smb->mains.type = POWER_SUPPLY_TYPE_MAINS;
+ smb->mains.get_property = smb347_mains_get_property;
+ smb->mains.properties = smb347_mains_properties;
+ smb->mains.num_properties = ARRAY_SIZE(smb347_mains_properties);
+ smb->mains.supplied_to = battery;
+ smb->mains.num_supplicants = ARRAY_SIZE(battery);
+ ret = power_supply_register(dev, &smb->mains);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (smb->pdata->use_usb) {
+ smb->usb.name = "smb347-usb";
+ smb->usb.type = POWER_SUPPLY_TYPE_USB;
+ smb->usb.get_property = smb347_usb_get_property;
+ smb->usb.properties = smb347_usb_properties;
+ smb->usb.num_properties = ARRAY_SIZE(smb347_usb_properties);
+ smb->usb.supplied_to = battery;
+ smb->usb.num_supplicants = ARRAY_SIZE(battery);
+ ret = power_supply_register(dev, &smb->usb);
+ if (ret < 0) {
+ if (smb->pdata->use_mains)
+ power_supply_unregister(&smb->mains);
+ return ret;
+ }
+ }
smb->battery.name = "smb347-battery";
smb->battery.type = POWER_SUPPLY_TYPE_BATTERY;
@@ -1207,20 +1079,13 @@ static int smb347_probe(struct i2c_client *client,
smb->battery.properties = smb347_battery_properties;
smb->battery.num_properties = ARRAY_SIZE(smb347_battery_properties);
- ret = power_supply_register(dev, &smb->mains);
- if (ret < 0)
- return ret;
-
- ret = power_supply_register(dev, &smb->usb);
- if (ret < 0) {
- power_supply_unregister(&smb->mains);
- return ret;
- }
ret = power_supply_register(dev, &smb->battery);
if (ret < 0) {
- power_supply_unregister(&smb->usb);
- power_supply_unregister(&smb->mains);
+ if (smb->pdata->use_usb)
+ power_supply_unregister(&smb->usb);
+ if (smb->pdata->use_mains)
+ power_supply_unregister(&smb->mains);
return ret;
}
@@ -1229,15 +1094,15 @@ static int smb347_probe(struct i2c_client *client,
* interrupt support here.
*/
if (pdata->irq_gpio >= 0) {
- ret = smb347_irq_init(smb);
+ ret = smb347_irq_init(smb, client);
if (ret < 0) {
dev_warn(dev, "failed to initialize IRQ: %d\n", ret);
dev_warn(dev, "disabling IRQ support\n");
+ } else {
+ smb347_irq_enable(smb);
}
}
- smb->dentry = debugfs_create_file("smb347-regs", S_IRUSR, NULL, smb,
- &smb347_debugfs_fops);
return 0;
}
@@ -1245,9 +1110,6 @@ static int smb347_remove(struct i2c_client *client)
{
struct smb347_charger *smb = i2c_get_clientdata(client);
- if (!IS_ERR_OR_NULL(smb->dentry))
- debugfs_remove(smb->dentry);
-
if (client->irq) {
smb347_irq_disable(smb);
free_irq(client->irq, smb);
@@ -1255,8 +1117,10 @@ static int smb347_remove(struct i2c_client *client)
}
power_supply_unregister(&smb->battery);
- power_supply_unregister(&smb->usb);
- power_supply_unregister(&smb->mains);
+ if (smb->pdata->use_usb)
+ power_supply_unregister(&smb->usb);
+ if (smb->pdata->use_mains)
+ power_supply_unregister(&smb->mains);
return 0;
}
@@ -1275,17 +1139,7 @@ static struct i2c_driver smb347_driver = {
.id_table = smb347_id,
};
-static int __init smb347_init(void)
-{
- return i2c_add_driver(&smb347_driver);
-}
-module_init(smb347_init);
-
-static void __exit smb347_exit(void)
-{
- i2c_del_driver(&smb347_driver);
-}
-module_exit(smb347_exit);
+module_i2c_driver(smb347_driver);
MODULE_AUTHOR("Bruce E. Robertson <bruce.e.robertson@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index bc87192..6194d35 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -22,6 +22,20 @@ config RAPIDIO_ENABLE_RX_TX_PORTS
ports for Input/Output direction to allow other traffic
than Maintenance transfers.
+config RAPIDIO_DMA_ENGINE
+ bool "DMA Engine support for RapidIO"
+ depends on RAPIDIO
+ select DMADEVICES
+ select DMA_ENGINE
+ help
+ Say Y here if you want to use the DMA Engine framework for RapidIO
+ data transfers to/from target RIO devices. RapidIO uses NREAD and
+ NWRITE (NWRITE_R, SWRITE) requests to transfer data between local
+ memory and memory on a remote target device. You need a DMA controller
+ capable of performing data transfers to/from RapidIO.
+
+ If you are unsure about this, say Y here.
+
config RAPIDIO_DEBUG
bool "RapidIO subsystem debug messages"
depends on RAPIDIO
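
For context, the help text above refers to the generic DMA Engine framework; a RapidIO client ends up driving it through the standard dmaengine flow. A rough, generic sketch (plain slave-DMA style; the RapidIO-specific descriptor preparation is not shown and the filter below is a placeholder):

#include <linux/dmaengine.h>

/* Placeholder filter: accept whatever channel the core offers. */
static bool any_chan_filter(struct dma_chan *chan, void *arg)
{
	return true;
}

static int dma_client_example(void)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, any_chan_filter, NULL);
	if (!chan)
		return -ENODEV;

	/*
	 * A real client would now map its buffers, ask the controller
	 * driver to prepare a dma_async_tx_descriptor for them, then:
	 *
	 *	cookie = dmaengine_submit(txd);
	 *	dma_async_issue_pending(chan);
	 *
	 * and wait for the completion callback.
	 */

	dma_release_channel(chan);
	return 0;
}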
diff --git a/drivers/rapidio/devices/Makefile b/drivers/rapidio/devices/Makefile
index 3b7b4e2..7b62860 100644
--- a/drivers/rapidio/devices/Makefile
+++ b/drivers/rapidio/devices/Makefile
@@ -3,3 +3,6 @@
#
obj-$(CONFIG_RAPIDIO_TSI721) += tsi721.o
+ifeq ($(CONFIG_RAPIDIO_DMA_ENGINE),y)
+obj-$(CONFIG_RAPIDIO_TSI721) += tsi721_dma.o
+endif
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 30d2072..722246c 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -108,6 +108,7 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
u16 destid, u8 hopcount, u32 offset, int len,
u32 *data, int do_wr)
{
+ void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
struct tsi721_dma_desc *bd_ptr;
u32 rd_count, swr_ptr, ch_stat;
int i, err = 0;
@@ -116,10 +117,9 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
return -EINVAL;
- bd_ptr = priv->bdma[TSI721_DMACH_MAINT].bd_base;
+ bd_ptr = priv->mdma.bd_base;
- rd_count = ioread32(
- priv->regs + TSI721_DMAC_DRDCNT(TSI721_DMACH_MAINT));
+ rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);
/* Initialize DMA descriptor */
bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
@@ -134,19 +134,18 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
mb();
/* Start DMA operation */
- iowrite32(rd_count + 2,
- priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
- ioread32(priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
+ iowrite32(rd_count + 2, regs + TSI721_DMAC_DWRCNT);
+ ioread32(regs + TSI721_DMAC_DWRCNT);
i = 0;
/* Wait until DMA transfer is finished */
- while ((ch_stat = ioread32(priv->regs +
- TSI721_DMAC_STS(TSI721_DMACH_MAINT))) & TSI721_DMAC_STS_RUN) {
+ while ((ch_stat = ioread32(regs + TSI721_DMAC_STS))
+ & TSI721_DMAC_STS_RUN) {
udelay(1);
if (++i >= 5000000) {
dev_dbg(&priv->pdev->dev,
"%s : DMA[%d] read timeout ch_status=%x\n",
- __func__, TSI721_DMACH_MAINT, ch_stat);
+ __func__, priv->mdma.ch_id, ch_stat);
if (!do_wr)
*data = 0xffffffff;
err = -EIO;
@@ -162,13 +161,10 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
__func__, ch_stat);
dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n",
do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset);
- iowrite32(TSI721_DMAC_INT_ALL,
- priv->regs + TSI721_DMAC_INT(TSI721_DMACH_MAINT));
- iowrite32(TSI721_DMAC_CTL_INIT,
- priv->regs + TSI721_DMAC_CTL(TSI721_DMACH_MAINT));
+ iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
+ iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
udelay(10);
- iowrite32(0, priv->regs +
- TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
+ iowrite32(0, regs + TSI721_DMAC_DWRCNT);
udelay(1);
if (!do_wr)
*data = 0xffffffff;
@@ -184,8 +180,8 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
* NOTE: Skipping check and clear FIFO entries because we are waiting
* for transfer to be completed.
*/
- swr_ptr = ioread32(priv->regs + TSI721_DMAC_DSWP(TSI721_DMACH_MAINT));
- iowrite32(swr_ptr, priv->regs + TSI721_DMAC_DSRP(TSI721_DMACH_MAINT));
+ swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
+ iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);
err_out:
return err;
@@ -541,6 +537,22 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
tsi721_pw_handler(mport);
}
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ if (dev_int & TSI721_DEV_INT_BDMA_CH) {
+ int ch;
+
+ if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) {
+ dev_dbg(&priv->pdev->dev,
+ "IRQ from DMA channel 0x%08x\n", dev_ch_int);
+
+ for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) {
+ if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch)))
+ continue;
+ tsi721_bdma_handler(&priv->bdma[ch]);
+ }
+ }
+ }
+#endif
return IRQ_HANDLED;
}
@@ -553,18 +565,26 @@ static void tsi721_interrupts_init(struct tsi721_device *priv)
priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
iowrite32(TSI721_SR_CHINT_IDBQRCV,
priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
- iowrite32(TSI721_INT_SR2PC_CHAN(IDB_QUEUE),
- priv->regs + TSI721_DEV_CHAN_INTE);
/* Enable SRIO MAC interrupts */
iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT,
priv->regs + TSI721_RIO_EM_DEV_INT_EN);
+ /* Enable interrupts from channels in use */
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) |
+ (TSI721_INT_BDMA_CHAN_M &
+ ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT));
+#else
+ intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE);
+#endif
+ iowrite32(intr, priv->regs + TSI721_DEV_CHAN_INTE);
+
if (priv->flags & TSI721_USING_MSIX)
intr = TSI721_DEV_INT_SRIO;
else
intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
- TSI721_DEV_INT_SMSG_CH;
+ TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
iowrite32(intr, priv->regs + TSI721_DEV_INTE);
ioread32(priv->regs + TSI721_DEV_INTE);
@@ -715,12 +735,29 @@ static int tsi721_enable_msix(struct tsi721_device *priv)
TSI721_MSIX_OMSG_INT(i);
}
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ /*
+ * Initialize MSI-X entries for Block DMA Engine:
+ * this driver supports XXX DMA channels
+ * (one is reserved for SRIO maintenance transactions)
+ */
+ for (i = 0; i < TSI721_DMA_CHNUM; i++) {
+ entries[TSI721_VECT_DMA0_DONE + i].entry =
+ TSI721_MSIX_DMACH_DONE(i);
+ entries[TSI721_VECT_DMA0_INT + i].entry =
+ TSI721_MSIX_DMACH_INT(i);
+ }
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries));
if (err) {
if (err > 0)
dev_info(&priv->pdev->dev,
"Only %d MSI-X vectors available, "
"not using MSI-X\n", err);
+ else
+ dev_err(&priv->pdev->dev,
+ "Failed to enable MSI-X (err=%d)\n", err);
return err;
}
@@ -760,6 +797,22 @@ static int tsi721_enable_msix(struct tsi721_device *priv)
i, pci_name(priv->pdev));
}
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ for (i = 0; i < TSI721_DMA_CHNUM; i++) {
+ priv->msix[TSI721_VECT_DMA0_DONE + i].vector =
+ entries[TSI721_VECT_DMA0_DONE + i].vector;
+ snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name,
+ IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s",
+ i, pci_name(priv->pdev));
+
+ priv->msix[TSI721_VECT_DMA0_INT + i].vector =
+ entries[TSI721_VECT_DMA0_INT + i].vector;
+ snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name,
+ IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s",
+ i, pci_name(priv->pdev));
+ }
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
return 0;
}
#endif /* CONFIG_PCI_MSI */
@@ -888,20 +941,34 @@ static void tsi721_doorbell_free(struct tsi721_device *priv)
priv->idb_base = NULL;
}
-static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
+/**
+ * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel.
+ * @priv: pointer to tsi721 private data
+ *
+ * Initialize BDMA channel allocated for RapidIO maintenance read/write
+ * request generation.
+ * Returns %0 on success or %-ENOMEM on failure.
+ */
+static int tsi721_bdma_maint_init(struct tsi721_device *priv)
{
struct tsi721_dma_desc *bd_ptr;
u64 *sts_ptr;
dma_addr_t bd_phys, sts_phys;
int sts_size;
- int bd_num = priv->bdma[chnum].bd_num;
+ int bd_num = 2;
+ void __iomem *regs;
- dev_dbg(&priv->pdev->dev, "Init Block DMA Engine, CH%d\n", chnum);
+ dev_dbg(&priv->pdev->dev,
+ "Init Block DMA Engine for Maintenance requests, CH%d\n",
+ TSI721_DMACH_MAINT);
/*
* Initialize DMA channel for maintenance requests
*/
+ priv->mdma.ch_id = TSI721_DMACH_MAINT;
+ regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
+
/* Allocate space for DMA descriptors */
bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
bd_num * sizeof(struct tsi721_dma_desc),
@@ -909,8 +976,9 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
if (!bd_ptr)
return -ENOMEM;
- priv->bdma[chnum].bd_phys = bd_phys;
- priv->bdma[chnum].bd_base = bd_ptr;
+ priv->mdma.bd_num = bd_num;
+ priv->mdma.bd_phys = bd_phys;
+ priv->mdma.bd_base = bd_ptr;
dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
bd_ptr, (unsigned long long)bd_phys);
@@ -927,13 +995,13 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
dma_free_coherent(&priv->pdev->dev,
bd_num * sizeof(struct tsi721_dma_desc),
bd_ptr, bd_phys);
- priv->bdma[chnum].bd_base = NULL;
+ priv->mdma.bd_base = NULL;
return -ENOMEM;
}
- priv->bdma[chnum].sts_phys = sts_phys;
- priv->bdma[chnum].sts_base = sts_ptr;
- priv->bdma[chnum].sts_size = sts_size;
+ priv->mdma.sts_phys = sts_phys;
+ priv->mdma.sts_base = sts_ptr;
+ priv->mdma.sts_size = sts_size;
dev_dbg(&priv->pdev->dev,
"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
@@ -946,83 +1014,61 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
/* Setup DMA descriptor pointers */
- iowrite32(((u64)bd_phys >> 32),
- priv->regs + TSI721_DMAC_DPTRH(chnum));
+ iowrite32(((u64)bd_phys >> 32), regs + TSI721_DMAC_DPTRH);
iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
- priv->regs + TSI721_DMAC_DPTRL(chnum));
+ regs + TSI721_DMAC_DPTRL);
/* Setup descriptor status FIFO */
- iowrite32(((u64)sts_phys >> 32),
- priv->regs + TSI721_DMAC_DSBH(chnum));
+ iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH);
iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
- priv->regs + TSI721_DMAC_DSBL(chnum));
+ regs + TSI721_DMAC_DSBL);
iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
- priv->regs + TSI721_DMAC_DSSZ(chnum));
+ regs + TSI721_DMAC_DSSZ);
/* Clear interrupt bits */
- iowrite32(TSI721_DMAC_INT_ALL,
- priv->regs + TSI721_DMAC_INT(chnum));
+ iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
- ioread32(priv->regs + TSI721_DMAC_INT(chnum));
+ ioread32(regs + TSI721_DMAC_INT);
/* Toggle DMA channel initialization */
- iowrite32(TSI721_DMAC_CTL_INIT, priv->regs + TSI721_DMAC_CTL(chnum));
- ioread32(priv->regs + TSI721_DMAC_CTL(chnum));
+ iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
+ ioread32(regs + TSI721_DMAC_CTL);
udelay(10);
return 0;
}
-static int tsi721_bdma_ch_free(struct tsi721_device *priv, int chnum)
+static int tsi721_bdma_maint_free(struct tsi721_device *priv)
{
u32 ch_stat;
+ struct tsi721_bdma_maint *mdma = &priv->mdma;
+ void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id);
- if (priv->bdma[chnum].bd_base == NULL)
+ if (mdma->bd_base == NULL)
return 0;
/* Check if DMA channel still running */
- ch_stat = ioread32(priv->regs + TSI721_DMAC_STS(chnum));
+ ch_stat = ioread32(regs + TSI721_DMAC_STS);
if (ch_stat & TSI721_DMAC_STS_RUN)
return -EFAULT;
/* Put DMA channel into init state */
- iowrite32(TSI721_DMAC_CTL_INIT,
- priv->regs + TSI721_DMAC_CTL(chnum));
+ iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
/* Free space allocated for DMA descriptors */
dma_free_coherent(&priv->pdev->dev,
- priv->bdma[chnum].bd_num * sizeof(struct tsi721_dma_desc),
- priv->bdma[chnum].bd_base, priv->bdma[chnum].bd_phys);
- priv->bdma[chnum].bd_base = NULL;
+ mdma->bd_num * sizeof(struct tsi721_dma_desc),
+ mdma->bd_base, mdma->bd_phys);
+ mdma->bd_base = NULL;
/* Free space allocated for status FIFO */
dma_free_coherent(&priv->pdev->dev,
- priv->bdma[chnum].sts_size * sizeof(struct tsi721_dma_sts),
- priv->bdma[chnum].sts_base, priv->bdma[chnum].sts_phys);
- priv->bdma[chnum].sts_base = NULL;
- return 0;
-}
-
-static int tsi721_bdma_init(struct tsi721_device *priv)
-{
- /* Initialize BDMA channel allocated for RapidIO maintenance read/write
- * request generation
- */
- priv->bdma[TSI721_DMACH_MAINT].bd_num = 2;
- if (tsi721_bdma_ch_init(priv, TSI721_DMACH_MAINT)) {
- dev_err(&priv->pdev->dev, "Unable to initialize maintenance DMA"
- " channel %d, aborting\n", TSI721_DMACH_MAINT);
- return -ENOMEM;
- }
-
+ mdma->sts_size * sizeof(struct tsi721_dma_sts),
+ mdma->sts_base, mdma->sts_phys);
+ mdma->sts_base = NULL;
return 0;
}
-static void tsi721_bdma_free(struct tsi721_device *priv)
-{
- tsi721_bdma_ch_free(priv, TSI721_DMACH_MAINT);
-}
-
/* Enable Inbound Messaging Interrupts */
static void
tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch,
@@ -2035,7 +2081,8 @@ static void tsi721_disable_ints(struct tsi721_device *priv)
/* Disable all BDMA Channel interrupts */
for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
- iowrite32(0, priv->regs + TSI721_DMAC_INTE(ch));
+ iowrite32(0,
+ priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE);
/* Disable all general BDMA interrupts */
iowrite32(0, priv->regs + TSI721_BDMA_INTE);
@@ -2104,6 +2151,7 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
mport->phy_type = RIO_PHY_SERIAL;
mport->priv = (void *)priv;
mport->phys_efptr = 0x100;
+ priv->mport = mport;
INIT_LIST_HEAD(&mport->dbells);
@@ -2129,17 +2177,21 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
if (!err) {
tsi721_interrupts_init(priv);
ops->pwenable = tsi721_pw_enable;
- } else
+ } else {
dev_err(&pdev->dev, "Unable to get assigned PCI IRQ "
"vector %02X err=0x%x\n", pdev->irq, err);
+ goto err_exit;
+ }
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ tsi721_register_dma(priv);
+#endif
/* Enable SRIO link */
iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
TSI721_DEVCTL_SRBOOT_CMPL,
priv->regs + TSI721_DEVCTL);
rio_register_mport(mport);
- priv->mport = mport;
if (mport->host_deviceid >= 0)
iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
@@ -2149,6 +2201,11 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
return 0;
+
+err_exit:
+ kfree(mport);
+ kfree(ops);
+ return err;
}
static int __devinit tsi721_probe(struct pci_dev *pdev,
@@ -2294,7 +2351,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
tsi721_init_pc2sr_mapping(priv);
tsi721_init_sr2pc_mapping(priv);
- if (tsi721_bdma_init(priv)) {
+ if (tsi721_bdma_maint_init(priv)) {
dev_err(&pdev->dev, "BDMA initialization failed, aborting\n");
err = -ENOMEM;
goto err_unmap_bars;
@@ -2319,7 +2376,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
err_free_consistent:
tsi721_doorbell_free(priv);
err_free_bdma:
- tsi721_bdma_free(priv);
+ tsi721_bdma_maint_free(priv);
err_unmap_bars:
if (priv->regs)
iounmap(priv->regs);
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 1c226b3..59de9d7 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -167,6 +167,8 @@
#define TSI721_DEV_INTE 0x29840
#define TSI721_DEV_INT 0x29844
#define TSI721_DEV_INTSET 0x29848
+#define TSI721_DEV_INT_BDMA_CH 0x00002000
+#define TSI721_DEV_INT_BDMA_NCH 0x00001000
#define TSI721_DEV_INT_SMSG_CH 0x00000800
#define TSI721_DEV_INT_SMSG_NCH 0x00000400
#define TSI721_DEV_INT_SR2PC_CH 0x00000200
@@ -181,6 +183,8 @@
#define TSI721_INT_IMSG_CHAN(x) (1 << (16 + (x)))
#define TSI721_INT_OMSG_CHAN_M 0x0000ff00
#define TSI721_INT_OMSG_CHAN(x) (1 << (8 + (x)))
+#define TSI721_INT_BDMA_CHAN_M 0x000000ff
+#define TSI721_INT_BDMA_CHAN(x) (1 << (x))
/*
* PC2SR block registers
@@ -235,14 +239,16 @@
* x = 0..7
*/
-#define TSI721_DMAC_DWRCNT(x) (0x51000 + (x) * 0x1000)
-#define TSI721_DMAC_DRDCNT(x) (0x51004 + (x) * 0x1000)
+#define TSI721_DMAC_BASE(x) (0x51000 + (x) * 0x1000)
-#define TSI721_DMAC_CTL(x) (0x51008 + (x) * 0x1000)
+#define TSI721_DMAC_DWRCNT 0x000
+#define TSI721_DMAC_DRDCNT 0x004
+
+#define TSI721_DMAC_CTL 0x008
#define TSI721_DMAC_CTL_SUSP 0x00000002
#define TSI721_DMAC_CTL_INIT 0x00000001
-#define TSI721_DMAC_INT(x) (0x5100c + (x) * 0x1000)
+#define TSI721_DMAC_INT 0x00c
#define TSI721_DMAC_INT_STFULL 0x00000010
#define TSI721_DMAC_INT_DONE 0x00000008
#define TSI721_DMAC_INT_SUSP 0x00000004
@@ -250,34 +256,33 @@
#define TSI721_DMAC_INT_IOFDONE 0x00000001
#define TSI721_DMAC_INT_ALL 0x0000001f
-#define TSI721_DMAC_INTSET(x) (0x51010 + (x) * 0x1000)
+#define TSI721_DMAC_INTSET 0x010
-#define TSI721_DMAC_STS(x) (0x51014 + (x) * 0x1000)
+#define TSI721_DMAC_STS 0x014
#define TSI721_DMAC_STS_ABORT 0x00400000
#define TSI721_DMAC_STS_RUN 0x00200000
#define TSI721_DMAC_STS_CS 0x001f0000
-#define TSI721_DMAC_INTE(x) (0x51018 + (x) * 0x1000)
+#define TSI721_DMAC_INTE 0x018
-#define TSI721_DMAC_DPTRL(x) (0x51024 + (x) * 0x1000)
+#define TSI721_DMAC_DPTRL 0x024
#define TSI721_DMAC_DPTRL_MASK 0xffffffe0
-#define TSI721_DMAC_DPTRH(x) (0x51028 + (x) * 0x1000)
+#define TSI721_DMAC_DPTRH 0x028
-#define TSI721_DMAC_DSBL(x) (0x5102c + (x) * 0x1000)
+#define TSI721_DMAC_DSBL 0x02c
#define TSI721_DMAC_DSBL_MASK 0xffffffc0
-#define TSI721_DMAC_DSBH(x) (0x51030 + (x) * 0x1000)
+#define TSI721_DMAC_DSBH 0x030
-#define TSI721_DMAC_DSSZ(x) (0x51034 + (x) * 0x1000)
+#define TSI721_DMAC_DSSZ 0x034
#define TSI721_DMAC_DSSZ_SIZE_M 0x0000000f
#define TSI721_DMAC_DSSZ_SIZE(size) (__fls(size) - 4)
-
-#define TSI721_DMAC_DSRP(x) (0x51038 + (x) * 0x1000)
+#define TSI721_DMAC_DSRP 0x038
#define TSI721_DMAC_DSRP_MASK 0x0007ffff
-#define TSI721_DMAC_DSWP(x) (0x5103c + (x) * 0x1000)
+#define TSI721_DMAC_DSWP 0x03c
#define TSI721_DMAC_DSWP_MASK 0x0007ffff
#define TSI721_BDMA_INTE 0x5f000
@@ -612,6 +617,8 @@ enum dma_rtype {
#define TSI721_DMACH_MAINT 0 /* DMA channel for maint requests */
#define TSI721_DMACH_MAINT_NBD 32 /* Number of BDs for maint requests */
+#define TSI721_DMACH_DMA 1 /* DMA channel for data transfers */
+
#define MSG_DMA_ENTRY_INX_TO_SIZE(x) ((0x10 << (x)) & 0xFFFF0)
enum tsi721_smsg_int_flag {
@@ -626,7 +633,48 @@ enum tsi721_smsg_int_flag {
/* Structures */
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+struct tsi721_tx_desc {
+ struct dma_async_tx_descriptor txd;
+ struct tsi721_dma_desc *hw_desc;
+ u16 destid;
+ /* low 64-bits of 66-bit RIO address */
+ u64 rio_addr;
+ /* upper 2-bits of 66-bit RIO address */
+ u8 rio_addr_u;
+ bool interrupt;
+ struct list_head desc_node;
+ struct list_head tx_list;
+};
+
struct tsi721_bdma_chan {
+ int id;
+ void __iomem *regs;
+ int bd_num; /* number of buffer descriptors */
+ void *bd_base; /* start of DMA descriptors */
+ dma_addr_t bd_phys;
+ void *sts_base; /* start of DMA BD status FIFO */
+ dma_addr_t sts_phys;
+ int sts_size;
+ u32 sts_rdptr;
+ u32 wr_count;
+ u32 wr_count_next;
+
+ struct dma_chan dchan;
+ struct tsi721_tx_desc *tx_desc;
+ spinlock_t lock;
+ struct list_head active_list;
+ struct list_head queue;
+ struct list_head free_list;
+ dma_cookie_t completed_cookie;
+ struct tasklet_struct tasklet;
+};
+
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
+struct tsi721_bdma_maint {
+ int ch_id; /* BDMA channel number */
int bd_num; /* number of buffer descriptors */
void *bd_base; /* start of DMA descriptors */
dma_addr_t bd_phys;
@@ -721,6 +769,24 @@ enum tsi721_msix_vect {
TSI721_VECT_IMB1_INT,
TSI721_VECT_IMB2_INT,
TSI721_VECT_IMB3_INT,
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+ TSI721_VECT_DMA0_DONE,
+ TSI721_VECT_DMA1_DONE,
+ TSI721_VECT_DMA2_DONE,
+ TSI721_VECT_DMA3_DONE,
+ TSI721_VECT_DMA4_DONE,
+ TSI721_VECT_DMA5_DONE,
+ TSI721_VECT_DMA6_DONE,
+ TSI721_VECT_DMA7_DONE,
+ TSI721_VECT_DMA0_INT,
+ TSI721_VECT_DMA1_INT,
+ TSI721_VECT_DMA2_INT,
+ TSI721_VECT_DMA3_INT,
+ TSI721_VECT_DMA4_INT,
+ TSI721_VECT_DMA5_INT,
+ TSI721_VECT_DMA6_INT,
+ TSI721_VECT_DMA7_INT,
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
TSI721_VECT_MAX
};
@@ -754,7 +820,11 @@ struct tsi721_device {
u32 pw_discard_count;
/* BDMA Engine */
+ struct tsi721_bdma_maint mdma; /* Maintenance rd/wr request channel */
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM];
+#endif
/* Inbound Messaging */
int imsg_init[TSI721_IMSG_CHNUM];
@@ -765,4 +835,9 @@ struct tsi721_device {
struct tsi721_omsg_ring omsg_ring[TSI721_OMSG_CHNUM];
};
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+extern void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan);
+extern int __devinit tsi721_register_dma(struct tsi721_device *priv);
+#endif
+
#endif
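The hunk above makes the per-channel BDMA register macros relative offsets within a channel block instead of absolute BAR0 offsets. A minimal sketch of how they are expected to combine, assuming TSI721_DMAC_BASE(x) expands to (0x51000 + (x) * 0x1000) (consistent with the absolute macros being removed and with its use in tsi721_register_dma() later in this patch):

static void __iomem *tsi721_dmac_ch_regs(void __iomem *bar0, int ch)
{
	/* per-channel register block base (assumed 0x51000 + ch * 0x1000) */
	return bar0 + TSI721_DMAC_BASE(ch);
}

static u32 tsi721_dmac_ch_status(void __iomem *bar0, int ch)
{
	/* channel status register now sits at block base + 0x014 */
	return ioread32(tsi721_dmac_ch_regs(bar0, ch) + TSI721_DMAC_STS);
}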
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
new file mode 100644
index 0000000..92e06a5
--- /dev/null
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -0,0 +1,823 @@
+/*
+ * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
+ *
+ * Copyright 2011 Integrated Device Technology, Inc.
+ * Alexandre Bounine <alexandre.bounine@idt.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/delay.h>
+
+#include "tsi721.h"
+
+static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct tsi721_bdma_chan, dchan);
+}
+
+static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
+{
+ return container_of(ddev, struct rio_mport, dma)->priv;
+}
+
+static inline
+struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
+{
+ return container_of(txd, struct tsi721_tx_desc, txd);
+}
+
+static inline
+struct tsi721_tx_desc *tsi721_dma_first_active(
+ struct tsi721_bdma_chan *bdma_chan)
+{
+ return list_first_entry(&bdma_chan->active_list,
+ struct tsi721_tx_desc, desc_node);
+}
+
+static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
+{
+ struct tsi721_dma_desc *bd_ptr;
+ struct device *dev = bdma_chan->dchan.device->dev;
+ u64 *sts_ptr;
+ dma_addr_t bd_phys;
+ dma_addr_t sts_phys;
+ int sts_size;
+ int bd_num = bdma_chan->bd_num;
+
+ dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);
+
+ /* Allocate space for DMA descriptors */
+ bd_ptr = dma_zalloc_coherent(dev,
+ bd_num * sizeof(struct tsi721_dma_desc),
+ &bd_phys, GFP_KERNEL);
+ if (!bd_ptr)
+ return -ENOMEM;
+
+ bdma_chan->bd_phys = bd_phys;
+ bdma_chan->bd_base = bd_ptr;
+
+ dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
+ bd_ptr, (unsigned long long)bd_phys);
+
+ /* Allocate space for descriptor status FIFO */
+ sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
+ bd_num : TSI721_DMA_MINSTSSZ;
+ sts_size = roundup_pow_of_two(sts_size);
+ sts_ptr = dma_zalloc_coherent(dev,
+ sts_size * sizeof(struct tsi721_dma_sts),
+ &sts_phys, GFP_KERNEL);
+ if (!sts_ptr) {
+ /* Free space allocated for DMA descriptors */
+ dma_free_coherent(dev,
+ bd_num * sizeof(struct tsi721_dma_desc),
+ bd_ptr, bd_phys);
+ bdma_chan->bd_base = NULL;
+ return -ENOMEM;
+ }
+
+ bdma_chan->sts_phys = sts_phys;
+ bdma_chan->sts_base = sts_ptr;
+ bdma_chan->sts_size = sts_size;
+
+ dev_dbg(dev,
+ "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
+ sts_ptr, (unsigned long long)sts_phys, sts_size);
+
+ /* Initialize DMA descriptors ring */
+ bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
+ bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
+ TSI721_DMAC_DPTRL_MASK);
+ bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
+
+ /* Setup DMA descriptor pointers */
+ iowrite32(((u64)bd_phys >> 32),
+ bdma_chan->regs + TSI721_DMAC_DPTRH);
+ iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
+ bdma_chan->regs + TSI721_DMAC_DPTRL);
+
+ /* Setup descriptor status FIFO */
+ iowrite32(((u64)sts_phys >> 32),
+ bdma_chan->regs + TSI721_DMAC_DSBH);
+ iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
+ bdma_chan->regs + TSI721_DMAC_DSBL);
+ iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
+ bdma_chan->regs + TSI721_DMAC_DSSZ);
+
+ /* Clear interrupt bits */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ bdma_chan->regs + TSI721_DMAC_INT);
+
+ ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+
+ /* Toggle DMA channel initialization */
+ iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
+ ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
+ bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
+ bdma_chan->sts_rdptr = 0;
+ udelay(10);
+
+ return 0;
+}
+
+static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
+{
+ u32 ch_stat;
+
+ if (bdma_chan->bd_base == NULL)
+ return 0;
+
+ /* Check if DMA channel still running */
+ ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+ if (ch_stat & TSI721_DMAC_STS_RUN)
+ return -EFAULT;
+
+ /* Put DMA channel into init state */
+ iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
+
+ /* Free space allocated for DMA descriptors */
+ dma_free_coherent(bdma_chan->dchan.device->dev,
+ bdma_chan->bd_num * sizeof(struct tsi721_dma_desc),
+ bdma_chan->bd_base, bdma_chan->bd_phys);
+ bdma_chan->bd_base = NULL;
+
+ /* Free space allocated for status FIFO */
+ dma_free_coherent(bdma_chan->dchan.device->dev,
+ bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
+ bdma_chan->sts_base, bdma_chan->sts_phys);
+ bdma_chan->sts_base = NULL;
+ return 0;
+}
+
+static void
+tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
+{
+ if (enable) {
+ /* Clear pending BDMA channel interrupts */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ bdma_chan->regs + TSI721_DMAC_INT);
+ ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+ /* Enable BDMA channel interrupts */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ bdma_chan->regs + TSI721_DMAC_INTE);
+ } else {
+ /* Disable BDMA channel interrupts */
+ iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+ /* Clear pending BDMA channel interrupts */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ bdma_chan->regs + TSI721_DMAC_INT);
+ }
+}
+
+static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
+{
+ u32 sts;
+
+ sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+ return ((sts & TSI721_DMAC_STS_RUN) == 0);
+}
+
+void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
+{
+ /* Disable BDMA channel interrupts */
+ iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+
+ tasklet_schedule(&bdma_chan->tasklet);
+}
+
+#ifdef CONFIG_PCI_MSI
+/**
+ * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
+ * @irq: Linux interrupt number
+ * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
+ *
+ * Handles BDMA channel interrupts signaled using MSI-X.
+ */
+static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
+{
+ struct tsi721_bdma_chan *bdma_chan = ptr;
+
+ tsi721_bdma_handler(bdma_chan);
+ return IRQ_HANDLED;
+}
+#endif /* CONFIG_PCI_MSI */
+
+/* Must be called with the spinlock held */
+static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
+{
+ if (!tsi721_dma_is_idle(bdma_chan)) {
+ dev_err(bdma_chan->dchan.device->dev,
+ "BUG: Attempt to start non-idle channel\n");
+ return;
+ }
+
+ if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
+ dev_err(bdma_chan->dchan.device->dev,
+ "BUG: Attempt to start DMA with no BDs ready\n");
+ return;
+ }
+
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "tx_chan: %p, chan: %d, regs: %p\n",
+ bdma_chan, bdma_chan->dchan.chan_id, bdma_chan->regs);
+
+ iowrite32(bdma_chan->wr_count_next,
+ bdma_chan->regs + TSI721_DMAC_DWRCNT);
+ ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);
+
+ bdma_chan->wr_count = bdma_chan->wr_count_next;
+}
+
+static void tsi721_desc_put(struct tsi721_bdma_chan *bdma_chan,
+ struct tsi721_tx_desc *desc)
+{
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "Put desc: %p into free list\n", desc);
+
+ if (desc) {
+ spin_lock_bh(&bdma_chan->lock);
+ list_splice_init(&desc->tx_list, &bdma_chan->free_list);
+ list_add(&desc->desc_node, &bdma_chan->free_list);
+ bdma_chan->wr_count_next = bdma_chan->wr_count;
+ spin_unlock_bh(&bdma_chan->lock);
+ }
+}
+
+static
+struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
+{
+ struct tsi721_tx_desc *tx_desc, *_tx_desc;
+ struct tsi721_tx_desc *ret = NULL;
+ int i;
+
+ spin_lock_bh(&bdma_chan->lock);
+ list_for_each_entry_safe(tx_desc, _tx_desc,
+ &bdma_chan->free_list, desc_node) {
+ if (async_tx_test_ack(&tx_desc->txd)) {
+ list_del(&tx_desc->desc_node);
+ ret = tx_desc;
+ break;
+ }
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "desc %p not ACKed\n", tx_desc);
+ }
+
+ if (ret) {
+ i = bdma_chan->wr_count_next % bdma_chan->bd_num;
+ if (i == bdma_chan->bd_num - 1) {
+ i = 0;
+ bdma_chan->wr_count_next++; /* skip link descriptor */
+ }
+
+ bdma_chan->wr_count_next++;
+ ret->txd.phys = bdma_chan->bd_phys +
+ i * sizeof(struct tsi721_dma_desc);
+ ret->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
+ }
+
+ spin_unlock_bh(&bdma_chan->lock);
+
+ return ret;
+}
+
+static int
+tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan,
+ struct tsi721_tx_desc *desc, struct scatterlist *sg,
+ enum dma_rtype rtype, u32 sys_size)
+{
+ struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
+ u64 rio_addr;
+
+ if (sg_dma_len(sg) > TSI721_DMAD_BCOUNT1 + 1) {
+ dev_err(bdma_chan->dchan.device->dev,
+ "SG element is too large\n");
+ return -EINVAL;
+ }
+
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "desc: 0x%llx, addr: 0x%llx len: 0x%x\n",
+ (u64)desc->txd.phys, (unsigned long long)sg_dma_address(sg),
+ sg_dma_len(sg));
+
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "bd_ptr = %p did=%d raddr=0x%llx\n",
+ bd_ptr, desc->destid, desc->rio_addr);
+
+ /* Initialize DMA descriptor */
+ bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
+ (rtype << 19) | desc->destid);
+ if (desc->interrupt)
+ bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
+ bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
+ (sys_size << 26) | sg_dma_len(sg));
+ rio_addr = (desc->rio_addr >> 2) |
+ ((u64)(desc->rio_addr_u & 0x3) << 62);
+ bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
+ bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
+ bd_ptr->t1.bufptr_lo = cpu_to_le32(
+ (u64)sg_dma_address(sg) & 0xffffffff);
+ bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
+ bd_ptr->t1.s_dist = 0;
+ bd_ptr->t1.s_size = 0;
+
+ return 0;
+}
+
+static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan,
+ struct tsi721_tx_desc *desc)
+{
+ struct dma_async_tx_descriptor *txd = &desc->txd;
+ dma_async_tx_callback callback = txd->callback;
+ void *param = txd->callback_param;
+
+ list_splice_init(&desc->tx_list, &bdma_chan->free_list);
+ list_move(&desc->desc_node, &bdma_chan->free_list);
+ bdma_chan->completed_cookie = txd->cookie;
+
+ if (callback)
+ callback(param);
+}
+
+static void tsi721_dma_complete_all(struct tsi721_bdma_chan *bdma_chan)
+{
+ struct tsi721_tx_desc *desc, *_d;
+ LIST_HEAD(list);
+
+ BUG_ON(!tsi721_dma_is_idle(bdma_chan));
+
+ if (!list_empty(&bdma_chan->queue))
+ tsi721_start_dma(bdma_chan);
+
+ list_splice_init(&bdma_chan->active_list, &list);
+ list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);
+
+ list_for_each_entry_safe(desc, _d, &list, desc_node)
+ tsi721_dma_chain_complete(bdma_chan, desc);
+}
+
+static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
+{
+ u32 srd_ptr;
+ u64 *sts_ptr;
+ int i, j;
+
+ /* Check and clear descriptor status FIFO entries */
+ srd_ptr = bdma_chan->sts_rdptr;
+ sts_ptr = bdma_chan->sts_base;
+ j = srd_ptr * 8;
+ while (sts_ptr[j]) {
+ for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
+ sts_ptr[j] = 0;
+
+ ++srd_ptr;
+ srd_ptr %= bdma_chan->sts_size;
+ j = srd_ptr * 8;
+ }
+
+ iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
+ bdma_chan->sts_rdptr = srd_ptr;
+}
+
+static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
+{
+ if (list_empty(&bdma_chan->active_list) ||
+ list_is_singular(&bdma_chan->active_list)) {
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "%s: Active_list empty\n", __func__);
+ tsi721_dma_complete_all(bdma_chan);
+ } else {
+ dev_dbg(bdma_chan->dchan.device->dev,
+ "%s: Active_list NOT empty\n", __func__);
+ tsi721_dma_chain_complete(bdma_chan,
+ tsi721_dma_first_active(bdma_chan));
+ tsi721_start_dma(bdma_chan);
+ }
+}
+
+static void tsi721_dma_tasklet(unsigned long data)
+{
+ struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
+ u32 dmac_int, dmac_sts;
+
+ dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+ dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
+ __func__, bdma_chan->id, dmac_int);
+ /* Clear channel interrupts */
+ iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);
+
+ if (dmac_int & TSI721_DMAC_INT_ERR) {
+ dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+ dev_err(bdma_chan->dchan.device->dev,
+ "%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
+ __func__, bdma_chan->id, dmac_sts);
+ }
+
+ if (dmac_int & TSI721_DMAC_INT_STFULL) {
+ dev_err(bdma_chan->dchan.device->dev,
+ "%s: DMAC%d descriptor status FIFO is full\n",
+ __func__, bdma_chan->id);
+ }
+
+ if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
+ tsi721_clr_stat(bdma_chan);
+ spin_lock(&bdma_chan->lock);
+ tsi721_advance_work(bdma_chan);
+ spin_unlock(&bdma_chan->lock);
+ }
+
+ /* Re-Enable BDMA channel interrupts */
+ iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
+}
+
+static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
+ dma_cookie_t cookie;
+
+ spin_lock_bh(&bdma_chan->lock);
+
+ cookie = txd->chan->cookie;
+ if (++cookie < 0)
+ cookie = 1;
+ txd->chan->cookie = cookie;
+ txd->cookie = cookie;
+
+ if (list_empty(&bdma_chan->active_list)) {
+ list_add_tail(&desc->desc_node, &bdma_chan->active_list);
+ tsi721_start_dma(bdma_chan);
+ } else {
+ list_add_tail(&desc->desc_node, &bdma_chan->queue);
+ }
+
+ spin_unlock_bh(&bdma_chan->lock);
+ return cookie;
+}
+
+static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+#ifdef CONFIG_PCI_MSI
+ struct tsi721_device *priv = to_tsi721(dchan->device);
+#endif
+ struct tsi721_tx_desc *desc = NULL;
+ LIST_HEAD(tmp_list);
+ int i;
+ int rc;
+
+ if (bdma_chan->bd_base)
+ return bdma_chan->bd_num - 1;
+
+ /* Initialize BDMA channel */
+ if (tsi721_bdma_ch_init(bdma_chan)) {
+ dev_err(dchan->device->dev, "Unable to initialize data DMA"
+ " channel %d, aborting\n", bdma_chan->id);
+ return -ENOMEM;
+ }
+
+ /* Allocate matching number of logical descriptors */
+ desc = kcalloc((bdma_chan->bd_num - 1), sizeof(struct tsi721_tx_desc),
+ GFP_KERNEL);
+ if (!desc) {
+ dev_err(dchan->device->dev,
+ "Failed to allocate logical descriptors\n");
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ bdma_chan->tx_desc = desc;
+
+ for (i = 0; i < bdma_chan->bd_num - 1; i++) {
+ dma_async_tx_descriptor_init(&desc[i].txd, dchan);
+ desc[i].txd.tx_submit = tsi721_tx_submit;
+ desc[i].txd.flags = DMA_CTRL_ACK;
+ INIT_LIST_HEAD(&desc[i].tx_list);
+ list_add_tail(&desc[i].desc_node, &tmp_list);
+ }
+
+ spin_lock_bh(&bdma_chan->lock);
+ list_splice(&tmp_list, &bdma_chan->free_list);
+ bdma_chan->completed_cookie = dchan->cookie = 1;
+ spin_unlock_bh(&bdma_chan->lock);
+
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX) {
+ /* Request interrupt service if we are in MSI-X mode */
+ rc = request_irq(
+ priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].vector,
+ tsi721_bdma_msix, 0,
+ priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].irq_name,
+ (void *)bdma_chan);
+
+ if (rc) {
+ dev_dbg(dchan->device->dev,
+ "Unable to allocate MSI-X interrupt for "
+ "BDMA%d-DONE\n", bdma_chan->id);
+ goto err_out;
+ }
+
+ rc = request_irq(priv->msix[TSI721_VECT_DMA0_INT +
+ bdma_chan->id].vector,
+ tsi721_bdma_msix, 0,
+ priv->msix[TSI721_VECT_DMA0_INT +
+ bdma_chan->id].irq_name,
+ (void *)bdma_chan);
+
+ if (rc) {
+ dev_dbg(dchan->device->dev,
+ "Unable to allocate MSI-X interrupt for "
+ "BDMA%d-INT\n", bdma_chan->id);
+ free_irq(
+ priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].vector,
+ (void *)bdma_chan);
+ rc = -EIO;
+ goto err_out;
+ }
+ }
+#endif /* CONFIG_PCI_MSI */
+
+ tasklet_enable(&bdma_chan->tasklet);
+ tsi721_bdma_interrupt_enable(bdma_chan, 1);
+
+ return bdma_chan->bd_num - 1;
+
+err_out:
+ kfree(desc);
+ tsi721_bdma_ch_free(bdma_chan);
+ return rc;
+}
+
+static void tsi721_free_chan_resources(struct dma_chan *dchan)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+#ifdef CONFIG_PCI_MSI
+ struct tsi721_device *priv = to_tsi721(dchan->device);
+#endif
+ LIST_HEAD(list);
+
+ dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+ if (bdma_chan->bd_base == NULL)
+ return;
+
+ BUG_ON(!list_empty(&bdma_chan->active_list));
+ BUG_ON(!list_empty(&bdma_chan->queue));
+
+ tasklet_disable(&bdma_chan->tasklet);
+
+ spin_lock_bh(&bdma_chan->lock);
+ list_splice_init(&bdma_chan->free_list, &list);
+ spin_unlock_bh(&bdma_chan->lock);
+
+ tsi721_bdma_interrupt_enable(bdma_chan, 0);
+
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX) {
+ free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].vector, (void *)bdma_chan);
+ free_irq(priv->msix[TSI721_VECT_DMA0_INT +
+ bdma_chan->id].vector, (void *)bdma_chan);
+ }
+#endif /* CONFIG_PCI_MSI */
+
+ tsi721_bdma_ch_free(bdma_chan);
+ kfree(bdma_chan->tx_desc);
+}
+
+static
+enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_completed;
+ int ret;
+
+ spin_lock_bh(&bdma_chan->lock);
+ last_completed = bdma_chan->completed_cookie;
+ last_used = dchan->cookie;
+ spin_unlock_bh(&bdma_chan->lock);
+
+ ret = dma_async_is_complete(cookie, last_completed, last_used);
+
+ dma_set_tx_state(txstate, last_completed, last_used, 0);
+
+ dev_dbg(dchan->device->dev,
+ "%s: exit, ret: %d, last_completed: %d, last_used: %d\n",
+ __func__, ret, last_completed, last_used);
+
+ return ret;
+}
+
+static void tsi721_issue_pending(struct dma_chan *dchan)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+
+ dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+ if (tsi721_dma_is_idle(bdma_chan)) {
+ spin_lock_bh(&bdma_chan->lock);
+ tsi721_advance_work(bdma_chan);
+ spin_unlock_bh(&bdma_chan->lock);
+ } else
+ dev_dbg(dchan->device->dev,
+ "%s: DMA channel still busy\n", __func__);
+}
+
+static
+struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
+ struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction dir, unsigned long flags,
+ void *tinfo)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+ struct tsi721_tx_desc *desc = NULL;
+ struct tsi721_tx_desc *first = NULL;
+ struct scatterlist *sg;
+ struct rio_dma_ext *rext = tinfo;
+ u64 rio_addr = rext->rio_addr; /* limited to 64-bit rio_addr for now */
+ unsigned int i;
+ u32 sys_size = dma_to_mport(dchan->device)->sys_size;
+ enum dma_rtype rtype;
+
+ if (!sgl || !sg_len) {
+ dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
+ return NULL;
+ }
+
+ if (dir == DMA_DEV_TO_MEM)
+ rtype = NREAD;
+ else if (dir == DMA_MEM_TO_DEV) {
+ switch (rext->wr_type) {
+ case RDW_ALL_NWRITE:
+ rtype = ALL_NWRITE;
+ break;
+ case RDW_ALL_NWRITE_R:
+ rtype = ALL_NWRITE_R;
+ break;
+ case RDW_LAST_NWRITE_R:
+ default:
+ rtype = LAST_NWRITE_R;
+ break;
+ }
+ } else {
+ dev_err(dchan->device->dev,
+ "%s: Unsupported DMA direction option\n", __func__);
+ return NULL;
+ }
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ int err;
+
+ dev_dbg(dchan->device->dev, "%s: sg #%d\n", __func__, i);
+ desc = tsi721_desc_get(bdma_chan);
+ if (!desc) {
+ dev_err(dchan->device->dev,
+ "Not enough descriptors available\n");
+ goto err_desc_get;
+ }
+
+ if (sg_is_last(sg))
+ desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
+ else
+ desc->interrupt = false;
+
+ desc->destid = rext->destid;
+ desc->rio_addr = rio_addr;
+ desc->rio_addr_u = 0;
+
+ err = tsi721_fill_desc(bdma_chan, desc, sg, rtype, sys_size);
+ if (err) {
+ dev_err(dchan->device->dev,
+ "Failed to build desc: %d\n", err);
+ goto err_desc_get;
+ }
+
+ rio_addr += sg_dma_len(sg);
+
+ if (!first)
+ first = desc;
+ else
+ list_add_tail(&desc->desc_node, &first->tx_list);
+ }
+
+ first->txd.cookie = -EBUSY;
+ desc->txd.flags = flags;
+
+ return &first->txd;
+
+err_desc_get:
+ tsi721_desc_put(bdma_chan, first);
+ return NULL;
+}
+
+static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
+ struct tsi721_tx_desc *desc, *_d;
+ LIST_HEAD(list);
+
+ dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
+
+ if (cmd != DMA_TERMINATE_ALL)
+ return -ENXIO;
+
+ spin_lock_bh(&bdma_chan->lock);
+
+ /* make sure to stop the transfer */
+ iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL);
+
+ list_splice_init(&bdma_chan->active_list, &list);
+ list_splice_init(&bdma_chan->queue, &list);
+
+ list_for_each_entry_safe(desc, _d, &list, desc_node)
+ tsi721_dma_chain_complete(bdma_chan, desc);
+
+ spin_unlock_bh(&bdma_chan->lock);
+
+ return 0;
+}
+
+int __devinit tsi721_register_dma(struct tsi721_device *priv)
+{
+ int i;
+ int nr_channels = TSI721_DMA_MAXCH;
+ int err;
+ struct rio_mport *mport = priv->mport;
+
+ mport->dma.dev = &priv->pdev->dev;
+ mport->dma.chancnt = nr_channels;
+
+ INIT_LIST_HEAD(&mport->dma.channels);
+
+ for (i = 0; i < nr_channels; i++) {
+ struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];
+
+ if (i == TSI721_DMACH_MAINT)
+ continue;
+
+ bdma_chan->bd_num = 64;
+ bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);
+
+ bdma_chan->dchan.device = &mport->dma;
+ bdma_chan->dchan.cookie = 1;
+ bdma_chan->dchan.chan_id = i;
+ bdma_chan->id = i;
+
+ spin_lock_init(&bdma_chan->lock);
+
+ INIT_LIST_HEAD(&bdma_chan->active_list);
+ INIT_LIST_HEAD(&bdma_chan->queue);
+ INIT_LIST_HEAD(&bdma_chan->free_list);
+
+ tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
+ (unsigned long)bdma_chan);
+ tasklet_disable(&bdma_chan->tasklet);
+ list_add_tail(&bdma_chan->dchan.device_node,
+ &mport->dma.channels);
+ }
+
+ dma_cap_zero(mport->dma.cap_mask);
+ dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
+ dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);
+
+ mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
+ mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
+ mport->dma.device_tx_status = tsi721_tx_status;
+ mport->dma.device_issue_pending = tsi721_issue_pending;
+ mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
+ mport->dma.device_control = tsi721_device_control;
+
+ err = dma_async_device_register(&mport->dma);
+ if (err)
+ dev_err(&priv->pdev->dev, "Failed to register DMA device\n");
+
+ return err;
+}
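tsi721_desc_get() above maps the channel's free-running write count onto an index into the hardware descriptor ring, skipping the last entry, which is a DTYPE3 link descriptor pointing back to the start of the ring. A stand-alone sketch of that index mapping (illustrative only; the ring size of 4 is hypothetical):

#include <stdio.h>

static unsigned int next_bd_index(unsigned int *wr_count_next,
                                  unsigned int bd_num)
{
	unsigned int i = *wr_count_next % bd_num;

	if (i == bd_num - 1) {		/* last slot holds the link descriptor */
		i = 0;
		(*wr_count_next)++;	/* skip it */
	}
	(*wr_count_next)++;
	return i;
}

int main(void)
{
	unsigned int wr = 0;
	int n;

	for (n = 0; n < 8; n++) {
		unsigned int i = next_bd_index(&wr, 4);

		printf("BD index %u (wr_count_next = %u)\n", i, wr);
	}
	return 0;
}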
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 86c9a09..c40665a 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1121,6 +1121,87 @@ int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
return 0;
}
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+
+static bool rio_chan_filter(struct dma_chan *chan, void *arg)
+{
+ struct rio_dev *rdev = arg;
+
+ /* Check that DMA device belongs to the right MPORT */
+ return (rdev->net->hport ==
+ container_of(chan->device, struct rio_mport, dma));
+}
+
+/**
+ * rio_request_dma - request a RapidIO-capable DMA channel that supports
+ * the specified target RapidIO device.
+ * @rdev: RIO device control structure
+ *
+ * Returns a pointer to the allocated DMA channel, or NULL on failure.
+ */
+struct dma_chan *rio_request_dma(struct rio_dev *rdev)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *dchan;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dchan = dma_request_channel(mask, rio_chan_filter, rdev);
+
+ return dchan;
+}
+EXPORT_SYMBOL_GPL(rio_request_dma);
+
+/**
+ * rio_release_dma - release specified DMA channel
+ * @dchan: DMA channel to release
+ */
+void rio_release_dma(struct dma_chan *dchan)
+{
+ dma_release_channel(dchan);
+}
+EXPORT_SYMBOL_GPL(rio_release_dma);
+
+/**
+ * rio_dma_prep_slave_sg - RapidIO specific wrapper
+ * for device_prep_slave_sg callback defined by DMAENGINE.
+ * @rdev: RIO device control structure
+ * @dchan: DMA channel to configure
+ * @data: RIO specific data descriptor
+ * @direction: DMA data transfer direction (TO or FROM the device)
+ * @flags: dmaengine defined flags
+ *
+ * Initializes a RapidIO-capable DMA channel for the specified data transfer.
+ * Uses the DMA channel's private extension to pass information about the
+ * remote target RIO device.
+ * Returns a pointer to the DMA transaction descriptor, or NULL on failure.
+ */
+struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
+ struct dma_chan *dchan, struct rio_dma_data *data,
+ enum dma_transfer_direction direction, unsigned long flags)
+{
+ struct dma_async_tx_descriptor *txd = NULL;
+ struct rio_dma_ext rio_ext;
+
+ if (dchan->device->device_prep_slave_sg == NULL) {
+ pr_err("%s: prep_rio_sg == NULL\n", __func__);
+ return NULL;
+ }
+
+ rio_ext.destid = rdev->destid;
+ rio_ext.rio_addr_u = data->rio_addr_u;
+ rio_ext.rio_addr = data->rio_addr;
+ rio_ext.wr_type = data->wr_type;
+
+ txd = dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len,
+ direction, flags, &rio_ext);
+
+ return txd;
+}
+EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg);
+
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
static void rio_fixup_device(struct rio_dev *dev)
{
}
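A hedged usage sketch of the new client-facing API (not part of this patch): rdev, the scatterlist and the RIO address are placeholders supplied by the caller, dmaengine_submit()/dma_async_issue_pending() are the standard dmaengine calls, and the rio_dma_data field names follow the usage in rio_dma_prep_slave_sg() above.

/* assumed headers: <linux/rio_drv.h>, <linux/dmaengine.h>, <linux/scatterlist.h> */
static int rio_client_nwrite_example(struct rio_dev *rdev,
				     struct scatterlist *sgl, int sg_len,
				     u64 rio_addr)
{
	struct dma_async_tx_descriptor *txd;
	struct rio_dma_data tx_data;
	struct dma_chan *dchan;

	dchan = rio_request_dma(rdev);
	if (!dchan)
		return -ENODEV;

	tx_data.sg = sgl;		/* already DMA-mapped by the caller */
	tx_data.sg_len = sg_len;
	tx_data.rio_addr = rio_addr;	/* low 64 bits of the RIO address */
	tx_data.rio_addr_u = 0;
	tx_data.wr_type = RDW_LAST_NWRITE_R;

	txd = rio_dma_prep_slave_sg(rdev, dchan, &tx_data,
				    DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!txd) {
		rio_release_dma(dchan);
		return -EIO;
	}

	dmaengine_submit(txd);
	dma_async_issue_pending(dchan);
	return 0;
}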
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index c86b886..f34c3be 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -20,6 +20,7 @@ menuconfig REGULATOR
If unsure, say no.
+
if REGULATOR
config REGULATOR_DEBUG
@@ -88,6 +89,13 @@ config REGULATOR_AAT2870
If you have a AnalogicTech AAT2870 say Y to enable the
regulator driver.
+config REGULATOR_ARIZONA
+ tristate "Wolfson Arizona class devices"
+ depends on MFD_ARIZONA
+ help
+ Support for the regulators found on Wolfson Arizona class
+ devices.
+
config REGULATOR_DA903X
tristate "Dialog Semiconductor DA9030/DA9034 regulators"
depends on PMIC_DA903X
@@ -195,6 +203,14 @@ config REGULATOR_MAX8998
via I2C bus. The provided regulator is suitable for S3C6410
and S5PC1XX chips to control VCC_CORE and VCC_USIM voltages.
+config REGULATOR_MAX77686
+ tristate "Maxim 77686 regulator"
+ depends on MFD_MAX77686
+ help
+ This driver controls a Maxim 77686 regulator
+ via I2C bus. The provided regulator is suitable for
+ Exynos-4 chips to control VARM and VINT voltages.
+
config REGULATOR_PCAP
tristate "Motorola PCAP2 regulator driver"
depends on EZX_PCAP
@@ -216,6 +232,19 @@ config REGULATOR_LP3972
Say Y here to support the voltage regulators and convertors
on National Semiconductors LP3972 PMIC
+config REGULATOR_LP872X
+ bool "TI/National Semiconductor LP8720/LP8725 voltage regulators"
+ depends on I2C=y
+ select REGMAP_I2C
+ help
+ This driver supports the TI/National Semiconductor LP8720/LP8725 PMICs.
+
+config REGULATOR_LP8788
+ bool "TI LP8788 Power Regulators"
+ depends on MFD_LP8788
+ help
+ This driver supports the TI LP8788 voltage regulator chip.
+
config REGULATOR_PCF50633
tristate "NXP PCF50633 regulator driver"
depends on MFD_PCF50633
@@ -233,6 +262,14 @@ config REGULATOR_RC5T583
through regulator interface. The device supports multiple DCDC/LDO
outputs which can be controlled by i2c communication.
+config REGULATOR_S2MPS11
+ tristate "Samsung S2MPS11 voltage regulator"
+ depends on MFD_SEC_CORE
+ help
+ This driver supports the Samsung S2MPS11 voltage output regulator
+ via the I2C bus. The S2MPS11 comprises highly efficient buck converters,
+ including a dual-phase buck converter, a buck-boost converter and
+ various LDOs.
+
config REGULATOR_S5M8767
tristate "Samsung S5M8767A voltage regulator"
depends on MFD_S5M_CORE
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 977fd46..3342615 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o
obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o
+obj-$(CONFIG_REGULATOR_ARIZONA) += arizona-micsupp.o arizona-ldo1.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
obj-$(CONFIG_REGULATOR_DBX500_PRCMU) += dbx500-prcmu.o
@@ -23,6 +24,9 @@ obj-$(CONFIG_REGULATOR_GPIO) += gpio-regulator.o
obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
obj-$(CONFIG_REGULATOR_LP3972) += lp3972.o
+obj-$(CONFIG_REGULATOR_LP872X) += lp872x.o
+obj-$(CONFIG_REGULATOR_LP8788) += lp8788-buck.o
+obj-$(CONFIG_REGULATOR_LP8788) += lp8788-ldo.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
@@ -30,6 +34,7 @@ obj-$(CONFIG_REGULATOR_MAX8925) += max8925-regulator.o
obj-$(CONFIG_REGULATOR_MAX8952) += max8952.o
obj-$(CONFIG_REGULATOR_MAX8997) += max8997.o
obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
+obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o
obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
@@ -37,6 +42,7 @@ obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_RC5T583) += rc5t583-regulator.o
+obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
obj-$(CONFIG_REGULATOR_TPS62360) += tps62360-regulator.o
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index 06776ca..6f45bfd 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -33,11 +33,6 @@ struct aat2870_regulator {
struct aat2870_data *aat2870;
struct regulator_desc desc;
- const int *voltages; /* uV */
-
- int min_uV;
- int max_uV;
-
u8 enable_addr;
u8 enable_shift;
u8 enable_mask;
@@ -47,14 +42,6 @@ struct aat2870_regulator {
u8 voltage_mask;
};
-static int aat2870_ldo_list_voltage(struct regulator_dev *rdev,
- unsigned selector)
-{
- struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
-
- return ri->voltages[selector];
-}
-
static int aat2870_ldo_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
@@ -111,7 +98,7 @@ static int aat2870_ldo_is_enabled(struct regulator_dev *rdev)
}
static struct regulator_ops aat2870_ldo_ops = {
- .list_voltage = aat2870_ldo_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.set_voltage_sel = aat2870_ldo_set_voltage_sel,
.get_voltage_sel = aat2870_ldo_get_voltage_sel,
.enable = aat2870_ldo_enable,
@@ -119,7 +106,7 @@ static struct regulator_ops aat2870_ldo_ops = {
.is_enabled = aat2870_ldo_is_enabled,
};
-static const int aat2870_ldo_voltages[] = {
+static const unsigned int aat2870_ldo_voltages[] = {
1200000, 1300000, 1500000, 1600000,
1800000, 2000000, 2200000, 2500000,
2600000, 2700000, 2800000, 2900000,
@@ -132,13 +119,11 @@ static const int aat2870_ldo_voltages[] = {
.name = #ids, \
.id = AAT2870_ID_##ids, \
.n_voltages = ARRAY_SIZE(aat2870_ldo_voltages), \
+ .volt_table = aat2870_ldo_voltages, \
.ops = &aat2870_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}, \
- .voltages = aat2870_ldo_voltages, \
- .min_uV = 1200000, \
- .max_uV = 3300000, \
}
static struct aat2870_regulator aat2870_regulators[] = {
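This conversion (and the ab3100/ab8500 conversions that follow) drops the per-driver list_voltage callback in favour of the core regulator_list_voltage_table() helper, driven by the volt_table and n_voltages fields of struct regulator_desc. A simplified sketch of the behaviour being relied on (not the core implementation, which takes a struct regulator_dev):

static int list_voltage_table_sketch(const struct regulator_desc *desc,
				     unsigned int selector)
{
	if (!desc->volt_table || selector >= desc->n_voltages)
		return -EINVAL;

	return desc->volt_table[selector];	/* value in microvolts */
}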
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 03f4d9c..182b553 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -43,20 +43,12 @@
* @dev: handle to the device
* @plfdata: AB3100 platform data passed in at probe time
* @regreg: regulator register number in the AB3100
- * @fixed_voltage: a fixed voltage for this regulator, if this
- * 0 the voltages array is used instead.
- * @typ_voltages: an array of available typical voltages for
- * this regulator
- * @voltages_len: length of the array of available voltages
*/
struct ab3100_regulator {
struct regulator_dev *rdev;
struct device *dev;
struct ab3100_platform_data *plfdata;
u8 regreg;
- int fixed_voltage;
- int const *typ_voltages;
- u8 voltages_len;
};
/* The order in which registers are initialized */
@@ -80,7 +72,7 @@ static const u8 ab3100_reg_init_order[AB3100_NUM_REGULATORS+2] = {
#define LDO_C_VOLTAGE 2650000
#define LDO_D_VOLTAGE 2650000
-static const int ldo_e_buck_typ_voltages[] = {
+static const unsigned int ldo_e_buck_typ_voltages[] = {
1800000,
1400000,
1300000,
@@ -90,7 +82,7 @@ static const int ldo_e_buck_typ_voltages[] = {
900000,
};
-static const int ldo_f_typ_voltages[] = {
+static const unsigned int ldo_f_typ_voltages[] = {
1800000,
1400000,
1300000,
@@ -101,21 +93,21 @@ static const int ldo_f_typ_voltages[] = {
2650000,
};
-static const int ldo_g_typ_voltages[] = {
+static const unsigned int ldo_g_typ_voltages[] = {
2850000,
2750000,
1800000,
1500000,
};
-static const int ldo_h_typ_voltages[] = {
+static const unsigned int ldo_h_typ_voltages[] = {
2750000,
1800000,
1500000,
1200000,
};
-static const int ldo_k_typ_voltages[] = {
+static const unsigned int ldo_k_typ_voltages[] = {
2750000,
1800000,
};
@@ -126,40 +118,27 @@ static struct ab3100_regulator
ab3100_regulators[AB3100_NUM_REGULATORS] = {
{
.regreg = AB3100_LDO_A,
- .fixed_voltage = LDO_A_VOLTAGE,
},
{
.regreg = AB3100_LDO_C,
- .fixed_voltage = LDO_C_VOLTAGE,
},
{
.regreg = AB3100_LDO_D,
- .fixed_voltage = LDO_D_VOLTAGE,
},
{
.regreg = AB3100_LDO_E,
- .typ_voltages = ldo_e_buck_typ_voltages,
- .voltages_len = ARRAY_SIZE(ldo_e_buck_typ_voltages),
},
{
.regreg = AB3100_LDO_F,
- .typ_voltages = ldo_f_typ_voltages,
- .voltages_len = ARRAY_SIZE(ldo_f_typ_voltages),
},
{
.regreg = AB3100_LDO_G,
- .typ_voltages = ldo_g_typ_voltages,
- .voltages_len = ARRAY_SIZE(ldo_g_typ_voltages),
},
{
.regreg = AB3100_LDO_H,
- .typ_voltages = ldo_h_typ_voltages,
- .voltages_len = ARRAY_SIZE(ldo_h_typ_voltages),
},
{
.regreg = AB3100_LDO_K,
- .typ_voltages = ldo_k_typ_voltages,
- .voltages_len = ARRAY_SIZE(ldo_k_typ_voltages),
},
{
.regreg = AB3100_LDO_EXT,
@@ -167,8 +146,6 @@ ab3100_regulators[AB3100_NUM_REGULATORS] = {
},
{
.regreg = AB3100_BUCK,
- .typ_voltages = ldo_e_buck_typ_voltages,
- .voltages_len = ARRAY_SIZE(ldo_e_buck_typ_voltages),
},
};
@@ -178,7 +155,7 @@ ab3100_regulators[AB3100_NUM_REGULATORS] = {
*/
static int ab3100_enable_regulator(struct regulator_dev *reg)
{
- struct ab3100_regulator *abreg = reg->reg_data;
+ struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
int err;
u8 regval;
@@ -209,7 +186,7 @@ static int ab3100_enable_regulator(struct regulator_dev *reg)
static int ab3100_disable_regulator(struct regulator_dev *reg)
{
- struct ab3100_regulator *abreg = reg->reg_data;
+ struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
int err;
u8 regval;
@@ -242,7 +219,7 @@ static int ab3100_disable_regulator(struct regulator_dev *reg)
static int ab3100_is_enabled_regulator(struct regulator_dev *reg)
{
- struct ab3100_regulator *abreg = reg->reg_data;
+ struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
u8 regval;
int err;
@@ -257,26 +234,12 @@ static int ab3100_is_enabled_regulator(struct regulator_dev *reg)
return regval & AB3100_REG_ON_MASK;
}
-static int ab3100_list_voltage_regulator(struct regulator_dev *reg,
- unsigned selector)
-{
- struct ab3100_regulator *abreg = reg->reg_data;
-
- if (selector >= abreg->voltages_len)
- return -EINVAL;
- return abreg->typ_voltages[selector];
-}
-
static int ab3100_get_voltage_regulator(struct regulator_dev *reg)
{
- struct ab3100_regulator *abreg = reg->reg_data;
+ struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
u8 regval;
int err;
- /* Return the voltage for fixed regulators immediately */
- if (abreg->fixed_voltage)
- return abreg->fixed_voltage;
-
/*
* For variable types, read out setting and index into
* supplied voltage list.
@@ -294,20 +257,20 @@ static int ab3100_get_voltage_regulator(struct regulator_dev *reg)
regval &= 0xE0;
regval >>= 5;
- if (regval >= abreg->voltages_len) {
+ if (regval >= reg->desc->n_voltages) {
dev_err(&reg->dev,
"regulator register %02x contains an illegal voltage setting\n",
abreg->regreg);
return -EINVAL;
}
- return abreg->typ_voltages[regval];
+ return reg->desc->volt_table[regval];
}
static int ab3100_set_voltage_regulator_sel(struct regulator_dev *reg,
unsigned selector)
{
- struct ab3100_regulator *abreg = reg->reg_data;
+ struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
u8 regval;
int err;
@@ -336,7 +299,7 @@ static int ab3100_set_voltage_regulator_sel(struct regulator_dev *reg,
static int ab3100_set_suspend_voltage_regulator(struct regulator_dev *reg,
int uV)
{
- struct ab3100_regulator *abreg = reg->reg_data;
+ struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
u8 regval;
int err;
int bestindex;
@@ -379,42 +342,22 @@ static int ab3100_set_suspend_voltage_regulator(struct regulator_dev *reg,
*/
static int ab3100_get_voltage_regulator_external(struct regulator_dev *reg)
{
- struct ab3100_regulator *abreg = reg->reg_data;
+ struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
return abreg->plfdata->external_voltage;
}
-static int ab3100_enable_time_regulator(struct regulator_dev *reg)
+static int ab3100_get_fixed_voltage_regulator(struct regulator_dev *reg)
{
- struct ab3100_regulator *abreg = reg->reg_data;
-
- /* Per-regulator power on delay from spec */
- switch (abreg->regreg) {
- case AB3100_LDO_A: /* Fallthrough */
- case AB3100_LDO_C: /* Fallthrough */
- case AB3100_LDO_D: /* Fallthrough */
- case AB3100_LDO_E: /* Fallthrough */
- case AB3100_LDO_H: /* Fallthrough */
- case AB3100_LDO_K:
- return 200;
- case AB3100_LDO_F:
- return 600;
- case AB3100_LDO_G:
- return 400;
- case AB3100_BUCK:
- return 1000;
- default:
- break;
- }
- return 0;
+ return reg->desc->min_uV;
}
static struct regulator_ops regulator_ops_fixed = {
+ .list_voltage = regulator_list_voltage_linear,
.enable = ab3100_enable_regulator,
.disable = ab3100_disable_regulator,
.is_enabled = ab3100_is_enabled_regulator,
- .get_voltage = ab3100_get_voltage_regulator,
- .enable_time = ab3100_enable_time_regulator,
+ .get_voltage = ab3100_get_fixed_voltage_regulator,
};
static struct regulator_ops regulator_ops_variable = {
@@ -423,8 +366,7 @@ static struct regulator_ops regulator_ops_variable = {
.is_enabled = ab3100_is_enabled_regulator,
.get_voltage = ab3100_get_voltage_regulator,
.set_voltage_sel = ab3100_set_voltage_regulator_sel,
- .list_voltage = ab3100_list_voltage_regulator,
- .enable_time = ab3100_enable_time_regulator,
+ .list_voltage = regulator_list_voltage_table,
};
static struct regulator_ops regulator_ops_variable_sleepable = {
@@ -434,8 +376,7 @@ static struct regulator_ops regulator_ops_variable_sleepable = {
.get_voltage = ab3100_get_voltage_regulator,
.set_voltage_sel = ab3100_set_voltage_regulator_sel,
.set_suspend_voltage = ab3100_set_suspend_voltage_regulator,
- .list_voltage = ab3100_list_voltage_regulator,
- .enable_time = ab3100_enable_time_regulator,
+ .list_voltage = regulator_list_voltage_table,
};
/*
@@ -457,62 +398,81 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
.name = "LDO_A",
.id = AB3100_LDO_A,
.ops = &regulator_ops_fixed,
+ .n_voltages = 1,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .min_uV = LDO_A_VOLTAGE,
+ .enable_time = 200,
},
{
.name = "LDO_C",
.id = AB3100_LDO_C,
.ops = &regulator_ops_fixed,
+ .n_voltages = 1,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .min_uV = LDO_C_VOLTAGE,
+ .enable_time = 200,
},
{
.name = "LDO_D",
.id = AB3100_LDO_D,
.ops = &regulator_ops_fixed,
+ .n_voltages = 1,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .min_uV = LDO_D_VOLTAGE,
+ .enable_time = 200,
},
{
.name = "LDO_E",
.id = AB3100_LDO_E,
.ops = &regulator_ops_variable_sleepable,
.n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages),
+ .volt_table = ldo_e_buck_typ_voltages,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .enable_time = 200,
},
{
.name = "LDO_F",
.id = AB3100_LDO_F,
.ops = &regulator_ops_variable,
.n_voltages = ARRAY_SIZE(ldo_f_typ_voltages),
+ .volt_table = ldo_f_typ_voltages,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .enable_time = 600,
},
{
.name = "LDO_G",
.id = AB3100_LDO_G,
.ops = &regulator_ops_variable,
.n_voltages = ARRAY_SIZE(ldo_g_typ_voltages),
+ .volt_table = ldo_g_typ_voltages,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .enable_time = 400,
},
{
.name = "LDO_H",
.id = AB3100_LDO_H,
.ops = &regulator_ops_variable,
.n_voltages = ARRAY_SIZE(ldo_h_typ_voltages),
+ .volt_table = ldo_h_typ_voltages,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .enable_time = 200,
},
{
.name = "LDO_K",
.id = AB3100_LDO_K,
.ops = &regulator_ops_variable,
.n_voltages = ARRAY_SIZE(ldo_k_typ_voltages),
+ .volt_table = ldo_k_typ_voltages,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .enable_time = 200,
},
{
.name = "LDO_EXT",
@@ -528,6 +488,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
.n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .enable_time = 1000,
},
};
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index e1b8c54..13d424f 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -30,9 +30,6 @@
* @dev: device pointer
* @desc: regulator description
* @regulator_dev: regulator device
- * @max_uV: maximum voltage (for variable voltage supplies)
- * @min_uV: minimum voltage (for variable voltage supplies)
- * @fixed_uV: typical voltage (for fixed voltage supplies)
* @update_bank: bank to control on/off
* @update_reg: register to control on/off
* @update_mask: mask to enable/disable regulator
@@ -40,17 +37,12 @@
* @voltage_bank: bank to control regulator voltage
* @voltage_reg: register to control regulator voltage
* @voltage_mask: mask to control regulator voltage
- * @voltages: supported voltage table
- * @voltages_len: number of supported voltages for the regulator
* @delay: startup/set voltage delay in us
*/
struct ab8500_regulator_info {
struct device *dev;
struct regulator_desc desc;
struct regulator_dev *regulator;
- int max_uV;
- int min_uV;
- int fixed_uV;
u8 update_bank;
u8 update_reg;
u8 update_mask;
@@ -58,13 +50,11 @@ struct ab8500_regulator_info {
u8 voltage_bank;
u8 voltage_reg;
u8 voltage_mask;
- int const *voltages;
- int voltages_len;
unsigned int delay;
};
/* voltage tables for the vauxn/vintcore supplies */
-static const int ldo_vauxn_voltages[] = {
+static const unsigned int ldo_vauxn_voltages[] = {
1100000,
1200000,
1300000,
@@ -83,7 +73,7 @@ static const int ldo_vauxn_voltages[] = {
3300000,
};
-static const int ldo_vaux3_voltages[] = {
+static const unsigned int ldo_vaux3_voltages[] = {
1200000,
1500000,
1800000,
@@ -94,7 +84,7 @@ static const int ldo_vaux3_voltages[] = {
2910000,
};
-static const int ldo_vintcore_voltages[] = {
+static const unsigned int ldo_vintcore_voltages[] = {
1200000,
1225000,
1250000,
@@ -185,25 +175,6 @@ static int ab8500_regulator_is_enabled(struct regulator_dev *rdev)
return false;
}
-static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector)
-{
- struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
-
- if (info == NULL) {
- dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
- return -EINVAL;
- }
-
- /* return the uV for the fixed regulators */
- if (info->fixed_uV)
- return info->fixed_uV;
-
- if (selector >= info->voltages_len)
- return -EINVAL;
-
- return info->voltages[selector];
-}
-
static int ab8500_regulator_get_voltage_sel(struct regulator_dev *rdev)
{
int ret, val;
@@ -279,14 +250,7 @@ static int ab8500_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
unsigned int new_sel)
{
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
- int ret;
- /* If the regulator isn't on, it won't take time here */
- ret = ab8500_regulator_is_enabled(rdev);
- if (ret < 0)
- return ret;
- if (!ret)
- return 0;
return info->delay;
}
@@ -296,21 +260,14 @@ static struct regulator_ops ab8500_regulator_ops = {
.is_enabled = ab8500_regulator_is_enabled,
.get_voltage_sel = ab8500_regulator_get_voltage_sel,
.set_voltage_sel = ab8500_regulator_set_voltage_sel,
- .list_voltage = ab8500_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.enable_time = ab8500_regulator_enable_time,
.set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel,
};
static int ab8500_fixed_get_voltage(struct regulator_dev *rdev)
{
- struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
-
- if (info == NULL) {
- dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
- return -EINVAL;
- }
-
- return info->fixed_uV;
+ return rdev->desc->min_uV;
}
static struct regulator_ops ab8500_regulator_fixed_ops = {
@@ -318,9 +275,8 @@ static struct regulator_ops ab8500_regulator_fixed_ops = {
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
.get_voltage = ab8500_fixed_get_voltage,
- .list_voltage = ab8500_list_voltage,
+ .list_voltage = regulator_list_voltage_linear,
.enable_time = ab8500_regulator_enable_time,
- .set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel,
};
static struct ab8500_regulator_info
@@ -329,7 +285,7 @@ static struct ab8500_regulator_info
* Variable Voltage Regulators
* name, min mV, max mV,
* update bank, reg, mask, enable val
- * volt bank, reg, mask, table, table length
+ * volt bank, reg, mask
*/
[AB8500_LDO_AUX1] = {
.desc = {
@@ -339,9 +295,8 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_AUX1,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
+ .volt_table = ldo_vauxn_voltages,
},
- .min_uV = 1100000,
- .max_uV = 3300000,
.update_bank = 0x04,
.update_reg = 0x09,
.update_mask = 0x03,
@@ -349,8 +304,6 @@ static struct ab8500_regulator_info
.voltage_bank = 0x04,
.voltage_reg = 0x1f,
.voltage_mask = 0x0f,
- .voltages = ldo_vauxn_voltages,
- .voltages_len = ARRAY_SIZE(ldo_vauxn_voltages),
},
[AB8500_LDO_AUX2] = {
.desc = {
@@ -360,9 +313,8 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_AUX2,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
+ .volt_table = ldo_vauxn_voltages,
},
- .min_uV = 1100000,
- .max_uV = 3300000,
.update_bank = 0x04,
.update_reg = 0x09,
.update_mask = 0x0c,
@@ -370,8 +322,6 @@ static struct ab8500_regulator_info
.voltage_bank = 0x04,
.voltage_reg = 0x20,
.voltage_mask = 0x0f,
- .voltages = ldo_vauxn_voltages,
- .voltages_len = ARRAY_SIZE(ldo_vauxn_voltages),
},
[AB8500_LDO_AUX3] = {
.desc = {
@@ -381,9 +331,8 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_AUX3,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vaux3_voltages),
+ .volt_table = ldo_vaux3_voltages,
},
- .min_uV = 1100000,
- .max_uV = 3300000,
.update_bank = 0x04,
.update_reg = 0x0a,
.update_mask = 0x03,
@@ -391,8 +340,6 @@ static struct ab8500_regulator_info
.voltage_bank = 0x04,
.voltage_reg = 0x21,
.voltage_mask = 0x07,
- .voltages = ldo_vaux3_voltages,
- .voltages_len = ARRAY_SIZE(ldo_vaux3_voltages),
},
[AB8500_LDO_INTCORE] = {
.desc = {
@@ -402,9 +349,8 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_INTCORE,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vintcore_voltages),
+ .volt_table = ldo_vintcore_voltages,
},
- .min_uV = 1100000,
- .max_uV = 3300000,
.update_bank = 0x03,
.update_reg = 0x80,
.update_mask = 0x44,
@@ -412,8 +358,6 @@ static struct ab8500_regulator_info
.voltage_bank = 0x03,
.voltage_reg = 0x80,
.voltage_mask = 0x38,
- .voltages = ldo_vintcore_voltages,
- .voltages_len = ARRAY_SIZE(ldo_vintcore_voltages),
},
/*
@@ -429,9 +373,9 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_TVOUT,
.owner = THIS_MODULE,
.n_voltages = 1,
+ .min_uV = 2000000,
},
.delay = 10000,
- .fixed_uV = 2000000,
.update_bank = 0x03,
.update_reg = 0x80,
.update_mask = 0x82,
@@ -445,8 +389,8 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_USB,
.owner = THIS_MODULE,
.n_voltages = 1,
+ .min_uV = 3300000,
},
- .fixed_uV = 3300000,
.update_bank = 0x03,
.update_reg = 0x82,
.update_mask = 0x03,
@@ -460,8 +404,8 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_AUDIO,
.owner = THIS_MODULE,
.n_voltages = 1,
+ .min_uV = 2000000,
},
- .fixed_uV = 2000000,
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x02,
@@ -475,8 +419,8 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_ANAMIC1,
.owner = THIS_MODULE,
.n_voltages = 1,
+ .min_uV = 2050000,
},
- .fixed_uV = 2050000,
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x08,
@@ -490,8 +434,8 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_ANAMIC2,
.owner = THIS_MODULE,
.n_voltages = 1,
+ .min_uV = 2050000,
},
- .fixed_uV = 2050000,
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x10,
@@ -505,8 +449,8 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_DMIC,
.owner = THIS_MODULE,
.n_voltages = 1,
+ .min_uV = 1800000,
},
- .fixed_uV = 1800000,
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x04,
@@ -520,8 +464,8 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_ANA,
.owner = THIS_MODULE,
.n_voltages = 1,
+ .min_uV = 1200000,
},
- .fixed_uV = 1200000,
.update_bank = 0x04,
.update_reg = 0x06,
.update_mask = 0x0c,
@@ -769,9 +713,7 @@ static __devinit int ab8500_regulator_register(struct platform_device *pdev,
if (info->desc.id == AB8500_LDO_AUX3) {
info->desc.n_voltages =
ARRAY_SIZE(ldo_vauxn_voltages);
- info->voltages = ldo_vauxn_voltages;
- info->voltages_len =
- ARRAY_SIZE(ldo_vauxn_voltages);
+ info->desc.volt_table = ldo_vauxn_voltages;
info->voltage_mask = 0xf;
}
}
@@ -794,17 +736,17 @@ static __devinit int ab8500_regulator_register(struct platform_device *pdev,
}
static struct of_regulator_match ab8500_regulator_matches[] = {
- { .name = "LDO-AUX1", .driver_data = (void *) AB8500_LDO_AUX1, },
- { .name = "LDO-AUX2", .driver_data = (void *) AB8500_LDO_AUX2, },
- { .name = "LDO-AUX3", .driver_data = (void *) AB8500_LDO_AUX3, },
- { .name = "LDO-INTCORE", .driver_data = (void *) AB8500_LDO_INTCORE, },
- { .name = "LDO-TVOUT", .driver_data = (void *) AB8500_LDO_TVOUT, },
- { .name = "LDO-USB", .driver_data = (void *) AB8500_LDO_USB, },
- { .name = "LDO-AUDIO", .driver_data = (void *) AB8500_LDO_AUDIO, },
- { .name = "LDO-ANAMIC1", .driver_data = (void *) AB8500_LDO_ANAMIC1, },
- { .name = "LDO-ANAMIC2", .driver_data = (void *) AB8500_LDO_ANAMIC2, },
- { .name = "LDO-DMIC", .driver_data = (void *) AB8500_LDO_DMIC, },
- { .name = "LDO-ANA", .driver_data = (void *) AB8500_LDO_ANA, },
+ { .name = "ab8500_ldo_aux1", .driver_data = (void *) AB8500_LDO_AUX1, },
+ { .name = "ab8500_ldo_aux2", .driver_data = (void *) AB8500_LDO_AUX2, },
+ { .name = "ab8500_ldo_aux3", .driver_data = (void *) AB8500_LDO_AUX3, },
+ { .name = "ab8500_ldo_intcore", .driver_data = (void *) AB8500_LDO_INTCORE, },
+ { .name = "ab8500_ldo_tvout", .driver_data = (void *) AB8500_LDO_TVOUT, },
+ { .name = "ab8500_ldo_usb", .driver_data = (void *) AB8500_LDO_USB, },
+ { .name = "ab8500_ldo_audio", .driver_data = (void *) AB8500_LDO_AUDIO, },
+ { .name = "ab8500_ldo_anamic1", .driver_data = (void *) AB8500_LDO_ANAMIC1, },
+ { .name = "ab8500_ldo_amamic2", .driver_data = (void *) AB8500_LDO_ANAMIC2, },
+ { .name = "ab8500_ldo_dmic", .driver_data = (void *) AB8500_LDO_DMIC, },
+ { .name = "ab8500_ldo_ana", .driver_data = (void *) AB8500_LDO_ANA, },
};
static __devinit int
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index 46d05f3..f123f7e 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -89,9 +89,12 @@ static int ad5398_set_current_limit(struct regulator_dev *rdev, int min_uA, int
unsigned short data;
int ret;
- if (min_uA > chip->max_uA || min_uA < chip->min_uA)
- return -EINVAL;
- if (max_uA > chip->max_uA || max_uA < chip->min_uA)
+ if (min_uA < chip->min_uA)
+ min_uA = chip->min_uA;
+ if (max_uA > chip->max_uA)
+ max_uA = chip->max_uA;
+
+ if (min_uA > chip->max_uA || max_uA < chip->min_uA)
return -EINVAL;
selector = DIV_ROUND_UP((min_uA - chip->min_uA) * chip->current_level,
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 3660bac..e9c2085 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -43,33 +43,15 @@ struct anatop_regulator {
struct regulator_init_data *initdata;
};
-static int anatop_set_voltage(struct regulator_dev *reg, int min_uV,
- int max_uV, unsigned *selector)
+static int anatop_set_voltage_sel(struct regulator_dev *reg, unsigned selector)
{
struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
- u32 val, sel, mask;
- int uv;
-
- uv = min_uV;
- dev_dbg(&reg->dev, "%s: uv %d, min %d, max %d\n", __func__,
- uv, anatop_reg->min_voltage,
- anatop_reg->max_voltage);
-
- if (uv < anatop_reg->min_voltage) {
- if (max_uV > anatop_reg->min_voltage)
- uv = anatop_reg->min_voltage;
- else
- return -EINVAL;
- }
+ u32 val, mask;
if (!anatop_reg->control_reg)
return -ENOTSUPP;
- sel = DIV_ROUND_UP(uv - anatop_reg->min_voltage, 25000);
- if (sel * 25000 + anatop_reg->min_voltage > anatop_reg->max_voltage)
- return -EINVAL;
- val = anatop_reg->min_bit_val + sel;
- *selector = sel;
+ val = anatop_reg->min_bit_val + selector;
dev_dbg(&reg->dev, "%s: calculated val %d\n", __func__, val);
mask = ((1 << anatop_reg->vol_bit_width) - 1) <<
anatop_reg->vol_bit_shift;
@@ -94,21 +76,11 @@ static int anatop_get_voltage_sel(struct regulator_dev *reg)
return val - anatop_reg->min_bit_val;
}
-static int anatop_list_voltage(struct regulator_dev *reg, unsigned selector)
-{
- struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
- int uv;
-
- uv = anatop_reg->min_voltage + selector * 25000;
- dev_dbg(&reg->dev, "vddio = %d, selector = %u\n", uv, selector);
-
- return uv;
-}
-
static struct regulator_ops anatop_rops = {
- .set_voltage = anatop_set_voltage,
+ .set_voltage_sel = anatop_set_voltage_sel,
.get_voltage_sel = anatop_get_voltage_sel,
- .list_voltage = anatop_list_voltage,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
};
static int __devinit anatop_regulator_probe(struct platform_device *pdev)
@@ -176,6 +148,8 @@ static int __devinit anatop_regulator_probe(struct platform_device *pdev)
rdesc->n_voltages = (sreg->max_voltage - sreg->min_voltage)
/ 25000 + 1;
+ rdesc->min_uV = sreg->min_voltage;
+ rdesc->uV_step = 25000;
config.dev = &pdev->dev;
config.init_data = initdata;
@@ -224,7 +198,7 @@ static struct platform_driver anatop_regulator_driver = {
.of_match_table = of_anatop_regulator_match_tbl,
},
.probe = anatop_regulator_probe,
- .remove = anatop_regulator_remove,
+ .remove = __devexit_p(anatop_regulator_remove),
};
static int __init anatop_regulator_init(void)
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
new file mode 100644
index 0000000..c8f95c0
--- /dev/null
+++ b/drivers/regulator/arizona-ldo1.c
@@ -0,0 +1,138 @@
+/*
+ * arizona-ldo1.c -- LDO1 supply for Arizona devices
+ *
+ * Copyright 2012 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/pdata.h>
+#include <linux/mfd/arizona/registers.h>
+
+struct arizona_ldo1 {
+ struct regulator_dev *regulator;
+ struct arizona *arizona;
+
+ struct regulator_consumer_supply supply;
+ struct regulator_init_data init_data;
+};
+
+static struct regulator_ops arizona_ldo1_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_desc arizona_ldo1 = {
+ .name = "LDO1",
+ .supply_name = "LDOVDD",
+ .type = REGULATOR_VOLTAGE,
+ .ops = &arizona_ldo1_ops,
+
+ .vsel_reg = ARIZONA_LDO1_CONTROL_1,
+ .vsel_mask = ARIZONA_LDO1_VSEL_MASK,
+ .min_uV = 900000,
+ .uV_step = 50000,
+ .n_voltages = 7,
+
+ .owner = THIS_MODULE,
+};
+
+static const struct regulator_init_data arizona_ldo1_default = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+};
+
+static __devinit int arizona_ldo1_probe(struct platform_device *pdev)
+{
+ struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = { };
+ struct arizona_ldo1 *ldo1;
+ int ret;
+
+ ldo1 = devm_kzalloc(&pdev->dev, sizeof(*ldo1), GFP_KERNEL);
+ if (ldo1 == NULL) {
+ dev_err(&pdev->dev, "Unable to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ ldo1->arizona = arizona;
+
+ /*
+ * Since the chip usually supplies itself we provide some
+ * default init_data for it. This will be overridden with
+ * platform data if provided.
+ */
+ ldo1->init_data = arizona_ldo1_default;
+ ldo1->init_data.consumer_supplies = &ldo1->supply;
+ ldo1->supply.supply = "DCVDD";
+ ldo1->supply.dev_name = dev_name(arizona->dev);
+
+ config.dev = arizona->dev;
+ config.driver_data = ldo1;
+ config.regmap = arizona->regmap;
+ config.ena_gpio = arizona->pdata.ldoena;
+
+ if (arizona->pdata.ldo1)
+ config.init_data = arizona->pdata.ldo1;
+ else
+ config.init_data = &ldo1->init_data;
+
+ ldo1->regulator = regulator_register(&arizona_ldo1, &config);
+ if (IS_ERR(ldo1->regulator)) {
+ ret = PTR_ERR(ldo1->regulator);
+ dev_err(arizona->dev, "Failed to register LDO1 supply: %d\n",
+ ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, ldo1);
+
+ return 0;
+}
+
+static __devexit int arizona_ldo1_remove(struct platform_device *pdev)
+{
+ struct arizona_ldo1 *ldo1 = platform_get_drvdata(pdev);
+
+ regulator_unregister(ldo1->regulator);
+
+ return 0;
+}
+
+static struct platform_driver arizona_ldo1_driver = {
+ .probe = arizona_ldo1_probe,
+ .remove = __devexit_p(arizona_ldo1_remove),
+ .driver = {
+ .name = "arizona-ldo1",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(arizona_ldo1_driver);
+
+/* Module information */
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_DESCRIPTION("Arizona LDO1 driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:arizona-ldo1");
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
new file mode 100644
index 0000000..450a069
--- /dev/null
+++ b/drivers/regulator/arizona-micsupp.c
@@ -0,0 +1,188 @@
+/*
+ * arizona-micsupp.c -- Microphone supply for Arizona devices
+ *
+ * Copyright 2012 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/pdata.h>
+#include <linux/mfd/arizona/registers.h>
+
+#define ARIZONA_MICSUPP_MAX_SELECTOR 0x1f
+
+struct arizona_micsupp {
+ struct regulator_dev *regulator;
+ struct arizona *arizona;
+
+ struct regulator_consumer_supply supply;
+ struct regulator_init_data init_data;
+};
+
+static int arizona_micsupp_list_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ if (selector > ARIZONA_MICSUPP_MAX_SELECTOR)
+ return -EINVAL;
+
+ if (selector == ARIZONA_MICSUPP_MAX_SELECTOR)
+ return 3300000;
+ else
+ return (selector * 50000) + 1700000;
+}
+
+static int arizona_micsupp_map_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ unsigned int voltage;
+ int selector;
+
+ if (min_uV < 1700000)
+ min_uV = 1700000;
+
+ if (min_uV > 3200000)
+ selector = ARIZONA_MICSUPP_MAX_SELECTOR;
+ else
+ selector = DIV_ROUND_UP(min_uV - 1700000, 50000);
+
+ if (selector < 0)
+ return -EINVAL;
+
+ voltage = arizona_micsupp_list_voltage(rdev, selector);
+ if (voltage < min_uV || voltage > max_uV)
+ return -EINVAL;
+
+ return selector;
+}
+
+static struct regulator_ops arizona_micsupp_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+
+ .list_voltage = arizona_micsupp_list_voltage,
+ .map_voltage = arizona_micsupp_map_voltage,
+
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_desc arizona_micsupp = {
+ .name = "MICVDD",
+ .supply_name = "CPVDD",
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = ARIZONA_MICSUPP_MAX_SELECTOR + 1,
+ .ops = &arizona_micsupp_ops,
+
+ .vsel_reg = ARIZONA_LDO2_CONTROL_1,
+ .vsel_mask = ARIZONA_LDO2_VSEL_MASK,
+ .enable_reg = ARIZONA_MIC_CHARGE_PUMP_1,
+ .enable_mask = ARIZONA_CPMIC_ENA,
+
+ .owner = THIS_MODULE,
+};
+
+static const struct regulator_init_data arizona_micsupp_default = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_VOLTAGE,
+ .min_uV = 1700000,
+ .max_uV = 3300000,
+ },
+
+ .num_consumer_supplies = 1,
+};
+
+static __devinit int arizona_micsupp_probe(struct platform_device *pdev)
+{
+ struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = { };
+ struct arizona_micsupp *micsupp;
+ int ret;
+
+ micsupp = devm_kzalloc(&pdev->dev, sizeof(*micsupp), GFP_KERNEL);
+ if (micsupp == NULL) {
+ dev_err(&pdev->dev, "Unable to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ micsupp->arizona = arizona;
+
+ /*
+ * Since the chip usually supplies itself we provide some
+ * default init_data for it. This will be overridden with
+ * platform data if provided.
+ */
+ micsupp->init_data = arizona_micsupp_default;
+ micsupp->init_data.consumer_supplies = &micsupp->supply;
+ micsupp->supply.supply = "MICVDD";
+ micsupp->supply.dev_name = dev_name(arizona->dev);
+
+ config.dev = arizona->dev;
+ config.driver_data = micsupp;
+ config.regmap = arizona->regmap;
+
+ if (arizona->pdata.micvdd)
+ config.init_data = arizona->pdata.micvdd;
+ else
+ config.init_data = &micsupp->init_data;
+
+ /* Default to regulated mode until the API supports bypass */
+ regmap_update_bits(arizona->regmap, ARIZONA_MIC_CHARGE_PUMP_1,
+ ARIZONA_CPMIC_BYPASS, 0);
+
+ micsupp->regulator = regulator_register(&arizona_micsupp, &config);
+ if (IS_ERR(micsupp->regulator)) {
+ ret = PTR_ERR(micsupp->regulator);
+ dev_err(arizona->dev, "Failed to register mic supply: %d\n",
+ ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, micsupp);
+
+ return 0;
+}
+
+static __devexit int arizona_micsupp_remove(struct platform_device *pdev)
+{
+ struct arizona_micsupp *micsupp = platform_get_drvdata(pdev);
+
+ regulator_unregister(micsupp->regulator);
+
+ return 0;
+}
+
+static struct platform_driver arizona_micsupp_driver = {
+ .probe = arizona_micsupp_probe,
+ .remove = __devexit_p(arizona_micsupp_remove),
+ .driver = {
+ .name = "arizona-micsupp",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(arizona_micsupp_driver);
+
+/* Module information */
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_DESCRIPTION("Arizona microphone supply driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:arizona-micsupp");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7584a74..2e31dff 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -23,6 +23,7 @@
#include <linux/mutex.h>
#include <linux/suspend.h>
#include <linux/delay.h>
+#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/of_regulator.h>
@@ -108,28 +109,6 @@ static const char *rdev_get_name(struct regulator_dev *rdev)
return "";
}
-/* gets the regulator for a given consumer device */
-static struct regulator *get_device_regulator(struct device *dev)
-{
- struct regulator *regulator = NULL;
- struct regulator_dev *rdev;
-
- mutex_lock(&regulator_list_mutex);
- list_for_each_entry(rdev, &regulator_list, list) {
- mutex_lock(&rdev->mutex);
- list_for_each_entry(regulator, &rdev->consumer_list, list) {
- if (regulator->dev == dev) {
- mutex_unlock(&rdev->mutex);
- mutex_unlock(&regulator_list_mutex);
- return regulator;
- }
- }
- mutex_unlock(&rdev->mutex);
- }
- mutex_unlock(&regulator_list_mutex);
- return NULL;
-}
-
/**
* of_get_regulator - get a regulator device node based on supply name
* @dev: Device pointer for the consumer (of regulator) device
@@ -303,18 +282,6 @@ static int regulator_check_drms(struct regulator_dev *rdev)
return 0;
}
-static ssize_t device_requested_uA_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct regulator *regulator;
-
- regulator = get_device_regulator(dev);
- if (regulator == NULL)
- return 0;
-
- return sprintf(buf, "%d\n", regulator->uA_load);
-}
-
static ssize_t regulator_uV_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -427,6 +394,9 @@ static ssize_t regulator_status_show(struct device *dev,
case REGULATOR_STATUS_STANDBY:
label = "standby";
break;
+ case REGULATOR_STATUS_UNDEFINED:
+ label = "undefined";
+ break;
default:
return -ERANGE;
}
@@ -967,6 +937,14 @@ static int set_machine_constraints(struct regulator_dev *rdev,
}
}
+ if (rdev->constraints->ramp_delay && ops->set_ramp_delay) {
+ ret = ops->set_ramp_delay(rdev, rdev->constraints->ramp_delay);
+ if (ret < 0) {
+ rdev_err(rdev, "failed to set ramp_delay\n");
+ goto out;
+ }
+ }
+
print_constraints(rdev);
return 0;
out:
@@ -1097,48 +1075,29 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
list_add(&regulator->list, &rdev->consumer_list);
if (dev) {
- /* create a 'requested_microamps_name' sysfs entry */
- size = scnprintf(buf, REG_STR_SIZE,
- "microamps_requested_%s-%s",
- dev_name(dev), supply_name);
- if (size >= REG_STR_SIZE)
- goto overflow_err;
-
regulator->dev = dev;
- sysfs_attr_init(&regulator->dev_attr.attr);
- regulator->dev_attr.attr.name = kstrdup(buf, GFP_KERNEL);
- if (regulator->dev_attr.attr.name == NULL)
- goto attr_name_err;
-
- regulator->dev_attr.attr.mode = 0444;
- regulator->dev_attr.show = device_requested_uA_show;
- err = device_create_file(dev, &regulator->dev_attr);
- if (err < 0) {
- rdev_warn(rdev, "could not add regulator_dev requested microamps sysfs entry\n");
- goto attr_name_err;
- }
- /* also add a link to the device sysfs entry */
+ /* Add a link to the device sysfs entry */
size = scnprintf(buf, REG_STR_SIZE, "%s-%s",
dev->kobj.name, supply_name);
if (size >= REG_STR_SIZE)
- goto attr_err;
+ goto overflow_err;
regulator->supply_name = kstrdup(buf, GFP_KERNEL);
if (regulator->supply_name == NULL)
- goto attr_err;
+ goto overflow_err;
err = sysfs_create_link(&rdev->dev.kobj, &dev->kobj,
buf);
if (err) {
rdev_warn(rdev, "could not add device link %s err %d\n",
dev->kobj.name, err);
- goto link_name_err;
+ /* non-fatal */
}
} else {
regulator->supply_name = kstrdup(supply_name, GFP_KERNEL);
if (regulator->supply_name == NULL)
- goto attr_err;
+ goto overflow_err;
}
regulator->debugfs = debugfs_create_dir(regulator->supply_name,
@@ -1165,12 +1124,6 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
mutex_unlock(&rdev->mutex);
return regulator;
-link_name_err:
- kfree(regulator->supply_name);
-attr_err:
- device_remove_file(regulator->dev, &regulator->dev_attr);
-attr_name_err:
- kfree(regulator->dev_attr.attr.name);
overflow_err:
list_del(&regulator->list);
kfree(regulator);
@@ -1181,7 +1134,7 @@ overflow_err:
static int _regulator_get_enable_time(struct regulator_dev *rdev)
{
if (!rdev->desc->ops->enable_time)
- return 0;
+ return rdev->desc->enable_time;
return rdev->desc->ops->enable_time(rdev);
}
@@ -1420,11 +1373,8 @@ void regulator_put(struct regulator *regulator)
debugfs_remove_recursive(regulator->debugfs);
/* remove any sysfs entries */
- if (regulator->dev) {
+ if (regulator->dev)
sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
- device_remove_file(regulator->dev, &regulator->dev_attr);
- kfree(regulator->dev_attr.attr.name);
- }
kfree(regulator->supply_name);
list_del(&regulator->list);
kfree(regulator);
@@ -1459,19 +1409,61 @@ void devm_regulator_put(struct regulator *regulator)
{
int rc;
- rc = devres_destroy(regulator->dev, devm_regulator_release,
+ rc = devres_release(regulator->dev, devm_regulator_release,
devm_regulator_match, regulator);
- if (rc == 0)
- regulator_put(regulator);
- else
+ if (rc != 0)
WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regulator_put);
+static int _regulator_do_enable(struct regulator_dev *rdev)
+{
+ int ret, delay;
+
+ /* Query before enabling in case configuration dependent. */
+ ret = _regulator_get_enable_time(rdev);
+ if (ret >= 0) {
+ delay = ret;
+ } else {
+ rdev_warn(rdev, "enable_time() failed: %d\n", ret);
+ delay = 0;
+ }
+
+ trace_regulator_enable(rdev_get_name(rdev));
+
+ if (rdev->ena_gpio) {
+ gpio_set_value_cansleep(rdev->ena_gpio,
+ !rdev->ena_gpio_invert);
+ rdev->ena_gpio_state = 1;
+ } else if (rdev->desc->ops->enable) {
+ ret = rdev->desc->ops->enable(rdev);
+ if (ret < 0)
+ return ret;
+ } else {
+ return -EINVAL;
+ }
+
+ /* Allow the regulator to ramp; it would be useful to extend
+ * this for bulk operations so that the regulators can ramp
+ * together. */
+ trace_regulator_enable_delay(rdev_get_name(rdev));
+
+ if (delay >= 1000) {
+ mdelay(delay / 1000);
+ udelay(delay % 1000);
+ } else if (delay) {
+ udelay(delay);
+ }
+
+ trace_regulator_enable_complete(rdev_get_name(rdev));
+
+ return 0;
+}
+
/* locks held by regulator_enable() */
static int _regulator_enable(struct regulator_dev *rdev)
{
- int ret, delay;
+ int ret;
/* check voltage and requested load before enabling */
if (rdev->constraints &&
@@ -1485,40 +1477,10 @@ static int _regulator_enable(struct regulator_dev *rdev)
if (!_regulator_can_change_status(rdev))
return -EPERM;
- if (!rdev->desc->ops->enable)
- return -EINVAL;
-
- /* Query before enabling in case configuration
- * dependent. */
- ret = _regulator_get_enable_time(rdev);
- if (ret >= 0) {
- delay = ret;
- } else {
- rdev_warn(rdev, "enable_time() failed: %d\n",
- ret);
- delay = 0;
- }
-
- trace_regulator_enable(rdev_get_name(rdev));
-
- /* Allow the regulator to ramp; it would be useful
- * to extend this for bulk operations so that the
- * regulators can ramp together. */
- ret = rdev->desc->ops->enable(rdev);
+ ret = _regulator_do_enable(rdev);
if (ret < 0)
return ret;
- trace_regulator_enable_delay(rdev_get_name(rdev));
-
- if (delay >= 1000) {
- mdelay(delay / 1000);
- udelay(delay % 1000);
- } else if (delay) {
- udelay(delay);
- }
-
- trace_regulator_enable_complete(rdev_get_name(rdev));
-
} else if (ret < 0) {
rdev_err(rdev, "is_enabled() failed: %d\n", ret);
return ret;
@@ -1567,6 +1529,30 @@ int regulator_enable(struct regulator *regulator)
}
EXPORT_SYMBOL_GPL(regulator_enable);
+static int _regulator_do_disable(struct regulator_dev *rdev)
+{
+ int ret;
+
+ trace_regulator_disable(rdev_get_name(rdev));
+
+ if (rdev->ena_gpio) {
+ gpio_set_value_cansleep(rdev->ena_gpio,
+ rdev->ena_gpio_invert);
+ rdev->ena_gpio_state = 0;
+
+ } else if (rdev->desc->ops->disable) {
+ ret = rdev->desc->ops->disable(rdev);
+ if (ret != 0)
+ return ret;
+ }
+
+ trace_regulator_disable_complete(rdev_get_name(rdev));
+
+ _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
+ NULL);
+ return 0;
+}
+
/* locks held by regulator_disable() */
static int _regulator_disable(struct regulator_dev *rdev)
{
@@ -1581,20 +1567,12 @@ static int _regulator_disable(struct regulator_dev *rdev)
(rdev->constraints && !rdev->constraints->always_on)) {
/* we are last user */
- if (_regulator_can_change_status(rdev) &&
- rdev->desc->ops->disable) {
- trace_regulator_disable(rdev_get_name(rdev));
-
- ret = rdev->desc->ops->disable(rdev);
+ if (_regulator_can_change_status(rdev)) {
+ ret = _regulator_do_disable(rdev);
if (ret < 0) {
rdev_err(rdev, "failed to disable\n");
return ret;
}
-
- trace_regulator_disable_complete(rdev_get_name(rdev));
-
- _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
- NULL);
}
rdev->use_count = 0;
@@ -1812,6 +1790,10 @@ EXPORT_SYMBOL_GPL(regulator_disable_regmap);
static int _regulator_is_enabled(struct regulator_dev *rdev)
{
+ /* A GPIO control always takes precedence */
+ if (rdev->ena_gpio)
+ return rdev->ena_gpio_state;
+
/* If we don't know then assume that the regulator is always on */
if (!rdev->desc->ops->is_enabled)
return 1;
@@ -1883,6 +1865,31 @@ int regulator_list_voltage_linear(struct regulator_dev *rdev,
EXPORT_SYMBOL_GPL(regulator_list_voltage_linear);
/**
+ * regulator_list_voltage_table - List voltages with table based mapping
+ *
+ * @rdev: Regulator device
+ * @selector: Selector to convert into a voltage
+ *
+ * Regulators with table based mapping between voltages and
+ * selectors can set volt_table in the regulator descriptor
+ * and then use this function as their list_voltage() operation.
+ */
+int regulator_list_voltage_table(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ if (!rdev->desc->volt_table) {
+ BUG_ON(!rdev->desc->volt_table);
+ return -EINVAL;
+ }
+
+ if (selector >= rdev->desc->n_voltages)
+ return -EINVAL;
+
+ return rdev->desc->volt_table[selector];
+}
+EXPORT_SYMBOL_GPL(regulator_list_voltage_table);
+
+/**
* regulator_list_voltage - enumerate supported voltages
* @regulator: regulator source
* @selector: identify voltage to list
@@ -1928,8 +1935,18 @@ EXPORT_SYMBOL_GPL(regulator_list_voltage);
int regulator_is_supported_voltage(struct regulator *regulator,
int min_uV, int max_uV)
{
+ struct regulator_dev *rdev = regulator->rdev;
int i, voltages, ret;
+ /* If we can't change voltage check the current voltage */
+ if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+ ret = regulator_get_voltage(regulator);
+ if (ret >= 0)
+ return (min_uV <= ret && ret <= max_uV);
+ else
+ return ret;
+ }
+
ret = regulator_count_voltages(regulator);
if (ret < 0)
return ret;
@@ -2045,11 +2062,22 @@ int regulator_map_voltage_linear(struct regulator_dev *rdev,
{
int ret, voltage;
+ /* Allow uV_step to be 0 for fixed voltage */
+ if (rdev->desc->n_voltages == 1 && rdev->desc->uV_step == 0) {
+ if (min_uV <= rdev->desc->min_uV && rdev->desc->min_uV <= max_uV)
+ return 0;
+ else
+ return -EINVAL;
+ }
+
if (!rdev->desc->uV_step) {
BUG_ON(!rdev->desc->uV_step);
return -EINVAL;
}
+ if (min_uV < rdev->desc->min_uV)
+ min_uV = rdev->desc->min_uV;
+
ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step);
if (ret < 0)
return ret;
@@ -2068,7 +2096,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
{
int ret;
int delay = 0;
- int best_val;
+ int best_val = 0;
unsigned int selector;
int old_selector = -1;
@@ -2081,7 +2109,8 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
* If we can't obtain the old selector there is not enough
* info to call set_voltage_time_sel().
*/
- if (rdev->desc->ops->set_voltage_time_sel &&
+ if (_regulator_is_enabled(rdev) &&
+ rdev->desc->ops->set_voltage_time_sel &&
rdev->desc->ops->get_voltage_sel) {
old_selector = rdev->desc->ops->get_voltage_sel(rdev);
if (old_selector < 0)
@@ -2091,29 +2120,45 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
if (rdev->desc->ops->set_voltage) {
ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV,
&selector);
+
+ if (ret >= 0) {
+ if (rdev->desc->ops->list_voltage)
+ best_val = rdev->desc->ops->list_voltage(rdev,
+ selector);
+ else
+ best_val = _regulator_get_voltage(rdev);
+ }
+
} else if (rdev->desc->ops->set_voltage_sel) {
- if (rdev->desc->ops->map_voltage)
+ if (rdev->desc->ops->map_voltage) {
ret = rdev->desc->ops->map_voltage(rdev, min_uV,
max_uV);
- else
- ret = regulator_map_voltage_iterate(rdev, min_uV,
- max_uV);
+ } else {
+ if (rdev->desc->ops->list_voltage ==
+ regulator_list_voltage_linear)
+ ret = regulator_map_voltage_linear(rdev,
+ min_uV, max_uV);
+ else
+ ret = regulator_map_voltage_iterate(rdev,
+ min_uV, max_uV);
+ }
if (ret >= 0) {
- selector = ret;
- ret = rdev->desc->ops->set_voltage_sel(rdev, ret);
+ best_val = rdev->desc->ops->list_voltage(rdev, ret);
+ if (min_uV <= best_val && max_uV >= best_val) {
+ selector = ret;
+ ret = rdev->desc->ops->set_voltage_sel(rdev,
+ ret);
+ } else {
+ ret = -EINVAL;
+ }
}
} else {
ret = -EINVAL;
}
- if (rdev->desc->ops->list_voltage)
- best_val = rdev->desc->ops->list_voltage(rdev, selector);
- else
- best_val = -1;
-
/* Call set_voltage_time_sel if successfully obtained old_selector */
- if (ret == 0 && old_selector >= 0 &&
+ if (ret == 0 && _regulator_is_enabled(rdev) && old_selector >= 0 &&
rdev->desc->ops->set_voltage_time_sel) {
delay = rdev->desc->ops->set_voltage_time_sel(rdev,
@@ -2123,19 +2168,19 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
delay);
delay = 0;
}
- }
- /* Insert any necessary delays */
- if (delay >= 1000) {
- mdelay(delay / 1000);
- udelay(delay % 1000);
- } else if (delay) {
- udelay(delay);
+ /* Insert any necessary delays */
+ if (delay >= 1000) {
+ mdelay(delay / 1000);
+ udelay(delay % 1000);
+ } else if (delay) {
+ udelay(delay);
+ }
}
- if (ret == 0)
+ if (ret == 0 && best_val >= 0)
_notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE,
- NULL);
+ (void *)best_val);
trace_regulator_set_voltage_complete(rdev_get_name(rdev), best_val);
@@ -2246,6 +2291,46 @@ int regulator_set_voltage_time(struct regulator *regulator,
EXPORT_SYMBOL_GPL(regulator_set_voltage_time);
/**
+ * regulator_set_voltage_time_sel - get rise/fall time
+ * @rdev: regulator source device
+ * @old_selector: selector for starting voltage
+ * @new_selector: selector for target voltage
+ *
+ * Provided with the starting and target voltage selectors, this function
+ * returns the time in microseconds required to rise or fall to this new voltage.
+ *
+ * Drivers providing ramp_delay in regulation_constraints can use this as their
+ * set_voltage_time_sel() operation.
+ */
+int regulator_set_voltage_time_sel(struct regulator_dev *rdev,
+ unsigned int old_selector,
+ unsigned int new_selector)
+{
+ unsigned int ramp_delay = 0;
+ int old_volt, new_volt;
+
+ if (rdev->constraints->ramp_delay)
+ ramp_delay = rdev->constraints->ramp_delay;
+ else if (rdev->desc->ramp_delay)
+ ramp_delay = rdev->desc->ramp_delay;
+
+ if (ramp_delay == 0) {
+ rdev_warn(rdev, "ramp_delay not set\n");
+ return 0;
+ }
+
+ /* sanity check */
+ if (!rdev->desc->ops->list_voltage)
+ return -EINVAL;
+
+ old_volt = rdev->desc->ops->list_voltage(rdev, old_selector);
+ new_volt = rdev->desc->ops->list_voltage(rdev, new_selector);
+
+ return DIV_ROUND_UP(abs(new_volt - old_volt), ramp_delay);
+}
+EXPORT_SYMBOL_GPL(regulator_set_voltage_time_sel);
+
+/**
* regulator_sync_voltage - re-apply last regulator output voltage
* @regulator: regulator source
*
@@ -2516,9 +2601,12 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
{
struct regulator_dev *rdev = regulator->rdev;
struct regulator *consumer;
- int ret, output_uV, input_uV, total_uA_load = 0;
+ int ret, output_uV, input_uV = 0, total_uA_load = 0;
unsigned int mode;
+ if (rdev->supply)
+ input_uV = regulator_get_voltage(rdev->supply);
+
mutex_lock(&rdev->mutex);
/*
@@ -2551,10 +2639,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
goto out;
}
- /* get input voltage */
- input_uV = 0;
- if (rdev->supply)
- input_uV = regulator_get_voltage(rdev->supply);
+ /* No supply? Use constraint voltage */
if (input_uV <= 0)
input_uV = rdev->constraints->input_uV;
if (input_uV <= 0) {
@@ -2625,7 +2710,7 @@ static void _notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data)
{
/* call rdev chain first */
- blocking_notifier_call_chain(&rdev->notifier, event, NULL);
+ blocking_notifier_call_chain(&rdev->notifier, event, data);
}
/**
@@ -2906,10 +2991,10 @@ int regulator_mode_to_status(unsigned int mode)
return REGULATOR_STATUS_NORMAL;
case REGULATOR_MODE_IDLE:
return REGULATOR_STATUS_IDLE;
- case REGULATOR_STATUS_STANDBY:
+ case REGULATOR_MODE_STANDBY:
return REGULATOR_STATUS_STANDBY;
default:
- return 0;
+ return REGULATOR_STATUS_UNDEFINED;
}
}
EXPORT_SYMBOL_GPL(regulator_mode_to_status);
@@ -3102,7 +3187,10 @@ regulator_register(const struct regulator_desc *regulator_desc,
rdev->reg_data = config->driver_data;
rdev->owner = regulator_desc->owner;
rdev->desc = regulator_desc;
- rdev->regmap = config->regmap;
+ if (config->regmap)
+ rdev->regmap = config->regmap;
+ else
+ rdev->regmap = dev_get_regmap(dev, NULL);
INIT_LIST_HEAD(&rdev->consumer_list);
INIT_LIST_HEAD(&rdev->list);
BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier);
@@ -3129,6 +3217,26 @@ regulator_register(const struct regulator_desc *regulator_desc,
dev_set_drvdata(&rdev->dev, rdev);
+ if (config->ena_gpio) {
+ ret = gpio_request_one(config->ena_gpio,
+ GPIOF_DIR_OUT | config->ena_gpio_flags,
+ rdev_get_name(rdev));
+ if (ret != 0) {
+ rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
+ config->ena_gpio, ret);
+ goto clean;
+ }
+
+ rdev->ena_gpio = config->ena_gpio;
+ rdev->ena_gpio_invert = config->ena_gpio_invert;
+
+ if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH)
+ rdev->ena_gpio_state = 1;
+
+ if (rdev->ena_gpio_invert)
+ rdev->ena_gpio_state = !rdev->ena_gpio_state;
+ }
+
/* set regulator constraints */
if (init_data)
constraints = &init_data->constraints;
@@ -3197,6 +3305,8 @@ unset_supplies:
scrub:
if (rdev->supply)
regulator_put(rdev->supply);
+ if (rdev->ena_gpio)
+ gpio_free(rdev->ena_gpio);
kfree(rdev->constraints);
device_unregister(&rdev->dev);
/* device core frees rdev */
@@ -3230,6 +3340,8 @@ void regulator_unregister(struct regulator_dev *rdev)
unset_regulator_supplies(rdev);
list_del(&rdev->list);
kfree(rdev->constraints);
+ if (rdev->ena_gpio)
+ gpio_free(rdev->ena_gpio);
device_unregister(&rdev->dev);
mutex_unlock(&regulator_list_mutex);
}
@@ -3469,6 +3581,15 @@ static int __init regulator_init_complete(void)
struct regulation_constraints *c;
int enabled, ret;
+ /*
+ * Since DT doesn't provide an idiomatic mechanism for
+ * enabling full constraints and since it's much more natural
+ * with DT to provide them, just assume that a DT-enabled
+ * system has full constraints.
+ */
+ if (of_have_populated_dt())
+ has_full_constraints = true;
+
mutex_lock(&regulator_list_mutex);
/* If we have a full configuration then disable any regulators
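The new regulator_set_voltage_time_sel() helper above reduces to a single division: the voltage delta between the two selectors divided by ramp_delay (in µV/µs), rounded up. A standalone sketch (not kernel code) of that arithmetic with illustrative numbers:

/* Standalone sketch (not kernel code): the ramp-time arithmetic used by
 * regulator_set_voltage_time_sel(), with ramp_delay in uV/us. */
#include <stdio.h>
#include <stdlib.h>

static int ramp_time_us(int old_uV, int new_uV, unsigned int ramp_delay)
{
	if (ramp_delay == 0)
		return 0;	/* the core just warns and returns 0 here */
	/* DIV_ROUND_UP(|new - old|, ramp_delay) */
	return (abs(new_uV - old_uV) + ramp_delay - 1) / ramp_delay;
}

int main(void)
{
	/* e.g. stepping from 1.10 V to 1.25 V at 6250 uV/us -> 24 us */
	printf("%d us\n", ramp_time_us(1100000, 1250000, 6250));
	return 0;
}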
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index 1005f5f..36c5b92 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -107,6 +107,9 @@ static int da903x_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
struct device *da9034_dev = to_da903x_dev(rdev);
uint8_t val, mask;
+ if (rdev->desc->n_voltages == 1)
+ return -EINVAL;
+
val = selector << info->vol_shift;
mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
@@ -120,6 +123,9 @@ static int da903x_get_voltage_sel(struct regulator_dev *rdev)
uint8_t val, mask;
int ret;
+ if (rdev->desc->n_voltages == 1)
+ return 0;
+
ret = da903x_read(da9034_dev, info->vol_reg, &val);
if (ret)
return ret;
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index 88976d8..903299c 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -405,12 +405,12 @@ static int __devinit da9052_regulator_probe(struct platform_device *pdev)
if (!nproot)
return -ENODEV;
- for (np = of_get_next_child(nproot, NULL); np;
- np = of_get_next_child(nproot, np)) {
+ for_each_child_of_node(nproot, np) {
if (!of_node_cmp(np->name,
regulator->info->reg_desc.name)) {
config.init_data = of_get_regulator_init_data(
&pdev->dev, np);
+ config.of_node = np;
break;
}
}
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 968f97f..9dbb491 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -452,26 +452,26 @@ static __devinit int db8500_regulator_register(struct platform_device *pdev,
}
static struct of_regulator_match db8500_regulator_matches[] = {
- { .name = "db8500-vape", .driver_data = (void *) DB8500_REGULATOR_VAPE, },
- { .name = "db8500-varm", .driver_data = (void *) DB8500_REGULATOR_VARM, },
- { .name = "db8500-vmodem", .driver_data = (void *) DB8500_REGULATOR_VMODEM, },
- { .name = "db8500-vpll", .driver_data = (void *) DB8500_REGULATOR_VPLL, },
- { .name = "db8500-vsmps1", .driver_data = (void *) DB8500_REGULATOR_VSMPS1, },
- { .name = "db8500-vsmps2", .driver_data = (void *) DB8500_REGULATOR_VSMPS2, },
- { .name = "db8500-vsmps3", .driver_data = (void *) DB8500_REGULATOR_VSMPS3, },
- { .name = "db8500-vrf1", .driver_data = (void *) DB8500_REGULATOR_VRF1, },
- { .name = "db8500-sva-mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, },
- { .name = "db8500-sva-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, },
- { .name = "db8500-sva-pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, },
- { .name = "db8500-sia-mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, },
- { .name = "db8500-sia-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, },
- { .name = "db8500-sia-pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, },
- { .name = "db8500-sga", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, },
- { .name = "db8500-b2r2-mcde", .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, },
- { .name = "db8500-esram12", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, },
- { .name = "db8500-esram12-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, },
- { .name = "db8500-esram34", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, },
- { .name = "db8500-esram34-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, },
+ { .name = "db8500_vape", .driver_data = (void *) DB8500_REGULATOR_VAPE, },
+ { .name = "db8500_varm", .driver_data = (void *) DB8500_REGULATOR_VARM, },
+ { .name = "db8500_vmodem", .driver_data = (void *) DB8500_REGULATOR_VMODEM, },
+ { .name = "db8500_vpll", .driver_data = (void *) DB8500_REGULATOR_VPLL, },
+ { .name = "db8500_vsmps1", .driver_data = (void *) DB8500_REGULATOR_VSMPS1, },
+ { .name = "db8500_vsmps2", .driver_data = (void *) DB8500_REGULATOR_VSMPS2, },
+ { .name = "db8500_vsmps3", .driver_data = (void *) DB8500_REGULATOR_VSMPS3, },
+ { .name = "db8500_vrf1", .driver_data = (void *) DB8500_REGULATOR_VRF1, },
+ { .name = "db8500_sva_mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, },
+ { .name = "db8500_sva_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, },
+ { .name = "db8500_sva_pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, },
+ { .name = "db8500_sia_mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, },
+ { .name = "db8500_sia_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, },
+ { .name = "db8500_sia_pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, },
+ { .name = "db8500_sga", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, },
+ { .name = "db8500_b2r2_mcde", .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, },
+ { .name = "db8500_esram12", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, },
+ { .name = "db8500_esram12_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, },
+ { .name = "db8500_esram34", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, },
+ { .name = "db8500_esram34_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, },
};
static __devinit int
diff --git a/drivers/regulator/fixed-helper.c b/drivers/regulator/fixed-helper.c
index cacd33c..f9d0279 100644
--- a/drivers/regulator/fixed-helper.c
+++ b/drivers/regulator/fixed-helper.c
@@ -1,4 +1,5 @@
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/fixed.h>
@@ -13,17 +14,20 @@ static void regulator_fixed_release(struct device *dev)
{
struct fixed_regulator_data *data = container_of(dev,
struct fixed_regulator_data, pdev.dev);
+ kfree(data->cfg.supply_name);
kfree(data);
}
/**
- * regulator_register_fixed - register a no-op fixed regulator
+ * regulator_register_fixed_name - register a no-op fixed regulator
* @id: platform device id
+ * @name: name to be used for the regulator
* @supplies: consumers for this regulator
* @num_supplies: number of consumers
+ * @uv: voltage in microvolts
*/
-struct platform_device *regulator_register_fixed(int id,
- struct regulator_consumer_supply *supplies, int num_supplies)
+struct platform_device *regulator_register_always_on(int id, const char *name,
+ struct regulator_consumer_supply *supplies, int num_supplies, int uv)
{
struct fixed_regulator_data *data;
@@ -31,8 +35,13 @@ struct platform_device *regulator_register_fixed(int id,
if (!data)
return NULL;
- data->cfg.supply_name = "fixed-dummy";
- data->cfg.microvolts = 0;
+ data->cfg.supply_name = kstrdup(name, GFP_KERNEL);
+ if (!data->cfg.supply_name) {
+ kfree(data);
+ return NULL;
+ }
+
+ data->cfg.microvolts = uv;
data->cfg.gpio = -EINVAL;
data->cfg.enabled_at_boot = 1;
data->cfg.init_data = &data->init_data;
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index f09fe7b..185468c 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -35,10 +35,6 @@ struct fixed_voltage_data {
struct regulator_desc desc;
struct regulator_dev *dev;
int microvolts;
- int gpio;
- unsigned startup_delay;
- bool enable_high;
- bool is_enabled;
};
@@ -61,11 +57,11 @@ of_get_fixed_voltage_config(struct device *dev)
config = devm_kzalloc(dev, sizeof(struct fixed_voltage_config),
GFP_KERNEL);
if (!config)
- return NULL;
+ return ERR_PTR(-ENOMEM);
config->init_data = of_get_regulator_init_data(dev, dev->of_node);
if (!config->init_data)
- return NULL;
+ return ERR_PTR(-EINVAL);
init_data = config->init_data;
init_data->constraints.apply_uV = 0;
@@ -76,13 +72,26 @@ of_get_fixed_voltage_config(struct device *dev)
} else {
dev_err(dev,
"Fixed regulator specified with variable voltages\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
if (init_data->constraints.boot_on)
config->enabled_at_boot = true;
config->gpio = of_get_named_gpio(np, "gpio", 0);
+ /*
+ * of_get_named_gpio() currently returns ENODEV rather than
+ * EPROBE_DEFER. This code attempts to be compatible with both
+ * for now; the ENODEV check can be removed once the API is fixed.
+ * of_get_named_gpio() doesn't differentiate between a missing
+ * property (which would be fine here, since the GPIO is optional)
+ * and some other error. Patches have been posted for both issues.
+ * Once they are checked in, we should replace this with:
+ * if (config->gpio < 0 && config->gpio != -ENOENT)
+ */
+ if ((config->gpio == -ENODEV) || (config->gpio == -EPROBE_DEFER))
+ return ERR_PTR(-EPROBE_DEFER);
+
delay = of_get_property(np, "startup-delay-us", NULL);
if (delay)
config->startup_delay = be32_to_cpu(*delay);
@@ -93,41 +102,10 @@ of_get_fixed_voltage_config(struct device *dev)
if (of_find_property(np, "gpio-open-drain", NULL))
config->gpio_is_open_drain = true;
- return config;
-}
-
-static int fixed_voltage_is_enabled(struct regulator_dev *dev)
-{
- struct fixed_voltage_data *data = rdev_get_drvdata(dev);
-
- return data->is_enabled;
-}
-
-static int fixed_voltage_enable(struct regulator_dev *dev)
-{
- struct fixed_voltage_data *data = rdev_get_drvdata(dev);
-
- gpio_set_value_cansleep(data->gpio, data->enable_high);
- data->is_enabled = true;
-
- return 0;
-}
-
-static int fixed_voltage_disable(struct regulator_dev *dev)
-{
- struct fixed_voltage_data *data = rdev_get_drvdata(dev);
-
- gpio_set_value_cansleep(data->gpio, !data->enable_high);
- data->is_enabled = false;
-
- return 0;
-}
+ if (of_find_property(np, "vin-supply", NULL))
+ config->input_supply = "vin";
-static int fixed_voltage_enable_time(struct regulator_dev *dev)
-{
- struct fixed_voltage_data *data = rdev_get_drvdata(dev);
-
- return data->startup_delay;
+ return config;
}
static int fixed_voltage_get_voltage(struct regulator_dev *dev)
@@ -151,15 +129,6 @@ static int fixed_voltage_list_voltage(struct regulator_dev *dev,
return data->microvolts;
}
-static struct regulator_ops fixed_voltage_gpio_ops = {
- .is_enabled = fixed_voltage_is_enabled,
- .enable = fixed_voltage_enable,
- .disable = fixed_voltage_disable,
- .enable_time = fixed_voltage_enable_time,
- .get_voltage = fixed_voltage_get_voltage,
- .list_voltage = fixed_voltage_list_voltage,
-};
-
static struct regulator_ops fixed_voltage_ops = {
.get_voltage = fixed_voltage_get_voltage,
.list_voltage = fixed_voltage_list_voltage,
@@ -172,10 +141,13 @@ static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
struct regulator_config cfg = { };
int ret;
- if (pdev->dev.of_node)
+ if (pdev->dev.of_node) {
config = of_get_fixed_voltage_config(&pdev->dev);
- else
+ if (IS_ERR(config))
+ return PTR_ERR(config);
+ } else {
config = pdev->dev.platform_data;
+ }
if (!config)
return -ENOMEM;
@@ -196,59 +168,44 @@ static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
}
drvdata->desc.type = REGULATOR_VOLTAGE;
drvdata->desc.owner = THIS_MODULE;
+ drvdata->desc.ops = &fixed_voltage_ops;
- if (config->microvolts)
- drvdata->desc.n_voltages = 1;
+ drvdata->desc.enable_time = config->startup_delay;
- drvdata->microvolts = config->microvolts;
- drvdata->gpio = config->gpio;
- drvdata->startup_delay = config->startup_delay;
-
- if (gpio_is_valid(config->gpio)) {
- int gpio_flag;
- drvdata->enable_high = config->enable_high;
-
- /* FIXME: Remove below print warning
- *
- * config->gpio must be set to -EINVAL by platform code if
- * GPIO control is not required. However, early adopters
- * not requiring GPIO control may forget to initialize
- * config->gpio to -EINVAL. This will cause GPIO 0 to be used
- * for GPIO control.
- *
- * This warning will be removed once there are a couple of users
- * for this driver.
- */
- if (!config->gpio)
- dev_warn(&pdev->dev,
- "using GPIO 0 for regulator enable control\n");
-
- /*
- * set output direction without changing state
- * to prevent glitch
- */
- drvdata->is_enabled = config->enabled_at_boot;
- ret = drvdata->is_enabled ?
- config->enable_high : !config->enable_high;
- gpio_flag = ret ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
-
- if (config->gpio_is_open_drain)
- gpio_flag |= GPIOF_OPEN_DRAIN;
-
- ret = gpio_request_one(config->gpio, gpio_flag,
- config->supply_name);
- if (ret) {
+ if (config->input_supply) {
+ drvdata->desc.supply_name = kstrdup(config->input_supply,
+ GFP_KERNEL);
+ if (!drvdata->desc.supply_name) {
dev_err(&pdev->dev,
- "Could not obtain regulator enable GPIO %d: %d\n",
- config->gpio, ret);
+ "Failed to allocate input supply\n");
+ ret = -ENOMEM;
goto err_name;
}
+ }
+
+ if (config->microvolts)
+ drvdata->desc.n_voltages = 1;
- drvdata->desc.ops = &fixed_voltage_gpio_ops;
+ drvdata->microvolts = config->microvolts;
+ if (config->gpio >= 0)
+ cfg.ena_gpio = config->gpio;
+ cfg.ena_gpio_invert = !config->enable_high;
+ if (config->enabled_at_boot) {
+ if (config->enable_high) {
+ cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
+ } else {
+ cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW;
+ }
} else {
- drvdata->desc.ops = &fixed_voltage_ops;
+ if (config->enable_high) {
+ cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW;
+ } else {
+ cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
+ }
}
+ if (config->gpio_is_open_drain)
+ cfg.ena_gpio_flags |= GPIOF_OPEN_DRAIN;
cfg.dev = &pdev->dev;
cfg.init_data = config->init_data;
@@ -259,7 +216,7 @@ static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
if (IS_ERR(drvdata->dev)) {
ret = PTR_ERR(drvdata->dev);
dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
- goto err_gpio;
+ goto err_input;
}
platform_set_drvdata(pdev, drvdata);
@@ -269,9 +226,8 @@ static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
return 0;
-err_gpio:
- if (gpio_is_valid(config->gpio))
- gpio_free(config->gpio);
+err_input:
+ kfree(drvdata->desc.supply_name);
err_name:
kfree(drvdata->desc.name);
err:
@@ -283,8 +239,7 @@ static int __devexit reg_fixed_voltage_remove(struct platform_device *pdev)
struct fixed_voltage_data *drvdata = platform_get_drvdata(pdev);
regulator_unregister(drvdata->dev);
- if (gpio_is_valid(drvdata->gpio))
- gpio_free(drvdata->gpio);
+ kfree(drvdata->desc.supply_name);
kfree(drvdata->desc.name);
return 0;
@@ -296,8 +251,6 @@ static const struct of_device_id fixed_of_match[] __devinitconst = {
{},
};
MODULE_DEVICE_TABLE(of, fixed_of_match);
-#else
-#define fixed_of_match NULL
#endif
static struct platform_driver regulator_fixed_voltage_driver = {
@@ -306,7 +259,7 @@ static struct platform_driver regulator_fixed_voltage_driver = {
.driver = {
.name = "reg-fixed-voltage",
.owner = THIS_MODULE,
- .of_match_table = fixed_of_match,
+ .of_match_table = of_match_ptr(fixed_of_match),
},
};
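With the GPIO handling moved into the core, the probe above only has to pick the initial output level so that the electrical state at registration matches enabled_at_boot for the configured polarity. A standalone truth-table sketch (not kernel code; INIT_LOW/INIT_HIGH merely stand in for the GPIOF_OUT_INIT_* flags):

/* Standalone sketch (not kernel code): the initial-level selection the fixed
 * and gpio regulator probes feed into cfg.ena_gpio_flags. */
#include <stdio.h>
#include <stdbool.h>

enum { INIT_LOW, INIT_HIGH };	/* stand-ins for GPIOF_OUT_INIT_{LOW,HIGH} */

static int initial_level(bool enabled_at_boot, bool enable_high)
{
	/* Drive the line so the state at probe matches the requested boot
	 * state without glitching the supply. */
	if (enabled_at_boot)
		return enable_high ? INIT_HIGH : INIT_LOW;
	else
		return enable_high ? INIT_LOW : INIT_HIGH;
}

int main(void)
{
	printf("boot-on,  active-high -> %s\n",
	       initial_level(true, true) == INIT_HIGH ? "HIGH" : "LOW");
	printf("boot-off, active-low  -> %s\n",
	       initial_level(false, false) == INIT_HIGH ? "HIGH" : "LOW");
	return 0;
}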
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 9997d7a..34b67be 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -36,11 +36,6 @@ struct gpio_regulator_data {
struct regulator_desc desc;
struct regulator_dev *dev;
- int enable_gpio;
- bool enable_high;
- bool is_enabled;
- unsigned startup_delay;
-
struct gpio *gpios;
int nr_gpios;
@@ -50,44 +45,6 @@ struct gpio_regulator_data {
int state;
};
-static int gpio_regulator_is_enabled(struct regulator_dev *dev)
-{
- struct gpio_regulator_data *data = rdev_get_drvdata(dev);
-
- return data->is_enabled;
-}
-
-static int gpio_regulator_enable(struct regulator_dev *dev)
-{
- struct gpio_regulator_data *data = rdev_get_drvdata(dev);
-
- if (gpio_is_valid(data->enable_gpio)) {
- gpio_set_value_cansleep(data->enable_gpio, data->enable_high);
- data->is_enabled = true;
- }
-
- return 0;
-}
-
-static int gpio_regulator_disable(struct regulator_dev *dev)
-{
- struct gpio_regulator_data *data = rdev_get_drvdata(dev);
-
- if (gpio_is_valid(data->enable_gpio)) {
- gpio_set_value_cansleep(data->enable_gpio, !data->enable_high);
- data->is_enabled = false;
- }
-
- return 0;
-}
-
-static int gpio_regulator_enable_time(struct regulator_dev *dev)
-{
- struct gpio_regulator_data *data = rdev_get_drvdata(dev);
-
- return data->startup_delay;
-}
-
static int gpio_regulator_get_value(struct regulator_dev *dev)
{
struct gpio_regulator_data *data = rdev_get_drvdata(dev);
@@ -101,16 +58,20 @@ static int gpio_regulator_get_value(struct regulator_dev *dev)
}
static int gpio_regulator_set_value(struct regulator_dev *dev,
- int min, int max)
+ int min, int max, unsigned *selector)
{
struct gpio_regulator_data *data = rdev_get_drvdata(dev);
- int ptr, target, state, best_val = INT_MAX;
+ int ptr, target = 0, state, best_val = INT_MAX;
for (ptr = 0; ptr < data->nr_states; ptr++)
if (data->states[ptr].value < best_val &&
data->states[ptr].value >= min &&
- data->states[ptr].value <= max)
+ data->states[ptr].value <= max) {
target = data->states[ptr].gpios;
+ best_val = data->states[ptr].value;
+ if (selector)
+ *selector = ptr;
+ }
if (best_val == INT_MAX)
return -EINVAL;
@@ -128,7 +89,7 @@ static int gpio_regulator_set_voltage(struct regulator_dev *dev,
int min_uV, int max_uV,
unsigned *selector)
{
- return gpio_regulator_set_value(dev, min_uV, max_uV);
+ return gpio_regulator_set_value(dev, min_uV, max_uV, selector);
}
static int gpio_regulator_list_voltage(struct regulator_dev *dev,
@@ -145,24 +106,16 @@ static int gpio_regulator_list_voltage(struct regulator_dev *dev,
static int gpio_regulator_set_current_limit(struct regulator_dev *dev,
int min_uA, int max_uA)
{
- return gpio_regulator_set_value(dev, min_uA, max_uA);
+ return gpio_regulator_set_value(dev, min_uA, max_uA, NULL);
}
static struct regulator_ops gpio_regulator_voltage_ops = {
- .is_enabled = gpio_regulator_is_enabled,
- .enable = gpio_regulator_enable,
- .disable = gpio_regulator_disable,
- .enable_time = gpio_regulator_enable_time,
.get_voltage = gpio_regulator_get_value,
.set_voltage = gpio_regulator_set_voltage,
.list_voltage = gpio_regulator_list_voltage,
};
static struct regulator_ops gpio_regulator_current_ops = {
- .is_enabled = gpio_regulator_is_enabled,
- .enable = gpio_regulator_enable,
- .disable = gpio_regulator_disable,
- .enable_time = gpio_regulator_enable_time,
.get_current_limit = gpio_regulator_get_value,
.set_current_limit = gpio_regulator_set_current_limit,
};
@@ -209,6 +162,7 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev)
drvdata->nr_states = config->nr_states;
drvdata->desc.owner = THIS_MODULE;
+ drvdata->desc.enable_time = config->startup_delay;
/* handle regulator type*/
switch (config->type) {
@@ -228,52 +182,12 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev)
break;
}
- drvdata->enable_gpio = config->enable_gpio;
- drvdata->startup_delay = config->startup_delay;
-
- if (gpio_is_valid(config->enable_gpio)) {
- drvdata->enable_high = config->enable_high;
-
- ret = gpio_request(config->enable_gpio, config->supply_name);
- if (ret) {
- dev_err(&pdev->dev,
- "Could not obtain regulator enable GPIO %d: %d\n",
- config->enable_gpio, ret);
- goto err_memstate;
- }
-
- /* set output direction without changing state
- * to prevent glitch
- */
- if (config->enabled_at_boot) {
- drvdata->is_enabled = true;
- ret = gpio_direction_output(config->enable_gpio,
- config->enable_high);
- } else {
- drvdata->is_enabled = false;
- ret = gpio_direction_output(config->enable_gpio,
- !config->enable_high);
- }
-
- if (ret) {
- dev_err(&pdev->dev,
- "Could not configure regulator enable GPIO %d direction: %d\n",
- config->enable_gpio, ret);
- goto err_enablegpio;
- }
- } else {
- /* Regulator without GPIO control is considered
- * always enabled
- */
- drvdata->is_enabled = true;
- }
-
drvdata->nr_gpios = config->nr_gpios;
ret = gpio_request_array(drvdata->gpios, drvdata->nr_gpios);
if (ret) {
dev_err(&pdev->dev,
"Could not obtain regulator setting GPIOs: %d\n", ret);
- goto err_enablegpio;
+ goto err_memstate;
}
/* build initial state from gpio init data. */
@@ -286,7 +200,22 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev)
cfg.dev = &pdev->dev;
cfg.init_data = config->init_data;
- cfg.driver_data = &drvdata;
+ cfg.driver_data = drvdata;
+
+ if (config->enable_gpio >= 0)
+ cfg.ena_gpio = config->enable_gpio;
+ cfg.ena_gpio_invert = !config->enable_high;
+ if (config->enabled_at_boot) {
+ if (config->enable_high)
+ cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
+ else
+ cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW;
+ } else {
+ if (config->enable_high)
+ cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW;
+ else
+ cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
+ }
drvdata->dev = regulator_register(&drvdata->desc, &cfg);
if (IS_ERR(drvdata->dev)) {
@@ -301,9 +230,6 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev)
err_stategpio:
gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
-err_enablegpio:
- if (gpio_is_valid(config->enable_gpio))
- gpio_free(config->enable_gpio);
err_memstate:
kfree(drvdata->states);
err_memgpio:
@@ -325,9 +251,6 @@ static int __devexit gpio_regulator_remove(struct platform_device *pdev)
kfree(drvdata->states);
kfree(drvdata->gpios);
- if (gpio_is_valid(drvdata->enable_gpio))
- gpio_free(drvdata->enable_gpio);
-
kfree(drvdata->desc.name);
return 0;
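The corrected loop in gpio_regulator_set_value() now tracks both the best value and its index, picking the lowest state value that still falls inside the requested window. A standalone sketch (not kernel code) of that selection, with an illustrative three-state table:

/* Standalone sketch (not kernel code): the best-match selection used by
 * gpio_regulator_set_value() -- lowest state value inside [min, max],
 * reporting its index as the selector and its GPIO pattern as the target. */
#include <stdio.h>
#include <limits.h>

struct state { int value; unsigned int gpios; };

static int pick_state(const struct state *states, int nr_states,
		      int min, int max, unsigned int *selector)
{
	int ptr, best_val = INT_MAX, target = -1;

	for (ptr = 0; ptr < nr_states; ptr++) {
		if (states[ptr].value < best_val &&
		    states[ptr].value >= min &&
		    states[ptr].value <= max) {
			best_val = states[ptr].value;
			target = ptr;
		}
	}
	if (best_val == INT_MAX)
		return -1;
	if (selector)
		*selector = target;
	return states[target].gpios;	/* GPIO pattern to apply */
}

int main(void)
{
	static const struct state states[] = {
		{ 1200000, 0x0 }, { 1500000, 0x1 }, { 1800000, 0x3 },
	};
	unsigned int sel;
	int gpios = pick_state(states, 3, 1400000, 1800000, &sel);

	printf("selector %u, gpio pattern 0x%x\n", sel, gpios);
	return 0;
}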
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index 56d273f..1d145a0 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -75,19 +75,12 @@ static struct regulator_ops isl_core_ops = {
static int isl6271a_get_fixed_voltage(struct regulator_dev *dev)
{
- int id = rdev_get_id(dev);
- return (id == 1) ? 1100000 : 1300000;
-}
-
-static int isl6271a_list_fixed_voltage(struct regulator_dev *dev, unsigned selector)
-{
- int id = rdev_get_id(dev);
- return (id == 1) ? 1100000 : 1300000;
+ return dev->desc->min_uV;
}
static struct regulator_ops isl_fixed_ops = {
.get_voltage = isl6271a_get_fixed_voltage,
- .list_voltage = isl6271a_list_fixed_voltage,
+ .list_voltage = regulator_list_voltage_linear,
};
static const struct regulator_desc isl_rd[] = {
@@ -107,6 +100,7 @@ static const struct regulator_desc isl_rd[] = {
.ops = &isl_fixed_ops,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .min_uV = 1100000,
}, {
.name = "LDO2",
.id = 2,
@@ -114,6 +108,7 @@ static const struct regulator_desc isl_rd[] = {
.ops = &isl_fixed_ops,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .min_uV = 1300000,
},
};
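Combined with the core change that lets regulator_map_voltage_linear() accept uV_step == 0 when n_voltages == 1, the fixed LDO voltages above are validated simply by checking that desc->min_uV sits inside the requested window. A standalone sketch (not kernel code):

/* Standalone sketch (not kernel code): the single-voltage case handled by
 * regulator_map_voltage_linear() for descriptors like the ISL6271A LDOs
 * (n_voltages == 1, uV_step == 0, min_uV holds the fixed value). */
#include <stdio.h>

static int map_fixed_voltage(int fixed_uV, int min_uV, int max_uV)
{
	/* Selector 0 is valid only if the fixed voltage is in the window. */
	if (min_uV <= fixed_uV && fixed_uV <= max_uV)
		return 0;
	return -1;
}

int main(void)
{
	printf("LDO1 (1.1 V), request 1.0..1.2 V -> %d\n",
	       map_fixed_voltage(1100000, 1000000, 1200000));	/* 0 */
	printf("LDO2 (1.3 V), request 1.0..1.2 V -> %d\n",
	       map_fixed_voltage(1300000, 1000000, 1200000));	/* -1 */
	return 0;
}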
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 981bea9..7c6e3b8 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -65,11 +65,11 @@ static const int buck_base_addr[] = {
#define LP3971_BUCK_TARGET_VOL1_REG(x) (buck_base_addr[x])
#define LP3971_BUCK_TARGET_VOL2_REG(x) (buck_base_addr[x]+1)
-static const int buck_voltage_map[] = {
- 0, 800, 850, 900, 950, 1000, 1050, 1100,
- 1150, 1200, 1250, 1300, 1350, 1400, 1450, 1500,
- 1550, 1600, 1650, 1700, 1800, 1900, 2500, 2800,
- 3000, 3300,
+static const unsigned int buck_voltage_map[] = {
+ 0, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000,
+ 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000,
+ 1550000, 1600000, 1650000, 1700000, 1800000, 1900000, 2500000, 2800000,
+ 3000000, 3300000,
};
#define BUCK_TARGET_VOL_MASK 0x3f
@@ -98,39 +98,19 @@ static const int buck_voltage_map[] = {
#define LDO_VOL_CONTR_SHIFT(x) ((x & 1) << 2)
#define LDO_VOL_CONTR_MASK 0x0f
-static const int ldo45_voltage_map[] = {
- 1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350,
- 1400, 1500, 1800, 1900, 2500, 2800, 3000, 3300,
+static const unsigned int ldo45_voltage_map[] = {
+ 1000000, 1050000, 1100000, 1150000, 1200000, 1250000, 1300000, 1350000,
+ 1400000, 1500000, 1800000, 1900000, 2500000, 2800000, 3000000, 3300000,
};
-static const int ldo123_voltage_map[] = {
- 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500,
- 2600, 2700, 2800, 2900, 3000, 3100, 3200, 3300,
+static const unsigned int ldo123_voltage_map[] = {
+ 1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000,
+ 2600000, 2700000, 2800000, 2900000, 3000000, 3100000, 3200000, 3300000,
};
-static const int *ldo_voltage_map[] = {
- ldo123_voltage_map, /* LDO1 */
- ldo123_voltage_map, /* LDO2 */
- ldo123_voltage_map, /* LDO3 */
- ldo45_voltage_map, /* LDO4 */
- ldo45_voltage_map, /* LDO5 */
-};
-
-#define LDO_VOL_VALUE_MAP(x) (ldo_voltage_map[(x - LP3971_LDO1)])
-
#define LDO_VOL_MIN_IDX 0x00
#define LDO_VOL_MAX_IDX 0x0f
-static int lp3971_ldo_list_voltage(struct regulator_dev *dev, unsigned index)
-{
- int ldo = rdev_get_id(dev) - LP3971_LDO1;
-
- if (index > LDO_VOL_MAX_IDX)
- return -EINVAL;
-
- return 1000 * LDO_VOL_VALUE_MAP(ldo)[index];
-}
-
static int lp3971_ldo_is_enabled(struct regulator_dev *dev)
{
struct lp3971 *lp3971 = rdev_get_drvdata(dev);
@@ -169,7 +149,7 @@ static int lp3971_ldo_get_voltage(struct regulator_dev *dev)
reg = lp3971_reg_read(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo));
val = (reg >> LDO_VOL_CONTR_SHIFT(ldo)) & LDO_VOL_CONTR_MASK;
- return 1000 * LDO_VOL_VALUE_MAP(ldo)[val];
+ return dev->desc->volt_table[val];
}
static int lp3971_ldo_set_voltage_sel(struct regulator_dev *dev,
@@ -184,7 +164,7 @@ static int lp3971_ldo_set_voltage_sel(struct regulator_dev *dev,
}
static struct regulator_ops lp3971_ldo_ops = {
- .list_voltage = lp3971_ldo_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.is_enabled = lp3971_ldo_is_enabled,
.enable = lp3971_ldo_enable,
.disable = lp3971_ldo_disable,
@@ -192,14 +172,6 @@ static struct regulator_ops lp3971_ldo_ops = {
.set_voltage_sel = lp3971_ldo_set_voltage_sel,
};
-static int lp3971_dcdc_list_voltage(struct regulator_dev *dev, unsigned index)
-{
- if (index < BUCK_TARGET_VOL_MIN_IDX || index > BUCK_TARGET_VOL_MAX_IDX)
- return -EINVAL;
-
- return 1000 * buck_voltage_map[index];
-}
-
static int lp3971_dcdc_is_enabled(struct regulator_dev *dev)
{
struct lp3971 *lp3971 = rdev_get_drvdata(dev);
@@ -240,7 +212,7 @@ static int lp3971_dcdc_get_voltage(struct regulator_dev *dev)
reg &= BUCK_TARGET_VOL_MASK;
if (reg <= BUCK_TARGET_VOL_MAX_IDX)
- val = 1000 * buck_voltage_map[reg];
+ val = buck_voltage_map[reg];
else {
val = 0;
dev_warn(&dev->dev, "chip reported incorrect voltage value.\n");
@@ -273,7 +245,7 @@ static int lp3971_dcdc_set_voltage_sel(struct regulator_dev *dev,
}
static struct regulator_ops lp3971_dcdc_ops = {
- .list_voltage = lp3971_dcdc_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.is_enabled = lp3971_dcdc_is_enabled,
.enable = lp3971_dcdc_enable,
.disable = lp3971_dcdc_disable,
@@ -287,6 +259,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3971_LDO1,
.ops = &lp3971_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo123_voltage_map),
+ .volt_table = ldo123_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -295,6 +268,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3971_LDO2,
.ops = &lp3971_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo123_voltage_map),
+ .volt_table = ldo123_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -303,6 +277,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3971_LDO3,
.ops = &lp3971_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo123_voltage_map),
+ .volt_table = ldo123_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -311,6 +286,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3971_LDO4,
.ops = &lp3971_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo45_voltage_map),
+ .volt_table = ldo45_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -319,6 +295,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3971_LDO5,
.ops = &lp3971_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo45_voltage_map),
+ .volt_table = ldo45_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -327,6 +304,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3971_DCDC1,
.ops = &lp3971_dcdc_ops,
.n_voltages = ARRAY_SIZE(buck_voltage_map),
+ .volt_table = buck_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -335,6 +313,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3971_DCDC2,
.ops = &lp3971_dcdc_ops,
.n_voltages = ARRAY_SIZE(buck_voltage_map),
+ .volt_table = buck_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -343,6 +322,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3971_DCDC3,
.ops = &lp3971_dcdc_ops,
.n_voltages = ARRAY_SIZE(buck_voltage_map),
+ .volt_table = buck_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
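With volt_table populated, regulator_list_voltage_table() is just a bounds-checked array lookup in microvolts. A standalone sketch (not kernel code) using the ldo45 table from above:

/* Standalone sketch (not kernel code): the table lookup performed by
 * regulator_list_voltage_table() for the lp3971 descriptors. */
#include <stdio.h>

static const unsigned int ldo45_voltage_map[] = {
	1000000, 1050000, 1100000, 1150000, 1200000, 1250000, 1300000, 1350000,
	1400000, 1500000, 1800000, 1900000, 2500000, 2800000, 3000000, 3300000,
};

static int list_voltage_table(const unsigned int *table, unsigned int n,
			      unsigned int selector)
{
	if (selector >= n)
		return -1;
	return table[selector];
}

int main(void)
{
	printf("LDO4 selector 10 -> %d uV\n",
	       list_voltage_table(ldo45_voltage_map, 16, 10));	/* 1800000 */
	return 0;
}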
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index de073df..3cdc755 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -74,54 +74,40 @@ struct lp3972 {
#define LP3972_OVER2_LDO4_EN BIT(4)
#define LP3972_OVER1_S_EN BIT(2)
-static const int ldo1_voltage_map[] = {
- 1700, 1725, 1750, 1775, 1800, 1825, 1850, 1875,
- 1900, 1925, 1950, 1975, 2000,
+static const unsigned int ldo1_voltage_map[] = {
+ 1700000, 1725000, 1750000, 1775000, 1800000, 1825000, 1850000, 1875000,
+ 1900000, 1925000, 1950000, 1975000, 2000000,
};
-static const int ldo23_voltage_map[] = {
- 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500,
- 2600, 2700, 2800, 2900, 3000, 3100, 3200, 3300,
+static const unsigned int ldo23_voltage_map[] = {
+ 1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000,
+ 2600000, 2700000, 2800000, 2900000, 3000000, 3100000, 3200000, 3300000,
};
-static const int ldo4_voltage_map[] = {
- 1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350,
- 1400, 1500, 1800, 1900, 2500, 2800, 3000, 3300,
+static const unsigned int ldo4_voltage_map[] = {
+ 1000000, 1050000, 1100000, 1150000, 1200000, 1250000, 1300000, 1350000,
+ 1400000, 1500000, 1800000, 1900000, 2500000, 2800000, 3000000, 3300000,
};
-static const int ldo5_voltage_map[] = {
- 0, 0, 0, 0, 0, 850, 875, 900,
- 925, 950, 975, 1000, 1025, 1050, 1075, 1100,
- 1125, 1150, 1175, 1200, 1225, 1250, 1275, 1300,
- 1325, 1350, 1375, 1400, 1425, 1450, 1475, 1500,
+static const unsigned int ldo5_voltage_map[] = {
+ 0, 0, 0, 0, 0, 850000, 875000, 900000,
+ 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000,
+ 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000,
+ 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000,
};
-static const int buck1_voltage_map[] = {
- 725, 750, 775, 800, 825, 850, 875, 900,
- 925, 950, 975, 1000, 1025, 1050, 1075, 1100,
- 1125, 1150, 1175, 1200, 1225, 1250, 1275, 1300,
- 1325, 1350, 1375, 1400, 1425, 1450, 1475, 1500,
+static const unsigned int buck1_voltage_map[] = {
+ 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000,
+ 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000,
+ 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000,
+ 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000,
};
-static const int buck23_voltage_map[] = {
- 0, 800, 850, 900, 950, 1000, 1050, 1100,
- 1150, 1200, 1250, 1300, 1350, 1400, 1450, 1500,
- 1550, 1600, 1650, 1700, 1800, 1900, 2500, 2800,
- 3000, 3300,
-};
-
-static const int *ldo_voltage_map[] = {
- ldo1_voltage_map,
- ldo23_voltage_map,
- ldo23_voltage_map,
- ldo4_voltage_map,
- ldo5_voltage_map,
-};
-
-static const int *buck_voltage_map[] = {
- buck1_voltage_map,
- buck23_voltage_map,
- buck23_voltage_map,
+static const unsigned int buck23_voltage_map[] = {
+ 0, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000,
+ 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000,
+ 1550000, 1600000, 1650000, 1700000, 1800000, 1900000, 2500000, 2800000,
+ 3000000, 3300000,
};
static const int ldo_output_enable_mask[] = {
@@ -160,7 +146,6 @@ static const int buck_base_addr[] = {
LP3972_B3TV_REG,
};
-#define LP3972_LDO_VOL_VALUE_MAP(x) (ldo_voltage_map[x])
#define LP3972_LDO_OUTPUT_ENABLE_MASK(x) (ldo_output_enable_mask[x])
#define LP3972_LDO_OUTPUT_ENABLE_REG(x) (ldo_output_enable_addr[x])
@@ -177,7 +162,6 @@ static const int buck_base_addr[] = {
#define LP3972_LDO_VOL_MIN_IDX(x) (((x) == 4) ? 0x05 : 0x00)
#define LP3972_LDO_VOL_MAX_IDX(x) ((x) ? (((x) == 4) ? 0x1f : 0x0f) : 0x0c)
-#define LP3972_BUCK_VOL_VALUE_MAP(x) (buck_voltage_map[x])
#define LP3972_BUCK_VOL_ENABLE_REG(x) (buck_vol_enable_addr[x])
#define LP3972_BUCK_VOL1_REG(x) (buck_base_addr[x])
#define LP3972_BUCK_VOL_MASK 0x1f
@@ -242,17 +226,6 @@ static int lp3972_set_bits(struct lp3972 *lp3972, u8 reg, u16 mask, u16 val)
return ret;
}
-static int lp3972_ldo_list_voltage(struct regulator_dev *dev, unsigned index)
-{
- int ldo = rdev_get_id(dev) - LP3972_LDO1;
-
- if (index < LP3972_LDO_VOL_MIN_IDX(ldo) ||
- index > LP3972_LDO_VOL_MAX_IDX(ldo))
- return -EINVAL;
-
- return 1000 * LP3972_LDO_VOL_VALUE_MAP(ldo)[index];
-}
-
static int lp3972_ldo_is_enabled(struct regulator_dev *dev)
{
struct lp3972 *lp3972 = rdev_get_drvdata(dev);
@@ -294,7 +267,7 @@ static int lp3972_ldo_get_voltage(struct regulator_dev *dev)
reg = lp3972_reg_read(lp3972, LP3972_LDO_VOL_CONTR_REG(ldo));
val = (reg >> LP3972_LDO_VOL_CONTR_SHIFT(ldo)) & mask;
- return 1000 * LP3972_LDO_VOL_VALUE_MAP(ldo)[val];
+ return dev->desc->volt_table[val];
}
static int lp3972_ldo_set_voltage_sel(struct regulator_dev *dev,
@@ -337,7 +310,7 @@ static int lp3972_ldo_set_voltage_sel(struct regulator_dev *dev,
}
static struct regulator_ops lp3972_ldo_ops = {
- .list_voltage = lp3972_ldo_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.is_enabled = lp3972_ldo_is_enabled,
.enable = lp3972_ldo_enable,
.disable = lp3972_ldo_disable,
@@ -345,17 +318,6 @@ static struct regulator_ops lp3972_ldo_ops = {
.set_voltage_sel = lp3972_ldo_set_voltage_sel,
};
-static int lp3972_dcdc_list_voltage(struct regulator_dev *dev, unsigned index)
-{
- int buck = rdev_get_id(dev) - LP3972_DCDC1;
-
- if (index < LP3972_BUCK_VOL_MIN_IDX(buck) ||
- index > LP3972_BUCK_VOL_MAX_IDX(buck))
- return -EINVAL;
-
- return 1000 * buck_voltage_map[buck][index];
-}
-
static int lp3972_dcdc_is_enabled(struct regulator_dev *dev)
{
struct lp3972 *lp3972 = rdev_get_drvdata(dev);
@@ -401,7 +363,7 @@ static int lp3972_dcdc_get_voltage(struct regulator_dev *dev)
reg = lp3972_reg_read(lp3972, LP3972_BUCK_VOL1_REG(buck));
reg &= LP3972_BUCK_VOL_MASK;
if (reg <= LP3972_BUCK_VOL_MAX_IDX(buck))
- val = 1000 * buck_voltage_map[buck][reg];
+ val = dev->desc->volt_table[reg];
else {
val = 0;
dev_warn(&dev->dev, "chip reported incorrect voltage value."
@@ -436,7 +398,7 @@ static int lp3972_dcdc_set_voltage_sel(struct regulator_dev *dev,
}
static struct regulator_ops lp3972_dcdc_ops = {
- .list_voltage = lp3972_dcdc_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.is_enabled = lp3972_dcdc_is_enabled,
.enable = lp3972_dcdc_enable,
.disable = lp3972_dcdc_disable,
@@ -450,6 +412,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3972_LDO1,
.ops = &lp3972_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo1_voltage_map),
+ .volt_table = ldo1_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -458,6 +421,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3972_LDO2,
.ops = &lp3972_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo23_voltage_map),
+ .volt_table = ldo23_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -466,6 +430,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3972_LDO3,
.ops = &lp3972_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo23_voltage_map),
+ .volt_table = ldo23_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -474,6 +439,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3972_LDO4,
.ops = &lp3972_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo4_voltage_map),
+ .volt_table = ldo4_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -482,6 +448,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3972_LDO5,
.ops = &lp3972_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo5_voltage_map),
+ .volt_table = ldo5_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -490,6 +457,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3972_DCDC1,
.ops = &lp3972_dcdc_ops,
.n_voltages = ARRAY_SIZE(buck1_voltage_map),
+ .volt_table = buck1_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -498,6 +466,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3972_DCDC2,
.ops = &lp3972_dcdc_ops,
.n_voltages = ARRAY_SIZE(buck23_voltage_map),
+ .volt_table = buck23_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -506,6 +475,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3972_DCDC3,
.ops = &lp3972_dcdc_ops,
.n_voltages = ARRAY_SIZE(buck23_voltage_map),
+ .volt_table = buck23_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
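
The lp3971/lp3972 hunks above drop the per-driver list_voltage callbacks and instead point .volt_table at microvolt tables, relying on the regulator core's regulator_list_voltage_table() helper for the selector lookup. The sketch below restates that lookup in isolation so the conversion is easy to follow; it is an illustration written against the desc fields set up above, not the core's actual source.

/*
 * Minimal sketch of the table-based lookup the converted drivers now
 * rely on.  Assumes desc->volt_table holds microvolts and
 * desc->n_voltages its length, as in the regulator_desc arrays above.
 */
static int example_list_voltage_table(const struct regulator_desc *desc,
				      unsigned int selector)
{
	if (!desc->volt_table || selector >= desc->n_voltages)
		return -EINVAL;

	return desc->volt_table[selector];	/* already in uV, no "* 1000" */
}

Because the tables are now stored in microvolts, the old "1000 *" scaling in the get_voltage paths is replaced by a direct dev->desc->volt_table[val] read, as the hunks above show.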
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
new file mode 100644
index 0000000..212c38e
--- /dev/null
+++ b/drivers/regulator/lp872x.c
@@ -0,0 +1,943 @@
+/*
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/regulator/lp872x.h>
+#include <linux/regulator/driver.h>
+#include <linux/platform_device.h>
+
+/* Registers : LP8720/8725 shared */
+#define LP872X_GENERAL_CFG 0x00
+#define LP872X_LDO1_VOUT 0x01
+#define LP872X_LDO2_VOUT 0x02
+#define LP872X_LDO3_VOUT 0x03
+#define LP872X_LDO4_VOUT 0x04
+#define LP872X_LDO5_VOUT 0x05
+
+/* Registers : LP8720 */
+#define LP8720_BUCK_VOUT1 0x06
+#define LP8720_BUCK_VOUT2 0x07
+#define LP8720_ENABLE 0x08
+
+/* Registers : LP8725 */
+#define LP8725_LILO1_VOUT 0x06
+#define LP8725_LILO2_VOUT 0x07
+#define LP8725_BUCK1_VOUT1 0x08
+#define LP8725_BUCK1_VOUT2 0x09
+#define LP8725_BUCK2_VOUT1 0x0A
+#define LP8725_BUCK2_VOUT2 0x0B
+#define LP8725_BUCK_CTRL 0x0C
+#define LP8725_LDO_CTRL 0x0D
+
+/* Mask/shift : LP8720/LP8725 shared */
+#define LP872X_VOUT_M 0x1F
+#define LP872X_START_DELAY_M 0xE0
+#define LP872X_START_DELAY_S 5
+#define LP872X_EN_LDO1_M BIT(0)
+#define LP872X_EN_LDO2_M BIT(1)
+#define LP872X_EN_LDO3_M BIT(2)
+#define LP872X_EN_LDO4_M BIT(3)
+#define LP872X_EN_LDO5_M BIT(4)
+
+/* Mask/shift : LP8720 */
+#define LP8720_TIMESTEP_S 0 /* Addr 00h */
+#define LP8720_TIMESTEP_M BIT(0)
+#define LP8720_EXT_DVS_M BIT(2)
+#define LP8720_BUCK_FPWM_S 5 /* Addr 07h */
+#define LP8720_BUCK_FPWM_M BIT(5)
+#define LP8720_EN_BUCK_M BIT(5) /* Addr 08h */
+#define LP8720_DVS_SEL_M BIT(7)
+
+/* Mask/shift : LP8725 */
+#define LP8725_TIMESTEP_M 0xC0 /* Addr 00h */
+#define LP8725_TIMESTEP_S 6
+#define LP8725_BUCK1_EN_M BIT(0)
+#define LP8725_DVS1_M BIT(2)
+#define LP8725_DVS2_M BIT(3)
+#define LP8725_BUCK2_EN_M BIT(4)
+#define LP8725_BUCK_CL_M 0xC0 /* Addr 09h, 0Bh */
+#define LP8725_BUCK_CL_S 6
+#define LP8725_BUCK1_FPWM_S 1 /* Addr 0Ch */
+#define LP8725_BUCK1_FPWM_M BIT(1)
+#define LP8725_BUCK2_FPWM_S 5
+#define LP8725_BUCK2_FPWM_M BIT(5)
+#define LP8725_EN_LILO1_M BIT(5) /* Addr 0Dh */
+#define LP8725_EN_LILO2_M BIT(6)
+
+/* PWM mode */
+#define LP872X_FORCE_PWM 1
+#define LP872X_AUTO_PWM 0
+
+#define LP8720_NUM_REGULATORS 6
+#define LP8725_NUM_REGULATORS 9
+#define EXTERN_DVS_USED 0
+#define MAX_DELAY 6
+
+/* dump registers in regmap-debugfs */
+#define MAX_REGISTERS 0x0F
+
+enum lp872x_id {
+ LP8720,
+ LP8725,
+};
+
+struct lp872x {
+ struct regmap *regmap;
+ struct device *dev;
+ enum lp872x_id chipid;
+ struct lp872x_platform_data *pdata;
+ struct regulator_dev **regulators;
+ int num_regulators;
+ enum lp872x_dvs_state dvs_pin;
+ int dvs_gpio;
+};
+
+/* LP8720/LP8725 shared voltage table for LDOs */
+static const unsigned int lp872x_ldo_vtbl[] = {
+ 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, 1550000,
+ 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, 2000000,
+ 2100000, 2200000, 2300000, 2400000, 2500000, 2600000, 2650000, 2700000,
+ 2750000, 2800000, 2850000, 2900000, 2950000, 3000000, 3100000, 3300000,
+};
+
+/* LP8720 LDO4 voltage table */
+static const unsigned int lp8720_ldo4_vtbl[] = {
+ 800000, 850000, 900000, 1000000, 1100000, 1200000, 1250000, 1300000,
+ 1350000, 1400000, 1450000, 1500000, 1550000, 1600000, 1650000, 1700000,
+ 1750000, 1800000, 1850000, 1900000, 2000000, 2100000, 2200000, 2300000,
+ 2400000, 2500000, 2600000, 2650000, 2700000, 2750000, 2800000, 2850000,
+};
+
+/* LP8725 LILO(Low Input Low Output) voltage table */
+static const unsigned int lp8725_lilo_vtbl[] = {
+ 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000,
+ 1200000, 1250000, 1300000, 1350000, 1400000, 1500000, 1600000, 1700000,
+ 1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000,
+ 2600000, 2700000, 2800000, 2850000, 2900000, 3000000, 3100000, 3300000,
+};
+
+/* LP8720 BUCK voltage table */
+#define EXT_R 0 /* external resistor divider */
+static const unsigned int lp8720_buck_vtbl[] = {
+ EXT_R, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000,
+ 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000,
+ 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000,
+ 1950000, 2000000, 2050000, 2100000, 2150000, 2200000, 2250000, 2300000,
+};
+
+/* LP8725 BUCK voltage table */
+static const unsigned int lp8725_buck_vtbl[] = {
+ 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000,
+ 1200000, 1250000, 1300000, 1350000, 1400000, 1500000, 1600000, 1700000,
+ 1750000, 1800000, 1850000, 1900000, 2000000, 2100000, 2200000, 2300000,
+ 2400000, 2500000, 2600000, 2700000, 2800000, 2850000, 2900000, 3000000,
+};
+
+/* LP8725 BUCK current limit */
+static const unsigned int lp8725_buck_uA[] = {
+ 460000, 780000, 1050000, 1370000,
+};
+
+static int lp872x_read_byte(struct lp872x *lp, u8 addr, u8 *data)
+{
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(lp->regmap, addr, &val);
+ if (ret < 0) {
+ dev_err(lp->dev, "failed to read 0x%.2x\n", addr);
+ return ret;
+ }
+
+ *data = (u8)val;
+ return 0;
+}
+
+static inline int lp872x_write_byte(struct lp872x *lp, u8 addr, u8 data)
+{
+ return regmap_write(lp->regmap, addr, data);
+}
+
+static inline int lp872x_update_bits(struct lp872x *lp, u8 addr,
+ unsigned int mask, u8 data)
+{
+ return regmap_update_bits(lp->regmap, addr, mask, data);
+}
+
+static int _rdev_to_offset(struct regulator_dev *rdev)
+{
+ enum lp872x_regulator_id id = rdev_get_id(rdev);
+
+ switch (id) {
+ case LP8720_ID_LDO1 ... LP8720_ID_BUCK:
+ return id;
+ case LP8725_ID_LDO1 ... LP8725_ID_BUCK2:
+ return id - LP8725_ID_BASE;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int lp872x_get_timestep_usec(struct lp872x *lp)
+{
+ enum lp872x_id chip = lp->chipid;
+ u8 val, mask, shift;
+ int *time_usec, size, ret;
+ int lp8720_time_usec[] = { 25, 50 };
+ int lp8725_time_usec[] = { 32, 64, 128, 256 };
+
+ switch (chip) {
+ case LP8720:
+ mask = LP8720_TIMESTEP_M;
+ shift = LP8720_TIMESTEP_S;
+ time_usec = &lp8720_time_usec[0];
+ size = ARRAY_SIZE(lp8720_time_usec);
+ break;
+ case LP8725:
+ mask = LP8725_TIMESTEP_M;
+ shift = LP8725_TIMESTEP_S;
+ time_usec = &lp8725_time_usec[0];
+ size = ARRAY_SIZE(lp8725_time_usec);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = lp872x_read_byte(lp, LP872X_GENERAL_CFG, &val);
+ if (ret)
+ return -EINVAL;
+
+ val = (val & mask) >> shift;
+ if (val >= size)
+ return -EINVAL;
+
+ return *(time_usec + val);
+}
+
+static int lp872x_regulator_enable_time(struct regulator_dev *rdev)
+{
+ struct lp872x *lp = rdev_get_drvdata(rdev);
+ enum lp872x_regulator_id regulator = rdev_get_id(rdev);
+ int time_step_us = lp872x_get_timestep_usec(lp);
+ int ret, offset;
+ u8 addr, val;
+
+ if (time_step_us < 0)
+ return -EINVAL;
+
+ switch (regulator) {
+ case LP8720_ID_LDO1 ... LP8720_ID_LDO5:
+ case LP8725_ID_LDO1 ... LP8725_ID_LILO2:
+ offset = _rdev_to_offset(rdev);
+ if (offset < 0)
+ return -EINVAL;
+
+ addr = LP872X_LDO1_VOUT + offset;
+ break;
+ case LP8720_ID_BUCK:
+ addr = LP8720_BUCK_VOUT1;
+ break;
+ case LP8725_ID_BUCK1:
+ addr = LP8725_BUCK1_VOUT1;
+ break;
+ case LP8725_ID_BUCK2:
+ addr = LP8725_BUCK2_VOUT1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = lp872x_read_byte(lp, addr, &val);
+ if (ret)
+ return ret;
+
+ val = (val & LP872X_START_DELAY_M) >> LP872X_START_DELAY_S;
+
+ return val > MAX_DELAY ? 0 : val * time_step_us;
+}
+
+static void lp872x_set_dvs(struct lp872x *lp, int gpio)
+{
+ enum lp872x_dvs_sel dvs_sel = lp->pdata->dvs->vsel;
+ enum lp872x_dvs_state state;
+
+ state = dvs_sel == SEL_V1 ? DVS_HIGH : DVS_LOW;
+ gpio_set_value(gpio, state);
+ lp->dvs_pin = state;
+}
+
+static u8 lp872x_select_buck_vout_addr(struct lp872x *lp,
+ enum lp872x_regulator_id buck)
+{
+ u8 val, addr;
+
+ if (lp872x_read_byte(lp, LP872X_GENERAL_CFG, &val))
+ return 0;
+
+ switch (buck) {
+ case LP8720_ID_BUCK:
+ if (val & LP8720_EXT_DVS_M) {
+ addr = (lp->dvs_pin == DVS_HIGH) ?
+ LP8720_BUCK_VOUT1 : LP8720_BUCK_VOUT2;
+ } else {
+ if (lp872x_read_byte(lp, LP8720_ENABLE, &val))
+ return 0;
+
+ addr = val & LP8720_DVS_SEL_M ?
+ LP8720_BUCK_VOUT1 : LP8720_BUCK_VOUT2;
+ }
+ break;
+ case LP8725_ID_BUCK1:
+ if (val & LP8725_DVS1_M)
+ addr = LP8725_BUCK1_VOUT1;
+ else
+ addr = (lp->dvs_pin == DVS_HIGH) ?
+ LP8725_BUCK1_VOUT1 : LP8725_BUCK1_VOUT2;
+ break;
+ case LP8725_ID_BUCK2:
+ addr = val & LP8725_DVS2_M ?
+ LP8725_BUCK2_VOUT1 : LP8725_BUCK2_VOUT2;
+ break;
+ default:
+ return 0;
+ }
+
+ return addr;
+}
+
+static bool lp872x_is_valid_buck_addr(u8 addr)
+{
+ switch (addr) {
+ case LP8720_BUCK_VOUT1:
+ case LP8720_BUCK_VOUT2:
+ case LP8725_BUCK1_VOUT1:
+ case LP8725_BUCK1_VOUT2:
+ case LP8725_BUCK2_VOUT1:
+ case LP8725_BUCK2_VOUT2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int lp872x_buck_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct lp872x *lp = rdev_get_drvdata(rdev);
+ enum lp872x_regulator_id buck = rdev_get_id(rdev);
+ u8 addr, mask = LP872X_VOUT_M;
+ struct lp872x_dvs *dvs = lp->pdata->dvs;
+
+ if (dvs && gpio_is_valid(dvs->gpio))
+ lp872x_set_dvs(lp, dvs->gpio);
+
+ addr = lp872x_select_buck_vout_addr(lp, buck);
+ if (!lp872x_is_valid_buck_addr(addr))
+ return -EINVAL;
+
+ return lp872x_update_bits(lp, addr, mask, selector);
+}
+
+static int lp872x_buck_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct lp872x *lp = rdev_get_drvdata(rdev);
+ enum lp872x_regulator_id buck = rdev_get_id(rdev);
+ u8 addr, val;
+ int ret;
+
+ addr = lp872x_select_buck_vout_addr(lp, buck);
+ if (!lp872x_is_valid_buck_addr(addr))
+ return -EINVAL;
+
+ ret = lp872x_read_byte(lp, addr, &val);
+ if (ret)
+ return ret;
+
+ return val & LP872X_VOUT_M;
+}
+
+static int lp8725_buck_set_current_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ struct lp872x *lp = rdev_get_drvdata(rdev);
+ enum lp872x_regulator_id buck = rdev_get_id(rdev);
+ int i, max = ARRAY_SIZE(lp8725_buck_uA);
+ u8 addr, val;
+
+ switch (buck) {
+ case LP8725_ID_BUCK1:
+ addr = LP8725_BUCK1_VOUT2;
+ break;
+ case LP8725_ID_BUCK2:
+ addr = LP8725_BUCK2_VOUT2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0 ; i < max ; i++)
+ if (lp8725_buck_uA[i] >= min_uA &&
+ lp8725_buck_uA[i] <= max_uA)
+ break;
+
+ if (i == max)
+ return -EINVAL;
+
+ val = i << LP8725_BUCK_CL_S;
+
+ return lp872x_update_bits(lp, addr, LP8725_BUCK_CL_M, val);
+}
+
+static int lp8725_buck_get_current_limit(struct regulator_dev *rdev)
+{
+ struct lp872x *lp = rdev_get_drvdata(rdev);
+ enum lp872x_regulator_id buck = rdev_get_id(rdev);
+ u8 addr, val;
+ int ret;
+
+ switch (buck) {
+ case LP8725_ID_BUCK1:
+ addr = LP8725_BUCK1_VOUT2;
+ break;
+ case LP8725_ID_BUCK2:
+ addr = LP8725_BUCK2_VOUT2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = lp872x_read_byte(lp, addr, &val);
+ if (ret)
+ return ret;
+
+ val = (val & LP8725_BUCK_CL_M) >> LP8725_BUCK_CL_S;
+
+ return (val < ARRAY_SIZE(lp8725_buck_uA)) ?
+ lp8725_buck_uA[val] : -EINVAL;
+}
+
+static int lp872x_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct lp872x *lp = rdev_get_drvdata(rdev);
+ enum lp872x_regulator_id buck = rdev_get_id(rdev);
+ u8 addr, mask, shift, val;
+
+ switch (buck) {
+ case LP8720_ID_BUCK:
+ addr = LP8720_BUCK_VOUT2;
+ mask = LP8720_BUCK_FPWM_M;
+ shift = LP8720_BUCK_FPWM_S;
+ break;
+ case LP8725_ID_BUCK1:
+ addr = LP8725_BUCK_CTRL;
+ mask = LP8725_BUCK1_FPWM_M;
+ shift = LP8725_BUCK1_FPWM_S;
+ break;
+ case LP8725_ID_BUCK2:
+ addr = LP8725_BUCK_CTRL;
+ mask = LP8725_BUCK2_FPWM_M;
+ shift = LP8725_BUCK2_FPWM_S;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (mode == REGULATOR_MODE_FAST)
+ val = LP872X_FORCE_PWM << shift;
+ else if (mode == REGULATOR_MODE_NORMAL)
+ val = LP872X_AUTO_PWM << shift;
+ else
+ return -EINVAL;
+
+ return lp872x_update_bits(lp, addr, mask, val);
+}
+
+static unsigned int lp872x_buck_get_mode(struct regulator_dev *rdev)
+{
+ struct lp872x *lp = rdev_get_drvdata(rdev);
+ enum lp872x_regulator_id buck = rdev_get_id(rdev);
+ u8 addr, mask, val;
+ int ret;
+
+ switch (buck) {
+ case LP8720_ID_BUCK:
+ addr = LP8720_BUCK_VOUT2;
+ mask = LP8720_BUCK_FPWM_M;
+ break;
+ case LP8725_ID_BUCK1:
+ addr = LP8725_BUCK_CTRL;
+ mask = LP8725_BUCK1_FPWM_M;
+ break;
+ case LP8725_ID_BUCK2:
+ addr = LP8725_BUCK_CTRL;
+ mask = LP8725_BUCK2_FPWM_M;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = lp872x_read_byte(lp, addr, &val);
+ if (ret)
+ return ret;
+
+ return val & mask ? REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL;
+}
+
+static struct regulator_ops lp872x_ldo_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable_time = lp872x_regulator_enable_time,
+};
+
+static struct regulator_ops lp8720_buck_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = lp872x_buck_set_voltage_sel,
+ .get_voltage_sel = lp872x_buck_get_voltage_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable_time = lp872x_regulator_enable_time,
+ .set_mode = lp872x_buck_set_mode,
+ .get_mode = lp872x_buck_get_mode,
+};
+
+static struct regulator_ops lp8725_buck_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = lp872x_buck_set_voltage_sel,
+ .get_voltage_sel = lp872x_buck_get_voltage_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable_time = lp872x_regulator_enable_time,
+ .set_mode = lp872x_buck_set_mode,
+ .get_mode = lp872x_buck_get_mode,
+ .set_current_limit = lp8725_buck_set_current_limit,
+ .get_current_limit = lp8725_buck_get_current_limit,
+};
+
+static struct regulator_desc lp8720_regulator_desc[] = {
+ {
+ .name = "ldo1",
+ .id = LP8720_ID_LDO1,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
+ .volt_table = lp872x_ldo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP872X_LDO1_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8720_ENABLE,
+ .enable_mask = LP872X_EN_LDO1_M,
+ },
+ {
+ .name = "ldo2",
+ .id = LP8720_ID_LDO2,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
+ .volt_table = lp872x_ldo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP872X_LDO2_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8720_ENABLE,
+ .enable_mask = LP872X_EN_LDO2_M,
+ },
+ {
+ .name = "ldo3",
+ .id = LP8720_ID_LDO3,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
+ .volt_table = lp872x_ldo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP872X_LDO3_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8720_ENABLE,
+ .enable_mask = LP872X_EN_LDO3_M,
+ },
+ {
+ .name = "ldo4",
+ .id = LP8720_ID_LDO4,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp8720_ldo4_vtbl),
+ .volt_table = lp8720_ldo4_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP872X_LDO4_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8720_ENABLE,
+ .enable_mask = LP872X_EN_LDO4_M,
+ },
+ {
+ .name = "ldo5",
+ .id = LP8720_ID_LDO5,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
+ .volt_table = lp872x_ldo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP872X_LDO5_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8720_ENABLE,
+ .enable_mask = LP872X_EN_LDO5_M,
+ },
+ {
+ .name = "buck",
+ .id = LP8720_ID_BUCK,
+ .ops = &lp8720_buck_ops,
+ .n_voltages = ARRAY_SIZE(lp8720_buck_vtbl),
+ .volt_table = lp8720_buck_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8720_ENABLE,
+ .enable_mask = LP8720_EN_BUCK_M,
+ },
+};
+
+static struct regulator_desc lp8725_regulator_desc[] = {
+ {
+ .name = "ldo1",
+ .id = LP8725_ID_LDO1,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
+ .volt_table = lp872x_ldo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP872X_LDO1_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8725_LDO_CTRL,
+ .enable_mask = LP872X_EN_LDO1_M,
+ },
+ {
+ .name = "ldo2",
+ .id = LP8725_ID_LDO2,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
+ .volt_table = lp872x_ldo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP872X_LDO2_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8725_LDO_CTRL,
+ .enable_mask = LP872X_EN_LDO2_M,
+ },
+ {
+ .name = "ldo3",
+ .id = LP8725_ID_LDO3,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
+ .volt_table = lp872x_ldo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP872X_LDO3_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8725_LDO_CTRL,
+ .enable_mask = LP872X_EN_LDO3_M,
+ },
+ {
+ .name = "ldo4",
+ .id = LP8725_ID_LDO4,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
+ .volt_table = lp872x_ldo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP872X_LDO4_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8725_LDO_CTRL,
+ .enable_mask = LP872X_EN_LDO4_M,
+ },
+ {
+ .name = "ldo5",
+ .id = LP8725_ID_LDO5,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
+ .volt_table = lp872x_ldo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP872X_LDO5_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8725_LDO_CTRL,
+ .enable_mask = LP872X_EN_LDO5_M,
+ },
+ {
+ .name = "lilo1",
+ .id = LP8725_ID_LILO1,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp8725_lilo_vtbl),
+ .volt_table = lp8725_lilo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8725_LILO1_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8725_LDO_CTRL,
+ .enable_mask = LP8725_EN_LILO1_M,
+ },
+ {
+ .name = "lilo2",
+ .id = LP8725_ID_LILO2,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp8725_lilo_vtbl),
+ .volt_table = lp8725_lilo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8725_LILO2_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8725_LDO_CTRL,
+ .enable_mask = LP8725_EN_LILO2_M,
+ },
+ {
+ .name = "buck1",
+ .id = LP8725_ID_BUCK1,
+ .ops = &lp8725_buck_ops,
+ .n_voltages = ARRAY_SIZE(lp8725_buck_vtbl),
+ .volt_table = lp8725_buck_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP872X_GENERAL_CFG,
+ .enable_mask = LP8725_BUCK1_EN_M,
+ },
+ {
+ .name = "buck2",
+ .id = LP8725_ID_BUCK2,
+ .ops = &lp8725_buck_ops,
+ .n_voltages = ARRAY_SIZE(lp8725_buck_vtbl),
+ .volt_table = lp8725_buck_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP872X_GENERAL_CFG,
+ .enable_mask = LP8725_BUCK2_EN_M,
+ },
+};
+
+static int lp872x_check_dvs_validity(struct lp872x *lp)
+{
+ struct lp872x_dvs *dvs = lp->pdata->dvs;
+ u8 val = 0;
+ int ret;
+
+ ret = lp872x_read_byte(lp, LP872X_GENERAL_CFG, &val);
+ if (ret)
+ return ret;
+
+ ret = 0;
+ if (lp->chipid == LP8720) {
+ if (val & LP8720_EXT_DVS_M)
+ ret = dvs ? 0 : -EINVAL;
+ } else {
+ if ((val & LP8725_DVS1_M) == EXTERN_DVS_USED)
+ ret = dvs ? 0 : -EINVAL;
+ }
+
+ return ret;
+}
+
+static int lp872x_init_dvs(struct lp872x *lp)
+{
+ int ret, gpio;
+ struct lp872x_dvs *dvs = lp->pdata->dvs;
+ enum lp872x_dvs_state pinstate;
+
+ ret = lp872x_check_dvs_validity(lp);
+ if (ret) {
+ dev_warn(lp->dev, "invalid dvs data: %d\n", ret);
+ return ret;
+ }
+
+ gpio = dvs->gpio;
+ if (!gpio_is_valid(gpio)) {
+ dev_err(lp->dev, "invalid gpio: %d\n", gpio);
+ return -EINVAL;
+ }
+
+ pinstate = dvs->init_state;
+ ret = devm_gpio_request_one(lp->dev, gpio, pinstate, "LP872X DVS");
+ if (ret) {
+ dev_err(lp->dev, "gpio request err: %d\n", ret);
+ return ret;
+ }
+
+ lp->dvs_pin = pinstate;
+ lp->dvs_gpio = gpio;
+
+ return 0;
+}
+
+static int lp872x_config(struct lp872x *lp)
+{
+ struct lp872x_platform_data *pdata = lp->pdata;
+ int ret;
+
+ if (!pdata->update_config)
+ return 0;
+
+ ret = lp872x_write_byte(lp, LP872X_GENERAL_CFG, pdata->general_config);
+ if (ret)
+ return ret;
+
+ return lp872x_init_dvs(lp);
+}
+
+static struct regulator_init_data
+*lp872x_find_regulator_init_data(int id, struct lp872x *lp)
+{
+ int i;
+
+ for (i = 0; i < lp->num_regulators; i++) {
+ if (lp->pdata->regulator_data[i].id == id)
+ return lp->pdata->regulator_data[i].init_data;
+ }
+
+ return NULL;
+}
+
+static int lp872x_regulator_register(struct lp872x *lp)
+{
+ struct regulator_desc *desc;
+ struct regulator_config cfg = { };
+ struct regulator_dev *rdev;
+ int i, ret;
+
+ for (i = 0 ; i < lp->num_regulators ; i++) {
+ desc = (lp->chipid == LP8720) ? &lp8720_regulator_desc[i] :
+ &lp8725_regulator_desc[i];
+
+ cfg.dev = lp->dev;
+ cfg.init_data = lp872x_find_regulator_init_data(desc->id, lp);
+ cfg.driver_data = lp;
+ cfg.regmap = lp->regmap;
+
+ rdev = regulator_register(desc, &cfg);
+ if (IS_ERR(rdev)) {
+ dev_err(lp->dev, "regulator register err\n");
+ ret = PTR_ERR(rdev);
+ goto err;
+ }
+
+ *(lp->regulators + i) = rdev;
+ }
+
+ return 0;
+err:
+ while (--i >= 0) {
+ rdev = *(lp->regulators + i);
+ regulator_unregister(rdev);
+ }
+ return ret;
+}
+
+static void lp872x_regulator_unregister(struct lp872x *lp)
+{
+ struct regulator_dev *rdev;
+ int i;
+
+ for (i = 0 ; i < lp->num_regulators ; i++) {
+ rdev = *(lp->regulators + i);
+ regulator_unregister(rdev);
+ }
+}
+
+static const struct regmap_config lp872x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX_REGISTERS,
+};
+
+static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
+{
+ struct lp872x *lp;
+ struct lp872x_platform_data *pdata = cl->dev.platform_data;
+ int ret, size, num_regulators;
+ const int lp872x_num_regulators[] = {
+ [LP8720] = LP8720_NUM_REGULATORS,
+ [LP8725] = LP8725_NUM_REGULATORS,
+ };
+
+ if (!pdata) {
+ dev_err(&cl->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL);
+ if (!lp)
+ goto err_mem;
+
+ num_regulators = lp872x_num_regulators[id->driver_data];
+ size = sizeof(struct regulator_dev *) * num_regulators;
+
+ lp->regulators = devm_kzalloc(&cl->dev, size, GFP_KERNEL);
+ if (!lp->regulators)
+ goto err_mem;
+
+ lp->regmap = devm_regmap_init_i2c(cl, &lp872x_regmap_config);
+ if (IS_ERR(lp->regmap)) {
+ ret = PTR_ERR(lp->regmap);
+ dev_err(&cl->dev, "regmap init i2c err: %d\n", ret);
+ goto err_dev;
+ }
+
+ lp->dev = &cl->dev;
+ lp->pdata = pdata;
+ lp->chipid = id->driver_data;
+ lp->num_regulators = num_regulators;
+ i2c_set_clientdata(cl, lp);
+
+ ret = lp872x_config(lp);
+ if (ret)
+ goto err_dev;
+
+ return lp872x_regulator_register(lp);
+
+err_mem:
+ return -ENOMEM;
+err_dev:
+ return ret;
+}
+
+static int __devexit lp872x_remove(struct i2c_client *cl)
+{
+ struct lp872x *lp = i2c_get_clientdata(cl);
+
+ lp872x_regulator_unregister(lp);
+ return 0;
+}
+
+static const struct i2c_device_id lp872x_ids[] = {
+ {"lp8720", LP8720},
+ {"lp8725", LP8725},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lp872x_ids);
+
+static struct i2c_driver lp872x_driver = {
+ .driver = {
+ .name = "lp872x",
+ .owner = THIS_MODULE,
+ },
+ .probe = lp872x_probe,
+ .remove = __devexit_p(lp872x_remove),
+ .id_table = lp872x_ids,
+};
+
+module_i2c_driver(lp872x_driver);
+
+MODULE_DESCRIPTION("TI/National Semiconductor LP872x PMU Regulator Driver");
+MODULE_AUTHOR("Milo Kim");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/lp8788-buck.c b/drivers/regulator/lp8788-buck.c
new file mode 100644
index 0000000..6356e82
--- /dev/null
+++ b/drivers/regulator/lp8788-buck.c
@@ -0,0 +1,629 @@
+/*
+ * TI LP8788 MFD - buck regulator driver
+ *
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/mfd/lp8788.h>
+#include <linux/gpio.h>
+
+/* register address */
+#define LP8788_EN_BUCK 0x0C
+#define LP8788_BUCK_DVS_SEL 0x1D
+#define LP8788_BUCK1_VOUT0 0x1E
+#define LP8788_BUCK1_VOUT1 0x1F
+#define LP8788_BUCK1_VOUT2 0x20
+#define LP8788_BUCK1_VOUT3 0x21
+#define LP8788_BUCK2_VOUT0 0x22
+#define LP8788_BUCK2_VOUT1 0x23
+#define LP8788_BUCK2_VOUT2 0x24
+#define LP8788_BUCK2_VOUT3 0x25
+#define LP8788_BUCK3_VOUT 0x26
+#define LP8788_BUCK4_VOUT 0x27
+#define LP8788_BUCK1_TIMESTEP 0x28
+#define LP8788_BUCK_PWM 0x2D
+
+/* mask/shift bits */
+#define LP8788_EN_BUCK1_M BIT(0) /* Addr 0Ch */
+#define LP8788_EN_BUCK2_M BIT(1)
+#define LP8788_EN_BUCK3_M BIT(2)
+#define LP8788_EN_BUCK4_M BIT(3)
+#define LP8788_BUCK1_DVS_SEL_M 0x04 /* Addr 1Dh */
+#define LP8788_BUCK1_DVS_M 0x03
+#define LP8788_BUCK1_DVS_S 0
+#define LP8788_BUCK2_DVS_SEL_M 0x40
+#define LP8788_BUCK2_DVS_M 0x30
+#define LP8788_BUCK2_DVS_S 4
+#define LP8788_BUCK1_DVS_I2C BIT(2)
+#define LP8788_BUCK2_DVS_I2C BIT(6)
+#define LP8788_BUCK1_DVS_PIN (0 << 2)
+#define LP8788_BUCK2_DVS_PIN (0 << 6)
+#define LP8788_VOUT_M 0x1F /* Addr 1Eh ~ 27h */
+#define LP8788_STARTUP_TIME_M 0xF8 /* Addr 28h ~ 2Bh */
+#define LP8788_STARTUP_TIME_S 3
+#define LP8788_FPWM_BUCK1_M BIT(0) /* Addr 2Dh */
+#define LP8788_FPWM_BUCK1_S 0
+#define LP8788_FPWM_BUCK2_M BIT(1)
+#define LP8788_FPWM_BUCK2_S 1
+#define LP8788_FPWM_BUCK3_M BIT(2)
+#define LP8788_FPWM_BUCK3_S 2
+#define LP8788_FPWM_BUCK4_M BIT(3)
+#define LP8788_FPWM_BUCK4_S 3
+
+#define INVALID_ADDR 0xFF
+#define LP8788_FORCE_PWM 1
+#define LP8788_AUTO_PWM 0
+#define PIN_LOW 0
+#define PIN_HIGH 1
+#define ENABLE_TIME_USEC 32
+
+enum lp8788_dvs_state {
+ DVS_LOW = GPIOF_OUT_INIT_LOW,
+ DVS_HIGH = GPIOF_OUT_INIT_HIGH,
+};
+
+enum lp8788_dvs_mode {
+ REGISTER,
+ EXTPIN,
+};
+
+enum lp8788_buck_id {
+ BUCK1,
+ BUCK2,
+ BUCK3,
+ BUCK4,
+};
+
+struct lp8788_pwm_map {
+ u8 mask;
+ u8 shift;
+};
+
+struct lp8788_buck {
+ struct lp8788 *lp;
+ struct regulator_dev *regulator;
+ struct lp8788_pwm_map *pmap;
+ void *dvs;
+};
+
+/* BUCK 1 ~ 4 voltage table */
+static const int lp8788_buck_vtbl[] = {
+ 500000, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000,
+ 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000,
+ 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000,
+ 1950000, 2000000,
+};
+
+/* buck pwm mode selection : used for set/get_mode in regulator ops
+ * @forced pwm : fast mode
+ * @auto pwm : normal mode
+ */
+static struct lp8788_pwm_map buck_pmap[] = {
+ [BUCK1] = {
+ .mask = LP8788_FPWM_BUCK1_M,
+ .shift = LP8788_FPWM_BUCK1_S,
+ },
+ [BUCK2] = {
+ .mask = LP8788_FPWM_BUCK2_M,
+ .shift = LP8788_FPWM_BUCK2_S,
+ },
+ [BUCK3] = {
+ .mask = LP8788_FPWM_BUCK3_M,
+ .shift = LP8788_FPWM_BUCK3_S,
+ },
+ [BUCK4] = {
+ .mask = LP8788_FPWM_BUCK4_M,
+ .shift = LP8788_FPWM_BUCK4_S,
+ },
+};
+
+static const u8 buck1_vout_addr[] = {
+ LP8788_BUCK1_VOUT0, LP8788_BUCK1_VOUT1,
+ LP8788_BUCK1_VOUT2, LP8788_BUCK1_VOUT3,
+};
+
+static const u8 buck2_vout_addr[] = {
+ LP8788_BUCK2_VOUT0, LP8788_BUCK2_VOUT1,
+ LP8788_BUCK2_VOUT2, LP8788_BUCK2_VOUT3,
+};
+
+static void lp8788_buck1_set_dvs(struct lp8788_buck *buck)
+{
+ struct lp8788_buck1_dvs *dvs = (struct lp8788_buck1_dvs *)buck->dvs;
+ enum lp8788_dvs_state pinstate;
+
+ if (!dvs)
+ return;
+
+ pinstate = dvs->vsel == DVS_SEL_V0 ? DVS_LOW : DVS_HIGH;
+ if (gpio_is_valid(dvs->gpio))
+ gpio_set_value(dvs->gpio, pinstate);
+}
+
+static void lp8788_buck2_set_dvs(struct lp8788_buck *buck)
+{
+ struct lp8788_buck2_dvs *dvs = (struct lp8788_buck2_dvs *)buck->dvs;
+ enum lp8788_dvs_state pin1, pin2;
+
+ if (!dvs)
+ return;
+
+ switch (dvs->vsel) {
+ case DVS_SEL_V0:
+ pin1 = DVS_LOW;
+ pin2 = DVS_LOW;
+ break;
+ case DVS_SEL_V1:
+ pin1 = DVS_HIGH;
+ pin2 = DVS_LOW;
+ break;
+ case DVS_SEL_V2:
+ pin1 = DVS_LOW;
+ pin2 = DVS_HIGH;
+ break;
+ case DVS_SEL_V3:
+ pin1 = DVS_HIGH;
+ pin2 = DVS_HIGH;
+ break;
+ default:
+ return;
+ }
+
+ if (gpio_is_valid(dvs->gpio[0]))
+ gpio_set_value(dvs->gpio[0], pin1);
+
+ if (gpio_is_valid(dvs->gpio[1]))
+ gpio_set_value(dvs->gpio[1], pin2);
+}
+
+static void lp8788_set_dvs(struct lp8788_buck *buck, enum lp8788_buck_id id)
+{
+ switch (id) {
+ case BUCK1:
+ lp8788_buck1_set_dvs(buck);
+ break;
+ case BUCK2:
+ lp8788_buck2_set_dvs(buck);
+ break;
+ default:
+ break;
+ }
+}
+
+static enum lp8788_dvs_mode
+lp8788_get_buck_dvs_ctrl_mode(struct lp8788_buck *buck, enum lp8788_buck_id id)
+{
+ u8 val, mask;
+
+ switch (id) {
+ case BUCK1:
+ mask = LP8788_BUCK1_DVS_SEL_M;
+ break;
+ case BUCK2:
+ mask = LP8788_BUCK2_DVS_SEL_M;
+ break;
+ default:
+ return REGISTER;
+ }
+
+ lp8788_read_byte(buck->lp, LP8788_BUCK_DVS_SEL, &val);
+
+ return val & mask ? REGISTER : EXTPIN;
+}
+
+static bool lp8788_is_valid_buck_addr(u8 addr)
+{
+ switch (addr) {
+ case LP8788_BUCK1_VOUT0:
+ case LP8788_BUCK1_VOUT1:
+ case LP8788_BUCK1_VOUT2:
+ case LP8788_BUCK1_VOUT3:
+ case LP8788_BUCK2_VOUT0:
+ case LP8788_BUCK2_VOUT1:
+ case LP8788_BUCK2_VOUT2:
+ case LP8788_BUCK2_VOUT3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static u8 lp8788_select_buck_vout_addr(struct lp8788_buck *buck,
+ enum lp8788_buck_id id)
+{
+ enum lp8788_dvs_mode mode = lp8788_get_buck_dvs_ctrl_mode(buck, id);
+ struct lp8788_buck1_dvs *b1_dvs;
+ struct lp8788_buck2_dvs *b2_dvs;
+ u8 val, idx, addr;
+ int pin1, pin2;
+
+ switch (id) {
+ case BUCK1:
+ if (mode == EXTPIN) {
+ b1_dvs = (struct lp8788_buck1_dvs *)buck->dvs;
+ if (!b1_dvs)
+ goto err;
+
+ idx = gpio_get_value(b1_dvs->gpio) ? 1 : 0;
+ } else {
+ lp8788_read_byte(buck->lp, LP8788_BUCK_DVS_SEL, &val);
+ idx = (val & LP8788_BUCK1_DVS_M) >> LP8788_BUCK1_DVS_S;
+ }
+ addr = buck1_vout_addr[idx];
+ break;
+ case BUCK2:
+ if (mode == EXTPIN) {
+ b2_dvs = (struct lp8788_buck2_dvs *)buck->dvs;
+ if (!b2_dvs)
+ goto err;
+
+ pin1 = gpio_get_value(b2_dvs->gpio[0]);
+ pin2 = gpio_get_value(b2_dvs->gpio[1]);
+
+ if (pin1 == PIN_LOW && pin2 == PIN_LOW)
+ idx = 0;
+ else if (pin1 == PIN_LOW && pin2 == PIN_HIGH)
+ idx = 2;
+ else if (pin1 == PIN_HIGH && pin2 == PIN_LOW)
+ idx = 1;
+ else
+ idx = 3;
+ } else {
+ lp8788_read_byte(buck->lp, LP8788_BUCK_DVS_SEL, &val);
+ idx = (val & LP8788_BUCK2_DVS_M) >> LP8788_BUCK2_DVS_S;
+ }
+ addr = buck2_vout_addr[idx];
+ break;
+ default:
+ goto err;
+ }
+
+ return addr;
+err:
+ return INVALID_ADDR;
+}
+
+static int lp8788_buck12_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct lp8788_buck *buck = rdev_get_drvdata(rdev);
+ enum lp8788_buck_id id = rdev_get_id(rdev);
+ u8 addr;
+
+ if (buck->dvs)
+ lp8788_set_dvs(buck, id);
+
+ addr = lp8788_select_buck_vout_addr(buck, id);
+ if (!lp8788_is_valid_buck_addr(addr))
+ return -EINVAL;
+
+ return lp8788_update_bits(buck->lp, addr, LP8788_VOUT_M, selector);
+}
+
+static int lp8788_buck12_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct lp8788_buck *buck = rdev_get_drvdata(rdev);
+ enum lp8788_buck_id id = rdev_get_id(rdev);
+ int ret;
+ u8 val, addr;
+
+ addr = lp8788_select_buck_vout_addr(buck, id);
+ if (!lp8788_is_valid_buck_addr(addr))
+ return -EINVAL;
+
+ ret = lp8788_read_byte(buck->lp, addr, &val);
+ if (ret)
+ return ret;
+
+ return val & LP8788_VOUT_M;
+}
+
+static int lp8788_buck_enable_time(struct regulator_dev *rdev)
+{
+ struct lp8788_buck *buck = rdev_get_drvdata(rdev);
+ enum lp8788_buck_id id = rdev_get_id(rdev);
+ u8 val, addr = LP8788_BUCK1_TIMESTEP + id;
+
+ if (lp8788_read_byte(buck->lp, addr, &val))
+ return -EINVAL;
+
+ val = (val & LP8788_STARTUP_TIME_M) >> LP8788_STARTUP_TIME_S;
+
+ return ENABLE_TIME_USEC * val;
+}
+
+static int lp8788_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct lp8788_buck *buck = rdev_get_drvdata(rdev);
+ struct lp8788_pwm_map *pmap = buck->pmap;
+ u8 val;
+
+ if (!pmap)
+ return -EINVAL;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ val = LP8788_FORCE_PWM << pmap->shift;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ val = LP8788_AUTO_PWM << pmap->shift;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return lp8788_update_bits(buck->lp, LP8788_BUCK_PWM, pmap->mask, val);
+}
+
+static unsigned int lp8788_buck_get_mode(struct regulator_dev *rdev)
+{
+ struct lp8788_buck *buck = rdev_get_drvdata(rdev);
+ struct lp8788_pwm_map *pmap = buck->pmap;
+ u8 val;
+ int ret;
+
+ if (!pmap)
+ return -EINVAL;
+
+ ret = lp8788_read_byte(buck->lp, LP8788_BUCK_PWM, &val);
+ if (ret)
+ return ret;
+
+ return val & pmap->mask ? REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL;
+}
+
+static struct regulator_ops lp8788_buck12_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = lp8788_buck12_set_voltage_sel,
+ .get_voltage_sel = lp8788_buck12_get_voltage_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable_time = lp8788_buck_enable_time,
+ .set_mode = lp8788_buck_set_mode,
+ .get_mode = lp8788_buck_get_mode,
+};
+
+static struct regulator_ops lp8788_buck34_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable_time = lp8788_buck_enable_time,
+ .set_mode = lp8788_buck_set_mode,
+ .get_mode = lp8788_buck_get_mode,
+};
+
+static struct regulator_desc lp8788_buck_desc[] = {
+ {
+ .name = "buck1",
+ .id = BUCK1,
+ .ops = &lp8788_buck12_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl),
+ .volt_table = lp8788_buck_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8788_EN_BUCK,
+ .enable_mask = LP8788_EN_BUCK1_M,
+ },
+ {
+ .name = "buck2",
+ .id = BUCK2,
+ .ops = &lp8788_buck12_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl),
+ .volt_table = lp8788_buck_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8788_EN_BUCK,
+ .enable_mask = LP8788_EN_BUCK2_M,
+ },
+ {
+ .name = "buck3",
+ .id = BUCK3,
+ .ops = &lp8788_buck34_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl),
+ .volt_table = lp8788_buck_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_BUCK3_VOUT,
+ .vsel_mask = LP8788_VOUT_M,
+ .enable_reg = LP8788_EN_BUCK,
+ .enable_mask = LP8788_EN_BUCK3_M,
+ },
+ {
+ .name = "buck4",
+ .id = BUCK4,
+ .ops = &lp8788_buck34_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl),
+ .volt_table = lp8788_buck_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_BUCK4_VOUT,
+ .vsel_mask = LP8788_VOUT_M,
+ .enable_reg = LP8788_EN_BUCK,
+ .enable_mask = LP8788_EN_BUCK4_M,
+ },
+};
+
+static int lp8788_set_default_dvs_ctrl_mode(struct lp8788 *lp,
+ enum lp8788_buck_id id)
+{
+ u8 mask, val;
+
+ switch (id) {
+ case BUCK1:
+ mask = LP8788_BUCK1_DVS_SEL_M;
+ val = LP8788_BUCK1_DVS_I2C;
+ break;
+ case BUCK2:
+ mask = LP8788_BUCK2_DVS_SEL_M;
+ val = LP8788_BUCK2_DVS_I2C;
+ break;
+ default:
+ return 0;
+ }
+
+ return lp8788_update_bits(lp, LP8788_BUCK_DVS_SEL, mask, val);
+}
+
+static int _gpio_request(struct lp8788_buck *buck, int gpio, char *name)
+{
+ struct device *dev = buck->lp->dev;
+
+ if (!gpio_is_valid(gpio)) {
+ dev_err(dev, "invalid gpio: %d\n", gpio);
+ return -EINVAL;
+ }
+
+ return devm_gpio_request_one(dev, gpio, DVS_LOW, name);
+}
+
+static int lp8788_dvs_gpio_request(struct lp8788_buck *buck,
+ enum lp8788_buck_id id)
+{
+ struct lp8788_platform_data *pdata = buck->lp->pdata;
+ char *b1_name = "LP8788_B1_DVS";
+ char *b2_name[] = { "LP8788_B2_DVS1", "LP8788_B2_DVS2" };
+ int i, gpio, ret;
+
+ switch (id) {
+ case BUCK1:
+ gpio = pdata->buck1_dvs->gpio;
+ ret = _gpio_request(buck, gpio, b1_name);
+ if (ret)
+ return ret;
+
+ buck->dvs = pdata->buck1_dvs;
+ break;
+ case BUCK2:
+ for (i = 0 ; i < LP8788_NUM_BUCK2_DVS ; i++) {
+ gpio = pdata->buck2_dvs->gpio[i];
+ ret = _gpio_request(buck, gpio, b2_name[i]);
+ if (ret)
+ return ret;
+ }
+ buck->dvs = pdata->buck2_dvs;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int lp8788_init_dvs(struct lp8788_buck *buck, enum lp8788_buck_id id)
+{
+ struct lp8788_platform_data *pdata = buck->lp->pdata;
+ u8 mask[] = { LP8788_BUCK1_DVS_SEL_M, LP8788_BUCK2_DVS_SEL_M };
+ u8 val[] = { LP8788_BUCK1_DVS_PIN, LP8788_BUCK2_DVS_PIN };
+
+ /* no dvs for buck3, 4 */
+ if (id == BUCK3 || id == BUCK4)
+ return 0;
+
+ /* no dvs platform data, then dvs will be selected by I2C registers */
+ if (!pdata)
+ goto set_default_dvs_mode;
+
+ if ((id == BUCK1 && !pdata->buck1_dvs) ||
+ (id == BUCK2 && !pdata->buck2_dvs))
+ goto set_default_dvs_mode;
+
+ if (lp8788_dvs_gpio_request(buck, id))
+ goto set_default_dvs_mode;
+
+ return lp8788_update_bits(buck->lp, LP8788_BUCK_DVS_SEL, mask[id],
+ val[id]);
+
+set_default_dvs_mode:
+ return lp8788_set_default_dvs_ctrl_mode(buck->lp, id);
+}
+
+static __devinit int lp8788_buck_probe(struct platform_device *pdev)
+{
+ struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
+ int id = pdev->id;
+ struct lp8788_buck *buck;
+ struct regulator_config cfg = { };
+ struct regulator_dev *rdev;
+ int ret;
+
+ buck = devm_kzalloc(lp->dev, sizeof(struct lp8788_buck), GFP_KERNEL);
+ if (!buck)
+ return -ENOMEM;
+
+ buck->lp = lp;
+ buck->pmap = &buck_pmap[id];
+
+ ret = lp8788_init_dvs(buck, id);
+ if (ret)
+ return ret;
+
+ cfg.dev = lp->dev;
+ cfg.init_data = lp->pdata ? lp->pdata->buck_data[id] : NULL;
+ cfg.driver_data = buck;
+ cfg.regmap = lp->regmap;
+
+ rdev = regulator_register(&lp8788_buck_desc[id], &cfg);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(lp->dev, "BUCK%d regulator register err = %d\n",
+ id + 1, ret);
+ return ret;
+ }
+
+ buck->regulator = rdev;
+ platform_set_drvdata(pdev, buck);
+
+ return 0;
+}
+
+static int __devexit lp8788_buck_remove(struct platform_device *pdev)
+{
+ struct lp8788_buck *buck = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ regulator_unregister(buck->regulator);
+
+ return 0;
+}
+
+static struct platform_driver lp8788_buck_driver = {
+ .probe = lp8788_buck_probe,
+ .remove = __devexit_p(lp8788_buck_remove),
+ .driver = {
+ .name = LP8788_DEV_BUCK,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init lp8788_buck_init(void)
+{
+ return platform_driver_register(&lp8788_buck_driver);
+}
+subsys_initcall(lp8788_buck_init);
+
+static void __exit lp8788_buck_exit(void)
+{
+ platform_driver_unregister(&lp8788_buck_driver);
+}
+module_exit(lp8788_buck_exit);
+
+MODULE_DESCRIPTION("TI LP8788 BUCK Driver");
+MODULE_AUTHOR("Milo Kim");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lp8788-buck");
diff --git a/drivers/regulator/lp8788-ldo.c b/drivers/regulator/lp8788-ldo.c
new file mode 100644
index 0000000..d2122e4
--- /dev/null
+++ b/drivers/regulator/lp8788-ldo.c
@@ -0,0 +1,842 @@
+/*
+ * TI LP8788 MFD - ldo regulator driver
+ *
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/gpio.h>
+#include <linux/mfd/lp8788.h>
+
+/* register address */
+#define LP8788_EN_LDO_A 0x0D /* DLDO 1 ~ 8 */
+#define LP8788_EN_LDO_B 0x0E /* DLDO 9 ~ 12, ALDO 1 ~ 4 */
+#define LP8788_EN_LDO_C 0x0F /* ALDO 5 ~ 10 */
+#define LP8788_EN_SEL 0x10
+#define LP8788_DLDO1_VOUT 0x2E
+#define LP8788_DLDO2_VOUT 0x2F
+#define LP8788_DLDO3_VOUT 0x30
+#define LP8788_DLDO4_VOUT 0x31
+#define LP8788_DLDO5_VOUT 0x32
+#define LP8788_DLDO6_VOUT 0x33
+#define LP8788_DLDO7_VOUT 0x34
+#define LP8788_DLDO8_VOUT 0x35
+#define LP8788_DLDO9_VOUT 0x36
+#define LP8788_DLDO10_VOUT 0x37
+#define LP8788_DLDO11_VOUT 0x38
+#define LP8788_DLDO12_VOUT 0x39
+#define LP8788_ALDO1_VOUT 0x3A
+#define LP8788_ALDO2_VOUT 0x3B
+#define LP8788_ALDO3_VOUT 0x3C
+#define LP8788_ALDO4_VOUT 0x3D
+#define LP8788_ALDO5_VOUT 0x3E
+#define LP8788_ALDO6_VOUT 0x3F
+#define LP8788_ALDO7_VOUT 0x40
+#define LP8788_ALDO8_VOUT 0x41
+#define LP8788_ALDO9_VOUT 0x42
+#define LP8788_ALDO10_VOUT 0x43
+#define LP8788_DLDO1_TIMESTEP 0x44
+
+/* mask/shift bits */
+#define LP8788_EN_DLDO1_M BIT(0) /* Addr 0Dh ~ 0Fh */
+#define LP8788_EN_DLDO2_M BIT(1)
+#define LP8788_EN_DLDO3_M BIT(2)
+#define LP8788_EN_DLDO4_M BIT(3)
+#define LP8788_EN_DLDO5_M BIT(4)
+#define LP8788_EN_DLDO6_M BIT(5)
+#define LP8788_EN_DLDO7_M BIT(6)
+#define LP8788_EN_DLDO8_M BIT(7)
+#define LP8788_EN_DLDO9_M BIT(0)
+#define LP8788_EN_DLDO10_M BIT(1)
+#define LP8788_EN_DLDO11_M BIT(2)
+#define LP8788_EN_DLDO12_M BIT(3)
+#define LP8788_EN_ALDO1_M BIT(4)
+#define LP8788_EN_ALDO2_M BIT(5)
+#define LP8788_EN_ALDO3_M BIT(6)
+#define LP8788_EN_ALDO4_M BIT(7)
+#define LP8788_EN_ALDO5_M BIT(0)
+#define LP8788_EN_ALDO6_M BIT(1)
+#define LP8788_EN_ALDO7_M BIT(2)
+#define LP8788_EN_ALDO8_M BIT(3)
+#define LP8788_EN_ALDO9_M BIT(4)
+#define LP8788_EN_ALDO10_M BIT(5)
+#define LP8788_EN_SEL_DLDO911_M BIT(0) /* Addr 10h */
+#define LP8788_EN_SEL_DLDO7_M BIT(1)
+#define LP8788_EN_SEL_ALDO7_M BIT(2)
+#define LP8788_EN_SEL_ALDO5_M BIT(3)
+#define LP8788_EN_SEL_ALDO234_M BIT(4)
+#define LP8788_EN_SEL_ALDO1_M BIT(5)
+#define LP8788_VOUT_5BIT_M 0x1F /* Addr 2Eh ~ 43h */
+#define LP8788_VOUT_4BIT_M 0x0F
+#define LP8788_VOUT_3BIT_M 0x07
+#define LP8788_VOUT_1BIT_M 0x01
+#define LP8788_STARTUP_TIME_M 0xF8 /* Addr 44h ~ 59h */
+#define LP8788_STARTUP_TIME_S 3
+
+#define ENABLE_TIME_USEC 32
+#define ENABLE GPIOF_OUT_INIT_HIGH
+#define DISABLE GPIOF_OUT_INIT_LOW
+
+enum lp8788_enable_mode {
+ REGISTER,
+ EXTPIN,
+};
+
+enum lp8788_ldo_id {
+ DLDO1,
+ DLDO2,
+ DLDO3,
+ DLDO4,
+ DLDO5,
+ DLDO6,
+ DLDO7,
+ DLDO8,
+ DLDO9,
+ DLDO10,
+ DLDO11,
+ DLDO12,
+ ALDO1,
+ ALDO2,
+ ALDO3,
+ ALDO4,
+ ALDO5,
+ ALDO6,
+ ALDO7,
+ ALDO8,
+ ALDO9,
+ ALDO10,
+};
+
+struct lp8788_ldo {
+ struct lp8788 *lp;
+ struct regulator_desc *desc;
+ struct regulator_dev *regulator;
+ struct lp8788_ldo_enable_pin *en_pin;
+};
+
+/* DLDO 1, 2, 3, 9 voltage table */
+static const int lp8788_dldo1239_vtbl[] = {
+ 1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000,
+ 2600000, 2700000, 2800000, 2900000, 3000000, 2850000, 2850000, 2850000,
+ 2850000, 2850000, 2850000, 2850000, 2850000, 2850000, 2850000, 2850000,
+ 2850000, 2850000, 2850000, 2850000, 2850000, 2850000, 2850000, 2850000,
+};
+
+/* DLDO 4 voltage table */
+static const int lp8788_dldo4_vtbl[] = { 1800000, 3000000 };
+
+/* DLDO 5, 7, 8 and ALDO 6 voltage table */
+static const int lp8788_dldo578_aldo6_vtbl[] = {
+ 1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000,
+ 2600000, 2700000, 2800000, 2900000, 3000000, 3000000, 3000000, 3000000,
+};
+
+/* DLDO 6 voltage table */
+static const int lp8788_dldo6_vtbl[] = {
+ 3000000, 3100000, 3200000, 3300000, 3400000, 3500000, 3600000, 3600000,
+};
+
+/* DLDO 10, 11 voltage table */
+static const int lp8788_dldo1011_vtbl[] = {
+ 1100000, 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000,
+ 1500000, 1500000, 1500000, 1500000, 1500000, 1500000, 1500000, 1500000,
+};
+
+/* ALDO 1 voltage table */
+static const int lp8788_aldo1_vtbl[] = { 1800000, 2850000 };
+
+/* ALDO 7 voltage table */
+static const int lp8788_aldo7_vtbl[] = {
+ 1200000, 1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1800000,
+};
+
+static enum lp8788_ldo_id lp8788_dldo_id[] = {
+ DLDO1,
+ DLDO2,
+ DLDO3,
+ DLDO4,
+ DLDO5,
+ DLDO6,
+ DLDO7,
+ DLDO8,
+ DLDO9,
+ DLDO10,
+ DLDO11,
+ DLDO12,
+};
+
+static enum lp8788_ldo_id lp8788_aldo_id[] = {
+ ALDO1,
+ ALDO2,
+ ALDO3,
+ ALDO4,
+ ALDO5,
+ ALDO6,
+ ALDO7,
+ ALDO8,
+ ALDO9,
+ ALDO10,
+};
+
+/* DLDO 7, 9 and 11, ALDO 1 ~ 5 and 7
+ : can be enabled either by external pin or by i2c register */
+static enum lp8788_enable_mode
+lp8788_get_ldo_enable_mode(struct lp8788_ldo *ldo, enum lp8788_ldo_id id)
+{
+ int ret;
+ u8 val, mask;
+
+ ret = lp8788_read_byte(ldo->lp, LP8788_EN_SEL, &val);
+ if (ret)
+ return ret;
+
+ switch (id) {
+ case DLDO7:
+ mask = LP8788_EN_SEL_DLDO7_M;
+ break;
+ case DLDO9:
+ case DLDO11:
+ mask = LP8788_EN_SEL_DLDO911_M;
+ break;
+ case ALDO1:
+ mask = LP8788_EN_SEL_ALDO1_M;
+ break;
+ case ALDO2 ... ALDO4:
+ mask = LP8788_EN_SEL_ALDO234_M;
+ break;
+ case ALDO5:
+ mask = LP8788_EN_SEL_ALDO5_M;
+ break;
+ case ALDO7:
+ mask = LP8788_EN_SEL_ALDO7_M;
+ break;
+ default:
+ return REGISTER;
+ }
+
+ return val & mask ? EXTPIN : REGISTER;
+}
+
+static int lp8788_ldo_ctrl_by_extern_pin(struct lp8788_ldo *ldo, int pinstate)
+{
+ struct lp8788_ldo_enable_pin *pin = ldo->en_pin;
+
+ if (!pin)
+ return -EINVAL;
+
+ if (gpio_is_valid(pin->gpio))
+ gpio_set_value(pin->gpio, pinstate);
+
+ return 0;
+}
+
+static int lp8788_ldo_is_enabled_by_extern_pin(struct lp8788_ldo *ldo)
+{
+ struct lp8788_ldo_enable_pin *pin = ldo->en_pin;
+
+ if (!pin)
+ return -EINVAL;
+
+ return gpio_get_value(pin->gpio) ? 1 : 0;
+}
+
+static int lp8788_ldo_enable(struct regulator_dev *rdev)
+{
+ struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
+ enum lp8788_ldo_id id = rdev_get_id(rdev);
+ enum lp8788_enable_mode mode = lp8788_get_ldo_enable_mode(ldo, id);
+
+ switch (mode) {
+ case EXTPIN:
+ return lp8788_ldo_ctrl_by_extern_pin(ldo, ENABLE);
+ case REGISTER:
+ return regulator_enable_regmap(rdev);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int lp8788_ldo_disable(struct regulator_dev *rdev)
+{
+ struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
+ enum lp8788_ldo_id id = rdev_get_id(rdev);
+ enum lp8788_enable_mode mode = lp8788_get_ldo_enable_mode(ldo, id);
+
+ switch (mode) {
+ case EXTPIN:
+ return lp8788_ldo_ctrl_by_extern_pin(ldo, DISABLE);
+ case REGISTER:
+ return regulator_disable_regmap(rdev);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int lp8788_ldo_is_enabled(struct regulator_dev *rdev)
+{
+ struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
+ enum lp8788_ldo_id id = rdev_get_id(rdev);
+ enum lp8788_enable_mode mode = lp8788_get_ldo_enable_mode(ldo, id);
+
+ switch (mode) {
+ case EXTPIN:
+ return lp8788_ldo_is_enabled_by_extern_pin(ldo);
+ case REGISTER:
+ return regulator_is_enabled_regmap(rdev);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int lp8788_ldo_enable_time(struct regulator_dev *rdev)
+{
+ struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
+ enum lp8788_ldo_id id = rdev_get_id(rdev);
+ u8 val, addr = LP8788_DLDO1_TIMESTEP + id;
+
+ if (lp8788_read_byte(ldo->lp, addr, &val))
+ return -EINVAL;
+
+ val = (val & LP8788_STARTUP_TIME_M) >> LP8788_STARTUP_TIME_S;
+
+ return ENABLE_TIME_USEC * val;
+}
+
+static int lp8788_ldo_fixed_get_voltage(struct regulator_dev *rdev)
+{
+ enum lp8788_ldo_id id = rdev_get_id(rdev);
+
+ switch (id) {
+ case ALDO2 ... ALDO5:
+ return 2850000;
+ case DLDO12:
+ case ALDO8 ... ALDO9:
+ return 2500000;
+ case ALDO10:
+ return 1100000;
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct regulator_ops lp8788_ldo_voltage_table_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .enable = lp8788_ldo_enable,
+ .disable = lp8788_ldo_disable,
+ .is_enabled = lp8788_ldo_is_enabled,
+ .enable_time = lp8788_ldo_enable_time,
+};
+
+static struct regulator_ops lp8788_ldo_voltage_fixed_ops = {
+ .get_voltage = lp8788_ldo_fixed_get_voltage,
+ .enable = lp8788_ldo_enable,
+ .disable = lp8788_ldo_disable,
+ .is_enabled = lp8788_ldo_is_enabled,
+ .enable_time = lp8788_ldo_enable_time,
+};
+
+static struct regulator_desc lp8788_dldo_desc[] = {
+ {
+ .name = "dldo1",
+ .id = DLDO1,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo1239_vtbl),
+ .volt_table = lp8788_dldo1239_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO1_VOUT,
+ .vsel_mask = LP8788_VOUT_5BIT_M,
+ .enable_reg = LP8788_EN_LDO_A,
+ .enable_mask = LP8788_EN_DLDO1_M,
+ },
+ {
+ .name = "dldo2",
+ .id = DLDO2,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo1239_vtbl),
+ .volt_table = lp8788_dldo1239_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO2_VOUT,
+ .vsel_mask = LP8788_VOUT_5BIT_M,
+ .enable_reg = LP8788_EN_LDO_A,
+ .enable_mask = LP8788_EN_DLDO2_M,
+ },
+ {
+ .name = "dldo3",
+ .id = DLDO3,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo1239_vtbl),
+ .volt_table = lp8788_dldo1239_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO3_VOUT,
+ .vsel_mask = LP8788_VOUT_5BIT_M,
+ .enable_reg = LP8788_EN_LDO_A,
+ .enable_mask = LP8788_EN_DLDO3_M,
+ },
+ {
+ .name = "dldo4",
+ .id = DLDO4,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo4_vtbl),
+ .volt_table = lp8788_dldo4_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO4_VOUT,
+ .vsel_mask = LP8788_VOUT_1BIT_M,
+ .enable_reg = LP8788_EN_LDO_A,
+ .enable_mask = LP8788_EN_DLDO4_M,
+ },
+ {
+ .name = "dldo5",
+ .id = DLDO5,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo578_aldo6_vtbl),
+ .volt_table = lp8788_dldo578_aldo6_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO5_VOUT,
+ .vsel_mask = LP8788_VOUT_4BIT_M,
+ .enable_reg = LP8788_EN_LDO_A,
+ .enable_mask = LP8788_EN_DLDO5_M,
+ },
+ {
+ .name = "dldo6",
+ .id = DLDO6,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo6_vtbl),
+ .volt_table = lp8788_dldo6_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO6_VOUT,
+ .vsel_mask = LP8788_VOUT_3BIT_M,
+ .enable_reg = LP8788_EN_LDO_A,
+ .enable_mask = LP8788_EN_DLDO6_M,
+ },
+ {
+ .name = "dldo7",
+ .id = DLDO7,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo578_aldo6_vtbl),
+ .volt_table = lp8788_dldo578_aldo6_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO7_VOUT,
+ .vsel_mask = LP8788_VOUT_4BIT_M,
+ .enable_reg = LP8788_EN_LDO_A,
+ .enable_mask = LP8788_EN_DLDO7_M,
+ },
+ {
+ .name = "dldo8",
+ .id = DLDO8,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo578_aldo6_vtbl),
+ .volt_table = lp8788_dldo578_aldo6_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO8_VOUT,
+ .vsel_mask = LP8788_VOUT_4BIT_M,
+ .enable_reg = LP8788_EN_LDO_A,
+ .enable_mask = LP8788_EN_DLDO8_M,
+ },
+ {
+ .name = "dldo9",
+ .id = DLDO9,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo1239_vtbl),
+ .volt_table = lp8788_dldo1239_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO9_VOUT,
+ .vsel_mask = LP8788_VOUT_5BIT_M,
+ .enable_reg = LP8788_EN_LDO_B,
+ .enable_mask = LP8788_EN_DLDO9_M,
+ },
+ {
+ .name = "dldo10",
+ .id = DLDO10,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo1011_vtbl),
+ .volt_table = lp8788_dldo1011_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO10_VOUT,
+ .vsel_mask = LP8788_VOUT_4BIT_M,
+ .enable_reg = LP8788_EN_LDO_B,
+ .enable_mask = LP8788_EN_DLDO10_M,
+ },
+ {
+ .name = "dldo11",
+ .id = DLDO11,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo1011_vtbl),
+ .volt_table = lp8788_dldo1011_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO11_VOUT,
+ .vsel_mask = LP8788_VOUT_4BIT_M,
+ .enable_reg = LP8788_EN_LDO_B,
+ .enable_mask = LP8788_EN_DLDO11_M,
+ },
+ {
+ .name = "dldo12",
+ .id = DLDO12,
+ .ops = &lp8788_ldo_voltage_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8788_EN_LDO_B,
+ .enable_mask = LP8788_EN_DLDO12_M,
+ },
+};
+
+static struct regulator_desc lp8788_aldo_desc[] = {
+ {
+ .name = "aldo1",
+ .id = ALDO1,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_aldo1_vtbl),
+ .volt_table = lp8788_aldo1_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_ALDO1_VOUT,
+ .vsel_mask = LP8788_VOUT_1BIT_M,
+ .enable_reg = LP8788_EN_LDO_B,
+ .enable_mask = LP8788_EN_ALDO1_M,
+ },
+ {
+ .name = "aldo2",
+ .id = ALDO2,
+ .ops = &lp8788_ldo_voltage_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8788_EN_LDO_B,
+ .enable_mask = LP8788_EN_ALDO2_M,
+ },
+ {
+ .name = "aldo3",
+ .id = ALDO3,
+ .ops = &lp8788_ldo_voltage_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8788_EN_LDO_B,
+ .enable_mask = LP8788_EN_ALDO3_M,
+ },
+ {
+ .name = "aldo4",
+ .id = ALDO4,
+ .ops = &lp8788_ldo_voltage_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8788_EN_LDO_B,
+ .enable_mask = LP8788_EN_ALDO4_M,
+ },
+ {
+ .name = "aldo5",
+ .id = ALDO5,
+ .ops = &lp8788_ldo_voltage_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8788_EN_LDO_C,
+ .enable_mask = LP8788_EN_ALDO5_M,
+ },
+ {
+ .name = "aldo6",
+ .id = ALDO6,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo578_aldo6_vtbl),
+ .volt_table = lp8788_dldo578_aldo6_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_ALDO6_VOUT,
+ .vsel_mask = LP8788_VOUT_4BIT_M,
+ .enable_reg = LP8788_EN_LDO_C,
+ .enable_mask = LP8788_EN_ALDO6_M,
+ },
+ {
+ .name = "aldo7",
+ .id = ALDO7,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_aldo7_vtbl),
+ .volt_table = lp8788_aldo7_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_ALDO7_VOUT,
+ .vsel_mask = LP8788_VOUT_3BIT_M,
+ .enable_reg = LP8788_EN_LDO_C,
+ .enable_mask = LP8788_EN_ALDO7_M,
+ },
+ {
+ .name = "aldo8",
+ .id = ALDO8,
+ .ops = &lp8788_ldo_voltage_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8788_EN_LDO_C,
+ .enable_mask = LP8788_EN_ALDO8_M,
+ },
+ {
+ .name = "aldo9",
+ .id = ALDO9,
+ .ops = &lp8788_ldo_voltage_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8788_EN_LDO_C,
+ .enable_mask = LP8788_EN_ALDO9_M,
+ },
+ {
+ .name = "aldo10",
+ .id = ALDO10,
+ .ops = &lp8788_ldo_voltage_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8788_EN_LDO_C,
+ .enable_mask = LP8788_EN_ALDO10_M,
+ },
+};
+
+static int lp8788_gpio_request_ldo_en(struct lp8788_ldo *ldo,
+ enum lp8788_ext_ldo_en_id id)
+{
+ struct device *dev = ldo->lp->dev;
+ struct lp8788_ldo_enable_pin *pin = ldo->en_pin;
+ int ret, gpio, pinstate;
+ char *name[] = {
+ [EN_ALDO1] = "LP8788_EN_ALDO1",
+ [EN_ALDO234] = "LP8788_EN_ALDO234",
+ [EN_ALDO5] = "LP8788_EN_ALDO5",
+ [EN_ALDO7] = "LP8788_EN_ALDO7",
+ [EN_DLDO7] = "LP8788_EN_DLDO7",
+ [EN_DLDO911] = "LP8788_EN_DLDO911",
+ };
+
+ gpio = pin->gpio;
+ if (!gpio_is_valid(gpio)) {
+ dev_err(dev, "invalid gpio: %d\n", gpio);
+ return -EINVAL;
+ }
+
+ pinstate = pin->init_state;
+ ret = devm_gpio_request_one(dev, gpio, pinstate, name[id]);
+ if (ret == -EBUSY) {
+ dev_warn(dev, "gpio%d already used\n", gpio);
+ return 0;
+ }
+
+ return ret;
+}
+
+static int lp8788_config_ldo_enable_mode(struct lp8788_ldo *ldo,
+ enum lp8788_ldo_id id)
+{
+ int ret;
+ struct lp8788 *lp = ldo->lp;
+ struct lp8788_platform_data *pdata = lp->pdata;
+ enum lp8788_ext_ldo_en_id enable_id;
+ u8 en_mask[] = {
+ [EN_ALDO1] = LP8788_EN_SEL_ALDO1_M,
+ [EN_ALDO234] = LP8788_EN_SEL_ALDO234_M,
+ [EN_ALDO5] = LP8788_EN_SEL_ALDO5_M,
+ [EN_ALDO7] = LP8788_EN_SEL_ALDO7_M,
+ [EN_DLDO7] = LP8788_EN_SEL_DLDO7_M,
+ [EN_DLDO911] = LP8788_EN_SEL_DLDO911_M,
+ };
+ u8 val[] = {
+ [EN_ALDO1] = 0 << 5,
+ [EN_ALDO234] = 0 << 4,
+ [EN_ALDO5] = 0 << 3,
+ [EN_ALDO7] = 0 << 2,
+ [EN_DLDO7] = 0 << 1,
+ [EN_DLDO911] = 0 << 0,
+ };
+
+ switch (id) {
+ case DLDO7:
+ enable_id = EN_DLDO7;
+ break;
+ case DLDO9:
+ case DLDO11:
+ enable_id = EN_DLDO911;
+ break;
+ case ALDO1:
+ enable_id = EN_ALDO1;
+ break;
+ case ALDO2 ... ALDO4:
+ enable_id = EN_ALDO234;
+ break;
+ case ALDO5:
+ enable_id = EN_ALDO5;
+ break;
+ case ALDO7:
+ enable_id = EN_ALDO7;
+ break;
+ default:
+ return 0;
+ }
+
+ /* if no platform data for ldo pin, then set default enable mode */
+ if (!pdata || !pdata->ldo_pin || !pdata->ldo_pin[enable_id])
+ goto set_default_ldo_enable_mode;
+
+ ldo->en_pin = pdata->ldo_pin[enable_id];
+
+ ret = lp8788_gpio_request_ldo_en(ldo, enable_id);
+ if (ret)
+ goto set_default_ldo_enable_mode;
+
+ return ret;
+
+set_default_ldo_enable_mode:
+ return lp8788_update_bits(lp, LP8788_EN_SEL, en_mask[enable_id],
+ val[enable_id]);
+}
+
+static __devinit int lp8788_dldo_probe(struct platform_device *pdev)
+{
+ struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
+ int id = pdev->id;
+ struct lp8788_ldo *ldo;
+ struct regulator_config cfg = { };
+ struct regulator_dev *rdev;
+ int ret;
+
+ ldo = devm_kzalloc(lp->dev, sizeof(struct lp8788_ldo), GFP_KERNEL);
+ if (!ldo)
+ return -ENOMEM;
+
+ ldo->lp = lp;
+ ret = lp8788_config_ldo_enable_mode(ldo, lp8788_dldo_id[id]);
+ if (ret)
+ return ret;
+
+ cfg.dev = lp->dev;
+ cfg.init_data = lp->pdata ? lp->pdata->dldo_data[id] : NULL;
+ cfg.driver_data = ldo;
+ cfg.regmap = lp->regmap;
+
+ rdev = regulator_register(&lp8788_dldo_desc[id], &cfg);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(lp->dev, "DLDO%d regulator register err = %d\n",
+ id + 1, ret);
+ return ret;
+ }
+
+ ldo->regulator = rdev;
+ platform_set_drvdata(pdev, ldo);
+
+ return 0;
+}
+
+static int __devexit lp8788_dldo_remove(struct platform_device *pdev)
+{
+ struct lp8788_ldo *ldo = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ regulator_unregister(ldo->regulator);
+
+ return 0;
+}
+
+static struct platform_driver lp8788_dldo_driver = {
+ .probe = lp8788_dldo_probe,
+ .remove = __devexit_p(lp8788_dldo_remove),
+ .driver = {
+ .name = LP8788_DEV_DLDO,
+ .owner = THIS_MODULE,
+ },
+};
+
+static __devinit int lp8788_aldo_probe(struct platform_device *pdev)
+{
+ struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
+ int id = pdev->id;
+ struct lp8788_ldo *ldo;
+ struct regulator_config cfg = { };
+ struct regulator_dev *rdev;
+ int ret;
+
+ ldo = devm_kzalloc(lp->dev, sizeof(struct lp8788_ldo), GFP_KERNEL);
+ if (!ldo)
+ return -ENOMEM;
+
+ ldo->lp = lp;
+ ret = lp8788_config_ldo_enable_mode(ldo, lp8788_aldo_id[id]);
+ if (ret)
+ return ret;
+
+ cfg.dev = lp->dev;
+ cfg.init_data = lp->pdata ? lp->pdata->aldo_data[id] : NULL;
+ cfg.driver_data = ldo;
+ cfg.regmap = lp->regmap;
+
+ rdev = regulator_register(&lp8788_aldo_desc[id], &cfg);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(lp->dev, "ALDO%d regulator register err = %d\n",
+ id + 1, ret);
+ return ret;
+ }
+
+ ldo->regulator = rdev;
+ platform_set_drvdata(pdev, ldo);
+
+ return 0;
+}
+
+static int __devexit lp8788_aldo_remove(struct platform_device *pdev)
+{
+ struct lp8788_ldo *ldo = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ regulator_unregister(ldo->regulator);
+
+ return 0;
+}
+
+static struct platform_driver lp8788_aldo_driver = {
+ .probe = lp8788_aldo_probe,
+ .remove = __devexit_p(lp8788_aldo_remove),
+ .driver = {
+ .name = LP8788_DEV_ALDO,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init lp8788_ldo_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&lp8788_dldo_driver);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&lp8788_aldo_driver);
+}
+subsys_initcall(lp8788_ldo_init);
+
+static void __exit lp8788_ldo_exit(void)
+{
+ platform_driver_unregister(&lp8788_aldo_driver);
+ platform_driver_unregister(&lp8788_dldo_driver);
+}
+module_exit(lp8788_ldo_exit);
+
+MODULE_DESCRIPTION("TI LP8788 LDO Driver");
+MODULE_AUTHOR("Milo Kim");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lp8788-dldo");
+MODULE_ALIAS("platform:lp8788-aldo");
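For reference, a standalone sketch of the start-up time conversion that lp8788_ldo_enable_time() above performs: the TIMESTEP register field is extracted with a mask/shift and multiplied by the per-step delay. The mask, shift and step value below are illustrative placeholders, not the real LP8788 constants.

/* Illustrative only: mirrors the field-extract-and-scale logic of
 * lp8788_ldo_enable_time(); all constants are assumed placeholders. */
#define EXAMPLE_STARTUP_TIME_M    0x38	/* assumed startup-time field mask */
#define EXAMPLE_STARTUP_TIME_S    3	/* assumed startup-time field shift */
#define EXAMPLE_ENABLE_TIME_USEC  32	/* assumed delay per step, in us */

static int example_enable_time_from_timestep(unsigned char reg_val)
{
	unsigned char steps;

	steps = (reg_val & EXAMPLE_STARTUP_TIME_M) >> EXAMPLE_STARTUP_TIME_S;

	/* the regulator core expects the enable ramp time in microseconds */
	return EXAMPLE_ENABLE_TIME_USEC * steps;
}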
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index b9444ee..f67af3c 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -48,6 +48,14 @@ struct max1586_data {
};
/*
+ * V6 voltage
+ * On the I2C bus, sending a byte "x" to the max1586 means:
+ * set V6 to either 0V, 1.8V, 2.5V or 3V depending on (x & 0x3).
+ * As the regulator framework doesn't accept 0V, we use 1uV instead.
+ */
+static int v6_voltages_uv[] = { 1, 1800000, 2500000, 3000000 };
+
+/*
* V3 voltage
* On I2C bus, sending a "x" byte to the max1586 means :
* set V3 to 0.700V + (x & 0x1f) * 0.025V
@@ -55,113 +63,49 @@ struct max1586_data {
* R24 and R25=100kOhm as described in the data sheet.
* The gain is approximately: 1 + R24/R25 + R24/185.5kOhm
*/
-static int max1586_v3_calc_voltage(struct max1586_data *max1586,
- unsigned selector)
-{
- unsigned range_uV = max1586->max_uV - max1586->min_uV;
-
- return max1586->min_uV + (selector * range_uV / MAX1586_V3_MAX_VSEL);
-}
-
-static int max1586_v3_set(struct regulator_dev *rdev, int min_uV, int max_uV,
- unsigned *selector)
+static int max1586_v3_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned selector)
{
struct max1586_data *max1586 = rdev_get_drvdata(rdev);
struct i2c_client *client = max1586->client;
- unsigned range_uV = max1586->max_uV - max1586->min_uV;
u8 v3_prog;
- if (min_uV > max1586->max_uV || max_uV < max1586->min_uV)
- return -EINVAL;
- if (min_uV < max1586->min_uV)
- min_uV = max1586->min_uV;
-
- *selector = DIV_ROUND_UP((min_uV - max1586->min_uV) *
- MAX1586_V3_MAX_VSEL, range_uV);
- if (max1586_v3_calc_voltage(max1586, *selector) > max_uV)
- return -EINVAL;
-
dev_dbg(&client->dev, "changing voltage v3 to %dmv\n",
- max1586_v3_calc_voltage(max1586, *selector) / 1000);
+ regulator_list_voltage_linear(rdev, selector) / 1000);
- v3_prog = I2C_V3_SELECT | (u8) *selector;
+ v3_prog = I2C_V3_SELECT | (u8) selector;
return i2c_smbus_write_byte(client, v3_prog);
}
-static int max1586_v3_list(struct regulator_dev *rdev, unsigned selector)
-{
- struct max1586_data *max1586 = rdev_get_drvdata(rdev);
-
- if (selector > MAX1586_V3_MAX_VSEL)
- return -EINVAL;
- return max1586_v3_calc_voltage(max1586, selector);
-}
-
-/*
- * V6 voltage
- * On I2C bus, sending a "x" byte to the max1586 means :
- * set V6 to either 0V, 1.8V, 2.5V, 3V depending on (x & 0x3)
- * As regulator framework doesn't accept voltages to be 0V, we use 1uV.
- */
-static int max1586_v6_calc_voltage(unsigned selector)
-{
- static int voltages_uv[] = { 1, 1800000, 2500000, 3000000 };
-
- return voltages_uv[selector];
-}
-
-static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV,
- unsigned int *selector)
+static int max1586_v6_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
{
struct i2c_client *client = rdev_get_drvdata(rdev);
u8 v6_prog;
- if (min_uV < MAX1586_V6_MIN_UV || min_uV > MAX1586_V6_MAX_UV)
- return -EINVAL;
- if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV)
- return -EINVAL;
-
- if (min_uV < 1800000)
- *selector = 0;
- else if (min_uV < 2500000)
- *selector = 1;
- else if (min_uV < 3000000)
- *selector = 2;
- else if (min_uV >= 3000000)
- *selector = 3;
-
- if (max1586_v6_calc_voltage(*selector) > max_uV)
- return -EINVAL;
-
dev_dbg(&client->dev, "changing voltage v6 to %dmv\n",
- max1586_v6_calc_voltage(*selector) / 1000);
+ rdev->desc->volt_table[selector] / 1000);
- v6_prog = I2C_V6_SELECT | (u8) *selector;
+ v6_prog = I2C_V6_SELECT | (u8) selector;
return i2c_smbus_write_byte(client, v6_prog);
}
-static int max1586_v6_list(struct regulator_dev *rdev, unsigned selector)
-{
- if (selector > MAX1586_V6_MAX_VSEL)
- return -EINVAL;
- return max1586_v6_calc_voltage(selector);
-}
-
/*
* The Maxim 1586 controls V3 and V6 voltages, but offers no way of reading back
* the set up value.
*/
static struct regulator_ops max1586_v3_ops = {
- .set_voltage = max1586_v3_set,
- .list_voltage = max1586_v3_list,
+ .set_voltage_sel = max1586_v3_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
};
static struct regulator_ops max1586_v6_ops = {
- .set_voltage = max1586_v6_set,
- .list_voltage = max1586_v6_list,
+ .set_voltage_sel = max1586_v6_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_table,
};
-static const struct regulator_desc max1586_reg[] = {
+static struct regulator_desc max1586_reg[] = {
{
.name = "Output_V3",
.id = MAX1586_V3,
@@ -176,6 +120,7 @@ static const struct regulator_desc max1586_reg[] = {
.ops = &max1586_v6_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = MAX1586_V6_MAX_VSEL + 1,
+ .volt_table = v6_voltages_uv,
.owner = THIS_MODULE,
},
};
@@ -213,6 +158,13 @@ static int __devinit max1586_pmic_probe(struct i2c_client *client,
goto err;
}
+ if (id == MAX1586_V3) {
+ max1586_reg[id].min_uV = max1586->min_uV;
+ max1586_reg[id].uV_step =
+ (max1586->max_uV - max1586->min_uV) /
+ MAX1586_V3_MAX_VSEL;
+ }
+
config.dev = &client->dev;
config.init_data = pdata->subdevs[i].platform_data;
config.driver_data = max1586;
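The max1586 V3 rail is now described to the core as a linear range (min_uV plus uV_step per selector, with uV_step derived from the platform min/max in probe), so the old calc/list/set helpers collapse into regulator_list_voltage_linear()/regulator_map_voltage_linear(). A small sketch of that arithmetic, using a made-up range:

/* Sketch of the selector <-> voltage arithmetic now delegated to the
 * core linear helpers; the range below is an invented example. */
#define EXAMPLE_MIN_UV    1000000
#define EXAMPLE_MAX_UV    1705000
#define EXAMPLE_MAX_VSEL  31

static int example_uv_step(void)
{
	return (EXAMPLE_MAX_UV - EXAMPLE_MIN_UV) / EXAMPLE_MAX_VSEL;
}

/* list_voltage: selector -> microvolts */
static int example_list_voltage_linear(unsigned int selector)
{
	return EXAMPLE_MIN_UV + selector * example_uv_step();
}

/* map_voltage: round a requested minimum up to the next selector */
static int example_map_voltage_linear(int min_uV)
{
	int step = example_uv_step();

	if (min_uV < EXAMPLE_MIN_UV)
		min_uV = EXAMPLE_MIN_UV;

	return (min_uV - EXAMPLE_MIN_UV + step - 1) / step;
}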
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c
new file mode 100644
index 0000000..c564af6
--- /dev/null
+++ b/drivers/regulator/max77686.c
@@ -0,0 +1,389 @@
+/*
+ * max77686.c - Regulator driver for the Maxim 77686
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * Chiwoong Byun <woong.byun@samsung.com>
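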
+ * Jonghwa Lee <jonghwa3.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8997.c
+ */
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/mfd/max77686.h>
+#include <linux/mfd/max77686-private.h>
+
+#define MAX77686_LDO_MINUV 800000
+#define MAX77686_LDO_UVSTEP 50000
+#define MAX77686_LDO_LOW_MINUV 800000
+#define MAX77686_LDO_LOW_UVSTEP 25000
+#define MAX77686_BUCK_MINUV 750000
+#define MAX77686_BUCK_UVSTEP 50000
+#define MAX77686_RAMP_DELAY 100000 /* uV/us */
+#define MAX77686_DVS_RAMP_DELAY 27500 /* uV/us */
+#define MAX77686_DVS_MINUV 600000
+#define MAX77686_DVS_UVSTEP 12500
+
+#define MAX77686_OPMODE_SHIFT 6
+#define MAX77686_OPMODE_BUCK234_SHIFT 4
+#define MAX77686_OPMODE_MASK 0x3
+
+#define MAX77686_VSEL_MASK 0x3F
+#define MAX77686_DVS_VSEL_MASK 0xFF
+
+#define MAX77686_RAMP_RATE_MASK 0xC0
+
+#define MAX77686_REGULATORS MAX77686_REG_MAX
+#define MAX77686_LDOS 26
+
+enum max77686_ramp_rate {
+ RAMP_RATE_13P75MV,
+ RAMP_RATE_27P5MV,
+ RAMP_RATE_55MV,
+ RAMP_RATE_NO_CTRL, /* 100mV/us */
+};
+
+struct max77686_data {
+ struct regulator_dev **rdev;
+};
+
+static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+ unsigned int ramp_value = RAMP_RATE_NO_CTRL;
+
+ switch (ramp_delay) {
+ case 1 ... 13750:
+ ramp_value = RAMP_RATE_13P75MV;
+ break;
+ case 13751 ... 27500:
+ ramp_value = RAMP_RATE_27P5MV;
+ break;
+ case 27501 ... 55000:
+ ramp_value = RAMP_RATE_55MV;
+ break;
+ case 55001 ... 100000:
+ break;
+ default:
+ pr_warn("%s: ramp_delay: %d not supported, setting 100000\n",
+ rdev->desc->name, ramp_delay);
+ }
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ MAX77686_RAMP_RATE_MASK, ramp_value << 6);
+}
+
+static struct regulator_ops max77686_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static struct regulator_ops max77686_buck_dvs_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = max77686_set_ramp_delay,
+};
+
+#define regulator_desc_ldo(num) { \
+ .name = "LDO"#num, \
+ .id = MAX77686_LDO##num, \
+ .ops = &max77686_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = MAX77686_LDO_MINUV, \
+ .uV_step = MAX77686_LDO_UVSTEP, \
+ .ramp_delay = MAX77686_RAMP_DELAY, \
+ .n_voltages = MAX77686_VSEL_MASK + 1, \
+ .vsel_reg = MAX77686_REG_LDO1CTRL1 + num - 1, \
+ .vsel_mask = MAX77686_VSEL_MASK, \
+ .enable_reg = MAX77686_REG_LDO1CTRL1 + num - 1, \
+ .enable_mask = MAX77686_OPMODE_MASK \
+ << MAX77686_OPMODE_SHIFT, \
+}
+#define regulator_desc_ldo_low(num) { \
+ .name = "LDO"#num, \
+ .id = MAX77686_LDO##num, \
+ .ops = &max77686_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = MAX77686_LDO_LOW_MINUV, \
+ .uV_step = MAX77686_LDO_LOW_UVSTEP, \
+ .ramp_delay = MAX77686_RAMP_DELAY, \
+ .n_voltages = MAX77686_VSEL_MASK + 1, \
+ .vsel_reg = MAX77686_REG_LDO1CTRL1 + num - 1, \
+ .vsel_mask = MAX77686_VSEL_MASK, \
+ .enable_reg = MAX77686_REG_LDO1CTRL1 + num - 1, \
+ .enable_mask = MAX77686_OPMODE_MASK \
+ << MAX77686_OPMODE_SHIFT, \
+}
+#define regulator_desc_buck(num) { \
+ .name = "BUCK"#num, \
+ .id = MAX77686_BUCK##num, \
+ .ops = &max77686_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = MAX77686_BUCK_MINUV, \
+ .uV_step = MAX77686_BUCK_UVSTEP, \
+ .ramp_delay = MAX77686_RAMP_DELAY, \
+ .n_voltages = MAX77686_VSEL_MASK + 1, \
+ .vsel_reg = MAX77686_REG_BUCK5OUT + (num - 5) * 2, \
+ .vsel_mask = MAX77686_VSEL_MASK, \
+ .enable_reg = MAX77686_REG_BUCK5CTRL + (num - 5) * 2, \
+ .enable_mask = MAX77686_OPMODE_MASK, \
+}
+#define regulator_desc_buck1(num) { \
+ .name = "BUCK"#num, \
+ .id = MAX77686_BUCK##num, \
+ .ops = &max77686_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = MAX77686_BUCK_MINUV, \
+ .uV_step = MAX77686_BUCK_UVSTEP, \
+ .ramp_delay = MAX77686_RAMP_DELAY, \
+ .n_voltages = MAX77686_VSEL_MASK + 1, \
+ .vsel_reg = MAX77686_REG_BUCK1OUT, \
+ .vsel_mask = MAX77686_VSEL_MASK, \
+ .enable_reg = MAX77686_REG_BUCK1CTRL, \
+ .enable_mask = MAX77686_OPMODE_MASK, \
+}
+#define regulator_desc_buck_dvs(num) { \
+ .name = "BUCK"#num, \
+ .id = MAX77686_BUCK##num, \
+ .ops = &max77686_buck_dvs_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = MAX77686_DVS_MINUV, \
+ .uV_step = MAX77686_DVS_UVSTEP, \
+ .ramp_delay = MAX77686_DVS_RAMP_DELAY, \
+ .n_voltages = MAX77686_DVS_VSEL_MASK + 1, \
+ .vsel_reg = MAX77686_REG_BUCK2DVS1 + (num - 2) * 10, \
+ .vsel_mask = MAX77686_DVS_VSEL_MASK, \
+ .enable_reg = MAX77686_REG_BUCK2CTRL1 + (num - 2) * 10, \
+ .enable_mask = MAX77686_OPMODE_MASK \
+ << MAX77686_OPMODE_BUCK234_SHIFT, \
+}
+
+static struct regulator_desc regulators[] = {
+ regulator_desc_ldo_low(1),
+ regulator_desc_ldo_low(2),
+ regulator_desc_ldo(3),
+ regulator_desc_ldo(4),
+ regulator_desc_ldo(5),
+ regulator_desc_ldo_low(6),
+ regulator_desc_ldo_low(7),
+ regulator_desc_ldo_low(8),
+ regulator_desc_ldo(9),
+ regulator_desc_ldo(10),
+ regulator_desc_ldo(11),
+ regulator_desc_ldo(12),
+ regulator_desc_ldo(13),
+ regulator_desc_ldo(14),
+ regulator_desc_ldo_low(15),
+ regulator_desc_ldo(16),
+ regulator_desc_ldo(17),
+ regulator_desc_ldo(18),
+ regulator_desc_ldo(19),
+ regulator_desc_ldo(20),
+ regulator_desc_ldo(21),
+ regulator_desc_ldo(22),
+ regulator_desc_ldo(23),
+ regulator_desc_ldo(24),
+ regulator_desc_ldo(25),
+ regulator_desc_ldo(26),
+ regulator_desc_buck1(1),
+ regulator_desc_buck_dvs(2),
+ regulator_desc_buck_dvs(3),
+ regulator_desc_buck_dvs(4),
+ regulator_desc_buck(5),
+ regulator_desc_buck(6),
+ regulator_desc_buck(7),
+ regulator_desc_buck(8),
+ regulator_desc_buck(9),
+};
+
+#ifdef CONFIG_OF
+static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
+ struct max77686_platform_data *pdata)
+{
+ struct device_node *pmic_np, *regulators_np;
+ struct max77686_regulator_data *rdata;
+ struct of_regulator_match rmatch;
+ unsigned int i;
+
+ pmic_np = iodev->dev->of_node;
+ regulators_np = of_find_node_by_name(pmic_np, "voltage-regulators");
+ if (!regulators_np) {
+ dev_err(iodev->dev, "could not find regulators sub-node\n");
+ return -EINVAL;
+ }
+
+ pdata->num_regulators = ARRAY_SIZE(regulators);
+ rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) *
+ pdata->num_regulators, GFP_KERNEL);
+ if (!rdata) {
+ dev_err(iodev->dev,
+ "could not allocate memory for regulator data\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < pdata->num_regulators; i++) {
+ rmatch.name = regulators[i].name;
+ rmatch.init_data = NULL;
+ rmatch.of_node = NULL;
+ of_regulator_match(iodev->dev, regulators_np, &rmatch, 1);
+ rdata[i].initdata = rmatch.init_data;
+ }
+
+ pdata->regulators = rdata;
+
+ return 0;
+}
+#else
+static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
+ struct max77686_platform_data *pdata)
+{
+ return 0;
+}
+#endif /* CONFIG_OF */
+
+static __devinit int max77686_pmic_probe(struct platform_device *pdev)
+{
+ struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct max77686_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct regulator_dev **rdev;
+ struct max77686_data *max77686;
+ int i, size;
+ int ret = 0;
+ struct regulator_config config = { };
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data found for regulator\n");
+ return -ENODEV;
+ }
+
+ if (iodev->dev->of_node) {
+ ret = max77686_pmic_dt_parse_pdata(iodev, pdata);
+ if (ret)
+ return ret;
+ }
+
+ if (pdata->num_regulators != MAX77686_REGULATORS) {
+ dev_err(&pdev->dev,
+ "Invalid initial data for regulator's initialiation\n");
+ return -EINVAL;
+ }
+
+ max77686 = devm_kzalloc(&pdev->dev, sizeof(struct max77686_data),
+ GFP_KERNEL);
+ if (!max77686)
+ return -ENOMEM;
+
+ size = sizeof(struct regulator_dev *) * MAX77686_REGULATORS;
+ max77686->rdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!max77686->rdev)
+ return -ENOMEM;
+
+ rdev = max77686->rdev;
+ config.dev = &pdev->dev;
+ config.regmap = iodev->regmap;
+ platform_set_drvdata(pdev, max77686);
+
+ for (i = 0; i < MAX77686_REGULATORS; i++) {
+ config.init_data = pdata->regulators[i].initdata;
+
+ rdev[i] = regulator_register(&regulators[i], &config);
+ if (IS_ERR(rdev[i])) {
+ ret = PTR_ERR(rdev[i]);
+ dev_err(&pdev->dev,
+ "regulator init failed for %d\n", i);
+ rdev[i] = NULL;
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ while (--i >= 0)
+ regulator_unregister(rdev[i]);
+ return ret;
+}
+
+static int __devexit max77686_pmic_remove(struct platform_device *pdev)
+{
+ struct max77686_data *max77686 = platform_get_drvdata(pdev);
+ struct regulator_dev **rdev = max77686->rdev;
+ int i;
+
+ for (i = 0; i < MAX77686_REGULATORS; i++)
+ if (rdev[i])
+ regulator_unregister(rdev[i]);
+
+ return 0;
+}
+
+static const struct platform_device_id max77686_pmic_id[] = {
+ {"max77686-pmic", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(platform, max77686_pmic_id);
+
+static struct platform_driver max77686_pmic_driver = {
+ .driver = {
+ .name = "max77686-pmic",
+ .owner = THIS_MODULE,
+ },
+ .probe = max77686_pmic_probe,
+ .remove = __devexit_p(max77686_pmic_remove),
+ .id_table = max77686_pmic_id,
+};
+
+static int __init max77686_pmic_init(void)
+{
+ return platform_driver_register(&max77686_pmic_driver);
+}
+subsys_initcall(max77686_pmic_init);
+
+static void __exit max77686_pmic_cleanup(void)
+{
+ platform_driver_unregister(&max77686_pmic_driver);
+}
+module_exit(max77686_pmic_cleanup);
+
+MODULE_DESCRIPTION("MAXIM 77686 Regulator Driver");
+MODULE_AUTHOR("Chiwoong Byun <woong.byun@samsung.com>");
+MODULE_LICENSE("GPL");
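The only non-generic op in the new max77686 driver is max77686_set_ramp_delay(), which buckets a requested slew rate (in uV/us) into one of four 2-bit codes and falls back to the uncontrolled 100mV/us setting for anything else. A standalone sketch of just that bucketing (names are illustrative):

/* Sketch of the ramp-rate bucketing done in max77686_set_ramp_delay();
 * the enum ordering mirrors the driver, everything else is illustrative. */
enum example_ramp_rate {
	EXAMPLE_RAMP_13P75MV,
	EXAMPLE_RAMP_27P5MV,
	EXAMPLE_RAMP_55MV,
	EXAMPLE_RAMP_NO_CTRL,		/* 100mV/us */
};

static unsigned int example_pick_ramp_code(int ramp_delay)
{
	if (ramp_delay >= 1 && ramp_delay <= 13750)
		return EXAMPLE_RAMP_13P75MV;
	if (ramp_delay > 13750 && ramp_delay <= 27500)
		return EXAMPLE_RAMP_27P5MV;
	if (ramp_delay > 27500 && ramp_delay <= 55000)
		return EXAMPLE_RAMP_55MV;

	/* 55001..100000 and unsupported values use the 100mV/us default */
	return EXAMPLE_RAMP_NO_CTRL;
}

The driver then shifts this code into bits 7:6 of the control register (MAX77686_RAMP_RATE_MASK) via regmap_update_bits().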
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 1f4bb80..9d540cd 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -259,6 +259,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
config.dev = &client->dev;
config.init_data = pdata->regulator;
config.driver_data = info;
+ config.regmap = info->regmap;
info->regulator = regulator_register(&dcdc_desc, &config);
if (IS_ERR(info->regulator)) {
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 910c9b2..355ca7b 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -51,7 +51,6 @@ struct max8952_data {
bool vid0;
bool vid1;
- bool en;
};
static int max8952_read_reg(struct max8952_data *max8952, u8 reg)
@@ -80,38 +79,6 @@ static int max8952_list_voltage(struct regulator_dev *rdev,
return (max8952->pdata->dvs_mode[selector] * 10 + 770) * 1000;
}
-static int max8952_is_enabled(struct regulator_dev *rdev)
-{
- struct max8952_data *max8952 = rdev_get_drvdata(rdev);
- return max8952->en;
-}
-
-static int max8952_enable(struct regulator_dev *rdev)
-{
- struct max8952_data *max8952 = rdev_get_drvdata(rdev);
-
- /* If not valid, assume "ALWAYS_HIGH" */
- if (gpio_is_valid(max8952->pdata->gpio_en))
- gpio_set_value(max8952->pdata->gpio_en, 1);
-
- max8952->en = true;
- return 0;
-}
-
-static int max8952_disable(struct regulator_dev *rdev)
-{
- struct max8952_data *max8952 = rdev_get_drvdata(rdev);
-
- /* If not valid, assume "ALWAYS_HIGH" -> not permitted */
- if (gpio_is_valid(max8952->pdata->gpio_en))
- gpio_set_value(max8952->pdata->gpio_en, 0);
- else
- return -EPERM;
-
- max8952->en = false;
- return 0;
-}
-
static int max8952_get_voltage_sel(struct regulator_dev *rdev)
{
struct max8952_data *max8952 = rdev_get_drvdata(rdev);
@@ -146,12 +113,8 @@ static int max8952_set_voltage_sel(struct regulator_dev *rdev,
static struct regulator_ops max8952_ops = {
.list_voltage = max8952_list_voltage,
- .is_enabled = max8952_is_enabled,
- .enable = max8952_enable,
- .disable = max8952_disable,
.get_voltage_sel = max8952_get_voltage_sel,
.set_voltage_sel = max8952_set_voltage_sel,
- .set_suspend_disable = max8952_disable,
};
static const struct regulator_desc regulator = {
@@ -194,6 +157,10 @@ static int __devinit max8952_pmic_probe(struct i2c_client *client,
config.init_data = &pdata->reg_data;
config.driver_data = max8952;
+ config.ena_gpio = pdata->gpio_en;
+ if (pdata->reg_data.constraints.boot_on)
+ config.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
+
max8952->rdev = regulator_register(&regulator, &config);
if (IS_ERR(max8952->rdev)) {
@@ -202,27 +169,9 @@ static int __devinit max8952_pmic_probe(struct i2c_client *client,
return ret;
}
- max8952->en = !!(pdata->reg_data.constraints.boot_on);
max8952->vid0 = pdata->default_mode & 0x1;
max8952->vid1 = (pdata->default_mode >> 1) & 0x1;
- if (gpio_is_valid(pdata->gpio_en)) {
- if (!gpio_request(pdata->gpio_en, "MAX8952 EN"))
- gpio_direction_output(pdata->gpio_en, max8952->en);
- else
- err = 1;
- } else
- err = 2;
-
- if (err) {
- dev_info(max8952->dev, "EN gpio invalid: assume that EN"
- "is always High\n");
- max8952->en = 1;
- pdata->gpio_en = -1; /* Mark invalid */
- }
-
- err = 0;
-
if (gpio_is_valid(pdata->gpio_vid0) &&
gpio_is_valid(pdata->gpio_vid1)) {
if (!gpio_request(pdata->gpio_vid0, "MAX8952 VID0"))
@@ -308,7 +257,6 @@ static int __devexit max8952_pmic_remove(struct i2c_client *client)
gpio_free(pdata->gpio_vid0);
gpio_free(pdata->gpio_vid1);
- gpio_free(pdata->gpio_en);
return 0;
}
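With the max8952 change above, the driver no longer implements enable/disable/is_enabled itself; it hands the enable GPIO to the regulator core through regulator_config. A minimal sketch of that wiring, assuming the ena_gpio/ena_gpio_flags fields of this kernel series (names are illustrative, error handling omitted):

#include <linux/gpio.h>
#include <linux/regulator/driver.h>

/* Sketch only: register a regulator whose enable pin is owned by the
 * regulator core rather than the driver. Not the max8952 probe itself. */
static struct regulator_dev *example_register_with_en_gpio(
		struct device *dev, const struct regulator_desc *desc,
		struct regulator_init_data *init_data,
		int en_gpio, bool boot_on)
{
	struct regulator_config cfg = { };

	cfg.dev = dev;
	cfg.init_data = init_data;
	cfg.ena_gpio = en_gpio;
	if (boot_on)
		cfg.ena_gpio_flags = GPIOF_OUT_INIT_HIGH;

	/* the core requests the GPIO and toggles it on enable/disable */
	return regulator_register(desc, &cfg);
}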
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 704cd49..e39a0c7 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -1025,7 +1025,6 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
*/
if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
pdata->buck5_gpiodvs) {
- bool gpio1set = false, gpio2set = false;
if (!gpio_is_valid(pdata->buck125_gpios[0]) ||
!gpio_is_valid(pdata->buck125_gpios[1]) ||
@@ -1035,40 +1034,20 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
goto err_out;
}
- ret = gpio_request(pdata->buck125_gpios[0],
- "MAX8997 SET1");
- if (ret == -EBUSY)
- dev_warn(&pdev->dev, "Duplicated gpio request"
- " on SET1\n");
- else if (ret)
+ ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[0],
+ "MAX8997 SET1");
+ if (ret)
goto err_out;
- else
- gpio1set = true;
-
- ret = gpio_request(pdata->buck125_gpios[1],
- "MAX8997 SET2");
- if (ret == -EBUSY)
- dev_warn(&pdev->dev, "Duplicated gpio request"
- " on SET2\n");
- else if (ret) {
- if (gpio1set)
- gpio_free(pdata->buck125_gpios[0]);
+
+ ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[1],
+ "MAX8997 SET2");
+ if (ret)
goto err_out;
- } else
- gpio2set = true;
- ret = gpio_request(pdata->buck125_gpios[2],
+ ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[2],
"MAX8997 SET3");
- if (ret == -EBUSY)
- dev_warn(&pdev->dev, "Duplicated gpio request"
- " on SET3\n");
- else if (ret) {
- if (gpio1set)
- gpio_free(pdata->buck125_gpios[0]);
- if (gpio2set)
- gpio_free(pdata->buck125_gpios[1]);
+ if (ret)
goto err_out;
- }
gpio_direction_output(pdata->buck125_gpios[0],
(max8997->buck125_gpioindex >> 2)
@@ -1079,7 +1058,6 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
gpio_direction_output(pdata->buck125_gpios[2],
(max8997->buck125_gpioindex >> 0)
& 0x1); /* SET3 */
- ret = 0;
}
/* DVS-GPIO disabled */
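The DVS GPIO setup above switches to devm_gpio_request(), which removes the hand-rolled unwinding (gpio1set/gpio2set and the gpio_free() calls): anything requested through devres is released automatically when probe fails or the device is unbound. A minimal sketch of the pattern, with placeholder GPIO labels:

#include <linux/device.h>
#include <linux/gpio.h>

/* Sketch of the devm-managed request loop; labels and count are
 * placeholders, not the MAX8997 SET1/SET2/SET3 code itself. */
static int example_request_dvs_gpios(struct device *dev, const int gpios[3])
{
	static const char * const labels[3] = {
		"EXAMPLE SET1", "EXAMPLE SET2", "EXAMPLE SET3",
	};
	int i, ret;

	for (i = 0; i < 3; i++) {
		ret = devm_gpio_request(dev, gpios[i], labels[i]);
		if (ret)
			return ret;	/* earlier pins are auto-freed by devres */
	}

	return 0;
}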
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 18bb58b..5dfa920 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -111,27 +111,6 @@ static const struct voltage_map_desc *ldo_voltage_map[] = {
&buck4_voltage_map_desc, /* BUCK4 */
};
-static int max8998_list_voltage(struct regulator_dev *rdev,
- unsigned int selector)
-{
- const struct voltage_map_desc *desc;
- int ldo = rdev_get_id(rdev);
- int val;
-
- if (ldo >= ARRAY_SIZE(ldo_voltage_map))
- return -EINVAL;
-
- desc = ldo_voltage_map[ldo];
- if (desc == NULL)
- return -EINVAL;
-
- val = desc->min + desc->step * selector;
- if (val > desc->max)
- return -EINVAL;
-
- return val * 1000;
-}
-
static int max8998_get_enable_register(struct regulator_dev *rdev,
int *reg, int *shift)
{
@@ -297,41 +276,18 @@ static int max8998_get_voltage_sel(struct regulator_dev *rdev)
return val;
}
-static int max8998_set_voltage_ldo(struct regulator_dev *rdev,
- int min_uV, int max_uV, unsigned *selector)
+static int max8998_set_voltage_ldo_sel(struct regulator_dev *rdev,
+ unsigned selector)
{
struct max8998_data *max8998 = rdev_get_drvdata(rdev);
struct i2c_client *i2c = max8998->iodev->i2c;
- int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
- const struct voltage_map_desc *desc;
- int ldo = rdev_get_id(rdev);
- int reg, shift = 0, mask, ret, i;
-
- if (ldo >= ARRAY_SIZE(ldo_voltage_map))
- return -EINVAL;
-
- desc = ldo_voltage_map[ldo];
- if (desc == NULL)
- return -EINVAL;
-
- if (max_vol < desc->min || min_vol > desc->max)
- return -EINVAL;
-
- if (min_vol < desc->min)
- min_vol = desc->min;
-
- i = DIV_ROUND_UP(min_vol - desc->min, desc->step);
-
- if (desc->min + desc->step*i > max_vol)
- return -EINVAL;
-
- *selector = i;
+ int reg, shift = 0, mask, ret;
ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask);
if (ret)
return ret;
- ret = max8998_update_reg(i2c, reg, i<<shift, mask<<shift);
+ ret = max8998_update_reg(i2c, reg, selector<<shift, mask<<shift);
return ret;
}
@@ -347,41 +303,18 @@ static inline void buck2_gpio_set(int gpio, int v)
gpio_set_value(gpio, v & 0x1);
}
-static int max8998_set_voltage_buck(struct regulator_dev *rdev,
- int min_uV, int max_uV, unsigned *selector)
+static int max8998_set_voltage_buck_sel(struct regulator_dev *rdev,
+ unsigned selector)
{
struct max8998_data *max8998 = rdev_get_drvdata(rdev);
struct max8998_platform_data *pdata =
dev_get_platdata(max8998->iodev->dev);
struct i2c_client *i2c = max8998->iodev->i2c;
- int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
- const struct voltage_map_desc *desc;
int buck = rdev_get_id(rdev);
int reg, shift = 0, mask, ret;
- int i, j, previous_sel;
+ int j, previous_sel;
static u8 buck1_last_val;
- if (buck >= ARRAY_SIZE(ldo_voltage_map))
- return -EINVAL;
-
- desc = ldo_voltage_map[buck];
-
- if (desc == NULL)
- return -EINVAL;
-
- if (max_vol < desc->min || min_vol > desc->max)
- return -EINVAL;
-
- if (min_vol < desc->min)
- min_vol = desc->min;
-
- i = DIV_ROUND_UP(min_vol - desc->min, desc->step);
-
- if (desc->min + desc->step*i > max_vol)
- return -EINVAL;
-
- *selector = i;
-
ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask);
if (ret)
return ret;
@@ -390,19 +323,19 @@ static int max8998_set_voltage_buck(struct regulator_dev *rdev,
/* Check if voltage needs to be changed */
/* if previous_voltage equal new voltage, return */
- if (previous_sel == i) {
+ if (previous_sel == selector) {
dev_dbg(max8998->dev, "No voltage change, old:%d, new:%d\n",
- max8998_list_voltage(rdev, previous_sel),
- max8998_list_voltage(rdev, i));
+ regulator_list_voltage_linear(rdev, previous_sel),
+ regulator_list_voltage_linear(rdev, selector));
return ret;
}
switch (buck) {
case MAX8998_BUCK1:
dev_dbg(max8998->dev,
- "BUCK1, i:%d, buck1_vol1:%d, buck1_vol2:%d\n"
+ "BUCK1, selector:%d, buck1_vol1:%d, buck1_vol2:%d\n"
"buck1_vol3:%d, buck1_vol4:%d\n",
- i, max8998->buck1_vol[0], max8998->buck1_vol[1],
+ selector, max8998->buck1_vol[0], max8998->buck1_vol[1],
max8998->buck1_vol[2], max8998->buck1_vol[3]);
if (gpio_is_valid(pdata->buck1_set1) &&
@@ -411,7 +344,7 @@ static int max8998_set_voltage_buck(struct regulator_dev *rdev,
/* check if requested voltage */
/* value is already defined */
for (j = 0; j < ARRAY_SIZE(max8998->buck1_vol); j++) {
- if (max8998->buck1_vol[j] == i) {
+ if (max8998->buck1_vol[j] == selector) {
max8998->buck1_idx = j;
buck1_gpio_set(pdata->buck1_set1,
pdata->buck1_set2, j);
@@ -426,11 +359,11 @@ static int max8998_set_voltage_buck(struct regulator_dev *rdev,
max8998->buck1_idx = (buck1_last_val % 2) + 2;
dev_dbg(max8998->dev, "max8998->buck1_idx:%d\n",
max8998->buck1_idx);
- max8998->buck1_vol[max8998->buck1_idx] = i;
+ max8998->buck1_vol[max8998->buck1_idx] = selector;
ret = max8998_get_voltage_register(rdev, &reg,
&shift,
&mask);
- ret = max8998_write_reg(i2c, reg, i);
+ ret = max8998_write_reg(i2c, reg, selector);
buck1_gpio_set(pdata->buck1_set1,
pdata->buck1_set2, max8998->buck1_idx);
buck1_last_val++;
@@ -440,20 +373,20 @@ buck1_exit:
gpio_get_value(pdata->buck1_set2));
break;
} else {
- ret = max8998_write_reg(i2c, reg, i);
+ ret = max8998_write_reg(i2c, reg, selector);
}
break;
case MAX8998_BUCK2:
dev_dbg(max8998->dev,
- "BUCK2, i:%d buck2_vol1:%d, buck2_vol2:%d\n"
- , i, max8998->buck2_vol[0], max8998->buck2_vol[1]);
+ "BUCK2, selector:%d buck2_vol1:%d, buck2_vol2:%d\n",
+ selector, max8998->buck2_vol[0], max8998->buck2_vol[1]);
if (gpio_is_valid(pdata->buck2_set3)) {
/* check if requested voltage */
/* value is already defined */
for (j = 0; j < ARRAY_SIZE(max8998->buck2_vol); j++) {
- if (max8998->buck2_vol[j] == i) {
+ if (max8998->buck2_vol[j] == selector) {
max8998->buck2_idx = j;
buck2_gpio_set(pdata->buck2_set3, j);
goto buck2_exit;
@@ -465,20 +398,21 @@ buck1_exit:
max8998_get_voltage_register(rdev,
&reg, &shift, &mask);
- ret = max8998_write_reg(i2c, reg, i);
- max8998->buck2_vol[max8998->buck2_idx] = i;
+ ret = max8998_write_reg(i2c, reg, selector);
+ max8998->buck2_vol[max8998->buck2_idx] = selector;
buck2_gpio_set(pdata->buck2_set3, max8998->buck2_idx);
buck2_exit:
dev_dbg(max8998->dev, "%s: SET3:%d\n", i2c->name,
gpio_get_value(pdata->buck2_set3));
} else {
- ret = max8998_write_reg(i2c, reg, i);
+ ret = max8998_write_reg(i2c, reg, selector);
}
break;
case MAX8998_BUCK3:
case MAX8998_BUCK4:
- ret = max8998_update_reg(i2c, reg, i<<shift, mask<<shift);
+ ret = max8998_update_reg(i2c, reg, selector<<shift,
+ mask<<shift);
break;
}
@@ -519,34 +453,30 @@ static int max8998_set_voltage_buck_time_sel(struct regulator_dev *rdev,
}
static struct regulator_ops max8998_ldo_ops = {
- .list_voltage = max8998_list_voltage,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
.is_enabled = max8998_ldo_is_enabled,
.enable = max8998_ldo_enable,
.disable = max8998_ldo_disable,
.get_voltage_sel = max8998_get_voltage_sel,
- .set_voltage = max8998_set_voltage_ldo,
- .set_suspend_enable = max8998_ldo_enable,
- .set_suspend_disable = max8998_ldo_disable,
+ .set_voltage_sel = max8998_set_voltage_ldo_sel,
};
static struct regulator_ops max8998_buck_ops = {
- .list_voltage = max8998_list_voltage,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
.is_enabled = max8998_ldo_is_enabled,
.enable = max8998_ldo_enable,
.disable = max8998_ldo_disable,
.get_voltage_sel = max8998_get_voltage_sel,
- .set_voltage = max8998_set_voltage_buck,
+ .set_voltage_sel = max8998_set_voltage_buck_sel,
.set_voltage_time_sel = max8998_set_voltage_buck_time_sel,
- .set_suspend_enable = max8998_ldo_enable,
- .set_suspend_disable = max8998_ldo_disable,
};
static struct regulator_ops max8998_others_ops = {
.is_enabled = max8998_ldo_is_enabled,
.enable = max8998_ldo_enable,
.disable = max8998_ldo_disable,
- .set_suspend_enable = max8998_ldo_enable,
- .set_suspend_disable = max8998_ldo_disable,
};
static struct regulator_desc regulators[] = {
@@ -860,7 +790,10 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
desc = ldo_voltage_map[id];
if (desc && regulators[index].ops != &max8998_others_ops) {
int count = (desc->max - desc->min) / desc->step + 1;
+
regulators[index].n_voltages = count;
+ regulators[index].min_uV = desc->min * 1000;
+ regulators[index].uV_step = desc->step * 1000;
}
config.dev = max8998->dev;
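To let the core linear helpers replace max8998_list_voltage(), the probe above copies the per-regulator voltage_map_desc (whose min/max/step are in mV) into the descriptor's n_voltages/min_uV/uV_step. A sketch of that conversion with a stand-in struct:

/* Stand-in for the max8998 voltage_map_desc -> regulator_desc copy;
 * the struct and values here are illustrative only. */
struct example_voltage_map_desc {
	int min;	/* mV */
	int max;	/* mV */
	int step;	/* mV */
};

static void example_fill_linear_range(const struct example_voltage_map_desc *d,
				      int *n_voltages, int *min_uV, int *uV_step)
{
	*n_voltages = (d->max - d->min) / d->step + 1;
	*min_uV = d->min * 1000;
	*uV_step = d->step * 1000;
}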
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 7dcdfa2..4932e34 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -93,78 +93,78 @@
/* Voltage Values */
-static const int mc13783_sw3_val[] = {
+static const unsigned int mc13783_sw3_val[] = {
5000000, 5000000, 5000000, 5500000,
};
-static const int mc13783_vaudio_val[] = {
+static const unsigned int mc13783_vaudio_val[] = {
2775000,
};
-static const int mc13783_viohi_val[] = {
+static const unsigned int mc13783_viohi_val[] = {
2775000,
};
-static const int mc13783_violo_val[] = {
+static const unsigned int mc13783_violo_val[] = {
1200000, 1300000, 1500000, 1800000,
};
-static const int mc13783_vdig_val[] = {
+static const unsigned int mc13783_vdig_val[] = {
1200000, 1300000, 1500000, 1800000,
};
-static const int mc13783_vgen_val[] = {
+static const unsigned int mc13783_vgen_val[] = {
1200000, 1300000, 1500000, 1800000,
1100000, 2000000, 2775000, 2400000,
};
-static const int mc13783_vrfdig_val[] = {
+static const unsigned int mc13783_vrfdig_val[] = {
1200000, 1500000, 1800000, 1875000,
};
-static const int mc13783_vrfref_val[] = {
+static const unsigned int mc13783_vrfref_val[] = {
2475000, 2600000, 2700000, 2775000,
};
-static const int mc13783_vrfcp_val[] = {
+static const unsigned int mc13783_vrfcp_val[] = {
2700000, 2775000,
};
-static const int mc13783_vsim_val[] = {
+static const unsigned int mc13783_vsim_val[] = {
1800000, 2900000, 3000000,
};
-static const int mc13783_vesim_val[] = {
+static const unsigned int mc13783_vesim_val[] = {
1800000, 2900000,
};
-static const int mc13783_vcam_val[] = {
+static const unsigned int mc13783_vcam_val[] = {
1500000, 1800000, 2500000, 2550000,
2600000, 2750000, 2800000, 3000000,
};
-static const int mc13783_vrfbg_val[] = {
+static const unsigned int mc13783_vrfbg_val[] = {
1250000,
};
-static const int mc13783_vvib_val[] = {
+static const unsigned int mc13783_vvib_val[] = {
1300000, 1800000, 2000000, 3000000,
};
-static const int mc13783_vmmc_val[] = {
+static const unsigned int mc13783_vmmc_val[] = {
1600000, 1800000, 2000000, 2600000,
2700000, 2800000, 2900000, 3000000,
};
-static const int mc13783_vrf_val[] = {
+static const unsigned int mc13783_vrf_val[] = {
1500000, 1875000, 2700000, 2775000,
};
-static const int mc13783_gpo_val[] = {
+static const unsigned int mc13783_gpo_val[] = {
3100000,
};
-static const int mc13783_pwgtdrv_val[] = {
+static const unsigned int mc13783_pwgtdrv_val[] = {
5500000,
};
@@ -328,7 +328,7 @@ static struct regulator_ops mc13783_gpo_regulator_ops = {
.enable = mc13783_gpo_regulator_enable,
.disable = mc13783_gpo_regulator_disable,
.is_enabled = mc13783_gpo_regulator_is_enabled,
- .list_voltage = mc13xxx_regulator_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.set_voltage = mc13xxx_fixed_regulator_set_voltage,
.get_voltage = mc13xxx_fixed_regulator_get_voltage,
};
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 970a233..b388b74 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -150,12 +150,12 @@
#define MC13892_USB1 50
#define MC13892_USB1_VUSBEN (1<<3)
-static const int mc13892_vcoincell[] = {
+static const unsigned int mc13892_vcoincell[] = {
2500000, 2700000, 2800000, 2900000, 3000000, 3100000,
3200000, 3300000,
};
-static const int mc13892_sw1[] = {
+static const unsigned int mc13892_sw1[] = {
600000, 625000, 650000, 675000, 700000, 725000,
750000, 775000, 800000, 825000, 850000, 875000,
900000, 925000, 950000, 975000, 1000000, 1025000,
@@ -164,7 +164,7 @@ static const int mc13892_sw1[] = {
1350000, 1375000
};
-static const int mc13892_sw[] = {
+static const unsigned int mc13892_sw[] = {
600000, 625000, 650000, 675000, 700000, 725000,
750000, 775000, 800000, 825000, 850000, 875000,
900000, 925000, 950000, 975000, 1000000, 1025000,
@@ -176,65 +176,65 @@ static const int mc13892_sw[] = {
1800000, 1825000, 1850000, 1875000
};
-static const int mc13892_swbst[] = {
+static const unsigned int mc13892_swbst[] = {
5000000,
};
-static const int mc13892_viohi[] = {
+static const unsigned int mc13892_viohi[] = {
2775000,
};
-static const int mc13892_vpll[] = {
+static const unsigned int mc13892_vpll[] = {
1050000, 1250000, 1650000, 1800000,
};
-static const int mc13892_vdig[] = {
+static const unsigned int mc13892_vdig[] = {
1050000, 1250000, 1650000, 1800000,
};
-static const int mc13892_vsd[] = {
+static const unsigned int mc13892_vsd[] = {
1800000, 2000000, 2600000, 2700000,
2800000, 2900000, 3000000, 3150000,
};
-static const int mc13892_vusb2[] = {
+static const unsigned int mc13892_vusb2[] = {
2400000, 2600000, 2700000, 2775000,
};
-static const int mc13892_vvideo[] = {
+static const unsigned int mc13892_vvideo[] = {
2700000, 2775000, 2500000, 2600000,
};
-static const int mc13892_vaudio[] = {
+static const unsigned int mc13892_vaudio[] = {
2300000, 2500000, 2775000, 3000000,
};
-static const int mc13892_vcam[] = {
+static const unsigned int mc13892_vcam[] = {
2500000, 2600000, 2750000, 3000000,
};
-static const int mc13892_vgen1[] = {
+static const unsigned int mc13892_vgen1[] = {
1200000, 1500000, 2775000, 3150000,
};
-static const int mc13892_vgen2[] = {
+static const unsigned int mc13892_vgen2[] = {
1200000, 1500000, 1600000, 1800000,
2700000, 2800000, 3000000, 3150000,
};
-static const int mc13892_vgen3[] = {
+static const unsigned int mc13892_vgen3[] = {
1800000, 2900000,
};
-static const int mc13892_vusb[] = {
+static const unsigned int mc13892_vusb[] = {
3300000,
};
-static const int mc13892_gpo[] = {
+static const unsigned int mc13892_gpo[] = {
2750000,
};
-static const int mc13892_pwgtdrv[] = {
+static const unsigned int mc13892_pwgtdrv[] = {
5000000,
};
@@ -394,7 +394,7 @@ static struct regulator_ops mc13892_gpo_regulator_ops = {
.enable = mc13892_gpo_regulator_enable,
.disable = mc13892_gpo_regulator_disable,
.is_enabled = mc13892_gpo_regulator_is_enabled,
- .list_voltage = mc13xxx_regulator_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.set_voltage = mc13xxx_fixed_regulator_set_voltage,
.get_voltage = mc13xxx_fixed_regulator_get_voltage,
};
@@ -436,7 +436,7 @@ static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev,
u32 valread;
int ret;
- value = mc13892_regulators[id].voltages[selector];
+ value = rdev->desc->volt_table[selector];
mc13xxx_lock(priv->mc13xxx);
ret = mc13xxx_reg_read(priv->mc13xxx,
@@ -469,8 +469,7 @@ err:
}
static struct regulator_ops mc13892_sw_regulator_ops = {
- .is_enabled = mc13xxx_sw_regulator_is_enabled,
- .list_voltage = mc13xxx_regulator_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.set_voltage_sel = mc13892_sw_regulator_set_voltage_sel,
.get_voltage = mc13892_sw_regulator_get_voltage,
};
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index 4fa9704..d6eda28 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -80,20 +80,6 @@ static int mc13xxx_regulator_is_enabled(struct regulator_dev *rdev)
return (val & mc13xxx_regulators[id].enable_bit) != 0;
}
-int mc13xxx_regulator_list_voltage(struct regulator_dev *rdev,
- unsigned selector)
-{
- int id = rdev_get_id(rdev);
- struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
- struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
-
- if (selector >= mc13xxx_regulators[id].desc.n_voltages)
- return -EINVAL;
-
- return mc13xxx_regulators[id].voltages[selector];
-}
-EXPORT_SYMBOL_GPL(mc13xxx_regulator_list_voltage);
-
static int mc13xxx_regulator_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
@@ -135,14 +121,14 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
BUG_ON(val >= mc13xxx_regulators[id].desc.n_voltages);
- return mc13xxx_regulators[id].voltages[val];
+ return rdev->desc->volt_table[val];
}
struct regulator_ops mc13xxx_regulator_ops = {
.enable = mc13xxx_regulator_enable,
.disable = mc13xxx_regulator_disable,
.is_enabled = mc13xxx_regulator_is_enabled,
- .list_voltage = mc13xxx_regulator_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.set_voltage_sel = mc13xxx_regulator_set_voltage_sel,
.get_voltage = mc13xxx_regulator_get_voltage,
};
@@ -151,15 +137,13 @@ EXPORT_SYMBOL_GPL(mc13xxx_regulator_ops);
int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
int max_uV, unsigned *selector)
{
- struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
- struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
int id = rdev_get_id(rdev);
dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
__func__, id, min_uV, max_uV);
- if (min_uV >= mc13xxx_regulators[id].voltages[0] &&
- max_uV <= mc13xxx_regulators[id].voltages[0])
+ if (min_uV <= rdev->desc->volt_table[0] &&
+ rdev->desc->volt_table[0] <= max_uV)
return 0;
else
return -EINVAL;
@@ -168,13 +152,11 @@ EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_set_voltage);
int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev)
{
- struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
- struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
int id = rdev_get_id(rdev);
dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
- return mc13xxx_regulators[id].voltages[0];
+ return rdev->desc->volt_table[0];
}
EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_get_voltage);
@@ -182,18 +164,12 @@ struct regulator_ops mc13xxx_fixed_regulator_ops = {
.enable = mc13xxx_regulator_enable,
.disable = mc13xxx_regulator_disable,
.is_enabled = mc13xxx_regulator_is_enabled,
- .list_voltage = mc13xxx_regulator_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.set_voltage = mc13xxx_fixed_regulator_set_voltage,
.get_voltage = mc13xxx_fixed_regulator_get_voltage,
};
EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_ops);
-int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev)
-{
- return 1;
-}
-EXPORT_SYMBOL_GPL(mc13xxx_sw_regulator_is_enabled);
-
#ifdef CONFIG_OF
int __devinit mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
{
diff --git a/drivers/regulator/mc13xxx.h b/drivers/regulator/mc13xxx.h
index 044aba4..eaff551 100644
--- a/drivers/regulator/mc13xxx.h
+++ b/drivers/regulator/mc13xxx.h
@@ -22,7 +22,6 @@ struct mc13xxx_regulator {
int vsel_shift;
int vsel_mask;
int hi_bit;
- int const *voltages;
};
struct mc13xxx_regulator_priv {
@@ -33,10 +32,6 @@ struct mc13xxx_regulator_priv {
struct regulator_dev *regulators[];
};
-extern int mc13xxx_sw_regulator(struct regulator_dev *rdev);
-extern int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev);
-extern int mc13xxx_regulator_list_voltage(struct regulator_dev *rdev,
- unsigned selector);
extern int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV, unsigned *selector);
extern int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev);
@@ -68,6 +63,7 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
.desc = { \
.name = #_name, \
.n_voltages = ARRAY_SIZE(_voltages), \
+ .volt_table = _voltages, \
.ops = &_ops, \
.type = REGULATOR_VOLTAGE, \
.id = prefix ## _name, \
@@ -78,7 +74,6 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
.vsel_reg = prefix ## _vsel_reg, \
.vsel_shift = prefix ## _vsel_reg ## _ ## _name ## VSEL,\
.vsel_mask = prefix ## _vsel_reg ## _ ## _name ## VSEL_M,\
- .voltages = _voltages, \
}
#define MC13xxx_FIXED_DEFINE(prefix, _name, _reg, _voltages, _ops) \
@@ -86,6 +81,7 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
.desc = { \
.name = #_name, \
.n_voltages = ARRAY_SIZE(_voltages), \
+ .volt_table = _voltages, \
.ops = &_ops, \
.type = REGULATOR_VOLTAGE, \
.id = prefix ## _name, \
@@ -93,7 +89,6 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
}, \
.reg = prefix ## _reg, \
.enable_bit = prefix ## _reg ## _ ## _name ## EN, \
- .voltages = _voltages, \
}
#define MC13xxx_GPO_DEFINE(prefix, _name, _reg, _voltages, _ops) \
@@ -101,6 +96,7 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
.desc = { \
.name = #_name, \
.n_voltages = ARRAY_SIZE(_voltages), \
+ .volt_table = _voltages, \
.ops = &_ops, \
.type = REGULATOR_VOLTAGE, \
.id = prefix ## _name, \
@@ -108,7 +104,6 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
}, \
.reg = prefix ## _reg, \
.enable_bit = prefix ## _reg ## _ ## _name ## EN, \
- .voltages = _voltages, \
}
#define MC13xxx_DEFINE_SW(_name, _reg, _vsel_reg, _voltages, ops) \
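Moving the voltage arrays into regulator_desc.volt_table (and dropping the private .voltages field) is what lets the mc13xxx drivers use regulator_list_voltage_table(): a selector becomes a bounds-checked index into the descriptor's table. A simplified standalone sketch of that lookup (not the core's implementation):

#include <linux/errno.h>
#include <linux/kernel.h>

/* Simplified volt_table lookup; the table values are examples. */
static const unsigned int example_volt_table[] = {
	1200000, 1300000, 1500000, 1800000,
};

static int example_list_voltage_table(unsigned int selector)
{
	if (selector >= ARRAY_SIZE(example_volt_table))
		return -EINVAL;

	return example_volt_table[selector];
}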
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 56593b7..3e4106f 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -20,7 +20,7 @@ static void of_get_regulation_constraints(struct device_node *np,
struct regulator_init_data **init_data)
{
const __be32 *min_uV, *max_uV, *uV_offset;
- const __be32 *min_uA, *max_uA;
+ const __be32 *min_uA, *max_uA, *ramp_delay;
struct regulation_constraints *constraints = &(*init_data)->constraints;
constraints->name = of_get_property(np, "regulator-name", NULL);
@@ -60,6 +60,10 @@ static void of_get_regulation_constraints(struct device_node *np,
constraints->always_on = true;
else /* status change should be possible if not always on. */
constraints->valid_ops_mask |= REGULATOR_CHANGE_STATUS;
+
+ ramp_delay = of_get_property(np, "regulator-ramp-delay", NULL);
+ if (ramp_delay)
+ constraints->ramp_delay = be32_to_cpu(*ramp_delay);
}
/**
@@ -88,15 +92,17 @@ struct regulator_init_data *of_get_regulator_init_data(struct device *dev,
EXPORT_SYMBOL_GPL(of_get_regulator_init_data);
/**
- * of_regulator_match - extract regulator init data
+ * of_regulator_match - extract regulator init data when the node's
+ * "regulator-compatible" property matches the regulator name.
* @dev: device requesting the data
* @node: parent device node of the regulators
* @matches: match table for the regulators
* @num_matches: number of entries in match table
*
* This function uses a match table specified by the regulator driver and
- * looks up the corresponding init data in the device tree. Note that the
- * match table is modified in place.
+ * looks up the corresponding init data in the device tree if
+ * regulator-compatible matches. Note that the match table is modified
+ * in place.
*
* Returns the number of matches found or a negative error code on failure.
*/
@@ -106,27 +112,40 @@ int of_regulator_match(struct device *dev, struct device_node *node,
{
unsigned int count = 0;
unsigned int i;
+ const char *regulator_comp;
+ struct device_node *child;
if (!dev || !node)
return -EINVAL;
- for (i = 0; i < num_matches; i++) {
- struct of_regulator_match *match = &matches[i];
- struct device_node *child;
-
- child = of_find_node_by_name(node, match->name);
- if (!child)
- continue;
-
- match->init_data = of_get_regulator_init_data(dev, child);
- if (!match->init_data) {
- dev_err(dev, "failed to parse DT for regulator %s\n",
+ for_each_child_of_node(node, child) {
+ regulator_comp = of_get_property(child,
+ "regulator-compatible", NULL);
+ if (!regulator_comp) {
+ dev_err(dev, "regulator-compatible is missing for node %s\n",
child->name);
- return -EINVAL;
+ continue;
+ }
+ for (i = 0; i < num_matches; i++) {
+ struct of_regulator_match *match = &matches[i];
+ if (match->of_node)
+ continue;
+
+ if (strcmp(match->name, regulator_comp))
+ continue;
+
+ match->init_data =
+ of_get_regulator_init_data(dev, child);
+ if (!match->init_data) {
+ dev_err(dev,
+ "failed to parse DT for regulator %s\n",
+ child->name);
+ return -EINVAL;
+ }
+ match->of_node = child;
+ count++;
+ break;
}
-
- match->of_node = child;
- count++;
}
return count;
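After this rework, of_regulator_match() iterates the children of the regulators node and binds each match-table entry to the child whose "regulator-compatible" property equals the entry's name (the max77686 DT parsing above does this one entry at a time). A sketch of a typical caller; the regulator names here are made up:

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/regulator/of_regulator.h>

/* Sketch of driver-side usage of of_regulator_match(); "ldo1"/"buck1"
 * are invented names, not a real binding. */
static int example_parse_regulators(struct device *dev,
				    struct device_node *regulators_np)
{
	static struct of_regulator_match matches[] = {
		{ .name = "ldo1" },
		{ .name = "buck1" },
	};
	int ret;

	ret = of_regulator_match(dev, regulators_np, matches,
				 ARRAY_SIZE(matches));
	if (ret < 0)
		return ret;

	/* matches[i].init_data / .of_node are now set for matched nodes */
	return 0;
}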
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index c4435f6..17d19fb 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -257,8 +257,7 @@ static int palmas_set_mode_smps(struct regulator_dev *dev, unsigned int mode)
unsigned int reg;
palmas_smps_read(pmic->palmas, palmas_regs_info[id].ctrl_addr, &reg);
- reg &= ~PALMAS_SMPS12_CTRL_STATUS_MASK;
- reg >>= PALMAS_SMPS12_CTRL_STATUS_SHIFT;
+ reg &= ~PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
switch (mode) {
case REGULATOR_MODE_NORMAL:
@@ -374,11 +373,22 @@ static int palmas_set_voltage_smps_sel(struct regulator_dev *dev,
static int palmas_map_voltage_smps(struct regulator_dev *rdev,
int min_uV, int max_uV)
{
+ struct palmas_pmic *pmic = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
int ret, voltage;
- ret = ((min_uV - 500000) / 10000) + 1;
- if (ret < 0)
- return ret;
+ if (min_uV == 0)
+ return 0;
+
+ if (pmic->range[id]) { /* RANGE is x2 */
+ if (min_uV < 1000000)
+ min_uV = 1000000;
+ ret = DIV_ROUND_UP(min_uV - 1000000, 20000) + 1;
+ } else { /* RANGE is x1 */
+ if (min_uV < 500000)
+ min_uV = 500000;
+ ret = DIV_ROUND_UP(min_uV - 500000, 10000) + 1;
+ }
/* Map back into a voltage to verify we're still in bounds */
voltage = palmas_list_voltage_smps(rdev, ret);
@@ -400,19 +410,14 @@ static struct regulator_ops palmas_ops_smps = {
.map_voltage = palmas_map_voltage_smps,
};
-static int palmas_list_voltage_smps10(struct regulator_dev *dev,
- unsigned selector)
-{
- return 3750000 + (selector * 1250000);
-}
-
static struct regulator_ops palmas_ops_smps10 = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
- .list_voltage = palmas_list_voltage_smps10,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
};
static int palmas_is_enabled_ldo(struct regulator_dev *dev)
@@ -522,7 +527,15 @@ static int palmas_smps_init(struct palmas *palmas, int id,
if (ret)
return ret;
- if (id != PALMAS_REG_SMPS10) {
+ switch (id) {
+ case PALMAS_REG_SMPS10:
+ if (reg_init->mode_sleep) {
+ reg &= ~PALMAS_SMPS10_CTRL_MODE_SLEEP_MASK;
+ reg |= reg_init->mode_sleep <<
+ PALMAS_SMPS10_CTRL_MODE_SLEEP_SHIFT;
+ }
+ break;
+ default:
if (reg_init->warm_reset)
reg |= PALMAS_SMPS12_CTRL_WR_S;
@@ -534,14 +547,8 @@ static int palmas_smps_init(struct palmas *palmas, int id,
reg |= reg_init->mode_sleep <<
PALMAS_SMPS12_CTRL_MODE_SLEEP_SHIFT;
}
- } else {
- if (reg_init->mode_sleep) {
- reg &= ~PALMAS_SMPS10_CTRL_MODE_SLEEP_MASK;
- reg |= reg_init->mode_sleep <<
- PALMAS_SMPS10_CTRL_MODE_SLEEP_SHIFT;
- }
-
}
+
ret = palmas_smps_write(palmas, addr, reg);
if (ret)
return ret;
@@ -665,16 +672,22 @@ static __devinit int palmas_probe(struct platform_device *pdev)
pmic->desc[id].name = palmas_regs_info[id].name;
pmic->desc[id].id = id;
- if (id != PALMAS_REG_SMPS10) {
- pmic->desc[id].ops = &palmas_ops_smps;
- pmic->desc[id].n_voltages = PALMAS_SMPS_NUM_VOLTAGES;
- } else {
+ switch (id) {
+ case PALMAS_REG_SMPS10:
pmic->desc[id].n_voltages = PALMAS_SMPS10_NUM_VOLTAGES;
pmic->desc[id].ops = &palmas_ops_smps10;
pmic->desc[id].vsel_reg = PALMAS_SMPS10_CTRL;
pmic->desc[id].vsel_mask = SMPS10_VSEL;
- pmic->desc[id].enable_reg = PALMAS_SMPS10_STATUS;
+ pmic->desc[id].enable_reg =
+ PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
+ PALMAS_SMPS10_STATUS);
pmic->desc[id].enable_mask = SMPS10_BOOST_EN;
+ pmic->desc[id].min_uV = 3750000;
+ pmic->desc[id].uV_step = 1250000;
+ break;
+ default:
+ pmic->desc[id].ops = &palmas_ops_smps;
+ pmic->desc[id].n_voltages = PALMAS_SMPS_NUM_VOLTAGES;
}
pmic->desc[id].type = REGULATOR_VOLTAGE;
@@ -739,7 +752,8 @@ static __devinit int palmas_probe(struct platform_device *pdev)
pmic->desc[id].type = REGULATOR_VOLTAGE;
pmic->desc[id].owner = THIS_MODULE;
- pmic->desc[id].enable_reg = palmas_regs_info[id].ctrl_addr;
+ pmic->desc[id].enable_reg = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE,
+ palmas_regs_info[id].ctrl_addr);
pmic->desc[id].enable_mask = PALMAS_LDO1_CTRL_MODE_ACTIVE;
if (pdata && pdata->reg_data)
@@ -775,9 +789,6 @@ static __devinit int palmas_probe(struct platform_device *pdev)
err_unregister_regulator:
while (--id >= 0)
regulator_unregister(pmic->rdev[id]);
- kfree(pmic->rdev);
- kfree(pmic->desc);
- kfree(pmic);
return ret;
}
@@ -788,10 +799,6 @@ static int __devexit palmas_remove(struct platform_device *pdev)
for (id = 0; id < PALMAS_NUM_REGS; id++)
regulator_unregister(pmic->rdev[id]);
-
- kfree(pmic->rdev);
- kfree(pmic->desc);
- kfree(pmic);
return 0;
}
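SMPS10 now relies on the core linear helpers instead of the removed palmas_list_voltage_smps10(). A standalone sketch of the assumed helper arithmetic (uV = min_uV + selector * uV_step) shows it reproduces the old 3750000 + selector * 1250000 mapping:

#include <stdio.h>

/* Assumed core behaviour of regulator_list_voltage_linear(). */
static int list_voltage_linear(int min_uV, int uV_step, unsigned int selector)
{
	return min_uV + selector * uV_step;
}

int main(void)
{
	unsigned int sel;

	/* SMPS10: min_uV = 3750000, uV_step = 1250000, as set in probe above */
	for (sel = 0; sel < 4; sel++)
		printf("SMPS10 sel %u -> %d uV\n", sel,
		       list_voltage_linear(3750000, 1250000, sel));
	return 0;
}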
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index 8211101..68777ac 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -18,80 +18,80 @@
#include <linux/regulator/machine.h>
#include <linux/mfd/ezx-pcap.h>
-static const u16 V1_table[] = {
- 2775, 1275, 1600, 1725, 1825, 1925, 2075, 2275,
+static const unsigned int V1_table[] = {
+ 2775000, 1275000, 1600000, 1725000, 1825000, 1925000, 2075000, 2275000,
};
-static const u16 V2_table[] = {
- 2500, 2775,
+static const unsigned int V2_table[] = {
+ 2500000, 2775000,
};
-static const u16 V3_table[] = {
- 1075, 1275, 1550, 1725, 1876, 1950, 2075, 2275,
+static const unsigned int V3_table[] = {
+ 1075000, 1275000, 1550000, 1725000, 1876000, 1950000, 2075000, 2275000,
};
-static const u16 V4_table[] = {
- 1275, 1550, 1725, 1875, 1950, 2075, 2275, 2775,
+static const unsigned int V4_table[] = {
+ 1275000, 1550000, 1725000, 1875000, 1950000, 2075000, 2275000, 2775000,
};
-static const u16 V5_table[] = {
- 1875, 2275, 2475, 2775,
+static const unsigned int V5_table[] = {
+ 1875000, 2275000, 2475000, 2775000,
};
-static const u16 V6_table[] = {
- 2475, 2775,
+static const unsigned int V6_table[] = {
+ 2475000, 2775000,
};
-static const u16 V7_table[] = {
- 1875, 2775,
+static const unsigned int V7_table[] = {
+ 1875000, 2775000,
};
#define V8_table V4_table
-static const u16 V9_table[] = {
- 1575, 1875, 2475, 2775,
+static const unsigned int V9_table[] = {
+ 1575000, 1875000, 2475000, 2775000,
};
-static const u16 V10_table[] = {
- 5000,
+static const unsigned int V10_table[] = {
+ 5000000,
};
-static const u16 VAUX1_table[] = {
- 1875, 2475, 2775, 3000,
+static const unsigned int VAUX1_table[] = {
+ 1875000, 2475000, 2775000, 3000000,
};
#define VAUX2_table VAUX1_table
-static const u16 VAUX3_table[] = {
- 1200, 1200, 1200, 1200, 1400, 1600, 1800, 2000,
- 2200, 2400, 2600, 2800, 3000, 3200, 3400, 3600,
+static const unsigned int VAUX3_table[] = {
+ 1200000, 1200000, 1200000, 1200000, 1400000, 1600000, 1800000, 2000000,
+ 2200000, 2400000, 2600000, 2800000, 3000000, 3200000, 3400000, 3600000,
};
-static const u16 VAUX4_table[] = {
- 1800, 1800, 3000, 5000,
+static const unsigned int VAUX4_table[] = {
+ 1800000, 1800000, 3000000, 5000000,
};
-static const u16 VSIM_table[] = {
- 1875, 3000,
+static const unsigned int VSIM_table[] = {
+ 1875000, 3000000,
};
-static const u16 VSIM2_table[] = {
- 1875,
+static const unsigned int VSIM2_table[] = {
+ 1875000,
};
-static const u16 VVIB_table[] = {
- 1300, 1800, 2000, 3000,
+static const unsigned int VVIB_table[] = {
+ 1300000, 1800000, 2000000, 3000000,
};
-static const u16 SW1_table[] = {
- 900, 950, 1000, 1050, 1100, 1150, 1200, 1250,
- 1300, 1350, 1400, 1450, 1500, 1600, 1875, 2250,
+static const unsigned int SW1_table[] = {
+ 900000, 950000, 1000000, 1050000, 1100000, 1150000, 1200000, 1250000,
+ 1300000, 1350000, 1400000, 1450000, 1500000, 1600000, 1875000, 2250000,
};
#define SW2_table SW1_table
-static const u16 SW3_table[] = {
- 4000, 4500, 5000, 5500,
+static const unsigned int SW3_table[] = {
+ 4000000, 4500000, 5000000, 5500000,
};
struct pcap_regulator {
@@ -100,8 +100,6 @@ struct pcap_regulator {
const u8 index;
const u8 stby;
const u8 lowpwr;
- const u8 n_voltages;
- const u16 *voltage_table;
};
#define NA 0xff
@@ -113,8 +111,6 @@ struct pcap_regulator {
.index = _index, \
.stby = _stby, \
.lowpwr = _lowpwr, \
- .n_voltages = ARRAY_SIZE(_vreg##_table), \
- .voltage_table = _vreg##_table, \
}
static struct pcap_regulator vreg_table[] = {
@@ -157,11 +153,11 @@ static int pcap_regulator_set_voltage_sel(struct regulator_dev *rdev,
void *pcap = rdev_get_drvdata(rdev);
/* the regulator doesn't support voltage switching */
- if (vreg->n_voltages == 1)
+ if (rdev->desc->n_voltages == 1)
return -EINVAL;
return ezx_pcap_set_bits(pcap, vreg->reg,
- (vreg->n_voltages - 1) << vreg->index,
+ (rdev->desc->n_voltages - 1) << vreg->index,
selector << vreg->index);
}
@@ -171,11 +167,11 @@ static int pcap_regulator_get_voltage_sel(struct regulator_dev *rdev)
void *pcap = rdev_get_drvdata(rdev);
u32 tmp;
- if (vreg->n_voltages == 1)
+ if (rdev->desc->n_voltages == 1)
return 0;
ezx_pcap_read(pcap, vreg->reg, &tmp);
- tmp = ((tmp >> vreg->index) & (vreg->n_voltages - 1));
+ tmp = ((tmp >> vreg->index) & (rdev->desc->n_voltages - 1));
return tmp;
}
@@ -214,16 +210,8 @@ static int pcap_regulator_is_enabled(struct regulator_dev *rdev)
return (tmp >> vreg->en) & 1;
}
-static int pcap_regulator_list_voltage(struct regulator_dev *rdev,
- unsigned int index)
-{
- struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
-
- return vreg->voltage_table[index] * 1000;
-}
-
static struct regulator_ops pcap_regulator_ops = {
- .list_voltage = pcap_regulator_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.set_voltage_sel = pcap_regulator_set_voltage_sel,
.get_voltage_sel = pcap_regulator_get_voltage_sel,
.enable = pcap_regulator_enable,
@@ -236,6 +224,7 @@ static struct regulator_ops pcap_regulator_ops = {
.name = #_vreg, \
.id = _vreg, \
.n_voltages = ARRAY_SIZE(_vreg##_table), \
+ .volt_table = _vreg##_table, \
.ops = &pcap_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
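The pcap tables move from mV to uV because regulator_list_voltage_table() is assumed to return desc->volt_table[selector] directly after a bounds check. A standalone sketch of that lookup, reusing the V1 values above:

#include <stdio.h>

static const unsigned int V1_table[] = {
	2775000, 1275000, 1600000, 1725000, 1825000, 1925000, 2075000, 2275000,
};

/* Assumed table-helper behaviour: bounds check, then index the table. */
static int list_voltage_table(const unsigned int *table, unsigned int n,
			      unsigned int selector)
{
	if (selector >= n)
		return -1;	/* -EINVAL in the kernel */
	return table[selector];
}

int main(void)
{
	unsigned int n = sizeof(V1_table) / sizeof(V1_table[0]);

	printf("V1 sel 0 -> %d uV\n", list_voltage_table(V1_table, n, 0));
	printf("V1 sel 9 -> %d (out of range)\n",
	       list_voltage_table(V1_table, n, 9));
	return 0;
}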
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index 3c9d14c..092e5cb 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -100,13 +100,12 @@ static unsigned int ldo_voltage_value(u8 bits)
return 900 + (bits * 100);
}
-static int pcf50633_regulator_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV,
- unsigned *selector)
+static int pcf50633_regulator_map_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
{
struct pcf50633 *pcf;
int regulator_id, millivolts;
- u8 volt_bits, regnr;
+ u8 volt_bits;
pcf = rdev_get_drvdata(rdev);
@@ -116,15 +115,11 @@ static int pcf50633_regulator_set_voltage(struct regulator_dev *rdev,
millivolts = min_uV / 1000;
- regnr = rdev->desc->vsel_reg;
-
switch (regulator_id) {
case PCF50633_REGULATOR_AUTO:
volt_bits = auto_voltage_bits(millivolts);
break;
case PCF50633_REGULATOR_DOWN1:
- volt_bits = down_voltage_bits(millivolts);
- break;
case PCF50633_REGULATOR_DOWN2:
volt_bits = down_voltage_bits(millivolts);
break;
@@ -142,9 +137,7 @@ static int pcf50633_regulator_set_voltage(struct regulator_dev *rdev,
return -EINVAL;
}
- *selector = volt_bits;
-
- return pcf50633_reg_write(pcf, regnr, volt_bits);
+ return volt_bits;
}
static int pcf50633_regulator_list_voltage(struct regulator_dev *rdev,
@@ -159,8 +152,6 @@ static int pcf50633_regulator_list_voltage(struct regulator_dev *rdev,
millivolts = auto_voltage_value(index);
break;
case PCF50633_REGULATOR_DOWN1:
- millivolts = down_voltage_value(index);
- break;
case PCF50633_REGULATOR_DOWN2:
millivolts = down_voltage_value(index);
break;
@@ -182,9 +173,10 @@ static int pcf50633_regulator_list_voltage(struct regulator_dev *rdev,
}
static struct regulator_ops pcf50633_regulator_ops = {
- .set_voltage = pcf50633_regulator_set_voltage,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = pcf50633_regulator_list_voltage,
+ .map_voltage = pcf50633_regulator_map_voltage,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
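With the pcf50633 conversion, voltage requests are assumed to be resolved in two steps by the core: map_voltage() turns a uV range into a selector, and regulator_set_voltage_sel_regmap() writes it to vsel_reg. A standalone sketch of that split for the LDO formula shown above (900 mV plus 100 mV per step); the round-up in the inverse is an assumption of this sketch, not the driver's exact code:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Shown in the driver: LDO output in mV for a given selector. */
static unsigned int ldo_voltage_value(unsigned char bits)
{
	return 900 + (bits * 100);
}

/* Illustrative inverse standing in for map_voltage(): lowest selector that
 * still reaches min_mV. */
static int ldo_map_voltage(int min_mV)
{
	if (min_mV <= 900)
		return 0;
	return DIV_ROUND_UP(min_mV - 900, 100);
}

int main(void)
{
	int sel = ldo_map_voltage(1800);

	/* The core would now hand 'sel' to set_voltage_sel(), here the
	 * generic regmap write of vsel_reg/vsel_mask. */
	printf("1800 mV -> selector %d -> %u mV\n", sel, ldo_voltage_value(sel));
	return 0;
}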
diff --git a/drivers/regulator/rc5t583-regulator.c b/drivers/regulator/rc5t583-regulator.c
index 1d34e64..8bf4e8c9 100644
--- a/drivers/regulator/rc5t583-regulator.c
+++ b/drivers/regulator/rc5t583-regulator.c
@@ -42,7 +42,6 @@ struct rc5t583_regulator_info {
/* Regulator specific turn-on delay and voltage settling time*/
int enable_uv_per_us;
- int change_uv_per_us;
/* Used by regulator core */
struct regulator_desc desc;
@@ -66,25 +65,6 @@ static int rc5t583_regulator_enable_time(struct regulator_dev *rdev)
return DIV_ROUND_UP(curr_uV, reg->reg_info->enable_uv_per_us);
}
-static int rc5t583_set_voltage_time_sel(struct regulator_dev *rdev,
- unsigned int old_selector, unsigned int new_selector)
-{
- struct rc5t583_regulator *reg = rdev_get_drvdata(rdev);
- int old_uV, new_uV;
- old_uV = regulator_list_voltage_linear(rdev, old_selector);
-
- if (old_uV < 0)
- return old_uV;
-
- new_uV = regulator_list_voltage_linear(rdev, new_selector);
- if (new_uV < 0)
- return new_uV;
-
- return DIV_ROUND_UP(abs(old_uV - new_uV),
- reg->reg_info->change_uv_per_us);
-}
-
-
static struct regulator_ops rc5t583_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
@@ -94,7 +74,7 @@ static struct regulator_ops rc5t583_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
- .set_voltage_time_sel = rc5t583_set_voltage_time_sel,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
};
#define RC5T583_REG(_id, _en_reg, _en_bit, _disc_reg, _disc_bit, \
@@ -104,7 +84,6 @@ static struct regulator_ops rc5t583_ops = {
.disc_bit = _disc_bit, \
.deepsleep_reg = RC5T583_REG_##_id##DAC_DS, \
.enable_uv_per_us = _enable_mv * 1000, \
- .change_uv_per_us = 40 * 1000, \
.deepsleep_id = RC5T583_DS_##_id, \
.desc = { \
.name = "rc5t583-regulator-"#_id, \
@@ -119,6 +98,7 @@ static struct regulator_ops rc5t583_ops = {
.enable_mask = BIT(_en_bit), \
.min_uV = _min_mv * 1000, \
.uV_step = _step_uV, \
+ .ramp_delay = 40 * 1000, \
}, \
}
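rc5t583 drops its private settle-time helper in favour of regulator_set_voltage_time_sel() and a .ramp_delay of 40 mV/us. The core helper is assumed to compute the same thing the removed code did, roughly ceil(|delta uV| / ramp_delay) microseconds with ramp_delay in uV/us:

#include <stdio.h>
#include <stdlib.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Assumed core behaviour, mirroring the helper removed above. */
static int set_voltage_time(int old_uV, int new_uV, int ramp_delay_uV_per_us)
{
	return DIV_ROUND_UP(abs(new_uV - old_uV), ramp_delay_uV_per_us);
}

int main(void)
{
	/* rc5t583: 40 mV/us, so a 900 mV step settles in 23 us */
	printf("%d us\n", set_voltage_time(600000, 1500000, 40 * 1000));
	return 0;
}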
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
new file mode 100644
index 0000000..4669dc9
--- /dev/null
+++ b/drivers/regulator/s2mps11.c
@@ -0,0 +1,363 @@
+/*
+ * s2mps11.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/samsung/core.h>
+#include <linux/mfd/samsung/s2mps11.h>
+
+struct s2mps11_info {
+ struct regulator_dev **rdev;
+
+ int ramp_delay2;
+ int ramp_delay34;
+ int ramp_delay5;
+ int ramp_delay16;
+ int ramp_delay7810;
+ int ramp_delay9;
+
+ bool buck6_ramp;
+ bool buck2_ramp;
+ bool buck3_ramp;
+ bool buck4_ramp;
+};
+
+static int get_ramp_delay(int ramp_delay)
+{
+ unsigned char cnt = 0;
+
+ ramp_delay /= 6;
+
+ while (true) {
+ ramp_delay = ramp_delay >> 1;
+ if (ramp_delay == 0)
+ break;
+ cnt++;
+ }
+ return cnt;
+}
+
+static struct regulator_ops s2mps11_ldo_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static struct regulator_ops s2mps11_buck_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+#define regulator_desc_ldo1(num) { \
+ .name = "LDO"#num, \
+ .id = S2MPS11_LDO##num, \
+ .ops = &s2mps11_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS11_LDO_MIN, \
+ .uV_step = S2MPS11_LDO_STEP1, \
+ .n_voltages = S2MPS11_LDO_N_VOLTAGES, \
+ .vsel_reg = S2MPS11_REG_L1CTRL + num - 1, \
+ .vsel_mask = S2MPS11_LDO_VSEL_MASK, \
+ .enable_reg = S2MPS11_REG_L1CTRL + num - 1, \
+ .enable_mask = S2MPS11_ENABLE_MASK \
+}
+#define regulator_desc_ldo2(num) { \
+ .name = "LDO"#num, \
+ .id = S2MPS11_LDO##num, \
+ .ops = &s2mps11_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS11_LDO_MIN, \
+ .uV_step = S2MPS11_LDO_STEP2, \
+ .n_voltages = S2MPS11_LDO_N_VOLTAGES, \
+ .vsel_reg = S2MPS11_REG_L1CTRL + num - 1, \
+ .vsel_mask = S2MPS11_LDO_VSEL_MASK, \
+ .enable_reg = S2MPS11_REG_L1CTRL + num - 1, \
+ .enable_mask = S2MPS11_ENABLE_MASK \
+}
+
+#define regulator_desc_buck1_4(num) { \
+ .name = "BUCK"#num, \
+ .id = S2MPS11_BUCK##num, \
+ .ops = &s2mps11_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS11_BUCK_MIN1, \
+ .uV_step = S2MPS11_BUCK_STEP1, \
+ .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
+ .vsel_reg = S2MPS11_REG_B1CTRL2 + (num - 1) * 2, \
+ .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS11_REG_B1CTRL1 + (num - 1) * 2, \
+ .enable_mask = S2MPS11_ENABLE_MASK \
+}
+
+#define regulator_desc_buck5 { \
+ .name = "BUCK5", \
+ .id = S2MPS11_BUCK5, \
+ .ops = &s2mps11_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS11_BUCK_MIN1, \
+ .uV_step = S2MPS11_BUCK_STEP1, \
+ .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
+ .vsel_reg = S2MPS11_REG_B5CTRL2, \
+ .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS11_REG_B5CTRL1, \
+ .enable_mask = S2MPS11_ENABLE_MASK \
+}
+
+#define regulator_desc_buck6_8(num) { \
+ .name = "BUCK"#num, \
+ .id = S2MPS11_BUCK##num, \
+ .ops = &s2mps11_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS11_BUCK_MIN1, \
+ .uV_step = S2MPS11_BUCK_STEP1, \
+ .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
+ .vsel_reg = S2MPS11_REG_B6CTRL2 + (num - 6) * 2, \
+ .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS11_REG_B6CTRL1 + (num - 6) * 2, \
+ .enable_mask = S2MPS11_ENABLE_MASK \
+}
+
+#define regulator_desc_buck9 { \
+ .name = "BUCK9", \
+ .id = S2MPS11_BUCK9, \
+ .ops = &s2mps11_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS11_BUCK_MIN3, \
+ .uV_step = S2MPS11_BUCK_STEP3, \
+ .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
+ .vsel_reg = S2MPS11_REG_B9CTRL2, \
+ .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS11_REG_B9CTRL1, \
+ .enable_mask = S2MPS11_ENABLE_MASK \
+}
+
+#define regulator_desc_buck10 { \
+ .name = "BUCK10", \
+ .id = S2MPS11_BUCK10, \
+ .ops = &s2mps11_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS11_BUCK_MIN2, \
+ .uV_step = S2MPS11_BUCK_STEP2, \
+ .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
+ .vsel_reg = S2MPS11_REG_B9CTRL2, \
+ .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS11_REG_B9CTRL1, \
+ .enable_mask = S2MPS11_ENABLE_MASK \
+}
+
+static struct regulator_desc regulators[] = {
+ regulator_desc_ldo2(1),
+ regulator_desc_ldo1(2),
+ regulator_desc_ldo1(3),
+ regulator_desc_ldo1(4),
+ regulator_desc_ldo1(5),
+ regulator_desc_ldo2(6),
+ regulator_desc_ldo1(7),
+ regulator_desc_ldo1(8),
+ regulator_desc_ldo1(9),
+ regulator_desc_ldo1(10),
+ regulator_desc_ldo2(11),
+ regulator_desc_ldo1(12),
+ regulator_desc_ldo1(13),
+ regulator_desc_ldo1(14),
+ regulator_desc_ldo1(15),
+ regulator_desc_ldo1(16),
+ regulator_desc_ldo1(17),
+ regulator_desc_ldo1(18),
+ regulator_desc_ldo1(19),
+ regulator_desc_ldo1(20),
+ regulator_desc_ldo1(21),
+ regulator_desc_ldo2(22),
+ regulator_desc_ldo2(23),
+ regulator_desc_ldo1(24),
+ regulator_desc_ldo1(25),
+ regulator_desc_ldo1(26),
+ regulator_desc_ldo2(27),
+ regulator_desc_ldo1(28),
+ regulator_desc_ldo1(29),
+ regulator_desc_ldo1(30),
+ regulator_desc_ldo1(31),
+ regulator_desc_ldo1(32),
+ regulator_desc_ldo1(33),
+ regulator_desc_ldo1(34),
+ regulator_desc_ldo1(35),
+ regulator_desc_ldo1(36),
+ regulator_desc_ldo1(37),
+ regulator_desc_ldo1(38),
+ regulator_desc_buck1_4(1),
+ regulator_desc_buck1_4(2),
+ regulator_desc_buck1_4(3),
+ regulator_desc_buck1_4(4),
+ regulator_desc_buck5,
+ regulator_desc_buck6_8(6),
+ regulator_desc_buck6_8(7),
+ regulator_desc_buck6_8(8),
+ regulator_desc_buck9,
+ regulator_desc_buck10,
+};
+
+static __devinit int s2mps11_pmic_probe(struct platform_device *pdev)
+{
+ struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct regulator_config config = { };
+ struct regulator_dev **rdev;
+ struct s2mps11_info *s2mps11;
+ int i, ret, size;
+ unsigned char ramp_enable, ramp_reg = 0;
+
+ if (!pdata) {
+ dev_err(pdev->dev.parent, "Platform data not supplied\n");
+ return -ENODEV;
+ }
+
+ s2mps11 = devm_kzalloc(&pdev->dev, sizeof(struct s2mps11_info),
+ GFP_KERNEL);
+ if (!s2mps11)
+ return -ENOMEM;
+
+ size = sizeof(struct regulator_dev *) * S2MPS11_REGULATOR_MAX;
+ s2mps11->rdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!s2mps11->rdev) {
+ return -ENOMEM;
+ }
+
+ rdev = s2mps11->rdev;
+ platform_set_drvdata(pdev, s2mps11);
+
+ s2mps11->ramp_delay2 = pdata->buck2_ramp_delay;
+ s2mps11->ramp_delay34 = pdata->buck34_ramp_delay;
+ s2mps11->ramp_delay5 = pdata->buck5_ramp_delay;
+ s2mps11->ramp_delay16 = pdata->buck16_ramp_delay;
+ s2mps11->ramp_delay7810 = pdata->buck7810_ramp_delay;
+ s2mps11->ramp_delay9 = pdata->buck9_ramp_delay;
+
+ s2mps11->buck6_ramp = pdata->buck6_ramp_enable;
+ s2mps11->buck2_ramp = pdata->buck2_ramp_enable;
+ s2mps11->buck3_ramp = pdata->buck3_ramp_enable;
+ s2mps11->buck4_ramp = pdata->buck4_ramp_enable;
+
+ ramp_enable = (s2mps11->buck2_ramp << 3) | (s2mps11->buck3_ramp << 2) |
+ (s2mps11->buck4_ramp << 1) | s2mps11->buck6_ramp ;
+
+ if (ramp_enable) {
+ if (s2mps11->buck2_ramp)
+ ramp_reg |= get_ramp_delay(s2mps11->ramp_delay2) >> 6;
+ if (s2mps11->buck3_ramp || s2mps11->buck4_ramp)
+ ramp_reg |= get_ramp_delay(s2mps11->ramp_delay34) >> 4;
+ sec_reg_write(iodev, S2MPS11_REG_RAMP, ramp_reg | ramp_enable);
+ }
+
+ ramp_reg &= 0x00;
+ ramp_reg |= get_ramp_delay(s2mps11->ramp_delay5) >> 6;
+ ramp_reg |= get_ramp_delay(s2mps11->ramp_delay16) >> 4;
+ ramp_reg |= get_ramp_delay(s2mps11->ramp_delay7810) >> 2;
+ ramp_reg |= get_ramp_delay(s2mps11->ramp_delay9);
+ sec_reg_write(iodev, S2MPS11_REG_RAMP_BUCK, ramp_reg);
+
+ for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
+
+ config.dev = &pdev->dev;
+ config.regmap = iodev->regmap;
+ config.init_data = pdata->regulators[i].initdata;
+ config.driver_data = s2mps11;
+
+ rdev[i] = regulator_register(&regulators[i], &config);
+ if (IS_ERR(rdev[i])) {
+ ret = PTR_ERR(rdev[i]);
+ dev_err(&pdev->dev, "regulator init failed for %d\n",
+ i);
+ rdev[i] = NULL;
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ for (i = 0; i < S2MPS11_REGULATOR_MAX; i++)
+ if (rdev[i])
+ regulator_unregister(rdev[i]);
+
+ return ret;
+}
+
+static int __devexit s2mps11_pmic_remove(struct platform_device *pdev)
+{
+ struct s2mps11_info *s2mps11 = platform_get_drvdata(pdev);
+ struct regulator_dev **rdev = s2mps11->rdev;
+ int i;
+
+ for (i = 0; i < S2MPS11_REGULATOR_MAX; i++)
+ if (rdev[i])
+ regulator_unregister(rdev[i]);
+
+ return 0;
+}
+
+static const struct platform_device_id s2mps11_pmic_id[] = {
+ { "s2mps11-pmic", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(platform, s2mps11_pmic_id);
+
+static struct platform_driver s2mps11_pmic_driver = {
+ .driver = {
+ .name = "s2mps11-pmic",
+ .owner = THIS_MODULE,
+ },
+ .probe = s2mps11_pmic_probe,
+ .remove = __devexit_p(s2mps11_pmic_remove),
+ .id_table = s2mps11_pmic_id,
+};
+
+static int __init s2mps11_pmic_init(void)
+{
+ return platform_driver_register(&s2mps11_pmic_driver);
+}
+subsys_initcall(s2mps11_pmic_init);
+
+static void __exit s2mps11_pmic_exit(void)
+{
+ platform_driver_unregister(&s2mps11_pmic_driver);
+}
+module_exit(s2mps11_pmic_exit);
+
+/* Module information */
+MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
+MODULE_DESCRIPTION("SAMSUNG S2MPS11 Regulator Driver");
+MODULE_LICENSE("GPL");
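get_ramp_delay() packs a ramp rate in mV/us into a 2-bit register field as floor(log2(rate / 6)), so the supported 6/12/25/50 mV/us settings encode to 0..3; where each field sits inside S2MPS11_REG_RAMP and S2MPS11_REG_RAMP_BUCK follows the datasheet layout. A standalone check of the encoding:

#include <stdio.h>

/* Same logic as get_ramp_delay() above: floor(log2(rate / 6)). */
static unsigned char encode_ramp(int ramp_delay)
{
	unsigned char cnt = 0;

	ramp_delay /= 6;
	while (ramp_delay >>= 1)
		cnt++;
	return cnt;
}

int main(void)
{
	int rates[] = { 6, 12, 25, 50 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%d mV/us -> field %u\n", rates[i], encode_ramp(rates[i]));
	return 0;
}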
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 290d6fc..102287f 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -41,6 +41,7 @@ struct s5m8767_info {
u8 buck3_vol[8];
u8 buck4_vol[8];
int buck_gpios[3];
+ int buck_ds[3];
int buck_gpioindex;
};
@@ -120,27 +121,6 @@ static const struct s5m_voltage_desc *reg_voltage_map[] = {
[S5M8767_BUCK9] = &buck_voltage_val3,
};
-static int s5m8767_list_voltage(struct regulator_dev *rdev,
- unsigned int selector)
-{
- const struct s5m_voltage_desc *desc;
- int reg_id = rdev_get_id(rdev);
- int val;
-
- if (reg_id >= ARRAY_SIZE(reg_voltage_map) || reg_id < 0)
- return -EINVAL;
-
- desc = reg_voltage_map[reg_id];
- if (desc == NULL)
- return -EINVAL;
-
- val = desc->min + desc->step * selector;
- if (val > desc->max)
- return -EINVAL;
-
- return val;
-}
-
static unsigned int s5m8767_opmode_reg[][4] = {
/* {OFF, ON, LOWPOWER, SUSPEND} */
/* LDO1 ... LDO28 */
@@ -283,17 +263,17 @@ static int s5m8767_get_voltage_register(struct regulator_dev *rdev, int *_reg)
reg = S5M8767_REG_BUCK1CTRL2;
break;
case S5M8767_BUCK2:
- reg = S5M8767_REG_BUCK2DVS1;
+ reg = S5M8767_REG_BUCK2DVS2;
if (s5m8767->buck2_gpiodvs)
reg += s5m8767->buck_gpioindex;
break;
case S5M8767_BUCK3:
- reg = S5M8767_REG_BUCK3DVS1;
+ reg = S5M8767_REG_BUCK3DVS2;
if (s5m8767->buck3_gpiodvs)
reg += s5m8767->buck_gpioindex;
break;
case S5M8767_BUCK4:
- reg = S5M8767_REG_BUCK4DVS1;
+ reg = S5M8767_REG_BUCK4DVS2;
if (s5m8767->buck4_gpiodvs)
reg += s5m8767->buck_gpioindex;
break;
@@ -357,32 +337,34 @@ static int s5m8767_convert_voltage_to_sel(
return selector;
}
-static inline void s5m8767_set_high(struct s5m8767_info *s5m8767)
+static inline int s5m8767_set_high(struct s5m8767_info *s5m8767)
{
int temp_index = s5m8767->buck_gpioindex;
gpio_set_value(s5m8767->buck_gpios[0], (temp_index >> 2) & 0x1);
gpio_set_value(s5m8767->buck_gpios[1], (temp_index >> 1) & 0x1);
gpio_set_value(s5m8767->buck_gpios[2], temp_index & 0x1);
+
+ return 0;
}
-static inline void s5m8767_set_low(struct s5m8767_info *s5m8767)
+static inline int s5m8767_set_low(struct s5m8767_info *s5m8767)
{
int temp_index = s5m8767->buck_gpioindex;
gpio_set_value(s5m8767->buck_gpios[2], temp_index & 0x1);
gpio_set_value(s5m8767->buck_gpios[1], (temp_index >> 1) & 0x1);
gpio_set_value(s5m8767->buck_gpios[0], (temp_index >> 2) & 0x1);
+
+ return 0;
}
-static int s5m8767_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV, unsigned *selector)
+static int s5m8767_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned selector)
{
struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
- const struct s5m_voltage_desc *desc;
int reg_id = rdev_get_id(rdev);
- int sel, reg, mask, ret = 0, old_index, index = 0;
- u8 val;
+ int reg, mask, ret = 0, old_index, index = 0;
u8 *buck234_vol = NULL;
switch (reg_id) {
@@ -407,15 +389,9 @@ static int s5m8767_set_voltage(struct regulator_dev *rdev,
return -EINVAL;
}
- desc = reg_voltage_map[reg_id];
-
- sel = s5m8767_convert_voltage_to_sel(desc, min_uV, max_uV);
- if (sel < 0)
- return sel;
-
/* buck234_vol != NULL means to control buck234 voltage via DVS GPIO */
if (buck234_vol) {
- while (*buck234_vol != sel) {
+ while (*buck234_vol != selector) {
buck234_vol++;
index++;
}
@@ -423,22 +399,16 @@ static int s5m8767_set_voltage(struct regulator_dev *rdev,
s5m8767->buck_gpioindex = index;
if (index > old_index)
- s5m8767_set_high(s5m8767);
+ return s5m8767_set_high(s5m8767);
else
- s5m8767_set_low(s5m8767);
+ return s5m8767_set_low(s5m8767);
} else {
ret = s5m8767_get_voltage_register(rdev, &reg);
if (ret)
return ret;
- s5m_reg_read(s5m8767->iodev, reg, &val);
- val = (val & ~mask) | sel;
-
- ret = s5m_reg_write(s5m8767->iodev, reg, val);
+ return s5m_reg_update(s5m8767->iodev, reg, selector, mask);
}
-
- *selector = sel;
- return ret;
}
static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev,
@@ -451,22 +421,28 @@ static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev,
desc = reg_voltage_map[reg_id];
- if (old_sel < new_sel)
+ if ((old_sel < new_sel) && s5m8767->ramp_delay)
return DIV_ROUND_UP(desc->step * (new_sel - old_sel),
s5m8767->ramp_delay * 1000);
return 0;
}
static struct regulator_ops s5m8767_ops = {
- .list_voltage = s5m8767_list_voltage,
+ .list_voltage = regulator_list_voltage_linear,
.is_enabled = s5m8767_reg_is_enabled,
.enable = s5m8767_reg_enable,
.disable = s5m8767_reg_disable,
.get_voltage_sel = s5m8767_get_voltage_sel,
- .set_voltage = s5m8767_set_voltage,
+ .set_voltage_sel = s5m8767_set_voltage_sel,
.set_voltage_time_sel = s5m8767_set_voltage_time_sel,
};
+static struct regulator_ops s5m8767_buck78_ops = {
+ .is_enabled = s5m8767_reg_is_enabled,
+ .enable = s5m8767_reg_enable,
+ .disable = s5m8767_reg_disable,
+};
+
#define s5m8767_regulator_desc(_name) { \
.name = #_name, \
.id = S5M8767_##_name, \
@@ -475,6 +451,14 @@ static struct regulator_ops s5m8767_ops = {
.owner = THIS_MODULE, \
}
+#define s5m8767_regulator_buck78_desc(_name) { \
+ .name = #_name, \
+ .id = S5M8767_##_name, \
+ .ops = &s5m8767_buck78_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+}
+
static struct regulator_desc regulators[] = {
s5m8767_regulator_desc(LDO1),
s5m8767_regulator_desc(LDO2),
@@ -510,8 +494,8 @@ static struct regulator_desc regulators[] = {
s5m8767_regulator_desc(BUCK4),
s5m8767_regulator_desc(BUCK5),
s5m8767_regulator_desc(BUCK6),
- s5m8767_regulator_desc(BUCK7),
- s5m8767_regulator_desc(BUCK8),
+ s5m8767_regulator_buck78_desc(BUCK7),
+ s5m8767_regulator_buck78_desc(BUCK8),
s5m8767_regulator_desc(BUCK9),
};
@@ -522,7 +506,7 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
struct regulator_config config = { };
struct regulator_dev **rdev;
struct s5m8767_info *s5m8767;
- int i, ret, size;
+ int i, ret, size, buck_init;
if (!pdata) {
dev_err(pdev->dev.parent, "Platform data not supplied\n");
@@ -573,12 +557,37 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
s5m8767->buck_gpios[0] = pdata->buck_gpios[0];
s5m8767->buck_gpios[1] = pdata->buck_gpios[1];
s5m8767->buck_gpios[2] = pdata->buck_gpios[2];
+ s5m8767->buck_ds[0] = pdata->buck_ds[0];
+ s5m8767->buck_ds[1] = pdata->buck_ds[1];
+ s5m8767->buck_ds[2] = pdata->buck_ds[2];
+
s5m8767->ramp_delay = pdata->buck_ramp_delay;
s5m8767->buck2_ramp = pdata->buck2_ramp_enable;
s5m8767->buck3_ramp = pdata->buck3_ramp_enable;
s5m8767->buck4_ramp = pdata->buck4_ramp_enable;
s5m8767->opmode = pdata->opmode;
+ buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
+ pdata->buck2_init,
+ pdata->buck2_init +
+ buck_voltage_val2.step);
+
+ s5m_reg_write(s5m8767->iodev, S5M8767_REG_BUCK2DVS2, buck_init);
+
+ buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
+ pdata->buck3_init,
+ pdata->buck3_init +
+ buck_voltage_val2.step);
+
+ s5m_reg_write(s5m8767->iodev, S5M8767_REG_BUCK3DVS2, buck_init);
+
+ buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
+ pdata->buck4_init,
+ pdata->buck4_init +
+ buck_voltage_val2.step);
+
+ s5m_reg_write(s5m8767->iodev, S5M8767_REG_BUCK4DVS2, buck_init);
+
for (i = 0; i < 8; i++) {
if (s5m8767->buck2_gpiodvs) {
s5m8767->buck2_vol[i] =
@@ -608,48 +617,70 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
}
}
- if (pdata->buck2_gpiodvs || pdata->buck3_gpiodvs ||
- pdata->buck4_gpiodvs) {
- if (gpio_is_valid(pdata->buck_gpios[0]) &&
- gpio_is_valid(pdata->buck_gpios[1]) &&
- gpio_is_valid(pdata->buck_gpios[2])) {
- ret = gpio_request(pdata->buck_gpios[0],
- "S5M8767 SET1");
- if (ret == -EBUSY)
- dev_warn(&pdev->dev, "Duplicated gpio request for SET1\n");
-
- ret = gpio_request(pdata->buck_gpios[1],
- "S5M8767 SET2");
- if (ret == -EBUSY)
- dev_warn(&pdev->dev, "Duplicated gpio request for SET2\n");
-
- ret = gpio_request(pdata->buck_gpios[2],
- "S5M8767 SET3");
- if (ret == -EBUSY)
- dev_warn(&pdev->dev, "Duplicated gpio request for SET3\n");
- /* SET1 GPIO */
- gpio_direction_output(pdata->buck_gpios[0],
- (s5m8767->buck_gpioindex >> 2) & 0x1);
- /* SET2 GPIO */
- gpio_direction_output(pdata->buck_gpios[1],
- (s5m8767->buck_gpioindex >> 1) & 0x1);
- /* SET3 GPIO */
- gpio_direction_output(pdata->buck_gpios[2],
- (s5m8767->buck_gpioindex >> 0) & 0x1);
- ret = 0;
- } else {
- dev_err(&pdev->dev, "GPIO NOT VALID\n");
- ret = -EINVAL;
+ if (gpio_is_valid(pdata->buck_gpios[0]) &&
+ gpio_is_valid(pdata->buck_gpios[1]) &&
+ gpio_is_valid(pdata->buck_gpios[2])) {
+ ret = devm_gpio_request(&pdev->dev, pdata->buck_gpios[0],
+ "S5M8767 SET1");
+ if (ret)
return ret;
- }
+
+ ret = devm_gpio_request(&pdev->dev, pdata->buck_gpios[1],
+ "S5M8767 SET2");
+ if (ret)
+ return ret;
+
+ ret = devm_gpio_request(&pdev->dev, pdata->buck_gpios[2],
+ "S5M8767 SET3");
+ if (ret)
+ return ret;
+
+ /* SET1 GPIO */
+ gpio_direction_output(pdata->buck_gpios[0],
+ (s5m8767->buck_gpioindex >> 2) & 0x1);
+ /* SET2 GPIO */
+ gpio_direction_output(pdata->buck_gpios[1],
+ (s5m8767->buck_gpioindex >> 1) & 0x1);
+ /* SET3 GPIO */
+ gpio_direction_output(pdata->buck_gpios[2],
+ (s5m8767->buck_gpioindex >> 0) & 0x1);
+ } else {
+ dev_err(&pdev->dev, "GPIO NOT VALID\n");
+ ret = -EINVAL;
+ return ret;
}
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK2CTRL,
- (pdata->buck2_gpiodvs) ? (1 << 1) : (0 << 1), 1 << 1);
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK3CTRL,
- (pdata->buck3_gpiodvs) ? (1 << 1) : (0 << 1), 1 << 1);
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK4CTRL,
- (pdata->buck4_gpiodvs) ? (1 << 1) : (0 << 1), 1 << 1);
+ ret = devm_gpio_request(&pdev->dev, pdata->buck_ds[0], "S5M8767 DS2");
+ if (ret)
+ return ret;
+
+ ret = devm_gpio_request(&pdev->dev, pdata->buck_ds[1], "S5M8767 DS3");
+ if (ret)
+ return ret;
+
+ ret = devm_gpio_request(&pdev->dev, pdata->buck_ds[2], "S5M8767 DS4");
+ if (ret)
+ return ret;
+
+ /* DS2 GPIO */
+ gpio_direction_output(pdata->buck_ds[0], 0x0);
+ /* DS3 GPIO */
+ gpio_direction_output(pdata->buck_ds[1], 0x0);
+ /* DS4 GPIO */
+ gpio_direction_output(pdata->buck_ds[2], 0x0);
+
+ if (pdata->buck2_gpiodvs || pdata->buck3_gpiodvs ||
+ pdata->buck4_gpiodvs) {
+ s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK2CTRL,
+ (pdata->buck2_gpiodvs) ? (1 << 1) : (0 << 1),
+ 1 << 1);
+ s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK3CTRL,
+ (pdata->buck3_gpiodvs) ? (1 << 1) : (0 << 1),
+ 1 << 1);
+ s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK4CTRL,
+ (pdata->buck4_gpiodvs) ? (1 << 1) : (0 << 1),
+ 1 << 1);
+ }
/* Initialize GPIO DVS registers */
for (i = 0; i < 8; i++) {
@@ -668,9 +699,6 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
s5m8767->buck4_vol[i]);
}
}
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK2CTRL, 0x78, 0xff);
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK3CTRL, 0x58, 0xff);
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK4CTRL, 0x78, 0xff);
if (s5m8767->buck2_ramp)
s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP, 0x08, 0x08);
@@ -684,9 +712,13 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
if (s5m8767->buck2_ramp || s5m8767->buck3_ramp
|| s5m8767->buck4_ramp) {
switch (s5m8767->ramp_delay) {
- case 15:
+ case 5:
s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
- 0xc0, 0xf0);
+ 0x40, 0xf0);
+ break;
+ case 10:
+ s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
+ 0x90, 0xf0);
break;
case 25:
s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
@@ -711,9 +743,12 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
int id = pdata->regulators[i].id;
desc = reg_voltage_map[id];
- if (desc)
+ if (desc) {
regulators[id].n_voltages =
(desc->max - desc->min) / desc->step + 1;
+ regulators[id].min_uV = desc->min;
+ regulators[id].uV_step = desc->step;
+ }
config.dev = s5m8767->dev;
config.init_data = pdata->regulators[i].initdata;
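For the GPIO-DVS bucks, the selected DVS slot is a 3-bit buck_gpioindex driven out on the SET pins by s5m8767_set_high()/s5m8767_set_low(); the patch additionally seeds the DVS2 registers from buckX_init and rebases the BUCK2/3/4 voltage-register lookup on DVS2. A standalone sketch of the index-to-pin mapping, following the bit assignments in the functions above:

#include <stdio.h>

/* SET1 = bit 2, SET2 = bit 1, SET3 = bit 0 of buck_gpioindex. */
static void show_dvs_pins(int index)
{
	printf("index %d -> SET1=%d SET2=%d SET3=%d\n", index,
	       (index >> 2) & 0x1, (index >> 1) & 0x1, index & 0x1);
}

int main(void)
{
	int i;

	for (i = 0; i < 8; i++)
		show_dvs_pins(i);
	return 0;
}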
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c
index d840d84..1378409 100644
--- a/drivers/regulator/tps6105x-regulator.c
+++ b/drivers/regulator/tps6105x-regulator.c
@@ -20,7 +20,7 @@
#include <linux/mfd/core.h>
#include <linux/mfd/tps6105x.h>
-static const int tps6105x_voltages[] = {
+static const unsigned int tps6105x_voltages[] = {
4500000,
5000000,
5250000,
@@ -105,22 +105,13 @@ static int tps6105x_regulator_set_voltage_sel(struct regulator_dev *rdev,
return 0;
}
-static int tps6105x_regulator_list_voltage(struct regulator_dev *rdev,
- unsigned selector)
-{
- if (selector >= ARRAY_SIZE(tps6105x_voltages))
- return -EINVAL;
-
- return tps6105x_voltages[selector];
-}
-
static struct regulator_ops tps6105x_regulator_ops = {
.enable = tps6105x_regulator_enable,
.disable = tps6105x_regulator_disable,
.is_enabled = tps6105x_regulator_is_enabled,
.get_voltage_sel = tps6105x_regulator_get_voltage_sel,
.set_voltage_sel = tps6105x_regulator_set_voltage_sel,
- .list_voltage = tps6105x_regulator_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
};
static const struct regulator_desc tps6105x_regulator_desc = {
@@ -130,6 +121,7 @@ static const struct regulator_desc tps6105x_regulator_desc = {
.id = 0,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(tps6105x_voltages),
+ .volt_table = tps6105x_voltages,
};
/*
diff --git a/drivers/regulator/tps62360-regulator.c b/drivers/regulator/tps62360-regulator.c
index e534269..68729a7 100644
--- a/drivers/regulator/tps62360-regulator.c
+++ b/drivers/regulator/tps62360-regulator.c
@@ -65,10 +65,8 @@ struct tps62360_chip {
struct regulator_desc desc;
struct regulator_dev *rdev;
struct regmap *regmap;
- int chip_id;
int vsel0_gpio;
int vsel1_gpio;
- int voltage_base;
u8 voltage_reg_mask;
bool en_internal_pulldn;
bool en_discharge;
@@ -76,7 +74,6 @@ struct tps62360_chip {
int lru_index[4];
int curr_vset_vsel[4];
int curr_vset_id;
- int change_uv_per_us;
};
/*
@@ -175,23 +172,6 @@ static int tps62360_dcdc_set_voltage_sel(struct regulator_dev *dev,
return 0;
}
-static int tps62360_set_voltage_time_sel(struct regulator_dev *rdev,
- unsigned int old_selector, unsigned int new_selector)
-{
- struct tps62360_chip *tps = rdev_get_drvdata(rdev);
- int old_uV, new_uV;
-
- old_uV = regulator_list_voltage_linear(rdev, old_selector);
- if (old_uV < 0)
- return old_uV;
-
- new_uV = regulator_list_voltage_linear(rdev, new_selector);
- if (new_uV < 0)
- return new_uV;
-
- return DIV_ROUND_UP(abs(old_uV - new_uV), tps->change_uv_per_us);
-}
-
static int tps62360_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct tps62360_chip *tps = rdev_get_drvdata(rdev);
@@ -258,7 +238,7 @@ static struct regulator_ops tps62360_dcdc_ops = {
.set_voltage_sel = tps62360_dcdc_set_voltage_sel,
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
- .set_voltage_time_sel = tps62360_set_voltage_time_sel,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_mode = tps62360_set_mode,
.get_mode = tps62360_get_mode,
};
@@ -301,7 +281,7 @@ static int __devinit tps62360_init_dcdc(struct tps62360_chip *tps,
ramp_ctrl = (ramp_ctrl >> 4) & 0x7;
/* ramp mV/us = 32/(2^ramp_ctrl) */
- tps->change_uv_per_us = DIV_ROUND_UP(32000, BIT(ramp_ctrl));
+ tps->desc.ramp_delay = DIV_ROUND_UP(32000, BIT(ramp_ctrl));
return ret;
}
@@ -408,13 +388,13 @@ static int __devinit tps62360_probe(struct i2c_client *client,
switch (chip_id) {
case TPS62360:
case TPS62362:
- tps->voltage_base = TPS62360_BASE_VOLTAGE;
+ tps->desc.min_uV = TPS62360_BASE_VOLTAGE;
tps->voltage_reg_mask = 0x3F;
tps->desc.n_voltages = TPS62360_N_VOLTAGES;
break;
case TPS62361:
case TPS62363:
- tps->voltage_base = TPS62361_BASE_VOLTAGE;
+ tps->desc.min_uV = TPS62361_BASE_VOLTAGE;
tps->voltage_reg_mask = 0x7F;
tps->desc.n_voltages = TPS62361_N_VOLTAGES;
break;
@@ -427,7 +407,6 @@ static int __devinit tps62360_probe(struct i2c_client *client,
tps->desc.ops = &tps62360_dcdc_ops;
tps->desc.type = REGULATOR_VOLTAGE;
tps->desc.owner = THIS_MODULE;
- tps->desc.min_uV = tps->voltage_base;
tps->desc.uV_step = 10000;
tps->regmap = devm_regmap_init_i2c(client, &tps62360_regmap_config);
@@ -449,24 +428,24 @@ static int __devinit tps62360_probe(struct i2c_client *client,
int gpio_flags;
gpio_flags = (pdata->vsel0_def_state) ?
GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
- ret = gpio_request_one(tps->vsel0_gpio,
+ ret = devm_gpio_request_one(&client->dev, tps->vsel0_gpio,
gpio_flags, "tps62360-vsel0");
if (ret) {
dev_err(&client->dev,
"%s(): Could not obtain vsel0 GPIO %d: %d\n",
__func__, tps->vsel0_gpio, ret);
- goto err_gpio0;
+ return ret;
}
gpio_flags = (pdata->vsel1_def_state) ?
GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
- ret = gpio_request_one(tps->vsel1_gpio,
+ ret = devm_gpio_request_one(&client->dev, tps->vsel1_gpio,
gpio_flags, "tps62360-vsel1");
if (ret) {
dev_err(&client->dev,
"%s(): Could not obtain vsel1 GPIO %d: %d\n",
__func__, tps->vsel1_gpio, ret);
- goto err_gpio1;
+ return ret;
}
tps->valid_gpios = true;
@@ -484,7 +463,7 @@ static int __devinit tps62360_probe(struct i2c_client *client,
if (ret < 0) {
dev_err(tps->dev, "%s(): Init failed with err = %d\n",
__func__, ret);
- goto err_init;
+ return ret;
}
config.dev = &client->dev;
@@ -498,21 +477,11 @@ static int __devinit tps62360_probe(struct i2c_client *client,
dev_err(tps->dev,
"%s(): regulator register failed with err %s\n",
__func__, id->name);
- ret = PTR_ERR(rdev);
- goto err_init;
+ return PTR_ERR(rdev);
}
tps->rdev = rdev;
return 0;
-
-err_init:
- if (gpio_is_valid(tps->vsel1_gpio))
- gpio_free(tps->vsel1_gpio);
-err_gpio1:
- if (gpio_is_valid(tps->vsel0_gpio))
- gpio_free(tps->vsel0_gpio);
-err_gpio0:
- return ret;
}
/**
@@ -525,12 +494,6 @@ static int __devexit tps62360_remove(struct i2c_client *client)
{
struct tps62360_chip *tps = i2c_get_clientdata(client);
- if (gpio_is_valid(tps->vsel1_gpio))
- gpio_free(tps->vsel1_gpio);
-
- if (gpio_is_valid(tps->vsel0_gpio))
- gpio_free(tps->vsel0_gpio);
-
regulator_unregister(tps->rdev);
return 0;
}
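tps62360 now feeds its measured ramp rate to the core: the 3-bit RAMPCTRL field divides the 32 mV/us base rate by 2^ramp_ctrl, and the result is stored in desc.ramp_delay (uV/us) for regulator_set_voltage_time_sel(). A standalone sketch of that table:

#include <stdio.h>

#define BIT(n)			(1U << (n))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int ramp_ctrl;

	/* As in tps62360_init_dcdc(): ramp mV/us = 32 / 2^ramp_ctrl,
	 * stored as uV/us. */
	for (ramp_ctrl = 0; ramp_ctrl <= 7; ramp_ctrl++)
		printf("ramp_ctrl %u -> %u uV/us\n", ramp_ctrl,
		       DIV_ROUND_UP(32000u, BIT(ramp_ctrl)));
	return 0;
}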
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index f841bd0..6998d57 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -69,10 +69,6 @@
#define TPS65023_REG_CTRL2_DCDC1 BIT(1)
#define TPS65023_REG_CTRL2_DCDC3 BIT(0)
-/* LDO_CTRL bitfields */
-#define TPS65023_LDO_CTRL_LDOx_SHIFT(ldo_id) ((ldo_id)*4)
-#define TPS65023_LDO_CTRL_LDOx_MASK(ldo_id) (0x0F << ((ldo_id)*4))
-
/* Number of step-down converters available */
#define TPS65023_NUM_DCDC 3
/* Number of LDO voltage regulators available */
@@ -91,48 +87,53 @@
#define TPS65023_MAX_REG_ID TPS65023_LDO_2
/* Supported voltage values for regulators */
-static const u16 VCORE_VSEL_table[] = {
- 800, 825, 850, 875,
- 900, 925, 950, 975,
- 1000, 1025, 1050, 1075,
- 1100, 1125, 1150, 1175,
- 1200, 1225, 1250, 1275,
- 1300, 1325, 1350, 1375,
- 1400, 1425, 1450, 1475,
- 1500, 1525, 1550, 1600,
+static const unsigned int VCORE_VSEL_table[] = {
+ 800000, 825000, 850000, 875000,
+ 900000, 925000, 950000, 975000,
+ 1000000, 1025000, 1050000, 1075000,
+ 1100000, 1125000, 1150000, 1175000,
+ 1200000, 1225000, 1250000, 1275000,
+ 1300000, 1325000, 1350000, 1375000,
+ 1400000, 1425000, 1450000, 1475000,
+ 1500000, 1525000, 1550000, 1600000,
+};
+
+static const unsigned int DCDC_FIXED_3300000_VSEL_table[] = {
+ 3300000,
+};
+
+static const unsigned int DCDC_FIXED_1800000_VSEL_table[] = {
+ 1800000,
};
/* Supported voltage values for LDO regulators for tps65020 */
-static const u16 TPS65020_LDO1_VSEL_table[] = {
- 1000, 1050, 1100, 1300,
- 1800, 2500, 3000, 3300,
+static const unsigned int TPS65020_LDO1_VSEL_table[] = {
+ 1000000, 1050000, 1100000, 1300000,
+ 1800000, 2500000, 3000000, 3300000,
};
-static const u16 TPS65020_LDO2_VSEL_table[] = {
- 1000, 1050, 1100, 1300,
- 1800, 2500, 3000, 3300,
+static const unsigned int TPS65020_LDO2_VSEL_table[] = {
+ 1000000, 1050000, 1100000, 1300000,
+ 1800000, 2500000, 3000000, 3300000,
};
/* Supported voltage values for LDO regulators
* for tps65021 and tps65023 */
-static const u16 TPS65023_LDO1_VSEL_table[] = {
- 1000, 1100, 1300, 1800,
- 2200, 2600, 2800, 3150,
+static const unsigned int TPS65023_LDO1_VSEL_table[] = {
+ 1000000, 1100000, 1300000, 1800000,
+ 2200000, 2600000, 2800000, 3150000,
};
-static const u16 TPS65023_LDO2_VSEL_table[] = {
- 1050, 1200, 1300, 1800,
- 2500, 2800, 3000, 3300,
+static const unsigned int TPS65023_LDO2_VSEL_table[] = {
+ 1050000, 1200000, 1300000, 1800000,
+ 2500000, 2800000, 3000000, 3300000,
};
/* Regulator specific details */
struct tps_info {
const char *name;
- unsigned min_uV;
- unsigned max_uV;
- bool fixed;
u8 table_len;
- const u16 *table;
+ const unsigned int *table;
};
/* PMIC details */
@@ -150,7 +151,7 @@ struct tps_driver_data {
u8 core_regulator;
};
-static int tps65023_dcdc_get_voltage(struct regulator_dev *dev)
+static int tps65023_dcdc_get_voltage_sel(struct regulator_dev *dev)
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
int ret;
@@ -164,9 +165,9 @@ static int tps65023_dcdc_get_voltage(struct regulator_dev *dev)
if (ret != 0)
return ret;
data &= (tps->info[dcdc]->table_len - 1);
- return tps->info[dcdc]->table[data] * 1000;
+ return data;
} else
- return tps->info[dcdc]->min_uV;
+ return 0;
}
static int tps65023_dcdc_set_voltage_sel(struct regulator_dev *dev,
@@ -193,76 +194,14 @@ out:
return ret;
}
-static int tps65023_ldo_get_voltage(struct regulator_dev *dev)
-{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
- int data, ldo = rdev_get_id(dev);
- int ret;
-
- if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
- return -EINVAL;
-
- ret = regmap_read(tps->regmap, TPS65023_REG_LDO_CTRL, &data);
- if (ret != 0)
- return ret;
-
- data >>= (TPS65023_LDO_CTRL_LDOx_SHIFT(ldo - TPS65023_LDO_1));
- data &= (tps->info[ldo]->table_len - 1);
- return tps->info[ldo]->table[data] * 1000;
-}
-
-static int tps65023_ldo_set_voltage_sel(struct regulator_dev *dev,
- unsigned selector)
-{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
- int ldo_index = rdev_get_id(dev) - TPS65023_LDO_1;
-
- return regmap_update_bits(tps->regmap, TPS65023_REG_LDO_CTRL,
- TPS65023_LDO_CTRL_LDOx_MASK(ldo_index),
- selector << TPS65023_LDO_CTRL_LDOx_SHIFT(ldo_index));
-}
-
-static int tps65023_dcdc_list_voltage(struct regulator_dev *dev,
- unsigned selector)
-{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
- int dcdc = rdev_get_id(dev);
-
- if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
- return -EINVAL;
-
- if (dcdc == tps->core_regulator) {
- if (selector >= tps->info[dcdc]->table_len)
- return -EINVAL;
- else
- return tps->info[dcdc]->table[selector] * 1000;
- } else
- return tps->info[dcdc]->min_uV;
-}
-
-static int tps65023_ldo_list_voltage(struct regulator_dev *dev,
- unsigned selector)
-{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
- int ldo = rdev_get_id(dev);
-
- if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
- return -EINVAL;
-
- if (selector >= tps->info[ldo]->table_len)
- return -EINVAL;
- else
- return tps->info[ldo]->table[selector] * 1000;
-}
-
/* Operations permitted on VDCDCx */
static struct regulator_ops tps65023_dcdc_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
- .get_voltage = tps65023_dcdc_get_voltage,
+ .get_voltage_sel = tps65023_dcdc_get_voltage_sel,
.set_voltage_sel = tps65023_dcdc_set_voltage_sel,
- .list_voltage = tps65023_dcdc_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
};
/* Operations permitted on LDOx */
@@ -270,9 +209,9 @@ static struct regulator_ops tps65023_ldo_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
- .get_voltage = tps65023_ldo_get_voltage,
- .set_voltage_sel = tps65023_ldo_set_voltage_sel,
- .list_voltage = tps65023_ldo_list_voltage,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_table,
};
static struct regmap_config tps65023_regmap_config = {
@@ -325,19 +264,28 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
tps->desc[i].name = info->name;
tps->desc[i].id = i;
tps->desc[i].n_voltages = info->table_len;
+ tps->desc[i].volt_table = info->table;
tps->desc[i].ops = (i > TPS65023_DCDC_3 ?
&tps65023_ldo_ops : &tps65023_dcdc_ops);
tps->desc[i].type = REGULATOR_VOLTAGE;
tps->desc[i].owner = THIS_MODULE;
tps->desc[i].enable_reg = TPS65023_REG_REG_CTRL;
- if (i == TPS65023_LDO_1)
+ switch (i) {
+ case TPS65023_LDO_1:
+ tps->desc[i].vsel_reg = TPS65023_REG_LDO_CTRL;
+ tps->desc[i].vsel_mask = 0x07;
tps->desc[i].enable_mask = 1 << 1;
- else if (i == TPS65023_LDO_2)
+ break;
+ case TPS65023_LDO_2:
+ tps->desc[i].vsel_reg = TPS65023_REG_LDO_CTRL;
+ tps->desc[i].vsel_mask = 0x70;
tps->desc[i].enable_mask = 1 << 2;
- else /* DCDCx */
+ break;
+ default: /* DCDCx */
tps->desc[i].enable_mask =
1 << (TPS65023_NUM_REGULATOR - i);
+ }
config.dev = &client->dev;
config.init_data = init_data;
@@ -384,35 +332,26 @@ static int __devexit tps_65023_remove(struct i2c_client *client)
static const struct tps_info tps65020_regs[] = {
{
.name = "VDCDC1",
- .min_uV = 3300000,
- .max_uV = 3300000,
- .fixed = 1,
+ .table_len = ARRAY_SIZE(DCDC_FIXED_3300000_VSEL_table),
+ .table = DCDC_FIXED_3300000_VSEL_table,
},
{
.name = "VDCDC2",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .fixed = 1,
+ .table_len = ARRAY_SIZE(DCDC_FIXED_1800000_VSEL_table),
+ .table = DCDC_FIXED_1800000_VSEL_table,
},
{
.name = "VDCDC3",
- .min_uV = 800000,
- .max_uV = 1600000,
.table_len = ARRAY_SIZE(VCORE_VSEL_table),
.table = VCORE_VSEL_table,
},
-
{
.name = "LDO1",
- .min_uV = 1000000,
- .max_uV = 3150000,
.table_len = ARRAY_SIZE(TPS65020_LDO1_VSEL_table),
.table = TPS65020_LDO1_VSEL_table,
},
{
.name = "LDO2",
- .min_uV = 1050000,
- .max_uV = 3300000,
.table_len = ARRAY_SIZE(TPS65020_LDO2_VSEL_table),
.table = TPS65020_LDO2_VSEL_table,
},
@@ -421,34 +360,26 @@ static const struct tps_info tps65020_regs[] = {
static const struct tps_info tps65021_regs[] = {
{
.name = "VDCDC1",
- .min_uV = 3300000,
- .max_uV = 3300000,
- .fixed = 1,
+ .table_len = ARRAY_SIZE(DCDC_FIXED_3300000_VSEL_table),
+ .table = DCDC_FIXED_3300000_VSEL_table,
},
{
.name = "VDCDC2",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .fixed = 1,
+ .table_len = ARRAY_SIZE(DCDC_FIXED_1800000_VSEL_table),
+ .table = DCDC_FIXED_1800000_VSEL_table,
},
{
.name = "VDCDC3",
- .min_uV = 800000,
- .max_uV = 1600000,
.table_len = ARRAY_SIZE(VCORE_VSEL_table),
.table = VCORE_VSEL_table,
},
{
.name = "LDO1",
- .min_uV = 1000000,
- .max_uV = 3150000,
.table_len = ARRAY_SIZE(TPS65023_LDO1_VSEL_table),
.table = TPS65023_LDO1_VSEL_table,
},
{
.name = "LDO2",
- .min_uV = 1050000,
- .max_uV = 3300000,
.table_len = ARRAY_SIZE(TPS65023_LDO2_VSEL_table),
.table = TPS65023_LDO2_VSEL_table,
},
@@ -457,34 +388,26 @@ static const struct tps_info tps65021_regs[] = {
static const struct tps_info tps65023_regs[] = {
{
.name = "VDCDC1",
- .min_uV = 800000,
- .max_uV = 1600000,
.table_len = ARRAY_SIZE(VCORE_VSEL_table),
.table = VCORE_VSEL_table,
},
{
.name = "VDCDC2",
- .min_uV = 3300000,
- .max_uV = 3300000,
- .fixed = 1,
+ .table_len = ARRAY_SIZE(DCDC_FIXED_3300000_VSEL_table),
+ .table = DCDC_FIXED_3300000_VSEL_table,
},
{
.name = "VDCDC3",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .fixed = 1,
+ .table_len = ARRAY_SIZE(DCDC_FIXED_1800000_VSEL_table),
+ .table = DCDC_FIXED_1800000_VSEL_table,
},
{
.name = "LDO1",
- .min_uV = 1000000,
- .max_uV = 3150000,
.table_len = ARRAY_SIZE(TPS65023_LDO1_VSEL_table),
.table = TPS65023_LDO1_VSEL_table,
},
{
.name = "LDO2",
- .min_uV = 1050000,
- .max_uV = 3300000,
.table_len = ARRAY_SIZE(TPS65023_LDO2_VSEL_table),
.table = TPS65023_LDO2_VSEL_table,
},
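The tps65023 LDOs now use the generic regmap selector helpers with vsel_reg = TPS65023_REG_LDO_CTRL and masks 0x07/0x70. The helpers are assumed to shift by the lowest set bit of vsel_mask, which reproduces the removed TPS65023_LDO_CTRL_LDOx_SHIFT/MASK arithmetic; a standalone sketch:

#include <stdio.h>

/* Assumed regmap-helper behaviour: selector = (reg & mask) >> ffs(mask)-1. */
static unsigned int get_sel(unsigned int reg_val, unsigned int mask)
{
	unsigned int shift = 0;

	while (!(mask & (1u << shift)))
		shift++;
	return (reg_val & mask) >> shift;
}

int main(void)
{
	unsigned int ldo_ctrl = 0x53;	/* example LDO_CTRL contents */

	printf("LDO1 sel %u, LDO2 sel %u\n",
	       get_sel(ldo_ctrl, 0x07), get_sel(ldo_ctrl, 0x70));
	return 0;
}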
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index da38be1..07d01cc 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -43,58 +43,40 @@
/* Number of total regulators available */
#define TPS6507X_NUM_REGULATOR (TPS6507X_NUM_DCDC + TPS6507X_NUM_LDO)
-/* Supported voltage values for regulators (in milliVolts) */
-static const u16 VDCDCx_VSEL_table[] = {
- 725, 750, 775, 800,
- 825, 850, 875, 900,
- 925, 950, 975, 1000,
- 1025, 1050, 1075, 1100,
- 1125, 1150, 1175, 1200,
- 1225, 1250, 1275, 1300,
- 1325, 1350, 1375, 1400,
- 1425, 1450, 1475, 1500,
- 1550, 1600, 1650, 1700,
- 1750, 1800, 1850, 1900,
- 1950, 2000, 2050, 2100,
- 2150, 2200, 2250, 2300,
- 2350, 2400, 2450, 2500,
- 2550, 2600, 2650, 2700,
- 2750, 2800, 2850, 2900,
- 3000, 3100, 3200, 3300,
+/* Supported voltage values for regulators (in microVolts) */
+static const unsigned int VDCDCx_VSEL_table[] = {
+ 725000, 750000, 775000, 800000,
+ 825000, 850000, 875000, 900000,
+ 925000, 950000, 975000, 1000000,
+ 1025000, 1050000, 1075000, 1100000,
+ 1125000, 1150000, 1175000, 1200000,
+ 1225000, 1250000, 1275000, 1300000,
+ 1325000, 1350000, 1375000, 1400000,
+ 1425000, 1450000, 1475000, 1500000,
+ 1550000, 1600000, 1650000, 1700000,
+ 1750000, 1800000, 1850000, 1900000,
+ 1950000, 2000000, 2050000, 2100000,
+ 2150000, 2200000, 2250000, 2300000,
+ 2350000, 2400000, 2450000, 2500000,
+ 2550000, 2600000, 2650000, 2700000,
+ 2750000, 2800000, 2850000, 2900000,
+ 3000000, 3100000, 3200000, 3300000,
};
-static const u16 LDO1_VSEL_table[] = {
- 1000, 1100, 1200, 1250,
- 1300, 1350, 1400, 1500,
- 1600, 1800, 2500, 2750,
- 2800, 3000, 3100, 3300,
+static const unsigned int LDO1_VSEL_table[] = {
+ 1000000, 1100000, 1200000, 1250000,
+ 1300000, 1350000, 1400000, 1500000,
+ 1600000, 1800000, 2500000, 2750000,
+ 2800000, 3000000, 3100000, 3300000,
};
-static const u16 LDO2_VSEL_table[] = {
- 725, 750, 775, 800,
- 825, 850, 875, 900,
- 925, 950, 975, 1000,
- 1025, 1050, 1075, 1100,
- 1125, 1150, 1175, 1200,
- 1225, 1250, 1275, 1300,
- 1325, 1350, 1375, 1400,
- 1425, 1450, 1475, 1500,
- 1550, 1600, 1650, 1700,
- 1750, 1800, 1850, 1900,
- 1950, 2000, 2050, 2100,
- 2150, 2200, 2250, 2300,
- 2350, 2400, 2450, 2500,
- 2550, 2600, 2650, 2700,
- 2750, 2800, 2850, 2900,
- 3000, 3100, 3200, 3300,
-};
+/* The voltage mapping table for LDO2 is the same as VDCDCx */
+#define LDO2_VSEL_table VDCDCx_VSEL_table
struct tps_info {
const char *name;
- unsigned min_uV;
- unsigned max_uV;
u8 table_len;
- const u16 *table;
+ const unsigned int *table;
/* Does DCDC high or the low register defines output voltage? */
bool defdcdc_default;
@@ -103,36 +85,26 @@ struct tps_info {
static struct tps_info tps6507x_pmic_regs[] = {
{
.name = "VDCDC1",
- .min_uV = 725000,
- .max_uV = 3300000,
.table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
.table = VDCDCx_VSEL_table,
},
{
.name = "VDCDC2",
- .min_uV = 725000,
- .max_uV = 3300000,
.table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
.table = VDCDCx_VSEL_table,
},
{
.name = "VDCDC3",
- .min_uV = 725000,
- .max_uV = 3300000,
.table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
.table = VDCDCx_VSEL_table,
},
{
.name = "LDO1",
- .min_uV = 1000000,
- .max_uV = 3300000,
.table_len = ARRAY_SIZE(LDO1_VSEL_table),
.table = LDO1_VSEL_table,
},
{
.name = "LDO2",
- .min_uV = 725000,
- .max_uV = 3300000,
.table_len = ARRAY_SIZE(LDO2_VSEL_table),
.table = LDO2_VSEL_table,
},
@@ -375,28 +347,13 @@ static int tps6507x_pmic_set_voltage_sel(struct regulator_dev *dev,
return tps6507x_pmic_reg_write(tps, reg, data);
}
-static int tps6507x_pmic_list_voltage(struct regulator_dev *dev,
- unsigned selector)
-{
- struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
- int rid = rdev_get_id(dev);
-
- if (rid < TPS6507X_DCDC_1 || rid > TPS6507X_LDO_2)
- return -EINVAL;
-
- if (selector >= tps->info[rid]->table_len)
- return -EINVAL;
- else
- return tps->info[rid]->table[selector] * 1000;
-}
-
static struct regulator_ops tps6507x_pmic_ops = {
.is_enabled = tps6507x_pmic_is_enabled,
.enable = tps6507x_pmic_enable,
.disable = tps6507x_pmic_disable,
.get_voltage_sel = tps6507x_pmic_get_voltage_sel,
.set_voltage_sel = tps6507x_pmic_set_voltage_sel,
- .list_voltage = tps6507x_pmic_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
};
static __devinit int tps6507x_pmic_probe(struct platform_device *pdev)
@@ -449,6 +406,7 @@ static __devinit int tps6507x_pmic_probe(struct platform_device *pdev)
tps->desc[i].name = info->name;
tps->desc[i].id = i;
tps->desc[i].n_voltages = info->table_len;
+ tps->desc[i].volt_table = info->table;
tps->desc[i].ops = &tps6507x_pmic_ops;
tps->desc[i].type = REGULATOR_VOLTAGE;
tps->desc[i].owner = THIS_MODULE;
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index 9d371d2..6caa222 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -26,7 +26,7 @@
#include <linux/regulator/machine.h>
#include <linux/mfd/tps65217.h>
-#define TPS65217_REGULATOR(_name, _id, _ops, _n) \
+#define TPS65217_REGULATOR(_name, _id, _ops, _n, _vr, _vm, _em, _t) \
{ \
.name = _name, \
.id = _id, \
@@ -34,23 +34,23 @@
.n_voltages = _n, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
+ .vsel_reg = _vr, \
+ .vsel_mask = _vm, \
+ .enable_reg = TPS65217_REG_ENABLE, \
+ .enable_mask = _em, \
+ .volt_table = _t, \
} \
-#define TPS65217_INFO(_nm, _min, _max, _f1, _f2, _t, _n, _em, _vr, _vm) \
+#define TPS65217_INFO(_nm, _min, _max, _f1, _f2) \
{ \
.name = _nm, \
.min_uV = _min, \
.max_uV = _max, \
.vsel_to_uv = _f1, \
.uv_to_vsel = _f2, \
- .table = _t, \
- .table_len = _n, \
- .enable_mask = _em, \
- .set_vout_reg = _vr, \
- .set_vout_mask = _vm, \
}
-static const int LDO1_VSEL_table[] = {
+static const unsigned int LDO1_VSEL_table[] = {
1000000, 1100000, 1200000, 1250000,
1300000, 1350000, 1400000, 1500000,
1600000, 1800000, 2500000, 2750000,
@@ -78,7 +78,7 @@ static int tps65217_vsel_to_uv1(unsigned int vsel)
static int tps65217_uv_to_vsel1(int uV, unsigned int *vsel)
{
- if ((uV < 0) && (uV > 3300000))
+ if (uV < 0 || uV > 3300000)
return -EINVAL;
if (uV <= 1500000)
@@ -112,7 +112,7 @@ static int tps65217_vsel_to_uv2(unsigned int vsel)
static int tps65217_uv_to_vsel2(int uV, unsigned int *vsel)
{
- if ((uV < 0) && (uV > 3300000))
+ if (uV < 0 || uV > 3300000)
return -EINVAL;
if (uV <= 1900000)
@@ -127,46 +127,20 @@ static int tps65217_uv_to_vsel2(int uV, unsigned int *vsel)
static struct tps_info tps65217_pmic_regs[] = {
TPS65217_INFO("DCDC1", 900000, 1800000, tps65217_vsel_to_uv1,
- tps65217_uv_to_vsel1, NULL, 64, TPS65217_ENABLE_DC1_EN,
- TPS65217_REG_DEFDCDC1, TPS65217_DEFDCDCX_DCDC_MASK),
+ tps65217_uv_to_vsel1),
TPS65217_INFO("DCDC2", 900000, 3300000, tps65217_vsel_to_uv1,
- tps65217_uv_to_vsel1, NULL, 64, TPS65217_ENABLE_DC2_EN,
- TPS65217_REG_DEFDCDC2, TPS65217_DEFDCDCX_DCDC_MASK),
+ tps65217_uv_to_vsel1),
TPS65217_INFO("DCDC3", 900000, 1500000, tps65217_vsel_to_uv1,
- tps65217_uv_to_vsel1, NULL, 64, TPS65217_ENABLE_DC3_EN,
- TPS65217_REG_DEFDCDC3, TPS65217_DEFDCDCX_DCDC_MASK),
- TPS65217_INFO("LDO1", 1000000, 3300000, NULL, NULL, LDO1_VSEL_table,
- 16, TPS65217_ENABLE_LDO1_EN, TPS65217_REG_DEFLDO1,
- TPS65217_DEFLDO1_LDO1_MASK),
+ tps65217_uv_to_vsel1),
+ TPS65217_INFO("LDO1", 1000000, 3300000, NULL, NULL),
TPS65217_INFO("LDO2", 900000, 3300000, tps65217_vsel_to_uv1,
- tps65217_uv_to_vsel1, NULL, 64, TPS65217_ENABLE_LDO2_EN,
- TPS65217_REG_DEFLDO2, TPS65217_DEFLDO2_LDO2_MASK),
+ tps65217_uv_to_vsel1),
TPS65217_INFO("LDO3", 1800000, 3300000, tps65217_vsel_to_uv2,
- tps65217_uv_to_vsel2, NULL, 32,
- TPS65217_ENABLE_LS1_EN | TPS65217_DEFLDO3_LDO3_EN,
- TPS65217_REG_DEFLS1, TPS65217_DEFLDO3_LDO3_MASK),
+ tps65217_uv_to_vsel2),
TPS65217_INFO("LDO4", 1800000, 3300000, tps65217_vsel_to_uv2,
- tps65217_uv_to_vsel2, NULL, 32,
- TPS65217_ENABLE_LS2_EN | TPS65217_DEFLDO4_LDO4_EN,
- TPS65217_REG_DEFLS2, TPS65217_DEFLDO4_LDO4_MASK),
+ tps65217_uv_to_vsel2),
};
-static int tps65217_pmic_is_enabled(struct regulator_dev *dev)
-{
- int ret;
- struct tps65217 *tps = rdev_get_drvdata(dev);
- unsigned int data, rid = rdev_get_id(dev);
-
- if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4)
- return -EINVAL;
-
- ret = tps65217_reg_read(tps, TPS65217_REG_ENABLE, &data);
- if (ret)
- return ret;
-
- return (data & tps->info[rid]->enable_mask) ? 1 : 0;
-}
-
static int tps65217_pmic_enable(struct regulator_dev *dev)
{
struct tps65217 *tps = rdev_get_drvdata(dev);
@@ -177,9 +151,8 @@ static int tps65217_pmic_enable(struct regulator_dev *dev)
/* Enable the regulator; password protection is level 1 */
return tps65217_set_bits(tps, TPS65217_REG_ENABLE,
- tps->info[rid]->enable_mask,
- tps->info[rid]->enable_mask,
- TPS65217_PROTECT_L1);
+ dev->desc->enable_mask, dev->desc->enable_mask,
+ TPS65217_PROTECT_L1);
}
static int tps65217_pmic_disable(struct regulator_dev *dev)
@@ -192,25 +165,7 @@ static int tps65217_pmic_disable(struct regulator_dev *dev)
/* Disable the regulator; password protection is level 1 */
return tps65217_clear_bits(tps, TPS65217_REG_ENABLE,
- tps->info[rid]->enable_mask, TPS65217_PROTECT_L1);
-}
-
-static int tps65217_pmic_get_voltage_sel(struct regulator_dev *dev)
-{
- int ret;
- struct tps65217 *tps = rdev_get_drvdata(dev);
- unsigned int selector, rid = rdev_get_id(dev);
-
- if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4)
- return -EINVAL;
-
- ret = tps65217_reg_read(tps, tps->info[rid]->set_vout_reg, &selector);
- if (ret)
- return ret;
-
- selector &= tps->info[rid]->set_vout_mask;
-
- return selector;
+ dev->desc->enable_mask, TPS65217_PROTECT_L1);
}
static int tps65217_pmic_set_voltage_sel(struct regulator_dev *dev,
@@ -221,8 +176,7 @@ static int tps65217_pmic_set_voltage_sel(struct regulator_dev *dev,
unsigned int rid = rdev_get_id(dev);
/* Set the voltage from the vsel value; write protect level is 2 */
- ret = tps65217_set_bits(tps, tps->info[rid]->set_vout_reg,
- tps->info[rid]->set_vout_mask,
+ ret = tps65217_set_bits(tps, dev->desc->vsel_reg, dev->desc->vsel_mask,
selector, TPS65217_PROTECT_L2);
/* Set GO bit for DCDCx to initiate voltage transition */
@@ -252,10 +206,10 @@ static int tps65217_pmic_map_voltage(struct regulator_dev *dev,
if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4)
return -EINVAL;
- if (min_uV < tps->info[rid]->min_uV || min_uV > tps->info[rid]->max_uV)
- return -EINVAL;
+ if (min_uV < tps->info[rid]->min_uV)
+ min_uV = tps->info[rid]->min_uV;
- if (max_uV < tps->info[rid]->min_uV || max_uV > tps->info[rid]->max_uV)
+ if (max_uV < tps->info[rid]->min_uV || min_uV > tps->info[rid]->max_uV)
return -EINVAL;
ret = tps->info[rid]->uv_to_vsel(min_uV, &sel);
@@ -274,21 +228,18 @@ static int tps65217_pmic_list_voltage(struct regulator_dev *dev,
if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4)
return -EINVAL;
- if (selector >= tps->info[rid]->table_len)
+ if (selector >= dev->desc->n_voltages)
return -EINVAL;
- if (tps->info[rid]->table)
- return tps->info[rid]->table[selector];
-
return tps->info[rid]->vsel_to_uv(selector);
}
/* Operations permitted on DCDCx, LDO2, LDO3 and LDO4 */
static struct regulator_ops tps65217_pmic_ops = {
- .is_enabled = tps65217_pmic_is_enabled,
+ .is_enabled = regulator_is_enabled_regmap,
.enable = tps65217_pmic_enable,
.disable = tps65217_pmic_disable,
- .get_voltage_sel = tps65217_pmic_get_voltage_sel,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = tps65217_pmic_set_voltage_sel,
.list_voltage = tps65217_pmic_list_voltage,
.map_voltage = tps65217_pmic_map_voltage,
@@ -296,22 +247,38 @@ static struct regulator_ops tps65217_pmic_ops = {
/* Operations permitted on LDO1 */
static struct regulator_ops tps65217_pmic_ldo1_ops = {
- .is_enabled = tps65217_pmic_is_enabled,
+ .is_enabled = regulator_is_enabled_regmap,
.enable = tps65217_pmic_enable,
.disable = tps65217_pmic_disable,
- .get_voltage_sel = tps65217_pmic_get_voltage_sel,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = tps65217_pmic_set_voltage_sel,
- .list_voltage = tps65217_pmic_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
};
static const struct regulator_desc regulators[] = {
- TPS65217_REGULATOR("DCDC1", TPS65217_DCDC_1, tps65217_pmic_ops, 64),
- TPS65217_REGULATOR("DCDC2", TPS65217_DCDC_2, tps65217_pmic_ops, 64),
- TPS65217_REGULATOR("DCDC3", TPS65217_DCDC_3, tps65217_pmic_ops, 64),
- TPS65217_REGULATOR("LDO1", TPS65217_LDO_1, tps65217_pmic_ldo1_ops, 16),
- TPS65217_REGULATOR("LDO2", TPS65217_LDO_2, tps65217_pmic_ops, 64),
- TPS65217_REGULATOR("LDO3", TPS65217_LDO_3, tps65217_pmic_ops, 32),
- TPS65217_REGULATOR("LDO4", TPS65217_LDO_4, tps65217_pmic_ops, 32),
+ TPS65217_REGULATOR("DCDC1", TPS65217_DCDC_1, tps65217_pmic_ops, 64,
+ TPS65217_REG_DEFDCDC1, TPS65217_DEFDCDCX_DCDC_MASK,
+ TPS65217_ENABLE_DC1_EN, NULL),
+ TPS65217_REGULATOR("DCDC2", TPS65217_DCDC_2, tps65217_pmic_ops, 64,
+ TPS65217_REG_DEFDCDC2, TPS65217_DEFDCDCX_DCDC_MASK,
+ TPS65217_ENABLE_DC2_EN, NULL),
+ TPS65217_REGULATOR("DCDC3", TPS65217_DCDC_3, tps65217_pmic_ops, 64,
+ TPS65217_REG_DEFDCDC3, TPS65217_DEFDCDCX_DCDC_MASK,
+ TPS65217_ENABLE_DC3_EN, NULL),
+ TPS65217_REGULATOR("LDO1", TPS65217_LDO_1, tps65217_pmic_ldo1_ops, 16,
+ TPS65217_REG_DEFLDO1, TPS65217_DEFLDO1_LDO1_MASK,
+ TPS65217_ENABLE_LDO1_EN, LDO1_VSEL_table),
+ TPS65217_REGULATOR("LDO2", TPS65217_LDO_2, tps65217_pmic_ops, 64,
+ TPS65217_REG_DEFLDO2, TPS65217_DEFLDO2_LDO2_MASK,
+ TPS65217_ENABLE_LDO2_EN, NULL),
+ TPS65217_REGULATOR("LDO3", TPS65217_LDO_3, tps65217_pmic_ops, 32,
+ TPS65217_REG_DEFLS1, TPS65217_DEFLDO3_LDO3_MASK,
+ TPS65217_ENABLE_LS1_EN | TPS65217_DEFLDO3_LDO3_EN,
+ NULL),
+ TPS65217_REGULATOR("LDO4", TPS65217_LDO_4, tps65217_pmic_ops, 32,
+ TPS65217_REG_DEFLS2, TPS65217_DEFLDO4_LDO4_MASK,
+ TPS65217_ENABLE_LS2_EN | TPS65217_DEFLDO4_LDO4_EN,
+ NULL),
};
static int __devinit tps65217_regulator_probe(struct platform_device *pdev)
@@ -326,6 +293,7 @@ static int __devinit tps65217_regulator_probe(struct platform_device *pdev)
tps->info[pdev->id] = info;
config.dev = &pdev->dev;
+ config.of_node = pdev->dev.of_node;
config.init_data = pdev->dev.platform_data;
config.driver_data = tps;
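
Moving vsel_reg/vsel_mask and enable_reg/enable_mask into the regulator_desc is what allows the open-coded is_enabled/get_voltage_sel callbacks to be replaced by the generic regmap helpers (this assumes the driver hands its regmap to the core through the regulator_config, which is not visible in this hunk). Roughly, the helpers reduce to:

int regulator_is_enabled_regmap(struct regulator_dev *rdev)
{
        unsigned int val;
        int ret;

        ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val);
        if (ret != 0)
                return ret;

        return (val & rdev->desc->enable_mask) != 0;
}

int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev)
{
        unsigned int val;
        int ret;

        ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val);
        if (ret != 0)
                return ret;

        /* Strip and right-align the selector field */
        val &= rdev->desc->vsel_mask;
        val >>= ffs(rdev->desc->vsel_mask) - 1;

        return val;
}
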
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index b88b3df..947ece9 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -110,9 +110,6 @@
#define N_SWITCH 2
#define N_REGULATORS (N_DCDC + N_LDO + N_SWITCH)
-#define FIXED_ILIMSEL BIT(0)
-#define FIXED_VOLTAGE BIT(1)
-
#define CMD_READ(reg) ((reg) << 6)
#define CMD_WRITE(reg) (BIT(5) | (reg) << 6)
#define STAT_CLK BIT(3)
@@ -129,12 +126,9 @@ struct field {
struct supply_info {
const char *name;
int n_voltages;
- const int *voltages;
- int fixed_voltage;
+ const unsigned int *voltages;
int n_ilimsels;
- const int *ilimsels;
- int fixed_ilimsel;
- int flags;
+ const unsigned int *ilimsels;
struct field enable, voltage, ilimsel;
};
@@ -307,7 +301,7 @@ static int write_field(struct tps6524x *hw, const struct field *field,
val << field->shift);
}
-static const int dcdc1_voltages[] = {
+static const unsigned int dcdc1_voltages[] = {
800000, 825000, 850000, 875000,
900000, 925000, 950000, 975000,
1000000, 1025000, 1050000, 1075000,
@@ -318,7 +312,7 @@ static const int dcdc1_voltages[] = {
1500000, 1525000, 1550000, 1575000,
};
-static const int dcdc2_voltages[] = {
+static const unsigned int dcdc2_voltages[] = {
1400000, 1450000, 1500000, 1550000,
1600000, 1650000, 1700000, 1750000,
1800000, 1850000, 1900000, 1950000,
@@ -329,7 +323,7 @@ static const int dcdc2_voltages[] = {
2800000, 2850000, 2900000, 2950000,
};
-static const int dcdc3_voltages[] = {
+static const unsigned int dcdc3_voltages[] = {
2400000, 2450000, 2500000, 2550000, 2600000,
2650000, 2700000, 2750000, 2800000, 2850000,
2900000, 2950000, 3000000, 3050000, 3100000,
@@ -337,38 +331,54 @@ static const int dcdc3_voltages[] = {
3400000, 3450000, 3500000, 3550000, 3600000,
};
-static const int ldo1_voltages[] = {
+static const unsigned int ldo1_voltages[] = {
4300000, 4350000, 4400000, 4450000,
4500000, 4550000, 4600000, 4650000,
4700000, 4750000, 4800000, 4850000,
4900000, 4950000, 5000000, 5050000,
};
-static const int ldo2_voltages[] = {
+static const unsigned int ldo2_voltages[] = {
1100000, 1150000, 1200000, 1250000,
1300000, 1700000, 1750000, 1800000,
1850000, 1900000, 3150000, 3200000,
3250000, 3300000, 3350000, 3400000,
};
-static const int ldo_ilimsel[] = {
+static const unsigned int fixed_5000000_voltage[] = {
+ 5000000
+};
+
+static const unsigned int ldo_ilimsel[] = {
400000, 1500000
};
-static const int usb_ilimsel[] = {
+static const unsigned int usb_ilimsel[] = {
200000, 400000, 800000, 1000000
};
+static const unsigned int fixed_2400000_ilimsel[] = {
+ 2400000
+};
+
+static const unsigned int fixed_1200000_ilimsel[] = {
+ 1200000
+};
+
+static const unsigned int fixed_400000_ilimsel[] = {
+ 400000
+};
+
#define __MK_FIELD(_reg, _mask, _shift) \
{ .reg = (_reg), .mask = (_mask), .shift = (_shift), }
static const struct supply_info supply_info[N_REGULATORS] = {
{
.name = "DCDC1",
- .flags = FIXED_ILIMSEL,
.n_voltages = ARRAY_SIZE(dcdc1_voltages),
.voltages = dcdc1_voltages,
- .fixed_ilimsel = 2400000,
+ .n_ilimsels = ARRAY_SIZE(fixed_2400000_ilimsel),
+ .ilimsels = fixed_2400000_ilimsel,
.enable = __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK,
DCDCDCDC1_EN_SHIFT),
.voltage = __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK,
@@ -376,10 +386,10 @@ static const struct supply_info supply_info[N_REGULATORS] = {
},
{
.name = "DCDC2",
- .flags = FIXED_ILIMSEL,
.n_voltages = ARRAY_SIZE(dcdc2_voltages),
.voltages = dcdc2_voltages,
- .fixed_ilimsel = 1200000,
+ .n_ilimsels = ARRAY_SIZE(fixed_1200000_ilimsel),
+ .ilimsels = fixed_1200000_ilimsel,
.enable = __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK,
DCDCDCDC2_EN_SHIFT),
.voltage = __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK,
@@ -387,10 +397,10 @@ static const struct supply_info supply_info[N_REGULATORS] = {
},
{
.name = "DCDC3",
- .flags = FIXED_ILIMSEL,
.n_voltages = ARRAY_SIZE(dcdc3_voltages),
.voltages = dcdc3_voltages,
- .fixed_ilimsel = 1200000,
+ .n_ilimsels = ARRAY_SIZE(fixed_1200000_ilimsel),
+ .ilimsels = fixed_1200000_ilimsel,
.enable = __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK,
DCDCDCDC3_EN_SHIFT),
.voltage = __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK,
@@ -424,8 +434,8 @@ static const struct supply_info supply_info[N_REGULATORS] = {
},
{
.name = "USB",
- .flags = FIXED_VOLTAGE,
- .fixed_voltage = 5000000,
+ .n_voltages = ARRAY_SIZE(fixed_5000000_voltage),
+ .voltages = fixed_5000000_voltage,
.n_ilimsels = ARRAY_SIZE(usb_ilimsel),
.ilimsels = usb_ilimsel,
.enable = __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK,
@@ -435,29 +445,15 @@ static const struct supply_info supply_info[N_REGULATORS] = {
},
{
.name = "LCD",
- .flags = FIXED_VOLTAGE | FIXED_ILIMSEL,
- .fixed_voltage = 5000000,
- .fixed_ilimsel = 400000,
+ .n_voltages = ARRAY_SIZE(fixed_5000000_voltage),
+ .voltages = fixed_5000000_voltage,
+ .n_ilimsels = ARRAY_SIZE(fixed_400000_ilimsel),
+ .ilimsels = fixed_400000_ilimsel,
.enable = __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK,
BLOCK_LCD_SHIFT),
},
};
-static int list_voltage(struct regulator_dev *rdev, unsigned selector)
-{
- const struct supply_info *info;
- struct tps6524x *hw;
-
- hw = rdev_get_drvdata(rdev);
- info = &supply_info[rdev_get_id(rdev)];
-
- if (info->flags & FIXED_VOLTAGE)
- return selector ? -EINVAL : info->fixed_voltage;
-
- return ((selector < info->n_voltages) ?
- info->voltages[selector] : -EINVAL);
-}
-
static int set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
{
const struct supply_info *info;
@@ -466,7 +462,7 @@ static int set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
hw = rdev_get_drvdata(rdev);
info = &supply_info[rdev_get_id(rdev)];
- if (info->flags & FIXED_VOLTAGE)
+ if (rdev->desc->n_voltages == 1)
return -EINVAL;
return write_field(hw, &info->voltage, selector);
@@ -481,8 +477,8 @@ static int get_voltage_sel(struct regulator_dev *rdev)
hw = rdev_get_drvdata(rdev);
info = &supply_info[rdev_get_id(rdev)];
- if (info->flags & FIXED_VOLTAGE)
- return info->fixed_voltage;
+ if (rdev->desc->n_voltages == 1)
+ return 0;
ret = read_field(hw, &info->voltage);
if (ret < 0)
@@ -503,7 +499,7 @@ static int set_current_limit(struct regulator_dev *rdev, int min_uA,
hw = rdev_get_drvdata(rdev);
info = &supply_info[rdev_get_id(rdev)];
- if (info->flags & FIXED_ILIMSEL)
+ if (info->n_ilimsels == 1)
return -EINVAL;
for (i = 0; i < info->n_ilimsels; i++)
@@ -526,8 +522,8 @@ static int get_current_limit(struct regulator_dev *rdev)
hw = rdev_get_drvdata(rdev);
info = &supply_info[rdev_get_id(rdev)];
- if (info->flags & FIXED_ILIMSEL)
- return info->fixed_ilimsel;
+ if (info->n_ilimsels == 1)
+ return info->ilimsels[0];
ret = read_field(hw, &info->ilimsel);
if (ret < 0)
@@ -577,7 +573,7 @@ static struct regulator_ops regulator_ops = {
.disable = disable_supply,
.get_voltage_sel = get_voltage_sel,
.set_voltage_sel = set_voltage_sel,
- .list_voltage = list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.set_current_limit = set_current_limit,
.get_current_limit = get_current_limit,
};
@@ -629,13 +625,11 @@ static int __devinit pmic_probe(struct spi_device *spi)
hw->desc[i].name = info->name;
hw->desc[i].id = i;
hw->desc[i].n_voltages = info->n_voltages;
+ hw->desc[i].volt_table = info->voltages;
hw->desc[i].ops = &regulator_ops;
hw->desc[i].type = REGULATOR_VOLTAGE;
hw->desc[i].owner = THIS_MODULE;
- if (info->flags & FIXED_VOLTAGE)
- hw->desc[i].n_voltages = 1;
-
config.dev = dev;
config.init_data = init_data;
config.driver_data = hw;
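
Replacing the FIXED_VOLTAGE/FIXED_ILIMSEL flags with one-entry tables keeps a single code path for fixed and adjustable rails alike. A short illustration of the convention (sketch only, names taken from the hunks above):

/* A fixed 5 V rail expressed as a table */
static const unsigned int fixed_5000000_voltage[] = { 5000000 };

/*
 * desc.n_voltages = ARRAY_SIZE(fixed_5000000_voltage);    -> 1
 * desc.volt_table = fixed_5000000_voltage;
 *
 * get_voltage_sel() returns selector 0 for such a supply and
 * regulator_list_voltage_table(rdev, 0) maps that back to 5000000 uV,
 * so list_voltage() needs no FIXED_VOLTAGE special case any more.
 */
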
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index c0a2145..e6da90a 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -63,8 +63,6 @@ struct tps6586x_regulator {
int enable_bit[2];
int enable_reg[2];
- int *voltages;
-
/* for DVM regulators */
int go_reg;
int go_bit;
@@ -72,22 +70,9 @@ struct tps6586x_regulator {
static inline struct device *to_tps6586x_dev(struct regulator_dev *rdev)
{
- return rdev_get_dev(rdev)->parent->parent;
+ return rdev_get_dev(rdev)->parent;
}
-static int tps6586x_list_voltage(struct regulator_dev *rdev, unsigned selector)
-{
- struct tps6586x_regulator *info = rdev_get_drvdata(rdev);
- int rid = rdev_get_id(rdev);
-
- /* LDO0 has minimal voltage 1.2V rather than 1.25V */
- if ((rid == TPS6586X_ID_LDO_0) && (selector == 0))
- return (info->voltages[0] - 50) * 1000;
-
- return info->voltages[selector] * 1000;
-}
-
-
static int tps6586x_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
@@ -168,7 +153,7 @@ static int tps6586x_regulator_is_enabled(struct regulator_dev *rdev)
}
static struct regulator_ops tps6586x_regulator_ops = {
- .list_voltage = tps6586x_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.get_voltage_sel = tps6586x_get_voltage_sel,
.set_voltage_sel = tps6586x_set_voltage_sel,
@@ -177,39 +162,45 @@ static struct regulator_ops tps6586x_regulator_ops = {
.disable = tps6586x_regulator_disable,
};
-static int tps6586x_ldo_voltages[] = {
- 1250, 1500, 1800, 2500, 2700, 2850, 3100, 3300,
+static const unsigned int tps6586x_ldo0_voltages[] = {
+ 1200000, 1500000, 1800000, 2500000, 2700000, 2850000, 3100000, 3300000,
+};
+
+static const unsigned int tps6586x_ldo4_voltages[] = {
+ 1700000, 1725000, 1750000, 1775000, 1800000, 1825000, 1850000, 1875000,
+ 1900000, 1925000, 1950000, 1975000, 2000000, 2025000, 2050000, 2075000,
+ 2100000, 2125000, 2150000, 2175000, 2200000, 2225000, 2250000, 2275000,
+ 2300000, 2325000, 2350000, 2375000, 2400000, 2425000, 2450000, 2475000,
};
-static int tps6586x_ldo4_voltages[] = {
- 1700, 1725, 1750, 1775, 1800, 1825, 1850, 1875,
- 1900, 1925, 1950, 1975, 2000, 2025, 2050, 2075,
- 2100, 2125, 2150, 2175, 2200, 2225, 2250, 2275,
- 2300, 2325, 2350, 2375, 2400, 2425, 2450, 2475,
+static const unsigned int tps6586x_ldo_voltages[] = {
+ 1250000, 1500000, 1800000, 2500000, 2700000, 2850000, 3100000, 3300000,
};
-static int tps6586x_sm2_voltages[] = {
- 3000, 3050, 3100, 3150, 3200, 3250, 3300, 3350,
- 3400, 3450, 3500, 3550, 3600, 3650, 3700, 3750,
- 3800, 3850, 3900, 3950, 4000, 4050, 4100, 4150,
- 4200, 4250, 4300, 4350, 4400, 4450, 4500, 4550,
+static const unsigned int tps6586x_sm2_voltages[] = {
+ 3000000, 3050000, 3100000, 3150000, 3200000, 3250000, 3300000, 3350000,
+ 3400000, 3450000, 3500000, 3550000, 3600000, 3650000, 3700000, 3750000,
+ 3800000, 3850000, 3900000, 3950000, 4000000, 4050000, 4100000, 4150000,
+ 4200000, 4250000, 4300000, 4350000, 4400000, 4450000, 4500000, 4550000,
};
-static int tps6586x_dvm_voltages[] = {
- 725, 750, 775, 800, 825, 850, 875, 900,
- 925, 950, 975, 1000, 1025, 1050, 1075, 1100,
- 1125, 1150, 1175, 1200, 1225, 1250, 1275, 1300,
- 1325, 1350, 1375, 1400, 1425, 1450, 1475, 1500,
+static const unsigned int tps6586x_dvm_voltages[] = {
+ 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000,
+ 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000,
+ 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000,
+ 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000,
};
-#define TPS6586X_REGULATOR(_id, vdata, vreg, shift, nbits, \
+#define TPS6586X_REGULATOR(_id, _pin_name, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1) \
.desc = { \
+ .supply_name = _pin_name, \
.name = "REG-" #_id, \
.ops = &tps6586x_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.id = TPS6586X_ID_##_id, \
.n_voltages = ARRAY_SIZE(tps6586x_##vdata##_voltages), \
+ .volt_table = tps6586x_##vdata##_voltages, \
.owner = THIS_MODULE, \
}, \
.volt_reg = TPS6586X_##vreg, \
@@ -218,44 +209,45 @@ static int tps6586x_dvm_voltages[] = {
.enable_reg[0] = TPS6586X_SUPPLY##ereg0, \
.enable_bit[0] = (ebit0), \
.enable_reg[1] = TPS6586X_SUPPLY##ereg1, \
- .enable_bit[1] = (ebit1), \
- .voltages = tps6586x_##vdata##_voltages,
+ .enable_bit[1] = (ebit1),
#define TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \
.go_reg = TPS6586X_##goreg, \
.go_bit = (gobit),
-#define TPS6586X_LDO(_id, vdata, vreg, shift, nbits, \
+#define TPS6586X_LDO(_id, _pname, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1) \
{ \
- TPS6586X_REGULATOR(_id, vdata, vreg, shift, nbits, \
+ TPS6586X_REGULATOR(_id, _pname, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1) \
}
-#define TPS6586X_DVM(_id, vdata, vreg, shift, nbits, \
+#define TPS6586X_DVM(_id, _pname, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1, goreg, gobit) \
{ \
- TPS6586X_REGULATOR(_id, vdata, vreg, shift, nbits, \
+ TPS6586X_REGULATOR(_id, _pname, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1) \
TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \
}
static struct tps6586x_regulator tps6586x_regulator[] = {
- TPS6586X_LDO(LDO_0, ldo, SUPPLYV1, 5, 3, ENC, 0, END, 0),
- TPS6586X_LDO(LDO_3, ldo, SUPPLYV4, 0, 3, ENC, 2, END, 2),
- TPS6586X_LDO(LDO_5, ldo, SUPPLYV6, 0, 3, ENE, 6, ENE, 6),
- TPS6586X_LDO(LDO_6, ldo, SUPPLYV3, 0, 3, ENC, 4, END, 4),
- TPS6586X_LDO(LDO_7, ldo, SUPPLYV3, 3, 3, ENC, 5, END, 5),
- TPS6586X_LDO(LDO_8, ldo, SUPPLYV2, 5, 3, ENC, 6, END, 6),
- TPS6586X_LDO(LDO_9, ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7),
- TPS6586X_LDO(LDO_RTC, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7),
- TPS6586X_LDO(LDO_1, dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1),
- TPS6586X_LDO(SM_2, sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7),
-
- TPS6586X_DVM(LDO_2, dvm, LDO2BV1, 0, 5, ENA, 3, ENB, 3, VCC2, 6),
- TPS6586X_DVM(LDO_4, ldo4, LDO4V1, 0, 5, ENC, 3, END, 3, VCC1, 6),
- TPS6586X_DVM(SM_0, dvm, SM0V1, 0, 5, ENA, 1, ENB, 1, VCC1, 2),
- TPS6586X_DVM(SM_1, dvm, SM1V1, 0, 5, ENA, 0, ENB, 0, VCC1, 0),
+ TPS6586X_LDO(LDO_0, "vinldo01", ldo0, SUPPLYV1, 5, 3, ENC, 0, END, 0),
+ TPS6586X_LDO(LDO_3, "vinldo23", ldo, SUPPLYV4, 0, 3, ENC, 2, END, 2),
+ TPS6586X_LDO(LDO_5, NULL, ldo, SUPPLYV6, 0, 3, ENE, 6, ENE, 6),
+ TPS6586X_LDO(LDO_6, "vinldo678", ldo, SUPPLYV3, 0, 3, ENC, 4, END, 4),
+ TPS6586X_LDO(LDO_7, "vinldo678", ldo, SUPPLYV3, 3, 3, ENC, 5, END, 5),
+ TPS6586X_LDO(LDO_8, "vinldo678", ldo, SUPPLYV2, 5, 3, ENC, 6, END, 6),
+ TPS6586X_LDO(LDO_9, "vinldo9", ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7),
+ TPS6586X_LDO(LDO_RTC, NULL, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7),
+ TPS6586X_LDO(LDO_1, "vinldo01", dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1),
+ TPS6586X_LDO(SM_2, "sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7),
+
+ TPS6586X_DVM(LDO_2, "vinldo23", dvm, LDO2BV1, 0, 5, ENA, 3,
+ ENB, 3, VCC2, 6),
+ TPS6586X_DVM(LDO_4, "vinldo4", ldo4, LDO4V1, 0, 5, ENC, 3,
+ END, 3, VCC1, 6),
+ TPS6586X_DVM(SM_0, "sm0", dvm, SM0V1, 0, 5, ENA, 1, ENB, 1, VCC1, 2),
+ TPS6586X_DVM(SM_1, "sm1", dvm, SM1V1, 0, 5, ENA, 0, ENB, 0, VCC1, 0),
};
/*
@@ -362,7 +354,7 @@ static int __devinit tps6586x_regulator_probe(struct platform_device *pdev)
if (err)
return err;
- config.dev = &pdev->dev;
+ config.dev = pdev->dev.parent;
config.of_node = pdev->dev.of_node;
config.init_data = pdev->dev.platform_data;
config.driver_data = ri;
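
The added supply_name strings ("vinldo01", "sm2", ...) name each regulator's input pin so the core can resolve parent supplies. Splitting tps6586x_ldo0_voltages[] out of the generic LDO table also folds the old LDO0 quirk into data rather than code; the resulting voltage is unchanged:

/*
 * Old path (removed above):  (tps6586x_ldo_voltages[0] - 50) * 1000 = 1200000 uV
 * New path:                   tps6586x_ldo0_voltages[0]             = 1200000 uV
 *
 * so regulator_list_voltage_table() needs no LDO0 special case.
 */
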
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 6bf864b..793adda 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -31,160 +31,147 @@
TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3 | \
TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP)
-/* supported VIO voltages in millivolts */
-static const u16 VIO_VSEL_table[] = {
- 1500, 1800, 2500, 3300,
+/* supported VIO voltages in microvolts */
+static const unsigned int VIO_VSEL_table[] = {
+ 1500000, 1800000, 2500000, 3300000,
};
/* VSEL tables for TPS65910 specific LDOs and dcdc's */
-/* supported VDD3 voltages in millivolts */
-static const u16 VDD3_VSEL_table[] = {
- 5000,
+/* supported VDD3 voltages in microvolts */
+static const unsigned int VDD3_VSEL_table[] = {
+ 5000000,
};
-/* supported VDIG1 voltages in millivolts */
-static const u16 VDIG1_VSEL_table[] = {
- 1200, 1500, 1800, 2700,
+/* supported VDIG1 voltages in microvolts */
+static const unsigned int VDIG1_VSEL_table[] = {
+ 1200000, 1500000, 1800000, 2700000,
};
-/* supported VDIG2 voltages in millivolts */
-static const u16 VDIG2_VSEL_table[] = {
- 1000, 1100, 1200, 1800,
+/* supported VDIG2 voltages in microvolts */
+static const unsigned int VDIG2_VSEL_table[] = {
+ 1000000, 1100000, 1200000, 1800000,
};
-/* supported VPLL voltages in millivolts */
-static const u16 VPLL_VSEL_table[] = {
- 1000, 1100, 1800, 2500,
+/* supported VPLL voltages in microvolts */
+static const unsigned int VPLL_VSEL_table[] = {
+ 1000000, 1100000, 1800000, 2500000,
};
-/* supported VDAC voltages in millivolts */
-static const u16 VDAC_VSEL_table[] = {
- 1800, 2600, 2800, 2850,
+/* supported VDAC voltages in microvolts */
+static const unsigned int VDAC_VSEL_table[] = {
+ 1800000, 2600000, 2800000, 2850000,
};
-/* supported VAUX1 voltages in millivolts */
-static const u16 VAUX1_VSEL_table[] = {
- 1800, 2500, 2800, 2850,
+/* supported VAUX1 voltages in microvolts */
+static const unsigned int VAUX1_VSEL_table[] = {
+ 1800000, 2500000, 2800000, 2850000,
};
-/* supported VAUX2 voltages in millivolts */
-static const u16 VAUX2_VSEL_table[] = {
- 1800, 2800, 2900, 3300,
+/* supported VAUX2 voltages in microvolts */
+static const unsigned int VAUX2_VSEL_table[] = {
+ 1800000, 2800000, 2900000, 3300000,
};
-/* supported VAUX33 voltages in millivolts */
-static const u16 VAUX33_VSEL_table[] = {
- 1800, 2000, 2800, 3300,
+/* supported VAUX33 voltages in microvolts */
+static const unsigned int VAUX33_VSEL_table[] = {
+ 1800000, 2000000, 2800000, 3300000,
};
-/* supported VMMC voltages in millivolts */
-static const u16 VMMC_VSEL_table[] = {
- 1800, 2800, 3000, 3300,
+/* supported VMMC voltages in microvolts */
+static const unsigned int VMMC_VSEL_table[] = {
+ 1800000, 2800000, 3000000, 3300000,
};
struct tps_info {
const char *name;
- unsigned min_uV;
- unsigned max_uV;
+ const char *vin_name;
u8 n_voltages;
- const u16 *voltage_table;
+ const unsigned int *voltage_table;
int enable_time_us;
};
static struct tps_info tps65910_regs[] = {
{
.name = "vrtc",
+ .vin_name = "vcc7",
.enable_time_us = 2200,
},
{
.name = "vio",
- .min_uV = 1500000,
- .max_uV = 3300000,
+ .vin_name = "vccio",
.n_voltages = ARRAY_SIZE(VIO_VSEL_table),
.voltage_table = VIO_VSEL_table,
.enable_time_us = 350,
},
{
.name = "vdd1",
- .min_uV = 600000,
- .max_uV = 4500000,
+ .vin_name = "vcc1",
.enable_time_us = 350,
},
{
.name = "vdd2",
- .min_uV = 600000,
- .max_uV = 4500000,
+ .vin_name = "vcc2",
.enable_time_us = 350,
},
{
.name = "vdd3",
- .min_uV = 5000000,
- .max_uV = 5000000,
.n_voltages = ARRAY_SIZE(VDD3_VSEL_table),
.voltage_table = VDD3_VSEL_table,
.enable_time_us = 200,
},
{
.name = "vdig1",
- .min_uV = 1200000,
- .max_uV = 2700000,
+ .vin_name = "vcc6",
.n_voltages = ARRAY_SIZE(VDIG1_VSEL_table),
.voltage_table = VDIG1_VSEL_table,
.enable_time_us = 100,
},
{
.name = "vdig2",
- .min_uV = 1000000,
- .max_uV = 1800000,
+ .vin_name = "vcc6",
.n_voltages = ARRAY_SIZE(VDIG2_VSEL_table),
.voltage_table = VDIG2_VSEL_table,
.enable_time_us = 100,
},
{
.name = "vpll",
- .min_uV = 1000000,
- .max_uV = 2500000,
+ .vin_name = "vcc5",
.n_voltages = ARRAY_SIZE(VPLL_VSEL_table),
.voltage_table = VPLL_VSEL_table,
.enable_time_us = 100,
},
{
.name = "vdac",
- .min_uV = 1800000,
- .max_uV = 2850000,
+ .vin_name = "vcc5",
.n_voltages = ARRAY_SIZE(VDAC_VSEL_table),
.voltage_table = VDAC_VSEL_table,
.enable_time_us = 100,
},
{
.name = "vaux1",
- .min_uV = 1800000,
- .max_uV = 2850000,
+ .vin_name = "vcc4",
.n_voltages = ARRAY_SIZE(VAUX1_VSEL_table),
.voltage_table = VAUX1_VSEL_table,
.enable_time_us = 100,
},
{
.name = "vaux2",
- .min_uV = 1800000,
- .max_uV = 3300000,
+ .vin_name = "vcc4",
.n_voltages = ARRAY_SIZE(VAUX2_VSEL_table),
.voltage_table = VAUX2_VSEL_table,
.enable_time_us = 100,
},
{
.name = "vaux33",
- .min_uV = 1800000,
- .max_uV = 3300000,
+ .vin_name = "vcc3",
.n_voltages = ARRAY_SIZE(VAUX33_VSEL_table),
.voltage_table = VAUX33_VSEL_table,
.enable_time_us = 100,
},
{
.name = "vmmc",
- .min_uV = 1800000,
- .max_uV = 3300000,
+ .vin_name = "vcc3",
.n_voltages = ARRAY_SIZE(VMMC_VSEL_table),
.voltage_table = VMMC_VSEL_table,
.enable_time_us = 100,
@@ -194,91 +181,79 @@ static struct tps_info tps65910_regs[] = {
static struct tps_info tps65911_regs[] = {
{
.name = "vrtc",
+ .vin_name = "vcc7",
.enable_time_us = 2200,
},
{
.name = "vio",
- .min_uV = 1500000,
- .max_uV = 3300000,
+ .vin_name = "vccio",
.n_voltages = ARRAY_SIZE(VIO_VSEL_table),
.voltage_table = VIO_VSEL_table,
.enable_time_us = 350,
},
{
.name = "vdd1",
- .min_uV = 600000,
- .max_uV = 4500000,
- .n_voltages = 73,
+ .vin_name = "vcc1",
+ .n_voltages = 0x4C,
.enable_time_us = 350,
},
{
.name = "vdd2",
- .min_uV = 600000,
- .max_uV = 4500000,
- .n_voltages = 73,
+ .vin_name = "vcc2",
+ .n_voltages = 0x4C,
.enable_time_us = 350,
},
{
.name = "vddctrl",
- .min_uV = 600000,
- .max_uV = 1400000,
- .n_voltages = 65,
+ .n_voltages = 0x44,
.enable_time_us = 900,
},
{
.name = "ldo1",
- .min_uV = 1000000,
- .max_uV = 3300000,
- .n_voltages = 47,
+ .vin_name = "vcc6",
+ .n_voltages = 0x33,
.enable_time_us = 420,
},
{
.name = "ldo2",
- .min_uV = 1000000,
- .max_uV = 3300000,
- .n_voltages = 47,
+ .vin_name = "vcc6",
+ .n_voltages = 0x33,
.enable_time_us = 420,
},
{
.name = "ldo3",
- .min_uV = 1000000,
- .max_uV = 3300000,
- .n_voltages = 24,
+ .vin_name = "vcc5",
+ .n_voltages = 0x1A,
.enable_time_us = 230,
},
{
.name = "ldo4",
- .min_uV = 1000000,
- .max_uV = 3300000,
- .n_voltages = 47,
+ .vin_name = "vcc5",
+ .n_voltages = 0x33,
.enable_time_us = 230,
},
{
.name = "ldo5",
- .min_uV = 1000000,
- .max_uV = 3300000,
- .n_voltages = 24,
+ .vin_name = "vcc4",
+ .n_voltages = 0x1A,
.enable_time_us = 230,
},
{
.name = "ldo6",
- .min_uV = 1000000,
- .max_uV = 3300000,
- .n_voltages = 24,
+ .vin_name = "vcc3",
+ .n_voltages = 0x1A,
.enable_time_us = 230,
},
{
.name = "ldo7",
- .min_uV = 1000000,
- .max_uV = 3300000,
- .n_voltages = 24,
+ .vin_name = "vcc3",
+ .n_voltages = 0x1A,
.enable_time_us = 230,
},
{
.name = "ldo8",
- .min_uV = 1000000,
- .max_uV = 3300000,
- .n_voltages = 24,
+ .vin_name = "vcc3",
+ .n_voltages = 0x1A,
.enable_time_us = 230,
},
};
@@ -321,7 +296,6 @@ struct tps65910_reg {
struct tps65910 *mfd;
struct regulator_dev **rdev;
struct tps_info **info;
- struct mutex mutex;
int num_regulators;
int mode;
int (*get_ctrl_reg)(int);
@@ -329,71 +303,6 @@ struct tps65910_reg {
unsigned int board_ext_control[TPS65910_NUM_REGS];
};
-static inline int tps65910_read(struct tps65910_reg *pmic, u8 reg)
-{
- unsigned int val;
- int err;
-
- err = tps65910_reg_read(pmic->mfd, reg, &val);
- if (err)
- return err;
-
- return val;
-}
-
-static int tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg,
- u8 set_mask, u8 clear_mask)
-{
- int err, data;
-
- mutex_lock(&pmic->mutex);
-
- data = tps65910_read(pmic, reg);
- if (data < 0) {
- dev_err(pmic->mfd->dev, "Read from reg 0x%x failed\n", reg);
- err = data;
- goto out;
- }
-
- data &= ~clear_mask;
- data |= set_mask;
- err = tps65910_reg_write(pmic->mfd, reg, data);
- if (err)
- dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
-
-out:
- mutex_unlock(&pmic->mutex);
- return err;
-}
-
-static int tps65910_reg_read_locked(struct tps65910_reg *pmic, u8 reg)
-{
- int data;
-
- mutex_lock(&pmic->mutex);
-
- data = tps65910_read(pmic, reg);
- if (data < 0)
- dev_err(pmic->mfd->dev, "Read from reg 0x%x failed\n", reg);
-
- mutex_unlock(&pmic->mutex);
- return data;
-}
-
-static int tps65910_reg_write_locked(struct tps65910_reg *pmic, u8 reg, u8 val)
-{
- int err;
-
- mutex_lock(&pmic->mutex);
-
- err = tps65910_reg_write(pmic->mfd, reg, val);
- if (err < 0)
- dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
-
- mutex_unlock(&pmic->mutex);
- return err;
-}
-
static int tps65910_get_ctrl_register(int id)
{
switch (id) {
@@ -462,13 +371,6 @@ static int tps65911_get_ctrl_register(int id)
}
}
-static int tps65910_enable_time(struct regulator_dev *dev)
-{
- struct tps65910_reg *pmic = rdev_get_drvdata(dev);
- int id = rdev_get_id(dev);
- return pmic->info[id]->enable_time_us;
-}
-
static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
@@ -481,8 +383,9 @@ static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
switch (mode) {
case REGULATOR_MODE_NORMAL:
- return tps65910_modify_bits(pmic, reg, LDO_ST_ON_BIT,
- LDO_ST_MODE_BIT);
+ return tps65910_reg_update_bits(pmic->mfd, reg,
+ LDO_ST_MODE_BIT | LDO_ST_ON_BIT,
+ LDO_ST_ON_BIT);
case REGULATOR_MODE_IDLE:
value = LDO_ST_ON_BIT | LDO_ST_MODE_BIT;
return tps65910_reg_set_bits(mfd, reg, value);
@@ -496,15 +399,15 @@ static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
static unsigned int tps65910_get_mode(struct regulator_dev *dev)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
- int reg, value, id = rdev_get_id(dev);
+ int ret, reg, value, id = rdev_get_id(dev);
reg = pmic->get_ctrl_reg(id);
if (reg < 0)
return reg;
- value = tps65910_reg_read_locked(pmic, reg);
- if (value < 0)
- return value;
+ ret = tps65910_reg_read(pmic->mfd, reg, &value);
+ if (ret < 0)
+ return ret;
if (!(value & LDO_ST_ON_BIT))
return REGULATOR_MODE_STANDBY;
@@ -517,33 +420,51 @@ static unsigned int tps65910_get_mode(struct regulator_dev *dev)
static int tps65910_get_voltage_dcdc_sel(struct regulator_dev *dev)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
- int id = rdev_get_id(dev);
+ int ret, id = rdev_get_id(dev);
int opvsel = 0, srvsel = 0, vselmax = 0, mult = 0, sr = 0;
switch (id) {
case TPS65910_REG_VDD1:
- opvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD1_OP);
- mult = tps65910_reg_read_locked(pmic, TPS65910_VDD1);
+ ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD1_OP, &opvsel);
+ if (ret < 0)
+ return ret;
+ ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD1, &mult);
+ if (ret < 0)
+ return ret;
mult = (mult & VDD1_VGAIN_SEL_MASK) >> VDD1_VGAIN_SEL_SHIFT;
- srvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD1_SR);
+ ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD1_SR, &srvsel);
+ if (ret < 0)
+ return ret;
sr = opvsel & VDD1_OP_CMD_MASK;
opvsel &= VDD1_OP_SEL_MASK;
srvsel &= VDD1_SR_SEL_MASK;
vselmax = 75;
break;
case TPS65910_REG_VDD2:
- opvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD2_OP);
- mult = tps65910_reg_read_locked(pmic, TPS65910_VDD2);
+ ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD2_OP, &opvsel);
+ if (ret < 0)
+ return ret;
+ ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD2, &mult);
+ if (ret < 0)
+ return ret;
mult = (mult & VDD2_VGAIN_SEL_MASK) >> VDD2_VGAIN_SEL_SHIFT;
- srvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD2_SR);
+ ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD2_SR, &srvsel);
+ if (ret < 0)
+ return ret;
sr = opvsel & VDD2_OP_CMD_MASK;
opvsel &= VDD2_OP_SEL_MASK;
srvsel &= VDD2_SR_SEL_MASK;
vselmax = 75;
break;
case TPS65911_REG_VDDCTRL:
- opvsel = tps65910_reg_read_locked(pmic, TPS65911_VDDCTRL_OP);
- srvsel = tps65910_reg_read_locked(pmic, TPS65911_VDDCTRL_SR);
+ ret = tps65910_reg_read(pmic->mfd, TPS65911_VDDCTRL_OP,
+ &opvsel);
+ if (ret < 0)
+ return ret;
+ ret = tps65910_reg_read(pmic->mfd, TPS65911_VDDCTRL_SR,
+ &srvsel);
+ if (ret < 0)
+ return ret;
sr = opvsel & VDDCTRL_OP_CMD_MASK;
opvsel &= VDDCTRL_OP_SEL_MASK;
srvsel &= VDDCTRL_SR_SEL_MASK;
@@ -577,15 +498,15 @@ static int tps65910_get_voltage_dcdc_sel(struct regulator_dev *dev)
static int tps65910_get_voltage_sel(struct regulator_dev *dev)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
- int reg, value, id = rdev_get_id(dev);
+ int ret, reg, value, id = rdev_get_id(dev);
reg = pmic->get_ctrl_reg(id);
if (reg < 0)
return reg;
- value = tps65910_reg_read_locked(pmic, reg);
- if (value < 0)
- return value;
+ ret = tps65910_reg_read(pmic->mfd, reg, &value);
+ if (ret < 0)
+ return ret;
switch (id) {
case TPS65910_REG_VIO:
@@ -609,18 +530,20 @@ static int tps65910_get_voltage_sel(struct regulator_dev *dev)
static int tps65910_get_voltage_vdd3(struct regulator_dev *dev)
{
- return 5 * 1000 * 1000;
+ return dev->desc->volt_table[0];
}
static int tps65911_get_voltage_sel(struct regulator_dev *dev)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
- int id = rdev_get_id(dev);
- u8 value, reg;
+ int ret, id = rdev_get_id(dev);
+ unsigned int value, reg;
reg = pmic->get_ctrl_reg(id);
- value = tps65910_reg_read_locked(pmic, reg);
+ ret = tps65910_reg_read(pmic->mfd, reg, &value);
+ if (ret < 0)
+ return ret;
switch (id) {
case TPS65911_REG_LDO1:
@@ -662,10 +585,10 @@ static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev,
dcdc_mult--;
vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
- tps65910_modify_bits(pmic, TPS65910_VDD1,
- (dcdc_mult << VDD1_VGAIN_SEL_SHIFT),
- VDD1_VGAIN_SEL_MASK);
- tps65910_reg_write_locked(pmic, TPS65910_VDD1_OP, vsel);
+ tps65910_reg_update_bits(pmic->mfd, TPS65910_VDD1,
+ VDD1_VGAIN_SEL_MASK,
+ dcdc_mult << VDD1_VGAIN_SEL_SHIFT);
+ tps65910_reg_write(pmic->mfd, TPS65910_VDD1_OP, vsel);
break;
case TPS65910_REG_VDD2:
dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
@@ -673,14 +596,14 @@ static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev,
dcdc_mult--;
vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
- tps65910_modify_bits(pmic, TPS65910_VDD2,
- (dcdc_mult << VDD2_VGAIN_SEL_SHIFT),
- VDD1_VGAIN_SEL_MASK);
- tps65910_reg_write_locked(pmic, TPS65910_VDD2_OP, vsel);
+ tps65910_reg_update_bits(pmic->mfd, TPS65910_VDD2,
+ VDD1_VGAIN_SEL_MASK,
+ dcdc_mult << VDD2_VGAIN_SEL_SHIFT);
+ tps65910_reg_write(pmic->mfd, TPS65910_VDD2_OP, vsel);
break;
case TPS65911_REG_VDDCTRL:
vsel = selector + 3;
- tps65910_reg_write_locked(pmic, TPS65911_VDDCTRL_OP, vsel);
+ tps65910_reg_write(pmic->mfd, TPS65911_VDDCTRL_OP, vsel);
}
return 0;
@@ -706,8 +629,8 @@ static int tps65910_set_voltage_sel(struct regulator_dev *dev,
case TPS65910_REG_VAUX2:
case TPS65910_REG_VAUX33:
case TPS65910_REG_VMMC:
- return tps65910_modify_bits(pmic, reg,
- (selector << LDO_SEL_SHIFT), LDO_SEL_MASK);
+ return tps65910_reg_update_bits(pmic->mfd, reg, LDO_SEL_MASK,
+ selector << LDO_SEL_SHIFT);
}
return -EINVAL;
@@ -727,18 +650,18 @@ static int tps65911_set_voltage_sel(struct regulator_dev *dev,
case TPS65911_REG_LDO1:
case TPS65911_REG_LDO2:
case TPS65911_REG_LDO4:
- return tps65910_modify_bits(pmic, reg,
- (selector << LDO_SEL_SHIFT), LDO1_SEL_MASK);
+ return tps65910_reg_update_bits(pmic->mfd, reg, LDO1_SEL_MASK,
+ selector << LDO_SEL_SHIFT);
case TPS65911_REG_LDO3:
case TPS65911_REG_LDO5:
case TPS65911_REG_LDO6:
case TPS65911_REG_LDO7:
case TPS65911_REG_LDO8:
- return tps65910_modify_bits(pmic, reg,
- (selector << LDO_SEL_SHIFT), LDO3_SEL_MASK);
+ return tps65910_reg_update_bits(pmic->mfd, reg, LDO3_SEL_MASK,
+ selector << LDO_SEL_SHIFT);
case TPS65910_REG_VIO:
- return tps65910_modify_bits(pmic, reg,
- (selector << LDO_SEL_SHIFT), LDO_SEL_MASK);
+ return tps65910_reg_update_bits(pmic->mfd, reg, LDO_SEL_MASK,
+ selector << LDO_SEL_SHIFT);
}
return -EINVAL;
@@ -768,23 +691,6 @@ static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
return volt * 100 * mult;
}
-static int tps65910_list_voltage(struct regulator_dev *dev,
- unsigned selector)
-{
- struct tps65910_reg *pmic = rdev_get_drvdata(dev);
- int id = rdev_get_id(dev), voltage;
-
- if (id < TPS65910_REG_VIO || id > TPS65910_REG_VMMC)
- return -EINVAL;
-
- if (selector >= pmic->info[id]->n_voltages)
- return -EINVAL;
- else
- voltage = pmic->info[id]->voltage_table[selector] * 1000;
-
- return voltage;
-}
-
static int tps65911_list_voltage(struct regulator_dev *dev, unsigned selector)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
@@ -816,7 +722,7 @@ static int tps65911_list_voltage(struct regulator_dev *dev, unsigned selector)
step_mv = 100;
break;
case TPS65910_REG_VIO:
- return pmic->info[id]->voltage_table[selector] * 1000;
+ return pmic->info[id]->voltage_table[selector];
default:
return -EINVAL;
}
@@ -824,42 +730,16 @@ static int tps65911_list_voltage(struct regulator_dev *dev, unsigned selector)
return (LDO_MIN_VOLT + selector * step_mv) * 1000;
}
-static int tps65910_set_voltage_dcdc_time_sel(struct regulator_dev *dev,
- unsigned int old_selector, unsigned int new_selector)
-{
- int id = rdev_get_id(dev);
- int old_volt, new_volt;
-
- old_volt = tps65910_list_voltage_dcdc(dev, old_selector);
- if (old_volt < 0)
- return old_volt;
-
- new_volt = tps65910_list_voltage_dcdc(dev, new_selector);
- if (new_volt < 0)
- return new_volt;
-
- /* VDD1 and VDD2 are 12.5mV/us, VDDCTRL is 100mV/20us */
- switch (id) {
- case TPS65910_REG_VDD1:
- case TPS65910_REG_VDD2:
- return DIV_ROUND_UP(abs(old_volt - new_volt), 12500);
- case TPS65911_REG_VDDCTRL:
- return DIV_ROUND_UP(abs(old_volt - new_volt), 5000);
- }
- return -EINVAL;
-}
-
/* Regulator ops (except VRTC) */
static struct regulator_ops tps65910_ops_dcdc = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
- .enable_time = tps65910_enable_time,
.set_mode = tps65910_set_mode,
.get_mode = tps65910_get_mode,
.get_voltage_sel = tps65910_get_voltage_dcdc_sel,
.set_voltage_sel = tps65910_set_voltage_dcdc_sel,
- .set_voltage_time_sel = tps65910_set_voltage_dcdc_time_sel,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
.list_voltage = tps65910_list_voltage_dcdc,
};
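
With desc->ramp_delay filled in by the probe hunk further down (12500 uV/us for VDD1/VDD2, 5000 uV/us for the TPS65911 VDDCTRL case), the hand-rolled set_voltage_time_sel can go: the core derives the settling time from the two selectors. Approximately (simplified sketch; the in-tree helper also honours a board-supplied ramp delay constraint):

int regulator_set_voltage_time_sel(struct regulator_dev *rdev,
                                   unsigned int old_selector,
                                   unsigned int new_selector)
{
        int old_uV, new_uV;

        if (!rdev->desc->ramp_delay)
                return -EINVAL;

        old_uV = rdev->desc->ops->list_voltage(rdev, old_selector);
        new_uV = rdev->desc->ops->list_voltage(rdev, new_selector);
        if (old_uV < 0 || new_uV < 0)
                return -EINVAL;

        /* ramp_delay is in uV/us, so the result is the delay in us */
        return DIV_ROUND_UP(abs(new_uV - old_uV), rdev->desc->ramp_delay);
}
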
@@ -867,30 +747,27 @@ static struct regulator_ops tps65910_ops_vdd3 = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
- .enable_time = tps65910_enable_time,
.set_mode = tps65910_set_mode,
.get_mode = tps65910_get_mode,
.get_voltage = tps65910_get_voltage_vdd3,
- .list_voltage = tps65910_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
};
static struct regulator_ops tps65910_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
- .enable_time = tps65910_enable_time,
.set_mode = tps65910_set_mode,
.get_mode = tps65910_get_mode,
.get_voltage_sel = tps65910_get_voltage_sel,
.set_voltage_sel = tps65910_set_voltage_sel,
- .list_voltage = tps65910_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
};
static struct regulator_ops tps65911_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
- .enable_time = tps65910_enable_time,
.set_mode = tps65910_set_mode,
.get_mode = tps65910_get_mode,
.get_voltage_sel = tps65911_get_voltage_sel,
@@ -996,19 +873,27 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
(tps65910_chip_id(mfd) == TPS65911))) {
int op_reg_add = pmic->get_ctrl_reg(id) + 1;
int sr_reg_add = pmic->get_ctrl_reg(id) + 2;
- int opvsel = tps65910_reg_read_locked(pmic, op_reg_add);
- int srvsel = tps65910_reg_read_locked(pmic, sr_reg_add);
+ int opvsel, srvsel;
+
+ ret = tps65910_reg_read(pmic->mfd, op_reg_add, &opvsel);
+ if (ret < 0)
+ return ret;
+ ret = tps65910_reg_read(pmic->mfd, sr_reg_add, &srvsel);
+ if (ret < 0)
+ return ret;
+
if (opvsel & VDD1_OP_CMD_MASK) {
u8 reg_val = srvsel & VDD1_OP_SEL_MASK;
- ret = tps65910_reg_write_locked(pmic, op_reg_add,
- reg_val);
+
+ ret = tps65910_reg_write(pmic->mfd, op_reg_add,
+ reg_val);
if (ret < 0) {
dev_err(mfd->dev,
"Error in configuring op register\n");
return ret;
}
}
- ret = tps65910_reg_write_locked(pmic, sr_reg_add, 0);
+ ret = tps65910_reg_write(pmic->mfd, sr_reg_add, 0);
if (ret < 0) {
dev_err(mfd->dev, "Error in settting sr register\n");
return ret;
@@ -1126,6 +1011,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
"ti,regulator-ext-sleep-control", &prop);
if (!ret)
pmic_plat_data->regulator_ext_sleep_control[idx] = prop;
+
}
return pmic_plat_data;
@@ -1136,7 +1022,7 @@ static inline struct tps65910_board *tps65910_parse_dt_reg_data(
struct of_regulator_match **tps65910_reg_matches)
{
*tps65910_reg_matches = NULL;
- return 0;
+ return NULL;
}
#endif
@@ -1168,7 +1054,6 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
return -ENOMEM;
}
- mutex_init(&pmic->mutex);
pmic->mfd = tps65910;
platform_set_drvdata(pdev, pmic);
@@ -1229,23 +1114,31 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
pmic->info[i] = info;
pmic->desc[i].name = info->name;
+ pmic->desc[i].supply_name = info->vin_name;
pmic->desc[i].id = i;
pmic->desc[i].n_voltages = info->n_voltages;
+ pmic->desc[i].enable_time = info->enable_time_us;
if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) {
pmic->desc[i].ops = &tps65910_ops_dcdc;
pmic->desc[i].n_voltages = VDD1_2_NUM_VOLT_FINE *
VDD1_2_NUM_VOLT_COARSE;
+ pmic->desc[i].ramp_delay = 12500;
} else if (i == TPS65910_REG_VDD3) {
- if (tps65910_chip_id(tps65910) == TPS65910)
+ if (tps65910_chip_id(tps65910) == TPS65910) {
pmic->desc[i].ops = &tps65910_ops_vdd3;
- else
+ pmic->desc[i].volt_table = info->voltage_table;
+ } else {
pmic->desc[i].ops = &tps65910_ops_dcdc;
+ pmic->desc[i].ramp_delay = 5000;
+ }
} else {
- if (tps65910_chip_id(tps65910) == TPS65910)
+ if (tps65910_chip_id(tps65910) == TPS65910) {
pmic->desc[i].ops = &tps65910_ops;
- else
+ pmic->desc[i].volt_table = info->voltage_table;
+ } else {
pmic->desc[i].ops = &tps65911_ops;
+ }
}
err = tps65910_set_ext_sleep_config(pmic, i,
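
Dropping pmic->mutex along with the tps65910_read/_modify_bits/_reg_*_locked helpers relies on the MFD accessors serializing register access themselves. Assuming the tps65910 MFD wraps a regmap (as the tps65910_reg_read/tps65910_reg_update_bits call sites above suggest), those accessors are thin wrappers along these lines (a sketch, not the exact in-tree inlines):

static inline int tps65910_reg_read(struct tps65910 *tps65910, u8 reg,
                                    unsigned int *val)
{
        return regmap_read(tps65910->regmap, reg, val);
}

static inline int tps65910_reg_update_bits(struct tps65910 *tps65910, u8 reg,
                                           u8 mask, u8 val)
{
        /* regmap performs the read-modify-write under its own lock,
         * so the driver-private mutex is no longer needed. */
        return regmap_update_bits(tps65910->regmap, reg, mask, val);
}
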
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index c739071..242fe90 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -43,9 +43,6 @@ struct twlreg_info {
u8 table_len;
const u16 *table;
- /* regulator specific turn-on delay */
- u16 delay;
-
/* State REMAP default configuration */
u8 remap;
@@ -223,20 +220,6 @@ static int twl6030reg_enable(struct regulator_dev *rdev)
return ret;
}
-static int twl4030reg_enable_time(struct regulator_dev *rdev)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
-
- return info->delay;
-}
-
-static int twl6030reg_enable_time(struct regulator_dev *rdev)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
-
- return info->delay;
-}
-
static int twl4030reg_disable(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
@@ -508,7 +491,6 @@ static struct regulator_ops twl4030ldo_ops = {
.enable = twl4030reg_enable,
.disable = twl4030reg_disable,
.is_enabled = twl4030reg_is_enabled,
- .enable_time = twl4030reg_enable_time,
.set_mode = twl4030reg_set_mode,
@@ -577,59 +559,53 @@ static struct regulator_ops twl6030coresmps_ops = {
.get_voltage = twl6030coresmps_get_voltage,
};
-static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
+static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned sel)
{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
- return ((info->min_mV + (index * 100)) * 1000);
+ switch (sel) {
+ case 0:
+ return 0;
+ case 1 ... 24:
+ /* Linear mapping from 00000001 to 00011000:
+ * Absolute voltage value = 1.0 V + 0.1 V × (sel – 00000001)
+ */
+ return (info->min_mV + 100 * (sel - 1)) * 1000;
+ case 25 ... 30:
+ return -EINVAL;
+ case 31:
+ return 2750000;
+ default:
+ return -EINVAL;
+ }
}
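
A quick worked example of the new selector mapping, taking min_mV = 1000 as the comment above implies for these LDOs (assumed here for illustration):

/*
 * sel  1 -> (1000 + 100 * 0)  * 1000 = 1000000 uV
 * sel  5 -> (1000 + 100 * 4)  * 1000 = 1400000 uV
 * sel 24 -> (1000 + 100 * 23) * 1000 = 3300000 uV
 * sel 25..30 -> -EINVAL (codes the driver treats as invalid)
 * sel 31 -> 2750000 uV
 *
 * Selector space is therefore 0..31, which is why n_voltages becomes 32
 * in the TWL6030/TWL6025 macros below.
 */
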
static int
-twl6030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
- unsigned *selector)
+twl6030ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
- int vsel;
-
- if ((min_uV/1000 < info->min_mV) || (max_uV/1000 > info->max_mV))
- return -EDOM;
-
- /*
- * Use the below formula to calculate vsel
- * mV = 1000mv + 100mv * (vsel - 1)
- */
- vsel = (min_uV/1000 - 1000)/100 + 1;
- *selector = vsel;
- return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE, vsel);
+ return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE,
+ selector);
}
-static int twl6030ldo_get_voltage(struct regulator_dev *rdev)
+static int twl6030ldo_get_voltage_sel(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
- int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
- VREG_VOLTAGE);
-
- if (vsel < 0)
- return vsel;
+ int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE);
- /*
- * Use the below formula to calculate vsel
- * mV = 1000mv + 100mv * (vsel - 1)
- */
- return (1000 + (100 * (vsel - 1))) * 1000;
+ return vsel;
}
static struct regulator_ops twl6030ldo_ops = {
.list_voltage = twl6030ldo_list_voltage,
- .set_voltage = twl6030ldo_set_voltage,
- .get_voltage = twl6030ldo_get_voltage,
+ .set_voltage_sel = twl6030ldo_set_voltage_sel,
+ .get_voltage_sel = twl6030ldo_get_voltage_sel,
.enable = twl6030reg_enable,
.disable = twl6030reg_disable,
.is_enabled = twl6030reg_is_enabled,
- .enable_time = twl6030reg_enable_time,
.set_mode = twl6030reg_set_mode,
@@ -663,7 +639,6 @@ static struct regulator_ops twl4030fixed_ops = {
.enable = twl4030reg_enable,
.disable = twl4030reg_disable,
.is_enabled = twl4030reg_is_enabled,
- .enable_time = twl4030reg_enable_time,
.set_mode = twl4030reg_set_mode,
@@ -678,7 +653,6 @@ static struct regulator_ops twl6030fixed_ops = {
.enable = twl6030reg_enable,
.disable = twl6030reg_disable,
.is_enabled = twl6030reg_is_enabled,
- .enable_time = twl6030reg_enable_time,
.set_mode = twl6030reg_set_mode,
@@ -689,7 +663,6 @@ static struct regulator_ops twl6030_fixed_resource = {
.enable = twl6030reg_enable,
.disable = twl6030reg_disable,
.is_enabled = twl6030reg_is_enabled,
- .enable_time = twl6030reg_enable_time,
.get_status = twl6030reg_get_status,
};
@@ -886,7 +859,6 @@ static struct regulator_ops twlsmps_ops = {
.enable = twl6030reg_enable,
.disable = twl6030reg_disable,
.is_enabled = twl6030reg_is_enabled,
- .enable_time = twl6030reg_enable_time,
.set_mode = twl6030reg_set_mode,
@@ -909,7 +881,6 @@ static struct twlreg_info TWL4030_INFO_##label = { \
.id = num, \
.table_len = ARRAY_SIZE(label##_VSEL_table), \
.table = label##_VSEL_table, \
- .delay = turnon_delay, \
.remap = remap_conf, \
.desc = { \
.name = #label, \
@@ -918,6 +889,7 @@ static struct twlreg_info TWL4030_INFO_##label = { \
.ops = &twl4030ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
+ .enable_time = turnon_delay, \
}, \
}
@@ -925,7 +897,6 @@ static struct twlreg_info TWL4030_INFO_##label = { \
static struct twlreg_info TWL4030_INFO_##label = { \
.base = offset, \
.id = num, \
- .delay = turnon_delay, \
.remap = remap_conf, \
.desc = { \
.name = #label, \
@@ -933,6 +904,7 @@ static struct twlreg_info TWL4030_INFO_##label = { \
.ops = &twl4030smps_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
+ .enable_time = turnon_delay, \
}, \
}
@@ -955,7 +927,7 @@ static struct twlreg_info TWL6030_INFO_##label = { \
.desc = { \
.name = #label, \
.id = TWL6030_REG_##label, \
- .n_voltages = (max_mVolts - min_mVolts)/100 + 1, \
+ .n_voltages = 32, \
.ops = &twl6030ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
@@ -970,7 +942,7 @@ static struct twlreg_info TWL6025_INFO_##label = { \
.desc = { \
.name = #label, \
.id = TWL6025_REG_##label, \
- .n_voltages = ((max_mVolts - min_mVolts)/100) + 1, \
+ .n_voltages = 32, \
.ops = &twl6030ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
@@ -983,7 +955,6 @@ static struct twlreg_info TWLFIXED_INFO_##label = { \
.base = offset, \
.id = num, \
.min_mV = mVolts, \
- .delay = turnon_delay, \
.remap = remap_conf, \
.desc = { \
.name = #label, \
@@ -992,19 +963,20 @@ static struct twlreg_info TWLFIXED_INFO_##label = { \
.ops = &operations, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
+ .enable_time = turnon_delay, \
}, \
}
#define TWL6030_FIXED_RESOURCE(label, offset, turnon_delay) \
static struct twlreg_info TWLRES_INFO_##label = { \
.base = offset, \
- .delay = turnon_delay, \
.desc = { \
.name = #label, \
.id = TWL6030_REG_##label, \
.ops = &twl6030_fixed_resource, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
+ .enable_time = turnon_delay, \
}, \
}
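
With the turn-on delay recorded in desc.enable_time, the per-driver enable_time callbacks removed above become redundant: when ops->enable_time is absent the core falls back to the descriptor value, roughly like this (simplified sketch of the core lookup):

static int _regulator_get_enable_time(struct regulator_dev *rdev)
{
        /* A driver callback, if any, takes precedence... */
        if (rdev->desc->ops->enable_time)
                return rdev->desc->ops->enable_time(rdev);

        /* ...otherwise use the static value from the descriptor */
        return rdev->desc->enable_time;
}
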
@@ -1109,7 +1081,6 @@ static u8 twl_get_smps_mult(void)
#define TWL6030_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWL6030, label)
#define TWL6025_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWL6025, label)
#define TWLFIXED_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLFIXED, label)
-#define TWLRES_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLRES, label)
#define TWLSMPS_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLSMPS, label)
static const struct of_device_id twl_of_match[] __devinitconst = {
@@ -1157,7 +1128,6 @@ static const struct of_device_id twl_of_match[] __devinitconst = {
TWLFIXED_OF_MATCH("ti,twl6030-vusb", VUSB),
TWLFIXED_OF_MATCH("ti,twl6030-v1v8", V1V8),
TWLFIXED_OF_MATCH("ti,twl6030-v2v1", V2V1),
- TWLRES_OF_MATCH("ti,twl6030-clk32kg", CLK32KG),
TWLSMPS_OF_MATCH("ti,twl6025-smps3", SMPS3),
TWLSMPS_OF_MATCH("ti,twl6025-smps4", SMPS4),
TWLSMPS_OF_MATCH("ti,twl6025-vio", VIO),
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 099da11..7413885 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -215,8 +215,8 @@ static int wm831x_buckv_list_voltage(struct regulator_dev *rdev,
return -EINVAL;
}
-static int wm831x_buckv_select_min_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+static int wm831x_buckv_map_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
{
u16 vsel;
@@ -251,20 +251,14 @@ static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state)
return 0;
}
-static int wm831x_buckv_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV, unsigned *selector)
+static int wm831x_buckv_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned vsel)
{
struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
struct wm831x *wm831x = dcdc->wm831x;
int on_reg = dcdc->base + WM831X_DCDC_ON_CONFIG;
int dvs_reg = dcdc->base + WM831X_DCDC_DVS_CONTROL;
- int vsel, ret;
-
- vsel = wm831x_buckv_select_min_voltage(rdev, min_uV, max_uV);
- if (vsel < 0)
- return vsel;
-
- *selector = vsel;
+ int ret;
/* If this value is already set then do a GPIO update if we can */
if (dcdc->dvs_gpio && dcdc->on_vsel == vsel)
@@ -315,7 +309,7 @@ static int wm831x_buckv_set_suspend_voltage(struct regulator_dev *rdev,
u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL;
int vsel;
- vsel = wm831x_buckv_select_min_voltage(rdev, uV, uV);
+ vsel = wm831x_buckv_map_voltage(rdev, uV, uV);
if (vsel < 0)
return vsel;
@@ -373,9 +367,10 @@ static int wm831x_buckv_get_current_limit(struct regulator_dev *rdev)
}
static struct regulator_ops wm831x_buckv_ops = {
- .set_voltage = wm831x_buckv_set_voltage,
+ .set_voltage_sel = wm831x_buckv_set_voltage_sel,
.get_voltage_sel = wm831x_buckv_get_voltage_sel,
.list_voltage = wm831x_buckv_list_voltage,
+ .map_voltage = wm831x_buckv_map_voltage,
.set_suspend_voltage = wm831x_buckv_set_suspend_voltage,
.set_current_limit = wm831x_buckv_set_current_limit,
.get_current_limit = wm831x_buckv_get_current_limit,
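
Splitting .set_voltage into .map_voltage plus .set_voltage_sel hands the min/max bracketing to the core. Its selector-based set-voltage path then looks roughly like the sketch below (hypothetical helper name; the real _regulator_do_set_voltage also covers legacy set_voltage drivers and notifications):

static int do_set_voltage_sel(struct regulator_dev *rdev,
                              int min_uV, int max_uV)
{
        struct regulator_ops *ops = rdev->desc->ops;
        int sel, uV;

        /* The driver turns the requested window into a selector... */
        sel = ops->map_voltage(rdev, min_uV, max_uV);
        if (sel < 0)
                return sel;

        /* ...the core double-checks it before programming the hardware */
        uV = ops->list_voltage(rdev, sel);
        if (uV < min_uV || uV > max_uV)
                return -EINVAL;

        return ops->set_voltage_sel(rdev, sel);
}
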
@@ -599,60 +594,25 @@ static struct platform_driver wm831x_buckv_driver = {
* BUCKP specifics
*/
-static int wm831x_buckp_list_voltage(struct regulator_dev *rdev,
- unsigned selector)
-{
- if (selector <= WM831X_BUCKP_MAX_SELECTOR)
- return 850000 + (selector * 25000);
- else
- return -EINVAL;
-}
-
-static int wm831x_buckp_set_voltage_int(struct regulator_dev *rdev, int reg,
- int min_uV, int max_uV, int *selector)
+static int wm831x_buckp_set_suspend_voltage(struct regulator_dev *rdev, int uV)
{
struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
struct wm831x *wm831x = dcdc->wm831x;
- u16 vsel;
-
- if (min_uV <= 34000000)
- vsel = (min_uV - 850000) / 25000;
- else
- return -EINVAL;
-
- if (wm831x_buckp_list_voltage(rdev, vsel) > max_uV)
- return -EINVAL;
-
- *selector = vsel;
-
- return wm831x_set_bits(wm831x, reg, WM831X_DC3_ON_VSEL_MASK, vsel);
-}
-
-static int wm831x_buckp_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV,
- unsigned *selector)
-{
- struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
- u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG;
-
- return wm831x_buckp_set_voltage_int(rdev, reg, min_uV, max_uV,
- selector);
-}
-
-static int wm831x_buckp_set_suspend_voltage(struct regulator_dev *rdev,
- int uV)
-{
- struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL;
- unsigned selector;
+ int sel;
+
+ sel = regulator_map_voltage_linear(rdev, uV, uV);
+ if (sel < 0)
+ return sel;
- return wm831x_buckp_set_voltage_int(rdev, reg, uV, uV, &selector);
+ return wm831x_set_bits(wm831x, reg, WM831X_DC3_ON_VSEL_MASK, sel);
}
static struct regulator_ops wm831x_buckp_ops = {
- .set_voltage = wm831x_buckp_set_voltage,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .list_voltage = wm831x_buckp_list_voltage,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
.set_suspend_voltage = wm831x_buckp_set_suspend_voltage,
.is_enabled = regulator_is_enabled_regmap,
@@ -715,6 +675,8 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
dcdc->desc.vsel_mask = WM831X_DC3_ON_VSEL_MASK;
dcdc->desc.enable_reg = WM831X_DCDC_ENABLE;
dcdc->desc.enable_mask = 1 << id;
+ dcdc->desc.min_uV = 850000;
+ dcdc->desc.uV_step = 25000;
config.dev = pdev->dev.parent;
if (pdata)
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index a9a28d8..5cb70ca 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -78,13 +78,10 @@ static int wm831x_gp_ldo_list_voltage(struct regulator_dev *rdev,
return -EINVAL;
}
-static int wm831x_gp_ldo_set_voltage_int(struct regulator_dev *rdev, int reg,
- int min_uV, int max_uV,
- unsigned *selector)
+static int wm831x_gp_ldo_map_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
{
- struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
- struct wm831x *wm831x = ldo->wm831x;
- int vsel, ret;
+ int volt, vsel;
if (min_uV < 900000)
vsel = 0;
@@ -94,36 +91,25 @@ static int wm831x_gp_ldo_set_voltage_int(struct regulator_dev *rdev, int reg,
vsel = ((min_uV - 1700000) / 100000)
+ WM831X_GP_LDO_SELECTOR_LOW + 1;
- ret = wm831x_gp_ldo_list_voltage(rdev, vsel);
- if (ret < 0)
- return ret;
- if (ret < min_uV || ret > max_uV)
+ volt = wm831x_gp_ldo_list_voltage(rdev, vsel);
+ if (volt < min_uV || volt > max_uV)
return -EINVAL;
- *selector = vsel;
-
- return wm831x_set_bits(wm831x, reg, WM831X_LDO1_ON_VSEL_MASK, vsel);
-}
-
-static int wm831x_gp_ldo_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV,
- unsigned *selector)
-{
- struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
- int reg = ldo->base + WM831X_LDO_ON_CONTROL;
-
- return wm831x_gp_ldo_set_voltage_int(rdev, reg, min_uV, max_uV,
- selector);
+ return vsel;
}
static int wm831x_gp_ldo_set_suspend_voltage(struct regulator_dev *rdev,
int uV)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
- int reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
- unsigned int selector;
+ struct wm831x *wm831x = ldo->wm831x;
+ int sel, reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
- return wm831x_gp_ldo_set_voltage_int(rdev, reg, uV, uV, &selector);
+ sel = wm831x_gp_ldo_map_voltage(rdev, uV, uV);
+ if (sel < 0)
+ return sel;
+
+ return wm831x_set_bits(wm831x, reg, WM831X_LDO1_ON_VSEL_MASK, sel);
}
static unsigned int wm831x_gp_ldo_get_mode(struct regulator_dev *rdev)
@@ -243,8 +229,9 @@ static unsigned int wm831x_gp_ldo_get_optimum_mode(struct regulator_dev *rdev,
static struct regulator_ops wm831x_gp_ldo_ops = {
.list_voltage = wm831x_gp_ldo_list_voltage,
+ .map_voltage = wm831x_gp_ldo_map_voltage,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage = wm831x_gp_ldo_set_voltage,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_suspend_voltage = wm831x_gp_ldo_set_suspend_voltage,
.get_mode = wm831x_gp_ldo_get_mode,
.set_mode = wm831x_gp_ldo_set_mode,
@@ -384,13 +371,10 @@ static int wm831x_aldo_list_voltage(struct regulator_dev *rdev,
return -EINVAL;
}
-static int wm831x_aldo_set_voltage_int(struct regulator_dev *rdev, int reg,
- int min_uV, int max_uV,
- unsigned *selector)
+static int wm831x_aldo_map_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
{
- struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
- struct wm831x *wm831x = ldo->wm831x;
- int vsel, ret;
+ int volt, vsel;
if (min_uV < 1000000)
vsel = 0;
@@ -400,35 +384,26 @@ static int wm831x_aldo_set_voltage_int(struct regulator_dev *rdev, int reg,
vsel = ((min_uV - 1700000) / 100000)
+ WM831X_ALDO_SELECTOR_LOW + 1;
- ret = wm831x_aldo_list_voltage(rdev, vsel);
- if (ret < 0)
- return ret;
- if (ret < min_uV || ret > max_uV)
+ volt = wm831x_aldo_list_voltage(rdev, vsel);
+ if (volt < min_uV || volt > max_uV)
return -EINVAL;
- *selector = vsel;
-
- return wm831x_set_bits(wm831x, reg, WM831X_LDO7_ON_VSEL_MASK, vsel);
-}
-
-static int wm831x_aldo_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV, unsigned *selector)
-{
- struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
- int reg = ldo->base + WM831X_LDO_ON_CONTROL;
+ return vsel;
- return wm831x_aldo_set_voltage_int(rdev, reg, min_uV, max_uV,
- selector);
}
static int wm831x_aldo_set_suspend_voltage(struct regulator_dev *rdev,
int uV)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
- int reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
- unsigned int selector;
+ struct wm831x *wm831x = ldo->wm831x;
+ int sel, reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
+
+ sel = wm831x_aldo_map_voltage(rdev, uV, uV);
+ if (sel < 0)
+ return sel;
- return wm831x_aldo_set_voltage_int(rdev, reg, uV, uV, &selector);
+ return wm831x_set_bits(wm831x, reg, WM831X_LDO7_ON_VSEL_MASK, sel);
}
static unsigned int wm831x_aldo_get_mode(struct regulator_dev *rdev)
@@ -506,8 +481,9 @@ static int wm831x_aldo_get_status(struct regulator_dev *rdev)
static struct regulator_ops wm831x_aldo_ops = {
.list_voltage = wm831x_aldo_list_voltage,
+ .map_voltage = wm831x_aldo_map_voltage,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage = wm831x_aldo_set_voltage,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_suspend_voltage = wm831x_aldo_set_suspend_voltage,
.get_mode = wm831x_aldo_get_mode,
.set_mode = wm831x_aldo_set_mode,
@@ -628,47 +604,18 @@ static struct platform_driver wm831x_aldo_driver = {
#define WM831X_ALIVE_LDO_MAX_SELECTOR 0xf
-static int wm831x_alive_ldo_set_voltage_int(struct regulator_dev *rdev,
- int reg,
- int min_uV, int max_uV,
- unsigned *selector)
-{
- struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
- struct wm831x *wm831x = ldo->wm831x;
- int vsel, ret;
-
- vsel = (min_uV - 800000) / 50000;
-
- ret = regulator_list_voltage_linear(rdev, vsel);
- if (ret < 0)
- return ret;
- if (ret < min_uV || ret > max_uV)
- return -EINVAL;
-
- *selector = vsel;
-
- return wm831x_set_bits(wm831x, reg, WM831X_LDO11_ON_VSEL_MASK, vsel);
-}
-
-static int wm831x_alive_ldo_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV,
- unsigned *selector)
-{
- struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
- int reg = ldo->base + WM831X_ALIVE_LDO_ON_CONTROL;
-
- return wm831x_alive_ldo_set_voltage_int(rdev, reg, min_uV, max_uV,
- selector);
-}
-
static int wm831x_alive_ldo_set_suspend_voltage(struct regulator_dev *rdev,
int uV)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
- int reg = ldo->base + WM831X_ALIVE_LDO_SLEEP_CONTROL;
- unsigned selector;
+ struct wm831x *wm831x = ldo->wm831x;
+ int sel, reg = ldo->base + WM831X_ALIVE_LDO_SLEEP_CONTROL;
+
+ sel = regulator_map_voltage_linear(rdev, uV, uV);
+ if (sel < 0)
+ return sel;
- return wm831x_alive_ldo_set_voltage_int(rdev, reg, uV, uV, &selector);
+ return wm831x_set_bits(wm831x, reg, WM831X_LDO11_ON_VSEL_MASK, sel);
}
static int wm831x_alive_ldo_get_status(struct regulator_dev *rdev)
@@ -690,8 +637,9 @@ static int wm831x_alive_ldo_get_status(struct regulator_dev *rdev)
static struct regulator_ops wm831x_alive_ldo_ops = {
.list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage = wm831x_alive_ldo_set_voltage,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_suspend_voltage = wm831x_alive_ldo_set_suspend_voltage,
.get_status = wm831x_alive_ldo_get_status,
@@ -753,6 +701,7 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev)
ldo->desc.enable_mask = 1 << id;
ldo->desc.min_uV = 800000;
ldo->desc.uV_step = 50000;
+ ldo->desc.enable_time = 1000;
config.dev = pdev->dev.parent;
if (pdata)
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 94e550d..7f0fa22 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -108,33 +108,6 @@ static int get_isink_val(int min_uA, int max_uA, u16 *setting)
return -EINVAL;
}
-static inline int wm8350_ldo_val_to_mvolts(unsigned int val)
-{
- if (val < 16)
- return (val * 50) + 900;
- else
- return ((val - 16) * 100) + 1800;
-
-}
-
-static inline unsigned int wm8350_ldo_mvolts_to_val(int mV)
-{
- if (mV < 1800)
- return (mV - 900) / 50;
- else
- return ((mV - 1800) / 100) + 16;
-}
-
-static inline int wm8350_dcdc_val_to_mvolts(unsigned int val)
-{
- return (val * 25) + 850;
-}
-
-static inline unsigned int wm8350_dcdc_mvolts_to_val(int mV)
-{
- return (mV - 850) / 25;
-}
-
static int wm8350_isink_set_current(struct regulator_dev *rdev, int min_uA,
int max_uA)
{
@@ -359,104 +332,13 @@ int wm8350_isink_set_flash(struct wm8350 *wm8350, int isink, u16 mode,
}
EXPORT_SYMBOL_GPL(wm8350_isink_set_flash);
-static int wm8350_dcdc_set_voltage(struct regulator_dev *rdev, int min_uV,
- int max_uV, unsigned *selector)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int volt_reg, dcdc = rdev_get_id(rdev), mV,
- min_mV = min_uV / 1000, max_mV = max_uV / 1000;
- u16 val;
-
- if (min_mV < 850 || min_mV > 4025)
- return -EINVAL;
- if (max_mV < 850 || max_mV > 4025)
- return -EINVAL;
-
- /* step size is 25mV */
- mV = (min_mV - 826) / 25;
- if (wm8350_dcdc_val_to_mvolts(mV) > max_mV)
- return -EINVAL;
- BUG_ON(wm8350_dcdc_val_to_mvolts(mV) < min_mV);
-
- switch (dcdc) {
- case WM8350_DCDC_1:
- volt_reg = WM8350_DCDC1_CONTROL;
- break;
- case WM8350_DCDC_3:
- volt_reg = WM8350_DCDC3_CONTROL;
- break;
- case WM8350_DCDC_4:
- volt_reg = WM8350_DCDC4_CONTROL;
- break;
- case WM8350_DCDC_6:
- volt_reg = WM8350_DCDC6_CONTROL;
- break;
- case WM8350_DCDC_2:
- case WM8350_DCDC_5:
- default:
- return -EINVAL;
- }
-
- *selector = mV;
-
- /* all DCDCs have same mV bits */
- val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_DC1_VSEL_MASK;
- wm8350_reg_write(wm8350, volt_reg, val | mV);
- return 0;
-}
-
-static int wm8350_dcdc_get_voltage_sel(struct regulator_dev *rdev)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int volt_reg, dcdc = rdev_get_id(rdev);
-
- switch (dcdc) {
- case WM8350_DCDC_1:
- volt_reg = WM8350_DCDC1_CONTROL;
- break;
- case WM8350_DCDC_3:
- volt_reg = WM8350_DCDC3_CONTROL;
- break;
- case WM8350_DCDC_4:
- volt_reg = WM8350_DCDC4_CONTROL;
- break;
- case WM8350_DCDC_6:
- volt_reg = WM8350_DCDC6_CONTROL;
- break;
- case WM8350_DCDC_2:
- case WM8350_DCDC_5:
- default:
- return -EINVAL;
- }
-
- /* all DCDCs have same mV bits */
- return wm8350_reg_read(wm8350, volt_reg) & WM8350_DC1_VSEL_MASK;
-}
-
-static int wm8350_dcdc_list_voltage(struct regulator_dev *rdev,
- unsigned selector)
-{
- if (selector > WM8350_DCDC_MAX_VSEL)
- return -EINVAL;
- return wm8350_dcdc_val_to_mvolts(selector) * 1000;
-}
-
static int wm8350_dcdc_set_suspend_voltage(struct regulator_dev *rdev, int uV)
{
struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int volt_reg, mV = uV / 1000, dcdc = rdev_get_id(rdev);
+ int sel, volt_reg, dcdc = rdev_get_id(rdev);
u16 val;
- dev_dbg(wm8350->dev, "%s %d mV %d\n", __func__, dcdc, mV);
-
- if (mV && (mV < 850 || mV > 4025)) {
- dev_err(wm8350->dev,
- "DCDC%d suspend voltage %d mV out of range\n",
- dcdc, mV);
- return -EINVAL;
- }
- if (mV == 0)
- mV = 850;
+ dev_dbg(wm8350->dev, "%s %d mV %d\n", __func__, dcdc, uV / 1000);
switch (dcdc) {
case WM8350_DCDC_1:
@@ -477,10 +359,13 @@ static int wm8350_dcdc_set_suspend_voltage(struct regulator_dev *rdev, int uV)
return -EINVAL;
}
+ sel = regulator_map_voltage_linear(rdev, uV, uV);
+ if (sel < 0)
+ return -EINVAL;
+
/* all DCDCs have same mV bits */
val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_DC1_VSEL_MASK;
- wm8350_reg_write(wm8350, volt_reg,
- val | wm8350_dcdc_mvolts_to_val(mV));
+ wm8350_reg_write(wm8350, volt_reg, val | sel);
return 0;
}
@@ -657,19 +542,49 @@ static int wm8350_dcdc_set_suspend_mode(struct regulator_dev *rdev,
return 0;
}
+static int wm8350_ldo_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ if (selector > WM8350_LDO1_VSEL_MASK)
+ return -EINVAL;
+
+ if (selector < 16)
+ return (selector * 50000) + 900000;
+ else
+ return ((selector - 16) * 100000) + 1800000;
+}
+
+static int wm8350_ldo_map_voltage(struct regulator_dev *rdev, int min_uV,
+ int max_uV)
+{
+ int volt, sel;
+ int min_mV = min_uV / 1000;
+ int max_mV = max_uV / 1000;
+
+ if (min_mV < 900 || min_mV > 3300)
+ return -EINVAL;
+ if (max_mV < 900 || max_mV > 3300)
+ return -EINVAL;
+
+ if (min_mV < 1800) /* step size is 50mV < 1800mV */
+ sel = DIV_ROUND_UP(min_uV - 900000, 50000);
+ else /* step size is 100mV > 1800mV */
+ sel = DIV_ROUND_UP(min_uV - 1800000, 100000) + 16;
+
+ volt = wm8350_ldo_list_voltage(rdev, sel);
+ if (volt < min_uV || volt > max_uV)
+ return -EINVAL;
+
+ return sel;
+}
+
static int wm8350_ldo_set_suspend_voltage(struct regulator_dev *rdev, int uV)
{
struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int volt_reg, mV = uV / 1000, ldo = rdev_get_id(rdev);
+ int sel, volt_reg, ldo = rdev_get_id(rdev);
u16 val;
- dev_dbg(wm8350->dev, "%s %d mV %d\n", __func__, ldo, mV);
-
- if (mV < 900 || mV > 3300) {
- dev_err(wm8350->dev, "LDO%d voltage %d mV out of range\n",
- ldo, mV);
- return -EINVAL;
- }
+ dev_dbg(wm8350->dev, "%s %d mV %d\n", __func__, ldo, uV / 1000);
switch (ldo) {
case WM8350_LDO_1:
@@ -688,10 +603,13 @@ static int wm8350_ldo_set_suspend_voltage(struct regulator_dev *rdev, int uV)
return -EINVAL;
}
+ sel = wm8350_ldo_map_voltage(rdev, uV, uV);
+ if (sel < 0)
+ return -EINVAL;
+
/* all LDOs have same mV bits */
val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_LDO1_VSEL_MASK;
- wm8350_reg_write(wm8350, volt_reg,
- val | wm8350_ldo_mvolts_to_val(mV));
+ wm8350_reg_write(wm8350, volt_reg, val | sel);
return 0;
}
@@ -753,92 +671,6 @@ static int wm8350_ldo_set_suspend_disable(struct regulator_dev *rdev)
return 0;
}
-static int wm8350_ldo_set_voltage(struct regulator_dev *rdev, int min_uV,
- int max_uV, unsigned *selector)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int volt_reg, ldo = rdev_get_id(rdev), mV, min_mV = min_uV / 1000,
- max_mV = max_uV / 1000;
- u16 val;
-
- if (min_mV < 900 || min_mV > 3300)
- return -EINVAL;
- if (max_mV < 900 || max_mV > 3300)
- return -EINVAL;
-
- if (min_mV < 1800) {
- /* step size is 50mV < 1800mV */
- mV = (min_mV - 851) / 50;
- if (wm8350_ldo_val_to_mvolts(mV) > max_mV)
- return -EINVAL;
- BUG_ON(wm8350_ldo_val_to_mvolts(mV) < min_mV);
- } else {
- /* step size is 100mV > 1800mV */
- mV = ((min_mV - 1701) / 100) + 16;
- if (wm8350_ldo_val_to_mvolts(mV) > max_mV)
- return -EINVAL;
- BUG_ON(wm8350_ldo_val_to_mvolts(mV) < min_mV);
- }
-
- switch (ldo) {
- case WM8350_LDO_1:
- volt_reg = WM8350_LDO1_CONTROL;
- break;
- case WM8350_LDO_2:
- volt_reg = WM8350_LDO2_CONTROL;
- break;
- case WM8350_LDO_3:
- volt_reg = WM8350_LDO3_CONTROL;
- break;
- case WM8350_LDO_4:
- volt_reg = WM8350_LDO4_CONTROL;
- break;
- default:
- return -EINVAL;
- }
-
- *selector = mV;
-
- /* all LDOs have same mV bits */
- val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_LDO1_VSEL_MASK;
- wm8350_reg_write(wm8350, volt_reg, val | mV);
- return 0;
-}
-
-static int wm8350_ldo_get_voltage_sel(struct regulator_dev *rdev)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int volt_reg, ldo = rdev_get_id(rdev);
-
- switch (ldo) {
- case WM8350_LDO_1:
- volt_reg = WM8350_LDO1_CONTROL;
- break;
- case WM8350_LDO_2:
- volt_reg = WM8350_LDO2_CONTROL;
- break;
- case WM8350_LDO_3:
- volt_reg = WM8350_LDO3_CONTROL;
- break;
- case WM8350_LDO_4:
- volt_reg = WM8350_LDO4_CONTROL;
- break;
- default:
- return -EINVAL;
- }
-
- /* all LDOs have same mV bits */
- return wm8350_reg_read(wm8350, volt_reg) & WM8350_LDO1_VSEL_MASK;
-}
-
-static int wm8350_ldo_list_voltage(struct regulator_dev *rdev,
- unsigned selector)
-{
- if (selector > WM8350_LDO1_VSEL_MASK)
- return -EINVAL;
- return wm8350_ldo_val_to_mvolts(selector) * 1000;
-}
-
int wm8350_dcdc_set_slot(struct wm8350 *wm8350, int dcdc, u16 start,
u16 stop, u16 fault)
{
@@ -959,63 +791,6 @@ int wm8350_dcdc25_set_mode(struct wm8350 *wm8350, int dcdc, u16 mode,
}
EXPORT_SYMBOL_GPL(wm8350_dcdc25_set_mode);
-static int wm8350_dcdc_enable(struct regulator_dev *rdev)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int dcdc = rdev_get_id(rdev);
- u16 shift;
-
- if (dcdc < WM8350_DCDC_1 || dcdc > WM8350_DCDC_6)
- return -EINVAL;
-
- shift = dcdc - WM8350_DCDC_1;
- wm8350_set_bits(wm8350, WM8350_DCDC_LDO_REQUESTED, 1 << shift);
- return 0;
-}
-
-static int wm8350_dcdc_disable(struct regulator_dev *rdev)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int dcdc = rdev_get_id(rdev);
- u16 shift;
-
- if (dcdc < WM8350_DCDC_1 || dcdc > WM8350_DCDC_6)
- return -EINVAL;
-
- shift = dcdc - WM8350_DCDC_1;
- wm8350_clear_bits(wm8350, WM8350_DCDC_LDO_REQUESTED, 1 << shift);
-
- return 0;
-}
-
-static int wm8350_ldo_enable(struct regulator_dev *rdev)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int ldo = rdev_get_id(rdev);
- u16 shift;
-
- if (ldo < WM8350_LDO_1 || ldo > WM8350_LDO_4)
- return -EINVAL;
-
- shift = (ldo - WM8350_LDO_1) + 8;
- wm8350_set_bits(wm8350, WM8350_DCDC_LDO_REQUESTED, 1 << shift);
- return 0;
-}
-
-static int wm8350_ldo_disable(struct regulator_dev *rdev)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int ldo = rdev_get_id(rdev);
- u16 shift;
-
- if (ldo < WM8350_LDO_1 || ldo > WM8350_LDO_4)
- return -EINVAL;
-
- shift = (ldo - WM8350_LDO_1) + 8;
- wm8350_clear_bits(wm8350, WM8350_DCDC_LDO_REQUESTED, 1 << shift);
- return 0;
-}
-
static int force_continuous_enable(struct wm8350 *wm8350, int dcdc, int enable)
{
int reg = 0, ret;
@@ -1197,42 +972,17 @@ static unsigned int wm8350_dcdc_get_optimum_mode(struct regulator_dev *rdev,
return mode;
}
-static int wm8350_dcdc_is_enabled(struct regulator_dev *rdev)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int dcdc = rdev_get_id(rdev), shift;
-
- if (dcdc < WM8350_DCDC_1 || dcdc > WM8350_DCDC_6)
- return -EINVAL;
-
- shift = dcdc - WM8350_DCDC_1;
- return wm8350_reg_read(wm8350, WM8350_DCDC_LDO_REQUESTED)
- & (1 << shift);
-}
-
-static int wm8350_ldo_is_enabled(struct regulator_dev *rdev)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int ldo = rdev_get_id(rdev), shift;
-
- if (ldo < WM8350_LDO_1 || ldo > WM8350_LDO_4)
- return -EINVAL;
-
- shift = (ldo - WM8350_LDO_1) + 8;
- return wm8350_reg_read(wm8350, WM8350_DCDC_LDO_REQUESTED)
- & (1 << shift);
-}
-
static struct regulator_ops wm8350_dcdc_ops = {
- .set_voltage = wm8350_dcdc_set_voltage,
- .get_voltage_sel = wm8350_dcdc_get_voltage_sel,
- .list_voltage = wm8350_dcdc_list_voltage,
- .enable = wm8350_dcdc_enable,
- .disable = wm8350_dcdc_disable,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.get_mode = wm8350_dcdc_get_mode,
.set_mode = wm8350_dcdc_set_mode,
.get_optimum_mode = wm8350_dcdc_get_optimum_mode,
- .is_enabled = wm8350_dcdc_is_enabled,
.set_suspend_voltage = wm8350_dcdc_set_suspend_voltage,
.set_suspend_enable = wm8350_dcdc_set_suspend_enable,
.set_suspend_disable = wm8350_dcdc_set_suspend_disable,
@@ -1240,20 +990,21 @@ static struct regulator_ops wm8350_dcdc_ops = {
};
static struct regulator_ops wm8350_dcdc2_5_ops = {
- .enable = wm8350_dcdc_enable,
- .disable = wm8350_dcdc_disable,
- .is_enabled = wm8350_dcdc_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_suspend_enable = wm8350_dcdc25_set_suspend_enable,
.set_suspend_disable = wm8350_dcdc25_set_suspend_disable,
};
static struct regulator_ops wm8350_ldo_ops = {
- .set_voltage = wm8350_ldo_set_voltage,
- .get_voltage_sel = wm8350_ldo_get_voltage_sel,
+ .map_voltage = wm8350_ldo_map_voltage,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = wm8350_ldo_list_voltage,
- .enable = wm8350_ldo_enable,
- .disable = wm8350_ldo_disable,
- .is_enabled = wm8350_ldo_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.get_mode = wm8350_ldo_get_mode,
.set_suspend_voltage = wm8350_ldo_set_suspend_voltage,
.set_suspend_enable = wm8350_ldo_set_suspend_enable,
@@ -1277,6 +1028,12 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_UV_DC1,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8350_DCDC_MAX_VSEL + 1,
+ .min_uV = 850000,
+ .uV_step = 25000,
+ .vsel_reg = WM8350_DCDC1_CONTROL,
+ .vsel_mask = WM8350_DC1_VSEL_MASK,
+ .enable_reg = WM8350_DCDC_LDO_REQUESTED,
+ .enable_mask = WM8350_DC1_ENA,
.owner = THIS_MODULE,
},
{
@@ -1285,6 +1042,8 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.ops = &wm8350_dcdc2_5_ops,
.irq = WM8350_IRQ_UV_DC2,
.type = REGULATOR_VOLTAGE,
+ .enable_reg = WM8350_DCDC_LDO_REQUESTED,
+ .enable_mask = WM8350_DC2_ENA,
.owner = THIS_MODULE,
},
{
@@ -1294,6 +1053,12 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_UV_DC3,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8350_DCDC_MAX_VSEL + 1,
+ .min_uV = 850000,
+ .uV_step = 25000,
+ .vsel_reg = WM8350_DCDC3_CONTROL,
+ .vsel_mask = WM8350_DC3_VSEL_MASK,
+ .enable_reg = WM8350_DCDC_LDO_REQUESTED,
+ .enable_mask = WM8350_DC3_ENA,
.owner = THIS_MODULE,
},
{
@@ -1303,6 +1068,12 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_UV_DC4,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8350_DCDC_MAX_VSEL + 1,
+ .min_uV = 850000,
+ .uV_step = 25000,
+ .vsel_reg = WM8350_DCDC4_CONTROL,
+ .vsel_mask = WM8350_DC4_VSEL_MASK,
+ .enable_reg = WM8350_DCDC_LDO_REQUESTED,
+ .enable_mask = WM8350_DC4_ENA,
.owner = THIS_MODULE,
},
{
@@ -1311,6 +1082,8 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.ops = &wm8350_dcdc2_5_ops,
.irq = WM8350_IRQ_UV_DC5,
.type = REGULATOR_VOLTAGE,
+ .enable_reg = WM8350_DCDC_LDO_REQUESTED,
+ .enable_mask = WM8350_DC5_ENA,
.owner = THIS_MODULE,
},
{
@@ -1320,6 +1093,12 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_UV_DC6,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8350_DCDC_MAX_VSEL + 1,
+ .min_uV = 850000,
+ .uV_step = 25000,
+ .vsel_reg = WM8350_DCDC6_CONTROL,
+ .vsel_mask = WM8350_DC6_VSEL_MASK,
+ .enable_reg = WM8350_DCDC_LDO_REQUESTED,
+ .enable_mask = WM8350_DC6_ENA,
.owner = THIS_MODULE,
},
{
@@ -1329,6 +1108,10 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_UV_LDO1,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8350_LDO1_VSEL_MASK + 1,
+ .vsel_reg = WM8350_LDO1_CONTROL,
+ .vsel_mask = WM8350_LDO1_VSEL_MASK,
+ .enable_reg = WM8350_DCDC_LDO_REQUESTED,
+ .enable_mask = WM8350_LDO1_ENA,
.owner = THIS_MODULE,
},
{
@@ -1338,6 +1121,10 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_UV_LDO2,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8350_LDO2_VSEL_MASK + 1,
+ .vsel_reg = WM8350_LDO2_CONTROL,
+ .vsel_mask = WM8350_LDO2_VSEL_MASK,
+ .enable_reg = WM8350_DCDC_LDO_REQUESTED,
+ .enable_mask = WM8350_LDO2_ENA,
.owner = THIS_MODULE,
},
{
@@ -1347,6 +1134,10 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_UV_LDO3,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8350_LDO3_VSEL_MASK + 1,
+ .vsel_reg = WM8350_LDO3_CONTROL,
+ .vsel_mask = WM8350_LDO3_VSEL_MASK,
+ .enable_reg = WM8350_DCDC_LDO_REQUESTED,
+ .enable_mask = WM8350_LDO3_ENA,
.owner = THIS_MODULE,
},
{
@@ -1356,6 +1147,10 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_UV_LDO4,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8350_LDO4_VSEL_MASK + 1,
+ .vsel_reg = WM8350_LDO4_CONTROL,
+ .vsel_mask = WM8350_LDO4_VSEL_MASK,
+ .enable_reg = WM8350_DCDC_LDO_REQUESTED,
+ .enable_mask = WM8350_LDO4_ENA,
.owner = THIS_MODULE,
},
{
@@ -1429,6 +1224,7 @@ static int wm8350_regulator_probe(struct platform_device *pdev)
config.dev = &pdev->dev;
config.init_data = pdev->dev.platform_data;
config.driver_data = dev_get_drvdata(&pdev->dev);
+ config.regmap = wm8350->regmap;
/* register regulator */
rdev = regulator_register(&wm8350_reg[pdev->id], &config);
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index 69a2b7c..9035dd0 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -28,34 +28,26 @@ static int wm8400_ldo_list_voltage(struct regulator_dev *dev,
if (selector < 15)
return 900000 + (selector * 50000);
else
- return 1600000 + ((selector - 14) * 100000);
+ return 1700000 + ((selector - 15) * 100000);
}
static int wm8400_ldo_map_voltage(struct regulator_dev *dev,
int min_uV, int max_uV)
{
u16 val;
+ int volt;
if (min_uV < 900000 || min_uV > 3300000)
return -EINVAL;
- if (min_uV < 1700000) {
- /* Steps of 50mV from 900mV; */
+ if (min_uV < 1700000) /* Steps of 50mV from 900mV; */
val = DIV_ROUND_UP(min_uV - 900000, 50000);
+ else /* Steps of 100mV from 1700mV */
+ val = DIV_ROUND_UP(min_uV - 1700000, 100000) + 15;
- if ((val * 50000) + 900000 > max_uV)
- return -EINVAL;
- BUG_ON((val * 50000) + 900000 < min_uV);
- } else {
- /* Steps of 100mV from 1700mV */
- val = DIV_ROUND_UP(min_uV - 1700000, 100000);
-
- if ((val * 100000) + 1700000 > max_uV)
- return -EINVAL;
- BUG_ON((val * 100000) + 1700000 < min_uV);
-
- val += 0xf;
- }
+ volt = wm8400_ldo_list_voltage(dev, val);
+ if (volt < min_uV || volt > max_uV)
+ return -EINVAL;
return val;
}
@@ -152,6 +144,7 @@ static struct regulator_ops wm8400_dcdc_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_mode = wm8400_dcdc_get_mode,
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 9a99431..86bb48db 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -26,8 +26,6 @@
#include <linux/mfd/wm8994/pdata.h>
struct wm8994_ldo {
- int enable;
- bool is_enabled;
struct regulator_dev *regulator;
struct wm8994 *wm8994;
};
@@ -35,64 +33,9 @@ struct wm8994_ldo {
#define WM8994_LDO1_MAX_SELECTOR 0x7
#define WM8994_LDO2_MAX_SELECTOR 0x3
-static int wm8994_ldo_enable(struct regulator_dev *rdev)
-{
- struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
-
- /* If we have no soft control assume that the LDO is always enabled. */
- if (!ldo->enable)
- return 0;
-
- gpio_set_value_cansleep(ldo->enable, 1);
- ldo->is_enabled = true;
-
- return 0;
-}
-
-static int wm8994_ldo_disable(struct regulator_dev *rdev)
-{
- struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
-
- /* If we have no soft control assume that the LDO is always enabled. */
- if (!ldo->enable)
- return -EINVAL;
-
- gpio_set_value_cansleep(ldo->enable, 0);
- ldo->is_enabled = false;
-
- return 0;
-}
-
-static int wm8994_ldo_is_enabled(struct regulator_dev *rdev)
-{
- struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
-
- return ldo->is_enabled;
-}
-
-static int wm8994_ldo_enable_time(struct regulator_dev *rdev)
-{
- /* 3ms is fairly conservative but this shouldn't be too performance
- * critical; can be tweaked per-system if required. */
- return 3000;
-}
-
-static int wm8994_ldo1_list_voltage(struct regulator_dev *rdev,
- unsigned int selector)
-{
- if (selector > WM8994_LDO1_MAX_SELECTOR)
- return -EINVAL;
-
- return (selector * 100000) + 2400000;
-}
-
static struct regulator_ops wm8994_ldo1_ops = {
- .enable = wm8994_ldo_enable,
- .disable = wm8994_ldo_disable,
- .is_enabled = wm8994_ldo_is_enabled,
- .enable_time = wm8994_ldo_enable_time,
-
- .list_voltage = wm8994_ldo1_list_voltage,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
};
@@ -124,11 +67,6 @@ static int wm8994_ldo2_list_voltage(struct regulator_dev *rdev,
}
static struct regulator_ops wm8994_ldo2_ops = {
- .enable = wm8994_ldo_enable,
- .disable = wm8994_ldo_disable,
- .is_enabled = wm8994_ldo_is_enabled,
- .enable_time = wm8994_ldo_enable_time,
-
.list_voltage = wm8994_ldo2_list_voltage,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -143,6 +81,9 @@ static const struct regulator_desc wm8994_ldo_desc[] = {
.vsel_reg = WM8994_LDO_1,
.vsel_mask = WM8994_LDO1_VSEL_MASK,
.ops = &wm8994_ldo1_ops,
+ .min_uV = 2400000,
+ .uV_step = 100000,
+ .enable_time = 3000,
.owner = THIS_MODULE,
},
{
@@ -153,6 +94,7 @@ static const struct regulator_desc wm8994_ldo_desc[] = {
.vsel_reg = WM8994_LDO_2,
.vsel_mask = WM8994_LDO2_VSEL_MASK,
.ops = &wm8994_ldo2_ops,
+ .enable_time = 3000,
.owner = THIS_MODULE,
},
};
@@ -176,39 +118,26 @@ static __devinit int wm8994_ldo_probe(struct platform_device *pdev)
ldo->wm8994 = wm8994;
- if (pdata->ldo[id].enable && gpio_is_valid(pdata->ldo[id].enable)) {
- ldo->enable = pdata->ldo[id].enable;
-
- ret = gpio_request_one(ldo->enable, 0, "WM8994 LDO enable");
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to get enable GPIO: %d\n",
- ret);
- goto err;
- }
- } else
- ldo->is_enabled = true;
-
config.dev = wm8994->dev;
config.driver_data = ldo;
config.regmap = wm8994->regmap;
- if (pdata)
+ if (pdata) {
config.init_data = pdata->ldo[id].init_data;
+ config.ena_gpio = pdata->ldo[id].enable;
+ }
ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &config);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
id + 1, ret);
- goto err_gpio;
+ goto err;
}
platform_set_drvdata(pdev, ldo);
return 0;
-err_gpio:
- if (gpio_is_valid(ldo->enable))
- gpio_free(ldo->enable);
err:
return ret;
}
@@ -220,8 +149,6 @@ static __devexit int wm8994_ldo_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
regulator_unregister(ldo->regulator);
- if (gpio_is_valid(ldo->enable))
- gpio_free(ldo->enable);
return 0;
}
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 24d880e..f8d818a 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -4,9 +4,11 @@ menu "Remoteproc drivers (EXPERIMENTAL)"
config REMOTEPROC
tristate
depends on EXPERIMENTAL
+ select FW_LOADER
config OMAP_REMOTEPROC
tristate "OMAP remoteproc support"
+ depends on EXPERIMENTAL
depends on ARCH_OMAP4
depends on OMAP_IOMMU
select REMOTEPROC
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 69425c4..de138e3 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -182,7 +182,7 @@ static int __devinit omap_rproc_probe(struct platform_device *pdev)
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
- dev_err(pdev->dev.parent, "dma_set_coherent_mask: %d\n", ret);
+ dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret);
return ret;
}
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index d6f8ada..66324ee 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -78,7 +78,7 @@ typedef int (*rproc_handle_resource_t)(struct rproc *rproc, void *, int avail);
* the recovery of the remote processor.
*/
static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
- unsigned long iova, int flags)
+ unsigned long iova, int flags, void *token)
{
dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);
@@ -117,7 +117,7 @@ static int rproc_enable_iommu(struct rproc *rproc)
return -ENOMEM;
}
- iommu_set_fault_handler(domain, rproc_iommu_fault);
+ iommu_set_fault_handler(domain, rproc_iommu_fault, rproc);
ret = iommu_attach_device(domain, dev);
if (ret) {
@@ -247,7 +247,7 @@ rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len)
}
if (offset + filesz > len) {
- dev_err(dev, "truncated fw: need 0x%x avail 0x%x\n",
+ dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
offset + filesz, len);
ret = -EINVAL;
break;
@@ -934,7 +934,7 @@ static void rproc_resource_cleanup(struct rproc *rproc)
unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
if (unmapped != entry->len) {
/* nothing much to do besides complaining */
- dev_err(dev, "failed to unmap %u/%u\n", entry->len,
+ dev_err(dev, "failed to unmap %u/%zu\n", entry->len,
unmapped);
}
@@ -1020,7 +1020,7 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
ehdr = (struct elf32_hdr *)fw->data;
- dev_info(dev, "Booting fw image %s, size %d\n", name, fw->size);
+ dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
/*
* if enabling an IOMMU isn't relevant for this rproc, this is
@@ -1041,8 +1041,10 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
/* look for the resource table */
table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz);
- if (!table)
+ if (!table) {
+ ret = -EINVAL;
goto clean_up;
+ }
/* handle fw resources which are required to boot rproc */
ret = rproc_handle_boot_rsc(rproc, table, tablesz);
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 75506ec..f56c8ba 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -188,6 +188,26 @@ static int rpmsg_uevent(struct device *dev, struct kobj_uevent_env *env)
rpdev->id.name);
}
+/**
+ * __ept_release() - deallocate an rpmsg endpoint
+ * @kref: the ept's reference count
+ *
+ * This function deallocates an ept, and is invoked when its @kref refcount
+ * drops to zero.
+ *
+ * Never invoke this function directly!
+ */
+static void __ept_release(struct kref *kref)
+{
+ struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
+ refcount);
+ /*
+ * At this point no one holds a reference to ept anymore,
+ * so we can directly free it
+ */
+ kfree(ept);
+}
+
/* for more info, see below documentation of rpmsg_create_ept() */
static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb,
@@ -206,6 +226,9 @@ static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
return NULL;
}
+ kref_init(&ept->refcount);
+ mutex_init(&ept->cb_lock);
+
ept->rpdev = rpdev;
ept->cb = cb;
ept->priv = priv;
@@ -238,7 +261,7 @@ rem_idr:
idr_remove(&vrp->endpoints, request);
free_ept:
mutex_unlock(&vrp->endpoints_lock);
- kfree(ept);
+ kref_put(&ept->refcount, __ept_release);
return NULL;
}
@@ -302,11 +325,17 @@ EXPORT_SYMBOL(rpmsg_create_ept);
static void
__rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
{
+ /* make sure new inbound messages can't find this ept anymore */
mutex_lock(&vrp->endpoints_lock);
idr_remove(&vrp->endpoints, ept->addr);
mutex_unlock(&vrp->endpoints_lock);
- kfree(ept);
+ /* make sure in-flight inbound messages won't invoke cb anymore */
+ mutex_lock(&ept->cb_lock);
+ ept->cb = NULL;
+ mutex_unlock(&ept->cb_lock);
+
+ kref_put(&ept->refcount, __ept_release);
}
/**
@@ -790,12 +819,28 @@ static void rpmsg_recv_done(struct virtqueue *rvq)
/* use the dst addr to fetch the callback of the appropriate user */
mutex_lock(&vrp->endpoints_lock);
+
ept = idr_find(&vrp->endpoints, msg->dst);
+
+ /* let's make sure no one deallocates ept while we use it */
+ if (ept)
+ kref_get(&ept->refcount);
+
mutex_unlock(&vrp->endpoints_lock);
- if (ept && ept->cb)
- ept->cb(ept->rpdev, msg->data, msg->len, ept->priv, msg->src);
- else
+ if (ept) {
+ /* make sure ept->cb doesn't go away while we use it */
+ mutex_lock(&ept->cb_lock);
+
+ if (ept->cb)
+ ept->cb(ept->rpdev, msg->data, msg->len, ept->priv,
+ msg->src);
+
+ mutex_unlock(&ept->cb_lock);
+
+ /* farewell, ept, we don't need you anymore */
+ kref_put(&ept->refcount, __ept_release);
+ } else
dev_warn(dev, "msg received with no recepient\n");
/* publish the real size of the buffer */
@@ -1040,7 +1085,7 @@ static int __init rpmsg_init(void)
return ret;
}
-module_init(rpmsg_init);
+subsys_initcall(rpmsg_init);
static void __exit rpmsg_fini(void)
{
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4161bfe..08cbdb9 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -620,27 +620,6 @@ config RTC_DRV_MSM6242
This driver can also be built as a module. If so, the module
will be called rtc-msm6242.
-config RTC_DRV_IMXDI
- tristate "Freescale IMX DryIce Real Time Clock"
- depends on ARCH_MX25
- depends on RTC_CLASS
- help
- Support for Freescale IMX DryIce RTC
-
- This driver can also be built as a module, if so, the module
- will be called "rtc-imxdi".
-
-config RTC_MXC
- tristate "Freescale MXC Real Time Clock"
- depends on ARCH_MXC
- depends on RTC_CLASS
- help
- If you say yes here you get support for the Freescale MXC
- RTC module.
-
- This driver can also be built as a module, if so, the module
- will be called "rtc-mxc".
-
config RTC_DRV_BQ4802
tristate "TI BQ4802"
help
@@ -738,6 +717,16 @@ config RTC_DRV_DAVINCI
This driver can also be built as a module. If so, the module
will be called rtc-davinci.
+config RTC_DRV_IMXDI
+ tristate "Freescale IMX DryIce Real Time Clock"
+ depends on SOC_IMX25
+ depends on RTC_CLASS
+ help
+ Support for Freescale IMX DryIce RTC
+
+ This driver can also be built as a module, if so, the module
+ will be called "rtc-imxdi".
+
config RTC_DRV_OMAP
tristate "TI OMAP1"
depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX
@@ -1087,4 +1076,15 @@ config RTC_DRV_LOONGSON1
This driver can also be built as a module. If so, the module
will be called rtc-ls1x.
+config RTC_DRV_MXC
+ tristate "Freescale MXC Real Time Clock"
+ depends on ARCH_MXC
+ depends on RTC_CLASS
+ help
+ If you say yes here you get support for the Freescale MXC
+ RTC module.
+
+ This driver can also be built as a module, if so, the module
+ will be called "rtc-mxc".
+
endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 727ae77..2973921 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -61,7 +61,7 @@ obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o
obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o
obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o
obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
-obj-$(CONFIG_RTC_MXC) += rtc-mxc.o
+obj-$(CONFIG_RTC_DRV_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
obj-$(CONFIG_RTC_DRV_MAX8925) += rtc-max8925.o
obj-$(CONFIG_RTC_DRV_MAX8998) += rtc-max8998.o
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index 4bcf9ca..370889d 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -17,6 +17,7 @@
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/delay.h>
+#include <linux/of.h>
#define AB8500_RTC_SOFF_STAT_REG 0x00
#define AB8500_RTC_CC_CONF_REG 0x01
@@ -422,7 +423,7 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
}
err = request_threaded_irq(irq, NULL, rtc_alarm_handler,
- IRQF_NO_SUSPEND, "ab8500-rtc", rtc);
+ IRQF_NO_SUSPEND | IRQF_ONESHOT, "ab8500-rtc", rtc);
if (err < 0) {
rtc_device_unregister(rtc);
return err;
@@ -430,7 +431,6 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
-
err = ab8500_sysfs_rtc_register(&pdev->dev);
if (err) {
dev_err(&pdev->dev, "sysfs RTC failed to register\n");
@@ -454,10 +454,16 @@ static int __devexit ab8500_rtc_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ab8500_rtc_match[] = {
+ { .compatible = "stericsson,ab8500-rtc", },
+ {}
+};
+
static struct platform_driver ab8500_rtc_driver = {
.driver = {
.name = "ab8500-rtc",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_rtc_match,
},
.probe = ab8500_rtc_probe,
.remove = __devexit_p(ab8500_rtc_remove),
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index dc474bc..fca9790 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/completion.h>
+#include <linux/io.h>
#include <asm/uaccess.h>
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 7d5f56e..132333d 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -568,6 +568,7 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
hpet_mask_rtc_irq_bit(RTC_AIE);
CMOS_READ(RTC_INTR_FLAGS);
+ pm_wakeup_event(cmos_rtc.dev, 0);
}
spin_unlock(&rtc_lock);
@@ -910,14 +911,17 @@ static inline int cmos_poweroff(struct device *dev)
static u32 rtc_handler(void *context)
{
+ struct device *dev = context;
+
+ pm_wakeup_event(dev, 0);
acpi_clear_event(ACPI_EVENT_RTC);
acpi_disable_event(ACPI_EVENT_RTC, 0);
return ACPI_INTERRUPT_HANDLED;
}
-static inline void rtc_wake_setup(void)
+static inline void rtc_wake_setup(struct device *dev)
{
- acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
+ acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev);
/*
* After the RTC handler is installed, the Fixed_RTC event should
* be disabled. Only when the RTC alarm is set will it be enabled.
@@ -950,7 +954,7 @@ cmos_wake_setup(struct device *dev)
if (acpi_disabled)
return;
- rtc_wake_setup();
+ rtc_wake_setup(dev);
acpi_rtc_info.wake_on = rtc_wake_on;
acpi_rtc_info.wake_off = rtc_wake_off;
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index c293d0c..836710c 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -17,8 +17,7 @@
#include <linux/string.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
-
-
+#include <linux/rtc/ds1307.h>
/*
* We can't determine type by probing, but if we expect pre-Linux code
@@ -92,7 +91,8 @@ enum ds_type {
# define DS1337_BIT_A2I 0x02
# define DS1337_BIT_A1I 0x01
#define DS1339_REG_ALARM1_SECS 0x07
-#define DS1339_REG_TRICKLE 0x10
+
+#define DS13XX_TRICKLE_CHARGER_MAGIC 0xa0
#define RX8025_REG_CTRL1 0x0e
# define RX8025_BIT_2412 0x20
@@ -124,6 +124,7 @@ struct chip_desc {
unsigned alarm:1;
u16 nvram_offset;
u16 nvram_size;
+ u16 trickle_charger_reg;
};
static const struct chip_desc chips[last_ds_type] = {
@@ -140,6 +141,13 @@ static const struct chip_desc chips[last_ds_type] = {
},
[ds_1339] = {
.alarm = 1,
+ .trickle_charger_reg = 0x10,
+ },
+ [ds_1340] = {
+ .trickle_charger_reg = 0x08,
+ },
+ [ds_1388] = {
+ .trickle_charger_reg = 0x0a,
},
[ds_3231] = {
.alarm = 1,
@@ -619,6 +627,7 @@ static int __devinit ds1307_probe(struct i2c_client *client,
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
int want_irq = false;
unsigned char *buf;
+ struct ds1307_platform_data *pdata = client->dev.platform_data;
static const int bbsqi_bitpos[] = {
[ds_1337] = 0,
[ds_1339] = DS1339_BIT_BBSQI,
@@ -637,7 +646,10 @@ static int __devinit ds1307_probe(struct i2c_client *client,
ds1307->client = client;
ds1307->type = id->driver_data;
- ds1307->offset = 0;
+
+ if (pdata && pdata->trickle_charger_setup && chip->trickle_charger_reg)
+ i2c_smbus_write_byte_data(client, chip->trickle_charger_reg,
+ DS13XX_TRICKLE_CHARGER_MAGIC | pdata->trickle_charger_setup);
buf = ds1307->regs;
if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) {
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 14a42a1..9602278 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -127,7 +127,7 @@ static const struct attribute_group ep93xx_rtc_sysfs_files = {
.attrs = ep93xx_rtc_attrs,
};
-static int __init ep93xx_rtc_probe(struct platform_device *pdev)
+static int __devinit ep93xx_rtc_probe(struct platform_device *pdev)
{
struct ep93xx_rtc *ep93xx_rtc;
struct resource *res;
@@ -174,7 +174,7 @@ exit:
return err;
}
-static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
+static int __devexit ep93xx_rtc_remove(struct platform_device *pdev)
{
struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev);
@@ -186,31 +186,19 @@ static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
return 0;
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:ep93xx-rtc");
-
static struct platform_driver ep93xx_rtc_driver = {
.driver = {
.name = "ep93xx-rtc",
.owner = THIS_MODULE,
},
- .remove = __exit_p(ep93xx_rtc_remove),
+ .probe = ep93xx_rtc_probe,
+ .remove = __devexit_p(ep93xx_rtc_remove),
};
-static int __init ep93xx_rtc_init(void)
-{
- return platform_driver_probe(&ep93xx_rtc_driver, ep93xx_rtc_probe);
-}
-
-static void __exit ep93xx_rtc_exit(void)
-{
- platform_driver_unregister(&ep93xx_rtc_driver);
-}
+module_platform_driver(ep93xx_rtc_driver);
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("EP93XX RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-
-module_init(ep93xx_rtc_init);
-module_exit(ep93xx_rtc_exit);
+MODULE_ALIAS("platform:ep93xx-rtc");
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index 63c7218..d521855 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -19,6 +19,7 @@
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/of.h>
/*
* Clock and Power control register offsets
@@ -386,13 +387,22 @@ static const struct dev_pm_ops lpc32xx_rtc_pm_ops = {
#define LPC32XX_RTC_PM_OPS NULL
#endif
+#ifdef CONFIG_OF
+static const struct of_device_id lpc32xx_rtc_match[] = {
+ { .compatible = "nxp,lpc3220-rtc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_rtc_match);
+#endif
+
static struct platform_driver lpc32xx_rtc_driver = {
.probe = lpc32xx_rtc_probe,
.remove = __devexit_p(lpc32xx_rtc_remove),
.driver = {
.name = RTC_NAME,
.owner = THIS_MODULE,
- .pm = LPC32XX_RTC_PM_OPS
+ .pm = LPC32XX_RTC_PM_OPS,
+ .of_match_table = of_match_ptr(lpc32xx_rtc_match),
},
};
diff --git a/drivers/rtc/rtc-m41t93.c b/drivers/rtc/rtc-m41t93.c
index 10f1c29..efab3d4 100644
--- a/drivers/rtc/rtc-m41t93.c
+++ b/drivers/rtc/rtc-m41t93.c
@@ -48,6 +48,7 @@ static inline int m41t93_set_reg(struct spi_device *spi, u8 addr, u8 data)
static int m41t93_set_time(struct device *dev, struct rtc_time *tm)
{
struct spi_device *spi = to_spi_device(dev);
+ int tmp;
u8 buf[9] = {0x80}; /* write cmd + 8 data bytes */
u8 * const data = &buf[1]; /* ptr to first data byte */
@@ -62,6 +63,30 @@ static int m41t93_set_time(struct device *dev, struct rtc_time *tm)
return -EINVAL;
}
+ tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
+ if (tmp < 0)
+ return tmp;
+
+ if (tmp & M41T93_FLAG_OF) {
+ dev_warn(&spi->dev, "OF bit is set, resetting.\n");
+ m41t93_set_reg(spi, M41T93_REG_FLAGS, tmp & ~M41T93_FLAG_OF);
+
+ tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
+ if (tmp < 0) {
+ return tmp;
+ } else if (tmp & M41T93_FLAG_OF) {
+ /* OF cannot be immediately reset: oscillator has to be
+ * restarted. */
+ u8 reset_osc = buf[M41T93_REG_ST_SEC] | M41T93_FLAG_ST;
+
+ dev_warn(&spi->dev,
+ "OF bit is still set, kickstarting clock.\n");
+ m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
+ reset_osc &= ~M41T93_FLAG_ST;
+ m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
+ }
+ }
+
data[M41T93_REG_SSEC] = 0;
data[M41T93_REG_ST_SEC] = bin2bcd(tm->tm_sec);
data[M41T93_REG_MIN] = bin2bcd(tm->tm_min);
@@ -89,10 +114,7 @@ static int m41t93_get_time(struct device *dev, struct rtc_time *tm)
1. halt bit (HT) is set: the clock is running but update of readout
registers has been disabled due to power failure. This is normal
case after poweron. Time is valid after resetting HT bit.
- 2. oscillator fail bit (OF) is set. Oscillator has be stopped and
- time is invalid:
- a) OF can be immeditely reset.
- b) OF cannot be immediately reset: oscillator has to be restarted.
+ 2. oscillator fail bit (OF) is set: time is invalid.
*/
tmp = spi_w8r8(spi, M41T93_REG_ALM_HOUR_HT);
if (tmp < 0)
@@ -110,21 +132,7 @@ static int m41t93_get_time(struct device *dev, struct rtc_time *tm)
if (tmp & M41T93_FLAG_OF) {
ret = -EINVAL;
- dev_warn(&spi->dev, "OF bit is set, resetting.\n");
- m41t93_set_reg(spi, M41T93_REG_FLAGS, tmp & ~M41T93_FLAG_OF);
-
- tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
- if (tmp < 0)
- return tmp;
- else if (tmp & M41T93_FLAG_OF) {
- u8 reset_osc = buf[M41T93_REG_ST_SEC] | M41T93_FLAG_ST;
-
- dev_warn(&spi->dev,
- "OF bit is still set, kickstarting clock.\n");
- m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
- reset_osc &= ~M41T93_FLAG_ST;
- m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
- }
+ dev_warn(&spi->dev, "OF bit is set, write time to restart.\n");
}
if (tmp & M41T93_FLAG_BL)
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 5e1d64e..e3e50d6 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -202,10 +202,11 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
struct platform_device *pdev = dev_id;
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
+ unsigned long flags;
u32 status;
u32 events = 0;
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
status = readw(ioaddr + RTC_RTCISR) & readw(ioaddr + RTC_RTCIENR);
/* clear interrupt sources */
writew(status, ioaddr + RTC_RTCISR);
@@ -224,7 +225,7 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
events |= (RTC_PF | RTC_IRQF);
rtc_update_irq(pdata->rtc, 1, events);
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
return IRQ_HANDLED;
}
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index bc0677d..97a3284 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -64,6 +64,7 @@ struct pcf8563 {
* 1970...2069.
*/
int c_polarity; /* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */
+ int voltage_low; /* indicates if a low voltage was detected */
};
/*
@@ -86,9 +87,11 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
return -EIO;
}
- if (buf[PCF8563_REG_SC] & PCF8563_SC_LV)
+ if (buf[PCF8563_REG_SC] & PCF8563_SC_LV) {
+ pcf8563->voltage_low = 1;
dev_info(&client->dev,
"low voltage detected, date/time is not reliable.\n");
+ }
dev_dbg(&client->dev,
"%s: raw data is st1=%02x, st2=%02x, sec=%02x, min=%02x, hr=%02x, "
@@ -173,6 +176,44 @@ static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
return 0;
}
+#ifdef CONFIG_RTC_INTF_DEV
+static int pcf8563_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct pcf8563 *pcf8563 = i2c_get_clientdata(to_i2c_client(dev));
+ struct rtc_time tm;
+
+ switch (cmd) {
+ case RTC_VL_READ:
+ if (pcf8563->voltage_low)
+ dev_info(dev, "low voltage detected, date/time is not reliable.\n");
+
+ if (copy_to_user((void __user *)arg, &pcf8563->voltage_low,
+ sizeof(int)))
+ return -EFAULT;
+ return 0;
+ case RTC_VL_CLR:
+ /*
+ * Clear the VL bit in the seconds register in case
+ * the time has not been set already (which would
+ * have cleared it). This does not really matter
+ * because of the cached voltage_low value but do it
+ * anyway for consistency.
+ */
+ if (pcf8563_get_datetime(to_i2c_client(dev), &tm))
+ pcf8563_set_datetime(to_i2c_client(dev), &tm);
+
+ /* Clear the cached value. */
+ pcf8563->voltage_low = 0;
+
+ return 0;
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+#else
+#define pcf8563_rtc_ioctl NULL
+#endif
+
static int pcf8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
return pcf8563_get_datetime(to_i2c_client(dev), tm);
@@ -184,6 +225,7 @@ static int pcf8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
}
static const struct rtc_class_ops pcf8563_rtc_ops = {
+ .ioctl = pcf8563_rtc_ioctl,
.read_time = pcf8563_rtc_read_time,
.set_time = pcf8563_rtc_set_time,
};
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index f027c06..cc05339 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -220,17 +220,9 @@ static irqreturn_t pl031_interrupt(int irq, void *dev_id)
unsigned long events = 0;
rtcmis = readl(ldata->base + RTC_MIS);
- if (rtcmis) {
- writel(rtcmis, ldata->base + RTC_ICR);
-
- if (rtcmis & RTC_BIT_AI)
- events |= (RTC_AF | RTC_IRQF);
-
- /* Timer interrupt is only available in ST variants */
- if ((rtcmis & RTC_BIT_PI) &&
- (ldata->hw_designer == AMBA_VENDOR_ST))
- events |= (RTC_PF | RTC_IRQF);
-
+ if (rtcmis & RTC_BIT_AI) {
+ writel(RTC_BIT_AI, ldata->base + RTC_ICR);
+ events |= (RTC_AF | RTC_IRQF);
rtc_update_irq(ldata->rtc, 1, events);
return IRQ_HANDLED;
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 3f3a297..7e6af0b 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -670,6 +670,7 @@ static int s3c_rtc_resume(struct platform_device *pdev)
#define s3c_rtc_resume NULL
#endif
+#ifdef CONFIG_OF
static struct s3c_rtc_drv_data s3c_rtc_drv_data_array[] = {
[TYPE_S3C2410] = { TYPE_S3C2410 },
[TYPE_S3C2416] = { TYPE_S3C2416 },
@@ -677,7 +678,6 @@ static struct s3c_rtc_drv_data s3c_rtc_drv_data_array[] = {
[TYPE_S3C64XX] = { TYPE_S3C64XX },
};
-#ifdef CONFIG_OF
static const struct of_device_id s3c_rtc_dt_match[] = {
{
.compatible = "samsung,s3c2410-rtc",
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index e38da0d..e278547 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/slab.h>
@@ -457,12 +458,12 @@ static int __devexit spear_rtc_remove(struct platform_device *pdev)
clk_disable(config->clk);
clk_put(config->clk);
iounmap(config->ioaddr);
- kfree(config);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res)
release_mem_region(res->start, resource_size(res));
platform_set_drvdata(pdev, NULL);
rtc_device_unregister(config->rtc);
+ kfree(config);
return 0;
}
@@ -519,6 +520,14 @@ static void spear_rtc_shutdown(struct platform_device *pdev)
clk_disable(config->clk);
}
+#ifdef CONFIG_OF
+static const struct of_device_id spear_rtc_id_table[] = {
+ { .compatible = "st,spear600-rtc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spear_rtc_id_table);
+#endif
+
static struct platform_driver spear_rtc_driver = {
.probe = spear_rtc_probe,
.remove = __devexit_p(spear_rtc_remove),
@@ -527,6 +536,7 @@ static struct platform_driver spear_rtc_driver = {
.shutdown = spear_rtc_shutdown,
.driver = {
.name = "rtc-spear",
+ .of_match_table = of_match_ptr(spear_rtc_id_table),
},
};
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 1028786..739ef556 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -25,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/rtc.h>
#include <linux/slab.h>
+#include <linux/of_device.h>
#include <mach/common.h>
@@ -265,6 +266,12 @@ static int stmp3xxx_rtc_resume(struct platform_device *dev)
#define stmp3xxx_rtc_resume NULL
#endif
+static const struct of_device_id rtc_dt_ids[] = {
+ { .compatible = "fsl,stmp3xxx-rtc", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rtc_dt_ids);
+
static struct platform_driver stmp3xxx_rtcdrv = {
.probe = stmp3xxx_rtc_probe,
.remove = stmp3xxx_rtc_remove,
@@ -273,6 +280,7 @@ static struct platform_driver stmp3xxx_rtcdrv = {
.driver = {
.name = "stmp3xxx-rtc",
.owner = THIS_MODULE,
+ .of_match_table = rtc_dt_ids,
},
};
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 75259fe..c006025 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -309,7 +309,8 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
struct resource *res;
int ret;
- info = kzalloc(sizeof(struct tegra_rtc_info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(struct tegra_rtc_info),
+ GFP_KERNEL);
if (!info)
return -ENOMEM;
@@ -317,29 +318,18 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
if (!res) {
dev_err(&pdev->dev,
"Unable to allocate resources for device.\n");
- ret = -EBUSY;
- goto err_free_info;
+ return -EBUSY;
}
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- dev_err(&pdev->dev,
- "Unable to request mem region for device.\n");
- ret = -EBUSY;
- goto err_free_info;
+ info->rtc_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!info->rtc_base) {
+ dev_err(&pdev->dev, "Unable to request mem region and grab IOs for device.\n");
+ return -EBUSY;
}
info->tegra_rtc_irq = platform_get_irq(pdev, 0);
- if (info->tegra_rtc_irq <= 0) {
- ret = -EBUSY;
- goto err_release_mem_region;
- }
-
- info->rtc_base = ioremap_nocache(res->start, resource_size(res));
- if (!info->rtc_base) {
- dev_err(&pdev->dev, "Unable to grab IOs for device.\n");
- ret = -EBUSY;
- goto err_release_mem_region;
- }
+ if (info->tegra_rtc_irq <= 0)
+ return -EBUSY;
/* set context info. */
info->pdev = pdev;
@@ -362,11 +352,12 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Unable to register device (err=%d).\n",
ret);
- goto err_iounmap;
+ return ret;
}
- ret = request_irq(info->tegra_rtc_irq, tegra_rtc_irq_handler,
- IRQF_TRIGGER_HIGH, "rtc alarm", &pdev->dev);
+ ret = devm_request_irq(&pdev->dev, info->tegra_rtc_irq,
+ tegra_rtc_irq_handler, IRQF_TRIGGER_HIGH,
+ "rtc alarm", &pdev->dev);
if (ret) {
dev_err(&pdev->dev,
"Unable to request interrupt for device (err=%d).\n",
@@ -380,12 +371,6 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
err_dev_unreg:
rtc_device_unregister(info->rtc_dev);
-err_iounmap:
- iounmap(info->rtc_base);
-err_release_mem_region:
- release_mem_region(res->start, resource_size(res));
-err_free_info:
- kfree(info);
return ret;
}
@@ -393,17 +378,8 @@ err_free_info:
static int __devexit tegra_rtc_remove(struct platform_device *pdev)
{
struct tegra_rtc_info *info = platform_get_drvdata(pdev);
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EBUSY;
- free_irq(info->tegra_rtc_irq, &pdev->dev);
rtc_device_unregister(info->rtc_dev);
- iounmap(info->rtc_base);
- release_mem_region(res->start, resource_size(res));
- kfree(info);
platform_set_drvdata(pdev, NULL);
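
The rtc-tegra conversion above relies on managed (devm_*) resources: the private data, the MMIO mapping and the IRQ are tied to the device's lifetime, so the goto unwind labels in probe and most of remove disappear. A condensed sketch of such a probe, with hypothetical names (foo_priv, foo_irq_handler) standing in for the driver's own:

    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>
    #include <linux/slab.h>

    struct foo_priv {
        void __iomem *base;
    };

    static irqreturn_t foo_irq_handler(int irq, void *data)
    {
        return IRQ_HANDLED;
    }

    static int foo_probe(struct platform_device *pdev)
    {
        struct foo_priv *priv;
        struct resource *res;
        int irq, ret;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
            return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->base = devm_request_and_ioremap(&pdev->dev, res);
        if (!priv->base)
            return -EADDRNOTAVAIL;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
            return irq;

        ret = devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
                               dev_name(&pdev->dev), priv);
        if (ret)
            return ret;

        platform_set_drvdata(pdev, priv);
        return 0;   /* everything above is released automatically on unbind */
    }

Each early return simply propagates an error and lets the driver core unwind whatever devm_* already set up, which is exactly why the err_iounmap/err_release_mem_region/err_free_info labels could be deleted.
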
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 258abea..c5d06fe 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -510,7 +510,7 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
}
ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
- IRQF_TRIGGER_RISING,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
dev_name(&rtc->dev), rtc);
if (ret < 0) {
dev_err(&pdev->dev, "IRQ is not free.\n");
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index f350912..15370a2 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1,5 +1,4 @@
/*
- * File...........: linux/drivers/s390/block/dasd.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
@@ -52,7 +51,7 @@ void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
- " Copyright 2000 IBM Corporation");
+ " Copyright IBM Corp. 2000");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");
@@ -82,6 +81,7 @@ static void dasd_profile_exit(struct dasd_profile *);
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
+static wait_queue_head_t shutdown_waitq;
/*
* Allocate memory for a new device structure.
@@ -1994,6 +1994,8 @@ static void dasd_device_tasklet(struct dasd_device *device)
/* Now check if the head of the ccw queue needs to be started. */
__dasd_device_start_head(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
+ if (waitqueue_active(&shutdown_waitq))
+ wake_up(&shutdown_waitq);
dasd_put_device(device);
}
@@ -2632,6 +2634,8 @@ static void dasd_block_tasklet(struct dasd_block *block)
__dasd_block_start_head(block);
spin_unlock(&block->queue_lock);
spin_unlock_irq(&block->request_queue_lock);
+ if (waitqueue_active(&shutdown_waitq))
+ wake_up(&shutdown_waitq);
dasd_put_device(block->base);
}
@@ -3474,6 +3478,32 @@ char *dasd_get_sense(struct irb *irb)
}
EXPORT_SYMBOL_GPL(dasd_get_sense);
+static inline int _wait_for_empty_queues(struct dasd_device *device)
+{
+ if (device->block)
+ return list_empty(&device->ccw_queue) &&
+ list_empty(&device->block->ccw_queue);
+ else
+ return list_empty(&device->ccw_queue);
+}
+
+void dasd_generic_shutdown(struct ccw_device *cdev)
+{
+ struct dasd_device *device;
+
+ device = dasd_device_from_cdev(cdev);
+ if (IS_ERR(device))
+ return;
+
+ if (device->block)
+ dasd_schedule_block_bh(device->block);
+
+ dasd_schedule_device_bh(device);
+
+ wait_event(shutdown_waitq, _wait_for_empty_queues(device));
+}
+EXPORT_SYMBOL_GPL(dasd_generic_shutdown);
+
static int __init dasd_init(void)
{
int rc;
@@ -3481,6 +3511,7 @@ static int __init dasd_init(void)
init_waitqueue_head(&dasd_init_waitq);
init_waitqueue_head(&dasd_flush_wq);
init_waitqueue_head(&generic_waitq);
+ init_waitqueue_head(&shutdown_waitq);
/* register 'common' DASD debug area, used for all DBF_XXX calls */
dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
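
The new shutdown hook is a textbook quiesce built on a wait queue: the tasklets call wake_up() whenever they may have drained a queue, and dasd_generic_shutdown() sleeps in wait_event() until _wait_for_empty_queues() reports that both CCW queues are empty. Reduced to its skeleton, the pattern looks like this (foo_* names and the foo_schedule_bh() helper are placeholders, not DASD symbols):

    #include <linux/list.h>
    #include <linux/sched.h>
    #include <linux/wait.h>

    struct foo_device {
        struct list_head work_queue;
    };

    static DECLARE_WAIT_QUEUE_HEAD(foo_shutdown_waitq);

    /* bottom half: after processing, tell a possible waiter to re-check */
    static void foo_bh_done(struct foo_device *dev)
    {
        if (waitqueue_active(&foo_shutdown_waitq))
            wake_up(&foo_shutdown_waitq);
    }

    /* shutdown path: kick the bottom half once, then wait until it drains */
    static void foo_shutdown(struct foo_device *dev)
    {
        foo_schedule_bh(dev);   /* assumed helper that schedules the tasklet */
        wait_event(foo_shutdown_waitq, list_empty(&dev->work_queue));
    }

wait_event() re-evaluates the predicate on every wakeup, so the only requirement is that no new work is queued once shutdown starts; the waitqueue_active() check merely avoids a wake_up() when nobody is waiting.
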
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 0326571..f8212d5 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1,9 +1,8 @@
/*
- * File...........: linux/drivers/s390/block/dasd_3990_erp.c
* Author(s)......: Horst Hummel <Horst.Hummel@de.ibm.com>
* Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000, 2001
+ * Copyright IBM Corp. 2000, 2001
*
*/
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index b3beed5..157defe 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -1,7 +1,7 @@
/*
* PAV alias management for the DASD ECKD discipline
*
- * Copyright IBM Corporation, 2007
+ * Copyright IBM Corp. 2007
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index d71511c..b2b8c18 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1,11 +1,10 @@
/*
- * File...........: linux/drivers/s390/block/dasd_devmap.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
+ * Copyright IBM Corp. 1999, 2001
*
* Device mapping and dasd= parameter parsing functions. All devmap
* functions may not be called from interrupt context. In particular
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 0cea7e9..9bd5da3 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -1,10 +1,9 @@
/*
- * File...........: linux/drivers/s390/block/dasd_diag.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Based on.......: linux/drivers/s390/block/mdisk.c
* ...............: by Hartmunt Penner <hpenner@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ * Copyright IBM Corp. 1999, 2000
*
*/
diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h
index 4f71fbe..a803cc7 100644
--- a/drivers/s390/block/dasd_diag.h
+++ b/drivers/s390/block/dasd_diag.h
@@ -1,10 +1,9 @@
/*
- * File...........: linux/drivers/s390/block/dasd_diag.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Based on.......: linux/drivers/s390/block/mdisk.h
* ...............: by Hartmunt Penner <hpenner@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ * Copyright IBM Corp. 1999, 2000
*
*/
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index bc2e8a7..40a826a 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1,5 +1,4 @@
/*
- * File...........: linux/drivers/s390/block/dasd_eckd.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
@@ -4247,6 +4246,7 @@ static struct ccw_driver dasd_eckd_driver = {
.set_online = dasd_eckd_set_online,
.notify = dasd_generic_notify,
.path_event = dasd_generic_path_event,
+ .shutdown = dasd_generic_shutdown,
.freeze = dasd_generic_pm_freeze,
.thaw = dasd_generic_restore_device,
.restore = dasd_generic_restore_device,
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 4a688a8..2555e49 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -1,9 +1,8 @@
/*
- * File...........: linux/drivers/s390/block/dasd_eckd.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ * Copyright IBM Corp. 1999, 2000
*
*/
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 16c5208..ff901b5 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -1,7 +1,7 @@
/*
* Character device driver for extended error reporting.
*
- * Copyright (C) 2005 IBM Corporation
+ * Copyright IBM Corp. 2005
* extended error reporting for DASD ECKD devices
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 0eafe2e..d01ef82 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -1,11 +1,10 @@
/*
- * File...........: linux/drivers/s390/block/dasd.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
+ * Copyright IBM Corp. 1999, 2001
*
*/
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index a62a753..fb7f3bd 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -1,5 +1,4 @@
/*
- * File...........: linux/drivers/s390/block/dasd_fba.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* Copyright IBM Corp. 1999, 2009
diff --git a/drivers/s390/block/dasd_fba.h b/drivers/s390/block/dasd_fba.h
index 14c910b..b5d3db0 100644
--- a/drivers/s390/block/dasd_fba.h
+++ b/drivers/s390/block/dasd_fba.h
@@ -1,8 +1,7 @@
/*
- * File...........: linux/drivers/s390/block/dasd_fba.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ * Copyright IBM Corp. 1999, 2000
*
*/
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 19a1ff0..f649217 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -1,11 +1,10 @@
/*
- * File...........: linux/drivers/s390/block/dasd_genhd.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
+ * Copyright IBM Corp. 1999, 2001
*
* gendisk related functions for the dasd driver.
*
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 33a6743..7ff93ee 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -1,5 +1,4 @@
/*
- * File...........: linux/drivers/s390/block/dasd_int.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -10,8 +9,6 @@
#ifndef DASD_INT_H
#define DASD_INT_H
-#ifdef __KERNEL__
-
/* we keep old device allocation scheme; IOW, minors are still in 0..255 */
#define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
@@ -688,6 +685,7 @@ int dasd_generic_set_offline (struct ccw_device *cdev);
int dasd_generic_notify(struct ccw_device *, int);
int dasd_generic_last_path_gone(struct dasd_device *);
int dasd_generic_path_operational(struct dasd_device *);
+void dasd_generic_shutdown(struct ccw_device *);
void dasd_generic_handle_state_change(struct dasd_device *);
int dasd_generic_pm_freeze(struct ccw_device *);
@@ -791,6 +789,4 @@ static inline int dasd_eer_enabled(struct dasd_device *device)
#define dasd_eer_enabled(d) (0)
#endif /* CONFIG_DASD_ERR */
-#endif /* __KERNEL__ */
-
#endif /* DASD_H */
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 792c69e..cceae70 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -1,11 +1,10 @@
/*
- * File...........: linux/drivers/s390/block/dasd_ioctl.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
+ * Copyright IBM Corp. 1999, 2001
*
* i/o controls for the dasd driver.
*/
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index e12989f..78ac905a 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -1,11 +1,10 @@
/*
- * File...........: linux/drivers/s390/block/dasd_proc.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2002
+ * Copyright IBM Corp. 1999, 2002
*
* /proc interface for the dasd driver.
*
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
index 0e9a309..8de2deb 100644
--- a/drivers/s390/char/ctrlchar.c
+++ b/drivers/s390/char/ctrlchar.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/char/ctrlchar.c
* Unified handling of special chars.
*
- * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2001
* Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
*
*/
diff --git a/drivers/s390/char/ctrlchar.h b/drivers/s390/char/ctrlchar.h
index 935ffa0..1a53552 100644
--- a/drivers/s390/char/ctrlchar.h
+++ b/drivers/s390/char/ctrlchar.h
@@ -1,8 +1,7 @@
/*
- * drivers/s390/char/ctrlchar.c
* Unified handling of special chars.
*
- * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2001
* Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
*
*/
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 7ef9cfd..01463b0 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -1,9 +1,8 @@
/*
- * drivers/s390/char/keyboard.c
* ebcdic keycode functions for s390 console drivers
*
* S390 version
- * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2003
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*/
diff --git a/drivers/s390/char/keyboard.h b/drivers/s390/char/keyboard.h
index f682f4e..d0ae2be 100644
--- a/drivers/s390/char/keyboard.h
+++ b/drivers/s390/char/keyboard.h
@@ -1,8 +1,7 @@
/*
- * drivers/s390/char/keyboard.h
* ebcdic keycode functions for s390 console drivers
*
- * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2003
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*/
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 30f29a0..3fcc000 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -654,16 +654,6 @@ sclp_remove_processed(struct sccb_header *sccb)
EXPORT_SYMBOL(sclp_remove_processed);
-struct init_sccb {
- struct sccb_header header;
- u16 _reserved;
- u16 mask_length;
- sccb_mask_t receive_mask;
- sccb_mask_t send_mask;
- sccb_mask_t sclp_receive_mask;
- sccb_mask_t sclp_send_mask;
-} __attribute__((packed));
-
/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 49a1bb5..d7e97ae 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -88,6 +88,16 @@ struct sccb_header {
u16 response_code;
} __attribute__((packed));
+struct init_sccb {
+ struct sccb_header header;
+ u16 _reserved;
+ u16 mask_length;
+ sccb_mask_t receive_mask;
+ sccb_mask_t send_mask;
+ sccb_mask_t sclp_receive_mask;
+ sccb_mask_t sclp_send_mask;
+} __attribute__((packed));
+
extern u64 sclp_facilities;
#define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL)
#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL)
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 766cb7b..71ea923c 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -48,6 +48,7 @@ struct read_info_sccb {
u8 _reserved5[4096 - 112]; /* 112-4095 */
} __attribute__((packed, aligned(PAGE_SIZE)));
+static struct init_sccb __initdata early_event_mask_sccb __aligned(PAGE_SIZE);
static struct read_info_sccb __initdata early_read_info_sccb;
static int __initdata early_read_info_sccb_valid;
@@ -104,6 +105,19 @@ static void __init sclp_read_info_early(void)
}
}
+static void __init sclp_event_mask_early(void)
+{
+ struct init_sccb *sccb = &early_event_mask_sccb;
+ int rc;
+
+ do {
+ memset(sccb, 0, sizeof(*sccb));
+ sccb->header.length = sizeof(*sccb);
+ sccb->mask_length = sizeof(sccb_mask_t);
+ rc = sclp_cmd_sync_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
+ } while (rc == -EBUSY);
+}
+
void __init sclp_facilities_detect(void)
{
struct read_info_sccb *sccb;
@@ -119,6 +133,30 @@ void __init sclp_facilities_detect(void)
rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
rzm <<= 20;
+
+ sclp_event_mask_early();
+}
+
+bool __init sclp_has_linemode(void)
+{
+ struct init_sccb *sccb = &early_event_mask_sccb;
+
+ if (sccb->header.response_code != 0x20)
+ return 0;
+ if (sccb->sclp_send_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))
+ return 1;
+ return 0;
+}
+
+bool __init sclp_has_vt220(void)
+{
+ struct init_sccb *sccb = &early_event_mask_sccb;
+
+ if (sccb->header.response_code != 0x20)
+ return 0;
+ if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
+ return 1;
+ return 0;
}
unsigned long long sclp_get_rnmax(void)
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 3c03c10..444d361 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/char/sclp_config.c
- *
* Copyright IBM Corp. 2007
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
index 5716487..d70d8c2 100644
--- a/drivers/s390/char/sclp_cpi.c
+++ b/drivers/s390/char/sclp_cpi.c
@@ -1,5 +1,4 @@
/*
- * drivers/s390/char/sclp_cpi.c
* SCLP control programm identification
*
* Copyright IBM Corp. 2001, 2007
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
index bd1b9c9..2acea80 100644
--- a/drivers/s390/char/sclp_cpi_sys.c
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -1,5 +1,4 @@
/*
- * drivers/s390/char/sclp_cpi_sys.c
* SCLP control program identification sysfs interface
*
* Copyright IBM Corp. 2001, 2007
diff --git a/drivers/s390/char/sclp_cpi_sys.h b/drivers/s390/char/sclp_cpi_sys.h
index deef3e6..65bb6a9 100644
--- a/drivers/s390/char/sclp_cpi_sys.h
+++ b/drivers/s390/char/sclp_cpi_sys.h
@@ -1,5 +1,4 @@
/*
- * drivers/s390/char/sclp_cpi_sys.h
* SCLP control program identification sysfs interface
*
* Copyright IBM Corp. 2007
diff --git a/drivers/s390/char/sclp_ocf.c b/drivers/s390/char/sclp_ocf.c
index ab294d5..2553db0 100644
--- a/drivers/s390/char/sclp_ocf.c
+++ b/drivers/s390/char/sclp_ocf.c
@@ -1,5 +1,4 @@
/*
- * drivers/s390/char/sclp_ocf.c
* SCLP OCF communication parameters sysfs interface
*
* Copyright IBM Corp. 2011
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 69df137..475e470 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/char/sclp_quiesce.c
* signal quiesce handler
*
- * (C) Copyright IBM Corp. 1999,2004
+ * Copyright IBM Corp. 1999, 2004
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 69e6c50..6a6f76b 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -1,7 +1,7 @@
/*
* Sclp "store data in absolut storage"
*
- * Copyright IBM Corp. 2003,2007
+ * Copyright IBM Corp. 2003, 2007
* Author(s): Michael Holzheu
*/
@@ -211,7 +211,7 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
sccb.evbuf.event_qual = EQ_STORE_DATA;
sccb.evbuf.data_id = DI_FCP_DUMP;
sccb.evbuf.event_id = 4712;
-#ifdef __s390x__
+#ifdef CONFIG_64BIT
sccb.evbuf.asa_size = ASA_SIZE_64;
#else
sccb.evbuf.asa_size = ASA_SIZE_32;
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index e66a75b..0792c85 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -1,9 +1,8 @@
/*
- * drivers/s390/char/sclp_tty.c
* SCLP line mode terminal driver.
*
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
diff --git a/drivers/s390/char/sclp_tty.h b/drivers/s390/char/sclp_tty.h
index 4b965b2..c877342 100644
--- a/drivers/s390/char/sclp_tty.h
+++ b/drivers/s390/char/sclp_tty.h
@@ -1,9 +1,8 @@
/*
- * drivers/s390/char/sclp_tty.h
* interface to the SCLP-read/write driver
*
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index bc6c7cf..c06be6c 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -1,5 +1,4 @@
/*
- * drivers/s390/char/tape.h
* tape device driver for 3480/3490E/3590 tapes.
*
* S390 and zSeries version
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index b28de80..6ae929c 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1,5 +1,4 @@
/*
- * drivers/s390/char/tape_34xx.c
* tape device discipline for 3480/3490 tapes.
*
* Copyright IBM Corp. 2001, 2009
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index a5c6614..1b0eb49 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -1,5 +1,4 @@
/*
- * drivers/s390/char/tape_3590.c
* tape device discipline for 3590 tapes.
*
* Copyright IBM Corp. 2001, 2009
diff --git a/drivers/s390/char/tape_3590.h b/drivers/s390/char/tape_3590.h
index 4534055..36b759e 100644
--- a/drivers/s390/char/tape_3590.h
+++ b/drivers/s390/char/tape_3590.h
@@ -1,8 +1,7 @@
/*
- * drivers/s390/char/tape_3590.h
* tape device discipline for 3590 tapes.
*
- * Copyright IBM Corp. 2001,2006
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Stefan Bader <shbader@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 46886a7..2d61db3 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -1,9 +1,8 @@
/*
- * drivers/s390/char/tape_char.c
* character device frontend for tape device driver
*
* S390 and zSeries version
- * Copyright IBM Corp. 2001,2006
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index 55343df..54b3c79 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -1,6 +1,5 @@
/*
- * (C) Copyright IBM Corp. 2004
- * tape_class.c
+ * Copyright IBM Corp. 2004
*
* Tape class device support
*
@@ -17,7 +16,7 @@
MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
MODULE_DESCRIPTION(
- "(C) Copyright IBM Corp. 2004 All Rights Reserved.\n"
+ "Copyright IBM Corp. 2004 All Rights Reserved.\n"
"tape_class.c"
);
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/char/tape_class.h b/drivers/s390/char/tape_class.h
index ba2092f..a332c10 100644
--- a/drivers/s390/char/tape_class.h
+++ b/drivers/s390/char/tape_class.h
@@ -1,6 +1,5 @@
/*
- * (C) Copyright IBM Corp. 2004 All Rights Reserved.
- * tape_class.h
+ * Copyright IBM Corp. 2004 All Rights Reserved.
*
* Tape class device support
*
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 5856186..f3b5123 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -1,5 +1,4 @@
/*
- * drivers/s390/char/tape_core.c
* basic function of the tape device driver
*
* S390 and zSeries version
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index 0ceb379..8733b23 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -1,9 +1,8 @@
/*
- * drivers/s390/char/tape.c
* tape device driver for S/390 and zSeries tapes.
*
* S390 and zSeries version
- * Copyright (C) 2001 IBM Corporation
+ * Copyright IBM Corp. 2001
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index e765017..981a99f 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -1,9 +1,8 @@
/*
- * drivers/s390/char/tape_std.c
* standard tape device functions for ibm tapes.
*
* S390 and zSeries version
- * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2001, 2002
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
diff --git a/drivers/s390/char/tape_std.h b/drivers/s390/char/tape_std.h
index 1fc9523..c5816ad 100644
--- a/drivers/s390/char/tape_std.h
+++ b/drivers/s390/char/tape_std.h
@@ -1,8 +1,7 @@
/*
- * drivers/s390/char/tape_std.h
* standard tape device functions for ibm tapes.
*
- * Copyright (C) IBM Corp. 2001,2006
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 10ec690..1928f34 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -1,11 +1,10 @@
/*
- * drivers/s390/char/tty3270.c
* IBM/3270 Driver - tty functions.
*
* Author(s):
* Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
* Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
- * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * -- Copyright IBM Corp. 2003
*/
#include <linux/module.h>
diff --git a/drivers/s390/char/tty3270.h b/drivers/s390/char/tty3270.h
index 799da57..11141a8 100644
--- a/drivers/s390/char/tty3270.h
+++ b/drivers/s390/char/tty3270.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/char/tty3270.h
- *
* Copyright IBM Corp. 2007
*
*/
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 89c03e6..0fdedad 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 2004,2010
+ * Copyright IBM Corp. 2004, 2010
* Interface implementation for communication with the z/VM control program
*
* Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
diff --git a/drivers/s390/char/vmcp.h b/drivers/s390/char/vmcp.h
index 6a99394..1e29b04 100644
--- a/drivers/s390/char/vmcp.h
+++ b/drivers/s390/char/vmcp.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2004, 2005 IBM Corporation
+ * Copyright IBM Corp. 2004, 2005
* Interface implementation for communication with the z/VM control program
* Version 1.0
* Author(s): Christian Borntraeger <cborntra@de.ibm.com>
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 524d988..c131bc4 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -1,5 +1,4 @@
/*
- * drivers/s390/char/vmlogrdr.c
* character device driver for reading z/VM system service records
*
*
@@ -656,10 +655,19 @@ static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
len = strlen(buf);
return len;
}
-
-
static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
NULL);
+static struct attribute *vmlogrdr_drv_attrs[] = {
+ &driver_attr_recording_status.attr,
+ NULL,
+};
+static struct attribute_group vmlogrdr_drv_attr_group = {
+ .attrs = vmlogrdr_drv_attrs,
+};
+static const struct attribute_group *vmlogrdr_drv_attr_groups[] = {
+ &vmlogrdr_drv_attr_group,
+ NULL,
+};
static struct attribute *vmlogrdr_attrs[] = {
&dev_attr_autopurge.attr,
@@ -668,6 +676,13 @@ static struct attribute *vmlogrdr_attrs[] = {
&dev_attr_recording.attr,
NULL,
};
+static struct attribute_group vmlogrdr_attr_group = {
+ .attrs = vmlogrdr_attrs,
+};
+static const struct attribute_group *vmlogrdr_attr_groups[] = {
+ &vmlogrdr_attr_group,
+ NULL,
+};
static int vmlogrdr_pm_prepare(struct device *dev)
{
@@ -692,18 +707,14 @@ static const struct dev_pm_ops vmlogrdr_pm_ops = {
.prepare = vmlogrdr_pm_prepare,
};
-static struct attribute_group vmlogrdr_attr_group = {
- .attrs = vmlogrdr_attrs,
-};
-
static struct class *vmlogrdr_class;
static struct device_driver vmlogrdr_driver = {
.name = "vmlogrdr",
.bus = &iucv_bus,
.pm = &vmlogrdr_pm_ops,
+ .groups = vmlogrdr_drv_attr_groups,
};
-
static int vmlogrdr_register_driver(void)
{
int ret;
@@ -717,21 +728,14 @@ static int vmlogrdr_register_driver(void)
if (ret)
goto out_iucv;
- ret = driver_create_file(&vmlogrdr_driver,
- &driver_attr_recording_status);
- if (ret)
- goto out_driver;
-
vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
if (IS_ERR(vmlogrdr_class)) {
ret = PTR_ERR(vmlogrdr_class);
vmlogrdr_class = NULL;
- goto out_attr;
+ goto out_driver;
}
return 0;
-out_attr:
- driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
out_driver:
driver_unregister(&vmlogrdr_driver);
out_iucv:
@@ -745,7 +749,6 @@ static void vmlogrdr_unregister_driver(void)
{
class_destroy(vmlogrdr_class);
vmlogrdr_class = NULL;
- driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
driver_unregister(&vmlogrdr_driver);
iucv_unregister(&vmlogrdr_iucv_handler, 1);
}
@@ -762,6 +765,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
dev->bus = &iucv_bus;
dev->parent = iucv_root;
dev->driver = &vmlogrdr_driver;
+ dev->groups = vmlogrdr_attr_groups;
dev_set_drvdata(dev, priv);
/*
* The release function could be called after the
@@ -779,11 +783,6 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
return ret;
}
- ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group);
- if (ret) {
- device_unregister(dev);
- return ret;
- }
priv->class_device = device_create(vmlogrdr_class, dev,
MKDEV(vmlogrdr_major,
priv->minor_num),
@@ -791,7 +790,6 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
if (IS_ERR(priv->class_device)) {
ret = PTR_ERR(priv->class_device);
priv->class_device=NULL;
- sysfs_remove_group(&dev->kobj, &vmlogrdr_attr_group);
device_unregister(dev);
return ret;
}
@@ -804,7 +802,6 @@ static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
{
device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
if (priv->device != NULL) {
- sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group);
device_unregister(priv->device);
priv->device=NULL;
}
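
The vmlogrdr rework above moves all sysfs files into static attribute groups and points driver.groups and dev->groups at them, so the driver core creates and removes the files itself and the driver_create_file()/sysfs_create_group() calls, together with their error unwinding, go away. The idiom in isolation looks roughly like this; the attribute name and foo_bus_type are invented for the example:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t foo_status_show(struct device_driver *drv, char *buf)
    {
        return sprintf(buf, "ok\n");
    }
    static DRIVER_ATTR(status, 0444, foo_status_show, NULL);

    static struct attribute *foo_drv_attrs[] = {
        &driver_attr_status.attr,
        NULL,
    };
    static struct attribute_group foo_drv_attr_group = {
        .attrs = foo_drv_attrs,
    };
    static const struct attribute_group *foo_drv_attr_groups[] = {
        &foo_drv_attr_group,
        NULL,
    };

    static struct device_driver foo_driver = {
        .name   = "foo",
        .bus    = &foo_bus_type,        /* hypothetical bus type */
        .groups = foo_drv_attr_groups,  /* files managed by the driver core */
    };

On the device side, groups assigned to dev->groups are created by device_add() before the add uevent is emitted, so userspace never observes the device without its attributes; that is another reason this idiom is preferred over ad-hoc sysfs_create_group() calls in probe.
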
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 2211277..e9b7231 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -1,7 +1,7 @@
/*
* Watchdog implementation based on z/VM Watchdog Timer API
*
- * Copyright IBM Corp. 2004,2009
+ * Copyright IBM Corp. 2004, 2009
*
* The user space watchdog daemon can use this driver as
* /dev/vmwatchdog to have z/VM execute the specified CP
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 3303d66..e3b9308 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -5,7 +5,7 @@
*
* For more information please refer to Documentation/s390/zfcpdump.txt
*
- * Copyright IBM Corp. 2003,2008
+ * Copyright IBM Corp. 2003, 2008
* Author(s): Michael Holzheu
*/
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 65d2e76..bc10220 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/cio/airq.c
* Support for adapter interruptions
*
- * Copyright IBM Corp. 1999,2007
+ * Copyright IBM Corp. 1999, 2007
* Author(s): Ingo Adlung <adlung@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
* Arnd Bergmann <arndb@de.ibm.com>
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 08c6603..2d2a966 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -1,9 +1,7 @@
/*
- * drivers/s390/cio/blacklist.c
* S/390 common I/O routines -- blacklisting of specific devices
*
- * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
+ * Copyright IBM Corp. 1999, 2002
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index e792436..50ad5fd 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -1,7 +1,5 @@
/*
- * drivers/s390/cio/chp.c
- *
- * Copyright IBM Corp. 1999,2010
+ * Copyright IBM Corp. 1999, 2010
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
@@ -362,10 +360,13 @@ static struct attribute *chp_attrs[] = {
&dev_attr_shared.attr,
NULL,
};
-
static struct attribute_group chp_attr_group = {
.attrs = chp_attrs,
};
+static const struct attribute_group *chp_attr_groups[] = {
+ &chp_attr_group,
+ NULL,
+};
static void chp_release(struct device *dev)
{
@@ -397,6 +398,7 @@ int chp_new(struct chp_id chpid)
chp->chpid = chpid;
chp->state = 1;
chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
+ chp->dev.groups = chp_attr_groups;
chp->dev.release = chp_release;
mutex_init(&chp->lock);
@@ -426,16 +428,10 @@ int chp_new(struct chp_id chpid)
put_device(&chp->dev);
goto out;
}
- ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
- if (ret) {
- device_unregister(&chp->dev);
- goto out;
- }
mutex_lock(&channel_subsystems[chpid.cssid]->mutex);
if (channel_subsystems[chpid.cssid]->cm_enabled) {
ret = chp_add_cmg_attr(chp);
if (ret) {
- sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
device_unregister(&chp->dev);
mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
goto out;
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 12b4903..e1399db 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -1,7 +1,5 @@
/*
- * drivers/s390/cio/chp.h
- *
- * Copyright IBM Corp. 2007,2010
+ * Copyright IBM Corp. 2007, 2010
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index a84631a..cfe0c08 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/cio/chsc.c
* S/390 common I/O routines -- channel subsystem call
*
- * Copyright IBM Corp. 1999,2010
+ * Copyright IBM Corp. 1999, 2010
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index a6ddaed..33d1ef7 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/cio/cio.c
* S/390 common I/O routines -- low level i/o calls
*
- * Copyright IBM Corp. 1999,2008
+ * Copyright IBM Corp. 1999, 2008
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 204ca72..c9fc61c 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/cio/cmf.c
- *
* Linux on zSeries Channel Measurement Facility support
*
- * Copyright 2000,2006 IBM Corporation
+ * Copyright IBM Corp. 2000, 2006
*
* Authors: Arnd Bergmann <arndb@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -1341,7 +1339,7 @@ module_init(init_cmf);
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("channel measurement facility base driver\n"
- "Copyright 2003 IBM Corporation\n");
+ "Copyright IBM Corp. 2003\n");
EXPORT_SYMBOL_GPL(enable_cmf);
EXPORT_SYMBOL_GPL(disable_cmf);
diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c
index d0a2dff..0f8a25f 100644
--- a/drivers/s390/cio/crw.c
+++ b/drivers/s390/cio/crw.c
@@ -1,7 +1,7 @@
/*
* Channel report handling code
*
- * Copyright IBM Corp. 2000,2009
+ * Copyright IBM Corp. 2000, 2009
* Author(s): Ingo Adlung <adlung@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Cornelia Huck <cornelia.huck@de.ibm.com>,
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index f8f952d..ed25c87 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/cio/device.c
* bus driver for ccw devices
*
- * Copyright IBM Corp. 2002,2008
+ * Copyright IBM Corp. 2002, 2008
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 1b85351..1bb1d00 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/cio/device_fsm.c
* finite state machine for device handling
*
- * Copyright IBM Corp. 2002,2008
+ * Copyright IBM Corp. 2002, 2008
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 78a0b43..d4fa3054 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -1,7 +1,7 @@
/*
* CCW device SENSE ID I/O handling.
*
- * Copyright IBM Corp. 2002,2009
+ * Copyright IBM Corp. 2002, 2009
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 07a4fd2..368368f 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -1,7 +1,7 @@
/*
* CCW device PGID and path verification I/O handling.
*
- * Copyright IBM Corp. 2002,2009
+ * Copyright IBM Corp. 2002, 2009
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 66d8066..15b56a1 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -1,8 +1,5 @@
/*
- * drivers/s390/cio/device_status.c
- *
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
+ * Copyright IBM Corp. 2002
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index 4d10981..e6d5f8c 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/cio/idset.c
- *
* Copyright IBM Corp. 2007
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
index 7543da4..3d943f0 100644
--- a/drivers/s390/cio/idset.h
+++ b/drivers/s390/cio/idset.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/cio/idset.h
- *
* Copyright IBM Corp. 2007
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index b962ffb..5132554 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -1,7 +1,5 @@
/*
- * linux/drivers/s390/cio/qdio.h
- *
- * Copyright 2000,2009 IBM Corp.
+ * Copyright IBM Corp. 2000, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 29021f4..e6e0d31 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -1,7 +1,5 @@
/*
- * drivers/s390/cio/qdio_debug.c
- *
- * Copyright IBM Corp. 2008,2009
+ * Copyright IBM Corp. 2008, 2009
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
*/
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index 5d70bd1..e1f6468 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/cio/qdio_debug.h
- *
* Copyright IBM Corp. 2008
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 7493efa..e06fa03 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/cio/qdio_main.c
- *
* Linux for s390 qdio support, buffer handling, qdio API and module support.
*
- * Copyright 2000,2008 IBM Corp.
+ * Copyright IBM Corp. 2000, 2008
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
* 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index ecf12f0..6c973db 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -1,9 +1,7 @@
/*
- * driver/s390/cio/qdio_setup.c
- *
* qdio queue initialization
*
- * Copyright (C) IBM Corp. 2008
+ * Copyright IBM Corp. 2008
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/kernel.h>
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 011eade..2e06008 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -1,7 +1,5 @@
/*
- * linux/drivers/s390/cio/thinint_qdio.c
- *
- * Copyright 2000,2009 IBM Corp.
+ * Copyright IBM Corp. 2000, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index b987d46..ae258a4 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1,7 +1,5 @@
/*
- * linux/drivers/s390/crypto/ap_bus.c
- *
- * Copyright (C) 2006 IBM Corporation
+ * Copyright IBM Corp. 2006
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
@@ -70,7 +68,7 @@ static int ap_select_domain(void);
*/
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
- "Copyright 2006 IBM Corporation");
+ "Copyright IBM Corp. 2006");
MODULE_LICENSE("GPL");
/*
@@ -338,6 +336,12 @@ static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
break;
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
+ if (i < AP_MAX_RESET - 1) {
+ udelay(5);
+ status = ap_queue_interruption_control(qid,
+ ind);
+ continue;
+ }
break;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
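
The lines added to ap_queue_enable_interruption() above give transient replies another chance: on RESET_IN_PROGRESS or BUSY the code pauses for 5 microseconds and reissues the interruption control, bounded by AP_MAX_RESET. Stripped of the AP specifics, this amounts to a bounded retry on a transient error; issue_cmd() and MAX_TRIES below are illustrative names, not AP bus symbols:

    int i, rc = -EBUSY;

    for (i = 0; i < MAX_TRIES; i++) {
        rc = issue_cmd(dev);
        if (rc != -EBUSY)
            break;      /* success or a hard error: stop retrying */
        udelay(5);      /* transient busy/reset: brief pause, try again */
    }
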
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 726fc65..52d6199 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -1,7 +1,5 @@
/*
- * linux/drivers/s390/crypto/ap_bus.h
- *
- * Copyright (C) 2006 IBM Corporation
+ * Copyright IBM Corp. 2006
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 8852320..2f94132 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_api.c
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
* Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -47,7 +45,7 @@
*/
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, "
- "Copyright 2001, 2006 IBM Corporation");
+ "Copyright IBM Corp. 2001, 2006");
MODULE_LICENSE("GPL");
static DEFINE_SPINLOCK(zcrypt_device_lock);
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 9688f39..7a32c4b 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_api.h
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
* Cornelia Huck <cornelia.huck@de.ibm.com>
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
index ed82f2f..1f42f10 100644
--- a/drivers/s390/crypto/zcrypt_cca_key.h
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_cca_key.h
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 4681244..744c668 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_cex2a.c
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -66,7 +64,7 @@ static struct ap_device_id zcrypt_cex2a_ids[] = {
MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids);
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, "
- "Copyright 2001, 2006 IBM Corporation");
+ "Copyright IBM Corp. 2001, 2006");
MODULE_LICENSE("GPL");
static int zcrypt_cex2a_probe(struct ap_device *ap_dev);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h
index 0350665..0dce4b9 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.h
+++ b/drivers/s390/crypto/zcrypt_cex2a.h
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_cex2a.h
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 03ba27f..0965e26 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_error.h
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index ad7951c..f2b71d8 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_pcica.c
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -56,7 +54,7 @@ static struct ap_device_id zcrypt_pcica_ids[] = {
MODULE_DEVICE_TABLE(ap, zcrypt_pcica_ids);
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("PCICA Cryptographic Coprocessor device driver, "
- "Copyright 2001, 2006 IBM Corporation");
+ "Copyright IBM Corp. 2001, 2006");
MODULE_LICENSE("GPL");
static int zcrypt_pcica_probe(struct ap_device *ap_dev);
diff --git a/drivers/s390/crypto/zcrypt_pcica.h b/drivers/s390/crypto/zcrypt_pcica.h
index 3be1118..9a59155 100644
--- a/drivers/s390/crypto/zcrypt_pcica.h
+++ b/drivers/s390/crypto/zcrypt_pcica.h
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_pcica.h
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index e5dd335..0d90a43 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_pcicc.c
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -68,7 +66,7 @@ static struct ap_device_id zcrypt_pcicc_ids[] = {
MODULE_DEVICE_TABLE(ap, zcrypt_pcicc_ids);
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("PCICC Cryptographic Coprocessor device driver, "
- "Copyright 2001, 2006 IBM Corporation");
+ "Copyright IBM Corp. 2001, 2006");
MODULE_LICENSE("GPL");
static int zcrypt_pcicc_probe(struct ap_device *ap_dev);
diff --git a/drivers/s390/crypto/zcrypt_pcicc.h b/drivers/s390/crypto/zcrypt_pcicc.h
index 6d44548..7fe27e1 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.h
+++ b/drivers/s390/crypto/zcrypt_pcicc.h
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_pcicc.h
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index f7cc434..ccb4f8b6 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_pcixcc.c
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -78,7 +76,7 @@ static struct ap_device_id zcrypt_pcixcc_ids[] = {
MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids);
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, "
- "Copyright 2001, 2006 IBM Corporation");
+ "Copyright IBM Corp. 2001, 2006");
MODULE_LICENSE("GPL");
static int zcrypt_pcixcc_probe(struct ap_device *ap_dev);
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.h b/drivers/s390/crypto/zcrypt_pcixcc.h
index 8cb7d7a..c7cdf59 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.h
+++ b/drivers/s390/crypto/zcrypt_pcixcc.h
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_pcixcc.h
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index d74e9ae..47cccd5 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -1,5 +1,5 @@
/*
- * kvm_virtio.c - virtio for kvm on s390
+ * virtio for kvm on s390
*
* Copyright IBM Corp. 2008
*
@@ -25,6 +25,7 @@
#include <asm/io.h>
#include <asm/kvm_para.h>
#include <asm/kvm_virtio.h>
+#include <asm/sclp.h>
#include <asm/setup.h>
#include <asm/irq.h>
@@ -468,7 +469,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
static int __init s390_virtio_console_init(void)
{
- if (!MACHINE_IS_KVM)
+ if (sclp_has_vt220() || sclp_has_linemode())
return -ENODEV;
return virtio_cons_early_init(early_put_chars);
}
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 6b1ff90..a0a4afe 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -1,5 +1,4 @@
/*
- * drivers/s390/net/claw.c
* ESCON CLAW network driver
*
* Linux for zSeries version
@@ -3380,5 +3379,5 @@ module_exit(claw_cleanup);
MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \
- "Copyright 2000,2008 IBM Corporation\n");
+ "Copyright IBM Corp. 2000, 2008\n");
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/ctcm_dbug.c b/drivers/s390/net/ctcm_dbug.c
index d962fd7..6514e1c 100644
--- a/drivers/s390/net/ctcm_dbug.c
+++ b/drivers/s390/net/ctcm_dbug.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/ctcm_dbug.c
- *
* Copyright IBM Corp. 2001, 2007
* Authors: Peter Tiedemann (ptiedem@de.ibm.com)
*
diff --git a/drivers/s390/net/ctcm_dbug.h b/drivers/s390/net/ctcm_dbug.h
index 26966d0..47bf050 100644
--- a/drivers/s390/net/ctcm_dbug.h
+++ b/drivers/s390/net/ctcm_dbug.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/ctcm_dbug.h
- *
* Copyright IBM Corp. 2001, 2007
* Authors: Peter Tiedemann (ptiedem@de.ibm.com)
*
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index a697669..d4ade9e 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/ctcm_fsms.c
- *
* Copyright IBM Corp. 2001, 2007
* Authors: Fritz Elfert (felfert@millenux.com)
* Peter Tiedemann (ptiedem@de.ibm.com)
diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h
index 046d077..c963d04 100644
--- a/drivers/s390/net/ctcm_fsms.h
+++ b/drivers/s390/net/ctcm_fsms.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/ctcm_fsms.h
- *
* Copyright IBM Corp. 2001, 2007
* Authors: Fritz Elfert (felfert@millenux.com)
* Peter Tiedemann (ptiedem@de.ibm.com)
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 3cd2554..5227e57 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/ctcm_main.c
- *
* Copyright IBM Corp. 2001, 2009
* Author(s):
* Original CTC driver(s):
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index b9056a5..477c933 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/ctcm_main.h
- *
* Copyright IBM Corp. 2001, 2007
* Authors: Fritz Elfert (felfert@millenux.com)
* Peter Tiedemann (ptiedem@de.ibm.com)
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index ac7975b..05b734a 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/ctcm_mpc.c
- *
* Copyright IBM Corp. 2004, 2007
* Authors: Belinda Thompson (belindat@us.ibm.com)
* Andy Richter (richtera@us.ibm.com)
diff --git a/drivers/s390/net/ctcm_mpc.h b/drivers/s390/net/ctcm_mpc.h
index 1fa07b0..bd1b1cc 100644
--- a/drivers/s390/net/ctcm_mpc.h
+++ b/drivers/s390/net/ctcm_mpc.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/ctcm_mpc.h
- *
* Copyright IBM Corp. 2007
* Authors: Peter Tiedemann (ptiedem@de.ibm.com)
*
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 0c27ae7..985b5dc 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/ctcm_sysfs.c
- *
* Copyright IBM Corp. 2007, 2007
* Authors: Peter Tiedemann (ptiedem@de.ibm.com)
*
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 06e8f31..fa7adad 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/qeth_core.h
- *
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index e118e1e..7a8b096 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/qeth_core_main.c
- *
* Copyright IBM Corp. 2007, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index 7fab654..5cebfdd 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/qeth_core_mpc.c
- *
* Copyright IBM Corp. 2007
* Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index a11b30c..3690bbf 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/qeth_core_mpc.h
- *
* Copyright IBM Corp. 2007
* Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index f163af5..9655dc0 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/qeth_core_sys.c
- *
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 4269865..2db4093 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/qeth_l2_main.c
- *
* Copyright IBM Corp. 2007, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
@@ -647,7 +645,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
}
QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
} else {
- random_ether_addr(card->dev->dev_addr);
+ eth_random_addr(card->dev->dev_addr);
memcpy(card->dev->dev_addr, vendor_pre, 3);
}
return 0;
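
The hunk above swaps the old random_ether_addr() helper for its renamed replacement eth_random_addr(); both fill the buffer with a random, locally administered unicast MAC, after which the driver overwrites the first three bytes with its own prefix. A minimal sketch of that fallback path, assuming a hypothetical vendor prefix value:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* Sketch only: eth_random_addr() fills dev_addr with a random, locally
 * administered unicast address; the driver then stamps its own prefix
 * over the first three bytes.  The prefix value here is hypothetical. */
static void example_set_fallback_mac(struct net_device *dev)
{
        static const u8 vendor_pre[3] = { 0x02, 0x00, 0x00 };

        eth_random_addr(dev->dev_addr);
        memcpy(dev->dev_addr, vendor_pre, 3);
}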
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index e367315..29c1c00 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/qeth_l3.h
- *
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 7be5e97..0cf7066 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/qeth_l3_main.c
- *
* Copyright IBM Corp. 2007, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
@@ -1473,7 +1471,7 @@ static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
memcpy(card->dev->dev_addr,
cmd->data.create_destroy_addr.unique_id, ETH_ALEN);
else
- random_ether_addr(card->dev->dev_addr);
+ eth_random_addr(card->dev->dev_addr);
return 0;
}
@@ -2700,10 +2698,11 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
rcu_read_lock();
dst = skb_dst(skb);
if (dst)
- n = dst_get_neighbour_noref(dst);
+ n = dst_neigh_lookup_skb(dst, skb);
if (n) {
cast_type = n->type;
rcu_read_unlock();
+ neigh_release(n);
if ((cast_type == RTN_BROADCAST) ||
(cast_type == RTN_MULTICAST) ||
(cast_type == RTN_ANYCAST))
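
The change above is more than a rename: dst_get_neighbour_noref() returned a borrowed pointer valid only under RCU, while dst_neigh_lookup_skb() takes a reference on the neighbour, so every successful lookup now has to be balanced with neigh_release() — which is what the added line does once the cast type has been copied out. A minimal sketch of the pattern, with an illustrative function name:

#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/neighbour.h>

/* Sketch only: the neighbour returned by dst_neigh_lookup_skb() is
 * reference-counted and must be dropped with neigh_release(). */
static int example_cast_type(struct sk_buff *skb)
{
        struct dst_entry *dst;
        struct neighbour *n;
        int type = -1;

        rcu_read_lock();
        dst = skb_dst(skb);
        if (dst) {
                n = dst_neigh_lookup_skb(dst, skb);
                if (n) {
                        type = n->type;
                        neigh_release(n);       /* balance the lookup */
                }
        }
        rcu_read_unlock();
        return type;
}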
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 4cafedf..ebc3794 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/qeth_l3_sys.c
- *
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
diff --git a/drivers/s390/net/smsgiucv.h b/drivers/s390/net/smsgiucv.h
index 149a115..45bc925 100644
--- a/drivers/s390/net/smsgiucv.h
+++ b/drivers/s390/net/smsgiucv.h
@@ -1,7 +1,7 @@
/*
* IUCV special message driver
*
- * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2003
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 0860181..aff8621 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -3,7 +3,7 @@
*
* Module interface and handling of zfcp data structures.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2010
*/
/*
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 96f13ad8..e37f045 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -3,7 +3,7 @@
*
* Registration and callback for the s390 common I/O layer.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index fab2c25..fbd8b4d 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -5,7 +5,7 @@
* Access Control Lists / Control File Data Channel;
* handling of response code and states for ports and LUNs.
*
- * Copyright IBM Corporation 2008, 2010
+ * Copyright IBM Corp. 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index a9a816e..3c1d220 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
*
* Debug traces for zfcp.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index ed5d921..2955e1a 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -3,7 +3,7 @@
*
* Global definitions for the zfcp device driver.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2010
*/
#ifndef ZFCP_DEF_H
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index e1b4f80..92d3df6 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3,7 +3,7 @@
*
* Error Recovery Procedures (ERP).
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 2302e1c..36f4227 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
*
* External function declarations.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2010
*/
#ifndef ZFCP_EXT_H
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 297e6b7..88688a8 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -3,7 +3,7 @@
*
* Fibre Channel related functions for the zfcp device driver.
*
- * Copyright IBM Corporation 2008, 2010
+ * Copyright IBM Corp. 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 4561f3b..b1d2024 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -4,7 +4,7 @@
* Fibre Channel related definitions and inline functions for the zfcp
* device driver
*
- * Copyright IBM Corporation 2009
+ * Copyright IBM Corp. 2009
*/
#ifndef ZFCP_FC_H
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index e9a787e..e1c1efc 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -3,7 +3,7 @@
*
* Implementation of FSF commands.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index db8c853..5e795b8 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,7 +3,7 @@
*
* Interface to the FSF support functions.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2010
*/
#ifndef FSF_H
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index e14da57..b9fffc8 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -3,7 +3,7 @@
*
* Setup and helper functions to access QDIO.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 8ac7f53..497cd37 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -3,7 +3,7 @@
*
* Header file for zfcp qdio interface
*
- * Copyright IBM Corporation 2010
+ * Copyright IBM Corp. 2010
*/
#ifndef ZFCP_QDIO_H
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
index a72d1b7..7c2c619 100644
--- a/drivers/s390/scsi/zfcp_reqlist.h
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -4,7 +4,7 @@
* Data structure and helper functions for tracking pending FSF
* requests.
*
- * Copyright IBM Corporation 2009
+ * Copyright IBM Corp. 2009
*/
#ifndef ZFCP_REQLIST_H
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index b79576b..7b31e3f 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index cdc4ff7..c66af27 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -3,7 +3,7 @@
*
* sysfs attributes.
*
- * Copyright IBM Corporation 2008, 2010
+ * Copyright IBM Corp. 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 20796eb..3f2bff0 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -4,7 +4,7 @@
* Tracking of manually configured LUNs and helper functions to
* register the LUNs with the SCSI midlayer.
*
- * Copyright IBM Corporation 2010
+ * Copyright IBM Corp. 2010
*/
#include "zfcp_def.h"
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 532d212..393e7ce 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -201,7 +201,7 @@ static void asd_get_response_tasklet(struct asd_ascb *ascb,
if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
- memcpy(&resp->ending_fis[0], r+16, 24);
+ memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE);
ts->buf_valid_size = sizeof(*resp);
}
}
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 01bb04c..2a09679 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -571,13 +571,12 @@ free_cmd:
static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
int iscsi_cmd, int size)
{
- cmd->va = pci_alloc_consistent(phba->ctrl.pdev, sizeof(size),
- &cmd->dma);
+ cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
if (!cmd->va) {
SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n");
return -ENOMEM;
}
- memset(cmd->va, 0, sizeof(size));
+ memset(cmd->va, 0, size);
cmd->size = size;
be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
return 0;
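
The bug being fixed above is the classic sizeof-on-a-scalar mistake: sizeof(size) is the size of the int variable itself, not the value it holds, so the DMA buffer was allocated and zeroed only sizeof(int) bytes long regardless of the length the caller asked for. A self-contained illustration (userspace, for clarity):

#include <stdio.h>

/* Illustration of the bug fixed above: sizeof(size) measures the
 * variable's type, not the value it holds, so the old code allocated
 * and zeroed sizeof(int) bytes instead of 'size' bytes. */
int main(void)
{
        int size = 4096;

        printf("requested length: %d\n", size);           /* 4096 */
        printf("sizeof(size):     %zu\n", sizeof(size));  /* 4 on most ABIs */
        return 0;
}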
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 8b6c6bf..b839274 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -426,6 +426,23 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
vshost = vport->drv_port.im_port->shost;
fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn);
fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn);
+ fc_host_supported_classes(vshost) = FC_COS_CLASS3;
+
+ memset(fc_host_supported_fc4s(vshost), 0,
+ sizeof(fc_host_supported_fc4s(vshost)));
+
+ /* For FCP type 0x08 */
+ if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
+ fc_host_supported_fc4s(vshost)[2] = 1;
+
+ /* For fibre channel services type 0x20 */
+ fc_host_supported_fc4s(vshost)[7] = 1;
+
+ fc_host_supported_speeds(vshost) =
+ bfad_im_supported_speeds(&bfad->bfa);
+ fc_host_maxframe_size(vshost) =
+ bfa_fcport_get_maxfrsize(&bfad->bfa);
+
fc_vport->dd_data = vport;
vport->drv_port.im_port->fc_vport = fc_vport;
} else if (rc == BFA_STATUS_INVALID_WWN)
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 3153923..1ac09af 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -987,7 +987,7 @@ done:
return 0;
}
-static u32
+u32
bfad_im_supported_speeds(struct bfa_s *bfa)
{
struct bfa_ioc_attr_s *ioc_attr;
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 0814367..f6c1023 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -37,6 +37,7 @@ int bfad_im_scsi_host_alloc(struct bfad_s *bfad,
struct bfad_im_port_s *im_port, struct device *dev);
void bfad_im_scsi_host_free(struct bfad_s *bfad,
struct bfad_im_port_s *im_port);
+u32 bfad_im_supported_speeds(struct bfa_s *bfa);
#define MAX_FCP_TARGET 1024
#define MAX_FCP_LUN 16384
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index a4953ef..42969e8 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -59,10 +59,11 @@
#include "57xx_hsi_bnx2fc.h"
#include "bnx2fc_debug.h"
#include "../../net/ethernet/broadcom/cnic_if.h"
+#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h"
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "1.0.10"
+#define BNX2FC_VERSION "1.0.11"
#define PFX "bnx2fc: "
@@ -84,6 +85,8 @@
#define BNX2FC_NUM_MAX_SESS 1024
#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS))
+#define BNX2FC_MAX_NPIV 256
+
#define BNX2FC_MAX_OUTSTANDING_CMNDS 2048
#define BNX2FC_CAN_QUEUE BNX2FC_MAX_OUTSTANDING_CMNDS
#define BNX2FC_ELSTM_XIDS BNX2FC_CAN_QUEUE
@@ -206,6 +209,7 @@ struct bnx2fc_hba {
struct fcoe_statistics_params *stats_buffer;
dma_addr_t stats_buf_dma;
struct completion stat_req_done;
+ struct fcoe_capabilities fcoe_cap;
/*destroy handling */
struct timer_list destroy_timer;
@@ -228,13 +232,16 @@ struct bnx2fc_interface {
struct packet_type fip_packet_type;
struct workqueue_struct *timer_work_queue;
struct kref kref;
- struct fcoe_ctlr ctlr;
u8 vlan_enabled;
int vlan_id;
bool enabled;
};
-#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr)
+#define bnx2fc_from_ctlr(x) \
+ ((struct bnx2fc_interface *)((x) + 1))
+
+#define bnx2fc_to_ctlr(x) \
+ ((struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1))
struct bnx2fc_lport {
struct list_head list;
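
With the embedded struct fcoe_ctlr gone from bnx2fc_interface, the two objects are instead laid out back to back in a single allocation (see the bnx2fc_interface_create() hunk later in this patch, where fcoe_ctlr_device_add() is asked for sizeof(*interface) + sizeof(struct fcoe_ctlr)), so the conversion macros reduce to pointer arithmetic: the interface starts immediately after the ctlr, hence the "+ 1" and "- 1". A self-contained sketch with stand-in types shows why the two macros invert each other:

#include <stdio.h>
#include <stdlib.h>

/* Sketch only: when a 'ctlr' and its private 'interface' come from one
 * allocation, ctlr + 1 points at the interface and casting the
 * interface back and subtracting one recovers the ctlr.  This relies on
 * the private area starting right after the first struct, which is how
 * the allocation in this patch arranges it. */
struct ex_ctlr { int mode; };
struct ex_interface { int vlan_id; };

#define ex_from_ctlr(c) ((struct ex_interface *)((c) + 1))
#define ex_to_ctlr(i)   ((struct ex_ctlr *)(((struct ex_ctlr *)(i)) - 1))

int main(void)
{
        struct ex_ctlr *ctlr = calloc(1, sizeof(*ctlr) + sizeof(struct ex_interface));
        struct ex_interface *iface = ex_from_ctlr(ctlr);

        printf("round trip ok: %d\n", ex_to_ctlr(iface) == ctlr);  /* 1 */
        free(ctlr);
        return 0;
}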
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index ce0ce3e..bdbbb13 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -854,7 +854,6 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
struct fc_exch *exch = fc_seq_exch(seq);
struct fc_lport *lport = exch->lp;
u8 *mac;
- struct fc_frame_header *fh;
u8 op;
if (IS_ERR(fp))
@@ -862,13 +861,6 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
mac = fr_cb(fp)->granted_mac;
if (is_zero_ether_addr(mac)) {
- fh = fc_frame_header_get(fp);
- if (fh->fh_type != FC_TYPE_ELS) {
- printk(KERN_ERR PFX "bnx2fc_flogi_resp:"
- "fh_type != FC_TYPE_ELS\n");
- fc_frame_free(fp);
- return;
- }
op = fc_frame_payload_op(fp);
if (lport->vport) {
if (op == ELS_LS_RJT) {
@@ -878,12 +870,10 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
return;
}
}
- if (fcoe_ctlr_recv_flogi(fip, lport, fp)) {
- fc_frame_free(fp);
- return;
- }
+ fcoe_ctlr_recv_flogi(fip, lport, fp);
}
- fip->update_mac(lport, mac);
+ if (!is_zero_ether_addr(mac))
+ fip->update_mac(lport, mac);
done:
fc_lport_flogi_resp(seq, fp, lport);
}
@@ -910,7 +900,7 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
{
struct fcoe_port *port = lport_priv(lport);
struct bnx2fc_interface *interface = port->priv;
- struct fcoe_ctlr *fip = &interface->ctlr;
+ struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
struct fc_frame_header *fh = fc_frame_header_get(fp);
switch (op) {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index c1c6a92..05fe662 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
-#define DRV_MODULE_RELDATE "Jan 22, 2011"
+#define DRV_MODULE_RELDATE "Apr 24, 2012"
static char version[] __devinitdata =
@@ -54,6 +54,7 @@ static struct cnic_ulp_ops bnx2fc_cnic_cb;
static struct libfc_function_template bnx2fc_libfc_fcn_templ;
static struct scsi_host_template bnx2fc_shost_template;
static struct fc_function_template bnx2fc_transport_function;
+static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ;
static struct fc_function_template bnx2fc_vport_xport_function;
static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
static void __bnx2fc_destroy(struct bnx2fc_interface *interface);
@@ -88,6 +89,7 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport);
static void bnx2fc_stop(struct bnx2fc_interface *interface);
static int __init bnx2fc_mod_init(void);
static void __exit bnx2fc_mod_exit(void);
+static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
unsigned int bnx2fc_debug_level;
module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
@@ -118,6 +120,41 @@ static void bnx2fc_get_lesb(struct fc_lport *lport,
__fcoe_get_lesb(lport, fc_lesb, netdev);
}
+static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+ struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+ struct net_device *netdev = bnx2fc_netdev(fip->lp);
+ struct fcoe_fc_els_lesb *fcoe_lesb;
+ struct fc_els_lesb fc_lesb;
+
+ __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
+ fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
+
+ ctlr_dev->lesb.lesb_link_fail =
+ ntohl(fcoe_lesb->lesb_link_fail);
+ ctlr_dev->lesb.lesb_vlink_fail =
+ ntohl(fcoe_lesb->lesb_vlink_fail);
+ ctlr_dev->lesb.lesb_miss_fka =
+ ntohl(fcoe_lesb->lesb_miss_fka);
+ ctlr_dev->lesb.lesb_symb_err =
+ ntohl(fcoe_lesb->lesb_symb_err);
+ ctlr_dev->lesb.lesb_err_block =
+ ntohl(fcoe_lesb->lesb_err_block);
+ ctlr_dev->lesb.lesb_fcs_error =
+ ntohl(fcoe_lesb->lesb_fcs_error);
+}
+EXPORT_SYMBOL(bnx2fc_ctlr_get_lesb);
+
+static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
+{
+ struct fcoe_ctlr_device *ctlr_dev =
+ fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ struct bnx2fc_interface *fcoe = fcoe_ctlr_priv(ctlr);
+
+ fcf_dev->vlan_id = fcoe->vlan_id;
+}
+
static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
{
struct fcoe_percpu_s *bg;
@@ -244,6 +281,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
struct sk_buff *skb;
struct fc_frame_header *fh;
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
struct bnx2fc_hba *hba;
struct fcoe_port *port;
struct fcoe_hdr *hp;
@@ -256,6 +294,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
port = (struct fcoe_port *)lport_priv(lport);
interface = port->priv;
+ ctlr = bnx2fc_to_ctlr(interface);
hba = interface->hba;
fh = fc_frame_header_get(fp);
@@ -268,12 +307,12 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
}
if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
- if (!interface->ctlr.sel_fcf) {
+ if (!ctlr->sel_fcf) {
BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
kfree_skb(skb);
return -EINVAL;
}
- if (fcoe_ctlr_els_send(&interface->ctlr, lport, skb))
+ if (fcoe_ctlr_els_send(ctlr, lport, skb))
return 0;
}
@@ -346,14 +385,14 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
/* fill up mac and fcoe headers */
eh = eth_hdr(skb);
eh->h_proto = htons(ETH_P_FCOE);
- if (interface->ctlr.map_dest)
+ if (ctlr->map_dest)
fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
else
/* insert GW address */
- memcpy(eh->h_dest, interface->ctlr.dest_addr, ETH_ALEN);
+ memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
- if (unlikely(interface->ctlr.flogi_oxid != FC_XID_UNKNOWN))
- memcpy(eh->h_source, interface->ctlr.ctl_src_addr, ETH_ALEN);
+ if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
+ memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
else
memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
@@ -403,6 +442,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
{
struct fc_lport *lport;
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
struct fc_frame_header *fh;
struct fcoe_rcv_info *fr;
struct fcoe_percpu_s *bg;
@@ -410,7 +450,8 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
interface = container_of(ptype, struct bnx2fc_interface,
fcoe_packet_type);
- lport = interface->ctlr.lp;
+ ctlr = bnx2fc_to_ctlr(interface);
+ lport = ctlr->lp;
if (unlikely(lport == NULL)) {
printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n");
@@ -758,11 +799,13 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
{
struct bnx2fc_hba *hba;
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
struct fcoe_port *port;
u64 wwnn, wwpn;
port = lport_priv(lport);
interface = port->priv;
+ ctlr = bnx2fc_to_ctlr(interface);
hba = interface->hba;
/* require support for get_pauseparam ethtool op. */
@@ -781,13 +824,13 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
if (!lport->vport) {
if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
- wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
+ wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
1, 0);
BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
fc_set_wwnn(lport, wwnn);
if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
- wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr,
+ wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
2, 0);
BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
@@ -824,6 +867,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
struct fc_lport *lport;
struct fc_lport *vport;
struct bnx2fc_interface *interface, *tmp;
+ struct fcoe_ctlr *ctlr;
int wait_for_upload = 0;
u32 link_possible = 1;
@@ -874,7 +918,8 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
if (interface->hba != hba)
continue;
- lport = interface->ctlr.lp;
+ ctlr = bnx2fc_to_ctlr(interface);
+ lport = ctlr->lp;
BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
interface->netdev->name, event);
@@ -889,8 +934,8 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
* on a stale vlan
*/
if (interface->enabled)
- fcoe_ctlr_link_up(&interface->ctlr);
- } else if (fcoe_ctlr_link_down(&interface->ctlr)) {
+ fcoe_ctlr_link_up(ctlr);
+ } else if (fcoe_ctlr_link_down(ctlr)) {
mutex_lock(&lport->lp_mutex);
list_for_each_entry(vport, &lport->vports, list)
fc_host_port_type(vport->host) =
@@ -995,9 +1040,11 @@ static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
struct net_device *orig_dev)
{
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
interface = container_of(ptype, struct bnx2fc_interface,
fip_packet_type);
- fcoe_ctlr_recv(&interface->ctlr, skb);
+ ctlr = bnx2fc_to_ctlr(interface);
+ fcoe_ctlr_recv(ctlr, skb);
return 0;
}
@@ -1155,6 +1202,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
{
struct net_device *netdev = interface->netdev;
struct net_device *physdev = interface->hba->phys_dev;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct netdev_hw_addr *ha;
int sel_san_mac = 0;
@@ -1169,7 +1217,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
(is_valid_ether_addr(ha->addr))) {
- memcpy(interface->ctlr.ctl_src_addr, ha->addr,
+ memcpy(ctlr->ctl_src_addr, ha->addr,
ETH_ALEN);
sel_san_mac = 1;
BNX2FC_MISC_DBG("Found SAN MAC\n");
@@ -1224,19 +1272,23 @@ static void bnx2fc_release_transport(void)
static void bnx2fc_interface_release(struct kref *kref)
{
+ struct fcoe_ctlr_device *ctlr_dev;
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
struct net_device *netdev;
interface = container_of(kref, struct bnx2fc_interface, kref);
BNX2FC_MISC_DBG("Interface is being released\n");
+ ctlr = bnx2fc_to_ctlr(interface);
+ ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
netdev = interface->netdev;
/* tear-down FIP controller */
if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags))
- fcoe_ctlr_destroy(&interface->ctlr);
+ fcoe_ctlr_destroy(ctlr);
- kfree(interface);
+ fcoe_ctlr_device_delete(ctlr_dev);
dev_put(netdev);
module_put(THIS_MODULE);
@@ -1274,6 +1326,7 @@ static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba)
static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
{
struct bnx2fc_hba *hba;
+ struct fcoe_capabilities *fcoe_cap;
int rc;
hba = kzalloc(sizeof(*hba), GFP_KERNEL);
@@ -1309,6 +1362,21 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
goto cmgr_err;
}
+ fcoe_cap = &hba->fcoe_cap;
+
+ fcoe_cap->capability1 = BNX2FC_TM_MAX_SQES <<
+ FCOE_IOS_PER_CONNECTION_SHIFT;
+ fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS <<
+ FCOE_LOGINS_PER_PORT_SHIFT;
+ fcoe_cap->capability2 = BNX2FC_MAX_OUTSTANDING_CMNDS <<
+ FCOE_NUMBER_OF_EXCHANGES_SHIFT;
+ fcoe_cap->capability2 |= BNX2FC_MAX_NPIV <<
+ FCOE_NPIV_WWN_PER_PORT_SHIFT;
+ fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS <<
+ FCOE_TARGETS_SUPPORTED_SHIFT;
+ fcoe_cap->capability3 |= BNX2FC_MAX_OUTSTANDING_CMNDS <<
+ FCOE_OUTSTANDING_COMMANDS_SHIFT;
+ fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL;
init_waitqueue_head(&hba->shutdown_wait);
init_waitqueue_head(&hba->destroy_wait);
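
Each fcoe_cap word above packs several limits into a single u32 by shifting them to named bit positions (the FCOE_*_SHIFT constants presumably come from the newly included bnx2x_mfw_req.h). The idiom is ordinary shift-and-mask packing; a small illustration with hypothetical shift and mask values:

#include <stdio.h>
#include <stdint.h>

/* Illustration only: two limits share one 32-bit capability word, each
 * shifted into its own bit range.  The shift and mask values here are
 * hypothetical; the real ones live in the firmware interface header. */
#define EX_IOS_PER_CONN_SHIFT      0
#define EX_IOS_PER_CONN_MASK       0x0000ffffu
#define EX_LOGINS_PER_PORT_SHIFT   16
#define EX_LOGINS_PER_PORT_MASK    0xffff0000u

int main(void)
{
        uint32_t cap = 0;

        cap |= (uint32_t)2048 << EX_IOS_PER_CONN_SHIFT;
        cap |= (uint32_t)1024 << EX_LOGINS_PER_PORT_SHIFT;

        printf("ios per conn:    %u\n", (unsigned)((cap & EX_IOS_PER_CONN_MASK) >> EX_IOS_PER_CONN_SHIFT));
        printf("logins per port: %u\n", (unsigned)((cap & EX_LOGINS_PER_PORT_MASK) >> EX_LOGINS_PER_PORT_SHIFT));
        return 0;
}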
@@ -1329,33 +1397,40 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
struct net_device *netdev,
enum fip_state fip_mode)
{
+ struct fcoe_ctlr_device *ctlr_dev;
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
+ int size;
int rc = 0;
- interface = kzalloc(sizeof(*interface), GFP_KERNEL);
- if (!interface) {
+ size = (sizeof(*interface) + sizeof(struct fcoe_ctlr));
+ ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &bnx2fc_fcoe_sysfs_templ,
+ size);
+ if (!ctlr_dev) {
printk(KERN_ERR PFX "Unable to allocate interface structure\n");
return NULL;
}
+ ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ interface = fcoe_ctlr_priv(ctlr);
dev_hold(netdev);
kref_init(&interface->kref);
interface->hba = hba;
interface->netdev = netdev;
/* Initialize FIP */
- fcoe_ctlr_init(&interface->ctlr, fip_mode);
- interface->ctlr.send = bnx2fc_fip_send;
- interface->ctlr.update_mac = bnx2fc_update_src_mac;
- interface->ctlr.get_src_addr = bnx2fc_get_src_mac;
+ fcoe_ctlr_init(ctlr, fip_mode);
+ ctlr->send = bnx2fc_fip_send;
+ ctlr->update_mac = bnx2fc_update_src_mac;
+ ctlr->get_src_addr = bnx2fc_get_src_mac;
set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
rc = bnx2fc_interface_setup(interface);
if (!rc)
return interface;
- fcoe_ctlr_destroy(&interface->ctlr);
+ fcoe_ctlr_destroy(ctlr);
dev_put(netdev);
- kfree(interface);
+ fcoe_ctlr_device_delete(ctlr_dev);
return NULL;
}
@@ -1373,6 +1448,7 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
struct device *parent, int npiv)
{
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct fc_lport *lport, *n_port;
struct fcoe_port *port;
struct Scsi_Host *shost;
@@ -1383,7 +1459,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
if (!blport) {
- BNX2FC_HBA_DBG(interface->ctlr.lp, "Unable to alloc blport\n");
+ BNX2FC_HBA_DBG(ctlr->lp, "Unable to alloc blport\n");
return NULL;
}
@@ -1479,7 +1555,8 @@ static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface)
static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface)
{
- struct fc_lport *lport = interface->ctlr.lp;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+ struct fc_lport *lport = ctlr->lp;
struct fcoe_port *port = lport_priv(lport);
struct bnx2fc_hba *hba = interface->hba;
@@ -1519,7 +1596,8 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
{
- struct fc_lport *lport = interface->ctlr.lp;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
+ struct fc_lport *lport = ctlr->lp;
struct fcoe_port *port = lport_priv(lport);
bnx2fc_interface_cleanup(interface);
@@ -1543,13 +1621,15 @@ static int bnx2fc_destroy(struct net_device *netdev)
{
struct bnx2fc_interface *interface = NULL;
struct workqueue_struct *timer_work_queue;
+ struct fcoe_ctlr *ctlr;
int rc = 0;
rtnl_lock();
mutex_lock(&bnx2fc_dev_lock);
interface = bnx2fc_interface_lookup(netdev);
- if (!interface || !interface->ctlr.lp) {
+ ctlr = bnx2fc_to_ctlr(interface);
+ if (!interface || !ctlr->lp) {
rc = -ENODEV;
printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n");
goto netdev_err;
@@ -1627,6 +1707,32 @@ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
hba->pcidev = NULL;
}
+/**
+ * bnx2fc_ulp_get_stats - cnic callback to populate FCoE stats
+ *
+ * @handle: transport handle pointing to adapter structure
+ */
+static int bnx2fc_ulp_get_stats(void *handle)
+{
+ struct bnx2fc_hba *hba = handle;
+ struct cnic_dev *cnic;
+ struct fcoe_stats_info *stats_addr;
+
+ if (!hba)
+ return -EINVAL;
+
+ cnic = hba->cnic;
+ stats_addr = &cnic->stats_addr->fcoe_stat;
+ if (!stats_addr)
+ return -EINVAL;
+
+ strncpy(stats_addr->version, BNX2FC_VERSION,
+ sizeof(stats_addr->version));
+ stats_addr->txq_size = BNX2FC_SQ_WQES_MAX;
+ stats_addr->rxq_size = BNX2FC_CQ_WQES_MAX;
+
+ return 0;
+}
/**
@@ -1646,6 +1752,7 @@ static void bnx2fc_ulp_start(void *handle)
{
struct bnx2fc_hba *hba = handle;
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
struct fc_lport *lport;
mutex_lock(&bnx2fc_dev_lock);
@@ -1657,7 +1764,8 @@ static void bnx2fc_ulp_start(void *handle)
list_for_each_entry(interface, &if_list, list) {
if (interface->hba == hba) {
- lport = interface->ctlr.lp;
+ ctlr = bnx2fc_to_ctlr(interface);
+ lport = ctlr->lp;
/* Kick off Fabric discovery*/
printk(KERN_ERR PFX "ulp_init: start discovery\n");
lport->tt.frame_send = bnx2fc_xmit;
@@ -1677,13 +1785,14 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport)
static void bnx2fc_stop(struct bnx2fc_interface *interface)
{
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct fc_lport *lport;
struct fc_lport *vport;
if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags))
return;
- lport = interface->ctlr.lp;
+ lport = ctlr->lp;
bnx2fc_port_shutdown(lport);
mutex_lock(&lport->lp_mutex);
@@ -1692,7 +1801,7 @@ static void bnx2fc_stop(struct bnx2fc_interface *interface)
FC_PORTTYPE_UNKNOWN;
mutex_unlock(&lport->lp_mutex);
fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
- fcoe_ctlr_link_down(&interface->ctlr);
+ fcoe_ctlr_link_down(ctlr);
fcoe_clean_pending_queue(lport);
}
@@ -1804,6 +1913,7 @@ exit:
static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
{
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct fc_lport *lport;
int wait_cnt = 0;
@@ -1814,18 +1924,18 @@ static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
return;
}
- lport = interface->ctlr.lp;
+ lport = ctlr->lp;
BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
if (!bnx2fc_link_ok(lport) && interface->enabled) {
BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
- fcoe_ctlr_link_up(&interface->ctlr);
+ fcoe_ctlr_link_up(ctlr);
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
}
/* wait for the FCF to be selected before issuing FLOGI */
- while (!interface->ctlr.sel_fcf) {
+ while (!ctlr->sel_fcf) {
msleep(250);
/* give up after 3 secs */
if (++wait_cnt > 12)
@@ -1876,6 +1986,7 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
adapter_count++;
mutex_unlock(&bnx2fc_dev_lock);
+ dev->fcoe_cap = &hba->fcoe_cap;
clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
rc = dev->register_device(dev, CNIC_ULP_FCOE,
(void *) hba);
@@ -1889,19 +2000,21 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
static int bnx2fc_disable(struct net_device *netdev)
{
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
int rc = 0;
rtnl_lock();
mutex_lock(&bnx2fc_dev_lock);
interface = bnx2fc_interface_lookup(netdev);
- if (!interface || !interface->ctlr.lp) {
+ ctlr = bnx2fc_to_ctlr(interface);
+ if (!interface || !ctlr->lp) {
rc = -ENODEV;
printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
} else {
interface->enabled = false;
- fcoe_ctlr_link_down(&interface->ctlr);
- fcoe_clean_pending_queue(interface->ctlr.lp);
+ fcoe_ctlr_link_down(ctlr);
+ fcoe_clean_pending_queue(ctlr->lp);
}
mutex_unlock(&bnx2fc_dev_lock);
@@ -1913,17 +2026,19 @@ static int bnx2fc_disable(struct net_device *netdev)
static int bnx2fc_enable(struct net_device *netdev)
{
struct bnx2fc_interface *interface;
+ struct fcoe_ctlr *ctlr;
int rc = 0;
rtnl_lock();
mutex_lock(&bnx2fc_dev_lock);
interface = bnx2fc_interface_lookup(netdev);
- if (!interface || !interface->ctlr.lp) {
+ ctlr = bnx2fc_to_ctlr(interface);
+ if (!interface || !ctlr->lp) {
rc = -ENODEV;
printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
- } else if (!bnx2fc_link_ok(interface->ctlr.lp)) {
- fcoe_ctlr_link_up(&interface->ctlr);
+ } else if (!bnx2fc_link_ok(ctlr->lp)) {
+ fcoe_ctlr_link_up(ctlr);
interface->enabled = true;
}
@@ -1944,6 +2059,7 @@ static int bnx2fc_enable(struct net_device *netdev)
*/
static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
{
+ struct fcoe_ctlr *ctlr;
struct bnx2fc_interface *interface;
struct bnx2fc_hba *hba;
struct net_device *phys_dev;
@@ -2010,6 +2126,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
goto ifput_err;
}
+ ctlr = bnx2fc_to_ctlr(interface);
interface->vlan_id = vlan_id;
interface->vlan_enabled = 1;
@@ -2035,10 +2152,10 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
lport->boot_time = jiffies;
/* Make this master N_port */
- interface->ctlr.lp = lport;
+ ctlr->lp = lport;
if (!bnx2fc_link_ok(lport)) {
- fcoe_ctlr_link_up(&interface->ctlr);
+ fcoe_ctlr_link_up(ctlr);
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
}
@@ -2439,6 +2556,19 @@ static void __exit bnx2fc_mod_exit(void)
module_init(bnx2fc_mod_init);
module_exit(bnx2fc_mod_exit);
+static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = {
+ .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
+ .get_fcoe_ctlr_link_fail = bnx2fc_ctlr_get_lesb,
+ .get_fcoe_ctlr_vlink_fail = bnx2fc_ctlr_get_lesb,
+ .get_fcoe_ctlr_miss_fka = bnx2fc_ctlr_get_lesb,
+ .get_fcoe_ctlr_symb_err = bnx2fc_ctlr_get_lesb,
+ .get_fcoe_ctlr_err_block = bnx2fc_ctlr_get_lesb,
+ .get_fcoe_ctlr_fcs_error = bnx2fc_ctlr_get_lesb,
+
+ .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
+ .get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id,
+};
+
static struct fc_function_template bnx2fc_transport_function = {
.show_host_node_name = 1,
.show_host_port_name = 1,
@@ -2556,4 +2686,5 @@ static struct cnic_ulp_ops bnx2fc_cnic_cb = {
.cnic_stop = bnx2fc_ulp_stop,
.indicate_kcqes = bnx2fc_indicate_kcqe,
.indicate_netevent = bnx2fc_indicate_netevent,
+ .cnic_get_stats = bnx2fc_ulp_get_stats,
};
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index afd5709..2ca6bfe 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -167,6 +167,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
{
struct fc_lport *lport = port->lport;
struct bnx2fc_interface *interface = port->priv;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct bnx2fc_hba *hba = interface->hba;
struct kwqe *kwqe_arr[4];
struct fcoe_kwqe_conn_offload1 ofld_req1;
@@ -314,13 +315,13 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
- ofld_req4.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
+ ofld_req4.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
/* fcf mac */
- ofld_req4.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
- ofld_req4.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
- ofld_req4.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
- ofld_req4.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
- ofld_req4.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
+ ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
+ ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+ ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+ ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+ ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
@@ -351,6 +352,7 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
{
struct kwqe *kwqe_arr[2];
struct bnx2fc_interface *interface = port->priv;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct bnx2fc_hba *hba = interface->hba;
struct fcoe_kwqe_conn_enable_disable enbl_req;
struct fc_lport *lport = port->lport;
@@ -374,12 +376,12 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
- enbl_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
- enbl_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
- enbl_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
- enbl_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
- enbl_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
- enbl_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
+ enbl_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
+ enbl_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
+ enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+ enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+ enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+ enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
port_id = fc_host_port_id(lport->host);
if (port_id != tgt->sid) {
@@ -419,6 +421,7 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
struct bnx2fc_rport *tgt)
{
struct bnx2fc_interface *interface = port->priv;
+ struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
struct bnx2fc_hba *hba = interface->hba;
struct fcoe_kwqe_conn_enable_disable disable_req;
struct kwqe *kwqe_arr[2];
@@ -440,12 +443,12 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];
- disable_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5];
- disable_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4];
- disable_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3];
- disable_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2];
- disable_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1];
- disable_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0];
+ disable_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
+ disable_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
+ disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
+ disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
+ disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
+ disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
port_id = tgt->sid;
disable_req.s_id[0] = (port_id & 0x000000FF);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index e897ce9..4f7453b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -810,8 +810,22 @@ retry_tmf:
spin_lock_bh(&tgt->tgt_lock);
io_req->wait_for_comp = 0;
- if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags)))
+ if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
+ if (io_req->on_tmf_queue) {
+ list_del_init(&io_req->link);
+ io_req->on_tmf_queue = 0;
+ }
+ io_req->wait_for_comp = 1;
+ bnx2fc_initiate_cleanup(io_req);
+ spin_unlock_bh(&tgt->tgt_lock);
+ rc = wait_for_completion_timeout(&io_req->tm_done,
+ BNX2FC_FW_TIMEOUT);
+ spin_lock_bh(&tgt->tgt_lock);
+ io_req->wait_for_comp = 0;
+ if (!rc)
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ }
spin_unlock_bh(&tgt->tgt_lock);
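
The new timeout handling above follows a common kernel shape: a spinlock cannot be held across a sleeping wait, so tgt_lock is dropped, wait_for_completion_timeout() is called, and the lock is retaken before the result is inspected (a return of 0 means the wait timed out, and the extra command reference is dropped). A minimal sketch of that shape, with illustrative names and timeout:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Sketch only: release the spinlock around the sleeping wait and retake
 * it before examining the outcome.  The 10 * HZ timeout is illustrative. */
static bool example_wait_for_cleanup(spinlock_t *lock, struct completion *done)
{
        unsigned long left;

        spin_unlock_bh(lock);
        left = wait_for_completion_timeout(done, 10 * HZ);
        spin_lock_bh(lock);

        return left != 0;       /* false: the completion never fired */
}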
@@ -1089,6 +1103,48 @@ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}
+int bnx2fc_expl_logo(struct fc_lport *lport, struct bnx2fc_cmd *io_req)
+{
+ struct bnx2fc_rport *tgt = io_req->tgt;
+ struct fc_rport_priv *rdata = tgt->rdata;
+ int logo_issued;
+ int rc = SUCCESS;
+ int wait_cnt = 0;
+
+ BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
+ tgt->flags);
+ logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
+ &tgt->flags);
+ io_req->wait_for_comp = 1;
+ bnx2fc_initiate_cleanup(io_req);
+
+ spin_unlock_bh(&tgt->tgt_lock);
+
+ wait_for_completion(&io_req->tm_done);
+
+ io_req->wait_for_comp = 0;
+ /*
+ * release the reference taken in eh_abort to allow the
+ * target to re-login after flushing IOs
+ */
+ kref_put(&io_req->refcount, bnx2fc_cmd_release);
+
+ if (!logo_issued) {
+ clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->tt.rport_logoff(rdata);
+ mutex_unlock(&lport->disc.disc_mutex);
+ do {
+ msleep(BNX2FC_RELOGIN_WAIT_TIME);
+ if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT) {
+ rc = FAILED;
+ break;
+ }
+ } while (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags));
+ }
+ spin_lock_bh(&tgt->tgt_lock);
+ return rc;
+}
/**
* bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
* SCSI command
@@ -1103,10 +1159,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
struct fc_rport_libfc_priv *rp = rport->dd_data;
struct bnx2fc_cmd *io_req;
struct fc_lport *lport;
- struct fc_rport_priv *rdata;
struct bnx2fc_rport *tgt;
- int logo_issued;
- int wait_cnt = 0;
int rc = FAILED;
@@ -1183,58 +1236,31 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
list_add_tail(&io_req->link, &tgt->io_retire_queue);
init_completion(&io_req->tm_done);
- io_req->wait_for_comp = 1;
- if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
- /* Cancel the current timer running on this io_req */
- if (cancel_delayed_work(&io_req->timeout_work))
- kref_put(&io_req->refcount,
- bnx2fc_cmd_release); /* drop timer hold */
- set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
- rc = bnx2fc_initiate_abts(io_req);
- } else {
+ if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
"already in abts processing\n", io_req->xid);
if (cancel_delayed_work(&io_req->timeout_work))
kref_put(&io_req->refcount,
bnx2fc_cmd_release); /* drop timer hold */
- bnx2fc_initiate_cleanup(io_req);
+ rc = bnx2fc_expl_logo(lport, io_req);
+ goto out;
+ }
+ /* Cancel the current timer running on this io_req */
+ if (cancel_delayed_work(&io_req->timeout_work))
+ kref_put(&io_req->refcount,
+ bnx2fc_cmd_release); /* drop timer hold */
+ set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
+ io_req->wait_for_comp = 1;
+ rc = bnx2fc_initiate_abts(io_req);
+ if (rc == FAILED) {
+ bnx2fc_initiate_cleanup(io_req);
spin_unlock_bh(&tgt->tgt_lock);
-
wait_for_completion(&io_req->tm_done);
-
spin_lock_bh(&tgt->tgt_lock);
io_req->wait_for_comp = 0;
- rdata = io_req->tgt->rdata;
- logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
- &tgt->flags);
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- spin_unlock_bh(&tgt->tgt_lock);
-
- if (!logo_issued) {
- BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
- tgt->flags);
- mutex_lock(&lport->disc.disc_mutex);
- lport->tt.rport_logoff(rdata);
- mutex_unlock(&lport->disc.disc_mutex);
- do {
- msleep(BNX2FC_RELOGIN_WAIT_TIME);
- /*
- * If session not recovered, let SCSI-ml
- * escalate error recovery.
- */
- if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT)
- return FAILED;
- } while (!test_bit(BNX2FC_FLAG_SESSION_READY,
- &tgt->flags));
- }
- return SUCCESS;
- }
- if (rc == FAILED) {
- kref_put(&io_req->refcount, bnx2fc_cmd_release);
- spin_unlock_bh(&tgt->tgt_lock);
- return rc;
+ goto done;
}
spin_unlock_bh(&tgt->tgt_lock);
@@ -1247,7 +1273,8 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
/* Let the scsi-ml try to recover this command */
printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
io_req->xid);
- rc = FAILED;
+ rc = bnx2fc_expl_logo(lport, io_req);
+ goto out;
} else {
/*
* We come here even when there was a race condition
@@ -1259,9 +1286,10 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
bnx2fc_scsi_done(io_req, DID_ABORT);
kref_put(&io_req->refcount, bnx2fc_cmd_release);
}
-
+done:
/* release the reference taken in eh_abort */
kref_put(&io_req->refcount, bnx2fc_cmd_release);
+out:
spin_unlock_bh(&tgt->tgt_lock);
return rc;
}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index c1800b5..082a25c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -185,6 +185,16 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
BUG_ON(rc);
}
+ list_for_each_safe(list, tmp, &tgt->active_tm_queue) {
+ i++;
+ io_req = (struct bnx2fc_cmd *)list;
+ list_del_init(&io_req->link);
+ io_req->on_tmf_queue = 0;
+ BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
+ if (io_req->wait_for_comp)
+ complete(&io_req->tm_done);
+ }
+
list_for_each_safe(list, tmp, &tgt->els_queue) {
i++;
io_req = (struct bnx2fc_cmd *)list;
@@ -213,8 +223,17 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
- if (cancel_delayed_work(&io_req->timeout_work))
+ if (cancel_delayed_work(&io_req->timeout_work)) {
+ if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
+ &io_req->req_flags)) {
+ /* Handle eh_abort timeout */
+ BNX2FC_IO_DBG(io_req, "eh_abort for IO "
+ "in retire_q\n");
+ if (io_req->wait_for_comp)
+ complete(&io_req->tm_done);
+ }
kref_put(&io_req->refcount, bnx2fc_cmd_release);
+ }
clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
}
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index dc0a08e..f2db5fe 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -267,7 +267,13 @@ struct bnx2i_cmd_request {
* task statistics for write response
*/
struct bnx2i_write_resp_task_stat {
- u32 num_data_ins;
+#if defined(__BIG_ENDIAN)
+ u16 num_r2ts;
+ u16 num_data_outs;
+#elif defined(__LITTLE_ENDIAN)
+ u16 num_data_outs;
+ u16 num_r2ts;
+#endif
};
/*
@@ -275,11 +281,11 @@ struct bnx2i_write_resp_task_stat {
*/
struct bnx2i_read_resp_task_stat {
#if defined(__BIG_ENDIAN)
- u16 num_data_outs;
- u16 num_r2ts;
+ u16 reserved;
+ u16 num_data_ins;
#elif defined(__LITTLE_ENDIAN)
- u16 num_r2ts;
- u16 num_data_outs;
+ u16 num_data_ins;
+ u16 reserved;
#endif
};
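
The layout change above exists because the firmware reports these counters as two 16-bit halves of one 32-bit word: the structure must name the halves in the order the host actually stores them, which is why the field order flips between big- and little-endian builds. A self-contained illustration (userspace, using the compiler's byte-order macros) of why the flip keeps each field name pointing at the right half:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Illustration only: two 16-bit counters share one 32-bit word, and the
 * struct lists its halves in host storage order so the same field name
 * reads the same counter on either byte order. */
struct ex_write_stat {
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
        uint16_t num_r2ts;
        uint16_t num_data_outs;
#else
        uint16_t num_data_outs;
        uint16_t num_r2ts;
#endif
};

int main(void)
{
        /* num_r2ts = 3 in the high half, num_data_outs = 7 in the low
         * half of one host-order word. */
        uint32_t raw = (3u << 16) | 7u;
        struct ex_write_stat st;

        memcpy(&st, &raw, sizeof(st));
        printf("r2ts=%u data_outs=%u\n",
               (unsigned)st.num_r2ts, (unsigned)st.num_data_outs);  /* 3 and 7 */
        return 0;
}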
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 0c53c28..3f9e706 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -44,6 +44,8 @@
#include "57xx_iscsi_hsi.h"
#include "57xx_iscsi_constants.h"
+#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h"
+
#define BNX2_ISCSI_DRIVER_NAME "bnx2i"
#define BNX2I_MAX_ADAPTERS 8
@@ -126,6 +128,43 @@
#define REG_WR(__hba, offset, val) \
writel(val, __hba->regview + offset)
+#ifdef CONFIG_32BIT
+#define GET_STATS_64(__hba, dst, field) \
+ do { \
+ spin_lock_bh(&__hba->stat_lock); \
+ dst->field##_lo = __hba->stats.field##_lo; \
+ dst->field##_hi = __hba->stats.field##_hi; \
+ spin_unlock_bh(&__hba->stat_lock); \
+ } while (0)
+
+#define ADD_STATS_64(__hba, field, len) \
+ do { \
+ if (spin_trylock(&__hba->stat_lock)) { \
+ if (__hba->stats.field##_lo + len < \
+ __hba->stats.field##_lo) \
+ __hba->stats.field##_hi++; \
+ __hba->stats.field##_lo += len; \
+ spin_unlock(&__hba->stat_lock); \
+ } \
+ } while (0)
+
+#else
+#define GET_STATS_64(__hba, dst, field) \
+ do { \
+ u64 val, *out; \
+ \
+ val = __hba->bnx2i_stats.field; \
+ out = (u64 *)&__hba->stats.field##_lo; \
+ *out = cpu_to_le64(val); \
+ out = (u64 *)&dst->field##_lo; \
+ *out = cpu_to_le64(val); \
+ } while (0)
+
+#define ADD_STATS_64(__hba, field, len) \
+ do { \
+ __hba->bnx2i_stats.field += len; \
+ } while (0)
+#endif
/**
* struct generic_pdu_resc - login pdu resource structure
@@ -288,6 +327,15 @@ struct iscsi_cid_queue {
struct bnx2i_conn **conn_cid_tbl;
};
+
+struct bnx2i_stats_info {
+ u64 rx_pdus;
+ u64 rx_bytes;
+ u64 tx_pdus;
+ u64 tx_bytes;
+};
+
+
/**
* struct bnx2i_hba - bnx2i adapter structure
*
@@ -341,6 +389,8 @@ struct iscsi_cid_queue {
* @ctx_ccell_tasks: captures number of ccells and tasks supported by
* currently offloaded connection, used to decode
* context memory
+ * @stat_lock: spin lock used by the statistic collector (32 bit)
+ * @stats: local iSCSI statistic collection place holder
*
* Adapter Data Structure
*/
@@ -350,6 +400,7 @@ struct bnx2i_hba {
struct pci_dev *pcidev;
struct net_device *netdev;
void __iomem *regview;
+ resource_size_t reg_base;
u32 age;
unsigned long cnic_dev_type;
@@ -426,6 +477,12 @@ struct bnx2i_hba {
u32 num_sess_opened;
u32 num_conn_opened;
unsigned int ctx_ccell_tasks;
+
+#ifdef CONFIG_32BIT
+ spinlock_t stat_lock;
+#endif
+ struct bnx2i_stats_info bnx2i_stats;
+ struct iscsi_stats_info stats;
};
@@ -749,6 +806,8 @@ extern void bnx2i_ulp_init(struct cnic_dev *dev);
extern void bnx2i_ulp_exit(struct cnic_dev *dev);
extern void bnx2i_start(void *handle);
extern void bnx2i_stop(void *handle);
+extern int bnx2i_get_stats(void *handle);
+
extern struct bnx2i_hba *get_adapter_list_head(void);
struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
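
The GET_STATS_64()/ADD_STATS_64() macros added to bnx2i.h above keep each 64-bit counter as lo/hi halves under a spinlock on 32-bit builds, detecting carry by checking whether the low word wrapped below its previous value; 64-bit builds use native u64 arithmetic instead. A self-contained illustration of the wrap test:

#include <stdio.h>
#include <stdint.h>

/* Illustration of the carry test in ADD_STATS_64: an overflow of the
 * low word is detected because the sum wraps below its old value, and
 * the high word is then incremented by hand. */
static void add64(uint32_t *hi, uint32_t *lo, uint32_t len)
{
        if (*lo + len < *lo)    /* unsigned wrap => carry into the high word */
                (*hi)++;
        *lo += len;
}

int main(void)
{
        uint32_t hi = 0, lo = 0xfffffff0u;

        add64(&hi, &lo, 0x20);
        printf("hi=%u lo=0x%x\n", (unsigned)hi, (unsigned)lo);  /* hi=1 lo=0x10 */
        return 0;
}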
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index ece47e5..33d6630 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1350,6 +1350,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
struct cqe *cqe)
{
struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct bnx2i_hba *hba = bnx2i_conn->hba;
struct bnx2i_cmd_response *resp_cqe;
struct bnx2i_cmd *bnx2i_cmd;
struct iscsi_task *task;
@@ -1367,16 +1368,26 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
conn->datain_pdus_cnt +=
- resp_cqe->task_stat.read_stat.num_data_outs;
+ resp_cqe->task_stat.read_stat.num_data_ins;
conn->rxdata_octets +=
bnx2i_cmd->req.total_data_transfer_length;
+ ADD_STATS_64(hba, rx_pdus,
+ resp_cqe->task_stat.read_stat.num_data_ins);
+ ADD_STATS_64(hba, rx_bytes,
+ bnx2i_cmd->req.total_data_transfer_length);
} else {
conn->dataout_pdus_cnt +=
- resp_cqe->task_stat.read_stat.num_data_outs;
+ resp_cqe->task_stat.write_stat.num_data_outs;
conn->r2t_pdus_cnt +=
- resp_cqe->task_stat.read_stat.num_r2ts;
+ resp_cqe->task_stat.write_stat.num_r2ts;
conn->txdata_octets +=
bnx2i_cmd->req.total_data_transfer_length;
+ ADD_STATS_64(hba, tx_pdus,
+ resp_cqe->task_stat.write_stat.num_data_outs);
+ ADD_STATS_64(hba, tx_bytes,
+ bnx2i_cmd->req.total_data_transfer_length);
+ ADD_STATS_64(hba, rx_pdus,
+ resp_cqe->task_stat.write_stat.num_r2ts);
}
bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
@@ -1961,6 +1972,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
{
struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
+ struct bnx2i_hba *hba = bnx2i_conn->hba;
struct qp_info *qp;
struct bnx2i_nop_in_msg *nopin;
int tgt_async_msg;
@@ -1973,7 +1985,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
if (!qp->cq_virt) {
printk(KERN_ALERT "bnx2i (%s): cq resr freed in bh execution!",
- bnx2i_conn->hba->netdev->name);
+ hba->netdev->name);
goto out;
}
while (1) {
@@ -1985,9 +1997,9 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
if (nopin->op_code == ISCSI_OP_NOOP_IN &&
nopin->itt == (u16) RESERVED_ITT) {
printk(KERN_ALERT "bnx2i: Unsolicited "
- "NOP-In detected for suspended "
- "connection dev=%s!\n",
- bnx2i_conn->hba->netdev->name);
+ "NOP-In detected for suspended "
+ "connection dev=%s!\n",
+ hba->netdev->name);
bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
goto cqe_out;
}
@@ -2001,7 +2013,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
/* Run the kthread engine only for data cmds
All other cmds will be completed in this bh! */
bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin);
- break;
+ goto done;
case ISCSI_OP_LOGIN_RSP:
bnx2i_process_login_resp(session, bnx2i_conn,
qp->cq_cons_qe);
@@ -2044,11 +2056,15 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
nopin->op_code);
}
+
+ ADD_STATS_64(hba, rx_pdus, 1);
+ ADD_STATS_64(hba, rx_bytes, nopin->data_length);
+done:
if (!tgt_async_msg) {
if (!atomic_read(&bnx2i_conn->ep->num_active_cmds))
printk(KERN_ALERT "bnx2i (%s): no active cmd! "
"op 0x%x\n",
- bnx2i_conn->hba->netdev->name,
+ hba->netdev->name,
nopin->op_code);
else
atomic_dec(&bnx2i_conn->ep->num_active_cmds);
@@ -2692,6 +2708,7 @@ struct cnic_ulp_ops bnx2i_cnic_cb = {
.cm_remote_close = bnx2i_cm_remote_close,
.cm_remote_abort = bnx2i_cm_remote_abort,
.iscsi_nl_send_msg = bnx2i_send_nl_mesg,
+ .cnic_get_stats = bnx2i_get_stats,
.owner = THIS_MODULE
};
@@ -2724,7 +2741,6 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
goto arm_cq;
}
- reg_base = ep->hba->netdev->base_addr;
if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
(ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
@@ -2740,7 +2756,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
/* 5709 device in normal node and 5706/5708 devices */
reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
- ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
+ ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off,
MB_KERNEL_CTX_SIZE);
if (!ep->qp.ctx_base)
return -ENOMEM;
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 8b68167..b17637a 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -381,6 +381,46 @@ void bnx2i_ulp_exit(struct cnic_dev *dev)
/**
+ * bnx2i_get_stats - Retrieve various statistics from iSCSI offload
+ * @handle: bnx2i_hba
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ * retrieve various iSCSI offload related statistics.
+ */
+int bnx2i_get_stats(void *handle)
+{
+ struct bnx2i_hba *hba = handle;
+ struct iscsi_stats_info *stats;
+
+ if (!hba)
+ return -EINVAL;
+
+ stats = (struct iscsi_stats_info *)hba->cnic->stats_addr;
+
+ if (!stats)
+ return -ENOMEM;
+
+ strlcpy(stats->version, DRV_MODULE_VERSION, sizeof(stats->version));
+ memcpy(stats->mac_add1 + 2, hba->cnic->mac_addr, ETH_ALEN);
+
+ stats->max_frame_size = hba->netdev->mtu;
+ stats->txq_size = hba->max_sqes;
+ stats->rxq_size = hba->max_cqes;
+
+ stats->txq_avg_depth = 0;
+ stats->rxq_avg_depth = 0;
+
+ GET_STATS_64(hba, stats, rx_pdus);
+ GET_STATS_64(hba, stats, rx_bytes);
+
+ GET_STATS_64(hba, stats, tx_pdus);
+ GET_STATS_64(hba, stats, tx_bytes);
+
+ return 0;
+}
+
+
+/**
* bnx2i_percpu_thread_create - Create a receive thread for an
* online CPU
*
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index f8d516b..3b34c13 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -811,13 +811,13 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
bnx2i_identify_device(hba);
bnx2i_setup_host_queue_size(hba, shost);
+ hba->reg_base = pci_resource_start(hba->pcidev, 0);
if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
- hba->regview = ioremap_nocache(hba->netdev->base_addr,
- BNX2_MQ_CONFIG2);
+ hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2);
if (!hba->regview)
goto ioreg_map_err;
} else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
- hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
+ hba->regview = pci_iomap(hba->pcidev, 0, 4096);
if (!hba->regview)
goto ioreg_map_err;
}
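
The register window is now located from the PCI BAR (hba->reg_base = pci_resource_start(..., 0)) and mapped with pci_iomap() instead of relying on netdev->base_addr, and pci_iomap() mappings are torn down with pci_iounmap(), which is why the error and free paths below change as well. A minimal sketch of the pairing, with an illustrative window size:

#include <linux/pci.h>

/* Sketch only: map a bounded window of BAR 0 with pci_iomap() and
 * release it with pci_iounmap().  The 4096-byte length is illustrative. */
static void __iomem *example_map_regs(struct pci_dev *pdev)
{
        return pci_iomap(pdev, 0, 4096);        /* NULL on failure */
}

static void example_unmap_regs(struct pci_dev *pdev, void __iomem *regs)
{
        if (regs)
                pci_iounmap(pdev, regs);
}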
@@ -874,6 +874,11 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
hba->conn_ctx_destroy_tmo = 2 * HZ;
}
+#ifdef CONFIG_32BIT
+ spin_lock_init(&hba->stat_lock);
+#endif
+ memset(&hba->stats, 0, sizeof(struct iscsi_stats_info));
+
if (iscsi_host_add(shost, &hba->pcidev->dev))
goto free_dump_mem;
return hba;
@@ -884,7 +889,7 @@ cid_que_err:
bnx2i_free_mp_bdt(hba);
mp_bdt_mem_err:
if (hba->regview) {
- iounmap(hba->regview);
+ pci_iounmap(hba->pcidev, hba->regview);
hba->regview = NULL;
}
ioreg_map_err:
@@ -910,7 +915,7 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
pci_dev_put(hba->pcidev);
if (hba->regview) {
- iounmap(hba->regview);
+ pci_iounmap(hba->pcidev, hba->regview);
hba->regview = NULL;
}
bnx2i_free_mp_bdt(hba);
@@ -1181,12 +1186,18 @@ static int
bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
{
struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+ struct bnx2i_hba *hba = bnx2i_conn->hba;
struct bnx2i_cmd *cmd = task->dd_data;
memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
bnx2i_setup_cmd_wqe_template(cmd);
bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
+
+ /* Tx PDU/data length count */
+ ADD_STATS_64(hba, tx_pdus, 1);
+ ADD_STATS_64(hba, tx_bytes, task->data_count);
+
if (task->data_count) {
memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
task->data_count);
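
The bnx2i_alloc_hba()/bnx2i_free_hba() hunks above drop ioremap_nocache() on a netdev-supplied address in favour of pci_iomap() on BAR 0, pairing every unmap with pci_iounmap(). A minimal illustration of that pairing (function names are placeholders, not the driver's):

#include <linux/pci.h>

/*
 * pci_iomap(pdev, bar, maxlen) maps the first maxlen bytes of the BAR
 * (or the whole BAR if maxlen is 0); pci_iounmap() releases it.
 */
static void __iomem *example_map_bar0(struct pci_dev *pdev, unsigned long len)
{
        return pci_iomap(pdev, 0, len);
}

static void example_unmap_bar0(struct pci_dev *pdev, void __iomem *regs)
{
        if (regs)
                pci_iounmap(pdev, regs);
}
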
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 36739da..49692a1 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -966,7 +966,8 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->saddr.sin_addr.s_addr = chba->ipv4addr;
csk->rss_qid = 0;
- csk->l2t = t3_l2t_get(t3dev, dst, ndev);
+ csk->l2t = t3_l2t_get(t3dev, dst, ndev,
+ &csk->daddr.sin_addr.s_addr);
if (!csk->l2t) {
pr_err("NO l2t available.\n");
return -EINVAL;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 5a4a3bf..cc9a068 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1142,7 +1142,7 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
- n = dst_get_neighbour_noref(csk->dst);
+ n = dst_neigh_lookup(csk->dst, &csk->daddr.sin_addr.s_addr);
if (!n) {
pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
goto rel_resource;
@@ -1182,9 +1182,12 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
send_act_open_req(csk, skb, csk->l2t);
+ neigh_release(n);
return 0;
rel_resource:
+ if (n)
+ neigh_release(n);
if (skb)
__kfree_skb(skb);
return -EINVAL;
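
Both cxgb4i hunks above replace dst_get_neighbour_noref() with dst_neigh_lookup(), which hands back a referenced neighbour; the added neigh_release() calls on the success and error paths drop that reference. A short sketch of the rule, using only APIs that appear in the diff:

#include <net/dst.h>
#include <net/neighbour.h>

/*
 * Illustrative only: dst_neigh_lookup() returns a referenced neighbour,
 * so every exit path must drop it with neigh_release().
 */
static int example_with_neigh(struct dst_entry *dst, __be32 daddr)
{
        struct neighbour *n = dst_neigh_lookup(dst, &daddr);

        if (!n)
                return -ENODEV;

        /* ... transmit via n ... */

        neigh_release(n);
        return 0;
}
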
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index d9253db..b44c1cf 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -494,7 +494,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
goto err_out;
}
dst = &rt->dst;
- n = dst_get_neighbour_noref(dst);
+ n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr);
if (!n) {
err = -ENODEV;
goto rel_rt;
@@ -506,7 +506,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
ndev->name);
err = -ENETUNREACH;
- goto rel_rt;
+ goto rel_neigh;
}
if (ndev->flags & IFF_LOOPBACK) {
@@ -521,7 +521,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
pr_info("dst %pI4, %s, NOT cxgbi device.\n",
&daddr->sin_addr.s_addr, ndev->name);
err = -ENETUNREACH;
- goto rel_rt;
+ goto rel_neigh;
}
log_debug(1 << CXGBI_DBG_SOCK,
"route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
@@ -531,7 +531,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
csk = cxgbi_sock_create(cdev);
if (!csk) {
err = -ENOMEM;
- goto rel_rt;
+ goto rel_neigh;
}
csk->cdev = cdev;
csk->port_id = port;
@@ -541,9 +541,13 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
csk->daddr.sin_port = daddr->sin_port;
csk->daddr.sin_family = daddr->sin_family;
csk->saddr.sin_addr.s_addr = fl4.saddr;
+ neigh_release(n);
return csk;
+rel_neigh:
+ neigh_release(n);
+
rel_rt:
ip_rt_put(rt);
if (csk)
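
The libcxgbi hunks above retarget several error paths from rel_rt to a new rel_neigh label so that resources unwind in reverse order of acquisition: the neighbour reference first, then the route. An illustrative sketch of that cascading-label idiom with the same APIs (the NUD_VALID check stands in for whatever follow-on step might fail):

#include <net/route.h>
#include <net/dst.h>
#include <net/neighbour.h>

/*
 * Illustrative sketch of the unwind ordering above: release in reverse
 * order of acquisition, neighbour before route.
 */
static int example_route_and_neigh(struct rtable *rt, __be32 daddr)
{
        struct neighbour *n;
        int err;

        n = dst_neigh_lookup(&rt->dst, &daddr);
        if (!n) {
                err = -ENODEV;
                goto rel_rt;
        }

        if (!(n->nud_state & NUD_VALID)) {
                err = -ENETUNREACH;
                goto rel_neigh;
        }

        /* ... use the route and neighbour ... */

        neigh_release(n);
        ip_rt_put(rt);
        return 0;

rel_neigh:
        neigh_release(n);
rel_rt:
        ip_rt_put(rt);
        return err;
}
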
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
index f6d37d0..aed0f5d 100644
--- a/drivers/scsi/fcoe/Makefile
+++ b/drivers/scsi/fcoe/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_FCOE) += fcoe.o
obj-$(CONFIG_LIBFCOE) += libfcoe.o
-libfcoe-objs := fcoe_ctlr.o fcoe_transport.o
+libfcoe-objs := fcoe_ctlr.o fcoe_transport.o fcoe_sysfs.o
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 76e3d0b..fe30b1b 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -41,6 +41,7 @@
#include <scsi/fc/fc_encaps.h>
#include <scsi/fc/fc_fip.h>
+#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
@@ -150,6 +151,21 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
static int fcoe_vport_disable(struct fc_vport *, bool disable);
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
+static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *);
+static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
+
+static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
+ .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
+ .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
+
+ .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
+ .get_fcoe_fcf_vlan_id = fcoe_fcf_get_vlan_id,
+};
static struct libfc_function_template fcoe_libfc_fcn_templ = {
.frame_send = fcoe_xmit,
@@ -282,7 +298,7 @@ static struct scsi_host_template fcoe_shost_template = {
static int fcoe_interface_setup(struct fcoe_interface *fcoe,
struct net_device *netdev)
{
- struct fcoe_ctlr *fip = &fcoe->ctlr;
+ struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
struct netdev_hw_addr *ha;
struct net_device *real_dev;
u8 flogi_maddr[ETH_ALEN];
@@ -366,7 +382,10 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
enum fip_state fip_mode)
{
+ struct fcoe_ctlr_device *ctlr_dev;
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
+ int size;
int err;
if (!try_module_get(THIS_MODULE)) {
@@ -376,27 +395,32 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
goto out;
}
- fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
- if (!fcoe) {
- FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
+ size = sizeof(struct fcoe_ctlr) + sizeof(struct fcoe_interface);
+ ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &fcoe_sysfs_templ,
+ size);
+ if (!ctlr_dev) {
+ FCOE_DBG("Failed to add fcoe_ctlr_device\n");
fcoe = ERR_PTR(-ENOMEM);
goto out_putmod;
}
+ ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ fcoe = fcoe_ctlr_priv(ctlr);
+
dev_hold(netdev);
/*
* Initialize FIP.
*/
- fcoe_ctlr_init(&fcoe->ctlr, fip_mode);
- fcoe->ctlr.send = fcoe_fip_send;
- fcoe->ctlr.update_mac = fcoe_update_src_mac;
- fcoe->ctlr.get_src_addr = fcoe_get_src_mac;
+ fcoe_ctlr_init(ctlr, fip_mode);
+ ctlr->send = fcoe_fip_send;
+ ctlr->update_mac = fcoe_update_src_mac;
+ ctlr->get_src_addr = fcoe_get_src_mac;
err = fcoe_interface_setup(fcoe, netdev);
if (err) {
- fcoe_ctlr_destroy(&fcoe->ctlr);
- kfree(fcoe);
+ fcoe_ctlr_destroy(ctlr);
+ fcoe_ctlr_device_delete(ctlr_dev);
dev_put(netdev);
fcoe = ERR_PTR(err);
goto out_putmod;
@@ -419,7 +443,7 @@ out:
static void fcoe_interface_remove(struct fcoe_interface *fcoe)
{
struct net_device *netdev = fcoe->netdev;
- struct fcoe_ctlr *fip = &fcoe->ctlr;
+ struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
u8 flogi_maddr[ETH_ALEN];
const struct net_device_ops *ops;
@@ -462,7 +486,8 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe)
static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
{
struct net_device *netdev = fcoe->netdev;
- struct fcoe_ctlr *fip = &fcoe->ctlr;
+ struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
+ struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
rtnl_lock();
if (!fcoe->removed)
@@ -472,8 +497,8 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
/* Release the self-reference taken during fcoe_interface_create() */
/* tear-down the FCoE controller */
fcoe_ctlr_destroy(fip);
- scsi_host_put(fcoe->ctlr.lp->host);
- kfree(fcoe);
+ scsi_host_put(fip->lp->host);
+ fcoe_ctlr_device_delete(ctlr_dev);
dev_put(netdev);
module_put(THIS_MODULE);
}
@@ -493,9 +518,11 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
struct net_device *orig_dev)
{
struct fcoe_interface *fcoe;
+ struct fcoe_ctlr *ctlr;
fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
- fcoe_ctlr_recv(&fcoe->ctlr, skb);
+ ctlr = fcoe_to_ctlr(fcoe);
+ fcoe_ctlr_recv(ctlr, skb);
return 0;
}
@@ -645,11 +672,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
u32 mfs;
u64 wwnn, wwpn;
struct fcoe_interface *fcoe;
+ struct fcoe_ctlr *ctlr;
struct fcoe_port *port;
/* Setup lport private data to point to fcoe softc */
port = lport_priv(lport);
fcoe = port->priv;
+ ctlr = fcoe_to_ctlr(fcoe);
/*
* Determine max frame size based on underlying device and optional
@@ -676,10 +705,10 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
if (!lport->vport) {
if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
- wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0);
+ wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, 1, 0);
fc_set_wwnn(lport, wwnn);
if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
- wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr,
+ wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
2, 0);
fc_set_wwpn(lport, wwpn);
}
@@ -1056,6 +1085,7 @@ static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
struct device *parent, int npiv)
{
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
struct net_device *netdev = fcoe->netdev;
struct fc_lport *lport, *n_port;
struct fcoe_port *port;
@@ -1119,7 +1149,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
}
/* Initialize the library */
- rc = fcoe_libfc_config(lport, &fcoe->ctlr, &fcoe_libfc_fcn_templ, 1);
+ rc = fcoe_libfc_config(lport, ctlr, &fcoe_libfc_fcn_templ, 1);
if (rc) {
FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
"interface\n");
@@ -1386,6 +1416,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
{
struct fc_lport *lport;
struct fcoe_rcv_info *fr;
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fc_frame_header *fh;
struct fcoe_percpu_s *fps;
@@ -1393,7 +1424,8 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
unsigned int cpu;
fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
- lport = fcoe->ctlr.lp;
+ ctlr = fcoe_to_ctlr(fcoe);
+ lport = ctlr->lp;
if (unlikely(!lport)) {
FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
goto err2;
@@ -1409,8 +1441,8 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
eh = eth_hdr(skb);
- if (is_fip_mode(&fcoe->ctlr) &&
- compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) {
+ if (is_fip_mode(ctlr) &&
+ compare_ether_addr(eh->h_source, ctlr->dest_addr)) {
FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
eh->h_source);
goto err;
@@ -1544,6 +1576,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
unsigned int elen; /* eth header, may include vlan */
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
u8 sof, eof;
struct fcoe_hdr *hp;
@@ -1559,7 +1592,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
}
if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
- fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb))
+ fcoe_ctlr_els_send(ctlr, lport, skb))
return 0;
sof = fr_sof(fp);
@@ -1623,12 +1656,12 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
/* fill up mac and fcoe headers */
eh = eth_hdr(skb);
eh->h_proto = htons(ETH_P_FCOE);
- memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);
- if (fcoe->ctlr.map_dest)
+ memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
+ if (ctlr->map_dest)
memcpy(eh->h_dest + 3, fh->fh_d_id, 3);
- if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
- memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
+ if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
+ memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
else
memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
@@ -1677,6 +1710,7 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb)
static inline int fcoe_filter_frames(struct fc_lport *lport,
struct fc_frame *fp)
{
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fc_frame_header *fh;
struct sk_buff *skb = (struct sk_buff *)fp;
@@ -1698,7 +1732,8 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
return 0;
fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
- if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
+ ctlr = fcoe_to_ctlr(fcoe);
+ if (is_fip_mode(ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
return -EINVAL;
@@ -1877,6 +1912,7 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
ulong event, void *ptr)
{
struct dcb_app_type *entry = ptr;
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct net_device *netdev;
struct fcoe_port *port;
@@ -1894,6 +1930,8 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
if (!fcoe)
return NOTIFY_OK;
+ ctlr = fcoe_to_ctlr(fcoe);
+
if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
prio = ffs(entry->app.priority) - 1;
else
@@ -1904,10 +1942,10 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
if (entry->app.protocol == ETH_P_FIP ||
entry->app.protocol == ETH_P_FCOE)
- fcoe->ctlr.priority = prio;
+ ctlr->priority = prio;
if (entry->app.protocol == ETH_P_FCOE) {
- port = lport_priv(fcoe->ctlr.lp);
+ port = lport_priv(ctlr->lp);
port->priority = prio;
}
@@ -1929,6 +1967,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
{
struct fc_lport *lport = NULL;
struct net_device *netdev = ptr;
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fcoe_port *port;
struct fcoe_dev_stats *stats;
@@ -1938,7 +1977,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
list_for_each_entry(fcoe, &fcoe_hostlist, list) {
if (fcoe->netdev == netdev) {
- lport = fcoe->ctlr.lp;
+ ctlr = fcoe_to_ctlr(fcoe);
+ lport = ctlr->lp;
break;
}
}
@@ -1967,7 +2007,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
break;
case NETDEV_UNREGISTER:
list_del(&fcoe->list);
- port = lport_priv(fcoe->ctlr.lp);
+ port = lport_priv(ctlr->lp);
queue_work(fcoe_wq, &port->destroy_work);
goto out;
break;
@@ -1982,8 +2022,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
fcoe_link_speed_update(lport);
if (link_possible && !fcoe_link_ok(lport))
- fcoe_ctlr_link_up(&fcoe->ctlr);
- else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
+ fcoe_ctlr_link_up(ctlr);
+ else if (fcoe_ctlr_link_down(ctlr)) {
stats = per_cpu_ptr(lport->dev_stats, get_cpu());
stats->LinkFailureCount++;
put_cpu();
@@ -2003,6 +2043,7 @@ out:
*/
static int fcoe_disable(struct net_device *netdev)
{
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
int rc = 0;
@@ -2013,8 +2054,9 @@ static int fcoe_disable(struct net_device *netdev)
rtnl_unlock();
if (fcoe) {
- fcoe_ctlr_link_down(&fcoe->ctlr);
- fcoe_clean_pending_queue(fcoe->ctlr.lp);
+ ctlr = fcoe_to_ctlr(fcoe);
+ fcoe_ctlr_link_down(ctlr);
+ fcoe_clean_pending_queue(ctlr->lp);
} else
rc = -ENODEV;
@@ -2032,6 +2074,7 @@ static int fcoe_disable(struct net_device *netdev)
*/
static int fcoe_enable(struct net_device *netdev)
{
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
int rc = 0;
@@ -2040,11 +2083,17 @@ static int fcoe_enable(struct net_device *netdev)
fcoe = fcoe_hostlist_lookup_port(netdev);
rtnl_unlock();
- if (!fcoe)
+ if (!fcoe) {
rc = -ENODEV;
- else if (!fcoe_link_ok(fcoe->ctlr.lp))
- fcoe_ctlr_link_up(&fcoe->ctlr);
+ goto out;
+ }
+
+ ctlr = fcoe_to_ctlr(fcoe);
+
+ if (!fcoe_link_ok(ctlr->lp))
+ fcoe_ctlr_link_up(ctlr);
+out:
mutex_unlock(&fcoe_config_mutex);
return rc;
}
@@ -2059,6 +2108,7 @@ static int fcoe_enable(struct net_device *netdev)
*/
static int fcoe_destroy(struct net_device *netdev)
{
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fc_lport *lport;
struct fcoe_port *port;
@@ -2071,7 +2121,8 @@ static int fcoe_destroy(struct net_device *netdev)
rc = -ENODEV;
goto out_nodev;
}
- lport = fcoe->ctlr.lp;
+ ctlr = fcoe_to_ctlr(fcoe);
+ lport = ctlr->lp;
port = lport_priv(lport);
list_del(&fcoe->list);
queue_work(fcoe_wq, &port->destroy_work);
@@ -2126,7 +2177,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
int dcbx;
u8 fup, up;
struct net_device *netdev = fcoe->realdev;
- struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
+ struct fcoe_port *port = lport_priv(ctlr->lp);
struct dcb_app app = {
.priority = 0,
.protocol = ETH_P_FCOE
@@ -2149,7 +2201,7 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
}
port->priority = ffs(up) ? ffs(up) - 1 : 0;
- fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
+ ctlr->priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
}
#endif
}
@@ -2166,6 +2218,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
{
int rc = 0;
+ struct fcoe_ctlr_device *ctlr_dev;
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fc_lport *lport;
@@ -2184,7 +2238,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
goto out_nodev;
}
- lport = fcoe_if_create(fcoe, &netdev->dev, 0);
+ ctlr = fcoe_to_ctlr(fcoe);
+ ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
+ lport = fcoe_if_create(fcoe, &ctlr_dev->dev, 0);
if (IS_ERR(lport)) {
printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
netdev->name);
@@ -2195,7 +2251,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
}
/* Make this the "master" N_Port */
- fcoe->ctlr.lp = lport;
+ ctlr->lp = lport;
/* setup DCB priority attributes. */
fcoe_dcb_create(fcoe);
@@ -2208,7 +2264,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
fc_fabric_login(lport);
if (!fcoe_link_ok(lport)) {
rtnl_unlock();
- fcoe_ctlr_link_up(&fcoe->ctlr);
+ fcoe_ctlr_link_up(ctlr);
mutex_unlock(&fcoe_config_mutex);
return rc;
}
@@ -2320,11 +2376,12 @@ static int fcoe_reset(struct Scsi_Host *shost)
struct fc_lport *lport = shost_priv(shost);
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
- fcoe_ctlr_link_down(&fcoe->ctlr);
- fcoe_clean_pending_queue(fcoe->ctlr.lp);
- if (!fcoe_link_ok(fcoe->ctlr.lp))
- fcoe_ctlr_link_up(&fcoe->ctlr);
+ fcoe_ctlr_link_down(ctlr);
+ fcoe_clean_pending_queue(ctlr->lp);
+ if (!fcoe_link_ok(ctlr->lp))
+ fcoe_ctlr_link_up(ctlr);
return 0;
}
@@ -2359,10 +2416,12 @@ fcoe_hostlist_lookup_port(const struct net_device *netdev)
*/
static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
{
+ struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
fcoe = fcoe_hostlist_lookup_port(netdev);
- return (fcoe) ? fcoe->ctlr.lp : NULL;
+ ctlr = fcoe_to_ctlr(fcoe);
+ return (fcoe) ? ctlr->lp : NULL;
}
/**
@@ -2466,6 +2525,7 @@ module_init(fcoe_init);
static void __exit fcoe_exit(void)
{
struct fcoe_interface *fcoe, *tmp;
+ struct fcoe_ctlr *ctlr;
struct fcoe_port *port;
unsigned int cpu;
@@ -2477,7 +2537,8 @@ static void __exit fcoe_exit(void)
rtnl_lock();
list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
list_del(&fcoe->list);
- port = lport_priv(fcoe->ctlr.lp);
+ ctlr = fcoe_to_ctlr(fcoe);
+ port = lport_priv(ctlr->lp);
queue_work(fcoe_wq, &port->destroy_work);
}
rtnl_unlock();
@@ -2573,7 +2634,7 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
{
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
- struct fcoe_ctlr *fip = &fcoe->ctlr;
+ struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
struct fc_frame_header *fh = fc_frame_header_get(fp);
switch (op) {
@@ -2730,6 +2791,40 @@ static void fcoe_get_lesb(struct fc_lport *lport,
__fcoe_get_lesb(lport, fc_lesb, netdev);
}
+static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+ struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+ struct net_device *netdev = fcoe_netdev(fip->lp);
+ struct fcoe_fc_els_lesb *fcoe_lesb;
+ struct fc_els_lesb fc_lesb;
+
+ __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
+ fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
+
+ ctlr_dev->lesb.lesb_link_fail =
+ ntohl(fcoe_lesb->lesb_link_fail);
+ ctlr_dev->lesb.lesb_vlink_fail =
+ ntohl(fcoe_lesb->lesb_vlink_fail);
+ ctlr_dev->lesb.lesb_miss_fka =
+ ntohl(fcoe_lesb->lesb_miss_fka);
+ ctlr_dev->lesb.lesb_symb_err =
+ ntohl(fcoe_lesb->lesb_symb_err);
+ ctlr_dev->lesb.lesb_err_block =
+ ntohl(fcoe_lesb->lesb_err_block);
+ ctlr_dev->lesb.lesb_fcs_error =
+ ntohl(fcoe_lesb->lesb_fcs_error);
+}
+
+static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
+{
+ struct fcoe_ctlr_device *ctlr_dev =
+ fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr);
+
+ fcf_dev->vlan_id = vlan_dev_vlan_id(fcoe->netdev);
+}
+
/**
* fcoe_set_port_id() - Callback from libfc when Port_ID is set.
* @lport: the local port
@@ -2747,7 +2842,8 @@ static void fcoe_set_port_id(struct fc_lport *lport,
{
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
+ struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
- fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp);
+ fcoe_ctlr_recv_flogi(ctlr, lport, fp);
}
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 96ac938..a624add4 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -68,7 +68,6 @@ do { \
* @netdev: The associated net device
* @fcoe_packet_type: FCoE packet type
* @fip_packet_type: FIP packet type
- * @ctlr: The FCoE controller (for FIP)
* @oem: The offload exchange manager for all local port
* instances associated with this port
* @removed: Indicates fcoe interface removed from net device
@@ -80,12 +79,15 @@ struct fcoe_interface {
struct net_device *realdev;
struct packet_type fcoe_packet_type;
struct packet_type fip_packet_type;
- struct fcoe_ctlr ctlr;
struct fc_exch_mgr *oem;
u8 removed;
};
-#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
+#define fcoe_to_ctlr(x) \
+ (struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1)
+
+#define fcoe_from_ctlr(x) \
+ ((struct fcoe_interface *)((x) + 1))
/**
* fcoe_netdev() - Return the net device associated with a local port
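
These container macros only work because fcoe_interface_create(), earlier in this patch, carves the fcoe_interface out of the fcoe_ctlr_device's private area, immediately after the fcoe_ctlr. A short illustrative sketch of the resulting pointer math, assuming a file that already includes fcoe.h and the fcoe_sysfs definitions:

/*
 * Layout implied by fcoe_interface_create() (illustrative, not copied
 * from a header):
 *
 *   fcoe_ctlr_device_add(parent, templ, size)
 *     +-------------------------+
 *     | struct fcoe_ctlr_device |
 *     +-------------------------+ <-- fcoe_ctlr_device_priv(ctlr_dev)
 *     | struct fcoe_ctlr        |
 *     +-------------------------+ <-- fcoe_ctlr_priv(ctlr) / fcoe_from_ctlr(ctlr)
 *     | struct fcoe_interface   |
 *     +-------------------------+
 */
static void example_layout(struct fcoe_interface *fcoe)
{
        struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);             /* step back    */
        struct fcoe_ctlr_device *cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
        struct fcoe_interface *same = fcoe_from_ctlr(ctlr);      /* step forward */

        WARN_ON(same != fcoe);
        (void)cdev;
}
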
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 5a4c725..d68d572 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -160,6 +160,76 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
}
EXPORT_SYMBOL(fcoe_ctlr_init);
+static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new)
+{
+ struct fcoe_ctlr *fip = new->fip;
+ struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+ struct fcoe_fcf_device temp, *fcf_dev;
+ int rc = 0;
+
+ LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
+ new->fabric_name, new->fcf_mac);
+
+ mutex_lock(&ctlr_dev->lock);
+
+ temp.fabric_name = new->fabric_name;
+ temp.switch_name = new->switch_name;
+ temp.fc_map = new->fc_map;
+ temp.vfid = new->vfid;
+ memcpy(temp.mac, new->fcf_mac, ETH_ALEN);
+ temp.priority = new->pri;
+ temp.fka_period = new->fka_period;
+ temp.selected = 0; /* default to unselected */
+
+ fcf_dev = fcoe_fcf_device_add(ctlr_dev, &temp);
+ if (unlikely(!fcf_dev)) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * The fcoe_sysfs layer can return a CONNECTED fcf that
+ * has a priv (fcf was never deleted) or a CONNECTED fcf
+ * that doesn't have a priv (fcf was deleted). However,
+ * libfcoe will always delete FCFs before trying to add
+ * them. This is ensured because both recv_adv and
+ * age_fcfs are protected by the fcoe_ctlr's mutex.
+ * This means that we should never get an FCF with a
+ * non-NULL priv pointer.
+ */
+ BUG_ON(fcf_dev->priv);
+
+ fcf_dev->priv = new;
+ new->fcf_dev = fcf_dev;
+
+ list_add(&new->list, &fip->fcfs);
+ fip->fcf_count++;
+
+out:
+ mutex_unlock(&ctlr_dev->lock);
+ return rc;
+}
+
+static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
+{
+ struct fcoe_ctlr *fip = new->fip;
+ struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+ struct fcoe_fcf_device *fcf_dev;
+
+ list_del(&new->list);
+ fip->fcf_count--;
+
+ mutex_lock(&ctlr_dev->lock);
+
+ fcf_dev = fcoe_fcf_to_fcf_dev(new);
+ WARN_ON(!fcf_dev);
+ new->fcf_dev = NULL;
+ fcoe_fcf_device_delete(fcf_dev);
+ kfree(new);
+
+ mutex_unlock(&ctlr_dev->lock);
+}
+
/**
* fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller
* @fip: The FCoE controller whose FCFs are to be reset
@@ -173,10 +243,10 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
fip->sel_fcf = NULL;
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
- list_del(&fcf->list);
- kfree(fcf);
+ fcoe_sysfs_fcf_del(fcf);
}
- fip->fcf_count = 0;
+ WARN_ON(fip->fcf_count);
+
fip->sel_time = 0;
}
@@ -717,8 +787,11 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
unsigned long deadline;
unsigned long sel_time = 0;
+ struct list_head del_list;
struct fcoe_dev_stats *stats;
+ INIT_LIST_HEAD(&del_list);
+
stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu());
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
@@ -739,10 +812,13 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
if (time_after_eq(jiffies, deadline)) {
if (fip->sel_fcf == fcf)
fip->sel_fcf = NULL;
+ /*
+ * Move to delete list so we can call
+ * fcoe_sysfs_fcf_del (which can sleep)
+ * after the put_cpu().
+ */
list_del(&fcf->list);
- WARN_ON(!fip->fcf_count);
- fip->fcf_count--;
- kfree(fcf);
+ list_add(&fcf->list, &del_list);
stats->VLinkFailureCount++;
} else {
if (time_after(next_timer, deadline))
@@ -753,6 +829,12 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
}
}
put_cpu();
+
+ list_for_each_entry_safe(fcf, next, &del_list, list) {
+ /* Removes fcf from current list */
+ fcoe_sysfs_fcf_del(fcf);
+ }
+
if (sel_time && !fip->sel_fcf && !fip->sel_time) {
sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY);
fip->sel_time = sel_time;
@@ -903,23 +985,23 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct fcoe_fcf *fcf;
struct fcoe_fcf new;
- struct fcoe_fcf *found;
unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV);
int first = 0;
int mtu_valid;
+ int found = 0;
+ int rc = 0;
if (fcoe_ctlr_parse_adv(fip, skb, &new))
return;
mutex_lock(&fip->ctlr_mutex);
first = list_empty(&fip->fcfs);
- found = NULL;
list_for_each_entry(fcf, &fip->fcfs, list) {
if (fcf->switch_name == new.switch_name &&
fcf->fabric_name == new.fabric_name &&
fcf->fc_map == new.fc_map &&
compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) {
- found = fcf;
+ found = 1;
break;
}
}
@@ -931,9 +1013,16 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
if (!fcf)
goto out;
- fip->fcf_count++;
memcpy(fcf, &new, sizeof(new));
- list_add(&fcf->list, &fip->fcfs);
+ fcf->fip = fip;
+ rc = fcoe_sysfs_fcf_add(fcf);
+ if (rc) {
+ printk(KERN_ERR "Failed to allocate sysfs instance "
+ "for FCF, fab %16.16llx mac %pM\n",
+ new.fabric_name, new.fcf_mac);
+ kfree(fcf);
+ goto out;
+ }
} else {
/*
* Update the FCF's keep-alive descriptor flags.
@@ -954,6 +1043,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
fcf->fka_period = new.fka_period;
memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN);
}
+
mtu_valid = fcoe_ctlr_mtu_valid(fcf);
fcf->time = jiffies;
if (!found)
@@ -996,6 +1086,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
time_before(fip->sel_time, fip->timer.expires))
mod_timer(&fip->timer, fip->sel_time);
}
+
out:
mutex_unlock(&fip->ctlr_mutex);
}
@@ -2718,9 +2809,9 @@ unlock:
/**
* fcoe_libfc_config() - Sets up libfc related properties for local port
- * @lp: The local port to configure libfc for
- * @fip: The FCoE controller in use by the local port
- * @tt: The libfc function template
+ * @lport: The local port to configure libfc for
+ * @fip: The FCoE controller in use by the local port
+ * @tt: The libfc function template
* @init_fcp: If non-zero, the FCP portion of libfc should be initialized
*
* Returns : 0 for success
@@ -2753,3 +2844,43 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
return 0;
}
EXPORT_SYMBOL_GPL(fcoe_libfc_config);
+
+void fcoe_fcf_get_selected(struct fcoe_fcf_device *fcf_dev)
+{
+ struct fcoe_ctlr_device *ctlr_dev = fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
+ struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+ struct fcoe_fcf *fcf;
+
+ mutex_lock(&fip->ctlr_mutex);
+ mutex_lock(&ctlr_dev->lock);
+
+ fcf = fcoe_fcf_device_priv(fcf_dev);
+ if (fcf)
+ fcf_dev->selected = (fcf == fip->sel_fcf) ? 1 : 0;
+ else
+ fcf_dev->selected = 0;
+
+ mutex_unlock(&ctlr_dev->lock);
+ mutex_unlock(&fip->ctlr_mutex);
+}
+EXPORT_SYMBOL(fcoe_fcf_get_selected);
+
+void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
+{
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+
+ mutex_lock(&ctlr->ctlr_mutex);
+ switch (ctlr->mode) {
+ case FIP_MODE_FABRIC:
+ ctlr_dev->mode = FIP_CONN_TYPE_FABRIC;
+ break;
+ case FIP_MODE_VN2VN:
+ ctlr_dev->mode = FIP_CONN_TYPE_VN2VN;
+ break;
+ default:
+ ctlr_dev->mode = FIP_CONN_TYPE_UNKNOWN;
+ break;
+ }
+ mutex_unlock(&ctlr->ctlr_mutex);
+}
+EXPORT_SYMBOL(fcoe_ctlr_get_fip_mode);
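
fcoe_ctlr_age_fcfs() above can no longer free an aged-out FCF in place: fcoe_sysfs_fcf_del() takes a mutex and may sleep, but the scan runs between get_cpu() and put_cpu(). The diff therefore parks expired entries on a local del_list and tears them down afterwards. A generic sketch of that pattern (the aged_item type and deadline field are illustrative):

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/smp.h>

struct aged_item {                       /* illustrative type only */
        struct list_head list;
        unsigned long deadline;
};

static void example_age(struct list_head *live,
                        void (*teardown)(struct aged_item *))
{
        struct aged_item *it, *next;
        LIST_HEAD(del_list);

        get_cpu();                       /* non-sleepable from here ... */
        list_for_each_entry_safe(it, next, live, list)
                if (time_after_eq(jiffies, it->deadline))
                        list_move(&it->list, &del_list);
        put_cpu();                       /* ... to here */

        list_for_each_entry_safe(it, next, &del_list, list)
                teardown(it);            /* may sleep now */
}
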
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
new file mode 100644
index 0000000..2bc1631
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -0,0 +1,832 @@
+/*
+ * Copyright(c) 2011 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/etherdevice.h>
+
+#include <scsi/fcoe_sysfs.h>
+
+static atomic_t ctlr_num;
+static atomic_t fcf_num;
+
+/*
+ * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
+ * should insulate against the loss of an fcf.
+ */
+static unsigned int fcoe_fcf_dev_loss_tmo = 1800; /* seconds */
+
+module_param_named(fcf_dev_loss_tmo, fcoe_fcf_dev_loss_tmo,
+ uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fcf_dev_loss_tmo,
+ "Maximum number of seconds that libfcoe should"
+ " insulate the loss of a fcf. Once this value is"
+ " exceeded, the fcf is removed.");
+
+/*
+ * These are used by the fcoe_*_show_function routines, they
+ * are intentionally placed in the .c file as they're not intended
+ * for use throughout the code.
+ */
+#define fcoe_ctlr_id(x) \
+ ((x)->id)
+#define fcoe_ctlr_work_q_name(x) \
+ ((x)->work_q_name)
+#define fcoe_ctlr_work_q(x) \
+ ((x)->work_q)
+#define fcoe_ctlr_devloss_work_q_name(x) \
+ ((x)->devloss_work_q_name)
+#define fcoe_ctlr_devloss_work_q(x) \
+ ((x)->devloss_work_q)
+#define fcoe_ctlr_mode(x) \
+ ((x)->mode)
+#define fcoe_ctlr_fcf_dev_loss_tmo(x) \
+ ((x)->fcf_dev_loss_tmo)
+#define fcoe_ctlr_link_fail(x) \
+ ((x)->lesb.lesb_link_fail)
+#define fcoe_ctlr_vlink_fail(x) \
+ ((x)->lesb.lesb_vlink_fail)
+#define fcoe_ctlr_miss_fka(x) \
+ ((x)->lesb.lesb_miss_fka)
+#define fcoe_ctlr_symb_err(x) \
+ ((x)->lesb.lesb_symb_err)
+#define fcoe_ctlr_err_block(x) \
+ ((x)->lesb.lesb_err_block)
+#define fcoe_ctlr_fcs_error(x) \
+ ((x)->lesb.lesb_fcs_error)
+#define fcoe_fcf_state(x) \
+ ((x)->state)
+#define fcoe_fcf_fabric_name(x) \
+ ((x)->fabric_name)
+#define fcoe_fcf_switch_name(x) \
+ ((x)->switch_name)
+#define fcoe_fcf_fc_map(x) \
+ ((x)->fc_map)
+#define fcoe_fcf_vfid(x) \
+ ((x)->vfid)
+#define fcoe_fcf_mac(x) \
+ ((x)->mac)
+#define fcoe_fcf_priority(x) \
+ ((x)->priority)
+#define fcoe_fcf_fka_period(x) \
+ ((x)->fka_period)
+#define fcoe_fcf_dev_loss_tmo(x) \
+ ((x)->dev_loss_tmo)
+#define fcoe_fcf_selected(x) \
+ ((x)->selected)
+#define fcoe_fcf_vlan_id(x) \
+ ((x)->vlan_id)
+
+/*
+ * dev_loss_tmo attribute
+ */
+static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val)
+{
+ int ret;
+
+ ret = kstrtoul(buf, 0, val);
+ if (ret || *val < 0)
+ return -EINVAL;
+ /*
+ * Check for overflow; dev_loss_tmo is u32
+ */
+ if (*val > UINT_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int fcoe_fcf_set_dev_loss_tmo(struct fcoe_fcf_device *fcf,
+ unsigned long val)
+{
+ if ((fcf->state == FCOE_FCF_STATE_UNKNOWN) ||
+ (fcf->state == FCOE_FCF_STATE_DISCONNECTED) ||
+ (fcf->state == FCOE_FCF_STATE_DELETED))
+ return -EBUSY;
+ /*
+ * Check for overflow; dev_loss_tmo is u32
+ */
+ if (val > UINT_MAX)
+ return -EINVAL;
+
+ fcoe_fcf_dev_loss_tmo(fcf) = val;
+ return 0;
+}
+
+#define FCOE_DEVICE_ATTR(_prefix, _name, _mode, _show, _store) \
+struct device_attribute device_attr_fcoe_##_prefix##_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+#define fcoe_ctlr_show_function(field, format_string, sz, cast) \
+static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \
+ if (ctlr->f->get_fcoe_ctlr_##field) \
+ ctlr->f->get_fcoe_ctlr_##field(ctlr); \
+ return snprintf(buf, sz, format_string, \
+ cast fcoe_ctlr_##field(ctlr)); \
+}
+
+#define fcoe_fcf_show_function(field, format_string, sz, cast) \
+static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \
+ struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); \
+ if (ctlr->f->get_fcoe_fcf_##field) \
+ ctlr->f->get_fcoe_fcf_##field(fcf); \
+ return snprintf(buf, sz, format_string, \
+ cast fcoe_fcf_##field(fcf)); \
+}
+
+#define fcoe_ctlr_private_show_function(field, format_string, sz, cast) \
+static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \
+ return snprintf(buf, sz, format_string, cast fcoe_ctlr_##field(ctlr)); \
+}
+
+#define fcoe_fcf_private_show_function(field, format_string, sz, cast) \
+static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \
+ return snprintf(buf, sz, format_string, cast fcoe_fcf_##field(fcf)); \
+}
+
+#define fcoe_ctlr_private_rd_attr(field, format_string, sz) \
+ fcoe_ctlr_private_show_function(field, format_string, sz, ) \
+ static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
+ show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_ctlr_rd_attr(field, format_string, sz) \
+ fcoe_ctlr_show_function(field, format_string, sz, ) \
+ static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
+ show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_fcf_rd_attr(field, format_string, sz) \
+ fcoe_fcf_show_function(field, format_string, sz, ) \
+ static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
+ show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_fcf_private_rd_attr(field, format_string, sz) \
+ fcoe_fcf_private_show_function(field, format_string, sz, ) \
+ static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
+ show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_ctlr_private_rd_attr_cast(field, format_string, sz, cast) \
+ fcoe_ctlr_private_show_function(field, format_string, sz, (cast)) \
+ static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
+ show_fcoe_ctlr_device_##field, NULL)
+
+#define fcoe_fcf_private_rd_attr_cast(field, format_string, sz, cast) \
+ fcoe_fcf_private_show_function(field, format_string, sz, (cast)) \
+ static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
+ show_fcoe_fcf_device_##field, NULL)
+
+#define fcoe_enum_name_search(title, table_type, table) \
+static const char *get_fcoe_##title##_name(enum table_type table_key) \
+{ \
+ int i; \
+ char *name = NULL; \
+ \
+ for (i = 0; i < ARRAY_SIZE(table); i++) { \
+ if (table[i].value == table_key) { \
+ name = table[i].name; \
+ break; \
+ } \
+ } \
+ return name; \
+}
+
+static struct {
+ enum fcf_state value;
+ char *name;
+} fcf_state_names[] = {
+ { FCOE_FCF_STATE_UNKNOWN, "Unknown" },
+ { FCOE_FCF_STATE_DISCONNECTED, "Disconnected" },
+ { FCOE_FCF_STATE_CONNECTED, "Connected" },
+};
+fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names)
+#define FCOE_FCF_STATE_MAX_NAMELEN 50
+
+static ssize_t show_fcf_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+ const char *name;
+ name = get_fcoe_fcf_state_name(fcf->state);
+ if (!name)
+ return -EINVAL;
+ return snprintf(buf, FCOE_FCF_STATE_MAX_NAMELEN, "%s\n", name);
+}
+static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL);
+
+static struct {
+ enum fip_conn_type value;
+ char *name;
+} fip_conn_type_names[] = {
+ { FIP_CONN_TYPE_UNKNOWN, "Unknown" },
+ { FIP_CONN_TYPE_FABRIC, "Fabric" },
+ { FIP_CONN_TYPE_VN2VN, "VN2VN" },
+};
+fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
+#define FCOE_CTLR_MODE_MAX_NAMELEN 50
+
+static ssize_t show_ctlr_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ const char *name;
+
+ if (ctlr->f->get_fcoe_ctlr_mode)
+ ctlr->f->get_fcoe_ctlr_mode(ctlr);
+
+ name = get_fcoe_ctlr_mode_name(ctlr->mode);
+ if (!name)
+ return -EINVAL;
+ return snprintf(buf, FCOE_CTLR_MODE_MAX_NAMELEN,
+ "%s\n", name);
+}
+static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO,
+ show_ctlr_mode, NULL);
+
+static ssize_t
+store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ struct fcoe_fcf_device *fcf;
+ unsigned long val;
+ int rc;
+
+ rc = fcoe_str_to_dev_loss(buf, &val);
+ if (rc)
+ return rc;
+
+ fcoe_ctlr_fcf_dev_loss_tmo(ctlr) = val;
+ mutex_lock(&ctlr->lock);
+ list_for_each_entry(fcf, &ctlr->fcfs, peers)
+ fcoe_fcf_set_dev_loss_tmo(fcf, val);
+ mutex_unlock(&ctlr->lock);
+ return count;
+}
+fcoe_ctlr_private_show_function(fcf_dev_loss_tmo, "%d\n", 20, );
+static FCOE_DEVICE_ATTR(ctlr, fcf_dev_loss_tmo, S_IRUGO | S_IWUSR,
+ show_fcoe_ctlr_device_fcf_dev_loss_tmo,
+ store_private_fcoe_ctlr_fcf_dev_loss_tmo);
+
+/* Link Error Status Block (LESB) */
+fcoe_ctlr_rd_attr(link_fail, "%u\n", 20);
+fcoe_ctlr_rd_attr(vlink_fail, "%u\n", 20);
+fcoe_ctlr_rd_attr(miss_fka, "%u\n", 20);
+fcoe_ctlr_rd_attr(symb_err, "%u\n", 20);
+fcoe_ctlr_rd_attr(err_block, "%u\n", 20);
+fcoe_ctlr_rd_attr(fcs_error, "%u\n", 20);
+
+fcoe_fcf_private_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
+fcoe_fcf_private_rd_attr_cast(switch_name, "0x%llx\n", 20, unsigned long long);
+fcoe_fcf_private_rd_attr(priority, "%u\n", 20);
+fcoe_fcf_private_rd_attr(fc_map, "0x%x\n", 20);
+fcoe_fcf_private_rd_attr(vfid, "%u\n", 20);
+fcoe_fcf_private_rd_attr(mac, "%pM\n", 20);
+fcoe_fcf_private_rd_attr(fka_period, "%u\n", 20);
+fcoe_fcf_rd_attr(selected, "%u\n", 20);
+fcoe_fcf_rd_attr(vlan_id, "%u\n", 20);
+
+fcoe_fcf_private_show_function(dev_loss_tmo, "%d\n", 20, )
+static ssize_t
+store_fcoe_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+ unsigned long val;
+ int rc;
+
+ rc = fcoe_str_to_dev_loss(buf, &val);
+ if (rc)
+ return rc;
+
+ rc = fcoe_fcf_set_dev_loss_tmo(fcf, val);
+ if (rc)
+ return rc;
+ return count;
+}
+static FCOE_DEVICE_ATTR(fcf, dev_loss_tmo, S_IRUGO | S_IWUSR,
+ show_fcoe_fcf_device_dev_loss_tmo,
+ store_fcoe_fcf_dev_loss_tmo);
+
+static struct attribute *fcoe_ctlr_lesb_attrs[] = {
+ &device_attr_fcoe_ctlr_link_fail.attr,
+ &device_attr_fcoe_ctlr_vlink_fail.attr,
+ &device_attr_fcoe_ctlr_miss_fka.attr,
+ &device_attr_fcoe_ctlr_symb_err.attr,
+ &device_attr_fcoe_ctlr_err_block.attr,
+ &device_attr_fcoe_ctlr_fcs_error.attr,
+ NULL,
+};
+
+static struct attribute_group fcoe_ctlr_lesb_attr_group = {
+ .name = "lesb",
+ .attrs = fcoe_ctlr_lesb_attrs,
+};
+
+static struct attribute *fcoe_ctlr_attrs[] = {
+ &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
+ &device_attr_fcoe_ctlr_mode.attr,
+ NULL,
+};
+
+static struct attribute_group fcoe_ctlr_attr_group = {
+ .attrs = fcoe_ctlr_attrs,
+};
+
+static const struct attribute_group *fcoe_ctlr_attr_groups[] = {
+ &fcoe_ctlr_attr_group,
+ &fcoe_ctlr_lesb_attr_group,
+ NULL,
+};
+
+static struct attribute *fcoe_fcf_attrs[] = {
+ &device_attr_fcoe_fcf_fabric_name.attr,
+ &device_attr_fcoe_fcf_switch_name.attr,
+ &device_attr_fcoe_fcf_dev_loss_tmo.attr,
+ &device_attr_fcoe_fcf_fc_map.attr,
+ &device_attr_fcoe_fcf_vfid.attr,
+ &device_attr_fcoe_fcf_mac.attr,
+ &device_attr_fcoe_fcf_priority.attr,
+ &device_attr_fcoe_fcf_fka_period.attr,
+ &device_attr_fcoe_fcf_state.attr,
+ &device_attr_fcoe_fcf_selected.attr,
+ &device_attr_fcoe_fcf_vlan_id.attr,
+ NULL
+};
+
+static struct attribute_group fcoe_fcf_attr_group = {
+ .attrs = fcoe_fcf_attrs,
+};
+
+static const struct attribute_group *fcoe_fcf_attr_groups[] = {
+ &fcoe_fcf_attr_group,
+ NULL,
+};
+
+struct bus_type fcoe_bus_type;
+
+static int fcoe_bus_match(struct device *dev,
+ struct device_driver *drv)
+{
+ if (dev->bus == &fcoe_bus_type)
+ return 1;
+ return 0;
+}
+
+/**
+ * fcoe_ctlr_device_release() - Release the FIP ctlr memory
+ * @dev: Pointer to the FIP ctlr's embedded device
+ *
+ * Called when the last FIP ctlr reference is released.
+ */
+static void fcoe_ctlr_device_release(struct device *dev)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ kfree(ctlr);
+}
+
+/**
+ * fcoe_fcf_device_release() - Release the FIP fcf memory
+ * @dev: Pointer to the fcf's embedded device
+ *
+ * Called when the last FIP fcf reference is released.
+ */
+static void fcoe_fcf_device_release(struct device *dev)
+{
+ struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
+ kfree(fcf);
+}
+
+struct device_type fcoe_ctlr_device_type = {
+ .name = "fcoe_ctlr",
+ .groups = fcoe_ctlr_attr_groups,
+ .release = fcoe_ctlr_device_release,
+};
+
+struct device_type fcoe_fcf_device_type = {
+ .name = "fcoe_fcf",
+ .groups = fcoe_fcf_attr_groups,
+ .release = fcoe_fcf_device_release,
+};
+
+struct bus_type fcoe_bus_type = {
+ .name = "fcoe",
+ .match = &fcoe_bus_match,
+};
+
+/**
+ * fcoe_ctlr_device_flush_work() - Flush a FIP ctlr's workqueue
+ * @ctlr: Pointer to the FIP ctlr whose workqueue is to be flushed
+ */
+void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr)
+{
+ if (!fcoe_ctlr_work_q(ctlr)) {
+ printk(KERN_ERR
+ "ERROR: FIP Ctlr '%d' attempted to flush work, "
+ "when no workqueue created.\n", ctlr->id);
+ dump_stack();
+ return;
+ }
+
+ flush_workqueue(fcoe_ctlr_work_q(ctlr));
+}
+
+/**
+ * fcoe_ctlr_device_queue_work() - Schedule work for a FIP ctlr's workqueue
+ * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue
+ * @work: Work to queue for execution
+ *
+ * Return value:
+ * 1 on success / 0 already queued / < 0 for error
+ */
+int fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr,
+ struct work_struct *work)
+{
+ if (unlikely(!fcoe_ctlr_work_q(ctlr))) {
+ printk(KERN_ERR
+ "ERROR: FIP Ctlr '%d' attempted to queue work, "
+ "when no workqueue created.\n", ctlr->id);
+ dump_stack();
+
+ return -EINVAL;
+ }
+
+ return queue_work(fcoe_ctlr_work_q(ctlr), work);
+}
+
+/**
+ * fcoe_ctlr_device_flush_devloss() - Flush a FIP ctlr's devloss workqueue
+ * @ctlr: Pointer to FIP ctlr whose workqueue is to be flushed
+ */
+void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr)
+{
+ if (!fcoe_ctlr_devloss_work_q(ctlr)) {
+ printk(KERN_ERR
+ "ERROR: FIP Ctlr '%d' attempted to flush work, "
+ "when no workqueue created.\n", ctlr->id);
+ dump_stack();
+ return;
+ }
+
+ flush_workqueue(fcoe_ctlr_devloss_work_q(ctlr));
+}
+
+/**
+ * fcoe_ctlr_device_queue_devloss_work() - Schedule work for a FIP ctlr's devloss workqueue
+ * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue
+ * @work: Work to queue for execution
+ * @delay: jiffies to delay the work queuing
+ *
+ * Return value:
+ * 1 on success / 0 already queued / < 0 for error
+ */
+int fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr,
+ struct delayed_work *work,
+ unsigned long delay)
+{
+ if (unlikely(!fcoe_ctlr_devloss_work_q(ctlr))) {
+ printk(KERN_ERR
+ "ERROR: FIP Ctlr '%d' attempted to queue work, "
+ "when no workqueue created.\n", ctlr->id);
+ dump_stack();
+
+ return -EINVAL;
+ }
+
+ return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay);
+}
+
+static int fcoe_fcf_device_match(struct fcoe_fcf_device *new,
+ struct fcoe_fcf_device *old)
+{
+ if (new->switch_name == old->switch_name &&
+ new->fabric_name == old->fabric_name &&
+ new->fc_map == old->fc_map &&
+ compare_ether_addr(new->mac, old->mac) == 0)
+ return 1;
+ return 0;
+}
+
+/**
+ * fcoe_ctlr_device_add() - Add a FIP ctlr to sysfs
+ * @parent: The parent device to which the fcoe_ctlr instance
+ * should be attached
+ * @f: The LLD's FCoE sysfs function template pointer
+ * @priv_size: Size to be allocated with the fcoe_ctlr_device for the LLD
+ *
+ * This routine allocates a FIP ctlr object with some additional memory
+ * for the LLD. The FIP ctlr is initialized, added to sysfs and then
+ * attributes are added to it.
+ */
+struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
+ struct fcoe_sysfs_function_template *f,
+ int priv_size)
+{
+ struct fcoe_ctlr_device *ctlr;
+ int error = 0;
+
+ ctlr = kzalloc(sizeof(struct fcoe_ctlr_device) + priv_size,
+ GFP_KERNEL);
+ if (!ctlr)
+ goto out;
+
+ ctlr->id = atomic_inc_return(&ctlr_num) - 1;
+ ctlr->f = f;
+ INIT_LIST_HEAD(&ctlr->fcfs);
+ mutex_init(&ctlr->lock);
+ ctlr->dev.parent = parent;
+ ctlr->dev.bus = &fcoe_bus_type;
+ ctlr->dev.type = &fcoe_ctlr_device_type;
+
+ ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo;
+
+ snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name),
+ "ctlr_wq_%d", ctlr->id);
+ ctlr->work_q = create_singlethread_workqueue(
+ ctlr->work_q_name);
+ if (!ctlr->work_q)
+ goto out_del;
+
+ snprintf(ctlr->devloss_work_q_name,
+ sizeof(ctlr->devloss_work_q_name),
+ "ctlr_dl_wq_%d", ctlr->id);
+ ctlr->devloss_work_q = create_singlethread_workqueue(
+ ctlr->devloss_work_q_name);
+ if (!ctlr->devloss_work_q)
+ goto out_del_q;
+
+ dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id);
+ error = device_register(&ctlr->dev);
+ if (error)
+ goto out_del_q2;
+
+ return ctlr;
+
+out_del_q2:
+ destroy_workqueue(ctlr->devloss_work_q);
+ ctlr->devloss_work_q = NULL;
+out_del_q:
+ destroy_workqueue(ctlr->work_q);
+ ctlr->work_q = NULL;
+out_del:
+ kfree(ctlr);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_device_add);
+
+/**
+ * fcoe_ctlr_device_delete() - Delete a FIP ctlr and its subtree from sysfs
+ * @ctlr: A pointer to the ctlr to be deleted
+ *
+ * Deletes a FIP ctlr and any fcfs attached
+ * to it. Deleting fcfs will cause their children
+ * to be deleted as well.
+ *
+ * The ctlr is detached from sysfs and its resources
+ * are freed (work q), but the memory is not freed
+ * until its last reference is released.
+ *
+ * This routine expects no locks to be held before
+ * calling.
+ *
+ * TODO: Currently there are no callbacks to clean up LLD data
+ * for a fcoe_fcf_device. LLDs must keep this in mind as they need
+ * to clean up each of their LLD data for all fcoe_fcf_device before
+ * calling fcoe_ctlr_device_delete.
+ */
+void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *ctlr)
+{
+ struct fcoe_fcf_device *fcf, *next;
+ /* Remove any attached fcfs */
+ mutex_lock(&ctlr->lock);
+ list_for_each_entry_safe(fcf, next,
+ &ctlr->fcfs, peers) {
+ list_del(&fcf->peers);
+ fcf->state = FCOE_FCF_STATE_DELETED;
+ fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
+ }
+ mutex_unlock(&ctlr->lock);
+
+ fcoe_ctlr_device_flush_work(ctlr);
+
+ destroy_workqueue(ctlr->devloss_work_q);
+ ctlr->devloss_work_q = NULL;
+ destroy_workqueue(ctlr->work_q);
+ ctlr->work_q = NULL;
+
+ device_unregister(&ctlr->dev);
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_device_delete);
+
+/**
+ * fcoe_fcf_device_final_delete() - Final delete routine
+ * @work: The FIP fcf's embedded work struct
+ *
+ * It is expected that the fcf has been removed from
+ * the FIP ctlr's list before calling this routine.
+ */
+static void fcoe_fcf_device_final_delete(struct work_struct *work)
+{
+ struct fcoe_fcf_device *fcf =
+ container_of(work, struct fcoe_fcf_device, delete_work);
+ struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+
+ /*
+ * Cancel any outstanding timers. These should really exist
+ * only when rmmod'ing the LLDD and we're asking for
+ * immediate termination of the rports
+ */
+ if (!cancel_delayed_work(&fcf->dev_loss_work))
+ fcoe_ctlr_device_flush_devloss(ctlr);
+
+ device_unregister(&fcf->dev);
+}
+
+/**
+ * fip_timeout_deleted_fcf() - Delete a fcf when the devloss timer fires
+ * @work: The FIP fcf's embedded work struct
+ *
+ * Removes the fcf from the FIP ctlr's list of fcfs and
+ * queues the final deletion.
+ */
+static void fip_timeout_deleted_fcf(struct work_struct *work)
+{
+ struct fcoe_fcf_device *fcf =
+ container_of(work, struct fcoe_fcf_device, dev_loss_work.work);
+ struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+
+ mutex_lock(&ctlr->lock);
+
+ /*
+ * If the fcf is deleted or reconnected before the timer
+ * fires the devloss queue will be flushed, but the state will
+ * either be CONNECTED or DELETED. If that is the case we
+ * cancel deleting the fcf.
+ */
+ if (fcf->state != FCOE_FCF_STATE_DISCONNECTED)
+ goto out;
+
+ dev_printk(KERN_ERR, &fcf->dev,
+ "FIP fcf connection time out: removing fcf\n");
+
+ list_del(&fcf->peers);
+ fcf->state = FCOE_FCF_STATE_DELETED;
+ fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
+
+out:
+ mutex_unlock(&ctlr->lock);
+}
+
+/**
+ * fcoe_fcf_device_delete() - Delete a FIP fcf
+ * @fcf: Pointer to the fcf which is to be deleted
+ *
+ * Queues the FIP fcf on the devloss workqueue
+ *
+ * Expects the ctlr_attrs mutex to be held for fcf
+ * state change.
+ */
+void fcoe_fcf_device_delete(struct fcoe_fcf_device *fcf)
+{
+ struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
+ int timeout = fcf->dev_loss_tmo;
+
+ if (fcf->state != FCOE_FCF_STATE_CONNECTED)
+ return;
+
+ fcf->state = FCOE_FCF_STATE_DISCONNECTED;
+
+ /*
+ * FCF will only be re-connected by the LLD calling
+ * fcoe_fcf_device_add, and it should be setting up
+ * priv then.
+ */
+ fcf->priv = NULL;
+
+ fcoe_ctlr_device_queue_devloss_work(ctlr, &fcf->dev_loss_work,
+ timeout * HZ);
+}
+EXPORT_SYMBOL_GPL(fcoe_fcf_device_delete);
+
+/**
+ * fcoe_fcf_device_add() - Add a FCoE sysfs fcoe_fcf_device to the system
+ * @ctlr: The fcoe_ctlr_device that will be the fcoe_fcf_device parent
+ * @new_fcf: A temporary FCF used for lookups on the current list of fcfs
+ *
+ * Expects to be called with the ctlr->lock held
+ */
+struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
+ struct fcoe_fcf_device *new_fcf)
+{
+ struct fcoe_fcf_device *fcf;
+ int error = 0;
+
+ list_for_each_entry(fcf, &ctlr->fcfs, peers) {
+ if (fcoe_fcf_device_match(new_fcf, fcf)) {
+ if (fcf->state == FCOE_FCF_STATE_CONNECTED)
+ return fcf;
+
+ fcf->state = FCOE_FCF_STATE_CONNECTED;
+
+ if (!cancel_delayed_work(&fcf->dev_loss_work))
+ fcoe_ctlr_device_flush_devloss(ctlr);
+
+ return fcf;
+ }
+ }
+
+ fcf = kzalloc(sizeof(struct fcoe_fcf_device), GFP_ATOMIC);
+ if (unlikely(!fcf))
+ goto out;
+
+ INIT_WORK(&fcf->delete_work, fcoe_fcf_device_final_delete);
+ INIT_DELAYED_WORK(&fcf->dev_loss_work, fip_timeout_deleted_fcf);
+
+ fcf->dev.parent = &ctlr->dev;
+ fcf->dev.bus = &fcoe_bus_type;
+ fcf->dev.type = &fcoe_fcf_device_type;
+ fcf->id = atomic_inc_return(&fcf_num) - 1;
+ fcf->state = FCOE_FCF_STATE_UNKNOWN;
+
+ fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
+
+ dev_set_name(&fcf->dev, "fcf_%d", fcf->id);
+
+ fcf->fabric_name = new_fcf->fabric_name;
+ fcf->switch_name = new_fcf->switch_name;
+ fcf->fc_map = new_fcf->fc_map;
+ fcf->vfid = new_fcf->vfid;
+ memcpy(fcf->mac, new_fcf->mac, ETH_ALEN);
+ fcf->priority = new_fcf->priority;
+ fcf->fka_period = new_fcf->fka_period;
+ fcf->selected = new_fcf->selected;
+
+ error = device_register(&fcf->dev);
+ if (error)
+ goto out_del;
+
+ fcf->state = FCOE_FCF_STATE_CONNECTED;
+ list_add_tail(&fcf->peers, &ctlr->fcfs);
+
+ return fcf;
+
+out_del:
+ kfree(fcf);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(fcoe_fcf_device_add);
+
+int __init fcoe_sysfs_setup(void)
+{
+ int error;
+
+ atomic_set(&ctlr_num, 0);
+ atomic_set(&fcf_num, 0);
+
+ error = bus_register(&fcoe_bus_type);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+void __exit fcoe_sysfs_teardown(void)
+{
+ bus_unregister(&fcoe_bus_type);
+}
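
Taken together, the new file gives LLDs a small lifecycle: allocate a ctlr device with extra private space via fcoe_ctlr_device_add(), let FCFs be added under ctlr->lock with fcoe_fcf_device_add(), and remove everything with fcoe_ctlr_device_delete(). A hedged sketch of how a driver might drive it, using only the functions exported above (the function template contents are elided):

#include <scsi/fcoe_sysfs.h>

/* Illustrative use of the API added in this file; 'my_templ' and the
 * private-size value stand in for whatever the LLD actually needs.
 */
static struct fcoe_ctlr_device *example_attach(struct device *parent,
                struct fcoe_sysfs_function_template *my_templ,
                int priv_size)
{
        struct fcoe_ctlr_device *ctlr_dev;

        ctlr_dev = fcoe_ctlr_device_add(parent, my_templ, priv_size);
        if (!ctlr_dev)
                return NULL;

        /* ... fcoe_fcf_device_add() is called later, under ctlr_dev->lock,
         * as fabric advertisements arrive ... */

        return ctlr_dev;
}

static void example_detach(struct fcoe_ctlr_device *ctlr_dev)
{
        /* Queues deletion of any attached FCFs, flushes the work queues,
         * then unregisters the device. */
        fcoe_ctlr_device_delete(ctlr_dev);
}
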
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 710e149..b46f43dc 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -815,9 +815,17 @@ out_nodev:
*/
static int __init libfcoe_init(void)
{
- fcoe_transport_init();
+ int rc = 0;
- return 0;
+ rc = fcoe_transport_init();
+ if (rc)
+ return rc;
+
+ rc = fcoe_sysfs_setup();
+ if (rc)
+ fcoe_transport_exit();
+
+ return rc;
}
module_init(libfcoe_init);
@@ -826,6 +834,7 @@ module_init(libfcoe_init);
*/
static void __exit libfcoe_exit(void)
{
+ fcoe_sysfs_teardown();
fcoe_transport_exit();
}
module_exit(libfcoe_exit);
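
libfcoe_init() above now checks fcoe_transport_init() and rolls it back if fcoe_sysfs_setup() fails, with libfcoe_exit() tearing the two down in reverse order. A compact sketch of that init/rollback shape, using the two functions visible in the diff:

static int __init example_init(void)
{
        int rc;

        rc = fcoe_transport_init();
        if (rc)
                return rc;

        rc = fcoe_sysfs_setup();
        if (rc)
                fcoe_transport_exit();   /* undo the earlier step */

        return rc;
}
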
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 441d88a..d109cc3 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -139,12 +139,12 @@ static void sas_ata_task_done(struct sas_task *task)
if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
((stat->stat == SAM_STAT_CHECK_CONDITION &&
dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
- ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
+ memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
if (!link->sactive) {
- qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+ qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
} else {
- link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+ link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
if (unlikely(link->eh_info.err_mask))
qc->flags |= ATA_QCFLAG_FAILED;
}
@@ -161,8 +161,8 @@ static void sas_ata_task_done(struct sas_task *task)
qc->flags |= ATA_QCFLAG_FAILED;
}
- dev->sata_dev.tf.feature = 0x04; /* status err */
- dev->sata_dev.tf.command = ATA_ERR;
+ dev->sata_dev.fis[3] = 0x04; /* status err */
+ dev->sata_dev.fis[2] = ATA_ERR;
}
}
@@ -269,7 +269,7 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct domain_device *dev = qc->ap->private_data;
- memcpy(&qc->result_tf, &dev->sata_dev.tf, sizeof(qc->result_tf));
+ ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
return true;
}
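
The libsas hunks above keep the raw ending FIS instead of converting it to an ata_taskfile at completion time, and only call ata_tf_from_fis() when libata asks for the result taskfile; bytes 2 and 3 of a D2H Register FIS are the status and error registers, which is what the old tf.command/tf.feature accesses held. A small sketch of that relationship (the helper is illustrative):

#include <linux/libata.h>

/*
 * Illustrative only: a D2H Register FIS carries the status register in
 * byte 2 and the error register in byte 3, which is why the hunks above
 * can read and write dev->sata_dev.fis[2]/fis[3] directly and defer the
 * full conversion to ata_tf_from_fis().
 */
static u8 example_fis_status(const u8 *fis, struct ata_taskfile *tf)
{
        ata_tf_from_fis(fis, tf);   /* tf->command <- fis[2], tf->feature <- fis[3] */
        return tf->command;         /* same value as fis[2] */
}
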
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 6102ef2..9d46fcb 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -1792,7 +1792,7 @@ static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
static inline u8
_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
{
- return ioc->cpu_msix_table[smp_processor_id()];
+ return ioc->cpu_msix_table[raw_smp_processor_id()];
}
/**
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 6208d56..317a7fd 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -25,3 +25,12 @@ config SCSI_QLA_FC
Firmware images can be retrieved from:
ftp://ftp.qlogic.com/outgoing/linux/firmware/
+
+config TCM_QLA2XXX
+ tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
+ depends on SCSI_QLA_FC && TARGET_CORE
+ select LIBFC
+ select BTREE
+ default n
+ ---help---
+ Say Y here to enable the TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 5df782f..dce7d78 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,5 +1,6 @@
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
- qla_nx.o
+ qla_nx.o qla_target.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
+obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 5926f5a..5ab9530 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
+#include "qla_target.h"
#include <linux/kthread.h>
#include <linux/vmalloc.h>
@@ -576,6 +577,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
scsi_block_requests(vha->host);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
if (IS_QLA82XX(ha)) {
+ ha->flags.isp82xx_no_md_cap = 1;
qla82xx_idc_lock(ha);
qla82xx_set_reset_owner(vha);
qla82xx_idc_unlock(ha);
@@ -585,7 +587,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
scsi_unblock_requests(vha->host);
break;
case 0x2025d:
- if (!IS_QLA81XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
return -EPERM;
ql_log(ql_log_info, vha, 0x706f,
@@ -1105,9 +1107,8 @@ qla2x00_total_isp_aborts_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- struct qla_hw_data *ha = vha->hw;
return snprintf(buf, PAGE_SIZE, "%d\n",
- ha->qla_stats.total_isp_aborts);
+ vha->qla_stats.total_isp_aborts);
}
static ssize_t
@@ -1154,7 +1155,7 @@ qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
@@ -1537,7 +1538,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
dma_addr_t stats_dma;
struct fc_host_statistics *pfc_host_stat;
- pfc_host_stat = &ha->fc_host_stat;
+ pfc_host_stat = &vha->fc_host_stat;
memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
if (test_bit(UNLOADING, &vha->dpc_flags))
@@ -1580,8 +1581,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
pfc_host_stat->dumped_frames = stats->dumped_frames;
pfc_host_stat->nos_count = stats->nos_rcvd;
}
- pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
- pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
+ pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
+ pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
done_free:
dma_pool_free(ha->s_dma_pool, stats, stats_dma);
@@ -1737,6 +1738,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
fc_host_supported_speeds(vha->host) =
fc_host_supported_speeds(base_vha->host);
+ qlt_vport_create(vha, ha);
qla24xx_vport_disable(fc_vport, disable);
if (ha->flags.cpu_affinity_enabled) {
@@ -1951,12 +1953,16 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
- fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+ fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ?
+ (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
if (IS_CNA_CAPABLE(ha))
speed = FC_PORTSPEED_10GBIT;
+ else if (IS_QLA2031(ha))
+ speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
+ FC_PORTSPEED_4GBIT;
else if (IS_QLA25XX(ha))
speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index bc3cc6d..c688838 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -297,7 +297,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
/* Initialize all required fields of fcport */
fcport->vha = vha;
- fcport->vp_idx = vha->vp_idx;
fcport->d_id.b.al_pa =
bsg_job->request->rqst_data.h_els.port_id[0];
fcport->d_id.b.area =
@@ -483,7 +482,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
/* Initialize all required fields of fcport */
fcport->vha = vha;
- fcport->vp_idx = vha->vp_idx;
fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
@@ -544,7 +542,7 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
int rval = 0;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
goto done_set_internal;
new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
@@ -586,7 +584,7 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
uint16_t new_config[4];
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
goto done_reset_internal;
memset(new_config, 0 , sizeof(new_config));
@@ -710,8 +708,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
if ((ha->current_topology == ISP_CFG_F ||
- (atomic_read(&vha->loop_state) == LOOP_DOWN) ||
- ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
+ ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
&& req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
elreq.options == EXTERNAL_LOOPBACK) {
@@ -1402,6 +1399,9 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
if (rval)
return rval;
+ /* Set isp82xx_no_md_cap so the minidump is not captured */
+ ha->flags.isp82xx_no_md_cap = 1;
+
sg_copy_to_buffer(bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
ha->optrom_region_size);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 62324a1..fdee561 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,27 +11,31 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0120 | 0x4b,0xba,0xfa |
- * | Mailbox commands | 0x113e | 0x112c-0x112e |
+ * | Module Init and Probe | 0x0122 | 0x4b,0xba,0xfa |
+ * | Mailbox commands | 0x1140 | 0x111a-0x111b |
+ * | | | 0x112c-0x112e |
* | | | 0x113a |
* | Device Discovery | 0x2086 | 0x2020-0x2022 |
* | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 |
* | | | 0x302d-0x302e |
- * | DPC Thread | 0x401c | |
- * | Async Events | 0x505d | 0x502b-0x502f |
+ * | DPC Thread | 0x401c | 0x4002,0x4013 |
+ * | Async Events | 0x505f | 0x502b-0x502f |
* | | | 0x5047,0x5052 |
- * | Timer Routines | 0x6011 | 0x600e-0x600f |
+ * | Timer Routines | 0x6011 | |
* | User Space Interactions | 0x709f | 0x7018,0x702e, |
* | | | 0x7039,0x7045, |
* | | | 0x7073-0x7075, |
* | | | 0x708c |
* | Task Management | 0x803c | 0x8025-0x8026 |
* | | | 0x800b,0x8039 |
- * | AER/EEH | 0x900f | |
+ * | AER/EEH | 0x9011 | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb054 | 0xb053 |
+ * | ISP82XX Specific | 0xb054 | 0xb024 |
* | MultiQ | 0xc00c | |
* | Misc | 0xd010 | |
+ * | Target Mode | 0xe06f | |
+ * | Target Mode Management | 0xf071 | |
+ * | Target Mode Task Management | 0x1000b | |
* ----------------------------------------------------------------------
*/
@@ -379,6 +383,54 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
}
static inline void *
+qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
+ uint32_t **last_chain)
+{
+ struct qla2xxx_mqueue_chain *q;
+ struct qla2xxx_mqueue_header *qh;
+ uint32_t num_queues;
+ int que;
+ struct {
+ int length;
+ void *ring;
+ } aq, *aqp;
+
+ if (!ha->tgt.atio_q_length)
+ return ptr;
+
+ num_queues = 1;
+ aqp = &aq;
+ aqp->length = ha->tgt.atio_q_length;
+ aqp->ring = ha->tgt.atio_ring;
+
+ for (que = 0; que < num_queues; que++) {
+ /* aqp = ha->atio_q_map[que]; */
+ q = ptr;
+ *last_chain = &q->type;
+ q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
+ q->chain_size = htonl(
+ sizeof(struct qla2xxx_mqueue_chain) +
+ sizeof(struct qla2xxx_mqueue_header) +
+ (aqp->length * sizeof(request_t)));
+ ptr += sizeof(struct qla2xxx_mqueue_chain);
+
+ /* Add header. */
+ qh = ptr;
+ qh->queue = __constant_htonl(TYPE_ATIO_QUEUE);
+ qh->number = htonl(que);
+ qh->size = htonl(aqp->length * sizeof(request_t));
+ ptr += sizeof(struct qla2xxx_mqueue_header);
+
+ /* Add data. */
+ memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
+
+ ptr += aqp->length * sizeof(request_t);
+ }
+
+ return ptr;
+}
+
+static inline void *
qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
{
struct qla2xxx_mqueue_chain *q;
@@ -873,6 +925,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
struct qla24xx_fw_dump *fw;
uint32_t ext_mem_cnt;
void *nxt;
+ void *nxt_chain;
+ uint32_t *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
if (IS_QLA82XX(ha))
@@ -1091,6 +1145,16 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
qla24xx_copy_eft(ha, nxt);
+ nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
+ nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
+ if (last_chain) {
+ ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
+ *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
+ }
+
+ /* Adjust valid length. */
+ ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
+
qla24xx_fw_dump_failed_0:
qla2xxx_dump_post_process(base_vha, rval);
@@ -1399,6 +1463,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Chain entries -- started with MQ. */
nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+ nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
@@ -1717,6 +1782,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Chain entries -- started with MQ. */
nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+ nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
@@ -2218,6 +2284,7 @@ copy_queue:
/* Chain entries -- started with MQ. */
nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
+ nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
if (last_chain) {
ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
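
Note on the dump chain additions above: qla2xxx_copy_atioqueues() appends one more chain entry to the firmware dump, in the same format the request/response queue chains use -- a chain header carrying the total entry size, a queue header identifying TYPE_ATIO_QUEUE, then the raw ring contents; the last entry later gets DUMP_CHAIN_LAST or'd into its type. A standalone sketch of that packing (the struct layouts and the DUMP_CHAIN_QUEUE value are illustrative stand-ins, not the driver's definitions):

#include <arpa/inet.h>          /* htonl */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define DUMP_CHAIN_QUEUE 0x4    /* placeholder value */
#define TYPE_ATIO_QUEUE  0x3

struct chain_hdr { uint32_t type; uint32_t chain_size; };
struct queue_hdr { uint32_t queue; uint32_t number; uint32_t size; };

/* Append one queue chain entry at 'ptr'; returns the advanced pointer. */
static void *copy_queue_chain(void *ptr, uint32_t qtype, uint32_t qnum,
                              const void *ring, uint32_t ring_bytes)
{
        struct chain_hdr *c = ptr;
        struct queue_hdr *q;

        c->type = htonl(DUMP_CHAIN_QUEUE);
        c->chain_size = htonl(sizeof(*c) + sizeof(*q) + ring_bytes);
        ptr = (char *)ptr + sizeof(*c);

        q = ptr;
        q->queue = htonl(qtype);
        q->number = htonl(qnum);
        q->size = htonl(ring_bytes);
        ptr = (char *)ptr + sizeof(*q);

        memcpy(ptr, ring, ring_bytes);  /* raw ring follows the headers */
        return (char *)ptr + ring_bytes;
}

int main(void)
{
        uint8_t ring[128] = { 0 }, dump[256];
        void *end = copy_queue_chain(dump, TYPE_ATIO_QUEUE, 0, ring, sizeof(ring));

        printf("chain entry uses %td bytes\n", (char *)end - (char *)dump);
        return 0;
}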
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 2157bdf..f278df8 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -244,6 +244,7 @@ struct qla2xxx_mqueue_header {
uint32_t queue;
#define TYPE_REQUEST_QUEUE 0x1
#define TYPE_RESPONSE_QUEUE 0x2
+#define TYPE_ATIO_QUEUE 0x3
uint32_t number;
uint32_t size;
};
@@ -339,3 +340,11 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
#define ql_dbg_misc 0x00010000 /* For dumping everything that is not
* covered by upper categories
*/
+#define ql_dbg_verbose 0x00008000 /* More verbosity for each level
+ * This is to be used with other levels where
+ * more verbosity is required. It might not
+ * be applicable to all the levels.
+ */
+#define ql_dbg_tgt 0x00004000 /* Target mode */
+#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
+#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
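
Note on the new debug bits above: ql_dbg levels compose by adding (or'ing) masks, and a message tagged with ql_dbg_mbx + ql_dbg_verbose is only emitted when every bit it carries is enabled, which is how the chatty "Entered/Done" traces get hidden unless the verbose bit is also turned on. A standalone sketch of that matching (the ql_dbg_mbx value is a placeholder; only ql_dbg_verbose and ql_dbg_tgt are taken from the definitions above, and the real driver's matching code may differ in detail):

#include <stdint.h>
#include <stdio.h>

#define ql_dbg_mbx     0x00200000u      /* placeholder for the real mbx bit */
#define ql_dbg_verbose 0x00008000u
#define ql_dbg_tgt     0x00004000u

static uint32_t enabled_mask = ql_dbg_mbx;      /* verbose bit left off */

#define my_dbg(level, fmt, ...)                                         \
        do {                                                            \
                if (((level) & enabled_mask) == (level))                \
                        printf(fmt, ##__VA_ARGS__);                     \
        } while (0)

int main(void)
{
        my_dbg(ql_dbg_mbx, "printed: mbx tracing is on\n");
        my_dbg(ql_dbg_mbx + ql_dbg_verbose,
               "suppressed: verbose bit is not enabled\n");
        my_dbg(ql_dbg_tgt, "suppressed: target tracing is off\n");
        return 0;
}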
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a244303..39007f5 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -186,6 +186,7 @@
#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
+#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
struct req_que;
@@ -1234,11 +1235,27 @@ typedef struct {
* ISP queue - response queue entry definition.
*/
typedef struct {
- uint8_t data[60];
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle; /* System defined handle */
+ uint8_t data[52];
uint32_t signature;
#define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */
} response_t;
+/*
+ * ISP queue - ATIO queue entry definition.
+ */
+struct atio {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t data[58];
+ uint32_t signature;
+#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
+};
+
typedef union {
uint16_t extended;
struct {
@@ -1719,11 +1736,13 @@ typedef struct fc_port {
struct fc_rport *rport, *drport;
u32 supported_classes;
- uint16_t vp_idx;
uint8_t fc4_type;
uint8_t scan_state;
} fc_port_t;
+#define QLA_FCPORT_SCAN_NONE 0
+#define QLA_FCPORT_SCAN_FOUND 1
+
/*
* Fibre channel port/lun states.
*/
@@ -1747,6 +1766,7 @@ static const char * const port_state_str[] = {
#define FCF_LOGIN_NEEDED BIT_1
#define FCF_FCP2_DEVICE BIT_2
#define FCF_ASYNC_SENT BIT_3
+#define FCF_CONF_COMP_SUPPORTED BIT_4
/* No loop ID flag. */
#define FC_NO_LOOP_ID 0x1000
@@ -2419,6 +2439,40 @@ struct qlfc_fw {
uint32_t len;
};
+struct qlt_hw_data {
+ /* Protected by hw lock */
+ uint32_t enable_class_2:1;
+ uint32_t enable_explicit_conf:1;
+ uint32_t ini_mode_force_reverse:1;
+ uint32_t node_name_set:1;
+
+ dma_addr_t atio_dma; /* Physical address. */
+ struct atio *atio_ring; /* Base virtual address */
+ struct atio *atio_ring_ptr; /* Current address. */
+ uint16_t atio_ring_index; /* Current index. */
+ uint16_t atio_q_length;
+
+ void *target_lport_ptr;
+ struct qla_tgt_func_tmpl *tgt_ops;
+ struct qla_tgt *qla_tgt;
+ struct qla_tgt_cmd *cmds[MAX_OUTSTANDING_COMMANDS];
+ uint16_t current_handle;
+
+ struct qla_tgt_vp_map *tgt_vp_map;
+ struct mutex tgt_mutex;
+ struct mutex tgt_host_action_mutex;
+
+ int saved_set;
+ uint16_t saved_exchange_count;
+ uint32_t saved_firmware_options_1;
+ uint32_t saved_firmware_options_2;
+ uint32_t saved_firmware_options_3;
+ uint8_t saved_firmware_options[2];
+ uint8_t saved_add_firmware_options[2];
+
+ uint8_t tgt_node_name[WWN_SIZE];
+};
+
/*
* Qlogic host adapter specific data structure.
*/
@@ -2460,7 +2514,9 @@ struct qla_hw_data {
uint32_t thermal_supported:1;
uint32_t isp82xx_reset_hdlr_active:1;
uint32_t isp82xx_reset_owner:1;
- /* 28 bits */
+ uint32_t isp82xx_no_md_cap:1;
+ uint32_t host_shutting_down:1;
+ /* 30 bits */
} flags;
/* This spinlock is used to protect "io transactions", you must
@@ -2804,7 +2860,6 @@ struct qla_hw_data {
/* ISP2322: red, green, amber. */
uint16_t zio_mode;
uint16_t zio_timer;
- struct fc_host_statistics fc_host_stat;
struct qla_msix_entry *msix_entries;
@@ -2817,7 +2872,6 @@ struct qla_hw_data {
int cur_vport_count;
struct qla_chip_state_84xx *cs84xx;
- struct qla_statistics qla_stats;
struct isp_operations *isp_ops;
struct workqueue_struct *wq;
struct qlfc_fw fw_buf;
@@ -2863,6 +2917,8 @@ struct qla_hw_data {
dma_addr_t md_tmplt_hdr_dma;
void *md_dump;
uint32_t md_dump_size;
+
+ struct qlt_hw_data tgt;
};
/*
@@ -2920,6 +2976,7 @@ typedef struct scsi_qla_host {
#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */
#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */
+#define SCR_PENDING 21 /* SCR in target mode */
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
@@ -2979,10 +3036,21 @@ typedef struct scsi_qla_host {
struct req_que *req;
int fw_heartbeat_counter;
int seconds_since_last_heartbeat;
+ struct fc_host_statistics fc_host_stat;
+ struct qla_statistics qla_stats;
atomic_t vref_count;
} scsi_qla_host_t;
+#define SET_VP_IDX 1
+#define SET_AL_PA 2
+#define RESET_VP_IDX 3
+#define RESET_AL_PA 4
+struct qla_tgt_vp_map {
+ uint8_t idx;
+ scsi_qla_host_t *vha;
+};
+
/*
* Macros to help code, maintain, etc.
*/
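
Note on the queue-entry layouts above: both response_t and the new struct atio keep the 64-byte IOCB entry size, with the 0xDEADDEAD signature in the last word so the driver can tell which entries it has already consumed. A small compile-time check of mirrored layouts (local copies with natural alignment, not the driver's types):

#include <stdint.h>

struct response_entry {                 /* mirrors response_t */
        uint8_t  entry_type;
        uint8_t  entry_count;
        uint8_t  sys_define;
        uint8_t  entry_status;
        uint32_t handle;
        uint8_t  data[52];
        uint32_t signature;
};

struct atio_entry {                     /* mirrors struct atio */
        uint8_t  entry_type;
        uint8_t  entry_count;
        uint8_t  data[58];
        uint32_t signature;
};

_Static_assert(sizeof(struct response_entry) == 64, "response entry is 64 bytes");
_Static_assert(sizeof(struct atio_entry) == 64, "ATIO entry is 64 bytes");

int main(void) { return 0; }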
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 9f06580..9eacd2d 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -175,6 +175,7 @@ extern int qla2x00_vp_abort_isp(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_iocb.c source file.
*/
+
extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
@@ -188,6 +189,8 @@ extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
extern int qla24xx_dif_start_scsi(srb_t *);
+extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
+extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
/*
* Global Function Prototypes in qla_mbx.c source file.
@@ -239,6 +242,9 @@ extern int
qla2x00_init_firmware(scsi_qla_host_t *, uint16_t);
extern int
+qla2x00_get_node_name_list(scsi_qla_host_t *, void **, int *);
+
+extern int
qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t);
extern int
@@ -383,6 +389,8 @@ extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
extern void qla2x00_free_irqs(scsi_qla_host_t *);
extern int qla2x00_get_data_rate(scsi_qla_host_t *);
+extern char *qla2x00_get_link_speed_str(struct qla_hw_data *);
+
/*
* Global Function Prototypes in qla_sup.c source file.
*/
@@ -546,6 +554,7 @@ extern void qla2x00_sp_free(void *, void *);
extern void qla2x00_sp_timeout(unsigned long);
extern void qla2x00_bsg_job_done(void *, void *, int);
extern void qla2x00_bsg_sp_free(void *, void *);
+extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
/* Interrupt related */
extern irqreturn_t qla82xx_intr_handler(int, void *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 3128f80..05260d2 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
+#include "qla_target.h"
static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
@@ -556,7 +557,8 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
- ct_req->req.rff_id.fc4_feature = BIT_1;
+ qlt_rff_id(vha, ct_req);
+
ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */
/* Execute MS IOCB */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b946564..ca50847 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -17,6 +17,9 @@
#include <asm/prom.h>
#endif
+#include <target/target_core_base.h>
+#include "qla_target.h"
+
/*
* QLogic ISP2x00 Hardware Support Function Prototypes.
*/
@@ -518,7 +521,10 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
return QLA_FUNCTION_FAILED;
}
}
- rval = qla2x00_init_rings(vha);
+
+ if (qla_ini_mode_enabled(vha))
+ rval = qla2x00_init_rings(vha);
+
ha->flags.chip_reset_done = 1;
if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
@@ -1233,6 +1239,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
mq_size += ha->max_rsp_queues *
(rsp->length * sizeof(response_t));
}
+ if (ha->tgt.atio_q_length)
+ mq_size += ha->tgt.atio_q_length * sizeof(request_t);
/* Allocate memory for Fibre Channel Event Buffer. */
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
goto try_eft;
@@ -1696,6 +1704,12 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
+ /* Setup ATIO queue dma pointers for target mode */
+ icb->atio_q_inpointer = __constant_cpu_to_le16(0);
+ icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
+ icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
+ icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
+
if (ha->mqenable || IS_QLA83XX(ha)) {
icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
icb->rid = __constant_cpu_to_le16(rid);
@@ -1739,6 +1753,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
}
+ qlt_24xx_config_rings(vha, reg);
+
/* PCI posting */
RD_REG_DWORD(&ioreg->hccr);
}
@@ -1794,6 +1810,11 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
spin_unlock(&ha->vport_slock);
+ ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
+ ha->tgt.atio_ring_index = 0;
+ /* Initialize ATIO queue entries */
+ qlt_init_atio_q_entries(vha);
+
ha->isp_ops->config_rings(vha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2051,6 +2072,10 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
vha->d_id.b.area = area;
vha->d_id.b.al_pa = al_pa;
+ spin_lock(&ha->vport_slock);
+ qlt_update_vp_map(vha, SET_AL_PA);
+ spin_unlock(&ha->vport_slock);
+
if (!vha->flags.init_done)
ql_log(ql_log_info, vha, 0x2010,
"Topology - %s, Host Loop address 0x%x.\n",
@@ -2185,7 +2210,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
/* Reset NVRAM data. */
ql_log(ql_log_warn, vha, 0x0064,
- "Inconisistent NVRAM "
+ "Inconsistent NVRAM "
"detected: checksum=0x%x id=%c version=0x%x.\n",
chksum, nv->id[0], nv->nvram_version);
ql_log(ql_log_warn, vha, 0x0065,
@@ -2270,7 +2295,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
if (IS_QLA23XX(ha)) {
nv->firmware_options[0] |= BIT_2;
nv->firmware_options[0] &= ~BIT_3;
- nv->firmware_options[0] &= ~BIT_6;
+ nv->special_options[0] &= ~BIT_6;
nv->add_firmware_options[1] |= BIT_5 | BIT_4;
if (IS_QLA2300(ha)) {
@@ -2467,14 +2492,21 @@ qla2x00_rport_del(void *data)
{
fc_port_t *fcport = data;
struct fc_rport *rport;
+ scsi_qla_host_t *vha = fcport->vha;
unsigned long flags;
spin_lock_irqsave(fcport->vha->host->host_lock, flags);
rport = fcport->drport ? fcport->drport: fcport->rport;
fcport->drport = NULL;
spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
- if (rport)
+ if (rport) {
fc_remote_port_delete(rport);
+ /*
+ * Release the target mode FC NEXUS in qla_target.c code
+ * if target mode is enabled.
+ */
+ qlt_fc_port_deleted(vha, fcport);
+ }
}
/**
@@ -2495,11 +2527,11 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
/* Setup fcport template structure. */
fcport->vha = vha;
- fcport->vp_idx = vha->vp_idx;
fcport->port_type = FCT_UNKNOWN;
fcport->loop_id = FC_NO_LOOP_ID;
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
fcport->supported_classes = FC_COS_UNSPECIFIED;
+ fcport->scan_state = QLA_FCPORT_SCAN_NONE;
return fcport;
}
@@ -2726,7 +2758,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
new_fcport->d_id.b.area = area;
new_fcport->d_id.b.al_pa = al_pa;
new_fcport->loop_id = loop_id;
- new_fcport->vp_idx = vha->vp_idx;
rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
if (rval2 != QLA_SUCCESS) {
ql_dbg(ql_dbg_disc, vha, 0x201a,
@@ -2760,10 +2791,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
if (!found) {
/* New device, add to fcports list. */
- if (vha->vp_idx) {
- new_fcport->vha = vha;
- new_fcport->vp_idx = vha->vp_idx;
- }
list_add_tail(&new_fcport->list, &vha->vp_fcports);
/* Allocate a new replacement fcport. */
@@ -2800,8 +2827,6 @@ cleanup_allocation:
static void
qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
-#define LS_UNKNOWN 2
- static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
char *link_speed;
int rval;
uint16_t mb[4];
@@ -2829,11 +2854,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->port_name[6], fcport->port_name[7], rval,
fcport->fp_speed, mb[0], mb[1]);
} else {
- link_speed = link_speeds[LS_UNKNOWN];
- if (fcport->fp_speed < 5)
- link_speed = link_speeds[fcport->fp_speed];
- else if (fcport->fp_speed == 0x13)
- link_speed = link_speeds[5];
+ link_speed = qla2x00_get_link_speed_str(ha);
ql_dbg(ql_dbg_disc, vha, 0x2005,
"iIDMA adjusted to %s GB/s "
"on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed,
@@ -2864,6 +2885,12 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
"Unable to allocate fc remote port.\n");
return;
}
+ /*
+ * Create target mode FC NEXUS in qla_target.c if target mode is
+ * enabled.
+ */
+ qlt_fc_port_added(vha, fcport);
+
spin_lock_irqsave(fcport->vha->host->host_lock, flags);
*((fc_port_t **)rport->dd_data) = fcport;
spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
@@ -2921,7 +2948,7 @@ static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
int rval;
- fc_port_t *fcport, *fcptemp;
+ fc_port_t *fcport;
uint16_t next_loopid;
uint16_t mb[MAILBOX_REGISTER_COUNT];
uint16_t loop_id;
@@ -2959,7 +2986,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
0xfc, mb, BIT_1|BIT_0);
if (rval != QLA_SUCCESS) {
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- return rval;
+ break;
}
if (mb[0] != MBS_COMMAND_COMPLETE) {
ql_dbg(ql_dbg_disc, vha, 0x2042,
@@ -2991,21 +3018,16 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
}
-#define QLA_FCPORT_SCAN 1
-#define QLA_FCPORT_FOUND 2
-
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- fcport->scan_state = QLA_FCPORT_SCAN;
- }
-
rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
if (rval != QLA_SUCCESS)
break;
- /*
- * Logout all previous fabric devices marked lost, except
- * FCP2 devices.
- */
+ /* Add new ports to existing port list */
+ list_splice_tail_init(&new_fcports, &vha->vp_fcports);
+
+ /* Starting free loop ID. */
+ next_loopid = ha->min_external_loopid;
+
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
@@ -3013,7 +3035,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
continue;
- if (fcport->scan_state == QLA_FCPORT_SCAN &&
+ /* Logout lost/gone fabric devices (non-FCP2) */
+ if (fcport->scan_state != QLA_FCPORT_SCAN_FOUND &&
atomic_read(&fcport->state) == FCS_ONLINE) {
qla2x00_mark_device_lost(vha, fcport,
ql2xplogiabsentdevice, 0);
@@ -3026,78 +3049,30 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
fcport->d_id.b.domain,
fcport->d_id.b.area,
fcport->d_id.b.al_pa);
- fcport->loop_id = FC_NO_LOOP_ID;
}
- }
- }
-
- /* Starting free loop ID. */
- next_loopid = ha->min_external_loopid;
-
- /*
- * Scan through our port list and login entries that need to be
- * logged in.
- */
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (atomic_read(&vha->loop_down_timer) ||
- test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
- break;
-
- if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
- (fcport->flags & FCF_LOGIN_NEEDED) == 0)
continue;
-
- if (fcport->loop_id == FC_NO_LOOP_ID) {
- fcport->loop_id = next_loopid;
- rval = qla2x00_find_new_loop_id(
- base_vha, fcport);
- if (rval != QLA_SUCCESS) {
- /* Ran out of IDs to use */
- break;
- }
}
- /* Login and update database */
- qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
- }
-
- /* Exit if out of loop IDs. */
- if (rval != QLA_SUCCESS) {
- break;
- }
-
- /*
- * Login and add the new devices to our port list.
- */
- list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
- if (atomic_read(&vha->loop_down_timer) ||
- test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
- break;
-
- /* Find a new loop ID to use. */
- fcport->loop_id = next_loopid;
- rval = qla2x00_find_new_loop_id(base_vha, fcport);
- if (rval != QLA_SUCCESS) {
- /* Ran out of IDs to use */
- break;
+ fcport->scan_state = QLA_FCPORT_SCAN_NONE;
+
+ /* Login fabric devices that need a login */
+ if ((fcport->flags & FCF_LOGIN_NEEDED) != 0 &&
+ atomic_read(&vha->loop_down_timer) == 0) {
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ fcport->loop_id = next_loopid;
+ rval = qla2x00_find_new_loop_id(
+ base_vha, fcport);
+ if (rval != QLA_SUCCESS) {
+ /* Ran out of IDs to use */
+ continue;
+ }
+ }
}
/* Login and update database */
qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
-
- if (vha->vp_idx) {
- fcport->vha = vha;
- fcport->vp_idx = vha->vp_idx;
- }
- list_move_tail(&fcport->list, &vha->vp_fcports);
}
} while (0);
- /* Free all new device structures not processed. */
- list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
- list_del(&fcport->list);
- kfree(fcport);
- }
-
if (rval) {
ql_dbg(ql_dbg_disc, vha, 0x2068,
"Configure fabric error exit rval=%d.\n", rval);
@@ -3287,7 +3262,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
WWN_SIZE))
continue;
- fcport->scan_state = QLA_FCPORT_FOUND;
+ fcport->scan_state = QLA_FCPORT_SCAN_FOUND;
found++;
@@ -3595,6 +3570,12 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
if (mb[10] & BIT_1)
fcport->supported_classes |= FC_COS_CLASS3;
+ if (IS_FWI2_CAPABLE(ha)) {
+ if (mb[10] & BIT_7)
+ fcport->flags |=
+ FCF_CONF_COMP_SUPPORTED;
+ }
+
rval = QLA_SUCCESS;
break;
} else if (mb[0] == MBS_LOOP_ID_USED) {
@@ -3841,7 +3822,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
vha->flags.online = 0;
ha->flags.chip_reset_done = 0;
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- ha->qla_stats.total_isp_aborts++;
+ vha->qla_stats.total_isp_aborts++;
ql_log(ql_log_info, vha, 0x00af,
"Performing ISP error recovery - ha=%p.\n", ha);
@@ -4066,6 +4047,7 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct rsp_que *rsp = ha->rsp_q_map[0];
+ unsigned long flags;
/* If firmware needs to be loaded */
if (qla2x00_isp_firmware(vha)) {
@@ -4090,6 +4072,16 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
vha->flags.online = 1;
+
+ /*
+ * Process any ATIO queue entries that came in
+ * while we weren't online.
+ */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (qla_tgt_mode_enabled(vha))
+ qlt_24xx_process_atio_queue(vha);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
/* Wait at most MAX_TARGET RSCNs for a stable link. */
wait_time = 256;
do {
@@ -4279,7 +4271,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
/* Reset NVRAM data. */
ql_log(ql_log_warn, vha, 0x006b,
- "Inconisistent NVRAM detected: checksum=0x%x id=%c "
+ "Inconsistent NVRAM detected: checksum=0x%x id=%c "
"version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
ql_log(ql_log_warn, vha, 0x006c,
"Falling back to functioning (yet invalid -- WWPN) "
@@ -4330,6 +4322,15 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
rval = 1;
}
+ if (!qla_ini_mode_enabled(vha)) {
+ /* Don't enable full login after initial LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+ /* Don't enable LIP full login for initiator */
+ nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+ }
+
+ qlt_24xx_config_nvram_stage1(vha, nv);
+
/* Reset Initialization control block */
memset(icb, 0, ha->init_cb_size);
@@ -4357,8 +4358,10 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
"QLA2462");
- /* Use alternate WWN? */
+ qlt_24xx_config_nvram_stage2(vha, icb);
+
if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
+ /* Use alternate WWN? */
memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
}
@@ -5029,7 +5032,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
/* Reset NVRAM data. */
ql_log(ql_log_info, vha, 0x0073,
- "Inconisistent NVRAM detected: checksum=0x%x id=%c "
+ "Inconsistent NVRAM detected: checksum=0x%x id=%c "
"version=0x%x.\n", chksum, nv->id[0],
le16_to_cpu(nv->nvram_version));
ql_log(ql_log_info, vha, 0x0074,
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index eac9509..70dbf53 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
+#include "qla_target.h"
#include <linux/blkdev.h>
#include <linux/delay.h>
@@ -23,18 +24,17 @@ qla2x00_get_cmd_direction(srb_t *sp)
{
uint16_t cflags;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+ struct scsi_qla_host *vha = sp->fcport->vha;
cflags = 0;
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cflags = CF_WRITE;
- sp->fcport->vha->hw->qla_stats.output_bytes +=
- scsi_bufflen(cmd);
+ vha->qla_stats.output_bytes += scsi_bufflen(cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cflags = CF_READ;
- sp->fcport->vha->hw->qla_stats.input_bytes +=
- scsi_bufflen(cmd);
+ vha->qla_stats.input_bytes += scsi_bufflen(cmd);
}
return (cflags);
}
@@ -385,9 +385,10 @@ qla2x00_start_scsi(srb_t *sp)
else
req->cnt = req->length -
(req->ring_index - cnt);
+ /* If still no head room then bail out */
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
}
- if (req->cnt < (req_cnt + 2))
- goto queuing_error;
/* Build command packet */
req->current_outstanding_cmd = handle;
@@ -470,7 +471,7 @@ queuing_error:
/**
* qla2x00_start_iocbs() - Execute the IOCB command
*/
-static void
+void
qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
{
struct qla_hw_data *ha = vha->hw;
@@ -571,6 +572,29 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
return (ret);
}
+/*
+ * qla2x00_issue_marker
+ *
+ * Issue marker
+ * Caller CAN have hardware lock held as specified by the ha_locked parameter.
+ * Might release it, then reacquire.
+ */
+int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
+{
+ if (ha_locked) {
+ if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+ MK_SYNC_ALL) != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ } else {
+ if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
+ MK_SYNC_ALL) != QLA_SUCCESS)
+ return QLA_FUNCTION_FAILED;
+ }
+ vha->marker_needed = 0;
+
+ return QLA_SUCCESS;
+}
+
/**
* qla24xx_calc_iocbs() - Determine number of Command Type 3 and
* Continuation Type 1 IOCBs to allocate.
@@ -629,11 +653,11 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->control_flags =
__constant_cpu_to_le16(CF_WRITE_DATA);
- ha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.output_bytes += scsi_bufflen(cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->control_flags =
__constant_cpu_to_le16(CF_READ_DATA);
- ha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.input_bytes += scsi_bufflen(cmd);
}
cur_seg = scsi_sglist(cmd);
@@ -745,13 +769,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_WRITE_DATA);
- sp->fcport->vha->hw->qla_stats.output_bytes +=
- scsi_bufflen(cmd);
+ vha->qla_stats.output_bytes += scsi_bufflen(cmd);
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_READ_DATA);
- sp->fcport->vha->hw->qla_stats.input_bytes +=
- scsi_bufflen(cmd);
+ vha->qla_stats.input_bytes += scsi_bufflen(cmd);
}
/* One DSD is available in the Command Type 3 IOCB */
@@ -1245,7 +1267,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
return QLA_SUCCESS;
}
- cmd_pkt->vp_index = sp->fcport->vp_idx;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -1502,9 +1524,9 @@ qla24xx_start_scsi(srb_t *sp)
else
req->cnt = req->length -
(req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
}
- if (req->cnt < (req_cnt + 2))
- goto queuing_error;
/* Build command packet. */
req->current_outstanding_cmd = handle;
@@ -1527,7 +1549,7 @@ qla24xx_start_scsi(srb_t *sp)
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vp_idx;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
@@ -1717,11 +1739,10 @@ qla24xx_dif_start_scsi(srb_t *sp)
else
req->cnt = req->length -
(req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
}
- if (req->cnt < (req_cnt + 2))
- goto queuing_error;
-
status |= QDSS_GOT_Q_SPACE;
/* Build header part of command packet (excluding the OPCODE). */
@@ -1898,7 +1919,7 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
logio->port_id[2] = sp->fcport->d_id.b.domain;
- logio->vp_index = sp->fcport->vp_idx;
+ logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
@@ -1922,7 +1943,7 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
sp->fcport->d_id.b.al_pa);
- mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+ mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}
static void
@@ -1935,7 +1956,7 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
logio->port_id[2] = sp->fcport->d_id.b.domain;
- logio->vp_index = sp->fcport->vp_idx;
+ logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
@@ -1952,7 +1973,7 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
sp->fcport->d_id.b.al_pa);
- mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+ mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
/* Implicit: mbx->mbx10 = 0. */
}
@@ -1962,7 +1983,7 @@ qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- logio->vp_index = sp->fcport->vp_idx;
+ logio->vp_index = sp->fcport->vha->vp_idx;
}
static void
@@ -1983,7 +2004,7 @@ qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
- mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+ mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
}
static void
@@ -2009,7 +2030,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
tsk->port_id[0] = fcport->d_id.b.al_pa;
tsk->port_id[1] = fcport->d_id.b.area;
tsk->port_id[2] = fcport->d_id.b.domain;
- tsk->vp_index = fcport->vp_idx;
+ tsk->vp_index = fcport->vha->vp_idx;
if (flags == TCF_LUN_RESET) {
int_to_scsilun(lun, &tsk->lun);
@@ -2030,7 +2051,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->handle = sp->handle;
els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
- els_iocb->vp_index = sp->fcport->vp_idx;
+ els_iocb->vp_index = sp->fcport->vha->vp_idx;
els_iocb->sof_type = EST_SOFI3;
els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
@@ -2160,7 +2181,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
ct_iocb->handle = sp->handle;
ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- ct_iocb->vp_index = sp->fcport->vp_idx;
+ ct_iocb->vp_index = sp->fcport->vha->vp_idx;
ct_iocb->comp_status = __constant_cpu_to_le16(0);
ct_iocb->cmd_dsd_count =
@@ -2343,11 +2364,10 @@ sufficient_dsds:
else
req->cnt = req->length -
(req->ring_index - cnt);
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
}
- if (req->cnt < (req_cnt + 2))
- goto queuing_error;
-
ctx = sp->u.scmd.ctx =
mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
if (!ctx) {
@@ -2362,7 +2382,7 @@ sufficient_dsds:
if (!ctx->fcp_cmnd) {
ql_log(ql_log_fatal, vha, 0x3011,
"Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
- goto queuing_error_fcp_cmnd;
+ goto queuing_error;
}
/* Initialize the DSD list and dma handle */
@@ -2400,7 +2420,7 @@ sufficient_dsds:
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vp_idx;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
/* Build IOCB segments */
if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
@@ -2489,7 +2509,7 @@ sufficient_dsds:
cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vp_idx;
+ cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
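
Note on the IOCB paths above: they all recompute request-ring headroom the same way before jumping to queuing_error -- if the consumer index is ahead of the producer, the free space is their difference; otherwise it is the ring length minus the wrapped distance, and two slots are always kept in reserve. A standalone sketch of just that computation (names are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint16_t ring_free(uint16_t length, uint16_t in, uint16_t out)
{
        if (out > in)
                return out - in;                /* consumer ahead of producer */
        return length - (in - out);             /* wrapped around */
}

int main(void)
{
        uint16_t length = 2048;

        printf("free=%u\n", ring_free(length, 10, 4));  /* wrapped: 2042 */
        printf("free=%u\n", ring_free(length, 4, 10));  /* not wrapped: 6 */
        return 0;
}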
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ce42288..6f67a9d 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
+#include "qla_target.h"
#include <linux/delay.h>
#include <linux/slab.h>
@@ -309,6 +310,28 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
"IDC failed to post ACK.\n");
}
+#define LS_UNKNOWN 2
+char *
+qla2x00_get_link_speed_str(struct qla_hw_data *ha)
+{
+ static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"};
+ char *link_speed;
+ int fw_speed = ha->link_data_rate;
+
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
+ link_speed = link_speeds[0];
+ else if (fw_speed == 0x13)
+ link_speed = link_speeds[6];
+ else {
+ link_speed = link_speeds[LS_UNKNOWN];
+ if (fw_speed < 6)
+ link_speed =
+ link_speeds[fw_speed];
+ }
+
+ return link_speed;
+}
+
/**
* qla2x00_async_event() - Process asynchronous events.
* @ha: SCSI driver HA context
@@ -317,9 +340,6 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
-#define LS_UNKNOWN 2
- static char *link_speeds[] = { "1", "2", "?", "4", "8", "16", "10" };
- char *link_speed;
uint16_t handle_cnt;
uint16_t cnt, mbx;
uint32_t handles[5];
@@ -454,8 +474,8 @@ skip_rio:
case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
ql_dbg(ql_dbg_async, vha, 0x5008,
"Asynchronous WAKEUP_THRES.\n");
- break;
+ break;
case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
ql_dbg(ql_dbg_async, vha, 0x5009,
"LIP occurred (%x).\n", mb[1]);
@@ -479,20 +499,14 @@ skip_rio:
break;
case MBA_LOOP_UP: /* Loop Up Event */
- if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- link_speed = link_speeds[0];
+ if (IS_QLA2100(ha) || IS_QLA2200(ha))
ha->link_data_rate = PORT_SPEED_1GB;
- } else {
- link_speed = link_speeds[LS_UNKNOWN];
- if (mb[1] < 6)
- link_speed = link_speeds[mb[1]];
- else if (mb[1] == 0x13)
- link_speed = link_speeds[6];
+ else
ha->link_data_rate = mb[1];
- }
ql_dbg(ql_dbg_async, vha, 0x500a,
- "LOOP UP detected (%s Gbps).\n", link_speed);
+ "LOOP UP detected (%s Gbps).\n",
+ qla2x00_get_link_speed_str(ha));
vha->flags.management_server_logged_in = 0;
qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
@@ -638,6 +652,8 @@ skip_rio:
ql_dbg(ql_dbg_async, vha, 0x5010,
"Port unavailable %04x %04x %04x.\n",
mb[1], mb[2], mb[3]);
+ ql_log(ql_log_warn, vha, 0x505e,
+ "Link is offline.\n");
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -670,12 +686,17 @@ skip_rio:
ql_dbg(ql_dbg_async, vha, 0x5011,
"Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
mb[1], mb[2], mb[3]);
+
+ qlt_async_event(mb[0], vha, mb);
break;
}
ql_dbg(ql_dbg_async, vha, 0x5012,
"Port database changed %04x %04x %04x.\n",
mb[1], mb[2], mb[3]);
+ ql_log(ql_log_warn, vha, 0x505f,
+ "Link is operational (%s Gbps).\n",
+ qla2x00_get_link_speed_str(ha));
/*
* Mark all devices as missing so we will login again.
@@ -684,8 +705,13 @@ skip_rio:
qla2x00_mark_all_devices_lost(vha, 1);
+ if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
+ set_bit(SCR_PENDING, &vha->dpc_flags);
+
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+
+ qlt_async_event(mb[0], vha, mb);
break;
case MBA_RSCN_UPDATE: /* State Change Registration */
@@ -807,6 +833,8 @@ skip_rio:
mb[0], mb[1], mb[2], mb[3]);
}
+ qlt_async_event(mb[0], vha, mb);
+
if (!vha->vp_idx && ha->num_vhosts)
qla2x00_alert_all_vps(rsp, mb);
}
@@ -1172,6 +1200,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
} else if (iop[0] & BIT_5)
fcport->port_type = FCT_INITIATOR;
+ if (iop[0] & BIT_7)
+ fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+
if (logio->io_parameter[7] || logio->io_parameter[8])
fcport->supported_classes |= FC_COS_CLASS2;
if (logio->io_parameter[9] || logio->io_parameter[10])
@@ -1986,6 +2017,9 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (pkt->entry_status != 0) {
qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
+
+ (void)qlt_24xx_process_response_error(vha, pkt);
+
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
wmb();
continue;
@@ -2016,6 +2050,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
case ELS_IOCB_TYPE:
qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
break;
+ case ABTS_RECV_24XX:
+ /* ensure that the ATIO queue is empty */
+ qlt_24xx_process_atio_queue(vha);
+ case ABTS_RESP_24XX:
+ case CTIO_TYPE7:
+ case NOTIFY_ACK_TYPE:
+ qlt_response_pkt_all_vps(vha, (response_t *)pkt);
+ break;
case MARKER_TYPE:
/* Do nothing in this case, this check is to prevent it
* from falling into default case
@@ -2168,6 +2210,13 @@ qla24xx_intr_handler(int irq, void *dev_id)
case 0x14:
qla24xx_process_response_queue(vha, rsp);
break;
+ case 0x1C: /* ATIO queue updated */
+ qlt_24xx_process_atio_queue(vha);
+ break;
+ case 0x1D: /* ATIO and response queues updated */
+ qlt_24xx_process_atio_queue(vha);
+ qla24xx_process_response_queue(vha, rsp);
+ break;
default:
ql_dbg(ql_dbg_async, vha, 0x504f,
"Unrecognized interrupt type (%d).\n", stat * 0xff);
@@ -2312,6 +2361,13 @@ qla24xx_msix_default(int irq, void *dev_id)
case 0x14:
qla24xx_process_response_queue(vha, rsp);
break;
+ case 0x1C: /* ATIO queue updated */
+ qlt_24xx_process_atio_queue(vha);
+ break;
+ case 0x1D: /* ATIO and response queues updated */
+ qlt_24xx_process_atio_queue(vha);
+ qla24xx_process_response_queue(vha, rsp);
+ break;
default:
ql_dbg(ql_dbg_async, vha, 0x5051,
"Unrecognized interrupt type (%d).\n", stat & 0xff);
@@ -2564,7 +2620,15 @@ void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- struct rsp_que *rsp = ha->rsp_q_map[0];
+ struct rsp_que *rsp;
+
+ /*
+ * We need to check that ha->rsp_q_map is valid in case we are called
+ * from a probe failure context.
+ */
+ if (!ha->rsp_q_map || !ha->rsp_q_map[0])
+ return;
+ rsp = ha->rsp_q_map[0];
if (ha->flags.msix_enabled)
qla24xx_disable_msix(ha);
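
Note on qla2x00_get_link_speed_str() above: it centralizes the speed-code-to-string mapping that used to be duplicated in qla_init.c and the async-event handler -- codes 0 through 5 index the table directly and 0x13 is reported as 10 Gb/s. A trimmed standalone version (it drops the ISP2100/2200 special case):

#include <stdio.h>

static const char *link_speed_str(int fw_speed)
{
        static const char *speeds[] = { "1", "2", "?", "4", "8", "16", "10" };

        if (fw_speed == 0x13)
                return speeds[6];               /* FCoE 10 Gb/s code */
        if (fw_speed >= 0 && fw_speed < 6)
                return speeds[fw_speed];
        return speeds[2];                       /* unknown */
}

int main(void)
{
        printf("%s %s %s Gb/s\n",
               link_speed_str(3), link_speed_str(0x13), link_speed_str(9));
        return 0;
}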
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b4a2339..d5ce92c 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -5,6 +5,7 @@
* See LICENSE.qla2xxx for copyright and licensing details.
*/
#include "qla_def.h"
+#include "qla_target.h"
#include <linux/delay.h>
#include <linux/gfp.h>
@@ -270,11 +271,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ictrl = RD_REG_WORD(&reg->isp.ictrl);
}
ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
- "MBX Command timeout for cmd %x.\n", command);
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111a,
- "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111b,
- "mb[0] = 0x%x.\n", mb0);
+ "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+ "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
/*
@@ -320,7 +318,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
CRB_NIU_XG_PAUSE_CTL_P1);
}
ql_log(ql_log_info, base_vha, 0x101c,
- "Mailbox cmd timeout occured, cmd=0x%x, "
+ "Mailbox cmd timeout occurred, cmd=0x%x, "
"mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
"abort.\n", command, mcp->mb[0],
ha->flags.eeh_busy);
@@ -345,7 +343,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
CRB_NIU_XG_PAUSE_CTL_P1);
}
ql_log(ql_log_info, base_vha, 0x101e,
- "Mailbox cmd timeout occured, cmd=0x%x, "
+ "Mailbox cmd timeout occurred, cmd=0x%x, "
"mb[0]=0x%x. Scheduling ISP abort ",
command, mcp->mb[0]);
set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
@@ -390,7 +388,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
+ "Entered %s.\n", __func__);
if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
@@ -424,7 +423,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
ql_dbg(ql_dbg_mbx, vha, 0x1023,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
+ "Done %s.\n", __func__);
}
return rval;
@@ -454,7 +454,8 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
mcp->out_mb = MBX_0;
@@ -489,10 +490,11 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
if (IS_FWI2_CAPABLE(ha)) {
- ql_dbg(ql_dbg_mbx, vha, 0x1027,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027,
"Done exchanges=%x.\n", mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
+ "Done %s.\n", __func__);
}
}
@@ -523,7 +525,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
mcp->out_mb = MBX_0;
@@ -561,11 +564,11 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ha->fw_attributes_h = mcp->mb[15];
ha->fw_attributes_ext[0] = mcp->mb[16];
ha->fw_attributes_ext[1] = mcp->mb[17];
- ql_dbg(ql_dbg_mbx, vha, 0x1139,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
"%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
__func__, mcp->mb[15], mcp->mb[6]);
} else
- ql_dbg(ql_dbg_mbx, vha, 0x112f,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
"%s: FwAttributes [Upper] invalid, MB6:%04x\n",
__func__, mcp->mb[6]);
}
@@ -576,7 +579,8 @@ failed:
ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
+ "Done %s.\n", __func__);
}
return rval;
}
@@ -602,7 +606,8 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
mcp->out_mb = MBX_0;
@@ -620,7 +625,8 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
fwopts[2] = mcp->mb[2];
fwopts[3] = mcp->mb[3];
- ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
+ "Done %s.\n", __func__);
}
return rval;
@@ -648,7 +654,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
mcp->mb[1] = fwopts[1];
@@ -676,7 +683,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
"Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
+ "Done %s.\n", __func__);
}
return rval;
@@ -704,7 +712,8 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
mcp->mb[1] = 0xAAAA;
@@ -734,7 +743,8 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
+ "Done %s.\n", __func__);
}
return rval;
@@ -762,7 +772,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_VERIFY_CHECKSUM;
mcp->out_mb = MBX_0;
@@ -787,7 +798,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
"Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
(mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
+ "Done %s.\n", __func__);
}
return rval;
@@ -819,7 +831,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_IOCB_COMMAND_A64;
mcp->mb[1] = 0;
@@ -842,7 +855,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
/* Mask reserved bits. */
sts_entry->entry_status &=
IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
- ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
+ "Done %s.\n", __func__);
}
return rval;
@@ -884,7 +898,8 @@ qla2x00_abort_command(srb_t *sp)
struct req_que *req = vha->req;
struct scsi_cmnd *cmd = GET_CMD_SP(sp);
- ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
+ "Entered %s.\n", __func__);
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -915,7 +930,8 @@ qla2x00_abort_command(srb_t *sp)
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
+ "Done %s.\n", __func__);
}
return rval;
@@ -934,7 +950,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
l = l;
vha = fcport->vha;
- ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
+ "Entered %s.\n", __func__);
req = vha->hw->req_q_map[0];
rsp = req->rsp;
@@ -955,7 +972,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
+ "Failed=%x.\n", rval);
}
/* Issue marker IOCB. */
@@ -965,7 +983,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
ql_dbg(ql_dbg_mbx, vha, 0x1040,
"Failed to issue marker IOCB (%x).\n", rval2);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
+ "Done %s.\n", __func__);
}
return rval;
@@ -983,7 +1002,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
vha = fcport->vha;
- ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
+ "Entered %s.\n", __func__);
req = vha->hw->req_q_map[0];
rsp = req->rsp;
@@ -1012,7 +1032,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
ql_dbg(ql_dbg_mbx, vha, 0x1044,
"Failed to issue marker IOCB (%x).\n", rval2);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1046,7 +1067,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
mcp->mb[9] = vha->vp_idx;
@@ -1074,7 +1096,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
/*EMPTY*/
ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
+ "Done %s.\n", __func__);
if (IS_CNA_CAPABLE(vha->hw)) {
vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
@@ -1115,7 +1138,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_RETRY_COUNT;
mcp->out_mb = MBX_0;
@@ -1138,7 +1162,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
*tov = ratov;
}
- ql_dbg(ql_dbg_mbx, vha, 0x104b,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
"Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
}
@@ -1170,7 +1194,8 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
+ "Entered %s.\n", __func__);
if (IS_QLA82XX(ha) && ql2xdbwr)
qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
@@ -1213,9 +1238,100 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
+ * qla2x00_get_node_name_list
+ * Issue get node name list mailbox command, kmalloc()
+ * and return the resulting list. Caller must kfree() it!
+ *
+ * Input:
+ * vha = adapter state pointer.
+ * out_data = resulting list
+ * out_len = length of the resulting list
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_port_24xx_data *list = NULL;
+ void *pmap;
+ mbx_cmd_t mc;
+ dma_addr_t pmap_dma;
+ ulong dma_size;
+ int rval, left;
+
+ left = 1;
+ while (left > 0) {
+ dma_size = left * sizeof(*list);
+ pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size,
+ &pmap_dma, GFP_KERNEL);
+ if (!pmap) {
+ ql_log(ql_log_warn, vha, 0x113f,
+ "%s(%ld): DMA Alloc failed of %ld\n",
+ __func__, vha->host_no, dma_size);
+ rval = QLA_MEMORY_ALLOC_FAILED;
+ goto out;
+ }
+
+ mc.mb[0] = MBC_PORT_NODE_NAME_LIST;
+ mc.mb[1] = BIT_1 | BIT_3;
+ mc.mb[2] = MSW(pmap_dma);
+ mc.mb[3] = LSW(pmap_dma);
+ mc.mb[6] = MSW(MSD(pmap_dma));
+ mc.mb[7] = LSW(MSD(pmap_dma));
+ mc.mb[8] = dma_size;
+ mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8;
+ mc.in_mb = MBX_0|MBX_1;
+ mc.tov = 30;
+ mc.flags = MBX_DMA_IN;
+
+ rval = qla2x00_mailbox_command(vha, &mc);
+ if (rval != QLA_SUCCESS) {
+ if ((mc.mb[0] == MBS_COMMAND_ERROR) &&
+ (mc.mb[1] == 0xA)) {
+ left += le16_to_cpu(mc.mb[2]) /
+ sizeof(struct qla_port_24xx_data);
+ goto restart;
+ }
+ goto out_free;
+ }
+
+ left = 0;
+
+ list = kzalloc(dma_size, GFP_KERNEL);
+ if (!list) {
+ ql_log(ql_log_warn, vha, 0x1140,
+ "%s(%ld): failed to allocate node names list "
+ "structure.\n", __func__, vha->host_no);
+ rval = QLA_MEMORY_ALLOC_FAILED;
+ goto out_free;
+ }
+
+ memcpy(list, pmap, dma_size);
+restart:
+ dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
}
+ *out_data = list;
+ *out_len = dma_size;
+
+out:
+ return rval;
+
+out_free:
+ dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
return rval;
}
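
For orientation, a minimal sketch of how a caller might consume the new qla2x00_get_node_name_list() helper; the wrapper name below is hypothetical, and only the ownership rule spelled out in the comment block above (the caller must kfree() the returned buffer) is taken from the patch itself.

	/* Hypothetical caller -- assumes the qla2xxx driver headers. */
	static void qla2xxx_example_walk_node_names(scsi_qla_host_t *vha)
	{
		void *list = NULL;
		int len = 0, rval;

		rval = qla2x00_get_node_name_list(vha, &list, &len);
		if (rval != QLA_SUCCESS)
			return;

		/*
		 * len / sizeof(struct qla_port_24xx_data) entries are now
		 * available; the per-entry layout lives in the driver headers.
		 */

		kfree(list);	/* the caller owns the kmalloc()ed buffer */
	}
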
@@ -1246,7 +1362,8 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
dma_addr_t pd_dma;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
+ "Entered %s.\n", __func__);
pd24 = NULL;
pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
@@ -1326,6 +1443,13 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
fcport->port_type = FCT_INITIATOR;
else
fcport->port_type = FCT_TARGET;
+
+ /* Passback COS information. */
+ fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
+ FC_COS_CLASS2 : FC_COS_CLASS3;
+
+ if (pd24->prli_svc_param_word_3[0] & BIT_7)
+ fcport->flags |= FCF_CONF_COMP_SUPPORTED;
} else {
uint64_t zero = 0;
@@ -1378,7 +1502,8 @@ gpd_error_out:
"Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1407,7 +1532,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
mcp->out_mb = MBX_0;
@@ -1433,7 +1559,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1465,7 +1592,8 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_PORT_NAME;
mcp->mb[9] = vha->vp_idx;
@@ -1499,7 +1627,8 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
name[7] = LSB(mcp->mb[7]);
}
- ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1527,7 +1656,8 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
+ "Entered %s.\n", __func__);
if (IS_CNA_CAPABLE(vha->hw)) {
/* Logout across all FCFs. */
@@ -1564,7 +1694,8 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1596,9 +1727,10 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
+ "Entered %s.\n", __func__);
- ql_dbg(ql_dbg_mbx, vha, 0x105e,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
"Retry cnt=%d ratov=%d total tov=%d.\n",
vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
@@ -1622,7 +1754,8 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
rval, mcp->mb[0], mcp->mb[1]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1641,7 +1774,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
struct req_que *req;
struct rsp_que *rsp;
- ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
+ "Entered %s.\n", __func__);
if (ha->flags.cpu_affinity_enabled)
req = ha->req_q_map[0];
@@ -1715,7 +1849,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
break;
}
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
+ "Done %s.\n", __func__);
iop[0] = le32_to_cpu(lg->io_parameter[0]);
@@ -1733,6 +1868,10 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
mb[10] |= BIT_0; /* Class 2. */
if (lg->io_parameter[9] || lg->io_parameter[10])
mb[10] |= BIT_1; /* Class 3. */
+ if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7))
+ mb[10] |= BIT_7; /* Confirmed Completion
+ * Allowed
+ */
}
dma_pool_free(ha->s_dma_pool, lg, lg_dma);
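
The hunk above folds the firmware's login IOCB io_parameter words into mb[10]: BIT_0 for class 2 support, BIT_1 for class 3, and now BIT_7 when the port allows confirmed completions. A hypothetical decode on the consumer side could look like the sketch below; the helper name is illustrative, and only the bit meanings come from the hunk itself.

	/* Illustrative decode of the mb[10] class-of-service bits. */
	static void qla2xxx_example_decode_cos(uint16_t mb10, fc_port_t *fcport)
	{
		fcport->supported_classes = FC_COS_UNSPECIFIED;

		if (mb10 & BIT_0)
			fcport->supported_classes |= FC_COS_CLASS2;
		if (mb10 & BIT_1)
			fcport->supported_classes |= FC_COS_CLASS3;
		if (mb10 & BIT_7)	/* confirmed completion allowed */
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;
	}
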
@@ -1770,7 +1909,8 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1818,7 +1958,8 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
+ "Done %s.\n", __func__);
}
return rval;
@@ -1849,7 +1990,8 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
+ "Entered %s.\n", __func__);
if (IS_FWI2_CAPABLE(ha))
return qla24xx_login_fabric(vha, fcport->loop_id,
@@ -1891,7 +2033,8 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
+ "Done %s.\n", __func__);
}
return (rval);
@@ -1908,7 +2051,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
struct req_que *req;
struct rsp_que *rsp;
- ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
+ "Entered %s.\n", __func__);
lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
if (lg == NULL) {
@@ -1952,7 +2096,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
le32_to_cpu(lg->io_parameter[1]));
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
+ "Done %s.\n", __func__);
}
dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1984,7 +2129,8 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
mcp->out_mb = MBX_1|MBX_0;
@@ -2007,7 +2153,8 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
"Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2035,7 +2182,8 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_LIP_FULL_LOGIN;
mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
@@ -2052,7 +2200,8 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2078,7 +2227,8 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
+ "Entered %s.\n", __func__);
if (id_list == NULL)
return QLA_FUNCTION_FAILED;
@@ -2110,7 +2260,8 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
} else {
*entries = mcp->mb[1];
- ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2138,7 +2289,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
mcp->out_mb = MBX_0;
@@ -2154,7 +2306,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
ql_dbg(ql_dbg_mbx, vha, 0x107d,
"Failed mb[0]=%x.\n", mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x107e,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
"Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
"mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
@@ -2201,7 +2353,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
dma_addr_t pmap_dma;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
+ "Entered %s.\n", __func__);
pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
if (pmap == NULL) {
@@ -2224,7 +2377,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
rval = qla2x00_mailbox_command(vha, mcp);
if (rval == QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x1081,
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
"mb0/mb1=%x/%X FC/AL position map size (%x).\n",
mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
@@ -2238,7 +2391,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2267,7 +2421,8 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
uint32_t *siter, *diter, dwords;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_LINK_STATUS;
mcp->mb[2] = MSW(stats_dma);
@@ -2301,7 +2456,8 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
rval = QLA_FUNCTION_FAILED;
} else {
/* Copy over data -- firmware data is LE. */
- ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
+ "Done %s.\n", __func__);
dwords = offsetof(struct link_statistics, unused1) / 4;
siter = diter = &stats->link_fail_cnt;
while (dwords--)
@@ -2324,7 +2480,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
mbx_cmd_t *mcp = &mc;
uint32_t *siter, *diter, dwords;
- ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
mcp->mb[2] = MSW(stats_dma);
@@ -2346,7 +2503,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
"Failed mb[0]=%x.\n", mcp->mb[0]);
rval = QLA_FUNCTION_FAILED;
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
+ "Done %s.\n", __func__);
/* Copy over data -- firmware data is LE. */
dwords = sizeof(struct link_statistics) / 4;
siter = diter = &stats->link_fail_cnt;
@@ -2375,7 +2533,8 @@ qla24xx_abort_command(srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = vha->req;
- ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
+ "Entered %s.\n", __func__);
spin_lock_irqsave(&ha->hardware_lock, flags);
for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -2404,7 +2563,7 @@ qla24xx_abort_command(srb_t *sp)
abt->port_id[0] = fcport->d_id.b.al_pa;
abt->port_id[1] = fcport->d_id.b.area;
abt->port_id[2] = fcport->d_id.b.domain;
- abt->vp_index = fcport->vp_idx;
+ abt->vp_index = fcport->vha->vp_idx;
abt->req_que_no = cpu_to_le16(req->id);
@@ -2423,7 +2582,8 @@ qla24xx_abort_command(srb_t *sp)
le16_to_cpu(abt->nport_handle));
rval = QLA_FUNCTION_FAILED;
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
+ "Done %s.\n", __func__);
}
dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2455,7 +2615,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
ha = vha->hw;
req = vha->req;
- ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
+ "Entered %s.\n", __func__);
if (ha->flags.cpu_affinity_enabled)
rsp = ha->rsp_q_map[tag + 1];
@@ -2478,7 +2639,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
- tsk->p.tsk.vp_index = fcport->vp_idx;
+ tsk->p.tsk.vp_index = fcport->vha->vp_idx;
if (type == TCF_LUN_RESET) {
int_to_scsilun(l, &tsk->p.tsk.lun);
host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
@@ -2504,7 +2665,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
} else if (le16_to_cpu(sts->scsi_status) &
SS_RESPONSE_INFO_LEN_VALID) {
if (le32_to_cpu(sts->rsp_data_len) < 4) {
- ql_dbg(ql_dbg_mbx, vha, 0x1097,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
"Ignoring inconsistent data length -- not enough "
"response info (%d).\n",
le32_to_cpu(sts->rsp_data_len));
@@ -2523,7 +2684,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
ql_dbg(ql_dbg_mbx, vha, 0x1099,
"Failed to issue marker IOCB (%x).\n", rval2);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
+ "Done %s.\n", __func__);
}
dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
@@ -2564,7 +2726,8 @@ qla2x00_system_error(scsi_qla_host_t *vha)
if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
mcp->out_mb = MBX_0;
@@ -2576,7 +2739,8 @@ qla2x00_system_error(scsi_qla_host_t *vha)
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2596,7 +2760,8 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_SERDES_PARAMS;
mcp->mb[1] = BIT_0;
@@ -2615,7 +2780,8 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
/*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2631,7 +2797,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_STOP_FIRMWARE;
mcp->mb[1] = 0;
@@ -2646,7 +2813,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
if (mcp->mb[0] == MBS_INVALID_COMMAND)
rval = QLA_INVALID_COMMAND;
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2660,7 +2828,8 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -2686,7 +2855,8 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2699,7 +2869,8 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -2719,7 +2890,8 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2733,7 +2905,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
+ "Entered %s.\n", __func__);
if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
!IS_QLA83XX(vha->hw))
@@ -2764,7 +2937,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
+ "Done %s.\n", __func__);
if (mb)
memcpy(mb, mcp->mb, 8 * sizeof(*mb));
@@ -2782,7 +2956,8 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -2804,7 +2979,8 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
+ "Done %s.\n", __func__);
if (wr)
*wr = (uint64_t) mcp->mb[5] << 48 |
@@ -2829,7 +3005,8 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
+ "Entered %s.\n", __func__);
if (!IS_IIDMA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -2854,7 +3031,8 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
+ "Done %s.\n", __func__);
if (port_speed)
*port_speed = mcp->mb[3];
}
@@ -2870,7 +3048,8 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
+ "Entered %s.\n", __func__);
if (!IS_IIDMA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -2897,9 +3076,11 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
}
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval);
+ ql_dbg(ql_dbg_mbx, vha, 0x10b4,
+ "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
+ "Done %s.\n", __func__);
}
return rval;
@@ -2915,24 +3096,25 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
scsi_qla_host_t *vp;
unsigned long flags;
- ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
+ "Entered %s.\n", __func__);
if (rptid_entry->entry_status != 0)
return;
if (rptid_entry->format == 0) {
- ql_dbg(ql_dbg_mbx, vha, 0x10b7,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
"Format 0 : Number of VPs setup %d, number of "
"VPs acquired %d.\n",
MSB(le16_to_cpu(rptid_entry->vp_count)),
LSB(le16_to_cpu(rptid_entry->vp_count)));
- ql_dbg(ql_dbg_mbx, vha, 0x10b8,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
"Primary port id %02x%02x%02x.\n",
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
} else if (rptid_entry->format == 1) {
vp_idx = LSB(stat);
- ql_dbg(ql_dbg_mbx, vha, 0x10b9,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
"Format 1: VP[%d] enabled - status %d - with "
"port id %02x%02x%02x.\n", vp_idx, MSB(stat),
rptid_entry->port_id[2], rptid_entry->port_id[1],
@@ -2999,7 +3181,8 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
/* This can be called by the parent */
- ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
+ "Entered %s.\n", __func__);
vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
if (!vpmod) {
@@ -3015,6 +3198,9 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
vpmod->vp_count = 1;
vpmod->vp_index1 = vha->vp_idx;
vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
+
+ qlt_modify_vp_config(vha, vpmod);
+
memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
vpmod->entry_count = 1;
@@ -3035,7 +3221,8 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
rval = QLA_FUNCTION_FAILED;
} else {
/* EMPTY */
- ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
+ "Done %s.\n", __func__);
fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
}
dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
@@ -3069,7 +3256,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
int vp_index = vha->vp_idx;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- ql_dbg(ql_dbg_mbx, vha, 0x10c1,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
"Entered %s enabling index %d.\n", __func__, vp_index);
if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
@@ -3112,7 +3299,8 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
le16_to_cpu(vce->comp_status));
rval = QLA_FUNCTION_FAILED;
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
+ "Done %s.\n", __func__);
}
dma_pool_free(ha->s_dma_pool, vce, vce_dma);
@@ -3149,14 +3337,8 @@ qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__);
-
- /*
- * This command is implicitly executed by firmware during login for the
- * physical hosts
- */
- if (vp_idx == 0)
- return QLA_FUNCTION_FAILED;
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
mcp->mb[1] = format;
@@ -3185,7 +3367,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
+ "Entered %s.\n", __func__);
if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
@@ -3219,7 +3402,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
ql_dbg(ql_dbg_mbx, vha, 0x1008,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3244,7 +3428,8 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
unsigned long flags;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
+ "Entered %s.\n", __func__);
mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
if (mn == NULL) {
@@ -3285,7 +3470,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
status[0] = le16_to_cpu(mn->p.rsp.comp_status);
status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
le16_to_cpu(mn->p.rsp.failure_code) : 0;
- ql_dbg(ql_dbg_mbx, vha, 0x10ce,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
"cs=%x fc=%x.\n", status[0], status[1]);
if (status[0] != CS_COMPLETE) {
@@ -3299,7 +3484,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
retry = 1;
}
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10d0,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
"Firmware updated to %x.\n",
le32_to_cpu(mn->p.rsp.fw_ver));
@@ -3316,9 +3501,11 @@ verify_done:
dma_pool_free(ha->s_dma_pool, mn, mn_dma);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval);
+ ql_dbg(ql_dbg_mbx, vha, 0x10d1,
+ "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3334,7 +3521,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
struct device_reg_25xxmq __iomem *reg;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
mcp->mb[1] = req->options;
@@ -3388,7 +3576,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
ql_dbg(ql_dbg_mbx, vha, 0x10d4,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3404,7 +3593,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
struct device_reg_25xxmq __iomem *reg;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
mcp->mb[1] = rsp->options;
@@ -3456,7 +3646,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
ql_dbg(ql_dbg_mbx, vha, 0x10d7,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3469,7 +3660,8 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_IDC_ACK;
memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
@@ -3483,7 +3675,8 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
ql_dbg(ql_dbg_mbx, vha, 0x10da,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3496,7 +3689,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
+ "Entered %s.\n", __func__);
if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -3514,7 +3708,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
+ "Done %s.\n", __func__);
*sector_size = mcp->mb[1];
}
@@ -3531,7 +3726,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
@@ -3547,7 +3743,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3563,7 +3760,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
@@ -3582,7 +3780,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
"Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3595,7 +3794,8 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_RESTART_MPI_FW;
mcp->out_mb = MBX_0;
@@ -3609,7 +3809,8 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3624,7 +3825,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
@@ -3654,7 +3856,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
ql_dbg(ql_dbg_mbx, vha, 0x10e9,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3669,7 +3872,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
@@ -3699,7 +3903,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
ql_dbg(ql_dbg_mbx, vha, 0x10ec,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3713,7 +3918,8 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
+ "Entered %s.\n", __func__);
if (!IS_CNA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -3735,7 +3941,8 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
"Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
+ "Done %s.\n", __func__);
*actual_size = mcp->mb[2] << 2;
@@ -3752,7 +3959,8 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
+ "Entered %s.\n", __func__);
if (!IS_CNA_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -3775,7 +3983,8 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
"Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
+ "Done %s.\n", __func__);
}
return rval;
@@ -3788,7 +3997,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -3805,7 +4015,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
ql_dbg(ql_dbg_mbx, vha, 0x10f5,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
+ "Done %s.\n", __func__);
*data = mcp->mb[3] << 16 | mcp->mb[2];
}
@@ -3821,7 +4032,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
mbx_cmd_t *mcp = &mc;
uint32_t iter_cnt = 0x1;
- ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
+ "Entered %s.\n", __func__);
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
@@ -3865,7 +4077,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
"mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
mcp->mb[3], mcp->mb[18], mcp->mb[19]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
+ "Done %s.\n", __func__);
}
/* Copy mailbox information */
@@ -3882,7 +4095,8 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
+ "Entered %s.\n", __func__);
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
@@ -3926,7 +4140,8 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
+ "Done %s.\n", __func__);
}
/* Copy mailbox information */
@@ -3941,7 +4156,7 @@ qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x10fd,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
"Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
mcp->mb[0] = MBC_ISP84XX_RESET;
@@ -3955,7 +4170,8 @@ qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
if (rval != QLA_SUCCESS)
ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
else
- ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
+ "Done %s.\n", __func__);
return rval;
}
@@ -3967,7 +4183,8 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
@@ -3986,7 +4203,8 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
ql_dbg(ql_dbg_mbx, vha, 0x1101,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4003,7 +4221,8 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
rval = QLA_SUCCESS;
- ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
+ "Entered %s.\n", __func__);
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
@@ -4046,7 +4265,8 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
ql_dbg(ql_dbg_mbx, vha, 0x1104,
"Failed=%x mb[0]=%x.\n", rval, mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4060,7 +4280,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
@@ -4078,7 +4299,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x1107,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
+ "Done %s.\n", __func__);
if (mcp->mb[1] != 0x7)
ha->link_data_rate = mcp->mb[1];
}
@@ -4094,7 +4316,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
+ "Entered %s.\n", __func__);
if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
return QLA_FUNCTION_FAILED;
@@ -4113,7 +4336,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
/* Copy all bits to preserve original value */
memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
- ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
+ "Done %s.\n", __func__);
}
return rval;
}
@@ -4125,7 +4349,8 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_SET_PORT_CONFIG;
/* Copy all bits to preserve original setting */
@@ -4140,7 +4365,8 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
ql_dbg(ql_dbg_mbx, vha, 0x110d,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else
- ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
+ "Done %s.\n", __func__);
return rval;
}
@@ -4155,7 +4381,8 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
+ "Entered %s.\n", __func__);
if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
return QLA_FUNCTION_FAILED;
@@ -4183,7 +4410,8 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4196,7 +4424,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
uint8_t byte;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca,
+ "Entered %s.\n", __func__);
/* Integer part */
rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
@@ -4216,7 +4445,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
}
*frac = (byte >> 6) * 25;
- ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
+ "Done %s.\n", __func__);
fail:
return rval;
}
@@ -4229,7 +4459,8 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
+ "Entered %s.\n", __func__);
if (!IS_FWI2_CAPABLE(ha))
return QLA_FUNCTION_FAILED;
@@ -4248,7 +4479,8 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x1016,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4262,7 +4494,8 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
+ "Entered %s.\n", __func__);
if (!IS_QLA82XX(ha))
return QLA_FUNCTION_FAILED;
@@ -4281,7 +4514,8 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x100c,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4295,7 +4529,8 @@ qla82xx_md_get_template_size(scsi_qla_host_t *vha)
mbx_cmd_t *mcp = &mc;
int rval = QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x111f, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
+ "Entered %s.\n", __func__);
memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
@@ -4318,7 +4553,8 @@ qla82xx_md_get_template_size(scsi_qla_host_t *vha)
(mcp->mb[1] << 16) | mcp->mb[0],
(mcp->mb[3] << 16) | mcp->mb[2]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1121, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
+ "Done %s.\n", __func__);
ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
if (!ha->md_template_size) {
ql_dbg(ql_dbg_mbx, vha, 0x1122,
@@ -4337,7 +4573,8 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
mbx_cmd_t *mcp = &mc;
int rval = QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x1123, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
+ "Entered %s.\n", __func__);
ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
@@ -4372,7 +4609,8 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
((mcp->mb[1] << 16) | mcp->mb[0]),
((mcp->mb[3] << 16) | mcp->mb[2]));
} else
- ql_dbg(ql_dbg_mbx, vha, 0x1126, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
+ "Done %s.\n", __func__);
return rval;
}
@@ -4387,7 +4625,8 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x1133, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
+ "Entered %s.\n", __func__);
memset(mcp, 0, sizeof(mbx_cmd_t));
mcp->mb[0] = MBC_SET_LED_CONFIG;
@@ -4412,7 +4651,8 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
ql_dbg(ql_dbg_mbx, vha, 0x1134,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1135, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4429,7 +4669,8 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x1136, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
+ "Entered %s.\n", __func__);
memset(mcp, 0, sizeof(mbx_cmd_t));
mcp->mb[0] = MBC_GET_LED_CONFIG;
@@ -4454,7 +4695,8 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
led_cfg[4] = mcp->mb[5];
led_cfg[5] = mcp->mb[6];
}
- ql_dbg(ql_dbg_mbx, vha, 0x1138, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
+ "Done %s.\n", __func__);
}
return rval;
@@ -4471,7 +4713,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
if (!IS_QLA82XX(ha))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x1127,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
"Entered %s.\n", __func__);
memset(mcp, 0, sizeof(mbx_cmd_t));
@@ -4491,7 +4733,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
ql_dbg(ql_dbg_mbx, vha, 0x1128,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1129,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
"Done %s.\n", __func__);
}
@@ -4509,7 +4751,8 @@ qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
if (!IS_QLA83XX(ha))
return QLA_FUNCTION_FAILED;
- ql_dbg(ql_dbg_mbx, vha, 0x1130, "Entered %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
+ "Entered %s.\n", __func__);
mcp->mb[0] = MBC_WRITE_REMOTE_REG;
mcp->mb[1] = LSW(reg);
@@ -4527,7 +4770,7 @@ qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
ql_dbg(ql_dbg_mbx, vha, 0x1131,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
} else {
- ql_dbg(ql_dbg_mbx, vha, 0x1132,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
"Done %s.\n", __func__);
}
@@ -4543,13 +4786,14 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
mbx_cmd_t *mcp = &mc;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
- ql_dbg(ql_dbg_mbx, vha, 0x113b,
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
"Implicit LOGO Unsupported.\n");
return QLA_FUNCTION_FAILED;
}
- ql_dbg(ql_dbg_mbx, vha, 0x113c, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
+ "Entering %s.\n", __func__);
/* Perform Implicit LOGO. */
mcp->mb[0] = MBC_PORT_LOGOUT;
@@ -4564,7 +4808,8 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
ql_dbg(ql_dbg_mbx, vha, 0x113d,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
else
- ql_dbg(ql_dbg_mbx, vha, 0x113e, "Done %s.\n", __func__);
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
+ "Done %s.\n", __func__);
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index aa062a1..3e8b324 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -6,6 +6,7 @@
*/
#include "qla_def.h"
#include "qla_gbl.h"
+#include "qla_target.h"
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
@@ -49,6 +50,9 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->vport_slock, flags);
list_add_tail(&vha->list, &ha->vp_list);
+
+ qlt_update_vp_map(vha, SET_VP_IDX);
+
spin_unlock_irqrestore(&ha->vport_slock, flags);
mutex_unlock(&ha->vport_lock);
@@ -79,6 +83,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->vport_slock, flags);
}
list_del(&vha->list);
+ qlt_update_vp_map(vha, RESET_VP_IDX);
spin_unlock_irqrestore(&ha->vport_slock, flags);
vp_id = vha->vp_idx;
@@ -134,7 +139,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
list_for_each_entry(fcport, &vha->vp_fcports, list) {
ql_dbg(ql_dbg_vport, vha, 0xa001,
"Marking port dead, loop_id=0x%04x : %x.\n",
- fcport->loop_id, fcport->vp_idx);
+ fcport->loop_id, fcport->vha->vp_idx);
qla2x00_mark_device_lost(vha, fcport, 0, 0);
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
@@ -150,6 +155,9 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ /* Remove port id from vp target map */
+ qlt_update_vp_map(vha, RESET_AL_PA);
+
qla2x00_mark_vp_devices_dead(vha);
atomic_set(&vha->vp_state, VP_FAILED);
vha->flags.management_server_logged_in = 0;
@@ -295,10 +303,8 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
- ql_dbg(ql_dbg_dpc, vha, 0x4012,
- "Entering %s.\n", __func__);
- ql_dbg(ql_dbg_dpc, vha, 0x4013,
- "vp_flags: 0x%lx.\n", vha->vp_flags);
+ ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
+ "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
qla2x00_do_work(vha);
@@ -348,7 +354,7 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
}
}
- ql_dbg(ql_dbg_dpc, vha, 0x401c,
+ ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
"Exiting %s.\n", __func__);
return 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index de722a9..caf627b 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1190,12 +1190,12 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
}
/* Offset in flash = lower 16 bits
- * Number of enteries = upper 16 bits
+ * Number of entries = upper 16 bits
*/
offset = n & 0xffffU;
n = (n >> 16) & 0xffffU;
- /* number of addr/value pair should not exceed 1024 enteries */
+ /* number of addr/value pair should not exceed 1024 entries */
if (n >= 1024) {
ql_log(ql_log_fatal, vha, 0x0071,
"Card flash not initialized:n=0x%x.\n", n);
@@ -2050,7 +2050,7 @@ qla82xx_intr_handler(int irq, void *dev_id)
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
- ql_log(ql_log_info, NULL, 0xb054,
+ ql_log(ql_log_info, NULL, 0xb053,
"%s: NULL response queue pointer.\n", __func__);
return IRQ_NONE;
}
@@ -2446,7 +2446,7 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
ql_log(ql_log_info, vha, 0x00a1,
- "Firmware loaded successully from flash.\n");
+ "Firmware loaded successfully from flash.\n");
return QLA_SUCCESS;
} else {
ql_log(ql_log_warn, vha, 0x0108,
@@ -2461,7 +2461,7 @@ try_blob_fw:
blob = ha->hablob = qla2x00_request_firmware(vha);
if (!blob) {
ql_log(ql_log_fatal, vha, 0x00a3,
- "Firmware image not preset.\n");
+ "Firmware image not present.\n");
goto fw_load_failed;
}
@@ -2689,7 +2689,7 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
if (!optrom) {
ql_log(ql_log_warn, vha, 0xb01b,
"Unable to allocate memory "
- "for optron burst write (%x KB).\n",
+ "for optrom burst write (%x KB).\n",
OPTROM_BURST_SIZE / 1024);
}
}
@@ -2960,9 +2960,8 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
* changing the state to DEV_READY
*/
ql_log(ql_log_info, vha, 0xb023,
- "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME);
- ql_log(ql_log_info, vha, 0xb024,
- "DRV_ACTIVE:%d DRV_STATE:%d.\n",
+ "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d "
+ "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME,
drv_active, drv_state);
qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_READY);
@@ -3129,7 +3128,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
if (ql2xmdenable) {
if (qla82xx_md_collect(vha))
ql_log(ql_log_warn, vha, 0xb02c,
- "Not able to collect minidump.\n");
+ "Minidump not collected.\n");
} else
ql_log(ql_log_warn, vha, 0xb04f,
"Minidump disabled.\n");
@@ -3160,11 +3159,11 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
"Firmware version differs "
"Previous version: %d:%d:%d - "
"New version: %d:%d:%d\n",
+ fw_major_version, fw_minor_version,
+ fw_subminor_version,
ha->fw_major_version,
ha->fw_minor_version,
- ha->fw_subminor_version,
- fw_major_version, fw_minor_version,
- fw_subminor_version);
+ ha->fw_subminor_version);
/* Release MiniDump resources */
qla82xx_md_free(vha);
/* ALlocate MiniDump resources */
@@ -3325,6 +3324,30 @@ exit:
return rval;
}
+static int qla82xx_check_temp(scsi_qla_host_t *vha)
+{
+ uint32_t temp, temp_state, temp_val;
+ struct qla_hw_data *ha = vha->hw;
+
+ temp = qla82xx_rd_32(ha, CRB_TEMP_STATE);
+ temp_state = qla82xx_get_temp_state(temp);
+ temp_val = qla82xx_get_temp_val(temp);
+
+ if (temp_state == QLA82XX_TEMP_PANIC) {
+ ql_log(ql_log_warn, vha, 0x600e,
+ "Device temperature %d degrees C exceeds "
+ " maximum allowed. Hardware has been shut down.\n",
+ temp_val);
+ return 1;
+ } else if (temp_state == QLA82XX_TEMP_WARN) {
+ ql_log(ql_log_warn, vha, 0x600f,
+ "Device temperature %d degrees C exceeds "
+ "operating range. Immediate action needed.\n",
+ temp_val);
+ }
+ return 0;
+}
+
void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
@@ -3347,18 +3370,20 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
/* don't poll if reset is going on */
if (!ha->flags.isp82xx_reset_hdlr_active) {
dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- if (dev_state == QLA82XX_DEV_NEED_RESET &&
+ if (qla82xx_check_temp(vha)) {
+ set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
+ ha->flags.isp82xx_fw_hung = 1;
+ qla82xx_clear_pending_mbx(vha);
+ } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
ql_log(ql_log_warn, vha, 0x6001,
"Adapter reset needed.\n");
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
!test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
ql_log(ql_log_warn, vha, 0x6002,
"Quiescent needed.\n");
set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
} else {
if (qla82xx_check_fw_alive(vha)) {
ql_dbg(ql_dbg_timer, vha, 0x6011,
@@ -3398,7 +3423,6 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
set_bit(ISP_ABORT_NEEDED,
&vha->dpc_flags);
}
- qla2xxx_wake_dpc(vha);
ha->flags.isp82xx_fw_hung = 1;
ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
qla82xx_clear_pending_mbx(vha);
@@ -4113,6 +4137,14 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
goto md_failed;
}
+ if (ha->flags.isp82xx_no_md_cap) {
+ ql_log(ql_log_warn, vha, 0xb054,
+ "Forced reset from application, "
+ "ignore minidump capture\n");
+ ha->flags.isp82xx_no_md_cap = 0;
+ goto md_failed;
+ }
+
if (qla82xx_validate_template_chksum(vha)) {
ql_log(ql_log_info, vha, 0xb039,
"Template checksum validation error\n");
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 4ac50e2..6eb210e 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -26,6 +26,7 @@
#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54)
#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
+#define CRB_TEMP_STATE QLA82XX_REG(0x1b4)
#define QLA82XX_DMA_SHIFT_VALUE 0x55555555
#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
@@ -561,7 +562,6 @@
#define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158))
#define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg))
-#define PCIE_CHICKEN3 (0x120c8)
#define PCIE_SETUP_FUNCTION (0x12040)
#define PCIE_SETUP_FUNCTION2 (0x12048)
@@ -1178,4 +1178,16 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
+#define qla82xx_get_temp_val(x) ((x) >> 16)
+#define qla82xx_get_temp_state(x) ((x) & 0xffff)
+#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+ QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */
+ QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */
+ QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */
+};
#endif
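/*
 * Illustrative sketch, not part of the applied patch: how the raw
 * CRB_TEMP_STATE word read in qla82xx_check_temp() decodes into a
 * temperature state and a Celsius value with the macros above. The
 * register value used here is hypothetical.
 */
static inline int qla82xx_temp_decode_example(void)
{
	uint32_t temp = 0x002d0001;	/* hypothetical raw register word */
	uint16_t state = qla82xx_get_temp_state(temp);	/* low 16 bits: 0x0001 == QLA82XX_TEMP_NORMAL */
	uint16_t val = qla82xx_get_temp_val(temp);	/* high 16 bits: 0x002d == 45 degrees C */

	/* a real caller would log 'val' and escalate on WARN/PANIC */
	return (state == QLA82XX_TEMP_WARN || state == QLA82XX_TEMP_PANIC) ? val : 0;
}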
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c9c56a8..6d1d873 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -13,12 +13,13 @@
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
-
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
+#include "qla_target.h"
+
/*
* Driver version
*/
@@ -40,6 +41,12 @@ static struct kmem_cache *ctx_cachep;
*/
int ql_errlev = ql_log_all;
+int ql2xenableclass2;
+module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xenableclass2,
+ "Specify if Class 2 operations are supported from the very "
+ "beginning. Default is 0 - class 2 not supported.");
+
int ql2xlogintimeout = 20;
module_param(ql2xlogintimeout, int, S_IRUGO);
MODULE_PARM_DESC(ql2xlogintimeout,
@@ -255,6 +262,8 @@ struct scsi_host_template qla2xxx_driver_template = {
.max_sectors = 0xFFFF,
.shost_attrs = qla2x00_host_attrs,
+
+ .supported_mode = MODE_INITIATOR,
};
static struct scsi_transport_template *qla2xxx_transport_template = NULL;
@@ -306,7 +315,8 @@ static void qla2x00_free_fw_dump(struct qla_hw_data *);
static void qla2x00_mem_free(struct qla_hw_data *);
/* -------------------------------------------------------------------------- */
-static int qla2x00_alloc_queues(struct qla_hw_data *ha)
+static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
+ struct rsp_que *rsp)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
@@ -324,6 +334,12 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha)
"Unable to allocate memory for response queue ptrs.\n");
goto fail_rsp_map;
}
+ /*
+ * Make sure we record at least the request and response queue zero in
+ * case we need to free them if part of the probe fails.
+ */
+ ha->rsp_q_map[0] = rsp;
+ ha->req_q_map[0] = req;
set_bit(0, ha->rsp_qid_map);
set_bit(0, ha->req_qid_map);
return 1;
@@ -642,12 +658,12 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (ha->flags.eeh_busy) {
if (ha->flags.pci_channel_io_perm_failure) {
- ql_dbg(ql_dbg_io, vha, 0x3001,
+ ql_dbg(ql_dbg_aer, vha, 0x9010,
"PCI Channel IO permanent failure, exiting "
"cmd=%p.\n", cmd);
cmd->result = DID_NO_CONNECT << 16;
} else {
- ql_dbg(ql_dbg_io, vha, 0x3002,
+ ql_dbg(ql_dbg_aer, vha, 0x9011,
"EEH_Busy, Requeuing the cmd=%p.\n", cmd);
cmd->result = DID_REQUEUE << 16;
}
@@ -657,7 +673,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
rval = fc_remote_port_chkready(rport);
if (rval) {
cmd->result = rval;
- ql_dbg(ql_dbg_io, vha, 0x3003,
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
"fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
cmd, rval);
goto qc24_fail_command;
@@ -1136,7 +1152,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
ret = FAILED;
ql_log(ql_log_info, vha, 0x8012,
- "BUS RESET ISSUED nexus=%ld:%d%d.\n", vha->host_no, id, lun);
+ "BUS RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_fatal, vha, 0x8013,
@@ -2180,6 +2196,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
"Memory allocated for ha=%p.\n", ha);
ha->pdev = pdev;
+ ha->tgt.enable_class_2 = ql2xenableclass2;
/* Clear our data area */
ha->bars = bars;
@@ -2243,6 +2260,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
ha->gid_list_info_size = 8;
@@ -2258,6 +2276,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
ha->gid_list_info_size = 8;
@@ -2417,6 +2436,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->max_cmd_len, host->max_channel, host->max_lun,
host->transportt, sht->vendor_id);
+que_init:
+ /* Alloc arrays of request and response ring ptrs */
+ if (!qla2x00_alloc_queues(ha, req, rsp)) {
+ ql_log(ql_log_fatal, base_vha, 0x003d,
+ "Failed to allocate memory for queue pointers..."
+ "aborting.\n");
+ goto probe_init_failed;
+ }
+
+ qlt_probe_one_stage1(base_vha, ha);
+
/* Set up the irqs */
ret = qla2x00_request_irqs(ha, rsp);
if (ret)
@@ -2424,20 +2454,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pci_save_state(pdev);
- /* Alloc arrays of request and response ring ptrs */
-que_init:
- if (!qla2x00_alloc_queues(ha)) {
- ql_log(ql_log_fatal, base_vha, 0x003d,
- "Failed to allocate memory for queue pointers.. aborting.\n");
- goto probe_init_failed;
- }
-
- ha->rsp_q_map[0] = rsp;
- ha->req_q_map[0] = req;
+ /* Assign back pointers */
rsp->req = req;
req->rsp = rsp;
- set_bit(0, ha->req_qid_map);
- set_bit(0, ha->rsp_qid_map);
+
/* FWI2-capable only. */
req->req_q_in = &ha->iobase->isp24.req_q_in;
req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -2514,6 +2534,14 @@ que_init:
ql_dbg(ql_dbg_init, base_vha, 0x00ee,
"DPC thread started successfully.\n");
+ /*
+ * If we're not coming up in initiator mode, we might sit for
+ * a while without waking up the dpc thread, which leads to a
+ * stuck process warning. So just kick the dpc once here and
+ * let the kthread start (and go back to sleep in qla2x00_do_dpc).
+ */
+ qla2xxx_wake_dpc(base_vha);
+
skip_dpc:
list_add_tail(&base_vha->list, &ha->vp_list);
base_vha->host->irq = ha->pdev->irq;
@@ -2559,7 +2587,11 @@ skip_dpc:
ql_dbg(ql_dbg_init, base_vha, 0x00f2,
"Init done and hba is online.\n");
- scsi_scan_host(host);
+ if (qla_ini_mode_enabled(base_vha))
+ scsi_scan_host(host);
+ else
+ ql_dbg(ql_dbg_init, base_vha, 0x0122,
+ "skipping scsi_scan_host() for non-initiator port\n");
qla2x00_alloc_sysfs_attr(base_vha);
@@ -2577,11 +2609,17 @@ skip_dpc:
base_vha->host_no,
ha->isp_ops->fw_version_str(base_vha, fw_str));
+ qlt_add_target(ha, base_vha);
+
return 0;
probe_init_failed:
qla2x00_free_req_que(ha, req);
+ ha->req_q_map[0] = NULL;
+ clear_bit(0, ha->req_qid_map);
qla2x00_free_rsp_que(ha, rsp);
+ ha->rsp_q_map[0] = NULL;
+ clear_bit(0, ha->rsp_qid_map);
ha->max_req_queues = ha->max_rsp_queues = 0;
probe_failed:
@@ -2621,6 +2659,22 @@ probe_out:
}
static void
+qla2x00_stop_dpc_thread(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct task_struct *t = ha->dpc_thread;
+
+ if (ha->dpc_thread == NULL)
+ return;
+ /*
+ * qla2xxx_wake_dpc checks for ->dpc_thread
+ * so we need to zero it out.
+ */
+ ha->dpc_thread = NULL;
+ kthread_stop(t);
+}
+
+static void
qla2x00_shutdown(struct pci_dev *pdev)
{
scsi_qla_host_t *vha;
@@ -2663,9 +2717,18 @@ qla2x00_remove_one(struct pci_dev *pdev)
struct qla_hw_data *ha;
unsigned long flags;
+ /*
+ * If the PCI device is disabled that means that probe failed and any
+ * resources should have been cleaned up on probe exit.
+ */
+ if (!atomic_read(&pdev->enable_cnt))
+ return;
+
base_vha = pci_get_drvdata(pdev);
ha = base_vha->hw;
+ ha->flags.host_shutting_down = 1;
+
mutex_lock(&ha->vport_lock);
while (ha->cur_vport_count) {
struct Scsi_Host *scsi_host;
@@ -2719,6 +2782,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
ha->dpc_thread = NULL;
kthread_stop(t);
}
+ qlt_remove_target(ha, base_vha);
qla2x00_free_sysfs_attr(base_vha);
@@ -2770,17 +2834,7 @@ qla2x00_free_device(scsi_qla_host_t *vha)
if (vha->timer_active)
qla2x00_stop_timer(vha);
- /* Kill the kernel thread for this host */
- if (ha->dpc_thread) {
- struct task_struct *t = ha->dpc_thread;
-
- /*
- * qla2xxx_wake_dpc checks for ->dpc_thread
- * so we need to zero it out.
- */
- ha->dpc_thread = NULL;
- kthread_stop(t);
- }
+ qla2x00_stop_dpc_thread(vha);
qla25xx_delete_queues(vha);
@@ -2842,8 +2896,10 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
spin_unlock_irqrestore(vha->host->host_lock, flags);
set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
qla2xxx_wake_dpc(base_vha);
- } else
+ } else {
fc_remote_port_delete(rport);
+ qlt_fc_port_deleted(vha, fcport);
+ }
}
/*
@@ -2859,7 +2915,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
int do_login, int defer)
{
if (atomic_read(&fcport->state) == FCS_ONLINE &&
- vha->vp_idx == fcport->vp_idx) {
+ vha->vp_idx == fcport->vha->vp_idx) {
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
qla2x00_schedule_rport_del(vha, fcport, defer);
}
@@ -2908,7 +2964,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
fc_port_t *fcport;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx)
+ if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
continue;
/*
@@ -2921,7 +2977,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
if (defer)
qla2x00_schedule_rport_del(vha, fcport, defer);
- else if (vha->vp_idx == fcport->vp_idx)
+ else if (vha->vp_idx == fcport->vha->vp_idx)
qla2x00_schedule_rport_del(vha, fcport, defer);
}
}
@@ -2946,10 +3002,13 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->init_cb)
goto fail;
+ if (qlt_mem_alloc(ha) < 0)
+ goto fail_free_init_cb;
+
ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
if (!ha->gid_list)
- goto fail_free_init_cb;
+ goto fail_free_tgt_mem;
ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
if (!ha->srb_mempool)
@@ -3167,6 +3226,8 @@ fail_free_gid_list:
ha->gid_list_dma);
ha->gid_list = NULL;
ha->gid_list_dma = 0;
+fail_free_tgt_mem:
+ qlt_mem_free(ha);
fail_free_init_cb:
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
ha->init_cb_dma);
@@ -3282,6 +3343,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
if (ha->ctx_mempool)
mempool_destroy(ha->ctx_mempool);
+ qlt_mem_free(ha);
+
if (ha->init_cb)
dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
ha->init_cb, ha->init_cb_dma);
@@ -3311,6 +3374,10 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->gid_list = NULL;
ha->gid_list_dma = 0;
+
+ ha->tgt.atio_ring = NULL;
+ ha->tgt.atio_dma = 0;
+ ha->tgt.tgt_vp_map = NULL;
}
struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@@ -3671,10 +3738,9 @@ qla2x00_do_dpc(void *data)
ha->dpc_active = 1;
- ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
- "DPC handler waking up.\n");
- ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
- "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
+ ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
+ "DPC handler waking up, dpc_flags=0x%lx.\n",
+ base_vha->dpc_flags);
qla2x00_do_work(base_vha);
@@ -3740,6 +3806,16 @@ qla2x00_do_dpc(void *data)
clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
}
+ if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
+ int ret;
+ ret = qla2x00_send_change_request(base_vha, 0x3, 0);
+ if (ret != QLA_SUCCESS)
+ ql_log(ql_log_warn, base_vha, 0x121,
+ "Failed to enable receiving of RSCN "
+ "requests: 0x%x.\n", ret);
+ clear_bit(SCR_PENDING, &base_vha->dpc_flags);
+ }
+
if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
"Quiescence mode scheduled.\n");
@@ -4457,6 +4533,21 @@ qla2x00_module_init(void)
return -ENOMEM;
}
+ /* Initialize target kmem_cache and mem_pools */
+ ret = qlt_init();
+ if (ret < 0) {
+ kmem_cache_destroy(srb_cachep);
+ return ret;
+ } else if (ret > 0) {
+ /*
+ * If initiator mode is explicitly disabled by qlt_init(),
+ * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
+ * performing scsi_scan_target() during LOOP UP event.
+ */
+ qla2xxx_transport_functions.disable_target_scan = 1;
+ qla2xxx_transport_vport_functions.disable_target_scan = 1;
+ }
+
/* Derive version string. */
strcpy(qla2x00_version_str, QLA2XXX_VERSION);
if (ql2xextended_error_logging)
@@ -4468,6 +4559,7 @@ qla2x00_module_init(void)
kmem_cache_destroy(srb_cachep);
ql_log(ql_log_fatal, NULL, 0x0002,
"fc_attach_transport failed...Failing load!.\n");
+ qlt_exit();
return -ENODEV;
}
@@ -4481,6 +4573,7 @@ qla2x00_module_init(void)
fc_attach_transport(&qla2xxx_transport_vport_functions);
if (!qla2xxx_transport_vport_template) {
kmem_cache_destroy(srb_cachep);
+ qlt_exit();
fc_release_transport(qla2xxx_transport_template);
ql_log(ql_log_fatal, NULL, 0x0004,
"fc_attach_transport vport failed...Failing load!.\n");
@@ -4492,6 +4585,7 @@ qla2x00_module_init(void)
ret = pci_register_driver(&qla2xxx_pci_driver);
if (ret) {
kmem_cache_destroy(srb_cachep);
+ qlt_exit();
fc_release_transport(qla2xxx_transport_template);
fc_release_transport(qla2xxx_transport_vport_template);
ql_log(ql_log_fatal, NULL, 0x0006,
@@ -4511,6 +4605,7 @@ qla2x00_module_exit(void)
pci_unregister_driver(&qla2xxx_pci_driver);
qla2x00_release_firmware();
kmem_cache_destroy(srb_cachep);
+ qlt_exit();
if (ctx_cachep)
kmem_cache_destroy(ctx_cachep);
fc_release_transport(qla2xxx_transport_template);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
new file mode 100644
index 0000000..5b30132
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -0,0 +1,4963 @@
+/*
+ * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
+ *
+ * based on qla2x00t.c code:
+ *
+ * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
+ * Copyright (C) 2004 - 2005 Leonid Stoljar
+ * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
+ * Copyright (C) 2006 - 2010 ID7 Ltd.
+ *
+ * Forward port and refactoring to modern qla2xxx and target/configfs
+ *
+ * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2
+ * of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+
+#include "qla_def.h"
+#include "qla_target.h"
+
+static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
+module_param(qlini_mode, charp, S_IRUGO);
+MODULE_PARM_DESC(qlini_mode,
+ "Determines when initiator mode will be enabled. Possible values: "
+ "\"exclusive\" - initiator mode will be enabled on load, "
+ "disabled on enabling target mode and then on disabling target mode "
+ "enabled back; "
+ "\"disabled\" - initiator mode will never be enabled; "
+ "\"enabled\" (default) - initiator mode will always stay enabled.");
+
+static int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+
+/*
+ * From scsi/fc/fc_fcp.h
+ */
+enum fcp_resp_rsp_codes {
+ FCP_TMF_CMPL = 0,
+ FCP_DATA_LEN_INVALID = 1,
+ FCP_CMND_FIELDS_INVALID = 2,
+ FCP_DATA_PARAM_MISMATCH = 3,
+ FCP_TMF_REJECTED = 4,
+ FCP_TMF_FAILED = 5,
+ FCP_TMF_INVALID_LUN = 9,
+};
+
+/*
+ * fc_pri_ta from scsi/fc/fc_fcp.h
+ */
+#define FCP_PTA_SIMPLE 0 /* simple task attribute */
+#define FCP_PTA_HEADQ 1 /* head of queue task attribute */
+#define FCP_PTA_ORDERED 2 /* ordered task attribute */
+#define FCP_PTA_ACA 4 /* auto. contingent allegiance */
+#define FCP_PTA_MASK 7 /* mask for task attribute field */
+#define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */
+#define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */
+
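/*
 * Illustrative sketch, not part of the applied patch: splitting the FCP_CMND
 * task-attribute byte with the masks above. The byte value is hypothetical;
 * bit 7 (FCP_PRI_RESVD_MASK) is reserved.
 */
static inline void qlt_fcp_task_attr_example(void)
{
	uint8_t task_attr = 0x1a;	/* hypothetical: priority 3, ORDERED */
	uint8_t attr = task_attr & FCP_PTA_MASK;	/* 0x1a & 0x07 == FCP_PTA_ORDERED */
	uint8_t prio = (task_attr & ~FCP_PRI_RESVD_MASK) >> FCP_PRI_SHIFT;	/* 3 */

	(void)attr;
	(void)prio;
}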
+/*
+ * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
+ * must be called under the HW lock and may unlock/relock it internally.
+ * This isn't an issue, since in the current implementation, at the time
+ * those functions are called:
+ *
+ * - Either the context is IRQ and only the IRQ handler can modify HW data,
+ * including ring-related fields,
+ *
+ * - Or access to target mode variables from struct qla_tgt doesn't
+ * cross those functions' boundaries, except for tgt_stop, which is
+ * additionally protected by irq_cmd_count.
+ */
+/* Predefs for callbacks handed to qla2xxx LLD */
+static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
+ struct atio_from_isp *pkt);
+static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
+static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+ int fn, void *iocb, int flags);
+static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
+ *cmd, struct atio_from_isp *atio, int ha_locked);
+static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
+ struct qla_tgt_srr_imm *imm, int ha_lock);
+/*
+ * Global Variables
+ */
+static struct kmem_cache *qla_tgt_cmd_cachep;
+static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
+static mempool_t *qla_tgt_mgmt_cmd_mempool;
+static struct workqueue_struct *qla_tgt_wq;
+static DEFINE_MUTEX(qla_tgt_mutex);
+static LIST_HEAD(qla_tgt_glist);
+
+/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
+static struct qla_tgt_sess *qlt_find_sess_by_port_name(
+ struct qla_tgt *tgt,
+ const uint8_t *port_name)
+{
+ struct qla_tgt_sess *sess;
+
+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
+ if (!memcmp(sess->port_name, port_name, WWN_SIZE))
+ return sess;
+ }
+
+ return NULL;
+}
+
+/* Might release hw lock, then reacquire!! */
+static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
+{
+ /* Send marker if required */
+ if (unlikely(vha->marker_needed != 0)) {
+ int rc = qla2x00_issue_marker(vha, vha_locked);
+ if (rc != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe03d,
+ "qla_target(%d): issue_marker() failed\n",
+ vha->vp_idx);
+ }
+ return rc;
+ }
+ return QLA_SUCCESS;
+}
+
+static inline
+struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
+ uint8_t *d_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint8_t vp_idx;
+
+ if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
+ return NULL;
+
+ if (vha->d_id.b.al_pa == d_id[2])
+ return vha;
+
+ BUG_ON(ha->tgt.tgt_vp_map == NULL);
+ vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
+ if (likely(test_bit(vp_idx, ha->vp_idx_map)))
+ return ha->tgt.tgt_vp_map[vp_idx].vha;
+
+ return NULL;
+}
+
+static inline
+struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
+ uint16_t vp_idx)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->vp_idx == vp_idx)
+ return vha;
+
+ BUG_ON(ha->tgt.tgt_vp_map == NULL);
+ if (likely(test_bit(vp_idx, ha->vp_idx_map)))
+ return ha->tgt.tgt_vp_map[vp_idx].vha;
+
+ return NULL;
+}
+
+void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio)
+{
+ switch (atio->u.raw.entry_type) {
+ case ATIO_TYPE7:
+ {
+ struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
+ atio->u.isp24.fcp_hdr.d_id);
+ if (unlikely(NULL == host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe03e,
+ "qla_target(%d): Received ATIO_TYPE7 "
+ "with unknown d_id %x:%x:%x\n", vha->vp_idx,
+ atio->u.isp24.fcp_hdr.d_id[0],
+ atio->u.isp24.fcp_hdr.d_id[1],
+ atio->u.isp24.fcp_hdr.d_id[2]);
+ break;
+ }
+ qlt_24xx_atio_pkt(host, atio);
+ break;
+ }
+
+ case IMMED_NOTIFY_TYPE:
+ {
+ struct scsi_qla_host *host = vha;
+ struct imm_ntfy_from_isp *entry =
+ (struct imm_ntfy_from_isp *)atio;
+
+ if ((entry->u.isp24.vp_index != 0xFF) &&
+ (entry->u.isp24.nport_handle != 0xFFFF)) {
+ host = qlt_find_host_by_vp_idx(vha,
+ entry->u.isp24.vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe03f,
+ "qla_target(%d): Received "
+ "ATIO (IMMED_NOTIFY_TYPE) "
+ "with unknown vp_index %d\n",
+ vha->vp_idx, entry->u.isp24.vp_index);
+ break;
+ }
+ }
+ qlt_24xx_atio_pkt(host, atio);
+ break;
+ }
+
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe040,
+ "qla_target(%d): Received unknown ATIO atio "
+ "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
+ break;
+ }
+
+ return;
+}
+
+void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
+{
+ switch (pkt->entry_type) {
+ case CTIO_TYPE7:
+ {
+ struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
+ struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ entry->vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe041,
+ "qla_target(%d): Response pkt (CTIO_TYPE7) "
+ "received, with unknown vp_index %d\n",
+ vha->vp_idx, entry->vp_index);
+ break;
+ }
+ qlt_response_pkt(host, pkt);
+ break;
+ }
+
+ case IMMED_NOTIFY_TYPE:
+ {
+ struct scsi_qla_host *host = vha;
+ struct imm_ntfy_from_isp *entry =
+ (struct imm_ntfy_from_isp *)pkt;
+
+ host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe042,
+ "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
+ "received, with unknown vp_index %d\n",
+ vha->vp_idx, entry->u.isp24.vp_index);
+ break;
+ }
+ qlt_response_pkt(host, pkt);
+ break;
+ }
+
+ case NOTIFY_ACK_TYPE:
+ {
+ struct scsi_qla_host *host = vha;
+ struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
+
+ if (0xFF != entry->u.isp24.vp_index) {
+ host = qlt_find_host_by_vp_idx(vha,
+ entry->u.isp24.vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe043,
+ "qla_target(%d): Response "
+ "pkt (NOTIFY_ACK_TYPE) "
+ "received, with unknown "
+ "vp_index %d\n", vha->vp_idx,
+ entry->u.isp24.vp_index);
+ break;
+ }
+ }
+ qlt_response_pkt(host, pkt);
+ break;
+ }
+
+ case ABTS_RECV_24XX:
+ {
+ struct abts_recv_from_24xx *entry =
+ (struct abts_recv_from_24xx *)pkt;
+ struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ entry->vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe044,
+ "qla_target(%d): Response pkt "
+ "(ABTS_RECV_24XX) received, with unknown "
+ "vp_index %d\n", vha->vp_idx, entry->vp_index);
+ break;
+ }
+ qlt_response_pkt(host, pkt);
+ break;
+ }
+
+ case ABTS_RESP_24XX:
+ {
+ struct abts_resp_to_24xx *entry =
+ (struct abts_resp_to_24xx *)pkt;
+ struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ entry->vp_index);
+ if (unlikely(!host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe045,
+ "qla_target(%d): Response pkt "
+ "(ABTS_RECV_24XX) received, with unknown "
+ "vp_index %d\n", vha->vp_idx, entry->vp_index);
+ break;
+ }
+ qlt_response_pkt(host, pkt);
+ break;
+ }
+
+ default:
+ qlt_response_pkt(vha, pkt);
+ break;
+ }
+
+}
+
+static void qlt_free_session_done(struct work_struct *work)
+{
+ struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
+ free_work);
+ struct qla_tgt *tgt = sess->tgt;
+ struct scsi_qla_host *vha = sess->vha;
+ struct qla_hw_data *ha = vha->hw;
+
+ BUG_ON(!tgt);
+ /*
+ * Release the target session for FC Nexus from fabric module code.
+ */
+ if (sess->se_sess != NULL)
+ ha->tgt.tgt_ops->free_session(sess);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
+ "Unregistration of sess %p finished\n", sess);
+
+ kfree(sess);
+ /*
+ * We need to protect against a race where tgt is freed before or
+ * inside wake_up()
+ */
+ tgt->sess_count--;
+ if (tgt->sess_count == 0)
+ wake_up_all(&tgt->waitQ);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+void qlt_unreg_sess(struct qla_tgt_sess *sess)
+{
+ struct scsi_qla_host *vha = sess->vha;
+
+ vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
+
+ list_del(&sess->sess_list_entry);
+ if (sess->deleted)
+ list_del(&sess->del_list_entry);
+
+ INIT_WORK(&sess->free_work, qlt_free_session_done);
+ schedule_work(&sess->free_work);
+}
+EXPORT_SYMBOL(qlt_unreg_sess);
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess = NULL;
+ uint32_t unpacked_lun, lun = 0;
+ uint16_t loop_id;
+ int res = 0;
+ struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
+ struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+
+ loop_id = le16_to_cpu(n->u.isp24.nport_handle);
+ if (loop_id == 0xFFFF) {
+#if 0 /* FIXME: Re-enable Global event handling.. */
+ /* Global event */
+ atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
+ qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
+ if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
+ sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
+ typeof(*sess), sess_list_entry);
+ switch (mcmd) {
+ case QLA_TGT_NEXUS_LOSS_SESS:
+ mcmd = QLA_TGT_NEXUS_LOSS;
+ break;
+ case QLA_TGT_ABORT_ALL_SESS:
+ mcmd = QLA_TGT_ABORT_ALL;
+ break;
+ case QLA_TGT_NEXUS_LOSS:
+ case QLA_TGT_ABORT_ALL:
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe046,
+ "qla_target(%d): Not allowed "
+ "command %x in %s", vha->vp_idx,
+ mcmd, __func__);
+ sess = NULL;
+ break;
+ }
+ } else
+ sess = NULL;
+#endif
+ } else {
+ sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe000,
+ "Using sess for qla_tgt_reset: %p\n", sess);
+ if (!sess) {
+ res = -ESRCH;
+ return res;
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe047,
+ "scsi(%ld): resetting (session %p from port "
+ "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
+ "mcmd %x, loop_id %d)\n", vha->host_no, sess,
+ sess->port_name[0], sess->port_name[1],
+ sess->port_name[2], sess->port_name[3],
+ sess->port_name[4], sess->port_name[5],
+ sess->port_name[6], sess->port_name[7],
+ mcmd, loop_id);
+
+ lun = a->u.isp24.fcp_cmnd.lun;
+ unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
+ iocb, QLA24XX_MGMT_SEND_NACK);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
+ bool immediate)
+{
+ struct qla_tgt *tgt = sess->tgt;
+ uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
+
+ if (sess->deleted)
+ return;
+
+ ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
+ "Scheduling sess %p for deletion\n", sess);
+ list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
+ sess->deleted = 1;
+
+ if (immediate)
+ dev_loss_tmo = 0;
+
+ sess->expires = jiffies + dev_loss_tmo * HZ;
+
+ ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
+ "qla_target(%d): session for port %02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
+ "deletion in %u secs (expires: %lu) immed: %d\n",
+ sess->vha->vp_idx,
+ sess->port_name[0], sess->port_name[1],
+ sess->port_name[2], sess->port_name[3],
+ sess->port_name[4], sess->port_name[5],
+ sess->port_name[6], sess->port_name[7],
+ sess->loop_id, dev_loss_tmo, sess->expires, immediate);
+
+ if (immediate)
+ schedule_delayed_work(&tgt->sess_del_work, 0);
+ else
+ schedule_delayed_work(&tgt->sess_del_work,
+ sess->expires - jiffies);
+}
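/*
 * Illustrative sketch, not part of the applied patch: the intended
 * deferred-deletion timing above, with hypothetical numbers. With
 * port_down_retry_count == 30, dev_loss_tmo is 35 seconds, so a
 * non-immediate deletion is handled by qlt_del_sess_work_fn() roughly
 * 35 * HZ jiffies later.
 */
static inline unsigned long qlt_sess_del_delay_example(unsigned long now,
	int port_down_retry_count)
{
	uint32_t dev_loss_tmo = port_down_retry_count + 5;	/* seconds */
	unsigned long expires = now + dev_loss_tmo * HZ;

	/* delay handed to schedule_delayed_work(): elapses when 'expires' is reached */
	return expires - now;
}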
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
+{
+ struct qla_tgt_sess *sess;
+
+ list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
+ qlt_schedule_sess_for_deletion(sess, true);
+
+ /* At this point tgt could be already dead */
+}
+
+static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
+ uint16_t *loop_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ dma_addr_t gid_list_dma;
+ struct gid_list_info *gid_list;
+ char *id_iter;
+ int res, rc, i;
+ uint16_t entries;
+
+ gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ &gid_list_dma, GFP_KERNEL);
+ if (!gid_list) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
+ "qla_target(%d): DMA Alloc failed of %u\n",
+ vha->vp_idx, qla2x00_gid_list_size(ha));
+ return -ENOMEM;
+ }
+
+ /* Get list of logged in devices */
+ rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
+ if (rc != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
+ "qla_target(%d): get_id_list() failed: %x\n",
+ vha->vp_idx, rc);
+ res = -1;
+ goto out_free_id_list;
+ }
+
+ id_iter = (char *)gid_list;
+ res = -1;
+ for (i = 0; i < entries; i++) {
+ struct gid_list_info *gid = (struct gid_list_info *)id_iter;
+ if ((gid->al_pa == s_id[2]) &&
+ (gid->area == s_id[1]) &&
+ (gid->domain == s_id[0])) {
+ *loop_id = le16_to_cpu(gid->loop_id);
+ res = 0;
+ break;
+ }
+ id_iter += ha->gid_list_info_size;
+ }
+
+out_free_id_list:
+ dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ gid_list, gid_list_dma);
+ return res;
+}
+
+static bool qlt_check_fcport_exist(struct scsi_qla_host *vha,
+ struct qla_tgt_sess *sess)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_port_24xx_data *pmap24;
+ bool res, found = false;
+ int rc, i;
+ uint16_t loop_id = 0xFFFF; /* to eliminate compiler's warning */
+ uint16_t entries;
+ void *pmap;
+ int pmap_len;
+ fc_port_t *fcport;
+ int global_resets;
+
+retry:
+ global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
+
+ rc = qla2x00_get_node_name_list(vha, &pmap, &pmap_len);
+ if (rc != QLA_SUCCESS) {
+ res = false;
+ goto out;
+ }
+
+ pmap24 = pmap;
+ entries = pmap_len/sizeof(*pmap24);
+
+ for (i = 0; i < entries; ++i) {
+ if (!memcmp(sess->port_name, pmap24[i].port_name, WWN_SIZE)) {
+ loop_id = le16_to_cpu(pmap24[i].loop_id);
+ found = true;
+ break;
+ }
+ }
+
+ kfree(pmap);
+
+ if (!found) {
+ res = false;
+ goto out;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf046,
+ "qlt_check_fcport_exist(): loop_id %d", loop_id);
+
+ fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+ if (fcport == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf047,
+ "qla_target(%d): Allocation of tmp FC port failed",
+ vha->vp_idx);
+ res = false;
+ goto out;
+ }
+
+ fcport->loop_id = loop_id;
+
+ rc = qla2x00_get_port_database(vha, fcport, 0);
+ if (rc != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf048,
+ "qla_target(%d): Failed to retrieve fcport "
+ "information -- get_port_database() returned %x "
+ "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
+ res = false;
+ goto out_free_fcport;
+ }
+
+ if (global_resets !=
+ atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
+ "qla_target(%d): global reset during session discovery"
+ " (counter was %d, new %d), retrying",
+ vha->vp_idx, global_resets,
+ atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
+ goto retry;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
+ "Updating sess %p s_id %x:%x:%x, loop_id %d) to d_id %x:%x:%x, "
+ "loop_id %d", sess, sess->s_id.b.domain, sess->s_id.b.al_pa,
+ sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id);
+
+ sess->s_id = fcport->d_id;
+ sess->loop_id = fcport->loop_id;
+ sess->conf_compl_supported = !!(fcport->flags &
+ FCF_CONF_COMP_SUPPORTED);
+
+ res = true;
+
+out_free_fcport:
+ kfree(fcport);
+
+out:
+ return res;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_undelete_sess(struct qla_tgt_sess *sess)
+{
+ BUG_ON(!sess->deleted);
+
+ list_del(&sess->del_list_entry);
+ sess->deleted = 0;
+}
+
+static void qlt_del_sess_work_fn(struct delayed_work *work)
+{
+ struct qla_tgt *tgt = container_of(work, struct qla_tgt,
+ sess_del_work);
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ while (!list_empty(&tgt->del_sess_list)) {
+ sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
+ del_list_entry);
+ if (time_after_eq(jiffies, sess->expires)) {
+ bool cancel;
+
+ qlt_undelete_sess(sess);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ cancel = qlt_check_fcport_exist(vha, sess);
+
+ if (cancel) {
+ if (sess->deleted) {
+ /*
+ * sess was again deleted while we were
+ * discovering it
+ */
+ spin_lock_irqsave(&ha->hardware_lock,
+ flags);
+ continue;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf049,
+ "qla_target(%d): cancel deletion of "
+ "session for port %02x:%02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x (loop ID %d), because "
+ " it isn't deleted by firmware",
+ vha->vp_idx, sess->port_name[0],
+ sess->port_name[1], sess->port_name[2],
+ sess->port_name[3], sess->port_name[4],
+ sess->port_name[5], sess->port_name[6],
+ sess->port_name[7], sess->loop_id);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
+ "Timeout: sess %p about to be deleted\n",
+ sess);
+ ha->tgt.tgt_ops->shutdown_sess(sess);
+ ha->tgt.tgt_ops->put_sess(sess);
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ } else {
+ schedule_delayed_work(&tgt->sess_del_work,
+ sess->expires - jiffies);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+/*
+ * Adds an extra ref to allow dropping the hw lock after adding sess to the list.
+ * Caller must put it.
+ */
+static struct qla_tgt_sess *qlt_create_sess(
+ struct scsi_qla_host *vha,
+ fc_port_t *fcport,
+ bool local)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ unsigned long flags;
+ unsigned char be_sid[3];
+
+ /* Check to avoid double sessions */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
+ sess_list_entry) {
+ if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
+ "Double sess %p found (s_id %x:%x:%x, "
+ "loop_id %d), updating to d_id %x:%x:%x, "
+ "loop_id %d", sess, sess->s_id.b.domain,
+ sess->s_id.b.al_pa, sess->s_id.b.area,
+ sess->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.al_pa, fcport->d_id.b.area,
+ fcport->loop_id);
+
+ if (sess->deleted)
+ qlt_undelete_sess(sess);
+
+ kref_get(&sess->se_sess->sess_kref);
+ sess->s_id = fcport->d_id;
+ sess->loop_id = fcport->loop_id;
+ sess->conf_compl_supported = !!(fcport->flags &
+ FCF_CONF_COMP_SUPPORTED);
+ if (sess->local && !local)
+ sess->local = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return sess;
+ }
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ sess = kzalloc(sizeof(*sess), GFP_KERNEL);
+ if (!sess) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
+ "qla_target(%u): session allocation failed, "
+ "all commands from port %02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x will be refused", vha->vp_idx,
+ fcport->port_name[0], fcport->port_name[1],
+ fcport->port_name[2], fcport->port_name[3],
+ fcport->port_name[4], fcport->port_name[5],
+ fcport->port_name[6], fcport->port_name[7]);
+
+ return NULL;
+ }
+ sess->tgt = ha->tgt.qla_tgt;
+ sess->vha = vha;
+ sess->s_id = fcport->d_id;
+ sess->loop_id = fcport->loop_id;
+ sess->local = local;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
+ "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
+ sess, ha->tgt.qla_tgt);
+
+ be_sid[0] = sess->s_id.b.domain;
+ be_sid[1] = sess->s_id.b.area;
+ be_sid[2] = sess->s_id.b.al_pa;
+ /*
+ * Determine if this fc_port->port_name is allowed to access
+ * target mode using explicit NodeACLs+MappedLUNs, or using
+ * TPG demo mode. If this is successful a target mode FC nexus
+ * is created.
+ */
+ if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
+ &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
+ kfree(sess);
+ return NULL;
+ }
+ /*
+ * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
+ * access across ->hardware_lock reacquire.
+ */
+ kref_get(&sess->se_sess->sess_kref);
+
+ sess->conf_compl_supported = !!(fcport->flags &
+ FCF_CONF_COMP_SUPPORTED);
+ BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
+ memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
+ ha->tgt.qla_tgt->sess_count++;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
+ "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:"
+ "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed"
+ " completion %ssupported) added\n",
+ vha->vp_idx, local ? "local " : "", fcport->port_name[0],
+ fcport->port_name[1], fcport->port_name[2], fcport->port_name[3],
+ fcport->port_name[4], fcport->port_name[5], fcport->port_name[6],
+ fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain,
+ sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ?
+ "" : "not ");
+
+ return sess;
+}
+
+/*
+ * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
+ */
+void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt_sess *sess;
+ unsigned long flags;
+
+ if (!vha->hw->tgt.tgt_ops)
+ return;
+
+ if (!tgt || (fcport->port_type != FCT_INITIATOR))
+ return;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (tgt->tgt_stop) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+ }
+ sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
+ if (!sess) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ mutex_lock(&ha->tgt.tgt_mutex);
+ sess = qlt_create_sess(vha, fcport, false);
+ mutex_unlock(&ha->tgt.tgt_mutex);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ } else {
+ kref_get(&sess->se_sess->sess_kref);
+
+ if (sess->deleted) {
+ qlt_undelete_sess(sess);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
+ "qla_target(%u): %ssession for port %02x:"
+ "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
+ "reappeared\n", vha->vp_idx, sess->local ? "local "
+ : "", sess->port_name[0], sess->port_name[1],
+ sess->port_name[2], sess->port_name[3],
+ sess->port_name[4], sess->port_name[5],
+ sess->port_name[6], sess->port_name[7],
+ sess->loop_id);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
+ "Reappeared sess %p\n", sess);
+ }
+ sess->s_id = fcport->d_id;
+ sess->loop_id = fcport->loop_id;
+ sess->conf_compl_supported = !!(fcport->flags &
+ FCF_CONF_COMP_SUPPORTED);
+ }
+
+ if (sess && sess->local) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
+ "qla_target(%u): local session for "
+ "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
+ "(loop ID %d) became global\n", vha->vp_idx,
+ fcport->port_name[0], fcport->port_name[1],
+ fcport->port_name[2], fcport->port_name[3],
+ fcport->port_name[4], fcport->port_name[5],
+ fcport->port_name[6], fcport->port_name[7],
+ sess->loop_id);
+ sess->local = 0;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ ha->tgt.tgt_ops->put_sess(sess);
+}
+
+void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt_sess *sess;
+ unsigned long flags;
+
+ if (!vha->hw->tgt.tgt_ops)
+ return;
+
+ if (!tgt || (fcport->port_type != FCT_INITIATOR))
+ return;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (tgt->tgt_stop) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+ }
+ sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
+ if (!sess) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
+
+ sess->local = 1;
+ qlt_schedule_sess_for_deletion(sess, false);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static inline int test_tgt_sess_count(struct qla_tgt *tgt)
+{
+ struct qla_hw_data *ha = tgt->ha;
+ unsigned long flags;
+ int res;
+ /*
+ * We need to protect against a race where tgt is freed before or
+ * inside wake_up()
+ */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
+ "tgt %p, empty(sess_list)=%d sess_count=%d\n",
+ tgt, list_empty(&tgt->sess_list), tgt->sess_count);
+ res = (tgt->sess_count == 0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return res;
+}
+
+/* Called by tcm_qla2xxx configfs code */
+void qlt_stop_phase1(struct qla_tgt *tgt)
+{
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = tgt->ha;
+ unsigned long flags;
+
+ if (tgt->tgt_stop || tgt->tgt_stopped) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
+ "Already in tgt->tgt_stop or tgt_stopped state\n");
+ dump_stack();
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
+ vha->host_no, vha);
+ /*
+ * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
+ * Lock is needed, because we can still get an incoming packet.
+ */
+ mutex_lock(&ha->tgt.tgt_mutex);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ tgt->tgt_stop = 1;
+ qlt_clear_tgt_db(tgt, true);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ mutex_unlock(&ha->tgt.tgt_mutex);
+
+ flush_delayed_work_sync(&tgt->sess_del_work);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
+ "Waiting for sess works (tgt %p)", tgt);
+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
+ while (!list_empty(&tgt->sess_works_list)) {
+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+ flush_scheduled_work();
+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
+ }
+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
+ "Waiting for tgt %p: list_empty(sess_list)=%d "
+ "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
+ tgt->sess_count);
+
+ wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+
+ /* Big hammer */
+ if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
+ qlt_disable_vha(vha);
+
+ /* Wait for sessions to clear out (just in case) */
+ wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+}
+EXPORT_SYMBOL(qlt_stop_phase1);
+
+/* Called by tcm_qla2xxx configfs code */
+void qlt_stop_phase2(struct qla_tgt *tgt)
+{
+ struct qla_hw_data *ha = tgt->ha;
+ unsigned long flags;
+
+ if (tgt->tgt_stopped) {
+ ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
+ "Already in tgt->tgt_stopped state\n");
+ dump_stack();
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
+ "Waiting for %d IRQ commands to complete (tgt %p)",
+ tgt->irq_cmd_count, tgt);
+
+ mutex_lock(&ha->tgt.tgt_mutex);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ while (tgt->irq_cmd_count != 0) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ udelay(2);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ }
+ tgt->tgt_stop = 0;
+ tgt->tgt_stopped = 1;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ mutex_unlock(&ha->tgt.tgt_mutex);
+
+ ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
+ tgt);
+}
+EXPORT_SYMBOL(qlt_stop_phase2);
+
+/* Called from qlt_remove_target() -> qla2x00_remove_one() */
+void qlt_release(struct qla_tgt *tgt)
+{
+ struct qla_hw_data *ha = tgt->ha;
+
+ if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
+ qlt_stop_phase2(tgt);
+
+ ha->tgt.qla_tgt = NULL;
+
+ ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
+ "Release of tgt %p finished\n", tgt);
+
+ kfree(tgt);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
+ const void *param, unsigned int param_size)
+{
+ struct qla_tgt_sess_work_param *prm;
+ unsigned long flags;
+
+ prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
+ if (!prm) {
+ ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
+ "qla_target(%d): Unable to create session "
+ "work, command will be refused", 0);
+ return -ENOMEM;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
+ "Scheduling work (type %d, prm %p)"
+ " to find session for param %p (size %d, tgt %p)\n",
+ type, prm, param, param_size, tgt);
+
+ prm->type = type;
+ memcpy(&prm->tm_iocb, param, param_size);
+
+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
+ list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+ schedule_work(&tgt->sess_work);
+
+ return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_send_notify_ack(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *ntfy,
+ uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
+ uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
+{
+ struct qla_hw_data *ha = vha->hw;
+ request_t *pkt;
+ struct nack_to_isp *nack;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
+
+ /* Send marker if required */
+ if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+ return;
+
+ pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+ if (!pkt) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe049,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet\n", vha->vp_idx, __func__);
+ return;
+ }
+
+ if (ha->tgt.qla_tgt != NULL)
+ ha->tgt.qla_tgt->notify_ack_expected++;
+
+ pkt->entry_type = NOTIFY_ACK_TYPE;
+ pkt->entry_count = 1;
+
+ nack = (struct nack_to_isp *)pkt;
+ nack->ox_id = ntfy->ox_id;
+
+ nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
+ if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
+ nack->u.isp24.flags = ntfy->u.isp24.flags &
+ __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+ }
+ nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
+ nack->u.isp24.status = ntfy->u.isp24.status;
+ nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+ nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
+ nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
+ nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
+ nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
+ nack->u.isp24.srr_reject_code = srr_reject_code;
+ nack->u.isp24.srr_reject_code_expl = srr_explan;
+ nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe005,
+ "qla_target(%d): Sending 24xx Notify Ack %d\n",
+ vha->vp_idx, nack->u.isp24.status);
+
+ qla2x00_start_iocbs(vha, vha->req);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
+ struct abts_recv_from_24xx *abts, uint32_t status,
+ bool ids_reversed)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct abts_resp_to_24xx *resp;
+ uint32_t f_ctl;
+ uint8_t *p;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe006,
+ "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n",
+ ha, abts, status);
+
+ /* Send marker if required */
+ if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+ return;
+
+ resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+ if (!resp) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe04a,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet", vha->vp_idx, __func__);
+ return;
+ }
+
+ resp->entry_type = ABTS_RESP_24XX;
+ resp->entry_count = 1;
+ resp->nport_handle = abts->nport_handle;
+ resp->vp_index = vha->vp_idx;
+ resp->sof_type = abts->sof_type;
+ resp->exchange_address = abts->exchange_address;
+ resp->fcp_hdr_le = abts->fcp_hdr_le;
+ f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
+ F_CTL_LAST_SEQ | F_CTL_END_SEQ |
+ F_CTL_SEQ_INITIATIVE);
+ p = (uint8_t *)&f_ctl;
+ resp->fcp_hdr_le.f_ctl[0] = *p++;
+ resp->fcp_hdr_le.f_ctl[1] = *p++;
+ resp->fcp_hdr_le.f_ctl[2] = *p;
+ if (ids_reversed) {
+ resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
+ resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
+ resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
+ resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
+ resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
+ resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
+ } else {
+ resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
+ resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
+ resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
+ resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
+ resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
+ resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
+ }
+ resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
+ if (status == FCP_TMF_CMPL) {
+ resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
+ resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
+ resp->payload.ba_acct.low_seq_cnt = 0x0000;
+ resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+ resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
+ resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
+ } else {
+ resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
+ resp->payload.ba_rjt.reason_code =
+ BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
+ /* Other bytes are zero */
+ }
+
+ ha->tgt.qla_tgt->abts_resp_expected++;
+
+ qla2x00_start_iocbs(vha, vha->req);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
+ struct abts_resp_from_24xx_fw *entry)
+{
+ struct ctio7_to_24xx *ctio;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe007,
+ "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
+ /* Send marker if required */
+ if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
+ return;
+
+ ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+ if (ctio == NULL) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe04b,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet\n", vha->vp_idx, __func__);
+ return;
+ }
+
+ /*
+ * On entry we have the firmware's response to the ABTS response we
+ * generated, so the ID fields in it are reversed.
+ */
+
+ ctio->entry_type = CTIO_TYPE7;
+ ctio->entry_count = 1;
+ ctio->nport_handle = entry->nport_handle;
+ ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio->vp_index = vha->vp_idx;
+ ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
+ ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
+ ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
+ ctio->exchange_addr = entry->exchange_addr_to_abort;
+ ctio->u.status1.flags =
+ __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+ CTIO7_FLAGS_TERMINATE);
+ ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
+
+ qla2x00_start_iocbs(vha, vha->req);
+
+ qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
+ FCP_TMF_CMPL, true);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+ struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_mgmt_cmd *mcmd;
+ int rc;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
+ "qla_target(%d): task abort (tag=%d)\n",
+ vha->vp_idx, abts->exchange_addr_to_abort);
+
+ mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+ if (mcmd == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
+ "qla_target(%d): %s: Allocation of ABORT cmd failed",
+ vha->vp_idx, __func__);
+ return -ENOMEM;
+ }
+ memset(mcmd, 0, sizeof(*mcmd));
+
+ mcmd->sess = sess;
+ memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
+
+ rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK,
+ abts->exchange_addr_to_abort);
+ if (rc != 0) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
+ "qla_target(%d): tgt_ops->handle_tmr()"
+ " failed: %d", vha->vp_idx, rc);
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
+ struct abts_recv_from_24xx *abts)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ uint32_t tag = abts->exchange_addr_to_abort;
+ uint8_t s_id[3];
+ int rc;
+
+ if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
+ "qla_target(%d): ABTS: Abort Sequence not "
+ "supported\n", vha->vp_idx);
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ return;
+ }
+
+ if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
+ "qla_target(%d): ABTS: Unknown Exchange "
+ "Address received\n", vha->vp_idx);
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
+ "qla_target(%d): task abort (s_id=%x:%x:%x, "
+ "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
+ abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
+ le32_to_cpu(abts->fcp_hdr_le.parameter));
+
+ s_id[0] = abts->fcp_hdr_le.s_id[2];
+ s_id[1] = abts->fcp_hdr_le.s_id[1];
+ s_id[2] = abts->fcp_hdr_le.s_id[0];
+
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+ if (!sess) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
+ "qla_target(%d): task abort for non-existant session\n",
+ vha->vp_idx);
+ rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
+ QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
+ if (rc != 0) {
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
+ false);
+ }
+ return;
+ }
+
+ rc = __qlt_24xx_handle_abts(vha, abts, sess);
+ if (rc != 0) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
+ "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
+ vha->vp_idx, rc);
+ qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+ return;
+ }
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
+ struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
+{
+ struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
+ struct ctio7_to_24xx *ctio;
+
+ ql_dbg(ql_dbg_tgt, ha, 0xe008,
+ "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
+ ha, atio, resp_code);
+
+ /* Send marker if required */
+ if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
+ return;
+
+ ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
+ if (ctio == NULL) {
+ ql_dbg(ql_dbg_tgt, ha, 0xe04c,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet\n", ha->vp_idx, __func__);
+ return;
+ }
+
+ ctio->entry_type = CTIO_TYPE7;
+ ctio->entry_count = 1;
+ ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ ctio->nport_handle = mcmd->sess->loop_id;
+ ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio->vp_index = ha->vp_idx;
+ ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ ctio->exchange_addr = atio->u.isp24.exchange_addr;
+ ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
+ __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+ CTIO7_FLAGS_SEND_STATUS);
+ ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+ ctio->u.status1.scsi_status =
+ __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
+ ctio->u.status1.response_len = __constant_cpu_to_le16(8);
+ ((uint32_t *)ctio->u.status1.sense_data)[0] = cpu_to_be32(resp_code);
+
+ qla2x00_start_iocbs(ha, ha->req);
+}
+
+void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
+{
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+}
+EXPORT_SYMBOL(qlt_free_mcmd);
+
+/* callback from target fabric module code */
+void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
+{
+ struct scsi_qla_host *vha = mcmd->sess->vha;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
+ "TM response mcmd (%p) status %#x state %#x",
+ mcmd, mcmd->fc_tm_rsp, mcmd->flags);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
+ qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
+ 0, 0, 0, 0, 0, 0);
+ else {
+ if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
+ qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
+ mcmd->fc_tm_rsp, false);
+ else
+ qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
+ mcmd->fc_tm_rsp);
+ }
+ /*
+ * Make the callback for ->free_mcmd() to queue_work() and invoke
+ * target_put_sess_cmd() to drop cmd_kref to 1. The final
+ * target_put_sess_cmd() call will be made from TFO->check_stop_free()
+ * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
+ * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
+ * qlt_xmit_tm_rsp() returns here..
+ */
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+EXPORT_SYMBOL(qlt_xmit_tm_rsp);
+
+/* No locks */
+static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
+{
+ struct qla_tgt_cmd *cmd = prm->cmd;
+
+ BUG_ON(cmd->sg_cnt == 0);
+
+ prm->sg = (struct scatterlist *)cmd->sg;
+ prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
+ cmd->sg_cnt, cmd->dma_data_direction);
+ if (unlikely(prm->seg_cnt == 0))
+ goto out_err;
+
+ prm->cmd->sg_mapped = 1;
+
+ /*
+ * If there are more sg entries than fit in the command IOCB
+ * (datasegs_per_cmd), allocate continuation entries for the rest
+ */
+ if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
+ prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
+ prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);
+
+ ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
+ prm->seg_cnt, prm->req_cnt);
+ return 0;
+
+out_err:
+ ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
+ "qla_target(%d): PCI mapping failed: sg_cnt=%d",
+ 0, prm->cmd->sg_cnt);
+ return -1;
+}
+
+static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ BUG_ON(!cmd->sg_mapped);
+ pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+ cmd->sg_mapped = 0;
+}
+
+static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
+ uint32_t req_cnt)
+{
+ struct qla_hw_data *ha = vha->hw;
+ device_reg_t __iomem *reg = ha->iobase;
+ uint32_t cnt;
+
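+ /*
+ * If the cached count of free request entries looks too small,
+ * re-read the firmware's out-pointer and recompute the space left
+ * in the request ring, accounting for wrap-around.
+ */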
+ if (vha->req->cnt < (req_cnt + 2)) {
+ cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe00a,
+ "Request ring circled: cnt=%d, vha->->ring_index=%d, "
+ "vha->req->cnt=%d, req_cnt=%d\n", cnt,
+ vha->req->ring_index, vha->req->cnt, req_cnt);
+ if (vha->req->ring_index < cnt)
+ vha->req->cnt = cnt - vha->req->ring_index;
+ else
+ vha->req->cnt = vha->req->length -
+ (vha->req->ring_index - cnt);
+ }
+
+ if (unlikely(vha->req->cnt < (req_cnt + 2))) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe00b,
+ "qla_target(%d): There is no room in the "
+ "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
+ "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
+ vha->req->cnt, req_cnt);
+ return -EAGAIN;
+ }
+ vha->req->cnt -= req_cnt;
+
+ return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
+{
+ /* Adjust ring index. */
+ vha->req->ring_index++;
+ if (vha->req->ring_index == vha->req->length) {
+ vha->req->ring_index = 0;
+ vha->req->ring_ptr = vha->req->ring;
+ } else {
+ vha->req->ring_ptr++;
+ }
+ return (cont_entry_t *)vha->req->ring_ptr;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t h;
+
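+ /*
+ * Find a free slot in ha->tgt.cmds[]: valid handles run from 1 to
+ * MAX_OUTSTANDING_COMMANDS (0 is QLA_TGT_NULL_HANDLE), and the search
+ * gives up once it wraps back around to the starting handle.
+ */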
+ h = ha->tgt.current_handle;
+ /* always increment cmd handle */
+ do {
+ ++h;
+ if (h > MAX_OUTSTANDING_COMMANDS)
+ h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
+ if (h == ha->tgt.current_handle) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe04e,
+ "qla_target(%d): Ran out of "
+ "empty cmd slots in ha %p\n", vha->vp_idx, ha);
+ h = QLA_TGT_NULL_HANDLE;
+ break;
+ }
+ } while ((h == QLA_TGT_NULL_HANDLE) ||
+ (h == QLA_TGT_SKIP_HANDLE) ||
+ (ha->tgt.cmds[h-1] != NULL));
+
+ if (h != QLA_TGT_NULL_HANDLE)
+ ha->tgt.current_handle = h;
+
+ return h;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
+ struct scsi_qla_host *vha)
+{
+ uint32_t h;
+ struct ctio7_to_24xx *pkt;
+ struct qla_hw_data *ha = vha->hw;
+ struct atio_from_isp *atio = &prm->cmd->atio;
+
+ pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
+ prm->pkt = pkt;
+ memset(pkt, 0, sizeof(*pkt));
+
+ pkt->entry_type = CTIO_TYPE7;
+ pkt->entry_count = (uint8_t)prm->req_cnt;
+ pkt->vp_index = vha->vp_idx;
+
+ h = qlt_make_handle(vha);
+ if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
+ /*
+ * CTIO type 7 from the firmware doesn't provide a way to
+ * know the initiator's LOOP ID, hence we can't find
+ * the session and, so, the command.
+ */
+ return -EAGAIN;
+ } else
+ ha->tgt.cmds[h-1] = prm->cmd;
+
+ pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
+ pkt->nport_handle = prm->cmd->loop_id;
+ pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ pkt->exchange_addr = atio->u.isp24.exchange_addr;
+ pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
+ pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+ pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe00c,
+ "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
+ vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
+ le16_to_cpu(pkt->u.status0.ox_id));
+ return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. We have already made sure
+ * that there is sufficient amount of request entries to not drop it.
+ */
+static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
+ struct scsi_qla_host *vha)
+{
+ int cnt;
+ uint32_t *dword_ptr;
+ int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
+
+ /* Build continuation packets */
+ while (prm->seg_cnt > 0) {
+ cont_a64_entry_t *cont_pkt64 =
+ (cont_a64_entry_t *)qlt_get_req_pkt(vha);
+
+ /*
+ * Make sure that none of the 64-bit specific fields of
+ * cont_pkt64 are used for 32-bit addressing. Cast to
+ * (cont_entry_t *) for that.
+ */
+
+ memset(cont_pkt64, 0, sizeof(*cont_pkt64));
+
+ cont_pkt64->entry_count = 1;
+ cont_pkt64->sys_define = 0;
+
+ if (enable_64bit_addressing) {
+ cont_pkt64->entry_type = CONTINUE_A64_TYPE;
+ dword_ptr =
+ (uint32_t *)&cont_pkt64->dseg_0_address;
+ } else {
+ cont_pkt64->entry_type = CONTINUE_TYPE;
+ dword_ptr =
+ (uint32_t *)&((cont_entry_t *)
+ cont_pkt64)->dseg_0_address;
+ }
+
+ /* Load continuation entry data segments */
+ for (cnt = 0;
+ cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
+ cnt++, prm->seg_cnt--) {
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_lo32
+ (sg_dma_address(prm->sg)));
+ if (enable_64bit_addressing) {
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_hi32
+ (sg_dma_address
+ (prm->sg)));
+ }
+ *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe00d,
+ "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
+ (long long unsigned int)
+ pci_dma_hi32(sg_dma_address(prm->sg)),
+ (long long unsigned int)
+ pci_dma_lo32(sg_dma_address(prm->sg)),
+ (int)sg_dma_len(prm->sg));
+
+ prm->sg = sg_next(prm->sg);
+ }
+ }
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. We have already made sure
+ * that there is sufficient amount of request entries to not drop it.
+ */
+static void qlt_load_data_segments(struct qla_tgt_prm *prm,
+ struct scsi_qla_host *vha)
+{
+ int cnt;
+ uint32_t *dword_ptr;
+ int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
+ struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe00e,
+ "iocb->scsi_status=%x, iocb->flags=%x\n",
+ le16_to_cpu(pkt24->u.status0.scsi_status),
+ le16_to_cpu(pkt24->u.status0.flags));
+
+ pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
+
+ /* Setup packet address segment pointer */
+ dword_ptr = pkt24->u.status0.dseg_0_address;
+
+ /* Set total data segment count */
+ if (prm->seg_cnt)
+ pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
+
+ if (prm->seg_cnt == 0) {
+ /* No data transfer */
+ *dword_ptr++ = 0;
+ *dword_ptr = 0;
+ return;
+ }
+
+ /* If scatter gather */
+ ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");
+
+ /* Load command entry data segments */
+ for (cnt = 0;
+ (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
+ cnt++, prm->seg_cnt--) {
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
+ if (enable_64bit_addressing) {
+ *dword_ptr++ =
+ cpu_to_le32(pci_dma_hi32(
+ sg_dma_address(prm->sg)));
+ }
+ *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe010,
+ "S/G Segment phys_addr=%llx:%llx, len=%d\n",
+ (long long unsigned int)pci_dma_hi32(sg_dma_address(
+ prm->sg)),
+ (long long unsigned int)pci_dma_lo32(sg_dma_address(
+ prm->sg)),
+ (int)sg_dma_len(prm->sg));
+
+ prm->sg = sg_next(prm->sg);
+ }
+
+ qlt_load_cont_data_segments(prm, vha);
+}
+
+static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
+{
+ return cmd->bufflen > 0;
+}
+
+/*
+ * Called without ha->hardware_lock held
+ */
+static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
+ struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
+ uint32_t *full_req_cnt)
+{
+ struct qla_tgt *tgt = cmd->tgt;
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ if (unlikely(cmd->aborted)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
+ "qla_target(%d): terminating exchange "
+ "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
+ se_cmd, cmd->tag);
+
+ cmd->state = QLA_TGT_STATE_ABORTED;
+
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
+
+ /* !! At this point cmd could be already freed !! */
+ return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
+ vha->vp_idx, cmd->tag);
+
+ prm->cmd = cmd;
+ prm->tgt = tgt;
+ prm->rq_result = scsi_status;
+ prm->sense_buffer = &cmd->sense_buffer[0];
+ prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
+ prm->sg = NULL;
+ prm->seg_cnt = -1;
+ prm->req_cnt = 1;
+ prm->add_status_pkt = 0;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
+ prm->rq_result, xmit_type);
+
+ /* Send marker if required */
+ if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
+ return -EFAULT;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);
+
+ if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
+ if (qlt_pci_map_calc_cnt(prm) != 0)
+ return -EAGAIN;
+ }
+
+ *full_req_cnt = prm->req_cnt;
+
+ if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+ prm->residual = se_cmd->residual_count;
+ ql_dbg(ql_dbg_tgt, vha, 0xe014,
+ "Residual underflow: %d (tag %d, "
+ "op %x, bufflen %d, rq_result %x)\n", prm->residual,
+ cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+ cmd->bufflen, prm->rq_result);
+ prm->rq_result |= SS_RESIDUAL_UNDER;
+ } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ prm->residual = se_cmd->residual_count;
+ ql_dbg(ql_dbg_tgt, vha, 0xe015,
+ "Residual overflow: %d (tag %d, "
+ "op %x, bufflen %d, rq_result %x)\n", prm->residual,
+ cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+ cmd->bufflen, prm->rq_result);
+ prm->rq_result |= SS_RESIDUAL_OVER;
+ }
+
+ if (xmit_type & QLA_TGT_XMIT_STATUS) {
+ /*
+ * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
+ * ignored in *xmit_response() below
+ */
+ if (qlt_has_data(cmd)) {
+ if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
+ (IS_FWI2_CAPABLE(ha) &&
+ (prm->rq_result != 0))) {
+ prm->add_status_pkt = 1;
+ (*full_req_cnt)++;
+ }
+ }
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe016,
+ "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
+ prm->req_cnt, *full_req_cnt, prm->add_status_pkt);
+
+ return 0;
+}
+
+static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
+ struct qla_tgt_cmd *cmd, int sending_sense)
+{
+ if (ha->tgt.enable_class_2)
+ return 0;
+
+ if (sending_sense)
+ return cmd->conf_compl_supported;
+ else
+ return ha->tgt.enable_explicit_conf &&
+ cmd->conf_compl_supported;
+}
+
+#ifdef CONFIG_QLA_TGT_DEBUG_SRR
+/*
+ * Original taken from the XFS code
+ */
+static unsigned long qlt_srr_random(void)
+{
+ static int Inited;
+ static unsigned long RandomValue;
+ static DEFINE_SPINLOCK(lock);
+ /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
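+ /*
+ * This is the classic Park-Miller "minimal standard" generator,
+ * x = 16807 * x mod (2^31 - 1), computed with Schrage's method
+ * (127773 = m / a, 2836 = m % a) to avoid 32-bit overflow.
+ */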
+ register long rv;
+ register long lo;
+ register long hi;
+ unsigned long flags;
+
+ spin_lock_irqsave(&lock, flags);
+ if (!Inited) {
+ RandomValue = jiffies;
+ Inited = 1;
+ }
+ rv = RandomValue;
+ hi = rv / 127773;
+ lo = rv % 127773;
+ rv = 16807 * lo - 2836 * hi;
+ if (rv <= 0)
+ rv += 2147483647;
+ RandomValue = rv;
+ spin_unlock_irqrestore(&lock, flags);
+ return rv;
+}
+
+static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
+{
+#if 0 /* This is not a real lost status packet, so it won't lead to an SRR */
+ if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
+ == 50) {
+ *xmit_type &= ~QLA_TGT_XMIT_STATUS;
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
+ "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
+ }
+#endif
+ /*
+ * It's currently not possible to simulate SRRs for FCP_WRITE without
+ * a physical link layer failure, so don't even try here..
+ */
+ if (cmd->dma_data_direction != DMA_FROM_DEVICE)
+ return;
+
+ if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
+ ((qlt_srr_random() % 100) == 20)) {
+ int i, leave = 0;
+ unsigned int tot_len = 0;
+
+ while (leave == 0)
+ leave = qlt_srr_random() % cmd->sg_cnt;
+
+ for (i = 0; i < leave; i++)
+ tot_len += cmd->sg[i].length;
+
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
+ "Cutting cmd %p (tag %d) buffer"
+ " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
+ " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
+ cmd->bufflen, cmd->sg_cnt);
+
+ cmd->bufflen = tot_len;
+ cmd->sg_cnt = leave;
+ }
+
+ if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
+ unsigned int offset = qlt_srr_random() % cmd->bufflen;
+
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
+ "Cutting cmd %p (tag %d) buffer head "
+ "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
+ cmd->bufflen);
+ if (offset == 0)
+ *xmit_type &= ~QLA_TGT_XMIT_DATA;
+ else if (qlt_set_data_offset(cmd, offset)) {
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
+ "qlt_set_data_offset() failed (tag %d)", cmd->tag);
+ }
+ }
+}
+#else
+static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
+{}
+#endif
+
+static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
+ struct qla_tgt_prm *prm)
+{
+ prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
+ (uint32_t)sizeof(ctio->u.status1.sense_data));
+ ctio->u.status0.flags |=
+ __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
+ if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
+ ctio->u.status0.flags |= __constant_cpu_to_le16(
+ CTIO7_FLAGS_EXPLICIT_CONFORM |
+ CTIO7_FLAGS_CONFORM_REQ);
+ }
+ ctio->u.status0.residual = cpu_to_le32(prm->residual);
+ ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
+ if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
+ int i;
+
+ if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
+ if (prm->cmd->se_cmd.scsi_status != 0) {
+ ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
+ "Skipping EXPLICIT_CONFORM and "
+ "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
+ "non GOOD status\n");
+ goto skip_explict_conf;
+ }
+ ctio->u.status1.flags |= __constant_cpu_to_le16(
+ CTIO7_FLAGS_EXPLICIT_CONFORM |
+ CTIO7_FLAGS_CONFORM_REQ);
+ }
+skip_explict_conf:
+ ctio->u.status1.flags &=
+ ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+ ctio->u.status1.flags |=
+ __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+ ctio->u.status1.scsi_status |=
+ __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
+ ctio->u.status1.sense_length =
+ cpu_to_le16(prm->sense_buffer_len);
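+ /* Copy the sense data into the CTIO as big-endian 32-bit words */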
+ for (i = 0; i < prm->sense_buffer_len/4; i++)
+ ((uint32_t *)ctio->u.status1.sense_data)[i] =
+ cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
+#if 0
+ if (unlikely((prm->sense_buffer_len % 4) != 0)) {
+ static int q;
+ if (q < 10) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe04f,
+ "qla_target(%d): %d bytes of sense "
+ "lost", prm->tgt->ha->vp_idx,
+ prm->sense_buffer_len % 4);
+ q++;
+ }
+ }
+#endif
+ } else {
+ ctio->u.status1.flags &=
+ ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
+ ctio->u.status1.flags |=
+ __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
+ ctio->u.status1.sense_length = 0;
+ memset(ctio->u.status1.sense_data, 0,
+ sizeof(ctio->u.status1.sense_data));
+ }
+
+ /* Sense with len > 24, is it possible ??? */
+}
+
+/*
+ * Callback to set up a response of xmit_type QLA_TGT_XMIT_DATA and/or
+ * QLA_TGT_XMIT_STATUS for >= 24xx silicon
+ */
+int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
+ uint8_t scsi_status)
+{
+ struct scsi_qla_host *vha = cmd->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct ctio7_to_24xx *pkt;
+ struct qla_tgt_prm prm;
+ uint32_t full_req_cnt = 0;
+ unsigned long flags = 0;
+ int res;
+
+ memset(&prm, 0, sizeof(prm));
+ qlt_check_srr_debug(cmd, &xmit_type);
+
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
+ "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
+ "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ?
+ 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction);
+
+ res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
+ &full_req_cnt);
+ if (unlikely(res != 0)) {
+ if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
+ return 0;
+
+ return res;
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Does the F/W have enough IOCBs for this request? */
+ res = qlt_check_reserve_free_req(vha, full_req_cnt);
+ if (unlikely(res))
+ goto out_unmap_unlock;
+
+ res = qlt_24xx_build_ctio_pkt(&prm, vha);
+ if (unlikely(res != 0))
+ goto out_unmap_unlock;
+
+
+ pkt = (struct ctio7_to_24xx *)prm.pkt;
+
+ if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
+ pkt->u.status0.flags |=
+ __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
+ CTIO7_FLAGS_STATUS_MODE_0);
+
+ qlt_load_data_segments(&prm, vha);
+
+ if (prm.add_status_pkt == 0) {
+ if (xmit_type & QLA_TGT_XMIT_STATUS) {
+ pkt->u.status0.scsi_status =
+ cpu_to_le16(prm.rq_result);
+ pkt->u.status0.residual =
+ cpu_to_le32(prm.residual);
+ pkt->u.status0.flags |= __constant_cpu_to_le16(
+ CTIO7_FLAGS_SEND_STATUS);
+ if (qlt_need_explicit_conf(ha, cmd, 0)) {
+ pkt->u.status0.flags |=
+ __constant_cpu_to_le16(
+ CTIO7_FLAGS_EXPLICIT_CONFORM |
+ CTIO7_FLAGS_CONFORM_REQ);
+ }
+ }
+
+ } else {
+ /*
+ * We have already made sure that there is sufficient
+ * amount of request entries to not drop HW lock in
+ * req_pkt().
+ */
+ struct ctio7_to_24xx *ctio =
+ (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe019,
+ "Building additional status packet\n");
+
+ memcpy(ctio, pkt, sizeof(*ctio));
+ ctio->entry_count = 1;
+ ctio->dseg_count = 0;
+ ctio->u.status1.flags &= ~__constant_cpu_to_le16(
+ CTIO7_FLAGS_DATA_IN);
+
+ /* Real finish is ctio_m1's finish */
+ pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
+ pkt->u.status0.flags |= __constant_cpu_to_le16(
+ CTIO7_FLAGS_DONT_RET_CTIO);
+ qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
+ &prm);
+ pr_debug("Status CTIO7: %p\n", ctio);
+ }
+ } else
+ qlt_24xx_init_ctio_to_isp(pkt, &prm);
+
+
+ cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe01a,
+ "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n",
+ pkt, scsi_status);
+
+ qla2x00_start_iocbs(vha, vha->req);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return 0;
+
+out_unmap_unlock:
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return res;
+}
+EXPORT_SYMBOL(qlt_xmit_response);
+
+int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
+{
+ struct ctio7_to_24xx *pkt;
+ struct scsi_qla_host *vha = cmd->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = cmd->tgt;
+ struct qla_tgt_prm prm;
+ unsigned long flags;
+ int res = 0;
+
+ memset(&prm, 0, sizeof(prm));
+ prm.cmd = cmd;
+ prm.tgt = tgt;
+ prm.sg = NULL;
+ prm.req_cnt = 1;
+
+ /* Send marker if required */
+ if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
+ return -EIO;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)",
+ (int)vha->vp_idx);
+
+ /* Calculate number of entries and segments required */
+ if (qlt_pci_map_calc_cnt(&prm) != 0)
+ return -EAGAIN;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Does the F/W have enough IOCBs for this request? */
+ res = qlt_check_reserve_free_req(vha, prm.req_cnt);
+ if (res != 0)
+ goto out_unlock_free_unmap;
+
+ res = qlt_24xx_build_ctio_pkt(&prm, vha);
+ if (unlikely(res != 0))
+ goto out_unlock_free_unmap;
+ pkt = (struct ctio7_to_24xx *)prm.pkt;
+ pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
+ CTIO7_FLAGS_STATUS_MODE_0);
+ qlt_load_data_segments(&prm, vha);
+
+ cmd->state = QLA_TGT_STATE_NEED_DATA;
+
+ qla2x00_start_iocbs(vha, vha->req);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return res;
+
+out_unlock_free_unmap:
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return res;
+}
+EXPORT_SYMBOL(qlt_rdy_to_xfer);
+
+/* If hardware_lock held on entry, might drop it, then reacquire */
+/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
+static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd,
+ struct atio_from_isp *atio)
+{
+ struct ctio7_to_24xx *ctio24;
+ struct qla_hw_data *ha = vha->hw;
+ request_t *pkt;
+ int ret = 0;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
+
+ pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+ if (pkt == NULL) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe050,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet\n", vha->vp_idx, __func__);
+ return -ENOMEM;
+ }
+
+ if (cmd != NULL) {
+ if (cmd->state < QLA_TGT_STATE_PROCESSED) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe051,
+ "qla_target(%d): Terminating cmd %p with "
+ "incorrect state %d\n", vha->vp_idx, cmd,
+ cmd->state);
+ } else
+ ret = 1;
+ }
+
+ pkt->entry_count = 1;
+ pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
+ ctio24 = (struct ctio7_to_24xx *)pkt;
+ ctio24->entry_type = CTIO_TYPE7;
+ ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
+ ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio24->vp_index = vha->vp_idx;
+ ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ ctio24->exchange_addr = atio->u.isp24.exchange_addr;
+ ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
+ __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
+ CTIO7_FLAGS_TERMINATE);
+ ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+
+ /* Most likely, it isn't needed */
+ ctio24->u.status1.residual = get_unaligned((uint32_t *)
+ &atio->u.isp24.fcp_cmnd.add_cdb[
+ atio->u.isp24.fcp_cmnd.add_cdb_len]);
+ if (ctio24->u.status1.residual != 0)
+ ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+
+ qla2x00_start_iocbs(vha, vha->req);
+ return ret;
+}
+
+static void qlt_send_term_exchange(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
+{
+ unsigned long flags;
+ int rc;
+
+ if (qlt_issue_marker(vha, ha_locked) < 0)
+ return;
+
+ if (ha_locked) {
+ rc = __qlt_send_term_exchange(vha, cmd, atio);
+ goto done;
+ }
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ rc = __qlt_send_term_exchange(vha, cmd, atio);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+done:
+ if (rc == 1) {
+ if (!ha_locked && !in_interrupt())
+ msleep(250); /* just in case */
+
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ }
+}
+
+void qlt_free_cmd(struct qla_tgt_cmd *cmd)
+{
+ BUG_ON(cmd->sg_mapped);
+
+ if (unlikely(cmd->free_sg))
+ kfree(cmd->sg);
+ kmem_cache_free(qla_tgt_cmd_cachep, cmd);
+}
+EXPORT_SYMBOL(qlt_free_cmd);
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
+ struct qla_tgt_cmd *cmd, void *ctio)
+{
+ struct qla_tgt_srr_ctio *sc;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt_srr_imm *imm;
+
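+ /*
+ * An SRR arrives as a matched pair: an immediate notify IOCB and a
+ * CTIO completed with SRR status, correlated by srr_id. The SRR work
+ * is only scheduled once both halves have been queued.
+ */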
+ tgt->ctio_srr_id++;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
+ "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
+
+ if (!ctio) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
+ "qla_target(%d): SRR CTIO, but ctio is NULL\n",
+ vha->vp_idx);
+ return -EINVAL;
+ }
+
+ sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
+ if (sc != NULL) {
+ sc->cmd = cmd;
+ /* IRQ is already OFF */
+ spin_lock(&tgt->srr_lock);
+ sc->srr_id = tgt->ctio_srr_id;
+ list_add_tail(&sc->srr_list_entry,
+ &tgt->srr_ctio_list);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
+ "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
+ if (tgt->imm_srr_id == tgt->ctio_srr_id) {
+ int found = 0;
+ list_for_each_entry(imm, &tgt->srr_imm_list,
+ srr_list_entry) {
+ if (imm->srr_id == sc->srr_id) {
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
+ "Scheduling srr work\n");
+ schedule_work(&tgt->srr_work);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
+ "qla_target(%d): imm_srr_id "
+ "== ctio_srr_id (%d), but there is no "
+ "corresponding SRR IMM, deleting CTIO "
+ "SRR %p\n", vha->vp_idx,
+ tgt->ctio_srr_id, sc);
+ list_del(&sc->srr_list_entry);
+ spin_unlock(&tgt->srr_lock);
+
+ kfree(sc);
+ return -EINVAL;
+ }
+ }
+ spin_unlock(&tgt->srr_lock);
+ } else {
+ struct qla_tgt_srr_imm *ti;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
+ "qla_target(%d): Unable to allocate SRR CTIO entry\n",
+ vha->vp_idx);
+ spin_lock(&tgt->srr_lock);
+ list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
+ srr_list_entry) {
+ if (imm->srr_id == tgt->ctio_srr_id) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
+ "IMM SRR %p deleted (id %d)\n",
+ imm, imm->srr_id);
+ list_del(&imm->srr_list_entry);
+ qlt_reject_free_srr_imm(vha, imm, 1);
+ }
+ }
+ spin_unlock(&tgt->srr_lock);
+
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
+ struct qla_tgt_cmd *cmd, uint32_t status)
+{
+ int term = 0;
+
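+ /*
+ * Terminate the exchange ourselves only if the firmware has not
+ * already done so (OF_TERM_EXCH set in the returned CTIO flags).
+ */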
+ if (ctio != NULL) {
+ struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
+ term = !(c->flags &
+ __constant_cpu_to_le16(OF_TERM_EXCH));
+ } else
+ term = 1;
+
+ if (term)
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+
+ return term;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
+ uint32_t handle)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ handle--;
+ if (ha->tgt.cmds[handle] != NULL) {
+ struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
+ ha->tgt.cmds[handle] = NULL;
+ return cmd;
+ } else
+ return NULL;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
+ uint32_t handle, void *ctio)
+{
+ struct qla_tgt_cmd *cmd = NULL;
+
+ /* Clear out internal marks */
+ handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
+ CTIO_INTERMEDIATE_HANDLE_MARK);
+
+ if (handle != QLA_TGT_NULL_HANDLE) {
+ if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
+ "SKIP_HANDLE CTIO\n");
+ return NULL;
+ }
+ /* handle-1 is actually used */
+ if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe052,
+ "qla_target(%d): Wrong handle %x received\n",
+ vha->vp_idx, handle);
+ return NULL;
+ }
+ cmd = qlt_get_cmd(vha, handle);
+ if (unlikely(cmd == NULL)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe053,
+ "qla_target(%d): Suspicious: unable to "
+ "find the command with handle %x\n", vha->vp_idx,
+ handle);
+ return NULL;
+ }
+ } else if (ctio != NULL) {
+ /* We can't get loop ID from CTIO7 */
+ ql_dbg(ql_dbg_tgt, vha, 0xe054,
+ "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
+ "support NULL handles\n", vha->vp_idx);
+ return NULL;
+ }
+
+ return cmd;
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
+ uint32_t status, void *ctio)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct se_cmd *se_cmd;
+ struct target_core_fabric_ops *tfo;
+ struct qla_tgt_cmd *cmd;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe01e,
+ "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
+ vha->vp_idx, ctio, status, handle);
+
+ if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
+ /* That could happen only in case of an error/reset/abort */
+ if (status != CTIO_SUCCESS) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
+ "Intermediate CTIO received"
+ " (status %x)\n", status);
+ }
+ return;
+ }
+
+ cmd = qlt_ctio_to_cmd(vha, handle, ctio);
+ if (cmd == NULL)
+ return;
+
+ se_cmd = &cmd->se_cmd;
+ tfo = se_cmd->se_tfo;
+
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(vha, cmd);
+
+ if (unlikely(status != CTIO_SUCCESS)) {
+ switch (status & 0xFFFF) {
+ case CTIO_LIP_RESET:
+ case CTIO_TARGET_RESET:
+ case CTIO_ABORTED:
+ case CTIO_TIMEOUT:
+ case CTIO_INVALID_RX_ID:
+ /* They are OK */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
+ "qla_target(%d): CTIO with "
+ "status %#x received, state %x, se_cmd %p, "
+ "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
+ "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
+ status, cmd->state, se_cmd);
+ break;
+
+ case CTIO_PORT_LOGGED_OUT:
+ case CTIO_PORT_UNAVAILABLE:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
+ "qla_target(%d): CTIO with PORT LOGGED "
+ "OUT (29) or PORT UNAVAILABLE (28) status %x "
+ "received (state %x, se_cmd %p)\n", vha->vp_idx,
+ status, cmd->state, se_cmd);
+ break;
+
+ case CTIO_SRR_RECEIVED:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
+ "qla_target(%d): CTIO with SRR_RECEIVED"
+ " status %x received (state %x, se_cmd %p)\n",
+ vha->vp_idx, status, cmd->state, se_cmd);
+ if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
+ break;
+ else
+ return;
+
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
+ "qla_target(%d): CTIO with error status "
+ "0x%x received (state %x, se_cmd %p\n",
+ vha->vp_idx, status, cmd->state, se_cmd);
+ break;
+ }
+
+ if (cmd->state != QLA_TGT_STATE_NEED_DATA)
+ if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
+ return;
+ }
+
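+ /*
+ * Drive the command state machine: a completed status CTIO means the
+ * command is finished and can be freed, while a completed data-out
+ * CTIO hands the received write data back to the fabric module via
+ * ->handle_data().
+ */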
+ if (cmd->state == QLA_TGT_STATE_PROCESSED) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
+ } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+ int rx_status = 0;
+
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+
+ if (unlikely(status != CTIO_SUCCESS))
+ rx_status = -EIO;
+ else
+ cmd->write_data_transferred = 1;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe020,
+ "Data received, context %x, rx_status %d\n",
+ 0x0, rx_status);
+
+ ha->tgt.tgt_ops->handle_data(cmd);
+ return;
+ } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
+ "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
+ "qla_target(%d): A command in state (%d) should "
+ "not return a CTIO complete\n", vha->vp_idx, cmd->state);
+ }
+
+ if (unlikely(status != CTIO_SUCCESS)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
+ dump_stack();
+ }
+
+ ha->tgt.tgt_ops->free_cmd(cmd);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+void qlt_ctio_completion(struct scsi_qla_host *vha, uint32_t handle)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+
+ if (likely(tgt == NULL)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe021,
+ "CTIO, but target mode not enabled"
+ " (ha %d %p handle %#x)", vha->vp_idx, ha, handle);
+ return;
+ }
+
+ tgt->irq_cmd_count++;
+ qlt_do_ctio_completion(vha, handle, CTIO_SUCCESS, NULL);
+ tgt->irq_cmd_count--;
+}
+
+static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
+ uint8_t task_codes)
+{
+ int fcp_task_attr;
+
+ switch (task_codes) {
+ case ATIO_SIMPLE_QUEUE:
+ fcp_task_attr = MSG_SIMPLE_TAG;
+ break;
+ case ATIO_HEAD_OF_QUEUE:
+ fcp_task_attr = MSG_HEAD_TAG;
+ break;
+ case ATIO_ORDERED_QUEUE:
+ fcp_task_attr = MSG_ORDERED_TAG;
+ break;
+ case ATIO_ACA_QUEUE:
+ fcp_task_attr = MSG_ACA_TAG;
+ break;
+ case ATIO_UNTAGGED:
+ fcp_task_attr = MSG_SIMPLE_TAG;
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
+ "qla_target: unknown task code %x, use ORDERED instead\n",
+ task_codes);
+ fcp_task_attr = MSG_ORDERED_TAG;
+ break;
+ }
+
+ return fcp_task_attr;
+}
+
+static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
+ uint8_t *);
+/*
+ * Process context for I/O path into tcm_qla2xxx code
+ */
+static void qlt_do_work(struct work_struct *work)
+{
+ struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ scsi_qla_host_t *vha = cmd->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt_sess *sess = NULL;
+ struct atio_from_isp *atio = &cmd->atio;
+ unsigned char *cdb;
+ unsigned long flags;
+ uint32_t data_length;
+ int ret, fcp_task_attr, data_dir, bidi = 0;
+
+ if (tgt->tgt_stop)
+ goto out_term;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+ atio->u.isp24.fcp_hdr.s_id);
+ /* Do kref_get() before dropping qla_hw_data->hardware_lock. */
+ if (sess)
+ kref_get(&sess->se_sess->sess_kref);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if (unlikely(!sess)) {
+ uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
+ "qla_target(%d): Unable to find wwn login"
+ " (s_id %x:%x:%x), trying to create it manually\n",
+ vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+
+ if (atio->u.raw.entry_count > 1) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
+ "Dropping multy entry cmd %p\n", cmd);
+ goto out_term;
+ }
+
+ mutex_lock(&ha->tgt.tgt_mutex);
+ sess = qlt_make_local_sess(vha, s_id);
+ /* sess has an extra creation ref. */
+ mutex_unlock(&ha->tgt.tgt_mutex);
+
+ if (!sess)
+ goto out_term;
+ }
+
+ cmd->sess = sess;
+ cmd->loop_id = sess->loop_id;
+ cmd->conf_compl_supported = sess->conf_compl_supported;
+
+ cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
+ cmd->tag = atio->u.isp24.exchange_addr;
+ cmd->unpacked_lun = scsilun_to_int(
+ (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
+
+ if (atio->u.isp24.fcp_cmnd.rddata &&
+ atio->u.isp24.fcp_cmnd.wrdata) {
+ bidi = 1;
+ data_dir = DMA_TO_DEVICE;
+ } else if (atio->u.isp24.fcp_cmnd.rddata)
+ data_dir = DMA_FROM_DEVICE;
+ else if (atio->u.isp24.fcp_cmnd.wrdata)
+ data_dir = DMA_TO_DEVICE;
+ else
+ data_dir = DMA_NONE;
+
+ fcp_task_attr = qlt_get_fcp_task_attr(vha,
+ atio->u.isp24.fcp_cmnd.task_attr);
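+ /* The FCP_DL (expected data length) follows any additional CDB bytes */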
+ data_length = be32_to_cpu(get_unaligned((uint32_t *)
+ &atio->u.isp24.fcp_cmnd.add_cdb[
+ atio->u.isp24.fcp_cmnd.add_cdb_len]));
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe022,
+ "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
+ cmd, cmd->unpacked_lun, cmd->tag);
+
+ ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
+ fcp_task_attr, data_dir, bidi);
+ if (ret != 0)
+ goto out_term;
+ /*
+ * Drop the extra session reference from qla_tgt_handle_cmd_for_atio*().
+ */
+ ha->tgt.tgt_ops->put_sess(sess);
+ return;
+
+out_term:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
+ /*
+ * cmd has not been sent to the target yet, so pass NULL as the second
+ * argument to qlt_send_term_exchange() and free the memory here.
+ */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
+ kmem_cache_free(qla_tgt_cmd_cachep, cmd);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt_cmd *cmd;
+
+ if (unlikely(tgt->tgt_stop)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
+ "New command while device %p is shutting down\n", tgt);
+ return -EFAULT;
+ }
+
+ cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
+ if (!cmd) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
+ "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&cmd->cmd_list);
+
+ memcpy(&cmd->atio, atio, sizeof(*atio));
+ cmd->state = QLA_TGT_STATE_NEW;
+ cmd->tgt = ha->tgt.qla_tgt;
+ cmd->vha = vha;
+
+ INIT_WORK(&cmd->work, qlt_do_work);
+ queue_work(qla_tgt_wq, &cmd->work);
+ return 0;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
+ int fn, void *iocb, int flags)
+{
+ struct scsi_qla_host *vha = sess->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_mgmt_cmd *mcmd;
+ int res;
+ uint8_t tmr_func;
+
+ mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+ if (!mcmd) {
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
+ "qla_target(%d): Allocation of management "
+ "command failed, some commands and their data could "
+ "leak\n", vha->vp_idx);
+ return -ENOMEM;
+ }
+ memset(mcmd, 0, sizeof(*mcmd));
+ mcmd->sess = sess;
+
+ if (iocb) {
+ memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
+ sizeof(mcmd->orig_iocb.imm_ntfy));
+ }
+ mcmd->tmr_func = fn;
+ mcmd->flags = flags;
+
+ switch (fn) {
+ case QLA_TGT_CLEAR_ACA:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
+ "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
+ tmr_func = TMR_CLEAR_ACA;
+ break;
+
+ case QLA_TGT_TARGET_RESET:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
+ "qla_target(%d): TARGET_RESET received\n",
+ sess->vha->vp_idx);
+ tmr_func = TMR_TARGET_WARM_RESET;
+ break;
+
+ case QLA_TGT_LUN_RESET:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
+ "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
+ tmr_func = TMR_LUN_RESET;
+ break;
+
+ case QLA_TGT_CLEAR_TS:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
+ "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
+ tmr_func = TMR_CLEAR_TASK_SET;
+ break;
+
+ case QLA_TGT_ABORT_TS:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
+ "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
+ tmr_func = TMR_ABORT_TASK_SET;
+ break;
+#if 0
+ case QLA_TGT_ABORT_ALL:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
+ "qla_target(%d): Doing ABORT_ALL_TASKS\n",
+ sess->vha->vp_idx);
+ tmr_func = 0;
+ break;
+
+ case QLA_TGT_ABORT_ALL_SESS:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
+ "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
+ sess->vha->vp_idx);
+ tmr_func = 0;
+ break;
+
+ case QLA_TGT_NEXUS_LOSS_SESS:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
+ "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
+ sess->vha->vp_idx);
+ tmr_func = 0;
+ break;
+
+ case QLA_TGT_NEXUS_LOSS:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
+ "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
+ tmr_func = 0;
+ break;
+#endif
+ default:
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
+ "qla_target(%d): Unknown task mgmt fn 0x%x\n",
+ sess->vha->vp_idx, fn);
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ return -ENOSYS;
+ }
+
+ res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
+ if (res != 0) {
+ ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
+ "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
+ sess->vha->vp_idx, res);
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
+{
+ struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt;
+ struct qla_tgt_sess *sess;
+ uint32_t lun, unpacked_lun;
+ int lun_size, fn;
+
+ tgt = ha->tgt.qla_tgt;
+
+ lun = a->u.isp24.fcp_cmnd.lun;
+ lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
+ fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+ a->u.isp24.fcp_hdr.s_id);
+ unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ if (!sess) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
+ "qla_target(%d): task mgmt fn 0x%x for "
+ "non-existant session\n", vha->vp_idx, fn);
+ return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
+ sizeof(struct atio_from_isp));
+ }
+
+ return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int __qlt_abort_task(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
+{
+ struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_mgmt_cmd *mcmd;
+ uint32_t lun, unpacked_lun;
+ int rc;
+
+ mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
+ if (mcmd == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
+ "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
+ vha->vp_idx, __func__);
+ return -ENOMEM;
+ }
+ memset(mcmd, 0, sizeof(*mcmd));
+
+ mcmd->sess = sess;
+ memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
+ sizeof(mcmd->orig_iocb.imm_ntfy));
+
+ lun = a->u.isp24.fcp_cmnd.lun;
+ unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
+ le16_to_cpu(iocb->u.isp2x.seq_id));
+ if (rc != 0) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
+ "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
+ vha->vp_idx, rc);
+ mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static int qlt_abort_task(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess;
+ int loop_id;
+
+ loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
+
+ sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+ if (sess == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
+ "qla_target(%d): task abort for unexisting "
+ "session\n", vha->vp_idx);
+ return qlt_sched_sess_work(ha->tgt.qla_tgt,
+ QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
+ }
+
+ return __qlt_abort_task(vha, iocb, sess);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int res = 0;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
+ "qla_target(%d): Port ID: 0x%02x:%02x:%02x"
+ " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0],
+ iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2],
+ iocb->u.isp24.status_subcode);
+
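+ /*
+ * Login/logout style ELS frames (PLOGI, FLOGI, PRLI, LOGO, PRLO) tear
+ * down the existing nexus for the session; PDISC and ADISC are simply
+ * acknowledged (after flushing any pending LINK REINIT ack).
+ */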
+ switch (iocb->u.isp24.status_subcode) {
+ case ELS_PLOGI:
+ case ELS_FLOGI:
+ case ELS_PRLI:
+ case ELS_LOGO:
+ case ELS_PRLO:
+ res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
+ break;
+ case ELS_PDISC:
+ case ELS_ADISC:
+ {
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ if (tgt->link_reinit_iocb_pending) {
+ qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
+ 0, 0, 0, 0, 0, 0);
+ tgt->link_reinit_iocb_pending = 0;
+ }
+ res = 1; /* send notify ack */
+ break;
+ }
+
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
+ "qla_target(%d): Unsupported ELS command %x "
+ "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
+ res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
+ break;
+ }
+
+ return res;
+}
+
+static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
+{
+ struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
+ size_t first_offset = 0, rem_offset = offset, tmp = 0;
+ int i, sg_srr_cnt, bufflen = 0;
+
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
+ "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
+ "cmd->sg_cnt: %u, direction: %d\n",
+ cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+
+ /*
+ * FIXME: Reject non-zero SRR relative offsets until we can test
+ * this code properly.
+ */
+ pr_debug("Rejecting non-zero SRR rel_offs: %u\n", offset);
+ return -1;
+
+ if (!cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
+ "Missing cmd->sg or zero cmd->sg_cnt in"
+ " qla_tgt_set_data_offset\n");
+ return -EINVAL;
+ }
+ /*
+ * Walk the current cmd->sg list until we locate the new sg_srr_start
+ */
+ for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
+ "sg[%d]: %p page: %p, length: %d, offset: %d\n",
+ i, sg, sg_page(sg), sg->length, sg->offset);
+
+ if ((sg->length + tmp) > offset) {
+ first_offset = rem_offset;
+ sg_srr_start = sg;
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
+ "Found matching sg[%d], using %p as sg_srr_start, "
+ "and using first_offset: %zu\n", i, sg,
+ first_offset);
+ break;
+ }
+ tmp += sg->length;
+ rem_offset -= sg->length;
+ }
+
+ if (!sg_srr_start) {
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
+ "Unable to locate sg_srr_start for offset: %u\n", offset);
+ return -EINVAL;
+ }
+ sg_srr_cnt = (cmd->sg_cnt - i);
+
+ sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
+ if (!sg_srr) {
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
+ "Unable to allocate sgp\n");
+ return -ENOMEM;
+ }
+ sg_init_table(sg_srr, sg_srr_cnt);
+ sgp = &sg_srr[0];
+ /*
+ * Walk the remaining list for sg_srr_start, mapping to the newly
+ * allocated sg_srr taking first_offset into account.
+ */
+ for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
+ if (first_offset) {
+ sg_set_page(sgp, sg_page(sg),
+ (sg->length - first_offset), first_offset);
+ first_offset = 0;
+ } else {
+ sg_set_page(sgp, sg_page(sg), sg->length, 0);
+ }
+ bufflen += sgp->length;
+
+ sgp = sg_next(sgp);
+ if (!sgp)
+ break;
+ }
+
+ cmd->sg = sg_srr;
+ cmd->sg_cnt = sg_srr_cnt;
+ cmd->bufflen = bufflen;
+ cmd->offset += offset;
+ cmd->free_sg = 1;
+
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
+ cmd->sg_cnt);
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
+ cmd->bufflen);
+ ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
+ cmd->offset);
+
+ if (cmd->sg_cnt < 0)
+ BUG();
+
+ if (cmd->bufflen < 0)
+ BUG();
+
+ return 0;
+}
+
+static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
+ uint32_t srr_rel_offs, int *xmit_type)
+{
+ int res = 0, rel_offs;
+
+ rel_offs = srr_rel_offs - cmd->offset;
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
+ srr_rel_offs, rel_offs);
+
+ *xmit_type = QLA_TGT_XMIT_ALL;
+
+ if (rel_offs < 0) {
+ ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
+ "qla_target(%d): SRR rel_offs (%d) < 0",
+ cmd->vha->vp_idx, rel_offs);
+ res = -1;
+ } else if (rel_offs == cmd->bufflen)
+ *xmit_type = QLA_TGT_XMIT_STATUS;
+ else if (rel_offs > 0)
+ res = qlt_set_data_offset(cmd, rel_offs);
+
+ return res;
+}
+
+/* No locks, thread context */
+static void qlt_handle_srr(struct scsi_qla_host *vha,
+ struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
+{
+ struct imm_ntfy_from_isp *ntfy =
+ (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_cmd *cmd = sctio->cmd;
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ unsigned long flags;
+ int xmit_type = 0, resp = 0;
+ uint32_t offset;
+ uint16_t srr_ui;
+
+ offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
+ srr_ui = ntfy->u.isp24.srr_ui;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
+ cmd, srr_ui);
+
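+ /*
+ * srr_ui selects what the initiator wants retransmitted: the status
+ * IU, previously sent data-in, or a fresh data-out transfer.
+ */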
+ switch (srr_ui) {
+ case SRR_IU_STATUS:
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_notify_ack(vha, ntfy,
+ 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ xmit_type = QLA_TGT_XMIT_STATUS;
+ resp = 1;
+ break;
+ case SRR_IU_DATA_IN:
+ if (!cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
+ "Unable to process SRR_IU_DATA_IN due to"
+ " missing cmd->sg, state: %d\n", cmd->state);
+ dump_stack();
+ goto out_reject;
+ }
+ if (se_cmd->scsi_status != 0) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe02a,
+ "Rejecting SRR_IU_DATA_IN with non GOOD "
+ "scsi_status\n");
+ goto out_reject;
+ }
+ cmd->bufflen = se_cmd->data_length;
+
+ if (qlt_has_data(cmd)) {
+ if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
+ goto out_reject;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_notify_ack(vha, ntfy,
+ 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ resp = 1;
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
+ "qla_target(%d): SRR for in data for cmd "
+ "without them (tag %d, SCSI status %d), "
+ "reject", vha->vp_idx, cmd->tag,
+ cmd->se_cmd.scsi_status);
+ goto out_reject;
+ }
+ break;
+ case SRR_IU_DATA_OUT:
+ if (!cmd->sg || !cmd->sg_cnt) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
+ "Unable to process SRR_IU_DATA_OUT due to"
+ " missing cmd->sg\n");
+ dump_stack();
+ goto out_reject;
+ }
+ if (se_cmd->scsi_status != 0) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe02b,
+ "Rejecting SRR_IU_DATA_OUT"
+ " with non GOOD scsi_status\n");
+ goto out_reject;
+ }
+ cmd->bufflen = se_cmd->data_length;
+
+ if (qlt_has_data(cmd)) {
+ if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
+ goto out_reject;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_notify_ack(vha, ntfy,
+ 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (xmit_type & QLA_TGT_XMIT_DATA)
+ qlt_rdy_to_xfer(cmd);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
+ "qla_target(%d): SRR for out data for cmd "
+ "without them (tag %d, SCSI status %d), "
+ "reject", vha->vp_idx, cmd->tag,
+ cmd->se_cmd.scsi_status);
+ goto out_reject;
+ }
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
+ "qla_target(%d): Unknown srr_ui value %x",
+ vha->vp_idx, srr_ui);
+ goto out_reject;
+ }
+
+ /* Transmit response in case of status and data-in cases */
+ if (resp)
+ qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
+
+ return;
+
+out_reject:
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+ if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+ dump_stack();
+ } else
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
+ struct qla_tgt_srr_imm *imm, int ha_locked)
+{
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags = 0;
+
+ if (!ha_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+
+ if (!ha_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ kfree(imm);
+}
+
+static void qlt_handle_srr_work(struct work_struct *work)
+{
+ struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_tgt_srr_ctio *sctio;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
+ tgt);
+
+restart:
+ spin_lock_irqsave(&tgt->srr_lock, flags);
+ list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
+ struct qla_tgt_srr_imm *imm, *i, *ti;
+ struct qla_tgt_cmd *cmd;
+ struct se_cmd *se_cmd;
+
+ imm = NULL;
+ list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
+ srr_list_entry) {
+ if (i->srr_id == sctio->srr_id) {
+ list_del(&i->srr_list_entry);
+ if (imm) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
+ "qla_target(%d): There must be "
+ "only one IMM SRR per CTIO SRR "
+ "(IMM SRR %p, id %d, CTIO %p\n",
+ vha->vp_idx, i, i->srr_id, sctio);
+ qlt_reject_free_srr_imm(tgt->vha, i, 0);
+ } else
+ imm = i;
+ }
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
+ "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
+ sctio->srr_id);
+
+ if (imm == NULL) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
+ "Not found matching IMM for SRR CTIO (id %d)\n",
+ sctio->srr_id);
+ continue;
+ } else
+ list_del(&sctio->srr_list_entry);
+
+ spin_unlock_irqrestore(&tgt->srr_lock, flags);
+
+ cmd = sctio->cmd;
+ /*
+ * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
+ * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
+ * logic..
+ */
+ cmd->offset = 0;
+ if (cmd->free_sg) {
+ kfree(cmd->sg);
+ cmd->sg = NULL;
+ cmd->free_sg = 0;
+ }
+ se_cmd = &cmd->se_cmd;
+
+ cmd->sg_cnt = se_cmd->t_data_nents;
+ cmd->sg = se_cmd->t_data_sg;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
+ "SRR cmd %p (se_cmd %p, tag %d, op %x), "
+ "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
+ se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);
+
+ qlt_handle_srr(vha, sctio, imm);
+
+ kfree(imm);
+ kfree(sctio);
+ goto restart;
+ }
+ spin_unlock_irqrestore(&tgt->srr_lock, flags);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_tgt_srr_imm *imm;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ struct qla_tgt_srr_ctio *sctio;
+
+ tgt->imm_srr_id++;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n",
+ vha->vp_idx);
+
+ imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
+ if (imm != NULL) {
+ memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));
+
+ /* IRQ is already OFF */
+ spin_lock(&tgt->srr_lock);
+ imm->srr_id = tgt->imm_srr_id;
+ list_add_tail(&imm->srr_list_entry,
+ &tgt->srr_imm_list);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
+ "IMM NTFY SRR %p added (id %d, ui %x)\n",
+ imm, imm->srr_id, iocb->u.isp24.srr_ui);
+ if (tgt->imm_srr_id == tgt->ctio_srr_id) {
+ int found = 0;
+ list_for_each_entry(sctio, &tgt->srr_ctio_list,
+ srr_list_entry) {
+ if (sctio->srr_id == imm->srr_id) {
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
+ "Scheduling srr work\n");
+ schedule_work(&tgt->srr_work);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
+ "qla_target(%d): imm_srr_id "
+ "== ctio_srr_id (%d), but there is no "
+ "corresponding SRR CTIO, deleting IMM "
+ "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
+ imm);
+ list_del(&imm->srr_list_entry);
+
+ kfree(imm);
+
+ spin_unlock(&tgt->srr_lock);
+ goto out_reject;
+ }
+ }
+ spin_unlock(&tgt->srr_lock);
+ } else {
+ struct qla_tgt_srr_ctio *ts;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
+ "qla_target(%d): Unable to allocate SRR IMM "
+ "entry, SRR request will be rejected\n", vha->vp_idx);
+
+ /* IRQ is already OFF */
+ spin_lock(&tgt->srr_lock);
+ list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
+ srr_list_entry) {
+ if (sctio->srr_id == tgt->imm_srr_id) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
+ "CTIO SRR %p deleted (id %d)\n",
+ sctio, sctio->srr_id);
+ list_del(&sctio->srr_list_entry);
+ qlt_send_term_exchange(vha, sctio->cmd,
+ &sctio->cmd->atio, 1);
+ kfree(sctio);
+ }
+ }
+ spin_unlock(&tgt->srr_lock);
+ goto out_reject;
+ }
+
+ return;
+
+out_reject:
+ qlt_send_notify_ack(vha, iocb, 0, 0, 0,
+ NOTIFY_ACK_SRR_FLAGS_REJECT,
+ NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
+ NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
+}
+
+/*
+ * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
+ */
+static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *iocb)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t add_flags = 0;
+ int send_notify_ack = 1;
+ uint16_t status;
+
+ status = le16_to_cpu(iocb->u.isp2x.status);
+ switch (status) {
+ case IMM_NTFY_LIP_RESET:
+ {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
+ "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
+ vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
+ iocb->u.isp24.status_subcode);
+
+ if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
+ send_notify_ack = 0;
+ break;
+ }
+
+ case IMM_NTFY_LIP_LINK_REINIT:
+ {
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
+ "qla_target(%d): LINK REINIT (loop %#x, "
+ "subcode %x)\n", vha->vp_idx,
+ le16_to_cpu(iocb->u.isp24.nport_handle),
+ iocb->u.isp24.status_subcode);
+ if (tgt->link_reinit_iocb_pending) {
+ qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
+ 0, 0, 0, 0, 0, 0);
+ }
+ memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
+ tgt->link_reinit_iocb_pending = 1;
+ /*
+ * QLogic requires waiting after a LINK REINIT for possible
+ * PDISC or ADISC ELS commands
+ */
+ send_notify_ack = 0;
+ break;
+ }
+
+ case IMM_NTFY_PORT_LOGOUT:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
+ "qla_target(%d): Port logout (loop "
+ "%#x, subcode %x)\n", vha->vp_idx,
+ le16_to_cpu(iocb->u.isp24.nport_handle),
+ iocb->u.isp24.status_subcode);
+
+ if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
+ send_notify_ack = 0;
+ /* The sessions will be cleared in the callback, if needed */
+ break;
+
+ case IMM_NTFY_GLBL_TPRLO:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
+ "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
+ if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
+ send_notify_ack = 0;
+ /* The sessions will be cleared in the callback, if needed */
+ break;
+
+ case IMM_NTFY_PORT_CONFIG:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
+ "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
+ status);
+ if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
+ send_notify_ack = 0;
+ /* The sessions will be cleared in the callback, if needed */
+ break;
+
+ case IMM_NTFY_GLBL_LOGO:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
+ "qla_target(%d): Link failure detected\n",
+ vha->vp_idx);
+ /* I_T nexus loss */
+ if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
+ send_notify_ack = 0;
+ break;
+
+ case IMM_NTFY_IOCB_OVERFLOW:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
+ "qla_target(%d): Cannot provide requested "
+ "capability (IOCB overflowed the immediate notify "
+ "resource count)\n", vha->vp_idx);
+ break;
+
+ case IMM_NTFY_ABORT_TASK:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
+ "qla_target(%d): Abort Task (S %08x I %#x -> "
+ "L %#x)\n", vha->vp_idx,
+ le16_to_cpu(iocb->u.isp2x.seq_id),
+ GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
+ le16_to_cpu(iocb->u.isp2x.lun));
+ if (qlt_abort_task(vha, iocb) == 0)
+ send_notify_ack = 0;
+ break;
+
+ case IMM_NTFY_RESOURCE:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
+ "qla_target(%d): Out of resources, host %ld\n",
+ vha->vp_idx, vha->host_no);
+ break;
+
+ case IMM_NTFY_MSG_RX:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
+ "qla_target(%d): Immediate notify task %x\n",
+ vha->vp_idx, iocb->u.isp2x.task_flags);
+ if (qlt_handle_task_mgmt(vha, iocb) == 0)
+ send_notify_ack = 0;
+ break;
+
+ case IMM_NTFY_ELS:
+ if (qlt_24xx_handle_els(vha, iocb) == 0)
+ send_notify_ack = 0;
+ break;
+
+ case IMM_NTFY_SRR:
+ qlt_prepare_srr_imm(vha, iocb);
+ send_notify_ack = 0;
+ break;
+
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
+ "qla_target(%d): Received unknown immediate "
+ "notify status %x\n", vha->vp_idx, status);
+ break;
+ }
+
+ if (send_notify_ack)
+ qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
+}
+
+/*
+ * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
+ * This function sends busy to ISP 2xxx or 24xx.
+ */
+static void qlt_send_busy(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio, uint16_t status)
+{
+ struct ctio7_to_24xx *ctio24;
+ struct qla_hw_data *ha = vha->hw;
+ request_t *pkt;
+ struct qla_tgt_sess *sess = NULL;
+
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
+ atio->u.isp24.fcp_hdr.s_id);
+ if (!sess) {
+ qlt_send_term_exchange(vha, NULL, atio, 1);
+ return;
+ }
+ /* Sending a marker isn't necessary, since we're called from the ISR */
+
+ pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+ if (!pkt) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
+ "qla_target(%d): %s failed: unable to allocate "
+ "request packet", vha->vp_idx, __func__);
+ return;
+ }
+
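+ /*
+ * Build a status-mode-1 CTIO7 that carries only SCSI status (e.g.
+ * BUSY or TASK SET FULL) back to the initiator; no data is moved and
+ * the CTIO is not returned to the driver (CTIO7_FLAGS_DONT_RET_CTIO).
+ */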
+ pkt->entry_count = 1;
+ pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
+ ctio24 = (struct ctio7_to_24xx *)pkt;
+ ctio24->entry_type = CTIO_TYPE7;
+ ctio24->nport_handle = sess->loop_id;
+ ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio24->vp_index = vha->vp_idx;
+ ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ ctio24->exchange_addr = atio->u.isp24.exchange_addr;
+ ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
+ __constant_cpu_to_le16(
+ CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
+ CTIO7_FLAGS_DONT_RET_CTIO);
+ /*
+ * A CTIO from the firmware without an se_cmd doesn't provide enough
+ * info to retry it if explicit confirmation is used.
+ */
+ ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+ ctio24->u.status1.scsi_status = cpu_to_le16(status);
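+ /*
+ * Nothing is transferred for a busy response, so report the whole
+ * FCP_DL value (stored right after the additional CDB bytes in the
+ * FCP_CMND) as an underrun residual.
+ */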
+ ctio24->u.status1.residual = get_unaligned((uint32_t *)
+ &atio->u.isp24.fcp_cmnd.add_cdb[
+ atio->u.isp24.fcp_cmnd.add_cdb_len]);
+ if (ctio24->u.status1.residual != 0)
+ ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
+
+ qla2x00_start_iocbs(vha, vha->req);
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ int rc;
+
+ if (unlikely(tgt == NULL)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
+ "ATIO pkt, but no tgt (ha %p)", ha);
+ return;
+ }
+ ql_dbg(ql_dbg_tgt, vha, 0xe02c,
+ "qla_target(%d): ATIO pkt %p: type %02x count %02x",
+ vha->vp_idx, atio, atio->u.raw.entry_type,
+ atio->u.raw.entry_count);
+ /*
+ * In tgt_stop mode we should also allow all requests to pass.
+ * Otherwise, some commands can get stuck.
+ */
+
+ tgt->irq_cmd_count++;
+
+ switch (atio->u.raw.entry_type) {
+ case ATIO_TYPE7:
+ ql_dbg(ql_dbg_tgt, vha, 0xe02d,
+ "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
+ "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
+ vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
+ atio->u.isp24.fcp_cmnd.rddata,
+ atio->u.isp24.fcp_cmnd.wrdata,
+ atio->u.isp24.fcp_cmnd.add_cdb_len,
+ be32_to_cpu(get_unaligned((uint32_t *)
+ &atio->u.isp24.fcp_cmnd.add_cdb[
+ atio->u.isp24.fcp_cmnd.add_cdb_len])),
+ atio->u.isp24.fcp_hdr.s_id[0],
+ atio->u.isp24.fcp_hdr.s_id[1],
+ atio->u.isp24.fcp_hdr.s_id[2]);
+
+ if (unlikely(atio->u.isp24.exchange_addr ==
+ ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe058,
+ "qla_target(%d): ATIO_TYPE7 "
+ "received with UNKNOWN exchange address, "
+ "sending QUEUE_FULL\n", vha->vp_idx);
+ qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
+ break;
+ }
+ if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
+ rc = qlt_handle_cmd_for_atio(vha, atio);
+ else
+ rc = qlt_handle_task_mgmt(vha, atio);
+ if (unlikely(rc != 0)) {
+ if (rc == -ESRCH) {
+#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
+ qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+#else
+ qlt_send_term_exchange(vha, NULL, atio, 1);
+#endif
+ } else {
+ if (tgt->tgt_stop) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe059,
+ "qla_target: Unable to send "
+ "command to target for req, "
+ "ignoring.\n");
+ } else {
+ ql_dbg(ql_dbg_tgt, vha, 0xe05a,
+ "qla_target(%d): Unable to send "
+ "command to target, sending BUSY "
+ "status.\n", vha->vp_idx);
+ qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+ }
+ }
+ }
+ break;
+
+ case IMMED_NOTIFY_TYPE:
+ {
+ if (unlikely(atio->u.isp2x.entry_status != 0)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe05b,
+ "qla_target(%d): Received ATIO packet %x "
+ "with error status %x\n", vha->vp_idx,
+ atio->u.raw.entry_type,
+ atio->u.isp2x.entry_status);
+ break;
+ }
+ ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
+ qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
+ break;
+ }
+
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe05c,
+ "qla_target(%d): Received unknown ATIO atio "
+ "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
+ break;
+ }
+
+ tgt->irq_cmd_count--;
+}
+
+/* ha->hardware_lock supposed to be held on entry */
+/* called via callback from qla2xxx */
+static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+
+ if (unlikely(tgt == NULL)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe05d,
+ "qla_target(%d): Response pkt %x received, but no "
+ "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
+ return;
+ }
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe02f,
+ "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
+ "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
+ pkt->entry_count, pkt->entry_status, pkt->handle);
+
+ /*
+ * In tgt_stop mode we should also allow all requests to pass.
+ * Otherwise, some commands can get stuck.
+ */
+
+ tgt->irq_cmd_count++;
+
+ switch (pkt->entry_type) {
+ case CTIO_TYPE7:
+ {
+ struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
+ ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
+ vha->vp_idx);
+ qlt_do_ctio_completion(vha, entry->handle,
+ le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+ entry);
+ break;
+ }
+
+ case ACCEPT_TGT_IO_TYPE:
+ {
+ struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
+ int rc;
+ ql_dbg(ql_dbg_tgt, vha, 0xe031,
+ "ACCEPT_TGT_IO instance %d status %04x "
+ "lun %04x read/write %d data_length %04x "
+ "target_id %02x rx_id %04x\n ", vha->vp_idx,
+ le16_to_cpu(atio->u.isp2x.status),
+ le16_to_cpu(atio->u.isp2x.lun),
+ atio->u.isp2x.execution_codes,
+ le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
+ atio), atio->u.isp2x.rx_id);
+ if (atio->u.isp2x.status !=
+ __constant_cpu_to_le16(ATIO_CDB_VALID)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe05e,
+ "qla_target(%d): ATIO with error "
+ "status %x received\n", vha->vp_idx,
+ le16_to_cpu(atio->u.isp2x.status));
+ break;
+ }
+ ql_dbg(ql_dbg_tgt, vha, 0xe032,
+ "FCP CDB: 0x%02x, sizeof(cdb): %lu",
+ atio->u.isp2x.cdb[0], (unsigned long
+ int)sizeof(atio->u.isp2x.cdb));
+
+ rc = qlt_handle_cmd_for_atio(vha, atio);
+ if (unlikely(rc != 0)) {
+ if (rc == -ESRCH) {
+#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
+ qlt_send_busy(vha, atio, 0);
+#else
+ qlt_send_term_exchange(vha, NULL, atio, 1);
+#endif
+ } else {
+ if (tgt->tgt_stop) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe05f,
+ "qla_target: Unable to send "
+ "command to target, sending TERM "
+ "EXCHANGE for rsp\n");
+ qlt_send_term_exchange(vha, NULL,
+ atio, 1);
+ } else {
+ ql_dbg(ql_dbg_tgt, vha, 0xe060,
+ "qla_target(%d): Unable to send "
+ "command to target, sending BUSY "
+ "status\n", vha->vp_idx);
+ qlt_send_busy(vha, atio, 0);
+ }
+ }
+ }
+ }
+ break;
+
+ case CONTINUE_TGT_IO_TYPE:
+ {
+ struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+ ql_dbg(ql_dbg_tgt, vha, 0xe033,
+ "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
+ qlt_do_ctio_completion(vha, entry->handle,
+ le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+ entry);
+ break;
+ }
+
+ case CTIO_A64_TYPE:
+ {
+ struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
+ ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
+ vha->vp_idx);
+ qlt_do_ctio_completion(vha, entry->handle,
+ le16_to_cpu(entry->status)|(pkt->entry_status << 16),
+ entry);
+ break;
+ }
+
+ case IMMED_NOTIFY_TYPE:
+ ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
+ qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
+ break;
+
+ case NOTIFY_ACK_TYPE:
+ if (tgt->notify_ack_expected > 0) {
+ struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
+ ql_dbg(ql_dbg_tgt, vha, 0xe036,
+ "NOTIFY_ACK seq %08x status %x\n",
+ le16_to_cpu(entry->u.isp2x.seq_id),
+ le16_to_cpu(entry->u.isp2x.status));
+ tgt->notify_ack_expected--;
+ if (entry->u.isp2x.status !=
+ __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe061,
+ "qla_target(%d): NOTIFY_ACK "
+ "failed %x\n", vha->vp_idx,
+ le16_to_cpu(entry->u.isp2x.status));
+ }
+ } else {
+ ql_dbg(ql_dbg_tgt, vha, 0xe062,
+ "qla_target(%d): Unexpected NOTIFY_ACK received\n",
+ vha->vp_idx);
+ }
+ break;
+
+ case ABTS_RECV_24XX:
+ ql_dbg(ql_dbg_tgt, vha, 0xe037,
+ "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
+ qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
+ break;
+
+ case ABTS_RESP_24XX:
+ if (tgt->abts_resp_expected > 0) {
+ struct abts_resp_from_24xx_fw *entry =
+ (struct abts_resp_from_24xx_fw *)pkt;
+ ql_dbg(ql_dbg_tgt, vha, 0xe038,
+ "ABTS_RESP_24XX: compl_status %x\n",
+ entry->compl_status);
+ tgt->abts_resp_expected--;
+ if (le16_to_cpu(entry->compl_status) !=
+ ABTS_RESP_COMPL_SUCCESS) {
+ if ((entry->error_subcode1 == 0x1E) &&
+ (entry->error_subcode2 == 0)) {
+ /*
+ * We've got a race here: the aborted
+ * exchange was not terminated, i.e.
+ * the response for the aborted command
+ * was sent between the time the abort
+ * request was received and the time it
+ * was processed.
+ * Unfortunately, the firmware requires
+ * that all aborted exchanges be
+ * explicitly terminated, otherwise it
+ * refuses to send responses for the
+ * abort requests. So, we have to
+ * (re)terminate the exchange and retry
+ * the abort response.
+ */
+ qlt_24xx_retry_term_exchange(vha,
+ entry);
+ } else
+ ql_dbg(ql_dbg_tgt, vha, 0xe063,
+ "qla_target(%d): ABTS_RESP_24XX "
+ "failed %x (subcode %x:%x)",
+ vha->vp_idx, entry->compl_status,
+ entry->error_subcode1,
+ entry->error_subcode2);
+ }
+ } else {
+ ql_dbg(ql_dbg_tgt, vha, 0xe064,
+ "qla_target(%d): Unexpected ABTS_RESP_24XX "
+ "received\n", vha->vp_idx);
+ }
+ break;
+
+ default:
+ ql_dbg(ql_dbg_tgt, vha, 0xe065,
+ "qla_target(%d): Received unknown response pkt "
+ "type %x\n", vha->vp_idx, pkt->entry_type);
+ break;
+ }
+
+ tgt->irq_cmd_count--;
+}
+
+/*
+ * ha->hardware_lock is supposed to be held on entry. Might drop it, then reacquire.
+ */
+void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
+ uint16_t *mailbox)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ int login_code;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe039,
+ "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
+ vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
+ ha->operating_mode, ha->current_topology);
+
+ if (!ha->tgt.tgt_ops)
+ return;
+
+ if (unlikely(tgt == NULL)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe03a,
+ "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
+ return;
+ }
+
+ if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
+ IS_QLA2100(ha))
+ return;
+ /*
+ * In tgt_stop mode we should also allow all requests to pass.
+ * Otherwise, some commands can get stuck.
+ */
+
+ tgt->irq_cmd_count++;
+
+ switch (code) {
+ case MBA_RESET: /* Reset */
+ case MBA_SYSTEM_ERR: /* System Error */
+ case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
+ case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
+ "qla_target(%d): System error async event %#x "
+ "occured", vha->vp_idx, code);
+ break;
+ case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+
+ case MBA_LOOP_UP:
+ {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
+ "qla_target(%d): Async LOOP_UP occured "
+ "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
+ le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+ le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ if (tgt->link_reinit_iocb_pending) {
+ qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
+ 0, 0, 0, 0, 0, 0);
+ tgt->link_reinit_iocb_pending = 0;
+ }
+ break;
+ }
+
+ case MBA_LIP_OCCURRED:
+ case MBA_LOOP_DOWN:
+ case MBA_LIP_RESET:
+ case MBA_RSCN_UPDATE:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
+ "qla_target(%d): Async event %#x occured "
+ "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
+ le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+ le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ break;
+
+ case MBA_PORT_UPDATE:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
+ "qla_target(%d): Port update async event %#x "
+ "occured: updating the ports database (m[0]=%x, m[1]=%x, "
+ "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
+ le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+ le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+
+ login_code = le16_to_cpu(mailbox[2]);
+ if (login_code == 0x4)
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
+ "Async MB 2: Got PLOGI Complete\n");
+ else if (login_code == 0x7)
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
+ "Async MB 2: Port Logged Out\n");
+ break;
+
+ default:
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
+ "qla_target(%d): Async event %#x occured: "
+ "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
+ code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+ le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+ break;
+ }
+
+ tgt->irq_cmd_count--;
+}
+
+static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
+ uint16_t loop_id)
+{
+ fc_port_t *fcport;
+ int rc;
+
+ fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
+ if (!fcport) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
+ "qla_target(%d): Allocation of tmp FC port failed",
+ vha->vp_idx);
+ return NULL;
+ }
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);
+
+ fcport->loop_id = loop_id;
+
+ rc = qla2x00_get_port_database(vha, fcport, 0);
+ if (rc != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
+ "qla_target(%d): Failed to retrieve fcport "
+ "information -- get_port_database() returned %x "
+ "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
+ kfree(fcport);
+ return NULL;
+ }
+
+ return fcport;
+}
+
+/* Must be called under tgt_mutex */
+static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
+ uint8_t *s_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess = NULL;
+ fc_port_t *fcport = NULL;
+ int rc, global_resets;
+ uint16_t loop_id = 0;
+
+retry:
+ global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
+
+ rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
+ if (rc != 0) {
+ if ((s_id[0] == 0xFF) &&
+ (s_id[1] == 0xFC)) {
+ /*
+ * This is the Domain Controller, so it should be
+ * OK to drop SCSI commands from it.
+ */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
+ "Unable to find initiator with S_ID %x:%x:%x",
+ s_id[0], s_id[1], s_id[2]);
+ } else
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
+ "qla_target(%d): Unable to find "
+ "initiator with S_ID %x:%x:%x",
+ vha->vp_idx, s_id[0], s_id[1],
+ s_id[2]);
+ return NULL;
+ }
+
+ fcport = qlt_get_port_database(vha, loop_id);
+ if (!fcport)
+ return NULL;
+
+ if (global_resets !=
+ atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
+ "qla_target(%d): global reset during session discovery "
+ "(counter was %d, new %d), retrying", vha->vp_idx,
+ global_resets,
+ atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
+ goto retry;
+ }
+
+ sess = qlt_create_sess(vha, fcport, true);
+
+ kfree(fcport);
+ return sess;
+}
+
+static void qlt_abort_work(struct qla_tgt *tgt,
+ struct qla_tgt_sess_work_param *prm)
+{
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess = NULL;
+ unsigned long flags;
+ uint8_t s_id[3];
+ int rc;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (tgt->tgt_stop)
+ goto out_term;
+
+ s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
+ s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
+ s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
+
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+ if (!sess) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ mutex_lock(&ha->tgt.tgt_mutex);
+ sess = qlt_make_local_sess(vha, s_id);
+ /* sess has got an extra creation ref */
+ mutex_unlock(&ha->tgt.tgt_mutex);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (!sess)
+ goto out_term;
+ } else {
+ kref_get(&sess->se_sess->sess_kref);
+ }
+
+ if (tgt->tgt_stop)
+ goto out_term;
+
+ rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
+ if (rc != 0)
+ goto out_term;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ ha->tgt.tgt_ops->put_sess(sess);
+ return;
+
+out_term:
+ qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
+}
+
+static void qlt_tmr_work(struct qla_tgt *tgt,
+ struct qla_tgt_sess_work_param *prm)
+{
+ struct atio_from_isp *a = &prm->tm_iocb2;
+ struct scsi_qla_host *vha = tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt_sess *sess = NULL;
+ unsigned long flags;
+ uint8_t *s_id = NULL; /* to hide compiler warnings */
+ int rc;
+ uint32_t lun, unpacked_lun;
+ int lun_size, fn;
+ void *iocb;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ if (tgt->tgt_stop)
+ goto out_term;
+
+ s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
+ sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
+ if (!sess) {
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ mutex_lock(&ha->tgt.tgt_mutex);
+ sess = qlt_make_local_sess(vha, s_id);
+ /* sess has got an extra creation ref */
+ mutex_unlock(&ha->tgt.tgt_mutex);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (!sess)
+ goto out_term;
+ } else {
+ kref_get(&sess->se_sess->sess_kref);
+ }
+
+ iocb = a;
+ lun = a->u.isp24.fcp_cmnd.lun;
+ lun_size = sizeof(lun);
+ fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
+ unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+
+ rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
+ if (rc != 0)
+ goto out_term;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ ha->tgt.tgt_ops->put_sess(sess);
+ return;
+
+out_term:
+ qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
+}
+
+static void qlt_sess_work_fn(struct work_struct *work)
+{
+ struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
+ struct scsi_qla_host *vha = tgt->vha;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
+
+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
+ while (!list_empty(&tgt->sess_works_list)) {
+ struct qla_tgt_sess_work_param *prm = list_entry(
+ tgt->sess_works_list.next, typeof(*prm),
+ sess_works_list_entry);
+
+ /*
+ * This work can be scheduled on several CPUs at a time, so we
+ * must delete the entry to eliminate double processing.
+ */
+ list_del(&prm->sess_works_list_entry);
+
+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+
+ switch (prm->type) {
+ case QLA_TGT_SESS_WORK_ABORT:
+ qlt_abort_work(tgt, prm);
+ break;
+ case QLA_TGT_SESS_WORK_TM:
+ qlt_tmr_work(tgt, prm);
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ spin_lock_irqsave(&tgt->sess_work_lock, flags);
+
+ kfree(prm);
+ }
+ spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
+}
+
+/* Must be called under tgt_host_action_mutex */
+int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
+{
+ struct qla_tgt *tgt;
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return 0;
+
+ ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
+ "Registering target for host %ld(%p)", base_vha->host_no, ha);
+
+ BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));
+
+ tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
+ if (!tgt) {
+ ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
+ "Unable to allocate struct qla_tgt\n");
+ return -ENOMEM;
+ }
+
+ if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
+ base_vha->host->hostt->supported_mode |= MODE_TARGET;
+
+ tgt->ha = ha;
+ tgt->vha = base_vha;
+ init_waitqueue_head(&tgt->waitQ);
+ INIT_LIST_HEAD(&tgt->sess_list);
+ INIT_LIST_HEAD(&tgt->del_sess_list);
+ INIT_DELAYED_WORK(&tgt->sess_del_work,
+ (void (*)(struct work_struct *))qlt_del_sess_work_fn);
+ spin_lock_init(&tgt->sess_work_lock);
+ INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
+ INIT_LIST_HEAD(&tgt->sess_works_list);
+ spin_lock_init(&tgt->srr_lock);
+ INIT_LIST_HEAD(&tgt->srr_ctio_list);
+ INIT_LIST_HEAD(&tgt->srr_imm_list);
+ INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
+ atomic_set(&tgt->tgt_global_resets_count, 0);
+
+ ha->tgt.qla_tgt = tgt;
+
+ ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
+ "qla_target(%d): using 64 Bit PCI addressing",
+ base_vha->vp_idx);
+ tgt->tgt_enable_64bit_addr = 1;
+ /* 3 is reserved */
+ tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
+ tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
+ tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
+
+ mutex_lock(&qla_tgt_mutex);
+ list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
+ mutex_unlock(&qla_tgt_mutex);
+
+ return 0;
+}
+
+/* Must be called under tgt_host_action_mutex */
+int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
+{
+ if (!ha->tgt.qla_tgt)
+ return 0;
+
+ mutex_lock(&qla_tgt_mutex);
+ list_del(&ha->tgt.qla_tgt->tgt_list_entry);
+ mutex_unlock(&qla_tgt_mutex);
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
+ vha->host_no, ha);
+ qlt_release(ha->tgt.qla_tgt);
+
+ return 0;
+}
+
+static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
+ unsigned char *b)
+{
+ int i;
+
+ pr_debug("qla2xxx HW vha->node_name: ");
+ for (i = 0; i < WWN_SIZE; i++)
+ pr_debug("%02x ", vha->node_name[i]);
+ pr_debug("\n");
+ pr_debug("qla2xxx HW vha->port_name: ");
+ for (i = 0; i < WWN_SIZE; i++)
+ pr_debug("%02x ", vha->port_name[i]);
+ pr_debug("\n");
+
+ pr_debug("qla2xxx passed configfs WWPN: ");
+ put_unaligned_be64(wwpn, b);
+ for (i = 0; i < WWN_SIZE; i++)
+ pr_debug("%02x ", b[i]);
+ pr_debug("\n");
+}
+
+/**
+ * qlt_lport_register - register lport with external module
+ *
+ * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
+ * @wwpn: Passed FC target WWPN
+ * @callback: lport initialization callback for tcm_qla2xxx code
+ * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
+ */
+int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
+ int (*callback)(struct scsi_qla_host *), void *target_lport_ptr)
+{
+ struct qla_tgt *tgt;
+ struct scsi_qla_host *vha;
+ struct qla_hw_data *ha;
+ struct Scsi_Host *host;
+ unsigned long flags;
+ int rc;
+ u8 b[WWN_SIZE];
+
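+ /*
+ * Walk the global list of registered qla_tgt instances and claim the
+ * HBA whose port name matches the WWPN passed in from the target
+ * fabric module, then run its lport initialization callback.
+ */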
+ mutex_lock(&qla_tgt_mutex);
+ list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
+ vha = tgt->vha;
+ ha = vha->hw;
+
+ host = vha->host;
+ if (!host)
+ continue;
+
+ if (ha->tgt.tgt_ops != NULL)
+ continue;
+
+ if (!(host->hostt->supported_mode & MODE_TARGET))
+ continue;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (host->active_mode & MODE_TARGET) {
+ pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
+ host->host_no);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ continue;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ if (!scsi_host_get(host)) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe068,
+ "Unable to scsi_host_get() for"
+ " qla2xxx scsi_host\n");
+ continue;
+ }
+ qlt_lport_dump(vha, wwpn, b);
+
+ if (memcmp(vha->port_name, b, WWN_SIZE)) {
+ scsi_host_put(host);
+ continue;
+ }
+ /*
+ * Setup passed parameters ahead of invoking callback
+ */
+ ha->tgt.tgt_ops = qla_tgt_ops;
+ ha->tgt.target_lport_ptr = target_lport_ptr;
+ rc = (*callback)(vha);
+ if (rc != 0) {
+ ha->tgt.tgt_ops = NULL;
+ ha->tgt.target_lport_ptr = NULL;
+ }
+ mutex_unlock(&qla_tgt_mutex);
+ return rc;
+ }
+ mutex_unlock(&qla_tgt_mutex);
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(qlt_lport_register);
+
+/**
+ * qlt_lport_deregister - Deregister lport
+ *
+ * @vha: Registered scsi_qla_host pointer
+ */
+void qlt_lport_deregister(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct Scsi_Host *sh = vha->host;
+ /*
+ * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
+ */
+ ha->tgt.target_lport_ptr = NULL;
+ ha->tgt.tgt_ops = NULL;
+ /*
+ * Release the Scsi_Host reference for the underlying qla2xxx host
+ */
+ scsi_host_put(sh);
+}
+EXPORT_SYMBOL(qlt_lport_deregister);
+
+/* Must be called under HW lock */
+void qlt_set_mode(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
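+ /*
+ * Map the configured initiator-mode policy onto the SCSI host's
+ * active_mode: with initiator mode disabled or exclusive, target mode
+ * takes over the port; with both enabled, target mode is added
+ * alongside initiator mode.
+ */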
+ switch (ql2x_ini_mode) {
+ case QLA2XXX_INI_MODE_DISABLED:
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ vha->host->active_mode = MODE_TARGET;
+ break;
+ case QLA2XXX_INI_MODE_ENABLED:
+ vha->host->active_mode |= MODE_TARGET;
+ break;
+ default:
+ break;
+ }
+
+ if (ha->tgt.ini_mode_force_reverse)
+ qla_reverse_ini_mode(vha);
+}
+
+/* Must be called under HW lock */
+void qlt_clear_mode(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ switch (ql2x_ini_mode) {
+ case QLA2XXX_INI_MODE_DISABLED:
+ vha->host->active_mode = MODE_UNKNOWN;
+ break;
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ vha->host->active_mode = MODE_INITIATOR;
+ break;
+ case QLA2XXX_INI_MODE_ENABLED:
+ vha->host->active_mode &= ~MODE_TARGET;
+ break;
+ default:
+ break;
+ }
+
+ if (ha->tgt.ini_mode_force_reverse)
+ qla_reverse_ini_mode(vha);
+}
+
+/*
+ * qlt_enable_vha - NO LOCK HELD
+ *
+ * host_reset, bring up w/ Target Mode Enabled
+ */
+void
+qlt_enable_vha(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ unsigned long flags;
+
+ if (!tgt) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe069,
+ "Unable to locate qla_tgt pointer from"
+ " struct qla_hw_data\n");
+ dump_stack();
+ return;
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ tgt->tgt_stopped = 0;
+ qlt_set_mode(vha);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_hba_online(vha);
+}
+EXPORT_SYMBOL(qlt_enable_vha);
+
+/*
+ * qlt_disable_vha - NO LOCK HELD
+ *
+ * Disable Target Mode and reset the adapter
+ */
+void
+qlt_disable_vha(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_tgt *tgt = ha->tgt.qla_tgt;
+ unsigned long flags;
+
+ if (!tgt) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe06a,
+ "Unable to locate qla_tgt pointer from"
+ " struct qla_hw_data\n");
+ dump_stack();
+ return;
+ }
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ qlt_clear_mode(vha);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_hba_online(vha);
+}
+
+/*
+ * Called from qla_init.c:qla24xx_vport_create() context to set up
+ * the target mode specific struct scsi_qla_host and struct qla_hw_data
+ * members.
+ */
+void
+qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
+{
+ if (!qla_tgt_mode_enabled(vha))
+ return;
+
+ mutex_init(&ha->tgt.tgt_mutex);
+ mutex_init(&ha->tgt.tgt_host_action_mutex);
+
+ qlt_clear_mode(vha);
+
+ /*
+ * NOTE: Currently the value is kept the same for <24xx and
+ * >=24xx ISPs. If it is necessary to change it,
+ * the check should be added for specific ISPs,
+ * assigning the value appropriately.
+ */
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+}
+
+void
+qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
+{
+ /*
+ * FC-4 Feature bit 0 indicates target functionality to the name server.
+ */
+ if (qla_tgt_mode_enabled(vha)) {
+ if (qla_ini_mode_enabled(vha))
+ ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
+ else
+ ct_req->req.rff_id.fc4_feature = BIT_0;
+ } else if (qla_ini_mode_enabled(vha)) {
+ ct_req->req.rff_id.fc4_feature = BIT_1;
+ }
+}
+
+/*
+ * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
+ * @vha: SCSI driver HA context
+ *
+ * The beginning of the ATIO ring has an initialization control block
+ * already built by the nvram config routine.
+ */
+void
+qlt_init_atio_q_entries(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint16_t cnt;
+ struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
+
+ if (!qla_tgt_mode_enabled(vha))
+ return;
+
+ for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
+ pkt->u.raw.signature = ATIO_PROCESSED;
+ pkt++;
+ }
+
+}
+
+/*
+ * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
+ * @vha: SCSI driver HA context
+ */
+void
+qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ struct atio_from_isp *pkt;
+ int cnt, i;
+
+ if (!vha->flags.online)
+ return;
+
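+ /*
+ * Consume ring entries until one still carrying the ATIO_PROCESSED
+ * signature is reached; each packet is handed to
+ * qlt_24xx_atio_pkt_all_vps() and its slots are re-marked as
+ * processed before the out-pointer is updated below.
+ */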
+ while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
+ pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
+ cnt = pkt->u.raw.entry_count;
+
+ qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);
+
+ for (i = 0; i < cnt; i++) {
+ ha->tgt.atio_ring_index++;
+ if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
+ ha->tgt.atio_ring_index = 0;
+ ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
+ } else
+ ha->tgt.atio_ring_ptr++;
+
+ pkt->u.raw.signature = ATIO_PROCESSED;
+ pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
+ }
+ wmb();
+ }
+
+ /* Adjust ring index */
+ WRT_REG_DWORD(&reg->atio_q_out, ha->tgt.atio_ring_index);
+}
+
+void
+qlt_24xx_config_rings(struct scsi_qla_host *vha, device_reg_t __iomem *reg)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+/* FIXME: atio_q in/out for ha->mqenable=1..? */
+ if (ha->mqenable) {
+#if 0
+ WRT_REG_DWORD(&reg->isp25mq.atio_q_in, 0);
+ WRT_REG_DWORD(&reg->isp25mq.atio_q_out, 0);
+ RD_REG_DWORD(&reg->isp25mq.atio_q_out);
+#endif
+ } else {
+ /* Set up ATIO registers for target mode */
+ WRT_REG_DWORD(&reg->isp24.atio_q_in, 0);
+ WRT_REG_DWORD(&reg->isp24.atio_q_out, 0);
+ RD_REG_DWORD(&reg->isp24.atio_q_out);
+ }
+}
+
+void
+qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (qla_tgt_mode_enabled(vha)) {
+ if (!ha->tgt.saved_set) {
+ /* We save only once */
+ ha->tgt.saved_exchange_count = nv->exchange_count;
+ ha->tgt.saved_firmware_options_1 =
+ nv->firmware_options_1;
+ ha->tgt.saved_firmware_options_2 =
+ nv->firmware_options_2;
+ ha->tgt.saved_firmware_options_3 =
+ nv->firmware_options_3;
+ ha->tgt.saved_set = 1;
+ }
+
+ nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+
+ /* Enable target mode */
+ nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+
+ /* Disable ini mode, if requested */
+ if (!qla_ini_mode_enabled(vha))
+ nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);
+
+ /* Disable Full Login after LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+ /* Enable initial LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+ /* Enable FC tapes support */
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+ /* Disable Full Login after LIP */
+ nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+ /* Enable target PRLI control */
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+ } else {
+ if (ha->tgt.saved_set) {
+ nv->exchange_count = ha->tgt.saved_exchange_count;
+ nv->firmware_options_1 =
+ ha->tgt.saved_firmware_options_1;
+ nv->firmware_options_2 =
+ ha->tgt.saved_firmware_options_2;
+ nv->firmware_options_3 =
+ ha->tgt.saved_firmware_options_3;
+ }
+ return;
+ }
+
+ /* out-of-order frames reassembly */
+ nv->firmware_options_3 |= BIT_6|BIT_9;
+
+ if (ha->tgt.enable_class_2) {
+ if (vha->flags.init_done)
+ fc_host_supported_classes(vha->host) =
+ FC_COS_CLASS2 | FC_COS_CLASS3;
+
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+ } else {
+ if (vha->flags.init_done)
+ fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+
+ nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+ }
+}
+
+void
+qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
+ struct init_cb_24xx *icb)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ha->tgt.node_name_set) {
+ memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
+ icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+ }
+}
+
+int
+qlt_24xx_process_response_error(struct scsi_qla_host *vha,
+ struct sts_entry_24xx *pkt)
+{
+ switch (pkt->entry_type) {
+ case ABTS_RECV_24XX:
+ case ABTS_RESP_24XX:
+ case CTIO_TYPE7:
+ case NOTIFY_ACK_TYPE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+void
+qlt_modify_vp_config(struct scsi_qla_host *vha,
+ struct vp_config_entry_24xx *vpmod)
+{
+ if (qla_tgt_mode_enabled(vha))
+ vpmod->options_idx1 &= ~BIT_5;
+ /* Disable ini mode, if requested */
+ if (!qla_ini_mode_enabled(vha))
+ vpmod->options_idx1 &= ~BIT_4;
+}
+
+void
+qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ mutex_init(&ha->tgt.tgt_mutex);
+ mutex_init(&ha->tgt.tgt_host_action_mutex);
+ qlt_clear_mode(base_vha);
+}
+
+int
+qlt_mem_alloc(struct qla_hw_data *ha)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return 0;
+
+ ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
+ MAX_MULTI_ID_FABRIC, GFP_KERNEL);
+ if (!ha->tgt.tgt_vp_map)
+ return -ENOMEM;
+
+ ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
+ (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
+ &ha->tgt.atio_dma, GFP_KERNEL);
+ if (!ha->tgt.atio_ring) {
+ kfree(ha->tgt.tgt_vp_map);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void
+qlt_mem_free(struct qla_hw_data *ha)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ if (ha->tgt.atio_ring) {
+ dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
+ sizeof(struct atio_from_isp), ha->tgt.atio_ring,
+ ha->tgt.atio_dma);
+ }
+ kfree(ha->tgt.tgt_vp_map);
+}
+
+/* vport_slock to be held by the caller */
+void
+qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ switch (cmd) {
+ case SET_VP_IDX:
+ vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
+ break;
+ case SET_AL_PA:
+ vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
+ break;
+ case RESET_VP_IDX:
+ vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
+ break;
+ case RESET_AL_PA:
+ vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
+ break;
+ }
+}
+
+static int __init qlt_parse_ini_mode(void)
+{
+ if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
+ ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+ else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
+ ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
+ else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
+ ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
+ else
+ return false;
+
+ return true;
+}
+
+int __init qlt_init(void)
+{
+ int ret;
+
+ if (!qlt_parse_ini_mode()) {
+ ql_log(ql_log_fatal, NULL, 0xe06b,
+ "qlt_parse_ini_mode() failed\n");
+ return -EINVAL;
+ }
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return 0;
+
+ qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
+ sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
+ NULL);
+ if (!qla_tgt_cmd_cachep) {
+ ql_log(ql_log_fatal, NULL, 0xe06c,
+ "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
+ return -ENOMEM;
+ }
+
+ qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
+ sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
+ qla_tgt_mgmt_cmd), 0, NULL);
+ if (!qla_tgt_mgmt_cmd_cachep) {
+ ql_log(ql_log_fatal, NULL, 0xe06d,
+ "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
+ mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
+ if (!qla_tgt_mgmt_cmd_mempool) {
+ ql_log(ql_log_fatal, NULL, 0xe06e,
+ "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
+ ret = -ENOMEM;
+ goto out_mgmt_cmd_cachep;
+ }
+
+ qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
+ if (!qla_tgt_wq) {
+ ql_log(ql_log_fatal, NULL, 0xe06f,
+ "alloc_workqueue for qla_tgt_wq failed\n");
+ ret = -ENOMEM;
+ goto out_cmd_mempool;
+ }
+ /*
+ * Return 1 to signal that initiator-mode is being disabled
+ */
+ return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;
+
+out_cmd_mempool:
+ mempool_destroy(qla_tgt_mgmt_cmd_mempool);
+out_mgmt_cmd_cachep:
+ kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
+out:
+ kmem_cache_destroy(qla_tgt_cmd_cachep);
+ return ret;
+}
+
+void qlt_exit(void)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ destroy_workqueue(qla_tgt_wq);
+ mempool_destroy(qla_tgt_mgmt_cmd_mempool);
+ kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
+ kmem_cache_destroy(qla_tgt_cmd_cachep);
+}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
new file mode 100644
index 0000000..170af15
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -0,0 +1,1003 @@
+/*
+ * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
+ * Copyright (C) 2004 - 2005 Leonid Stoljar
+ * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
+ * Copyright (C) 2007 - 2010 ID7 Ltd.
+ *
+ * Forward port and refactoring to modern qla2xxx and target/configfs
+ *
+ * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * Additional file for the target driver support.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * This is the global definitions file that is included from the
+ * target portion.
+ */
+
+#ifndef __QLA_TARGET_H
+#define __QLA_TARGET_H
+
+#include "qla_def.h"
+
+/*
+ * Must be changed on any change in any initiator visible interfaces or
+ * data in the target add-on
+ */
+#define QLA2XXX_TARGET_MAGIC 269
+
+/*
+ * Must be changed on any change in any target visible interfaces or
+ * data in the initiator
+ */
+#define QLA2XXX_INITIATOR_MAGIC 57222
+
+#define QLA2XXX_INI_MODE_STR_EXCLUSIVE "exclusive"
+#define QLA2XXX_INI_MODE_STR_DISABLED "disabled"
+#define QLA2XXX_INI_MODE_STR_ENABLED "enabled"
+
+#define QLA2XXX_INI_MODE_EXCLUSIVE 0
+#define QLA2XXX_INI_MODE_DISABLED 1
+#define QLA2XXX_INI_MODE_ENABLED 2
+
+#define QLA2XXX_COMMAND_COUNT_INIT 250
+#define QLA2XXX_IMMED_NOTIFY_COUNT_INIT 250
+
+/*
+ * Used to mark which completion handles (for RIO Statuses) belong to CTIOs
+ * vs. regular (non-target) info. This is checked in
+ * qla2x00_process_response_queue() to see whether a handle coming back in a
+ * multi-complete should go to the target driver or be handled by qla2xxx.
+ */
+#define CTIO_COMPLETION_HANDLE_MARK BIT_29
+#if (CTIO_COMPLETION_HANDLE_MARK <= MAX_OUTSTANDING_COMMANDS)
+#error "CTIO_COMPLETION_HANDLE_MARK not larger than MAX_OUTSTANDING_COMMANDS"
+#endif
+#define HANDLE_IS_CTIO_COMP(h) ((h) & CTIO_COMPLETION_HANDLE_MARK)
+
+/* Used to mark CTIO as intermediate */
+#define CTIO_INTERMEDIATE_HANDLE_MARK BIT_30
+
+#ifndef OF_SS_MODE_0
+/*
+ * ISP target entries - Flags bit definitions.
+ */
+#define OF_SS_MODE_0 0
+#define OF_SS_MODE_1 1
+#define OF_SS_MODE_2 2
+#define OF_SS_MODE_3 3
+
+#define OF_EXPL_CONF BIT_5 /* Explicit Confirmation Requested */
+#define OF_DATA_IN BIT_6 /* Data in to initiator */
+ /* (data from target to initiator) */
+#define OF_DATA_OUT BIT_7 /* Data out from initiator */
+ /* (data from initiator to target) */
+#define OF_NO_DATA (BIT_7 | BIT_6)
+#define OF_INC_RC BIT_8 /* Increment command resource count */
+#define OF_FAST_POST BIT_9 /* Enable mailbox fast posting. */
+#define OF_CONF_REQ BIT_13 /* Confirmation Requested */
+#define OF_TERM_EXCH BIT_14 /* Terminate exchange */
+#define OF_SSTS BIT_15 /* Send SCSI status */
+#endif
+
+#ifndef QLA_TGT_DATASEGS_PER_CMD32
+#define QLA_TGT_DATASEGS_PER_CMD32 3
+#define QLA_TGT_DATASEGS_PER_CONT32 7
+#define QLA_TGT_MAX_SG32(ql) \
+ (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD32 + \
+ QLA_TGT_DATASEGS_PER_CONT32*((ql) - 1)) : 0)
+
+#define QLA_TGT_DATASEGS_PER_CMD64 2
+#define QLA_TGT_DATASEGS_PER_CONT64 5
+#define QLA_TGT_MAX_SG64(ql) \
+ (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD64 + \
+ QLA_TGT_DATASEGS_PER_CONT64*((ql) - 1)) : 0)
+#endif
+
+#ifndef QLA_TGT_DATASEGS_PER_CMD_24XX
+#define QLA_TGT_DATASEGS_PER_CMD_24XX 1
+#define QLA_TGT_DATASEGS_PER_CONT_24XX 5
+#define QLA_TGT_MAX_SG_24XX(ql) \
+ (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \
+ QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0))
+#endif
+#endif
+
+#define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \
+ ? le16_to_cpu((iocb)->u.isp2x.target.extended) \
+ : (uint16_t)(iocb)->u.isp2x.target.id.standard)
+
+#ifndef IMMED_NOTIFY_TYPE
+#define IMMED_NOTIFY_TYPE 0x0D /* Immediate notify entry. */
+/*
+ * ISP queue - immediate notify entry structure definition.
+ * This is sent by the ISP to the target driver.
+ * This IOCB carries reports of events from the
+ * initiator that need to be handled by the target
+ * driver immediately.
+ */
+struct imm_ntfy_from_isp {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ union {
+ struct {
+ uint32_t sys_define_2; /* System defined. */
+ target_id_t target;
+ uint16_t lun;
+ uint8_t target_id;
+ uint8_t reserved_1;
+ uint16_t status_modifier;
+ uint16_t status;
+ uint16_t task_flags;
+ uint16_t seq_id;
+ uint16_t srr_rx_id;
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+#define SRR_IU_DATA_IN 0x1
+#define SRR_IU_DATA_OUT 0x5
+#define SRR_IU_STATUS 0x7
+ uint16_t srr_ox_id;
+ uint8_t reserved_2[28];
+ } isp2x;
+ struct {
+ uint32_t reserved;
+ uint16_t nport_handle;
+ uint16_t reserved_2;
+ uint16_t flags;
+#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1
+#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0
+ uint16_t srr_rx_id;
+ uint16_t status;
+ uint8_t status_subcode;
+ uint8_t reserved_3;
+ uint32_t exchange_address;
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+ uint16_t srr_ox_id;
+ uint8_t reserved_4[19];
+ uint8_t vp_index;
+ uint32_t reserved_5;
+ uint8_t port_id[3];
+ uint8_t reserved_6;
+ } isp24;
+ } u;
+ uint16_t reserved_7;
+ uint16_t ox_id;
+} __packed;
+#endif
+
+#ifndef NOTIFY_ACK_TYPE
+#define NOTIFY_ACK_TYPE 0x0E /* Notify acknowledge entry. */
+/*
+ * ISP queue - notify acknowledge entry structure definition.
+ * This is sent to the ISP from the target driver.
+ */
+struct nack_to_isp {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ union {
+ struct {
+ uint32_t sys_define_2; /* System defined. */
+ target_id_t target;
+ uint8_t target_id;
+ uint8_t reserved_1;
+ uint16_t flags;
+ uint16_t resp_code;
+ uint16_t status;
+ uint16_t task_flags;
+ uint16_t seq_id;
+ uint16_t srr_rx_id;
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+ uint16_t srr_flags;
+ uint16_t srr_reject_code;
+ uint8_t srr_reject_vendor_uniq;
+ uint8_t srr_reject_code_expl;
+ uint8_t reserved_2[24];
+ } isp2x;
+ struct {
+ uint32_t handle;
+ uint16_t nport_handle;
+ uint16_t reserved_1;
+ uint16_t flags;
+ uint16_t srr_rx_id;
+ uint16_t status;
+ uint8_t status_subcode;
+ uint8_t reserved_3;
+ uint32_t exchange_address;
+ uint32_t srr_rel_offs;
+ uint16_t srr_ui;
+ uint16_t srr_flags;
+ uint8_t reserved_4[19];
+ uint8_t vp_index;
+ uint8_t srr_reject_vendor_uniq;
+ uint8_t srr_reject_code_expl;
+ uint8_t srr_reject_code;
+ uint8_t reserved_5[5];
+ } isp24;
+ } u;
+ uint8_t reserved[2];
+ uint16_t ox_id;
+} __packed;
+#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
+#define NOTIFY_ACK_SRR_FLAGS_REJECT 1
+
+#define NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM 0x9
+
+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL 0
+#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_UNABLE_TO_SUPPLY_DATA 0x2a
+
+#define NOTIFY_ACK_SUCCESS 0x01
+#endif
+
+#ifndef ACCEPT_TGT_IO_TYPE
+#define ACCEPT_TGT_IO_TYPE 0x16 /* Accept target I/O entry. */
+#endif
+
+#ifndef CONTINUE_TGT_IO_TYPE
+#define CONTINUE_TGT_IO_TYPE 0x17
+/*
+ * ISP queue - Continue Target I/O (CTIO) entry for status mode 0 structure.
+ * This structure is sent to the ISP 2xxx from the target driver.
+ */
+struct ctio_to_2xxx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle; /* System defined handle */
+ target_id_t target;
+ uint16_t rx_id;
+ uint16_t flags;
+ uint16_t status;
+ uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */
+ uint16_t dseg_count; /* Data segment count. */
+ uint32_t relative_offset;
+ uint32_t residual;
+ uint16_t reserved_1[3];
+ uint16_t scsi_status;
+ uint32_t transfer_length;
+ uint32_t dseg_0_address; /* Data segment 0 address. */
+ uint32_t dseg_0_length; /* Data segment 0 length. */
+ uint32_t dseg_1_address; /* Data segment 1 address. */
+ uint32_t dseg_1_length; /* Data segment 1 length. */
+ uint32_t dseg_2_address; /* Data segment 2 address. */
+ uint32_t dseg_2_length; /* Data segment 2 length. */
+} __packed;
+#define ATIO_PATH_INVALID 0x07
+#define ATIO_CANT_PROV_CAP 0x16
+#define ATIO_CDB_VALID 0x3D
+
+#define ATIO_EXEC_READ BIT_1
+#define ATIO_EXEC_WRITE BIT_0
+#endif
+
+#ifndef CTIO_A64_TYPE
+#define CTIO_A64_TYPE 0x1F
+#define CTIO_SUCCESS 0x01
+#define CTIO_ABORTED 0x02
+#define CTIO_INVALID_RX_ID 0x08
+#define CTIO_TIMEOUT 0x0B
+#define CTIO_LIP_RESET 0x0E
+#define CTIO_TARGET_RESET 0x17
+#define CTIO_PORT_UNAVAILABLE 0x28
+#define CTIO_PORT_LOGGED_OUT 0x29
+#define CTIO_PORT_CONF_CHANGED 0x2A
+#define CTIO_SRR_RECEIVED 0x45
+#endif
+
+#ifndef CTIO_RET_TYPE
+#define CTIO_RET_TYPE 0x17 /* CTIO return entry */
+#define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */
+
+struct fcp_hdr {
+ uint8_t r_ctl;
+ uint8_t d_id[3];
+ uint8_t cs_ctl;
+ uint8_t s_id[3];
+ uint8_t type;
+ uint8_t f_ctl[3];
+ uint8_t seq_id;
+ uint8_t df_ctl;
+ uint16_t seq_cnt;
+ uint16_t ox_id;
+ uint16_t rx_id;
+ uint32_t parameter;
+} __packed;
+
+struct fcp_hdr_le {
+ uint8_t d_id[3];
+ uint8_t r_ctl;
+ uint8_t s_id[3];
+ uint8_t cs_ctl;
+ uint8_t f_ctl[3];
+ uint8_t type;
+ uint16_t seq_cnt;
+ uint8_t df_ctl;
+ uint8_t seq_id;
+ uint16_t rx_id;
+ uint16_t ox_id;
+ uint32_t parameter;
+} __packed;
+
+#define F_CTL_EXCH_CONTEXT_RESP BIT_23
+#define F_CTL_SEQ_CONTEXT_RESIP BIT_22
+#define F_CTL_LAST_SEQ BIT_20
+#define F_CTL_END_SEQ BIT_19
+#define F_CTL_SEQ_INITIATIVE BIT_16
+
+#define R_CTL_BASIC_LINK_SERV 0x80
+#define R_CTL_B_ACC 0x4
+#define R_CTL_B_RJT 0x5
+
+struct atio7_fcp_cmnd {
+ uint64_t lun;
+ uint8_t cmnd_ref;
+ uint8_t task_attr:3;
+ uint8_t reserved:5;
+ uint8_t task_mgmt_flags;
+#define FCP_CMND_TASK_MGMT_CLEAR_ACA 6
+#define FCP_CMND_TASK_MGMT_TARGET_RESET 5
+#define FCP_CMND_TASK_MGMT_LU_RESET 4
+#define FCP_CMND_TASK_MGMT_CLEAR_TASK_SET 2
+#define FCP_CMND_TASK_MGMT_ABORT_TASK_SET 1
+ uint8_t wrdata:1;
+ uint8_t rddata:1;
+ uint8_t add_cdb_len:6;
+ uint8_t cdb[16];
+ /*
+ * add_cdb is optional and can be absent from struct atio7_fcp_cmnd. Its
+ * size is 4 only to make sizeof(struct atio7_fcp_cmnd) match what the
+ * BUILD_BUG_ON in qlt_init() expects.
+ */
+ uint8_t add_cdb[4];
+ /* uint32_t data_length; */
+} __packed;
+
+/*
+ * ISP queue - Accept Target I/O (ATIO) type entry IOCB structure.
+ * This is sent from the ISP to the target driver.
+ */
+struct atio_from_isp {
+ union {
+ struct {
+ uint16_t entry_hdr;
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t sys_define_2; /* System defined. */
+ target_id_t target;
+ uint16_t rx_id;
+ uint16_t flags;
+ uint16_t status;
+ uint8_t command_ref;
+ uint8_t task_codes;
+ uint8_t task_flags;
+ uint8_t execution_codes;
+ uint8_t cdb[MAX_CMDSZ];
+ uint32_t data_length;
+ uint16_t lun;
+ uint8_t initiator_port_name[WWN_SIZE]; /* on qla23xx */
+ uint16_t reserved_32[6];
+ uint16_t ox_id;
+ } isp2x;
+ struct {
+ uint16_t entry_hdr;
+ uint8_t fcp_cmnd_len_low;
+ uint8_t fcp_cmnd_len_high:4;
+ uint8_t attr:4;
+ uint32_t exchange_addr;
+#define ATIO_EXCHANGE_ADDRESS_UNKNOWN 0xFFFFFFFF
+ struct fcp_hdr fcp_hdr;
+ struct atio7_fcp_cmnd fcp_cmnd;
+ } isp24;
+ struct {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t data[58];
+ uint32_t signature;
+#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
+ } raw;
+ } u;
+} __packed;
+
+#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
+
+/*
+ * ISP queue - Continue Target I/O (CTIO) type 7 entry (for 24xx) structure.
+ * This structure is sent to the ISP 24xx from the target driver.
+ */
+
+struct ctio7_to_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle; /* System defined handle */
+ uint16_t nport_handle;
+#define CTIO7_NHANDLE_UNRECOGNIZED 0xFFFF
+ uint16_t timeout;
+ uint16_t dseg_count; /* Data segment count. */
+ uint8_t vp_index;
+ uint8_t add_flags;
+ uint8_t initiator_id[3];
+ uint8_t reserved;
+ uint32_t exchange_addr;
+ union {
+ struct {
+ uint16_t reserved1;
+ uint16_t flags;
+ uint32_t residual;
+ uint16_t ox_id;
+ uint16_t scsi_status;
+ uint32_t relative_offset;
+ uint32_t reserved2;
+ uint32_t transfer_length;
+ uint32_t reserved3;
+ /* Data segment 0 address. */
+ uint32_t dseg_0_address[2];
+ /* Data segment 0 length. */
+ uint32_t dseg_0_length;
+ } status0;
+ struct {
+ uint16_t sense_length;
+ uint16_t flags;
+ uint32_t residual;
+ uint16_t ox_id;
+ uint16_t scsi_status;
+ uint16_t response_len;
+ uint16_t reserved;
+ uint8_t sense_data[24];
+ } status1;
+ } u;
+} __packed;
+
+/*
+ * ISP queue - CTIO type 7 from ISP 24xx to target driver
+ * returned entry structure.
+ */
+struct ctio7_from_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle; /* System defined handle */
+ uint16_t status;
+ uint16_t timeout;
+ uint16_t dseg_count; /* Data segment count. */
+ uint8_t vp_index;
+ uint8_t reserved1[5];
+ uint32_t exchange_address;
+ uint16_t reserved2;
+ uint16_t flags;
+ uint32_t residual;
+ uint16_t ox_id;
+ uint16_t reserved3;
+ uint32_t relative_offset;
+ uint8_t reserved4[24];
+} __packed;
+
+/* CTIO7 flags values */
+#define CTIO7_FLAGS_SEND_STATUS BIT_15
+#define CTIO7_FLAGS_TERMINATE BIT_14
+#define CTIO7_FLAGS_CONFORM_REQ BIT_13
+#define CTIO7_FLAGS_DONT_RET_CTIO BIT_8
+#define CTIO7_FLAGS_STATUS_MODE_0 0
+#define CTIO7_FLAGS_STATUS_MODE_1 BIT_6
+#define CTIO7_FLAGS_EXPLICIT_CONFORM BIT_5
+#define CTIO7_FLAGS_CONFIRM_SATISF BIT_4
+#define CTIO7_FLAGS_DSD_PTR BIT_2
+#define CTIO7_FLAGS_DATA_IN BIT_1
+#define CTIO7_FLAGS_DATA_OUT BIT_0
+
+#define ELS_PLOGI 0x3
+#define ELS_FLOGI 0x4
+#define ELS_LOGO 0x5
+#define ELS_PRLI 0x20
+#define ELS_PRLO 0x21
+#define ELS_TPRLO 0x24
+#define ELS_PDISC 0x50
+#define ELS_ADISC 0x52
+
+/*
+ * ISP queue - ABTS received/response entries structure definition for 24xx.
+ */
+#define ABTS_RECV_24XX 0x54 /* ABTS received (for 24xx) */
+#define ABTS_RESP_24XX 0x55 /* ABTS response (for 24xx) */
+
+/*
+ * ISP queue - ABTS received IOCB entry structure definition for 24xx.
+ * The ABTS BLS received from the wire is sent to the
+ * target driver by the ISP 24xx.
+ * The IOCB is placed on the response queue.
+ */
+struct abts_recv_from_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint8_t reserved_1[6];
+ uint16_t nport_handle;
+ uint8_t reserved_2[2];
+ uint8_t vp_index;
+ uint8_t reserved_3:4;
+ uint8_t sof_type:4;
+ uint32_t exchange_address;
+ struct fcp_hdr_le fcp_hdr_le;
+ uint8_t reserved_4[16];
+ uint32_t exchange_addr_to_abort;
+} __packed;
+
+#define ABTS_PARAM_ABORT_SEQ BIT_0
+
+struct ba_acc_le {
+ uint16_t reserved;
+ uint8_t seq_id_last;
+ uint8_t seq_id_valid;
+#define SEQ_ID_VALID 0x80
+#define SEQ_ID_INVALID 0x00
+ uint16_t rx_id;
+ uint16_t ox_id;
+ uint16_t high_seq_cnt;
+ uint16_t low_seq_cnt;
+} __packed;
+
+struct ba_rjt_le {
+ uint8_t vendor_uniq;
+ uint8_t reason_expl;
+ uint8_t reason_code;
+#define BA_RJT_REASON_CODE_INVALID_COMMAND 0x1
+#define BA_RJT_REASON_CODE_UNABLE_TO_PERFORM 0x9
+ uint8_t reserved;
+} __packed;
+
+/*
+ * ISP queue - ABTS Response IOCB entry structure definition for 24xx.
+ * The ABTS response to the ABTS received is sent by the
+ * target driver to the ISP 24xx.
+ * The IOCB is placed on the request queue.
+ */
+struct abts_resp_to_24xx {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle;
+ uint16_t reserved_1;
+ uint16_t nport_handle;
+ uint16_t control_flags;
+#define ABTS_CONTR_FLG_TERM_EXCHG BIT_0
+ uint8_t vp_index;
+ uint8_t reserved_3:4;
+ uint8_t sof_type:4;
+ uint32_t exchange_address;
+ struct fcp_hdr_le fcp_hdr_le;
+ union {
+ struct ba_acc_le ba_acct;
+ struct ba_rjt_le ba_rjt;
+ } __packed payload;
+ uint32_t reserved_4;
+ uint32_t exchange_addr_to_abort;
+} __packed;
+
+/*
+ * ISP queue - ABTS Response IOCB from ISP24xx Firmware entry structure.
+ * The completion status for the ABTS response (previously sent by the
+ * target driver to the ISP 24xx) is returned by the ISP 24xx firmware
+ * to the target driver.
+ * The IOCB is placed on the response queue.
+ */
+struct abts_resp_from_24xx_fw {
+ uint8_t entry_type; /* Entry type. */
+ uint8_t entry_count; /* Entry count. */
+ uint8_t sys_define; /* System defined. */
+ uint8_t entry_status; /* Entry Status. */
+ uint32_t handle;
+ uint16_t compl_status;
+#define ABTS_RESP_COMPL_SUCCESS 0
+#define ABTS_RESP_COMPL_SUBCODE_ERROR 0x31
+ uint16_t nport_handle;
+ uint16_t reserved_1;
+ uint8_t reserved_2;
+ uint8_t reserved_3:4;
+ uint8_t sof_type:4;
+ uint32_t exchange_address;
+ struct fcp_hdr_le fcp_hdr_le;
+ uint8_t reserved_4[8];
+ uint32_t error_subcode1;
+#define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM 0x1E
+ uint32_t error_subcode2;
+ uint32_t exchange_addr_to_abort;
+} __packed;
+
+/********************************************************************\
+ * Type Definitions used by initiator & target halves
+\********************************************************************/
+
+struct qla_tgt_mgmt_cmd;
+struct qla_tgt_sess;
+
+/*
+ * This structure provides a template of function calls that the
+ * target driver (from within qla_target.c) can issue to the
+ * target module (tcm_qla2xxx).
+ */
+struct qla_tgt_func_tmpl {
+
+ int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
+ unsigned char *, uint32_t, int, int, int);
+ void (*handle_data)(struct qla_tgt_cmd *);
+ int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
+ uint32_t);
+ void (*free_cmd)(struct qla_tgt_cmd *);
+ void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
+ void (*free_session)(struct qla_tgt_sess *);
+
+ int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
+ void *, uint8_t *, uint16_t);
+ struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
+ const uint16_t);
+ struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
+ const uint8_t *);
+ void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *);
+ void (*put_sess)(struct qla_tgt_sess *);
+ void (*shutdown_sess)(struct qla_tgt_sess *);
+};
+
+int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
+
+#include <target/target_core_base.h>
+
+#define QLA_TGT_TIMEOUT 10 /* in seconds */
+
+#define QLA_TGT_MAX_HW_PENDING_TIME 60 /* in seconds */
+
+/* Immediate notify status constants */
+#define IMM_NTFY_LIP_RESET 0x000E
+#define IMM_NTFY_LIP_LINK_REINIT 0x000F
+#define IMM_NTFY_IOCB_OVERFLOW 0x0016
+#define IMM_NTFY_ABORT_TASK 0x0020
+#define IMM_NTFY_PORT_LOGOUT 0x0029
+#define IMM_NTFY_PORT_CONFIG 0x002A
+#define IMM_NTFY_GLBL_TPRLO 0x002D
+#define IMM_NTFY_GLBL_LOGO 0x002E
+#define IMM_NTFY_RESOURCE 0x0034
+#define IMM_NTFY_MSG_RX 0x0036
+#define IMM_NTFY_SRR 0x0045
+#define IMM_NTFY_ELS 0x0046
+
+/* Immediate notify task flags */
+#define IMM_NTFY_TASK_MGMT_SHIFT 8
+
+#define QLA_TGT_CLEAR_ACA 0x40
+#define QLA_TGT_TARGET_RESET 0x20
+#define QLA_TGT_LUN_RESET 0x10
+#define QLA_TGT_CLEAR_TS 0x04
+#define QLA_TGT_ABORT_TS 0x02
+#define QLA_TGT_ABORT_ALL_SESS 0xFFFF
+#define QLA_TGT_ABORT_ALL 0xFFFE
+#define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD
+#define QLA_TGT_NEXUS_LOSS 0xFFFC
+
+/* Notify Acknowledge flags */
+#define NOTIFY_ACK_RES_COUNT BIT_8
+#define NOTIFY_ACK_CLEAR_LIP_RESET BIT_5
+#define NOTIFY_ACK_TM_RESP_CODE_VALID BIT_4
+
+/* Command's states */
+#define QLA_TGT_STATE_NEW 0 /* New command + target processing */
+#define QLA_TGT_STATE_NEED_DATA 1 /* target needs data to continue */
+#define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */
+#define QLA_TGT_STATE_PROCESSED 3 /* target done processing */
+#define QLA_TGT_STATE_ABORTED 4 /* Command aborted */
+
+/* Special handles */
+#define QLA_TGT_NULL_HANDLE 0
+#define QLA_TGT_SKIP_HANDLE (0xFFFFFFFF & ~CTIO_COMPLETION_HANDLE_MARK)
+
+/* ATIO task_codes field */
+#define ATIO_SIMPLE_QUEUE 0
+#define ATIO_HEAD_OF_QUEUE 1
+#define ATIO_ORDERED_QUEUE 2
+#define ATIO_ACA_QUEUE 4
+#define ATIO_UNTAGGED 5
+
+/* TM failed response codes, see FCP (9.4.11 FCP_RSP_INFO) */
+#define FC_TM_SUCCESS 0
+#define FC_TM_BAD_FCP_DATA 1
+#define FC_TM_BAD_CMD 2
+#define FC_TM_FCP_DATA_MISMATCH 3
+#define FC_TM_REJECT 4
+#define FC_TM_FAILED 5
+
+/*
+ * Error code of qlt_pre_xmit_response() meaning that the cmd's exchange was
+ * terminated, so no further action is needed and success should be returned
+ * to the target.
+ */
+#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717
+
+#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
+#define pci_dma_lo32(a) ((a) & 0xffffffff)
+#define pci_dma_hi32(a) ((((a) >> 16) >> 16) & 0xffffffff)
+#else
+#define pci_dma_lo32(a) ((a) & 0xffffffff)
+#define pci_dma_hi32(a) 0
+#endif
+
+#define QLA_TGT_SENSE_VALID(sense) ((sense != NULL) && \
+ (((const uint8_t *)(sense))[0] & 0x70) == 0x70)
+
+struct qla_port_24xx_data {
+ uint8_t port_name[WWN_SIZE];
+ uint16_t loop_id;
+ uint16_t reserved;
+};
+
+struct qla_tgt {
+ struct scsi_qla_host *vha;
+ struct qla_hw_data *ha;
+
+ /*
+ * To sync between IRQ handlers and qlt_target_release(). Needed,
+ * because req_pkt() can drop/reacquire HW lock inside. Protected by
+ * HW lock.
+ */
+ int irq_cmd_count;
+
+ int datasegs_per_cmd, datasegs_per_cont, sg_tablesize;
+
+ /* Target's flags, serialized by pha->hardware_lock */
+ unsigned int tgt_enable_64bit_addr:1; /* 64-bits PCI addr enabled */
+ unsigned int link_reinit_iocb_pending:1;
+
+ /*
+ * Protected by tgt_mutex AND hardware_lock for writing and tgt_mutex
+ * OR hardware_lock for reading.
+ */
+ int tgt_stop; /* the target mode driver is being stopped */
+ int tgt_stopped; /* the target mode driver has been stopped */
+
+ /* Count of sessions referring to qla_tgt. Protected by hardware_lock. */
+ int sess_count;
+
+ /* Protected by hardware_lock. Addition also protected by tgt_mutex. */
+ struct list_head sess_list;
+
+ /* Protected by hardware_lock */
+ struct list_head del_sess_list;
+ struct delayed_work sess_del_work;
+
+ spinlock_t sess_work_lock;
+ struct list_head sess_works_list;
+ struct work_struct sess_work;
+
+ struct imm_ntfy_from_isp link_reinit_iocb;
+ wait_queue_head_t waitQ;
+ int notify_ack_expected;
+ int abts_resp_expected;
+ int modify_lun_expected;
+
+ int ctio_srr_id;
+ int imm_srr_id;
+ spinlock_t srr_lock;
+ struct list_head srr_ctio_list;
+ struct list_head srr_imm_list;
+ struct work_struct srr_work;
+
+ atomic_t tgt_global_resets_count;
+
+ struct list_head tgt_list_entry;
+};
+
+/*
+ * Equivalent to IT Nexus (Initiator-Target)
+ */
+struct qla_tgt_sess {
+ uint16_t loop_id;
+ port_id_t s_id;
+
+ unsigned int conf_compl_supported:1;
+ unsigned int deleted:1;
+ unsigned int local:1;
+
+ struct se_session *se_sess;
+ struct scsi_qla_host *vha;
+ struct qla_tgt *tgt;
+
+ struct list_head sess_list_entry;
+ unsigned long expires;
+ struct list_head del_list_entry;
+
+ uint8_t port_name[WWN_SIZE];
+ struct work_struct free_work;
+};
+
+struct qla_tgt_cmd {
+ struct qla_tgt_sess *sess;
+ int state;
+ struct se_cmd se_cmd;
+ struct work_struct free_work;
+ struct work_struct work;
+ /* Sense buffer that will be mapped into outgoing status */
+ unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
+
+ /* to save extra sess dereferences */
+ unsigned int conf_compl_supported:1;
+ unsigned int sg_mapped:1;
+ unsigned int free_sg:1;
+ unsigned int aborted:1; /* Needed in case of SRR */
+ unsigned int write_data_transferred:1;
+
+ struct scatterlist *sg; /* cmd data buffer SG vector */
+ int sg_cnt; /* SG segments count */
+ int bufflen; /* cmd buffer length */
+ int offset;
+ uint32_t tag;
+ uint32_t unpacked_lun;
+ enum dma_data_direction dma_data_direction;
+
+ uint16_t loop_id; /* to save extra sess dereferences */
+ struct qla_tgt *tgt; /* to save extra sess dereferences */
+ struct scsi_qla_host *vha;
+ struct list_head cmd_list;
+
+ struct atio_from_isp atio;
+};
+
+struct qla_tgt_sess_work_param {
+ struct list_head sess_works_list_entry;
+
+#define QLA_TGT_SESS_WORK_ABORT 1
+#define QLA_TGT_SESS_WORK_TM 2
+ int type;
+
+ union {
+ struct abts_recv_from_24xx abts;
+ struct imm_ntfy_from_isp tm_iocb;
+ struct atio_from_isp tm_iocb2;
+ };
+};
+
+struct qla_tgt_mgmt_cmd {
+ uint8_t tmr_func;
+ uint8_t fc_tm_rsp;
+ struct qla_tgt_sess *sess;
+ struct se_cmd se_cmd;
+ struct work_struct free_work;
+ unsigned int flags;
+#define QLA24XX_MGMT_SEND_NACK 1
+ union {
+ struct atio_from_isp atio;
+ struct imm_ntfy_from_isp imm_ntfy;
+ struct abts_recv_from_24xx abts;
+ } __packed orig_iocb;
+};
+
+struct qla_tgt_prm {
+ struct qla_tgt_cmd *cmd;
+ struct qla_tgt *tgt;
+ void *pkt;
+ struct scatterlist *sg; /* cmd data buffer SG vector */
+ int seg_cnt;
+ int req_cnt;
+ uint16_t rq_result;
+ uint16_t scsi_status;
+ unsigned char *sense_buffer;
+ int sense_buffer_len;
+ int residual;
+ int add_status_pkt;
+};
+
+struct qla_tgt_srr_imm {
+ struct list_head srr_list_entry;
+ int srr_id;
+ struct imm_ntfy_from_isp imm_ntfy;
+};
+
+struct qla_tgt_srr_ctio {
+ struct list_head srr_list_entry;
+ int srr_id;
+ struct qla_tgt_cmd *cmd;
+};
+
+#define QLA_TGT_XMIT_DATA 1
+#define QLA_TGT_XMIT_STATUS 2
+#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
+
+
+extern struct qla_tgt_data qla_target;
+/*
+ * Internal function prototypes
+ */
+void qlt_disable_vha(struct scsi_qla_host *);
+
+/*
+ * Function prototypes for qla_target.c logic used by qla2xxx LLD code.
+ */
+extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *);
+extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
+extern int qlt_lport_register(struct qla_tgt_func_tmpl *, u64,
+ int (*callback)(struct scsi_qla_host *), void *);
+extern void qlt_lport_deregister(struct scsi_qla_host *);
+extern void qlt_unreg_sess(struct qla_tgt_sess *);
+extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
+extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
+extern void qlt_set_mode(struct scsi_qla_host *ha);
+extern void qlt_clear_mode(struct scsi_qla_host *ha);
+extern int __init qlt_init(void);
+extern void qlt_exit(void);
+extern void qlt_update_vp_map(struct scsi_qla_host *, int);
+
+/*
+ * This macro is used during early initializations when host->active_mode
+ * is not set. Right now, ha value is ignored.
+ */
+#define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED)
+
+static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha)
+{
+ return ha->host->active_mode & MODE_TARGET;
+}
+
+static inline bool qla_ini_mode_enabled(struct scsi_qla_host *ha)
+{
+ return ha->host->active_mode & MODE_INITIATOR;
+}
+
+static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
+{
+ if (ha->host->active_mode & MODE_INITIATOR)
+ ha->host->active_mode &= ~MODE_INITIATOR;
+ else
+ ha->host->active_mode |= MODE_INITIATOR;
+}
+
+/*
+ * Exported symbols from qla_target.c LLD logic used by qla2xxx code..
+ */
+extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *,
+ struct atio_from_isp *);
+extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
+extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
+extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
+extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
+extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
+extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
+extern void qlt_ctio_completion(struct scsi_qla_host *, uint32_t);
+extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
+extern void qlt_enable_vha(struct scsi_qla_host *);
+extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
+extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
+extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
+extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *);
+extern void qlt_24xx_config_rings(struct scsi_qla_host *,
+ device_reg_t __iomem *);
+extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
+ struct nvram_24xx *);
+extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *,
+ struct init_cb_24xx *);
+extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
+ struct sts_entry_24xx *);
+extern void qlt_modify_vp_config(struct scsi_qla_host *,
+ struct vp_config_entry_24xx *);
+extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
+extern int qlt_mem_alloc(struct qla_hw_data *);
+extern void qlt_mem_free(struct qla_hw_data *);
+extern void qlt_stop_phase1(struct qla_tgt *);
+extern void qlt_stop_phase2(struct qla_tgt *);
+
+#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
new file mode 100644
index 0000000..4752f65
--- /dev/null
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -0,0 +1,1904 @@
+/*******************************************************************************
+ * This file contains the TCM implementation using the v4 configfs fabric
+ * infrastructure for QLogic target mode HBAs.
+ *
+ * (c) Copyright 2010-2011 RisingTide Systems LLC.
+ *
+ * Licensed to the Linux Foundation under the General Public License (GPL)
+ * version 2.
+ *
+ * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
+ *
+ * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contain code from
+ * the TCM_FC / Open-FCoE.org fabric module.
+ *
+ * Copyright (c) 2010 Cisco Systems, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ ****************************************************************************/
+
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <generated/utsrelease.h>
+#include <linux/utsname.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/configfs.h>
+#include <linux/ctype.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_configfs.h>
+#include <target/configfs_macros.h>
+
+#include "qla_def.h"
+#include "qla_target.h"
+#include "tcm_qla2xxx.h"
+
+struct workqueue_struct *tcm_qla2xxx_free_wq;
+struct workqueue_struct *tcm_qla2xxx_cmd_wq;
+
+static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg)
+{
+ return 0;
+}
+
+/*
+ * Parse WWN.
+ * If strict, we require lower-case hex and colon separators to be sure
+ * the name is the same as what would be generated by tcm_qla2xxx_format_wwn()
+ * so the name and wwn are mapped one-to-one.
+ */
+static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
+{
+ const char *cp;
+ char c;
+ u32 nibble;
+ u32 byte = 0;
+ u32 pos = 0;
+ u32 err;
+
+ *wwn = 0;
+ for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
+ c = *cp;
+ if (c == '\n' && cp[1] == '\0')
+ continue;
+ if (strict && pos++ == 2 && byte++ < 7) {
+ pos = 0;
+ if (c == ':')
+ continue;
+ err = 1;
+ goto fail;
+ }
+ if (c == '\0') {
+ err = 2;
+ if (strict && byte != 8)
+ goto fail;
+ return cp - name;
+ }
+ err = 3;
+ if (isdigit(c))
+ nibble = c - '0';
+ else if (isxdigit(c) && (islower(c) || !strict))
+ nibble = tolower(c) - 'a' + 10;
+ else
+ goto fail;
+ *wwn = (*wwn << 4) | nibble;
+ }
+ err = 4;
+fail:
+ pr_debug("err %u len %zu pos %u byte %u\n",
+ err, cp - name, pos, byte);
+ return -1;
+}
+
+static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
+{
+ u8 b[8];
+
+ put_unaligned_be64(wwn, b);
+ return snprintf(buf, len,
+ "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+ b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
+}
+
+static char *tcm_qla2xxx_get_fabric_name(void)
+{
+ return "qla2xxx";
+}
+
+/*
+ * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
+ */
+static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
+{
+ unsigned int i, j;
+ u8 wwn[8];
+
+ memset(wwn, 0, sizeof(wwn));
+
+ /* Validate and store the new name */
+ for (i = 0, j = 0; i < 16; i++) {
+ int value;
+
+ value = hex_to_bin(*ns++);
+ if (value >= 0)
+ j = (j << 4) | value;
+ else
+ return -EINVAL;
+
+ if (i % 2) {
+ wwn[i/2] = j & 0xff;
+ j = 0;
+ }
+ }
+
+ *nm = wwn_to_u64(wwn);
+ return 0;
+}
+
+/*
+ * This parsing logic follows drivers/scsi/scsi_transport_fc.c:
+ * store_fc_host_vport_create()
+ */
+static int tcm_qla2xxx_npiv_parse_wwn(
+ const char *name,
+ size_t count,
+ u64 *wwpn,
+ u64 *wwnn)
+{
+ unsigned int cnt = count;
+ int rc;
+
+ *wwpn = 0;
+ *wwnn = 0;
+
+ /* count may include a LF at end of string */
+ if (name[cnt-1] == '\n')
+ cnt--;
+
+ /* validate we have enough characters for WWPN */
+ if ((cnt != (16+1+16)) || (name[16] != ':'))
+ return -EINVAL;
+
+ rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
+ if (rc != 0)
+ return rc;
+
+ rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
+ if (rc != 0)
+ return rc;
+
+ return 0;
+}
+
+static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len,
+ u64 wwpn, u64 wwnn)
+{
+ u8 b[8], b2[8];
+
+ put_unaligned_be64(wwpn, b);
+ put_unaligned_be64(wwnn, b2);
+ return snprintf(buf, len,
+ "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
+ "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
+ b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
+ b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]);
+}
+
+static char *tcm_qla2xxx_npiv_get_fabric_name(void)
+{
+ return "qla2xxx_npiv";
+}
+
+static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ u8 proto_id;
+
+ switch (lport->lport_proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ default:
+ proto_id = fc_get_fabric_proto_ident(se_tpg);
+ break;
+ }
+
+ return proto_id;
+}
+
+static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+
+ return &lport->lport_name[0];
+}
+
+static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+
+ return &lport->lport_npiv_name[0];
+}
+
+static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ return tpg->lport_tpgt;
+}
+
+static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static u32 tcm_qla2xxx_get_pr_transport_id(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code,
+ unsigned char *buf)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ int ret = 0;
+
+ switch (lport->lport_proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ default:
+ ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
+ format_code, buf);
+ break;
+ }
+
+ return ret;
+}
+
+static u32 tcm_qla2xxx_get_pr_transport_id_len(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ int ret = 0;
+
+ switch (lport->lport_proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ default:
+ ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
+ format_code);
+ break;
+ }
+
+ return ret;
+}
+
+static char *tcm_qla2xxx_parse_pr_out_transport_id(
+ struct se_portal_group *se_tpg,
+ const char *buf,
+ u32 *out_tid_len,
+ char **port_nexus_ptr)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ char *tid = NULL;
+
+ switch (lport->lport_proto_id) {
+ case SCSI_PROTOCOL_FCP:
+ default:
+ tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
+ port_nexus_ptr);
+ break;
+ }
+
+ return tid;
+}
+
+static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return QLA_TPG_ATTRIB(tpg)->generate_node_acls;
+}
+
+static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls;
+}
+
+static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect;
+}
+
+static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
+}
+
+static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
+ struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_nacl *nacl;
+
+ nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
+ if (!nacl) {
+ pr_err("Unable to alocate struct tcm_qla2xxx_nacl\n");
+ return NULL;
+ }
+
+ return &nacl->se_node_acl;
+}
+
+static void tcm_qla2xxx_release_fabric_acl(
+ struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl)
+{
+ struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+ struct tcm_qla2xxx_nacl, se_node_acl);
+ kfree(nacl);
+}
+
+static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return tpg->lport_tpgt;
+}
+
+static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
+{
+ struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
+ struct qla_tgt_mgmt_cmd, free_work);
+
+ transport_generic_free_cmd(&mcmd->se_cmd, 0);
+}
+
+/*
+ * Called from qla_target_template->free_mcmd(), and will call
+ * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
+ * release callback. qla_hw_data->hardware_lock is expected to be held
+ */
+static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
+{
+ INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
+ queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
+}
+
+static void tcm_qla2xxx_complete_free(struct work_struct *work)
+{
+ struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
+}
+
+/*
+ * Called from qla_target_template->free_cmd(), and will call
+ * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
+ * release callback. qla_hw_data->hardware_lock is expected to be held
+ */
+static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
+{
+ INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
+ queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+}
+
+/*
+ * Called from struct target_core_fabric_ops->check_stop_free() context
+ */
+static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
+{
+ return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+}
+
+/* tcm_qla2xxx_release_cmd - Callback from TCM Core to release the underlying
+ * fabric descriptor for the @se_cmd command
+ */
+static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd;
+
+ if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
+ struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
+ struct qla_tgt_mgmt_cmd, se_cmd);
+ qlt_free_mcmd(mcmd);
+ return;
+ }
+
+ cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+ qlt_free_cmd(cmd);
+}
+
+static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
+{
+ struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+ struct scsi_qla_host *vha;
+ unsigned long flags;
+
+ BUG_ON(!sess);
+ vha = sess->vha;
+
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ target_sess_cmd_list_set_waiting(se_sess);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
+ return 1;
+}
+
+static void tcm_qla2xxx_close_session(struct se_session *se_sess)
+{
+ struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+ struct scsi_qla_host *vha;
+ unsigned long flags;
+
+ BUG_ON(!sess);
+ vha = sess->vha;
+
+ spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+ qlt_unreg_sess(sess);
+ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
+static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+/*
+ * The LIO target core uses DMA_TO_DEVICE to mean that data is going
+ * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
+ * that data is coming from the target (eg handling a READ). However,
+ * this is just the opposite of what we have to tell the DMA mapping
+ * layer -- eg when handling a READ, the HBA will have to DMA the data
+ * out of memory so it can send it to the initiator, which means we
+ * need to use DMA_TO_DEVICE when we map the data.
+ */
+static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd)
+{
+ if (se_cmd->se_cmd_flags & SCF_BIDI)
+ return DMA_BIDIRECTIONAL;
+
+ switch (se_cmd->data_direction) {
+ case DMA_TO_DEVICE:
+ return DMA_FROM_DEVICE;
+ case DMA_FROM_DEVICE:
+ return DMA_TO_DEVICE;
+ case DMA_NONE:
+ default:
+ return DMA_NONE;
+ }
+}
+
+static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ struct qla_tgt_cmd, se_cmd);
+
+ cmd->bufflen = se_cmd->data_length;
+ cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+
+ cmd->sg_cnt = se_cmd->t_data_nents;
+ cmd->sg = se_cmd->t_data_sg;
+
+ /*
+ * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
+ * the SGL mappings into PCIe memory for incoming FCP WRITE data.
+ */
+ return qlt_rdy_to_xfer(cmd);
+}
+
+static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
+{
+ unsigned long flags;
+ /*
+ * Check for WRITE_PENDING status to determine if we need to wait for
+ * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
+ */
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+ if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
+ se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+ wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
+ 3000);
+ return 0;
+ }
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+ return 0;
+}
+
+static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
+{
+ return;
+}
+
+static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ struct qla_tgt_cmd, se_cmd);
+
+ return cmd->tag;
+}
+
+static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+/*
+ * Called from process context in qla_target.c:qlt_do_work() code
+ */
+static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
+ unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
+ int data_dir, int bidi)
+{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+ struct se_session *se_sess;
+ struct qla_tgt_sess *sess;
+ int flags = TARGET_SCF_ACK_KREF;
+
+ if (bidi)
+ flags |= TARGET_SCF_BIDI_OP;
+
+ sess = cmd->sess;
+ if (!sess) {
+ pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
+ return -EINVAL;
+ }
+
+ se_sess = sess->se_sess;
+ if (!se_sess) {
+ pr_err("Unable to locate active struct se_session\n");
+ return -EINVAL;
+ }
+
+ return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
+ cmd->unpacked_lun, data_length, fcp_task_attr,
+ data_dir, flags);
+}
+
+static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
+{
+ struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+ /*
+ * Ensure that the complete FCP WRITE payload has been received.
+ * Otherwise return an exception via CHECK_CONDITION status.
+ */
+ if (!cmd->write_data_transferred) {
+ /*
+ * Check if se_cmd has already been aborted via LUN_RESET, and is
+ * waiting upon completion in tcm_qla2xxx_write_pending_status().
+ */
+ if (cmd->se_cmd.transport_state & CMD_T_ABORTED) {
+ complete(&cmd->se_cmd.t_transport_stop_comp);
+ return;
+ }
+
+ cmd->se_cmd.scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
+ transport_generic_request_failure(&cmd->se_cmd);
+ return;
+ }
+
+ return target_execute_cmd(&cmd->se_cmd);
+}
+
+/*
+ * Called from qla_target.c:qlt_do_ctio_completion()
+ */
+static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
+{
+ INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
+ queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+}
+
+/*
+ * Called from qla_target.c:qlt_issue_task_mgmt()
+ */
+static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
+ uint8_t tmr_func, uint32_t tag)
+{
+ struct qla_tgt_sess *sess = mcmd->sess;
+ struct se_cmd *se_cmd = &mcmd->se_cmd;
+
+ return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
+ tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
+}
+
+static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ struct qla_tgt_cmd, se_cmd);
+
+ cmd->bufflen = se_cmd->data_length;
+ cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+ cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+
+ cmd->sg_cnt = se_cmd->t_data_nents;
+ cmd->sg = se_cmd->t_data_sg;
+ cmd->offset = 0;
+
+ /*
+ * Now queue the completed DATA_IN to the qla2xxx LLD and response ring
+ */
+ return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
+ se_cmd->scsi_status);
+}
+
+static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
+{
+ struct qla_tgt_cmd *cmd = container_of(se_cmd,
+ struct qla_tgt_cmd, se_cmd);
+ int xmit_type = QLA_TGT_XMIT_STATUS;
+
+ cmd->bufflen = se_cmd->data_length;
+ cmd->sg = NULL;
+ cmd->sg_cnt = 0;
+ cmd->offset = 0;
+ cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
+ cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+
+ if (se_cmd->data_direction == DMA_FROM_DEVICE) {
+ /*
+ * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
+ * for the qlt_xmit_response() LLD code
+ */
+ se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+ se_cmd->residual_count = se_cmd->data_length;
+
+ cmd->bufflen = 0;
+ }
+ /*
+ * Now queue status response to qla2xxx LLD code and response ring
+ */
+ return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
+}
+
+static int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
+{
+ struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+ struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
+ struct qla_tgt_mgmt_cmd, se_cmd);
+
+ pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
+ mcmd, se_tmr->function, se_tmr->response);
+ /*
+ * Do translation between TCM TM response codes and
+ * QLA2xxx FC TM response codes.
+ */
+ switch (se_tmr->response) {
+ case TMR_FUNCTION_COMPLETE:
+ mcmd->fc_tm_rsp = FC_TM_SUCCESS;
+ break;
+ case TMR_TASK_DOES_NOT_EXIST:
+ mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
+ break;
+ case TMR_FUNCTION_REJECTED:
+ mcmd->fc_tm_rsp = FC_TM_REJECT;
+ break;
+ case TMR_LUN_DOES_NOT_EXIST:
+ default:
+ mcmd->fc_tm_rsp = FC_TM_FAILED;
+ break;
+ }
+ /*
+ * Queue the TM response to QLA2xxx LLD to build a
+ * CTIO response packet.
+ */
+ qlt_xmit_tm_rsp(mcmd);
+
+ return 0;
+}
+
+static u16 tcm_qla2xxx_get_fabric_sense_len(void)
+{
+ return 0;
+}
+
+static u16 tcm_qla2xxx_set_fabric_sense_len(struct se_cmd *se_cmd,
+ u32 sense_length)
+{
+ return 0;
+}
+
+/* Local pointer to allocated TCM configfs fabric module */
+struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
+struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
+
+static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
+ struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
+{
+ struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
+ struct se_portal_group *se_tpg = se_nacl->se_tpg;
+ struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+ struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+ struct tcm_qla2xxx_nacl, se_node_acl);
+ void *node;
+
+ pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
+
+ node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
+ WARN_ON(node && (node != se_nacl));
+
+ pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
+ se_nacl, nacl->nport_wwnn, nacl->nport_id);
+ /*
+ * Now clear the se_nacl and session pointers from our HW lport lookup
+ * table mapping for this initiator's fabric S_ID and LOOP_ID entries.
+ *
+ * This is done ahead of callbacks into tcm_qla2xxx_free_session() ->
+ * target_wait_for_sess_cmds() before the session waits for outstanding
+ * I/O to complete, to avoid a race between session shutdown execution
+ * and incoming ATIOs or TMRs picking up a stale se_node_acl reference.
+ */
+ tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
+}
+
+static void tcm_qla2xxx_release_session(struct kref *kref)
+{
+ struct se_session *se_sess = container_of(kref,
+ struct se_session, sess_kref);
+
+ qlt_unreg_sess(se_sess->fabric_sess_ptr);
+}
+
+static void tcm_qla2xxx_put_session(struct se_session *se_sess)
+{
+ struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
+ struct qla_hw_data *ha = sess->vha->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
+{
+ tcm_qla2xxx_put_session(sess->se_sess);
+}
+
+static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
+{
+ tcm_qla2xxx_shutdown_session(sess->se_sess);
+}
+
+static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
+ struct se_portal_group *se_tpg,
+ struct config_group *group,
+ const char *name)
+{
+ struct se_node_acl *se_nacl, *se_nacl_new;
+ struct tcm_qla2xxx_nacl *nacl;
+ u64 wwnn;
+ u32 qla2xxx_nexus_depth;
+
+ if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
+ return ERR_PTR(-EINVAL);
+
+ se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg);
+ if (!se_nacl_new)
+ return ERR_PTR(-ENOMEM);
+/* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */
+ qla2xxx_nexus_depth = 1;
+
+ /*
+ * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+ * when converting a NodeACL from demo mode -> explicit
+ */
+ se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
+ name, qla2xxx_nexus_depth);
+ if (IS_ERR(se_nacl)) {
+ tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
+ return se_nacl;
+ }
+ /*
+ * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN
+ */
+ nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+ nacl->nport_wwnn = wwnn;
+ tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);
+
+ return se_nacl;
+}
+
+static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl)
+{
+ struct se_portal_group *se_tpg = se_acl->se_tpg;
+ struct tcm_qla2xxx_nacl *nacl = container_of(se_acl,
+ struct tcm_qla2xxx_nacl, se_node_acl);
+
+ core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1);
+ kfree(nacl);
+}
+
+/* Start items for tcm_qla2xxx_tpg_attrib_cit */
+
+#define DEF_QLA_TPG_ATTRIB(name) \
+ \
+static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \
+ struct se_portal_group *se_tpg, \
+ char *page) \
+{ \
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
+ struct tcm_qla2xxx_tpg, se_tpg); \
+ \
+ return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name); \
+} \
+ \
+static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \
+ struct se_portal_group *se_tpg, \
+ const char *page, \
+ size_t count) \
+{ \
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
+ struct tcm_qla2xxx_tpg, se_tpg); \
+ unsigned long val; \
+ int ret; \
+ \
+ ret = kstrtoul(page, 0, &val); \
+ if (ret < 0) { \
+ pr_err("kstrtoul() failed with" \
+ " ret: %d\n", ret); \
+ return -EINVAL; \
+ } \
+ ret = tcm_qla2xxx_set_attrib_##name(tpg, val); \
+ \
+ return (!ret) ? count : -EINVAL; \
+}
+
+#define DEF_QLA_TPG_ATTR_BOOL(_name) \
+ \
+static int tcm_qla2xxx_set_attrib_##_name( \
+ struct tcm_qla2xxx_tpg *tpg, \
+ unsigned long val) \
+{ \
+ struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib; \
+ \
+ if ((val != 0) && (val != 1)) { \
+ pr_err("Illegal boolean value %lu\n", val); \
+ return -EINVAL; \
+ } \
+ \
+ a->_name = val; \
+ return 0; \
+}
+
+#define QLA_TPG_ATTR(_name, _mode) \
+ TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls
+ */
+DEF_QLA_TPG_ATTR_BOOL(generate_node_acls);
+DEF_QLA_TPG_ATTRIB(generate_node_acls);
+QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_cache_dynamic_acls
+ */
+DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls);
+DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
+QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect
+ */
+DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect);
+DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
+QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
+
+/*
+ * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect
+ */
+DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
+DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
+QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
+ &tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
+ &tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
+ &tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
+ &tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
+ NULL,
+};
+
+/* End items for tcm_qla2xxx_tpg_attrib_cit */
+
+static ssize_t tcm_qla2xxx_tpg_show_enable(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+
+ return snprintf(page, PAGE_SIZE, "%d\n",
+ atomic_read(&tpg->lport_tpg_enabled));
+}
+
+static ssize_t tcm_qla2xxx_tpg_store_enable(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+ struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct scsi_qla_host *vha = lport->qla_vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ unsigned long op;
+ int rc;
+
+ rc = kstrtoul(page, 0, &op);
+ if (rc < 0) {
+ pr_err("kstrtoul() returned %d\n", rc);
+ return -EINVAL;
+ }
+ if ((op != 1) && (op != 0)) {
+ pr_err("Illegal value for tpg_enable: %lu\n", op);
+ return -EINVAL;
+ }
+
+ if (op) {
+ atomic_set(&tpg->lport_tpg_enabled, 1);
+ qlt_enable_vha(vha);
+ } else {
+ if (!ha->tgt.qla_tgt) {
+ pr_err("truct qla_hw_data *ha->tgt.qla_tgt is NULL\n");
+ return -ENODEV;
+ }
+ atomic_set(&tpg->lport_tpg_enabled, 0);
+ qlt_stop_phase1(ha->tgt.qla_tgt);
+ }
+
+ return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
+ &tcm_qla2xxx_tpg_enable.attr,
+ NULL,
+};
+
+static struct se_portal_group *tcm_qla2xxx_make_tpg(
+ struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_qla2xxx_lport *lport = container_of(wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct tcm_qla2xxx_tpg *tpg;
+ unsigned long tpgt;
+ int ret;
+
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ if (!lport->qla_npiv_vp && (tpgt != 1)) {
+ pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
+ return ERR_PTR(-ENOSYS);
+ }
+
+ tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
+ if (!tpg) {
+ pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ tpg->lport = lport;
+ tpg->lport_tpgt = tpgt;
+ /*
+ * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
+ * NodeACLs
+ */
+ QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
+ QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
+ QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
+
+ ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
+ &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+ /*
+ * Setup local TPG=1 pointer for non NPIV mode.
+ */
+ if (lport->qla_npiv_vp == NULL)
+ lport->tpg_1 = tpg;
+
+ return &tpg->se_tpg;
+}
+
+static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
+{
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ struct tcm_qla2xxx_lport *lport = tpg->lport;
+ struct scsi_qla_host *vha = lport->qla_vha;
+ struct qla_hw_data *ha = vha->hw;
+ /*
+ * Call into qla_target.c LLD logic to shut down the active
+ * FC Nexuses and disable target mode operation for this qla_hw_data
+ */
+ if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop)
+ qlt_stop_phase1(ha->tgt.qla_tgt);
+
+ core_tpg_deregister(se_tpg);
+ /*
+ * Clear local TPG=1 pointer for non NPIV mode.
+ */
+ if (lport->qla_npiv_vp == NULL)
+ lport->tpg_1 = NULL;
+
+ kfree(tpg);
+}
+
+static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
+ struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_qla2xxx_lport *lport = container_of(wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct tcm_qla2xxx_tpg *tpg;
+ unsigned long tpgt;
+ int ret;
+
+ if (strstr(name, "tpgt_") != name)
+ return ERR_PTR(-EINVAL);
+ if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
+ if (!tpg) {
+ pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ tpg->lport = lport;
+ tpg->lport_tpgt = tpgt;
+
+ ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
+ &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ if (ret < 0) {
+ kfree(tpg);
+ return NULL;
+ }
+ return &tpg->se_tpg;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
+ scsi_qla_host_t *vha,
+ const uint8_t *s_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct tcm_qla2xxx_lport *lport;
+ struct se_node_acl *se_nacl;
+ struct tcm_qla2xxx_nacl *nacl;
+ u32 key;
+
+ lport = ha->tgt.target_lport_ptr;
+ if (!lport) {
+ pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+ dump_stack();
+ return NULL;
+ }
+
+ key = (((unsigned long)s_id[0] << 16) |
+ ((unsigned long)s_id[1] << 8) |
+ (unsigned long)s_id[2]);
+ pr_debug("find_sess_by_s_id: 0x%06x\n", key);
+
+ se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
+ if (!se_nacl) {
+ pr_debug("Unable to locate s_id: 0x%06x\n", key);
+ return NULL;
+ }
+ pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n",
+ se_nacl, se_nacl->initiatorname);
+
+ nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+ if (!nacl->qla_tgt_sess) {
+ pr_err("Unable to locate struct qla_tgt_sess\n");
+ return NULL;
+ }
+
+ return nacl->qla_tgt_sess;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static void tcm_qla2xxx_set_sess_by_s_id(
+ struct tcm_qla2xxx_lport *lport,
+ struct se_node_acl *new_se_nacl,
+ struct tcm_qla2xxx_nacl *nacl,
+ struct se_session *se_sess,
+ struct qla_tgt_sess *qla_tgt_sess,
+ uint8_t *s_id)
+{
+ u32 key;
+ void *slot;
+ int rc;
+
+ key = (((unsigned long)s_id[0] << 16) |
+ ((unsigned long)s_id[1] << 8) |
+ (unsigned long)s_id[2]);
+ pr_debug("set_sess_by_s_id: %06x\n", key);
+
+ slot = btree_lookup32(&lport->lport_fcport_map, key);
+ if (!slot) {
+ if (new_se_nacl) {
+ pr_debug("Setting up new fc_port entry to new_se_nacl\n");
+ nacl->nport_id = key;
+ rc = btree_insert32(&lport->lport_fcport_map, key,
+ new_se_nacl, GFP_ATOMIC);
+ if (rc)
+ printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n",
+ (int)key);
+ } else {
+ pr_debug("Wiping nonexisting fc_port entry\n");
+ }
+
+ qla_tgt_sess->se_sess = se_sess;
+ nacl->qla_tgt_sess = qla_tgt_sess;
+ return;
+ }
+
+ if (nacl->qla_tgt_sess) {
+ if (new_se_nacl == NULL) {
+ pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n");
+ btree_remove32(&lport->lport_fcport_map, key);
+ nacl->qla_tgt_sess = NULL;
+ return;
+ }
+ pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n");
+ btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
+ qla_tgt_sess->se_sess = se_sess;
+ nacl->qla_tgt_sess = qla_tgt_sess;
+ return;
+ }
+
+ if (new_se_nacl == NULL) {
+ pr_debug("Clearing existing fc_port entry\n");
+ btree_remove32(&lport->lport_fcport_map, key);
+ return;
+ }
+
+ pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n");
+ btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
+ qla_tgt_sess->se_sess = se_sess;
+ nacl->qla_tgt_sess = qla_tgt_sess;
+
+ pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n",
+ nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
+ scsi_qla_host_t *vha,
+ const uint16_t loop_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct tcm_qla2xxx_lport *lport;
+ struct se_node_acl *se_nacl;
+ struct tcm_qla2xxx_nacl *nacl;
+ struct tcm_qla2xxx_fc_loopid *fc_loopid;
+
+ lport = ha->tgt.target_lport_ptr;
+ if (!lport) {
+ pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+ dump_stack();
+ return NULL;
+ }
+
+ pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
+
+ fc_loopid = lport->lport_loopid_map + loop_id;
+ se_nacl = fc_loopid->se_nacl;
+ if (!se_nacl) {
+ pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n",
+ loop_id);
+ return NULL;
+ }
+
+ nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+
+ if (!nacl->qla_tgt_sess) {
+ pr_err("Unable to locate struct qla_tgt_sess\n");
+ return NULL;
+ }
+
+ return nacl->qla_tgt_sess;
+}
+
+/*
+ * Expected to be called with struct qla_hw_data->hardware_lock held
+ */
+static void tcm_qla2xxx_set_sess_by_loop_id(
+ struct tcm_qla2xxx_lport *lport,
+ struct se_node_acl *new_se_nacl,
+ struct tcm_qla2xxx_nacl *nacl,
+ struct se_session *se_sess,
+ struct qla_tgt_sess *qla_tgt_sess,
+ uint16_t loop_id)
+{
+ struct se_node_acl *saved_nacl;
+ struct tcm_qla2xxx_fc_loopid *fc_loopid;
+
+ pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
+
+ fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
+ lport->lport_loopid_map)[loop_id];
+
+ saved_nacl = fc_loopid->se_nacl;
+ if (!saved_nacl) {
+ pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
+ fc_loopid->se_nacl = new_se_nacl;
+ if (qla_tgt_sess->se_sess != se_sess)
+ qla_tgt_sess->se_sess = se_sess;
+ if (nacl->qla_tgt_sess != qla_tgt_sess)
+ nacl->qla_tgt_sess = qla_tgt_sess;
+ return;
+ }
+
+ if (nacl->qla_tgt_sess) {
+ if (new_se_nacl == NULL) {
+ pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
+ fc_loopid->se_nacl = NULL;
+ nacl->qla_tgt_sess = NULL;
+ return;
+ }
+
+ pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
+ fc_loopid->se_nacl = new_se_nacl;
+ if (qla_tgt_sess->se_sess != se_sess)
+ qla_tgt_sess->se_sess = se_sess;
+ if (nacl->qla_tgt_sess != qla_tgt_sess)
+ nacl->qla_tgt_sess = qla_tgt_sess;
+ return;
+ }
+
+ if (new_se_nacl == NULL) {
+ pr_debug("Clearing fc_loopid->se_nacl\n");
+ fc_loopid->se_nacl = NULL;
+ return;
+ }
+
+ pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n");
+ fc_loopid->se_nacl = new_se_nacl;
+ if (qla_tgt_sess->se_sess != se_sess)
+ qla_tgt_sess->se_sess = se_sess;
+ if (nacl->qla_tgt_sess != qla_tgt_sess)
+ nacl->qla_tgt_sess = qla_tgt_sess;
+
+ pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n",
+ nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
+}
+
+/*
+ * Should always be called with qla_hw_data->hardware_lock held.
+ */
+static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
+ struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
+{
+ struct se_session *se_sess = sess->se_sess;
+ unsigned char be_sid[3];
+
+ be_sid[0] = sess->s_id.b.domain;
+ be_sid[1] = sess->s_id.b.area;
+ be_sid[2] = sess->s_id.b.al_pa;
+
+ tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
+ sess, be_sid);
+ tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
+ sess, sess->loop_id);
+}
+
+static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
+{
+ struct qla_tgt *tgt = sess->tgt;
+ struct qla_hw_data *ha = tgt->ha;
+ struct se_session *se_sess;
+ struct se_node_acl *se_nacl;
+ struct tcm_qla2xxx_lport *lport;
+ struct tcm_qla2xxx_nacl *nacl;
+
+ BUG_ON(in_interrupt());
+
+ se_sess = sess->se_sess;
+ if (!se_sess) {
+ pr_err("struct qla_tgt_sess->se_sess is NULL\n");
+ dump_stack();
+ return;
+ }
+ se_nacl = se_sess->se_node_acl;
+ nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+
+ lport = ha->tgt.target_lport_ptr;
+ if (!lport) {
+ pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+ dump_stack();
+ return;
+ }
+ target_wait_for_sess_cmds(se_sess, 0);
+
+ transport_deregister_session_configfs(sess->se_sess);
+ transport_deregister_session(sess->se_sess);
+}
+
+/*
+ * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
+ * to locate struct se_node_acl
+ */
+static int tcm_qla2xxx_check_initiator_node_acl(
+ scsi_qla_host_t *vha,
+ unsigned char *fc_wwpn,
+ void *qla_tgt_sess,
+ uint8_t *s_id,
+ uint16_t loop_id)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct tcm_qla2xxx_lport *lport;
+ struct tcm_qla2xxx_tpg *tpg;
+ struct tcm_qla2xxx_nacl *nacl;
+ struct se_portal_group *se_tpg;
+ struct se_node_acl *se_nacl;
+ struct se_session *se_sess;
+ struct qla_tgt_sess *sess = qla_tgt_sess;
+ unsigned char port_name[36];
+ unsigned long flags;
+
+ lport = ha->tgt.target_lport_ptr;
+ if (!lport) {
+ pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
+ dump_stack();
+ return -EINVAL;
+ }
+ /*
+ * Locate the TPG=1 reference..
+ */
+ tpg = lport->tpg_1;
+ if (!tpg) {
+ pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n");
+ return -EINVAL;
+ }
+ se_tpg = &tpg->se_tpg;
+
+ se_sess = transport_init_session();
+ if (IS_ERR(se_sess)) {
+ pr_err("Unable to initialize struct se_session\n");
+ return PTR_ERR(se_sess);
+ }
+ /*
+ * Format the FCP Initiator port_name into colon separated values to
+ * match the format used by tcm_qla2xxx explicit ConfigFS NodeACLs.
+ */
+ memset(&port_name, 0, 36);
+ snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
+ fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
+ fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
+ /*
+ * Locate our struct se_node_acl either from an explicit NodeACL created
+ * via ConfigFS, or via running in TPG demo mode.
+ */
+ se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
+ port_name);
+ if (!se_sess->se_node_acl) {
+ transport_free_session(se_sess);
+ return -EINVAL;
+ }
+ se_nacl = se_sess->se_node_acl;
+ nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
+ /*
+ * And now setup the new se_nacl and session pointers into our HW lport
+ * mappings for fabric S_ID and LOOP_ID.
+ */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
+ qla_tgt_sess, s_id);
+ tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
+ qla_tgt_sess, loop_id);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ /*
+ * Finally register the new FC Nexus with TCM
+ */
+ __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
+
+ return 0;
+}
+
+/*
+ * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
+ */
+static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
+ .handle_cmd = tcm_qla2xxx_handle_cmd,
+ .handle_data = tcm_qla2xxx_handle_data,
+ .handle_tmr = tcm_qla2xxx_handle_tmr,
+ .free_cmd = tcm_qla2xxx_free_cmd,
+ .free_mcmd = tcm_qla2xxx_free_mcmd,
+ .free_session = tcm_qla2xxx_free_session,
+ .check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
+ .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
+ .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
+ .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
+ .put_sess = tcm_qla2xxx_put_sess,
+ .shutdown_sess = tcm_qla2xxx_shutdown_sess,
+};
+
+static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
+{
+ int rc;
+
+ rc = btree_init32(&lport->lport_fcport_map);
+ if (rc) {
+ pr_err("Unable to initialize lport->lport_fcport_map btree\n");
+ return rc;
+ }
+
+ lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
+ 65536);
+ if (!lport->lport_loopid_map) {
+ pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
+ sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+ btree_destroy32(&lport->lport_fcport_map);
+ return -ENOMEM;
+ }
+ memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
+ * 65536);
+ pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
+ sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+ return 0;
+}
+
+static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct tcm_qla2xxx_lport *lport;
+ /*
+ * Setup local pointer to vha, NPIV VP pointer (if present) and
+ * vha->tcm_lport pointer
+ */
+ lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr;
+ lport->qla_vha = vha;
+
+ return 0;
+}
+
+static struct se_wwn *tcm_qla2xxx_make_lport(
+ struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_qla2xxx_lport *lport;
+ u64 wwpn;
+ int ret = -ENODEV;
+
+ if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
+ return ERR_PTR(-EINVAL);
+
+ lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
+ if (!lport) {
+ pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ lport->lport_wwpn = wwpn;
+ tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
+ wwpn);
+
+ ret = tcm_qla2xxx_init_lport(lport);
+ if (ret != 0)
+ goto out;
+
+ ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn,
+ tcm_qla2xxx_lport_register_cb, lport);
+ if (ret != 0)
+ goto out_lport;
+
+ return &lport->lport_wwn;
+out_lport:
+ vfree(lport->lport_loopid_map);
+ btree_destroy32(&lport->lport_fcport_map);
+out:
+ kfree(lport);
+ return ERR_PTR(ret);
+}
+
+static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
+{
+ struct tcm_qla2xxx_lport *lport = container_of(wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct scsi_qla_host *vha = lport->qla_vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct se_node_acl *node;
+ u32 key = 0;
+
+ /*
+ * Call into qla_target.c LLD logic to complete the
+ * shutdown of struct qla_tgt after the call to
+ * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above.
+ */
+ if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped)
+ qlt_stop_phase2(ha->tgt.qla_tgt);
+
+ qlt_lport_deregister(vha);
+
+ vfree(lport->lport_loopid_map);
+ btree_for_each_safe32(&lport->lport_fcport_map, key, node)
+ btree_remove32(&lport->lport_fcport_map, key);
+ btree_destroy32(&lport->lport_fcport_map);
+ kfree(lport);
+}
+
+static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
+ struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct tcm_qla2xxx_lport *lport;
+ u64 npiv_wwpn, npiv_wwnn;
+ int ret;
+
+ if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1,
+ &npiv_wwpn, &npiv_wwnn) < 0)
+ return ERR_PTR(-EINVAL);
+
+ lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
+ if (!lport) {
+ pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ lport->lport_npiv_wwpn = npiv_wwpn;
+ lport->lport_npiv_wwnn = npiv_wwnn;
+ tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
+ TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
+
+/* FIXME: tcm_qla2xxx_npiv_make_lport */
+ ret = -ENOSYS;
+ if (ret != 0)
+ goto out;
+
+ return &lport->lport_wwn;
+out:
+ kfree(lport);
+ return ERR_PTR(ret);
+}
+
+static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
+{
+ struct tcm_qla2xxx_lport *lport = container_of(wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct scsi_qla_host *vha = lport->qla_vha;
+ struct Scsi_Host *sh = vha->host;
+ /*
+ * Notify libfc that we want to release the lport->npiv_vport
+ */
+ fc_vport_terminate(lport->npiv_vport);
+
+ scsi_host_put(sh);
+ kfree(lport);
+}
+
+
+static ssize_t tcm_qla2xxx_wwn_show_attr_version(
+ struct target_fabric_configfs *tf,
+ char *page)
+{
+ return sprintf(page,
+ "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
+ UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+ utsname()->machine);
+}
+
+TF_WWN_ATTR_RO(tcm_qla2xxx, version);
+
+static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
+ &tcm_qla2xxx_wwn_version.attr,
+ NULL,
+};
+
+static struct target_core_fabric_ops tcm_qla2xxx_ops = {
+ .get_fabric_name = tcm_qla2xxx_get_fabric_name,
+ .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
+ .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
+ .tpg_get_tag = tcm_qla2xxx_get_tag,
+ .tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
+ .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
+ .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache,
+ .tpg_check_demo_mode_write_protect =
+ tcm_qla2xxx_check_demo_write_protect,
+ .tpg_check_prod_mode_write_protect =
+ tcm_qla2xxx_check_prod_write_protect,
+ .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
+ .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
+ .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
+ .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
+ .check_stop_free = tcm_qla2xxx_check_stop_free,
+ .release_cmd = tcm_qla2xxx_release_cmd,
+ .put_session = tcm_qla2xxx_put_session,
+ .shutdown_session = tcm_qla2xxx_shutdown_session,
+ .close_session = tcm_qla2xxx_close_session,
+ .sess_get_index = tcm_qla2xxx_sess_get_index,
+ .sess_get_initiator_sid = NULL,
+ .write_pending = tcm_qla2xxx_write_pending,
+ .write_pending_status = tcm_qla2xxx_write_pending_status,
+ .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
+ .get_task_tag = tcm_qla2xxx_get_task_tag,
+ .get_cmd_state = tcm_qla2xxx_get_cmd_state,
+ .queue_data_in = tcm_qla2xxx_queue_data_in,
+ .queue_status = tcm_qla2xxx_queue_status,
+ .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
+ .get_fabric_sense_len = tcm_qla2xxx_get_fabric_sense_len,
+ .set_fabric_sense_len = tcm_qla2xxx_set_fabric_sense_len,
+ /*
+ * Setup function pointers for generic logic in
+ * target_core_fabric_configfs.c
+ */
+ .fabric_make_wwn = tcm_qla2xxx_make_lport,
+ .fabric_drop_wwn = tcm_qla2xxx_drop_lport,
+ .fabric_make_tpg = tcm_qla2xxx_make_tpg,
+ .fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
+ .fabric_post_link = NULL,
+ .fabric_pre_unlink = NULL,
+ .fabric_make_np = NULL,
+ .fabric_drop_np = NULL,
+ .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
+ .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
+};
+
+static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
+ .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
+ .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
+ .tpg_get_wwn = tcm_qla2xxx_npiv_get_fabric_wwn,
+ .tpg_get_tag = tcm_qla2xxx_get_tag,
+ .tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
+ .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = tcm_qla2xxx_check_false,
+ .tpg_check_demo_mode_cache = tcm_qla2xxx_check_true,
+ .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
+ .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
+ .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
+ .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
+ .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
+ .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
+ .release_cmd = tcm_qla2xxx_release_cmd,
+ .put_session = tcm_qla2xxx_put_session,
+ .shutdown_session = tcm_qla2xxx_shutdown_session,
+ .close_session = tcm_qla2xxx_close_session,
+ .sess_get_index = tcm_qla2xxx_sess_get_index,
+ .sess_get_initiator_sid = NULL,
+ .write_pending = tcm_qla2xxx_write_pending,
+ .write_pending_status = tcm_qla2xxx_write_pending_status,
+ .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
+ .get_task_tag = tcm_qla2xxx_get_task_tag,
+ .get_cmd_state = tcm_qla2xxx_get_cmd_state,
+ .queue_data_in = tcm_qla2xxx_queue_data_in,
+ .queue_status = tcm_qla2xxx_queue_status,
+ .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
+ .get_fabric_sense_len = tcm_qla2xxx_get_fabric_sense_len,
+ .set_fabric_sense_len = tcm_qla2xxx_set_fabric_sense_len,
+ /*
+ * Setup function pointers for generic logic in
+ * target_core_fabric_configfs.c
+ */
+ .fabric_make_wwn = tcm_qla2xxx_npiv_make_lport,
+ .fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport,
+ .fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg,
+ .fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
+ .fabric_post_link = NULL,
+ .fabric_pre_unlink = NULL,
+ .fabric_make_np = NULL,
+ .fabric_drop_np = NULL,
+ .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
+ .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
+};
+
+static int tcm_qla2xxx_register_configfs(void)
+{
+ struct target_fabric_configfs *fabric, *npiv_fabric;
+ int ret;
+
+ pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
+ UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+ utsname()->machine);
+ /*
+ * Register the top level struct config_item_type with TCM core
+ */
+ fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx");
+ if (IS_ERR(fabric)) {
+ pr_err("target_fabric_configfs_init() failed\n");
+ return PTR_ERR(fabric);
+ }
+ /*
+ * Setup fabric->tf_ops from our local tcm_qla2xxx_ops
+ */
+ fabric->tf_ops = tcm_qla2xxx_ops;
+ /*
+ * Setup default attribute lists for various fabric->tf_cit_tmpl
+ */
+ TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs =
+ tcm_qla2xxx_tpg_attrib_attrs;
+ TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ /*
+ * Register the fabric for use within TCM
+ */
+ ret = target_fabric_configfs_register(fabric);
+ if (ret < 0) {
+ pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
+ return ret;
+ }
+ /*
+ * Setup our local pointer to *fabric
+ */
+ tcm_qla2xxx_fabric_configfs = fabric;
+ pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n");
+
+ /*
+ * Register the top level struct config_item_type for NPIV with TCM core
+ */
+ npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv");
+ if (IS_ERR(npiv_fabric)) {
+ pr_err("target_fabric_configfs_init() failed\n");
+ ret = PTR_ERR(npiv_fabric);
+ goto out_fabric;
+ }
+ /*
+ * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops
+ */
+ npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops;
+ /*
+ * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
+ */
+ TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+ /*
+ * Register the npiv_fabric for use within TCM
+ */
+ ret = target_fabric_configfs_register(npiv_fabric);
+ if (ret < 0) {
+ pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
+ goto out_fabric;
+ }
+ /*
+ * Setup our local pointer to *npiv_fabric
+ */
+ tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric;
+ pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n");
+
+ tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
+ WQ_MEM_RECLAIM, 0);
+ if (!tcm_qla2xxx_free_wq) {
+ ret = -ENOMEM;
+ goto out_fabric_npiv;
+ }
+
+ tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
+ if (!tcm_qla2xxx_cmd_wq) {
+ ret = -ENOMEM;
+ goto out_free_wq;
+ }
+
+ return 0;
+
+out_free_wq:
+ destroy_workqueue(tcm_qla2xxx_free_wq);
+out_fabric_npiv:
+ target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
+out_fabric:
+ target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
+ return ret;
+}
+
+static void tcm_qla2xxx_deregister_configfs(void)
+{
+ destroy_workqueue(tcm_qla2xxx_cmd_wq);
+ destroy_workqueue(tcm_qla2xxx_free_wq);
+
+ target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
+ tcm_qla2xxx_fabric_configfs = NULL;
+ pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n");
+
+ target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
+ tcm_qla2xxx_npiv_fabric_configfs = NULL;
+ pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n");
+}
+
+static int __init tcm_qla2xxx_init(void)
+{
+ int ret;
+
+ ret = tcm_qla2xxx_register_configfs();
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void __exit tcm_qla2xxx_exit(void)
+{
+ tcm_qla2xxx_deregister_configfs();
+}
+
+MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
+MODULE_LICENSE("GPL");
+module_init(tcm_qla2xxx_init);
+module_exit(tcm_qla2xxx_exit);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
new file mode 100644
index 0000000..8254981
--- /dev/null
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -0,0 +1,82 @@
+#include <target/target_core_base.h>
+#include <linux/btree.h>
+
+#define TCM_QLA2XXX_VERSION "v0.1"
+/* length of ASCII WWPNs including pad */
+#define TCM_QLA2XXX_NAMELEN 32
+/* length of ASCII NPIV 'WWPN+WWNN' including pad */
+#define TCM_QLA2XXX_NPIV_NAMELEN 66
+
+#include "qla_target.h"
+
+struct tcm_qla2xxx_nacl {
+ /* From libfc struct fc_rport->port_id */
+ u32 nport_id;
+ /* Binary World Wide unique Node Name for remote FC Initiator Nport */
+ u64 nport_wwnn;
+ /* ASCII formatted WWPN for FC Initiator Nport */
+ char nport_name[TCM_QLA2XXX_NAMELEN];
+ /* Pointer to qla_tgt_sess */
+ struct qla_tgt_sess *qla_tgt_sess;
+ /* Pointer to TCM FC nexus */
+ struct se_session *nport_nexus;
+ /* Returned by tcm_qla2xxx_make_nodeacl() */
+ struct se_node_acl se_node_acl;
+};
+
+struct tcm_qla2xxx_tpg_attrib {
+ int generate_node_acls;
+ int cache_dynamic_acls;
+ int demo_mode_write_protect;
+ int prod_mode_write_protect;
+};
+
+struct tcm_qla2xxx_tpg {
+ /* FC lport target portal group tag for TCM */
+ u16 lport_tpgt;
+ /* Atomic bit to determine TPG active status */
+ atomic_t lport_tpg_enabled;
+ /* Pointer back to tcm_qla2xxx_lport */
+ struct tcm_qla2xxx_lport *lport;
+ /* Used by tcm_qla2xxx_tpg_attrib_cit */
+ struct tcm_qla2xxx_tpg_attrib tpg_attrib;
+ /* Returned by tcm_qla2xxx_make_tpg() */
+ struct se_portal_group se_tpg;
+};
+
+#define QLA_TPG_ATTRIB(tpg) (&(tpg)->tpg_attrib)
+
+struct tcm_qla2xxx_fc_loopid {
+ struct se_node_acl *se_nacl;
+};
+
+struct tcm_qla2xxx_lport {
+ /* SCSI protocol the lport is providing */
+ u8 lport_proto_id;
+ /* Binary World Wide unique Port Name for FC Target Lport */
+ u64 lport_wwpn;
+ /* Binary World Wide unique Port Name for FC NPIV Target Lport */
+ u64 lport_npiv_wwpn;
+ /* Binary World Wide unique Node Name for FC NPIV Target Lport */
+ u64 lport_npiv_wwnn;
+ /* ASCII formatted WWPN for FC Target Lport */
+ char lport_name[TCM_QLA2XXX_NAMELEN];
+ /* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
+ char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
+ /* map for fc_port pointers in 24-bit FC Port ID space */
+ struct btree_head32 lport_fcport_map;
+ /* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */
+ struct tcm_qla2xxx_fc_loopid *lport_loopid_map;
+ /* Pointer to struct scsi_qla_host from qla2xxx LLD */
+ struct scsi_qla_host *qla_vha;
+ /* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */
+ struct scsi_qla_host *qla_npiv_vp;
+	/* Embedded struct qla_tgt for this lport */
+ struct qla_tgt lport_qla_tgt;
+ /* Pointer to struct fc_vport for NPIV vport from libfc */
+ struct fc_vport *npiv_vport;
+ /* Pointer to TPG=1 for non NPIV mode */
+ struct tcm_qla2xxx_tpg *tpg_1;
+ /* Returned by tcm_qla2xxx_make_lport() */
+ struct se_wwn lport_wwn;
+};
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index 0b0a7d4..c681b2a 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -9,6 +9,140 @@
#include "ql4_glbl.h"
#include "ql4_dbg.h"
+static ssize_t
+qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
+ struct bin_attribute *ba, char *buf, loff_t off,
+ size_t count)
+{
+ struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+
+ if (!is_qla8022(ha))
+ return -EINVAL;
+
+ if (!test_bit(AF_82XX_DUMP_READING, &ha->flags))
+ return 0;
+
+ return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
+ ha->fw_dump_size);
+}
+
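+/*
+ * Values written to the fw_dump attribute control minidump handling:
+ *   0 - clear the dump collection flags and reload the minidump template
+ *   1 - mark a captured firmware dump as ready for reading
+ *   2 - request an adapter reset by setting the NEED_RESET device state
+ */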
+static ssize_t
+qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
+ struct bin_attribute *ba, char *buf, loff_t off,
+ size_t count)
+{
+ struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ uint32_t dev_state;
+ long reading;
+ int ret = 0;
+
+ if (!is_qla8022(ha))
+ return -EINVAL;
+
+ if (off != 0)
+ return ret;
+
+ buf[1] = 0;
+ ret = kstrtol(buf, 10, &reading);
+ if (ret) {
+ ql4_printk(KERN_ERR, ha, "%s: Invalid input. Return err %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ switch (reading) {
+ case 0:
+ /* clear dump collection flags */
+ if (test_and_clear_bit(AF_82XX_DUMP_READING, &ha->flags)) {
+ clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+ /* Reload minidump template */
+ qla4xxx_alloc_fw_dump(ha);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Firmware template reloaded\n"));
+ }
+ break;
+ case 1:
+ /* Set flag to read dump */
+ if (test_bit(AF_82XX_FW_DUMPED, &ha->flags) &&
+ !test_bit(AF_82XX_DUMP_READING, &ha->flags)) {
+ set_bit(AF_82XX_DUMP_READING, &ha->flags);
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Raw firmware dump ready for read on (%ld).\n",
+ ha->host_no));
+ }
+ break;
+ case 2:
+ /* Reset HBA */
+ qla4_8xxx_idc_lock(ha);
+ dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ if (dev_state == QLA82XX_DEV_READY) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: Setting Need reset, reset_owner is 0x%x.\n",
+ __func__, ha->func_num);
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA82XX_DEV_NEED_RESET);
+ set_bit(AF_82XX_RST_OWNER, &ha->flags);
+ } else
+ ql4_printk(KERN_INFO, ha,
+ "%s: Reset not performed as device state is 0x%x\n",
+ __func__, dev_state);
+
+ qla4_8xxx_idc_unlock(ha);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+
+ return count;
+}
+
+static struct bin_attribute sysfs_fw_dump_attr = {
+ .attr = {
+ .name = "fw_dump",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+ .size = 0,
+ .read = qla4_8xxx_sysfs_read_fw_dump,
+ .write = qla4_8xxx_sysfs_write_fw_dump,
+};
+
+static struct sysfs_entry {
+ char *name;
+ struct bin_attribute *attr;
+} bin_file_entries[] = {
+ { "fw_dump", &sysfs_fw_dump_attr },
+ { NULL },
+};
+
+void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha)
+{
+ struct Scsi_Host *host = ha->host;
+ struct sysfs_entry *iter;
+ int ret;
+
+ for (iter = bin_file_entries; iter->name; iter++) {
+ ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
+ iter->attr);
+ if (ret)
+ ql4_printk(KERN_ERR, ha,
+ "Unable to create sysfs %s binary attribute (%d).\n",
+ iter->name, ret);
+ }
+}
+
+void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha)
+{
+ struct Scsi_Host *host = ha->host;
+ struct sysfs_entry *iter;
+
+ for (iter = bin_file_entries; iter->name; iter++)
+ sysfs_remove_bin_file(&host->shost_gendev.kobj,
+ iter->attr);
+}
+
/* Scsi_Host attributes. */
static ssize_t
qla4xxx_fw_version_show(struct device *dev,
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 7f2492e..96a5616 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -398,6 +398,16 @@ struct isp_operations {
int (*get_sys_info) (struct scsi_qla_host *);
};
+struct ql4_mdump_size_table {
+ uint32_t size;
+ uint32_t size_cmask_02;
+ uint32_t size_cmask_04;
+ uint32_t size_cmask_08;
+ uint32_t size_cmask_10;
+ uint32_t size_cmask_FF;
+ uint32_t version;
+};
+
/*qla4xxx ipaddress configuration details */
struct ipaddress_config {
uint16_t ipv4_options;
@@ -485,6 +495,10 @@ struct scsi_qla_host {
#define AF_EEH_BUSY 20 /* 0x00100000 */
#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */
+#define AF_82XX_FW_DUMPED 24 /* 0x01000000 */
+#define AF_82XX_RST_OWNER 25 /* 0x02000000 */
+#define AF_82XX_DUMP_READING 26 /* 0x04000000 */
+
unsigned long dpc_flags;
#define DPC_RESET_HA 1 /* 0x00000002 */
@@ -662,6 +676,11 @@ struct scsi_qla_host {
uint32_t nx_dev_init_timeout;
uint32_t nx_reset_timeout;
+ void *fw_dump;
+ uint32_t fw_dump_size;
+ uint32_t fw_dump_capture_mask;
+ void *fw_dump_tmplt_hdr;
+ uint32_t fw_dump_tmplt_size;
struct completion mbx_intr_comp;
@@ -936,4 +955,7 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
#define PROCESS_ALL_AENS 0
#define FLUSH_DDB_CHANGED_AENS 1
+/* Defines for udev events */
+#define QL4_UEVENT_CODE_FW_DUMP 0
+
#endif /*_QLA4XXX_H */
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 210cd1d..7240948 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -385,6 +385,11 @@ struct qla_flt_region {
#define MBOX_CMD_GET_IP_ADDR_STATE 0x0091
#define MBOX_CMD_SEND_IPV6_ROUTER_SOL 0x0092
#define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR 0x0093
+#define MBOX_CMD_MINIDUMP 0x0129
+
+/* Minidump subcommand */
+#define MINIDUMP_GET_SIZE_SUBCOMMAND 0x00
+#define MINIDUMP_GET_TMPLT_SUBCOMMAND 0x01
/* Mailbox 1 */
#define FW_STATE_READY 0x0000
@@ -1190,4 +1195,27 @@ struct ql_iscsi_stats {
uint8_t reserved2[264]; /* 0x0308 - 0x040F */
};
+#define QLA82XX_DBG_STATE_ARRAY_LEN 16
+#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN 8
+#define QLA82XX_DBG_RSVD_ARRAY_LEN 8
+
+struct qla4_8xxx_minidump_template_hdr {
+ uint32_t entry_type;
+ uint32_t first_entry_offset;
+ uint32_t size_of_template;
+ uint32_t capture_debug_level;
+ uint32_t num_of_entries;
+ uint32_t version;
+ uint32_t driver_timestamp;
+ uint32_t checksum;
+
+ uint32_t driver_capture_mask;
+ uint32_t driver_info_word2;
+ uint32_t driver_info_word3;
+ uint32_t driver_info_word4;
+
+ uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN];
+ uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN];
+};
+
#endif /* _QLA4X_FW_H */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 9105366..20b49d0 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -196,10 +196,18 @@ int qla4xxx_bsg_request(struct bsg_job *bsg_job);
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
+int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
+ dma_addr_t phys_addr);
+int qla4xxx_req_template_size(struct scsi_qla_host *ha);
+void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha);
+void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha);
+void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha);
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
extern int ql4xenablemsix;
+extern int ql4xmdcapmask;
+extern int ql4xenablemd;
extern struct device_attribute *qla4xxx_host_attrs[];
#endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 90ee5d8..bf36723 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -277,6 +277,94 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
return ipv4_wait|ipv6_wait;
}
+/**
+ * qla4xxx_alloc_fw_dump - Allocate memory for minidump data.
+ * @ha: pointer to host adapter structure.
+ **/
+void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
+{
+ int status;
+ uint32_t capture_debug_level;
+ int hdr_entry_bit, k;
+ void *md_tmp;
+ dma_addr_t md_tmp_dma;
+ struct qla4_8xxx_minidump_template_hdr *md_hdr;
+
+ if (ha->fw_dump) {
+ ql4_printk(KERN_WARNING, ha,
+ "Firmware dump previously allocated.\n");
+ return;
+ }
+
+ status = qla4xxx_req_template_size(ha);
+ if (status != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha,
+ "scsi%ld: Failed to get template size\n",
+ ha->host_no);
+ return;
+ }
+
+ clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+
+ /* Allocate memory for saving the template */
+	md_tmp = dma_alloc_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
+				    &md_tmp_dma, GFP_KERNEL);
+	if (!md_tmp)
+		return;
+
+ /* Request template */
+ status = qla4xxx_get_minidump_template(ha, md_tmp_dma);
+ if (status != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha,
+ "scsi%ld: Failed to get minidump template\n",
+ ha->host_no);
+ goto alloc_cleanup;
+ }
+
+ md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp;
+
+ capture_debug_level = md_hdr->capture_debug_level;
+
+ /* Get capture mask based on module loadtime setting. */
+ if (ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F)
+ ha->fw_dump_capture_mask = ql4xmdcapmask;
+ else
+ ha->fw_dump_capture_mask = capture_debug_level;
+
+ md_hdr->driver_capture_mask = ha->fw_dump_capture_mask;
+
+	DEBUG2(ql4_printk(KERN_INFO, ha, "Minidump num of entries = %d\n",
+ md_hdr->num_of_entries));
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Dump template size = %d\n",
+ ha->fw_dump_tmplt_size));
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Selected Capture mask =0x%x\n",
+ ha->fw_dump_capture_mask));
+
+ /* Calculate fw_dump_size */
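+	/* Each set bit of the capture mask (0x02..0x80) selects the
+	 * matching entry of the template's capture_size_array. */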
+ for (hdr_entry_bit = 0x2, k = 1; (hdr_entry_bit & 0xFF);
+ hdr_entry_bit <<= 1, k++) {
+ if (hdr_entry_bit & ha->fw_dump_capture_mask)
+ ha->fw_dump_size += md_hdr->capture_size_array[k];
+ }
+
+ /* Total firmware dump size including command header */
+ ha->fw_dump_size += ha->fw_dump_tmplt_size;
+ ha->fw_dump = vmalloc(ha->fw_dump_size);
+ if (!ha->fw_dump)
+ goto alloc_cleanup;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+			  "Minidump Template Size = 0x%x KB\n",
+ ha->fw_dump_tmplt_size));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Total Minidump size = 0x%x KB\n", ha->fw_dump_size));
+
+ memcpy(ha->fw_dump, md_tmp, ha->fw_dump_tmplt_size);
+ ha->fw_dump_tmplt_hdr = ha->fw_dump;
+
+alloc_cleanup:
+ dma_free_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
+ md_tmp, md_tmp_dma);
+}
+
static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
{
uint32_t timeout_count;
@@ -445,9 +533,13 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
"control block\n", ha->host_no, __func__));
return status;
}
+
if (!qla4xxx_fw_ready(ha))
return status;
+ if (is_qla8022(ha) && !test_bit(AF_INIT_DONE, &ha->flags))
+ qla4xxx_alloc_fw_dump(ha);
+
return qla4xxx_get_firmware_status(ha);
}
@@ -884,8 +976,8 @@ int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
switch (state) {
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
- ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_param(ha, ddb_entry);
+ ddb_entry->unblock_sess(ddb_entry->sess);
status = QLA_SUCCESS;
break;
case DDB_DS_SESSION_FAILED:
@@ -897,6 +989,7 @@ int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
}
break;
case DDB_DS_SESSION_ACTIVE:
+ case DDB_DS_DISCOVERY:
switch (state) {
case DDB_DS_SESSION_FAILED:
/*
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 7ac21da..cab8f66 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -51,25 +51,6 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
}
}
- if (is_qla8022(ha)) {
- if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
- DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
- "prematurely completing mbx cmd as firmware "
- "recovery detected\n", ha->host_no, __func__));
- return status;
- }
- /* Do not send any mbx cmd if h/w is in failed state*/
- qla4_8xxx_idc_lock(ha);
- dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- qla4_8xxx_idc_unlock(ha);
- if (dev_state == QLA82XX_DEV_FAILED) {
- ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in "
- "failed state, do not send any mailbox commands\n",
- ha->host_no, __func__);
- return status;
- }
- }
-
if ((is_aer_supported(ha)) &&
(test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) {
DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, "
@@ -96,6 +77,25 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
msleep(10);
}
+ if (is_qla8022(ha)) {
+ if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n",
+ ha->host_no, __func__));
+ goto mbox_exit;
+ }
+ /* Do not send any mbx cmd if h/w is in failed state*/
+ qla4_8xxx_idc_lock(ha);
+ dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ qla4_8xxx_idc_unlock(ha);
+ if (dev_state == QLA82XX_DEV_FAILED) {
+ ql4_printk(KERN_WARNING, ha,
+ "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n",
+ ha->host_no, __func__);
+ goto mbox_exit;
+ }
+ }
+
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->mbox_status_count = outCount;
@@ -270,6 +270,79 @@ mbox_exit:
return status;
}
+/**
+ * qla4xxx_get_minidump_template - Get the firmware template
+ * @ha: Pointer to host adapter structure.
+ * @phys_addr: dma address for template
+ *
+ * Obtain the minidump template from firmware during initialization
+ * as it may not be available when minidump is desired.
+ **/
+int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
+ dma_addr_t phys_addr)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_MINIDUMP;
+ mbox_cmd[1] = MINIDUMP_GET_TMPLT_SUBCOMMAND;
+ mbox_cmd[2] = LSDW(phys_addr);
+ mbox_cmd[3] = MSDW(phys_addr);
+ mbox_cmd[4] = ha->fw_dump_tmplt_size;
+ mbox_cmd[5] = 0;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: %s: Cmd = %08X, mbx[0] = 0x%04x, mbx[1] = 0x%04x\n",
+ ha->host_no, __func__, mbox_cmd[0],
+ mbox_sts[0], mbox_sts[1]));
+ }
+ return status;
+}
+
+/**
+ * qla4xxx_req_template_size - Get minidump template size from firmware.
+ * @ha: Pointer to host adapter structure.
+ **/
+int qla4xxx_req_template_size(struct scsi_qla_host *ha)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+
+ mbox_cmd[0] = MBOX_CMD_MINIDUMP;
+ mbox_cmd[1] = MINIDUMP_GET_SIZE_SUBCOMMAND;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
+ &mbox_sts[0]);
+ if (status == QLA_SUCCESS) {
+ ha->fw_dump_tmplt_size = mbox_sts[1];
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: sts[0]=0x%04x, template size=0x%04x, size_cm_02=0x%04x, size_cm_04=0x%04x, size_cm_08=0x%04x, size_cm_10=0x%04x, size_cm_FF=0x%04x, version=0x%04x\n",
+ __func__, mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3], mbox_sts[4],
+ mbox_sts[5], mbox_sts[6], mbox_sts[7]));
+ if (ha->fw_dump_tmplt_size == 0)
+ status = QLA_ERROR;
+ } else {
+ ql4_printk(KERN_WARNING, ha,
+ "%s: Error sts[0]=0x%04x, mbx[1]=0x%04x\n",
+ __func__, mbox_sts[0], mbox_sts[1]);
+ status = QLA_ERROR;
+ }
+
+ return status;
+}
+
void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha)
{
set_bit(AF_FW_RECOVERY, &ha->flags);
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index e1e46b6..228b670 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -7,6 +7,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/pci.h>
+#include <linux/ratelimit.h>
#include "ql4_def.h"
#include "ql4_glbl.h"
@@ -420,6 +421,38 @@ qla4_8xxx_rd_32(struct scsi_qla_host *ha, ulong off)
return data;
}
+/* Minidump related functions */
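+/*
+ * qla4_8xxx_md_rw_32 - access a CRB register through the 2M CRB window.
+ * With @flag set, @data is written to the register at @off; with @flag
+ * clear, the register at @off is read and its value returned.
+ */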
+static int qla4_8xxx_md_rw_32(struct scsi_qla_host *ha, uint32_t off,
+ u32 data, uint8_t flag)
+{
+ uint32_t win_read, off_value, rval = QLA_SUCCESS;
+
+ off_value = off & 0xFFFF0000;
+ writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+
+ /* Read back value to make sure write has gone through before trying
+ * to use it.
+ */
+ win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ if (win_read != off_value) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
+ __func__, off_value, win_read, off));
+ return QLA_ERROR;
+ }
+
+ off_value = off & 0x0000FFFF;
+
+ if (flag)
+ writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M +
+ ha->nx_pcibase));
+ else
+ rval = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
+ ha->nx_pcibase));
+
+ return rval;
+}
+
#define CRB_WIN_LOCK_TIMEOUT 100000000
int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha)
@@ -1252,9 +1285,9 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
}
if (j >= MAX_CTL_CHECK) {
- if (printk_ratelimit())
- ql4_printk(KERN_ERR, ha,
- "failed to read through agent\n");
+ printk_ratelimited(KERN_ERR
+ "%s: failed to read through agent\n",
+ __func__);
break;
}
@@ -1390,7 +1423,8 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
if (j >= MAX_CTL_CHECK) {
if (printk_ratelimit())
ql4_printk(KERN_ERR, ha,
- "failed to write through agent\n");
+					"%s: failed to write through agent\n",
+ __func__);
ret = -1;
break;
}
@@ -1462,6 +1496,8 @@ qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
drv_active |= (1 << (ha->func_num * 4));
+ ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
+ __func__, ha->host_no, drv_active);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
}
@@ -1472,6 +1508,8 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
drv_active &= ~(1 << (ha->func_num * 4));
+ ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
+ __func__, ha->host_no, drv_active);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
}
@@ -1497,6 +1535,8 @@ qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
drv_state |= (1 << (ha->func_num * 4));
+ ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
+ __func__, ha->host_no, drv_state);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
}
@@ -1507,6 +1547,8 @@ qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
drv_state &= ~(1 << (ha->func_num * 4));
+ ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
+ __func__, ha->host_no, drv_state);
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
}
@@ -1601,6 +1643,629 @@ static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
qla4_8xxx_rom_unlock(ha);
}
+static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+ struct qla82xx_minidump_entry_crb *crb_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ crb_hdr = (struct qla82xx_minidump_entry_crb *)entry_hdr;
+ r_addr = crb_hdr->addr;
+ r_stride = crb_hdr->crb_strd.addr_stride;
+ loop_cnt = crb_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(r_addr);
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += r_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t addr, r_addr, c_addr, t_r_addr;
+ uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+ unsigned long p_wait, w_time, p_mask;
+ uint32_t c_value_w, c_value_r;
+ struct qla82xx_minidump_entry_cache *cache_hdr;
+ int rval = QLA_ERROR;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
+
+ loop_count = cache_hdr->op_count;
+ r_addr = cache_hdr->read_addr;
+ c_addr = cache_hdr->control_addr;
+ c_value_w = cache_hdr->cache_ctrl.write_value;
+
+ t_r_addr = cache_hdr->tag_reg_addr;
+ t_value = cache_hdr->addr_ctrl.init_tag_value;
+ r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+ p_wait = cache_hdr->cache_ctrl.poll_wait;
+ p_mask = cache_hdr->cache_ctrl.poll_mask;
+
+ for (i = 0; i < loop_count; i++) {
+ qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
+
+ if (c_value_w)
+ qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
+
+ if (p_mask) {
+ w_time = jiffies + p_wait;
+ do {
+ c_value_r = qla4_8xxx_md_rw_32(ha, c_addr,
+ 0, 0);
+ if ((c_value_r & p_mask) == 0) {
+ break;
+ } else if (time_after_eq(jiffies, w_time)) {
+ /* capturing dump failed */
+ return rval;
+ }
+ } while (1);
+ }
+
+ addr = r_addr;
+ for (k = 0; k < r_cnt; k++) {
+ r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(r_value);
+ addr += cache_hdr->read_ctrl.read_addr_stride;
+ }
+
+ t_value += cache_hdr->addr_ctrl.tag_value_stride;
+ }
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+}
+
+static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr)
+{
+ struct qla82xx_minidump_entry_crb *crb_entry;
+ uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS;
+ uint32_t crb_addr;
+ unsigned long wtime;
+ struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
+ int i;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+ ha->fw_dump_tmplt_hdr;
+ crb_entry = (struct qla82xx_minidump_entry_crb *)entry_hdr;
+
+ crb_addr = crb_entry->addr;
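+	/* Apply each opcode encoded in this control entry: writes,
+	 * read-modify-writes, polls and saved-state operations. */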
+ for (i = 0; i < crb_entry->op_count; i++) {
+ opcode = crb_entry->crb_ctrl.opcode;
+ if (opcode & QLA82XX_DBG_OPCODE_WR) {
+ qla4_8xxx_md_rw_32(ha, crb_addr,
+ crb_entry->value_1, 1);
+ opcode &= ~QLA82XX_DBG_OPCODE_WR;
+ }
+ if (opcode & QLA82XX_DBG_OPCODE_RW) {
+ read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+ qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
+ opcode &= ~QLA82XX_DBG_OPCODE_RW;
+ }
+ if (opcode & QLA82XX_DBG_OPCODE_AND) {
+ read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+ read_value &= crb_entry->value_2;
+ opcode &= ~QLA82XX_DBG_OPCODE_AND;
+ if (opcode & QLA82XX_DBG_OPCODE_OR) {
+ read_value |= crb_entry->value_3;
+ opcode &= ~QLA82XX_DBG_OPCODE_OR;
+ }
+ qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
+ }
+ if (opcode & QLA82XX_DBG_OPCODE_OR) {
+ read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+ read_value |= crb_entry->value_3;
+ qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
+ opcode &= ~QLA82XX_DBG_OPCODE_OR;
+ }
+ if (opcode & QLA82XX_DBG_OPCODE_POLL) {
+ poll_time = crb_entry->crb_strd.poll_timeout;
+ wtime = jiffies + poll_time;
+ read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
+
+ do {
+ if ((read_value & crb_entry->value_2) ==
+ crb_entry->value_1)
+ break;
+ else if (time_after_eq(jiffies, wtime)) {
+ /* capturing dump failed */
+ rval = QLA_ERROR;
+ break;
+ } else
+ read_value = qla4_8xxx_md_rw_32(ha,
+ crb_addr, 0, 0);
+ } while (1);
+ opcode &= ~QLA82XX_DBG_OPCODE_POLL;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
+ if (crb_entry->crb_strd.state_index_a) {
+ index = crb_entry->crb_strd.state_index_a;
+ addr = tmplt_hdr->saved_state_array[index];
+ } else {
+ addr = crb_addr;
+ }
+
+ read_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+ index = crb_entry->crb_ctrl.state_index_v;
+ tmplt_hdr->saved_state_array[index] = read_value;
+ opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
+ if (crb_entry->crb_strd.state_index_a) {
+ index = crb_entry->crb_strd.state_index_a;
+ addr = tmplt_hdr->saved_state_array[index];
+ } else {
+ addr = crb_addr;
+ }
+
+ if (crb_entry->crb_ctrl.state_index_v) {
+ index = crb_entry->crb_ctrl.state_index_v;
+ read_value =
+ tmplt_hdr->saved_state_array[index];
+ } else {
+ read_value = crb_entry->value_1;
+ }
+
+ qla4_8xxx_md_rw_32(ha, addr, read_value, 1);
+ opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
+ index = crb_entry->crb_ctrl.state_index_v;
+ read_value = tmplt_hdr->saved_state_array[index];
+ read_value <<= crb_entry->crb_ctrl.shl;
+ read_value >>= crb_entry->crb_ctrl.shr;
+ if (crb_entry->value_2)
+ read_value &= crb_entry->value_2;
+ read_value |= crb_entry->value_3;
+ read_value += crb_entry->value_1;
+ tmplt_hdr->saved_state_array[index] = read_value;
+ opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
+ }
+ crb_addr += crb_entry->crb_strd.addr_stride;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));
+ return rval;
+}
+
+static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+ struct qla82xx_minidump_entry_rdocm *ocm_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ ocm_hdr = (struct qla82xx_minidump_entry_rdocm *)entry_hdr;
+ r_addr = ocm_hdr->read_addr;
+ r_stride = ocm_hdr->read_addr_stride;
+ loop_cnt = ocm_hdr->op_count;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
+ __func__, r_addr, r_stride, loop_cnt));
+
+ for (i = 0; i < loop_cnt; i++) {
+ r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += r_stride;
+ }
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n",
+ __func__, (loop_cnt * sizeof(uint32_t))));
+ *d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
+ struct qla82xx_minidump_entry_mux *mux_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ mux_hdr = (struct qla82xx_minidump_entry_mux *)entry_hdr;
+ r_addr = mux_hdr->read_addr;
+ s_addr = mux_hdr->select_addr;
+ s_stride = mux_hdr->select_value_stride;
+ s_value = mux_hdr->select_value;
+ loop_cnt = mux_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla4_8xxx_md_rw_32(ha, s_addr, s_value, 1);
+ r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(s_value);
+ *data_ptr++ = cpu_to_le32(r_value);
+ s_value += s_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t addr, r_addr, c_addr, t_r_addr;
+ uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+ uint32_t c_value_w;
+ struct qla82xx_minidump_entry_cache *cache_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
+ loop_count = cache_hdr->op_count;
+ r_addr = cache_hdr->read_addr;
+ c_addr = cache_hdr->control_addr;
+ c_value_w = cache_hdr->cache_ctrl.write_value;
+
+ t_r_addr = cache_hdr->tag_reg_addr;
+ t_value = cache_hdr->addr_ctrl.init_tag_value;
+ r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+
+ for (i = 0; i < loop_count; i++) {
+ qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
+ qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
+ addr = r_addr;
+ for (k = 0; k < r_cnt; k++) {
+ r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(r_value);
+ addr += cache_hdr->read_ctrl.read_addr_stride;
+ }
+ t_value += cache_hdr->addr_ctrl.tag_value_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t s_addr, r_addr;
+ uint32_t r_stride, r_value, r_cnt, qid = 0;
+ uint32_t i, k, loop_cnt;
+ struct qla82xx_minidump_entry_queue *q_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ q_hdr = (struct qla82xx_minidump_entry_queue *)entry_hdr;
+ s_addr = q_hdr->select_addr;
+ r_cnt = q_hdr->rd_strd.read_addr_cnt;
+ r_stride = q_hdr->rd_strd.read_addr_stride;
+ loop_cnt = q_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla4_8xxx_md_rw_32(ha, s_addr, qid, 1);
+ r_addr = q_hdr->read_addr;
+ for (k = 0; k < r_cnt; k++) {
+ r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += r_stride;
+ }
+ qid += q_hdr->q_strd.queue_id_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+#define MD_DIRECT_ROM_WINDOW 0x42110030
+#define MD_DIRECT_ROM_READ_BASE 0x42150000
+
+static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_value;
+ uint32_t i, loop_cnt;
+ struct qla82xx_minidump_entry_rdrom *rom_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ rom_hdr = (struct qla82xx_minidump_entry_rdrom *)entry_hdr;
+ r_addr = rom_hdr->read_addr;
+ loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: flash_addr: 0x%x, read_data_size: 0x%x\n",
+ __func__, r_addr, loop_cnt));
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla4_8xxx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW,
+ (r_addr & 0xFFFF0000), 1);
+ r_value = qla4_8xxx_md_rw_32(ha,
+ MD_DIRECT_ROM_READ_BASE +
+ (r_addr & 0x0000FFFF), 0, 0);
+ *data_ptr++ = cpu_to_le32(r_value);
+ r_addr += sizeof(uint32_t);
+ }
+ *d_ptr = data_ptr;
+}
+
+#define MD_MIU_TEST_AGT_CTRL 0x41000090
+#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
+#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
+
+static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_value, r_data;
+ uint32_t i, j, loop_cnt;
+ struct qla82xx_minidump_entry_rdmem *m_hdr;
+ unsigned long flags;
+ uint32_t *data_ptr = *d_ptr;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+ m_hdr = (struct qla82xx_minidump_entry_rdmem *)entry_hdr;
+ r_addr = m_hdr->read_addr;
+ loop_cnt = m_hdr->read_data_size/16;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
+ __func__, r_addr, m_hdr->read_data_size));
+
+ if (r_addr & 0xf) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+				  "[%s]: Read addr 0x%x not 16 bytes aligned\n",
+ __func__, r_addr));
+ return QLA_ERROR;
+ }
+
+ if (m_hdr->read_data_size % 16) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
+ __func__, m_hdr->read_data_size));
+ return QLA_ERROR;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
+ __func__, r_addr, m_hdr->read_data_size, loop_cnt));
+
+ write_lock_irqsave(&ha->hw_lock, flags);
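+	/* For each 16-byte chunk: program the MIU test agent address,
+	 * start the read, poll for completion and collect four words. */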
+ for (i = 0; i < loop_cnt; i++) {
+ qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
+ r_value = 0;
+ qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
+ r_value = MIU_TA_CTL_ENABLE;
+ qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
+ r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
+ qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ r_value = qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL,
+ 0, 0);
+ if ((r_value & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ printk_ratelimited(KERN_ERR
+ "%s: failed to read through agent\n",
+ __func__);
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ return QLA_SUCCESS;
+ }
+
+ for (j = 0; j < 4; j++) {
+ r_data = qla4_8xxx_md_rw_32(ha,
+ MD_MIU_TEST_AGT_RDDATA[j],
+ 0, 0);
+ *data_ptr++ = cpu_to_le32(r_data);
+ }
+
+ r_addr += 16;
+ }
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%x\n",
+ __func__, (loop_cnt * 16)));
+
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+}
+
+static void ql4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
+ struct qla82xx_minidump_entry_hdr *entry_hdr,
+ int index)
+{
+ entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
+ ha->host_no, index, entry_hdr->entry_type,
+ entry_hdr->d_ctrl.entry_capture_mask));
+}
+
+/**
+ * qla4_8xxx_collect_md_data - Retrieve firmware minidump data.
+ * @ha: pointer to adapter structure
+ **/
+static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
+{
+ int num_entry_hdr = 0;
+ struct qla82xx_minidump_entry_hdr *entry_hdr;
+ struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
+ uint32_t *data_ptr;
+ uint32_t data_collected = 0;
+ int i, rval = QLA_ERROR;
+ uint64_t now;
+ uint32_t timestamp;
+
+ if (!ha->fw_dump) {
+ ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n",
+ __func__, ha->host_no);
+ return rval;
+ }
+
+ tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+ ha->fw_dump_tmplt_hdr;
+ data_ptr = (uint32_t *)((uint8_t *)ha->fw_dump +
+ ha->fw_dump_tmplt_size);
+ data_collected += ha->fw_dump_tmplt_size;
+
+ num_entry_hdr = tmplt_hdr->num_of_entries;
+ ql4_printk(KERN_INFO, ha, "[%s]: starting data ptr: %p\n",
+ __func__, data_ptr);
+ ql4_printk(KERN_INFO, ha,
+ "[%s]: no of entry headers in Template: 0x%x\n",
+ __func__, num_entry_hdr);
+ ql4_printk(KERN_INFO, ha, "[%s]: Capture Mask obtained: 0x%x\n",
+ __func__, ha->fw_dump_capture_mask);
+ ql4_printk(KERN_INFO, ha, "[%s]: Total_data_size 0x%x, %d obtained\n",
+ __func__, ha->fw_dump_size, ha->fw_dump_size);
+
+ /* Update current timestamp before taking dump */
+ now = get_jiffies_64();
+ timestamp = (u32)(jiffies_to_msecs(now) / 1000);
+ tmplt_hdr->driver_timestamp = timestamp;
+
+ entry_hdr = (struct qla82xx_minidump_entry_hdr *)
+ (((uint8_t *)ha->fw_dump_tmplt_hdr) +
+ tmplt_hdr->first_entry_offset);
+
+ /* Walk through the entry headers - validate/perform required action */
+ for (i = 0; i < num_entry_hdr; i++) {
+ if (data_collected >= ha->fw_dump_size) {
+ ql4_printk(KERN_INFO, ha,
+ "Data collected: [0x%x], Total Dump size: [0x%x]\n",
+ data_collected, ha->fw_dump_size);
+ return rval;
+ }
+
+ if (!(entry_hdr->d_ctrl.entry_capture_mask &
+ ha->fw_dump_capture_mask)) {
+ entry_hdr->d_ctrl.driver_flags |=
+ QLA82XX_DBG_SKIPPED_FLAG;
+ goto skip_nxt_entry;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Data collected: [0x%x], Dump size left:[0x%x]\n",
+ data_collected,
+ (ha->fw_dump_size - data_collected)));
+
+ /* Decode the entry type and take required action to capture
+ * debug data
+ */
+ switch (entry_hdr->entry_type) {
+ case QLA82XX_RDEND:
+ ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ case QLA82XX_CNTRL:
+ rval = qla4_8xxx_minidump_process_control(ha,
+ entry_hdr);
+ if (rval != QLA_SUCCESS) {
+ ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA82XX_RDCRB:
+ qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA82XX_RDMEM:
+ rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA82XX_BOARD:
+ case QLA82XX_RDROM:
+ qla4_8xxx_minidump_process_rdrom(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA82XX_L2DTG:
+ case QLA82XX_L2ITG:
+ case QLA82XX_L2DAT:
+ case QLA82XX_L2INS:
+ rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA82XX_L1DAT:
+ case QLA82XX_L1INS:
+ qla4_8xxx_minidump_process_l1cache(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA82XX_RDOCM:
+ qla4_8xxx_minidump_process_rdocm(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA82XX_RDMUX:
+ qla4_8xxx_minidump_process_rdmux(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA82XX_QUEUE:
+ qla4_8xxx_minidump_process_queue(ha, entry_hdr,
+ &data_ptr);
+ break;
+ case QLA82XX_RDNOP:
+ default:
+ ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+ break;
+ }
+
+ data_collected = (uint8_t *)data_ptr -
+ ((uint8_t *)((uint8_t *)ha->fw_dump +
+ ha->fw_dump_tmplt_size));
+skip_nxt_entry:
+ /* next entry in the template */
+ entry_hdr = (struct qla82xx_minidump_entry_hdr *)
+ (((uint8_t *)entry_hdr) +
+ entry_hdr->entry_size);
+ }
+
+ if ((data_collected + ha->fw_dump_tmplt_size) != ha->fw_dump_size) {
+ ql4_printk(KERN_INFO, ha,
+ "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
+ data_collected, ha->fw_dump_size);
+ goto md_failed;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s Last entry: 0x%x\n",
+ __func__, i));
+md_failed:
+ return rval;
+}
+
+/**
+ * qla4_8xxx_uevent_emit - Send uevent when the firmware dump is ready.
+ * @ha: pointer to adapter structure
+ * @code: uevent code to emit (e.g. QL4_UEVENT_CODE_FW_DUMP)
+ **/
+static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
+{
+ char event_string[40];
+ char *envp[] = { event_string, NULL };
+
+ switch (code) {
+ case QL4_UEVENT_CODE_FW_DUMP:
+ snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
+ ha->host_no);
+ break;
+ default:
+ /*do nothing*/
+ break;
+ }
+
+	kobject_uevent_env(&ha->pdev->dev.kobj, KOBJ_CHANGE, envp);
+}
+
/**
* qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
* @ha: pointer to adapter structure
@@ -1659,6 +2324,15 @@ dev_initialize:
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
qla4_8xxx_idc_unlock(ha);
+ if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) &&
+ !test_and_set_bit(AF_82XX_FW_DUMPED, &ha->flags)) {
+ if (!qla4_8xxx_collect_md_data(ha)) {
+ qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP);
+ } else {
+ ql4_printk(KERN_INFO, ha, "Unable to collect minidump\n");
+ clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
+ }
+ }
rval = qla4_8xxx_try_start_fw(ha);
qla4_8xxx_idc_lock(ha);
@@ -1686,6 +2360,7 @@ static void
qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
{
uint32_t dev_state, drv_state, drv_active;
+ uint32_t active_mask = 0xFFFFFFFF;
unsigned long reset_timeout;
ql4_printk(KERN_INFO, ha,
@@ -1697,7 +2372,14 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
qla4_8xxx_idc_lock(ha);
}
- qla4_8xxx_set_rst_ready(ha);
+ if (!test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s(%ld): reset acknowledged\n",
+ __func__, ha->host_no));
+ qla4_8xxx_set_rst_ready(ha);
+ } else {
+ active_mask = (~(1 << (ha->func_num * 4)));
+ }
/* wait for 10 seconds for reset ack from all functions */
reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
@@ -1709,12 +2391,24 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
"%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
__func__, ha->host_no, drv_state, drv_active);
- while (drv_state != drv_active) {
+ while (drv_state != (drv_active & active_mask)) {
if (time_after_eq(jiffies, reset_timeout)) {
- printk("%s: RESET TIMEOUT!\n", DRIVER_NAME);
+ ql4_printk(KERN_INFO, ha,
+ "%s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
+ DRIVER_NAME, drv_state, drv_active);
break;
}
+ /*
+ * When reset_owner times out, check which functions
+ * acked/did not ack
+ */
+ if (test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
+ ql4_printk(KERN_INFO, ha,
+ "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
+ __func__, ha->host_no, drv_state,
+ drv_active);
+ }
qla4_8xxx_idc_unlock(ha);
msleep(1000);
qla4_8xxx_idc_lock(ha);
@@ -1723,14 +2417,18 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
}
+ /* Clear RESET OWNER as we are not going to use it any further */
+ clear_bit(AF_82XX_RST_OWNER, &ha->flags);
+
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- ql4_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state,
- dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+ ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state,
+ dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
/* Force to DEV_COLD unless someone else is starting a reset */
if (dev_state != QLA82XX_DEV_INITIALIZING) {
ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
+ qla4_8xxx_set_rst_ready(ha);
}
}
@@ -1765,8 +2463,9 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
}
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- ql4_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
- dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
+ dev_state, dev_state < MAX_STATES ?
+ qdev_state[dev_state] : "Unknown"));
/* wait for 30 seconds for device to go ready */
dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
@@ -1775,15 +2474,19 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
while (1) {
if (time_after_eq(jiffies, dev_init_timeout)) {
- ql4_printk(KERN_WARNING, ha, "Device init failed!\n");
+ ql4_printk(KERN_WARNING, ha,
+ "%s: Device Init Failed 0x%x = %s\n",
+ DRIVER_NAME,
+ dev_state, dev_state < MAX_STATES ?
+ qdev_state[dev_state] : "Unknown");
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_FAILED);
}
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
- ql4_printk(KERN_INFO, ha,
- "2:Device state is 0x%x = %s\n", dev_state,
- dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
+ ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
+ dev_state, dev_state < MAX_STATES ?
+ qdev_state[dev_state] : "Unknown");
/* NOTE: Make sure idc unlocked upon exit of switch statement */
switch (dev_state) {
@@ -2184,6 +2887,7 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
QLA82XX_DEV_NEED_RESET);
+ set_bit(AF_82XX_RST_OWNER, &ha->flags);
} else
ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n");
@@ -2195,8 +2899,10 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
qla4_8xxx_clear_rst_ready(ha);
qla4_8xxx_idc_unlock(ha);
- if (rval == QLA_SUCCESS)
+ if (rval == QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "Clearing AF_RECOVERY in qla4_8xxx_isp_reset\n");
clear_bit(AF_FW_RECOVERY, &ha->flags);
+ }
return rval;
}
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index dc7500e..3025847 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -792,4 +792,196 @@ struct crb_addr_pair {
#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0)
#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4)
+/* Minidump related */
+
+/* Entry Type Defines */
+#define QLA82XX_RDNOP 0
+#define QLA82XX_RDCRB 1
+#define QLA82XX_RDMUX 2
+#define QLA82XX_QUEUE 3
+#define QLA82XX_BOARD 4
+#define QLA82XX_RDOCM 6
+#define QLA82XX_PREGS 7
+#define QLA82XX_L1DTG 8
+#define QLA82XX_L1ITG 9
+#define QLA82XX_L1DAT 11
+#define QLA82XX_L1INS 12
+#define QLA82XX_L2DTG 21
+#define QLA82XX_L2ITG 22
+#define QLA82XX_L2DAT 23
+#define QLA82XX_L2INS 24
+#define QLA82XX_RDROM 71
+#define QLA82XX_RDMEM 72
+#define QLA82XX_CNTRL 98
+#define QLA82XX_RDEND 255
+
+/* Opcodes for Control Entries.
+ * These Flags are bit fields.
+ */
+#define QLA82XX_DBG_OPCODE_WR 0x01
+#define QLA82XX_DBG_OPCODE_RW 0x02
+#define QLA82XX_DBG_OPCODE_AND 0x04
+#define QLA82XX_DBG_OPCODE_OR 0x08
+#define QLA82XX_DBG_OPCODE_POLL 0x10
+#define QLA82XX_DBG_OPCODE_RDSTATE 0x20
+#define QLA82XX_DBG_OPCODE_WRSTATE 0x40
+#define QLA82XX_DBG_OPCODE_MDSTATE 0x80
+
+/* Driver Flags */
+#define QLA82XX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */
+#define QLA82XX_DBG_SIZE_ERR_FLAG 0x40 /* Entry vs Capture size
+ * mismatch */
+
+/* driver_code lets the driver record information about an entry;
+ * it is currently unused.
+ */
+struct qla82xx_minidump_entry_hdr {
+ uint32_t entry_type;
+ uint32_t entry_size;
+ uint32_t entry_capture_size;
+ struct {
+ uint8_t entry_capture_mask;
+ uint8_t entry_code;
+ uint8_t driver_code;
+ uint8_t driver_flags;
+ } d_ctrl;
+};
+
+/* Read CRB entry header */
+struct qla82xx_minidump_entry_crb {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t addr;
+ struct {
+ uint8_t addr_stride;
+ uint8_t state_index_a;
+ uint16_t poll_timeout;
+ } crb_strd;
+ uint32_t data_size;
+ uint32_t op_count;
+
+ struct {
+ uint8_t opcode;
+ uint8_t state_index_v;
+ uint8_t shl;
+ uint8_t shr;
+ } crb_ctrl;
+
+ uint32_t value_1;
+ uint32_t value_2;
+ uint32_t value_3;
+};
+
+struct qla82xx_minidump_entry_cache {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t tag_reg_addr;
+ struct {
+ uint16_t tag_value_stride;
+ uint16_t init_tag_value;
+ } addr_ctrl;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t control_addr;
+ struct {
+ uint16_t write_value;
+ uint8_t poll_mask;
+ uint8_t poll_wait;
+ } cache_ctrl;
+ uint32_t read_addr;
+ struct {
+ uint8_t read_addr_stride;
+ uint8_t read_addr_cnt;
+ uint16_t rsvd_1;
+ } read_ctrl;
+};
+
+/* Read OCM */
+struct qla82xx_minidump_entry_rdocm {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t rsvd_0;
+ uint32_t rsvd_1;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t rsvd_2;
+ uint32_t rsvd_3;
+ uint32_t read_addr;
+ uint32_t read_addr_stride;
+};
+
+/* Read Memory */
+struct qla82xx_minidump_entry_rdmem {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t rsvd[6];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+};
+
+/* Read ROM */
+struct qla82xx_minidump_entry_rdrom {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t rsvd[6];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+};
+
+/* Mux entry */
+struct qla82xx_minidump_entry_mux {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t select_addr;
+ uint32_t rsvd_0;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t select_value;
+ uint32_t select_value_stride;
+ uint32_t read_addr;
+ uint32_t rsvd_1;
+};
+
+/* Queue entry */
+struct qla82xx_minidump_entry_queue {
+ struct qla82xx_minidump_entry_hdr h;
+ uint32_t select_addr;
+ struct {
+ uint16_t queue_id_stride;
+ uint16_t rsvd_0;
+ } q_strd;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t rsvd_1;
+ uint32_t rsvd_2;
+ uint32_t read_addr;
+ struct {
+ uint8_t read_addr_stride;
+ uint8_t read_addr_cnt;
+ uint16_t rsvd_3;
+ } rd_strd;
+};
+
+#define QLA82XX_MINIDUMP_OCM0_SIZE (256 * 1024)
+#define QLA82XX_MINIDUMP_L1C_SIZE (256 * 1024)
+#define QLA82XX_MINIDUMP_L2C_SIZE 1572864
+#define QLA82XX_MINIDUMP_COMMON_STR_SIZE 0
+#define QLA82XX_MINIDUMP_FCOE_STR_SIZE 0
+#define QLA82XX_MINIDUMP_MEM_SIZE 0
+#define QLA82XX_MAX_ENTRY_HDR 4
+
+struct qla82xx_minidump {
+ uint32_t md_ocm0_data[QLA82XX_MINIDUMP_OCM0_SIZE];
+ uint32_t md_l1c_data[QLA82XX_MINIDUMP_L1C_SIZE];
+ uint32_t md_l2c_data[QLA82XX_MINIDUMP_L2C_SIZE];
+ uint32_t md_cs_data[QLA82XX_MINIDUMP_COMMON_STR_SIZE];
+ uint32_t md_fcoes_data[QLA82XX_MINIDUMP_FCOE_STR_SIZE];
+ uint32_t md_mem_data[QLA82XX_MINIDUMP_MEM_SIZE];
+};
+
+#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129
+#define RQST_TMPLT_SIZE 0x0
+#define RQST_TMPLT 0x1
+#define MD_DIRECT_ROM_WINDOW 0x42110030
+#define MD_DIRECT_ROM_READ_BASE 0x42150000
+#define MD_MIU_TEST_AGT_CTRL 0x41000090
+#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
+#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
+
+static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8,
+ 0x410000AC, 0x410000B8, 0x410000BC };
#endif
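
The header above lays the minidump template out as a stream of variable-sized entries, each led by a struct qla82xx_minidump_entry_hdr and terminated by a QLA82XX_RDEND entry. A minimal sketch of a template walker that honors a capture mask, using only the definitions added in this header, might look like the following; the function name, the num_entries argument, and the way the mask is passed in are illustrative and not part of the driver:

/* Illustrative sketch only: step through minidump template entries and flag
 * the ones not selected by the requested capture mask, using the structures
 * and flag bits from ql4_nx.h above. Not the driver's actual capture code.
 */
static void qla4_walk_md_template(struct qla82xx_minidump_entry_hdr *hdr,
				  uint32_t num_entries, uint32_t cap_mask)
{
	uint32_t i;

	for (i = 0; i < num_entries; i++) {
		if (hdr->entry_type == QLA82XX_RDEND)
			break;

		/* Entry is outside the requested capture level. */
		if (!(hdr->d_ctrl.entry_capture_mask & cap_mask)) {
			hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
			hdr->entry_capture_size = 0;
		}

		/* entry_size is the byte length of this entry; advance with
		 * byte arithmetic to reach the next header. */
		hdr = (struct qla82xx_minidump_entry_hdr *)
				((uint8_t *)hdr + hdr->entry_size);
	}
}
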
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index ee47820..cd15678 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -68,12 +68,34 @@ MODULE_PARM_DESC(ql4xmaxqdepth,
" Maximum queue depth to report for target devices.\n"
"\t\t Default: 32.");
+static int ql4xqfulltracking = 1;
+module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xqfulltracking,
+ " Enable or disable dynamic tracking and adjustment of\n"
+ "\t\t scsi device queue depth.\n"
+ "\t\t 0 - Disable.\n"
+ "\t\t 1 - Enable. (Default)");
+
static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
" Target Session Recovery Timeout.\n"
"\t\t Default: 120 sec.");
+int ql4xmdcapmask = 0x1F;
+module_param(ql4xmdcapmask, int, S_IRUGO);
+MODULE_PARM_DESC(ql4xmdcapmask,
+ " Set the Minidump driver capture mask level.\n"
+ "\t\t Default is 0x1F.\n"
+ "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
+
+int ql4xenablemd = 1;
+module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xenablemd,
+ " Set to enable minidump.\n"
+ "\t\t 0 - disable minidump\n"
+ "\t\t 1 - enable minidump (Default)");
+
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
/*
* SCSI host template entry points
@@ -140,6 +162,8 @@ static int qla4xxx_slave_configure(struct scsi_device *device);
static void qla4xxx_slave_destroy(struct scsi_device *sdev);
static umode_t ql4_attr_is_visible(int param_type, int param);
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
+static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
+ int reason);
static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
QLA82XX_LEGACY_INTR_CONFIG;
@@ -159,6 +183,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
.slave_configure = qla4xxx_slave_configure,
.slave_alloc = qla4xxx_slave_alloc,
.slave_destroy = qla4xxx_slave_destroy,
+ .change_queue_depth = qla4xxx_change_queue_depth,
.this_id = -1,
.cmd_per_lun = 3,
@@ -1555,19 +1580,53 @@ static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
struct scsi_qla_host *ha;
- unsigned long flags;
+ unsigned long flags, wtime;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ uint32_t ddb_state;
+ int ret;
DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
sess = cls_sess->dd_data;
ddb_entry = sess->dd_data;
ha = ddb_entry->ha;
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ goto destroy_session;
+ }
+
+ wtime = jiffies + (HZ * LOGOUT_TOV);
+ do {
+ ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_entry, fw_ddb_entry_dma,
+ NULL, NULL, &ddb_state, NULL,
+ NULL, NULL);
+ if (ret == QLA_ERROR)
+ goto destroy_session;
+
+ if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
+ (ddb_state == DDB_DS_SESSION_FAILED))
+ goto destroy_session;
+
+ schedule_timeout_uninterruptible(HZ);
+ } while ((time_after(wtime, jiffies)));
+
+destroy_session:
qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
spin_lock_irqsave(&ha->hardware_lock, flags);
qla4xxx_free_ddb(ha, ddb_entry);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
iscsi_session_teardown(cls_sess);
+
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
}
static struct iscsi_cls_conn *
@@ -2220,6 +2279,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
ha->queues_dma);
+ if (ha->fw_dump)
+ vfree(ha->fw_dump);
+
ha->queues_len = 0;
ha->queues = NULL;
ha->queues_dma = 0;
@@ -2229,6 +2291,8 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
ha->response_dma = 0;
ha->shadow_regs = NULL;
ha->shadow_regs_dma = 0;
+ ha->fw_dump = NULL;
+ ha->fw_dump_size = 0;
/* Free srb pool. */
if (ha->srb_mempool)
@@ -5023,6 +5087,8 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
set_bit(AF_INIT_DONE, &ha->flags);
+ qla4_8xxx_alloc_sysfs_attr(ha);
+
printk(KERN_INFO
" QLogic iSCSI HBA Driver version: %s\n"
" QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
@@ -5149,6 +5215,7 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
iscsi_boot_destroy_kset(ha->boot_kset);
qla4xxx_destroy_fw_ddb_session(ha);
+ qla4_8xxx_free_sysfs_attr(ha);
scsi_remove_host(ha->host);
@@ -5217,6 +5284,15 @@ static void qla4xxx_slave_destroy(struct scsi_device *sdev)
scsi_deactivate_tcq(sdev, 1);
}
+static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
+ int reason)
+{
+ if (!ql4xqfulltracking)
+ return -EOPNOTSUPP;
+
+ return iscsi_change_queue_depth(sdev, qdepth, reason);
+}
+
/**
* qla4xxx_del_from_active_array - returns an active srb
* @ha: Pointer to host adapter structure.
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 97b30c1..cc1cc351 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k16"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k17"
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 61c82a3..bbbc9c91 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -90,11 +90,9 @@ unsigned int scsi_logging_level;
EXPORT_SYMBOL(scsi_logging_level);
#endif
-#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_BLK_DEV_SD)
-/* sd and scsi_pm need to coordinate flushing async actions */
+/* sd, scsi core and power management need to coordinate flushing async actions */
LIST_HEAD(scsi_sd_probe_domain);
EXPORT_SYMBOL(scsi_sd_probe_domain);
-#endif
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
* You may not alter any existing entry (although adding new ones is
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 62ddfd3..6dfb978 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1378,16 +1378,19 @@ static int scsi_lld_busy(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost;
- struct scsi_target *starget;
if (!sdev)
return 0;
shost = sdev->host;
- starget = scsi_target(sdev);
- if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
- scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
+ /*
+ * Ignore host/starget busy state.
+	 * Since the block layer does not have a concept of fairness
+	 * across multiple queues, congestion of the host/starget needs
+	 * to be handled in the SCSI layer.
+ */
+ if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
return 1;
return 0;
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index c77628a..8818dd6 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -486,6 +486,10 @@ void
scsi_netlink_init(void)
{
int error;
+ struct netlink_kernel_cfg cfg = {
+ .input = scsi_nl_rcv_msg,
+ .groups = SCSI_NL_GRP_CNT,
+ };
INIT_LIST_HEAD(&scsi_nl_drivers);
@@ -497,8 +501,7 @@ scsi_netlink_init(void)
}
scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT,
- SCSI_NL_GRP_CNT, scsi_nl_rcv_msg, NULL,
- THIS_MODULE);
+ THIS_MODULE, &cfg);
if (!scsi_nl_sock) {
printk(KERN_ERR "%s: register of receive handler failed\n",
__func__);
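
This hunk, like the scsi_transport_iscsi.c one further down, is a mechanical conversion to the netlink_kernel_create() variant that takes a struct netlink_kernel_cfg rather than separate group, input and mutex arguments. A minimal sketch of the same conversion for a hypothetical caller (my_nl_rcv, my_nl_sock and the NETLINK_USERSOCK unit are placeholders, not SCSI code):

#include <linux/module.h>
#include <linux/netlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>

static struct sock *my_nl_sock;

/* Receive callback; netlink_kernel_cfg.input takes a bare sk_buff. */
static void my_nl_rcv(struct sk_buff *skb)
{
	/* dispatch received netlink messages here */
}

static int __init my_nl_init(void)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= 1,
		.input	= my_nl_rcv,
	};

	my_nl_sock = netlink_kernel_create(&init_net, NETLINK_USERSOCK,
					   THIS_MODULE, &cfg);
	return my_nl_sock ? 0 : -ENOBUFS;
}
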
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index f661a41..d4201de 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -24,8 +24,11 @@ static int scsi_dev_type_suspend(struct device *dev, pm_message_t msg)
err = scsi_device_quiesce(to_scsi_device(dev));
if (err == 0) {
drv = dev->driver;
- if (drv && drv->suspend)
+ if (drv && drv->suspend) {
err = drv->suspend(dev, msg);
+ if (err)
+ scsi_device_resume(to_scsi_device(dev));
+ }
}
dev_dbg(dev, "scsi suspend: %d\n", err);
return err;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 01b0374..2e5fe58 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -147,7 +147,7 @@ int scsi_complete_async_scans(void)
do {
if (list_empty(&scanning_hosts))
- return 0;
+ goto out;
/* If we can't get memory immediately, that's OK. Just
* sleep a little. Even if we never get memory, the async
* scans will finish eventually.
@@ -179,8 +179,11 @@ int scsi_complete_async_scans(void)
}
done:
spin_unlock(&async_scan_lock);
-
kfree(data);
+
+ out:
+ async_synchronize_full_domain(&scsi_sd_probe_domain);
+
return 0;
}
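
The new out: label makes scsi_complete_async_scans() flush scsi_sd_probe_domain even when scanning_hosts is empty, so callers also wait for asynchronous sd probing rather than only the host scans. The domain is the plain list_head exported from scsi.c in the hunk above; a hedged sketch of that list_head-based async-domain pattern (my_probe_domain, do_slow_probe and the callers are placeholders, not SCSI code):

#include <linux/async.h>
#include <linux/list.h>

/* Per-subsystem async domain, analogous to scsi_sd_probe_domain. */
static LIST_HEAD(my_probe_domain);

static void do_slow_probe(void *data, async_cookie_t cookie)
{
	/* long-running, independent probe work for one device */
}

static void kick_probe(void *dev)
{
	/* run do_slow_probe() in the background, tracked in my_probe_domain */
	async_schedule_domain(do_slow_probe, dev, &my_probe_domain);
}

static void wait_probes(void)
{
	/* returns only once every probe scheduled above has completed */
	async_synchronize_full_domain(&my_probe_domain);
}
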
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 1cf640e..6042954 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2936,7 +2936,10 @@ EXPORT_SYMBOL_GPL(iscsi_unregister_transport);
static __init int iscsi_transport_init(void)
{
int err;
-
+ struct netlink_kernel_cfg cfg = {
+ .groups = 1,
+ .input = iscsi_if_rx,
+ };
printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
ISCSI_TRANSPORT_VERSION);
@@ -2966,8 +2969,8 @@ static __init int iscsi_transport_init(void)
if (err)
goto unregister_conn_class;
- nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx,
- NULL, THIS_MODULE);
+ nls = netlink_kernel_create(&init_net, NETLINK_ISCSI,
+ THIS_MODULE, &cfg);
if (!nls) {
err = -ENOBUFS;
goto unregister_session_class;
diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c
index 74708fc..0727345 100644
--- a/drivers/scsi/scsi_wait_scan.c
+++ b/drivers/scsi/scsi_wait_scan.c
@@ -12,7 +12,7 @@
#include <linux/module.h>
#include <linux/device.h>
-#include <scsi/scsi_scan.h>
+#include "scsi_priv.h"
static int __init wait_scan_init(void)
{
@@ -22,11 +22,6 @@ static int __init wait_scan_init(void)
* and might not yet have reached the scsi async scanning
*/
wait_for_device_probe();
- /*
- * and then we wait for the actual asynchronous scsi scan
- * to finish.
- */
- scsi_complete_async_scans();
return 0;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 6f0a4c6..6f72b80 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1899,6 +1899,8 @@ static int sd_try_rc16_first(struct scsi_device *sdp)
{
if (sdp->host->max_cmd_len < 16)
return 0;
+ if (sdp->try_rc_10_first)
+ return 0;
if (sdp->scsi_level > SCSI_SPC_2)
return 1;
if (scsi_device_protection(sdp))
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 4e010b7..6a4fd00 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1836,7 +1836,7 @@ ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = pci_request_regions(pdev, UFSHCD);
if (err < 0) {
dev_err(&pdev->dev, "request regions failed\n");
- goto out_disable;
+ goto out_host_put;
}
hba->mmio_base = pci_ioremap_bar(pdev, 0);
@@ -1925,8 +1925,9 @@ out_iounmap:
iounmap(hba->mmio_base);
out_release_regions:
pci_release_regions(pdev);
-out_disable:
+out_host_put:
scsi_host_put(host);
+out_disable:
pci_clear_master(pdev);
pci_disable_device(pdev);
out_error:
diff --git a/drivers/sh/Kconfig b/drivers/sh/Kconfig
index f168a61..d860ef7 100644
--- a/drivers/sh/Kconfig
+++ b/drivers/sh/Kconfig
@@ -1,5 +1,6 @@
menu "SuperH / SH-Mobile Driver Options"
source "drivers/sh/intc/Kconfig"
+source "drivers/sh/pfc/Kconfig"
endmenu
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 7139ad2..e57895b 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -5,6 +5,7 @@ obj-y := intc/
obj-$(CONFIG_HAVE_CLK) += clk/
obj-$(CONFIG_MAPLE) += maple/
+obj-$(CONFIG_SH_PFC) += pfc/
obj-$(CONFIG_SUPERHYWAY) += superhyway/
-obj-$(CONFIG_GENERIC_GPIO) += pfc.o
+
obj-y += pm_runtime.o
diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c
index f0d015d..07e9fb4 100644
--- a/drivers/sh/clk/cpg.c
+++ b/drivers/sh/clk/cpg.c
@@ -14,6 +14,8 @@
#include <linux/io.h>
#include <linux/sh_clk.h>
+#define CPG_CKSTP_BIT BIT(8)
+
static unsigned int sh_clk_read(struct clk *clk)
{
if (clk->flags & CLK_ENABLE_REG_8BIT)
@@ -66,71 +68,43 @@ int __init sh_clk_mstp_register(struct clk *clks, int nr)
return ret;
}
-static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
+/*
+ * Div/mult table lookup helpers
+ */
+static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
- return clk_rate_table_round(clk, clk->freq_table, rate);
+ return clk->priv;
}
-static int sh_clk_div6_divisors[64] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
- 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
- 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
- 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
-};
+static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
+{
+ return clk_to_div_table(clk)->div_mult_table;
+}
-static struct clk_div_mult_table sh_clk_div6_table = {
- .divisors = sh_clk_div6_divisors,
- .nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
-};
+/*
+ * Common div ops
+ */
+static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
+{
+ return clk_rate_table_round(clk, clk->freq_table, rate);
+}
-static unsigned long sh_clk_div6_recalc(struct clk *clk)
+static unsigned long sh_clk_div_recalc(struct clk *clk)
{
- struct clk_div_mult_table *table = &sh_clk_div6_table;
+ struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
unsigned int idx;
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
- table, NULL);
+ table, clk->arch_flags ? &clk->arch_flags : NULL);
- idx = sh_clk_read(clk) & 0x003f;
+ idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;
return clk->freq_table[idx].frequency;
}
-static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
-{
- struct clk_div_mult_table *table = &sh_clk_div6_table;
- u32 value;
- int ret, i;
-
- if (!clk->parent_table || !clk->parent_num)
- return -EINVAL;
-
- /* Search the parent */
- for (i = 0; i < clk->parent_num; i++)
- if (clk->parent_table[i] == parent)
- break;
-
- if (i == clk->parent_num)
- return -ENODEV;
-
- ret = clk_reparent(clk, parent);
- if (ret < 0)
- return ret;
-
- value = sh_clk_read(clk) &
- ~(((1 << clk->src_width) - 1) << clk->src_shift);
-
- sh_clk_write(value | (i << clk->src_shift), clk);
-
- /* Rebuild the frequency table */
- clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
- table, NULL);
-
- return 0;
-}
-
-static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
+static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
{
+ struct clk_div_table *dt = clk_to_div_table(clk);
unsigned long value;
int idx;
@@ -139,51 +113,53 @@ static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
return idx;
value = sh_clk_read(clk);
- value &= ~0x3f;
- value |= idx;
+ value &= ~(clk->div_mask << clk->enable_bit);
+ value |= (idx << clk->enable_bit);
sh_clk_write(value, clk);
+
+ /* XXX: Should use a post-change notifier */
+ if (dt->kick)
+ dt->kick(clk);
+
return 0;
}
-static int sh_clk_div6_enable(struct clk *clk)
+static int sh_clk_div_enable(struct clk *clk)
{
- unsigned long value;
- int ret;
-
- ret = sh_clk_div6_set_rate(clk, clk->rate);
- if (ret == 0) {
- value = sh_clk_read(clk);
- value &= ~0x100; /* clear stop bit to enable clock */
- sh_clk_write(value, clk);
- }
- return ret;
+ sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
+ return 0;
}
-static void sh_clk_div6_disable(struct clk *clk)
+static void sh_clk_div_disable(struct clk *clk)
{
- unsigned long value;
+ unsigned int val;
- value = sh_clk_read(clk);
- value |= 0x100; /* stop clock */
- value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
- sh_clk_write(value, clk);
+ val = sh_clk_read(clk);
+ val |= CPG_CKSTP_BIT;
+
+ /*
+ * div6 clocks require the divisor field to be non-zero or the
+ * above CKSTP toggle silently fails. Ensure that the divisor
+ * array is reset to its initial state on disable.
+ */
+ if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
+ val |= clk->div_mask;
+
+ sh_clk_write(val, clk);
}
-static struct sh_clk_ops sh_clk_div6_clk_ops = {
- .recalc = sh_clk_div6_recalc,
+static struct sh_clk_ops sh_clk_div_clk_ops = {
+ .recalc = sh_clk_div_recalc,
+ .set_rate = sh_clk_div_set_rate,
.round_rate = sh_clk_div_round_rate,
- .set_rate = sh_clk_div6_set_rate,
- .enable = sh_clk_div6_enable,
- .disable = sh_clk_div6_disable,
};
-static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
- .recalc = sh_clk_div6_recalc,
+static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
+ .recalc = sh_clk_div_recalc,
+ .set_rate = sh_clk_div_set_rate,
.round_rate = sh_clk_div_round_rate,
- .set_rate = sh_clk_div6_set_rate,
- .enable = sh_clk_div6_enable,
- .disable = sh_clk_div6_disable,
- .set_parent = sh_clk_div6_set_parent,
+ .enable = sh_clk_div_enable,
+ .disable = sh_clk_div_disable,
};
static int __init sh_clk_init_parent(struct clk *clk)
@@ -218,12 +194,12 @@ static int __init sh_clk_init_parent(struct clk *clk)
return 0;
}
-static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
- struct sh_clk_ops *ops)
+static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
+ struct clk_div_table *table, struct sh_clk_ops *ops)
{
struct clk *clkp;
void *freq_table;
- int nr_divs = sh_clk_div6_table.nr_divisors;
+ int nr_divs = table->div_mult_table->nr_divisors;
int freq_table_size = sizeof(struct cpufreq_frequency_table);
int ret = 0;
int k;
@@ -231,7 +207,7 @@ static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
freq_table_size *= (nr_divs + 1);
freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
if (!freq_table) {
- pr_err("sh_clk_div6_register: unable to alloc memory\n");
+ pr_err("%s: unable to alloc memory\n", __func__);
return -ENOMEM;
}
@@ -239,47 +215,98 @@ static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
clkp = clks + k;
clkp->ops = ops;
+ clkp->priv = table;
+
clkp->freq_table = freq_table + (k * freq_table_size);
clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
- ret = clk_register(clkp);
- if (ret < 0)
- break;
- ret = sh_clk_init_parent(clkp);
+ ret = clk_register(clkp);
+ if (ret == 0)
+ ret = sh_clk_init_parent(clkp);
}
return ret;
}
-int __init sh_clk_div6_register(struct clk *clks, int nr)
-{
- return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops);
-}
+/*
+ * div6 support
+ */
+static int sh_clk_div6_divisors[64] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
+};
-int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
-{
- return sh_clk_div6_register_ops(clks, nr,
- &sh_clk_div6_reparent_clk_ops);
-}
+static struct clk_div_mult_table div6_div_mult_table = {
+ .divisors = sh_clk_div6_divisors,
+ .nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
+};
-static unsigned long sh_clk_div4_recalc(struct clk *clk)
+static struct clk_div_table sh_clk_div6_table = {
+ .div_mult_table = &div6_div_mult_table,
+};
+
+static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
- struct clk_div4_table *d4t = clk->priv;
- struct clk_div_mult_table *table = d4t->div_mult_table;
- unsigned int idx;
+ struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
+ u32 value;
+ int ret, i;
+ if (!clk->parent_table || !clk->parent_num)
+ return -EINVAL;
+
+ /* Search the parent */
+ for (i = 0; i < clk->parent_num; i++)
+ if (clk->parent_table[i] == parent)
+ break;
+
+ if (i == clk->parent_num)
+ return -ENODEV;
+
+ ret = clk_reparent(clk, parent);
+ if (ret < 0)
+ return ret;
+
+ value = sh_clk_read(clk) &
+ ~(((1 << clk->src_width) - 1) << clk->src_shift);
+
+ sh_clk_write(value | (i << clk->src_shift), clk);
+
+ /* Rebuild the frequency table */
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
- table, &clk->arch_flags);
+ table, NULL);
- idx = (sh_clk_read(clk) >> clk->enable_bit) & 0x000f;
+ return 0;
+}
- return clk->freq_table[idx].frequency;
+static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
+ .recalc = sh_clk_div_recalc,
+ .round_rate = sh_clk_div_round_rate,
+ .set_rate = sh_clk_div_set_rate,
+ .enable = sh_clk_div_enable,
+ .disable = sh_clk_div_disable,
+ .set_parent = sh_clk_div6_set_parent,
+};
+
+int __init sh_clk_div6_register(struct clk *clks, int nr)
+{
+ return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
+ &sh_clk_div_enable_clk_ops);
+}
+
+int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
+{
+ return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
+ &sh_clk_div6_reparent_clk_ops);
}
+/*
+ * div4 support
+ */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
- struct clk_div4_table *d4t = clk->priv;
- struct clk_div_mult_table *table = d4t->div_mult_table;
+ struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
u32 value;
int ret;
@@ -306,107 +333,31 @@ static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
return 0;
}
-static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
-{
- struct clk_div4_table *d4t = clk->priv;
- unsigned long value;
- int idx = clk_rate_table_find(clk, clk->freq_table, rate);
- if (idx < 0)
- return idx;
-
- value = sh_clk_read(clk);
- value &= ~(0xf << clk->enable_bit);
- value |= (idx << clk->enable_bit);
- sh_clk_write(value, clk);
-
- if (d4t->kick)
- d4t->kick(clk);
-
- return 0;
-}
-
-static int sh_clk_div4_enable(struct clk *clk)
-{
- sh_clk_write(sh_clk_read(clk) & ~(1 << 8), clk);
- return 0;
-}
-
-static void sh_clk_div4_disable(struct clk *clk)
-{
- sh_clk_write(sh_clk_read(clk) | (1 << 8), clk);
-}
-
-static struct sh_clk_ops sh_clk_div4_clk_ops = {
- .recalc = sh_clk_div4_recalc,
- .set_rate = sh_clk_div4_set_rate,
- .round_rate = sh_clk_div_round_rate,
-};
-
-static struct sh_clk_ops sh_clk_div4_enable_clk_ops = {
- .recalc = sh_clk_div4_recalc,
- .set_rate = sh_clk_div4_set_rate,
- .round_rate = sh_clk_div_round_rate,
- .enable = sh_clk_div4_enable,
- .disable = sh_clk_div4_disable,
-};
-
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
- .recalc = sh_clk_div4_recalc,
- .set_rate = sh_clk_div4_set_rate,
+ .recalc = sh_clk_div_recalc,
+ .set_rate = sh_clk_div_set_rate,
.round_rate = sh_clk_div_round_rate,
- .enable = sh_clk_div4_enable,
- .disable = sh_clk_div4_disable,
+ .enable = sh_clk_div_enable,
+ .disable = sh_clk_div_disable,
.set_parent = sh_clk_div4_set_parent,
};
-static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
- struct clk_div4_table *table, struct sh_clk_ops *ops)
-{
- struct clk *clkp;
- void *freq_table;
- int nr_divs = table->div_mult_table->nr_divisors;
- int freq_table_size = sizeof(struct cpufreq_frequency_table);
- int ret = 0;
- int k;
-
- freq_table_size *= (nr_divs + 1);
- freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
- if (!freq_table) {
- pr_err("sh_clk_div4_register: unable to alloc memory\n");
- return -ENOMEM;
- }
-
- for (k = 0; !ret && (k < nr); k++) {
- clkp = clks + k;
-
- clkp->ops = ops;
- clkp->priv = table;
-
- clkp->freq_table = freq_table + (k * freq_table_size);
- clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
-
- ret = clk_register(clkp);
- }
-
- return ret;
-}
-
int __init sh_clk_div4_register(struct clk *clks, int nr,
struct clk_div4_table *table)
{
- return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
+ return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}
int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
struct clk_div4_table *table)
{
- return sh_clk_div4_register_ops(clks, nr, table,
- &sh_clk_div4_enable_clk_ops);
+ return sh_clk_div_register_ops(clks, nr, table,
+ &sh_clk_div_enable_clk_ops);
}
int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
struct clk_div4_table *table)
{
- return sh_clk_div4_register_ops(clks, nr, table,
- &sh_clk_div4_reparent_clk_ops);
+ return sh_clk_div_register_ops(clks, nr, table,
+ &sh_clk_div4_reparent_clk_ops);
}
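
The consolidated sh_clk_div_set_rate() above programs the divisor by masking clk->div_mask at bit position clk->enable_bit, which is what lets a single implementation cover both the 4-bit div4 field and the 6-bit div6 field that the removed code hard-coded as 0xf and 0x3f. The read-modify-write it performs is equivalent to this small free-standing helper (names are illustrative only, not part of the driver):

/* Illustrative only: insert a divider-table index "idx" into a "mask"-wide
 * field that starts at bit "shift" of a clock control register value.
 */
static inline u32 cpg_set_div_field(u32 reg, u32 mask, u32 shift, u32 idx)
{
	reg &= ~(mask << shift);	/* clear the current divisor bits */
	reg |= (idx & mask) << shift;	/* insert the new table index */
	return reg;
}

For a div6 clock the mask is 0x3f at shift 0; for a div4 clock it is 0xf at the clock's enable_bit, matching the constants the old per-type set_rate implementations used.
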
diff --git a/drivers/sh/intc/Makefile b/drivers/sh/intc/Makefile
index bb5df86..44f006d 100644
--- a/drivers/sh/intc/Makefile
+++ b/drivers/sh/intc/Makefile
@@ -1,4 +1,4 @@
-obj-y := access.o chip.o core.o dynamic.o handle.o virq.o
+obj-y := access.o chip.o core.o handle.o virq.o
obj-$(CONFIG_INTC_BALANCING) += balancing.o
obj-$(CONFIG_INTC_USERIMASK) += userimask.o
diff --git a/drivers/sh/intc/dynamic.c b/drivers/sh/intc/dynamic.c
deleted file mode 100644
index 14eb01e..0000000
--- a/drivers/sh/intc/dynamic.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Dynamic IRQ management
- *
- * Copyright (C) 2010 Paul Mundt
- *
- * Modelled after arch/x86/kernel/apic/io_apic.c
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#define pr_fmt(fmt) "intc: " fmt
-
-#include <linux/irq.h>
-#include <linux/bitmap.h>
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include "internals.h" /* only for activate_irq() damage.. */
-
-/*
- * The IRQ bitmap provides a global map of bound IRQ vectors for a
- * given platform. Allocation of IRQs are either static through the CPU
- * vector map, or dynamic in the case of board mux vectors or MSI.
- *
- * As this is a central point for all IRQ controllers on the system,
- * each of the available sources are mapped out here. This combined with
- * sparseirq makes it quite trivial to keep the vector map tightly packed
- * when dynamically creating IRQs, as well as tying in to otherwise
- * unused irq_desc positions in the sparse array.
- */
-
-/*
- * Dynamic IRQ allocation and deallocation
- */
-unsigned int create_irq_nr(unsigned int irq_want, int node)
-{
- int irq = irq_alloc_desc_at(irq_want, node);
- if (irq < 0)
- return 0;
-
- activate_irq(irq);
- return irq;
-}
-
-int create_irq(void)
-{
- int irq = irq_alloc_desc(numa_node_id());
- if (irq >= 0)
- activate_irq(irq);
-
- return irq;
-}
-
-void destroy_irq(unsigned int irq)
-{
- irq_free_desc(irq);
-}
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c
index 93cec21..f30ac93 100644
--- a/drivers/sh/intc/virq.c
+++ b/drivers/sh/intc/virq.c
@@ -219,12 +219,14 @@ restart:
if (radix_tree_deref_retry(entry))
goto restart;
- irq = create_irq();
+ irq = irq_alloc_desc(numa_node_id());
if (unlikely(irq < 0)) {
pr_err("no more free IRQs, bailing..\n");
break;
}
+ activate_irq(irq);
+
pr_info("Setting up a chained VIRQ from %d -> %d\n",
irq, entry->pirq);
diff --git a/drivers/sh/pfc.c b/drivers/sh/pfc.c
deleted file mode 100644
index 522c6c4..0000000
--- a/drivers/sh/pfc.c
+++ /dev/null
@@ -1,739 +0,0 @@
-/*
- * Pinmuxed GPIO support for SuperH.
- *
- * Copyright (C) 2008 Magnus Damm
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/bitops.h>
-#include <linux/gpio.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-
-static void pfc_iounmap(struct pinmux_info *pip)
-{
- int k;
-
- for (k = 0; k < pip->num_resources; k++)
- if (pip->window[k].virt)
- iounmap(pip->window[k].virt);
-
- kfree(pip->window);
- pip->window = NULL;
-}
-
-static int pfc_ioremap(struct pinmux_info *pip)
-{
- struct resource *res;
- int k;
-
- if (!pip->num_resources)
- return 0;
-
- pip->window = kzalloc(pip->num_resources * sizeof(*pip->window),
- GFP_NOWAIT);
- if (!pip->window)
- goto err1;
-
- for (k = 0; k < pip->num_resources; k++) {
- res = pip->resource + k;
- WARN_ON(resource_type(res) != IORESOURCE_MEM);
- pip->window[k].phys = res->start;
- pip->window[k].size = resource_size(res);
- pip->window[k].virt = ioremap_nocache(res->start,
- resource_size(res));
- if (!pip->window[k].virt)
- goto err2;
- }
-
- return 0;
-
-err2:
- pfc_iounmap(pip);
-err1:
- return -1;
-}
-
-static void __iomem *pfc_phys_to_virt(struct pinmux_info *pip,
- unsigned long address)
-{
- struct pfc_window *window;
- int k;
-
- /* scan through physical windows and convert address */
- for (k = 0; k < pip->num_resources; k++) {
- window = pip->window + k;
-
- if (address < window->phys)
- continue;
-
- if (address >= (window->phys + window->size))
- continue;
-
- return window->virt + (address - window->phys);
- }
-
- /* no windows defined, register must be 1:1 mapped virt:phys */
- return (void __iomem *)address;
-}
-
-static int enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
-{
- if (enum_id < r->begin)
- return 0;
-
- if (enum_id > r->end)
- return 0;
-
- return 1;
-}
-
-static unsigned long gpio_read_raw_reg(void __iomem *mapped_reg,
- unsigned long reg_width)
-{
- switch (reg_width) {
- case 8:
- return ioread8(mapped_reg);
- case 16:
- return ioread16(mapped_reg);
- case 32:
- return ioread32(mapped_reg);
- }
-
- BUG();
- return 0;
-}
-
-static void gpio_write_raw_reg(void __iomem *mapped_reg,
- unsigned long reg_width,
- unsigned long data)
-{
- switch (reg_width) {
- case 8:
- iowrite8(data, mapped_reg);
- return;
- case 16:
- iowrite16(data, mapped_reg);
- return;
- case 32:
- iowrite32(data, mapped_reg);
- return;
- }
-
- BUG();
-}
-
-static int gpio_read_bit(struct pinmux_data_reg *dr,
- unsigned long in_pos)
-{
- unsigned long pos;
-
- pos = dr->reg_width - (in_pos + 1);
-
- pr_debug("read_bit: addr = %lx, pos = %ld, "
- "r_width = %ld\n", dr->reg, pos, dr->reg_width);
-
- return (gpio_read_raw_reg(dr->mapped_reg, dr->reg_width) >> pos) & 1;
-}
-
-static void gpio_write_bit(struct pinmux_data_reg *dr,
- unsigned long in_pos, unsigned long value)
-{
- unsigned long pos;
-
- pos = dr->reg_width - (in_pos + 1);
-
- pr_debug("write_bit addr = %lx, value = %d, pos = %ld, "
- "r_width = %ld\n",
- dr->reg, !!value, pos, dr->reg_width);
-
- if (value)
- set_bit(pos, &dr->reg_shadow);
- else
- clear_bit(pos, &dr->reg_shadow);
-
- gpio_write_raw_reg(dr->mapped_reg, dr->reg_width, dr->reg_shadow);
-}
-
-static void config_reg_helper(struct pinmux_info *gpioc,
- struct pinmux_cfg_reg *crp,
- unsigned long in_pos,
- void __iomem **mapped_regp,
- unsigned long *maskp,
- unsigned long *posp)
-{
- int k;
-
- *mapped_regp = pfc_phys_to_virt(gpioc, crp->reg);
-
- if (crp->field_width) {
- *maskp = (1 << crp->field_width) - 1;
- *posp = crp->reg_width - ((in_pos + 1) * crp->field_width);
- } else {
- *maskp = (1 << crp->var_field_width[in_pos]) - 1;
- *posp = crp->reg_width;
- for (k = 0; k <= in_pos; k++)
- *posp -= crp->var_field_width[k];
- }
-}
-
-static int read_config_reg(struct pinmux_info *gpioc,
- struct pinmux_cfg_reg *crp,
- unsigned long field)
-{
- void __iomem *mapped_reg;
- unsigned long mask, pos;
-
- config_reg_helper(gpioc, crp, field, &mapped_reg, &mask, &pos);
-
- pr_debug("read_reg: addr = %lx, field = %ld, "
- "r_width = %ld, f_width = %ld\n",
- crp->reg, field, crp->reg_width, crp->field_width);
-
- return (gpio_read_raw_reg(mapped_reg, crp->reg_width) >> pos) & mask;
-}
-
-static void write_config_reg(struct pinmux_info *gpioc,
- struct pinmux_cfg_reg *crp,
- unsigned long field, unsigned long value)
-{
- void __iomem *mapped_reg;
- unsigned long mask, pos, data;
-
- config_reg_helper(gpioc, crp, field, &mapped_reg, &mask, &pos);
-
- pr_debug("write_reg addr = %lx, value = %ld, field = %ld, "
- "r_width = %ld, f_width = %ld\n",
- crp->reg, value, field, crp->reg_width, crp->field_width);
-
- mask = ~(mask << pos);
- value = value << pos;
-
- data = gpio_read_raw_reg(mapped_reg, crp->reg_width);
- data &= mask;
- data |= value;
-
- if (gpioc->unlock_reg)
- gpio_write_raw_reg(pfc_phys_to_virt(gpioc, gpioc->unlock_reg),
- 32, ~data);
-
- gpio_write_raw_reg(mapped_reg, crp->reg_width, data);
-}
-
-static int setup_data_reg(struct pinmux_info *gpioc, unsigned gpio)
-{
- struct pinmux_gpio *gpiop = &gpioc->gpios[gpio];
- struct pinmux_data_reg *data_reg;
- int k, n;
-
- if (!enum_in_range(gpiop->enum_id, &gpioc->data))
- return -1;
-
- k = 0;
- while (1) {
- data_reg = gpioc->data_regs + k;
-
- if (!data_reg->reg_width)
- break;
-
- data_reg->mapped_reg = pfc_phys_to_virt(gpioc, data_reg->reg);
-
- for (n = 0; n < data_reg->reg_width; n++) {
- if (data_reg->enum_ids[n] == gpiop->enum_id) {
- gpiop->flags &= ~PINMUX_FLAG_DREG;
- gpiop->flags |= (k << PINMUX_FLAG_DREG_SHIFT);
- gpiop->flags &= ~PINMUX_FLAG_DBIT;
- gpiop->flags |= (n << PINMUX_FLAG_DBIT_SHIFT);
- return 0;
- }
- }
- k++;
- }
-
- BUG();
-
- return -1;
-}
-
-static void setup_data_regs(struct pinmux_info *gpioc)
-{
- struct pinmux_data_reg *drp;
- int k;
-
- for (k = gpioc->first_gpio; k <= gpioc->last_gpio; k++)
- setup_data_reg(gpioc, k);
-
- k = 0;
- while (1) {
- drp = gpioc->data_regs + k;
-
- if (!drp->reg_width)
- break;
-
- drp->reg_shadow = gpio_read_raw_reg(drp->mapped_reg,
- drp->reg_width);
- k++;
- }
-}
-
-static int get_data_reg(struct pinmux_info *gpioc, unsigned gpio,
- struct pinmux_data_reg **drp, int *bitp)
-{
- struct pinmux_gpio *gpiop = &gpioc->gpios[gpio];
- int k, n;
-
- if (!enum_in_range(gpiop->enum_id, &gpioc->data))
- return -1;
-
- k = (gpiop->flags & PINMUX_FLAG_DREG) >> PINMUX_FLAG_DREG_SHIFT;
- n = (gpiop->flags & PINMUX_FLAG_DBIT) >> PINMUX_FLAG_DBIT_SHIFT;
- *drp = gpioc->data_regs + k;
- *bitp = n;
- return 0;
-}
-
-static int get_config_reg(struct pinmux_info *gpioc, pinmux_enum_t enum_id,
- struct pinmux_cfg_reg **crp,
- int *fieldp, int *valuep,
- unsigned long **cntp)
-{
- struct pinmux_cfg_reg *config_reg;
- unsigned long r_width, f_width, curr_width, ncomb;
- int k, m, n, pos, bit_pos;
-
- k = 0;
- while (1) {
- config_reg = gpioc->cfg_regs + k;
-
- r_width = config_reg->reg_width;
- f_width = config_reg->field_width;
-
- if (!r_width)
- break;
-
- pos = 0;
- m = 0;
- for (bit_pos = 0; bit_pos < r_width; bit_pos += curr_width) {
- if (f_width)
- curr_width = f_width;
- else
- curr_width = config_reg->var_field_width[m];
-
- ncomb = 1 << curr_width;
- for (n = 0; n < ncomb; n++) {
- if (config_reg->enum_ids[pos + n] == enum_id) {
- *crp = config_reg;
- *fieldp = m;
- *valuep = n;
- *cntp = &config_reg->cnt[m];
- return 0;
- }
- }
- pos += ncomb;
- m++;
- }
- k++;
- }
-
- return -1;
-}
-
-static int get_gpio_enum_id(struct pinmux_info *gpioc, unsigned gpio,
- int pos, pinmux_enum_t *enum_idp)
-{
- pinmux_enum_t enum_id = gpioc->gpios[gpio].enum_id;
- pinmux_enum_t *data = gpioc->gpio_data;
- int k;
-
- if (!enum_in_range(enum_id, &gpioc->data)) {
- if (!enum_in_range(enum_id, &gpioc->mark)) {
- pr_err("non data/mark enum_id for gpio %d\n", gpio);
- return -1;
- }
- }
-
- if (pos) {
- *enum_idp = data[pos + 1];
- return pos + 1;
- }
-
- for (k = 0; k < gpioc->gpio_data_size; k++) {
- if (data[k] == enum_id) {
- *enum_idp = data[k + 1];
- return k + 1;
- }
- }
-
- pr_err("cannot locate data/mark enum_id for gpio %d\n", gpio);
- return -1;
-}
-
-enum { GPIO_CFG_DRYRUN, GPIO_CFG_REQ, GPIO_CFG_FREE };
-
-static int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio,
- int pinmux_type, int cfg_mode)
-{
- struct pinmux_cfg_reg *cr = NULL;
- pinmux_enum_t enum_id;
- struct pinmux_range *range;
- int in_range, pos, field, value;
- unsigned long *cntp;
-
- switch (pinmux_type) {
-
- case PINMUX_TYPE_FUNCTION:
- range = NULL;
- break;
-
- case PINMUX_TYPE_OUTPUT:
- range = &gpioc->output;
- break;
-
- case PINMUX_TYPE_INPUT:
- range = &gpioc->input;
- break;
-
- case PINMUX_TYPE_INPUT_PULLUP:
- range = &gpioc->input_pu;
- break;
-
- case PINMUX_TYPE_INPUT_PULLDOWN:
- range = &gpioc->input_pd;
- break;
-
- default:
- goto out_err;
- }
-
- pos = 0;
- enum_id = 0;
- field = 0;
- value = 0;
- while (1) {
- pos = get_gpio_enum_id(gpioc, gpio, pos, &enum_id);
- if (pos <= 0)
- goto out_err;
-
- if (!enum_id)
- break;
-
- /* first check if this is a function enum */
- in_range = enum_in_range(enum_id, &gpioc->function);
- if (!in_range) {
- /* not a function enum */
- if (range) {
- /*
- * other range exists, so this pin is
- * a regular GPIO pin that now is being
- * bound to a specific direction.
- *
- * for this case we only allow function enums
- * and the enums that match the other range.
- */
- in_range = enum_in_range(enum_id, range);
-
- /*
- * special case pass through for fixed
- * input-only or output-only pins without
- * function enum register association.
- */
- if (in_range && enum_id == range->force)
- continue;
- } else {
- /*
- * no other range exists, so this pin
- * must then be of the function type.
- *
- * allow function type pins to select
- * any combination of function/in/out
- * in their MARK lists.
- */
- in_range = 1;
- }
- }
-
- if (!in_range)
- continue;
-
- if (get_config_reg(gpioc, enum_id, &cr,
- &field, &value, &cntp) != 0)
- goto out_err;
-
- switch (cfg_mode) {
- case GPIO_CFG_DRYRUN:
- if (!*cntp ||
- (read_config_reg(gpioc, cr, field) != value))
- continue;
- break;
-
- case GPIO_CFG_REQ:
- write_config_reg(gpioc, cr, field, value);
- *cntp = *cntp + 1;
- break;
-
- case GPIO_CFG_FREE:
- *cntp = *cntp - 1;
- break;
- }
- }
-
- return 0;
- out_err:
- return -1;
-}
-
-static DEFINE_SPINLOCK(gpio_lock);
-
-static struct pinmux_info *chip_to_pinmux(struct gpio_chip *chip)
-{
- return container_of(chip, struct pinmux_info, chip);
-}
-
-static int sh_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
- struct pinmux_info *gpioc = chip_to_pinmux(chip);
- struct pinmux_data_reg *dummy;
- unsigned long flags;
- int i, ret, pinmux_type;
-
- ret = -EINVAL;
-
- if (!gpioc)
- goto err_out;
-
- spin_lock_irqsave(&gpio_lock, flags);
-
- if ((gpioc->gpios[offset].flags & PINMUX_FLAG_TYPE) != PINMUX_TYPE_NONE)
- goto err_unlock;
-
- /* setup pin function here if no data is associated with pin */
-
- if (get_data_reg(gpioc, offset, &dummy, &i) != 0)
- pinmux_type = PINMUX_TYPE_FUNCTION;
- else
- pinmux_type = PINMUX_TYPE_GPIO;
-
- if (pinmux_type == PINMUX_TYPE_FUNCTION) {
- if (pinmux_config_gpio(gpioc, offset,
- pinmux_type,
- GPIO_CFG_DRYRUN) != 0)
- goto err_unlock;
-
- if (pinmux_config_gpio(gpioc, offset,
- pinmux_type,
- GPIO_CFG_REQ) != 0)
- BUG();
- }
-
- gpioc->gpios[offset].flags &= ~PINMUX_FLAG_TYPE;
- gpioc->gpios[offset].flags |= pinmux_type;
-
- ret = 0;
- err_unlock:
- spin_unlock_irqrestore(&gpio_lock, flags);
- err_out:
- return ret;
-}
-
-static void sh_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
- struct pinmux_info *gpioc = chip_to_pinmux(chip);
- unsigned long flags;
- int pinmux_type;
-
- if (!gpioc)
- return;
-
- spin_lock_irqsave(&gpio_lock, flags);
-
- pinmux_type = gpioc->gpios[offset].flags & PINMUX_FLAG_TYPE;
- pinmux_config_gpio(gpioc, offset, pinmux_type, GPIO_CFG_FREE);
- gpioc->gpios[offset].flags &= ~PINMUX_FLAG_TYPE;
- gpioc->gpios[offset].flags |= PINMUX_TYPE_NONE;
-
- spin_unlock_irqrestore(&gpio_lock, flags);
-}
-
-static int pinmux_direction(struct pinmux_info *gpioc,
- unsigned gpio, int new_pinmux_type)
-{
- int pinmux_type;
- int ret = -EINVAL;
-
- if (!gpioc)
- goto err_out;
-
- pinmux_type = gpioc->gpios[gpio].flags & PINMUX_FLAG_TYPE;
-
- switch (pinmux_type) {
- case PINMUX_TYPE_GPIO:
- break;
- case PINMUX_TYPE_OUTPUT:
- case PINMUX_TYPE_INPUT:
- case PINMUX_TYPE_INPUT_PULLUP:
- case PINMUX_TYPE_INPUT_PULLDOWN:
- pinmux_config_gpio(gpioc, gpio, pinmux_type, GPIO_CFG_FREE);
- break;
- default:
- goto err_out;
- }
-
- if (pinmux_config_gpio(gpioc, gpio,
- new_pinmux_type,
- GPIO_CFG_DRYRUN) != 0)
- goto err_out;
-
- if (pinmux_config_gpio(gpioc, gpio,
- new_pinmux_type,
- GPIO_CFG_REQ) != 0)
- BUG();
-
- gpioc->gpios[gpio].flags &= ~PINMUX_FLAG_TYPE;
- gpioc->gpios[gpio].flags |= new_pinmux_type;
-
- ret = 0;
- err_out:
- return ret;
-}
-
-static int sh_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- struct pinmux_info *gpioc = chip_to_pinmux(chip);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&gpio_lock, flags);
- ret = pinmux_direction(gpioc, offset, PINMUX_TYPE_INPUT);
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- return ret;
-}
-
-static void sh_gpio_set_value(struct pinmux_info *gpioc,
- unsigned gpio, int value)
-{
- struct pinmux_data_reg *dr = NULL;
- int bit = 0;
-
- if (!gpioc || get_data_reg(gpioc, gpio, &dr, &bit) != 0)
- BUG();
- else
- gpio_write_bit(dr, bit, value);
-}
-
-static int sh_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- struct pinmux_info *gpioc = chip_to_pinmux(chip);
- unsigned long flags;
- int ret;
-
- sh_gpio_set_value(gpioc, offset, value);
- spin_lock_irqsave(&gpio_lock, flags);
- ret = pinmux_direction(gpioc, offset, PINMUX_TYPE_OUTPUT);
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- return ret;
-}
-
-static int sh_gpio_get_value(struct pinmux_info *gpioc, unsigned gpio)
-{
- struct pinmux_data_reg *dr = NULL;
- int bit = 0;
-
- if (!gpioc || get_data_reg(gpioc, gpio, &dr, &bit) != 0)
- return -EINVAL;
-
- return gpio_read_bit(dr, bit);
-}
-
-static int sh_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- return sh_gpio_get_value(chip_to_pinmux(chip), offset);
-}
-
-static void sh_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- sh_gpio_set_value(chip_to_pinmux(chip), offset, value);
-}
-
-static int sh_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct pinmux_info *gpioc = chip_to_pinmux(chip);
- pinmux_enum_t enum_id;
- pinmux_enum_t *enum_ids;
- int i, k, pos;
-
- pos = 0;
- enum_id = 0;
- while (1) {
- pos = get_gpio_enum_id(gpioc, offset, pos, &enum_id);
- if (pos <= 0 || !enum_id)
- break;
-
- for (i = 0; i < gpioc->gpio_irq_size; i++) {
- enum_ids = gpioc->gpio_irq[i].enum_ids;
- for (k = 0; enum_ids[k]; k++) {
- if (enum_ids[k] == enum_id)
- return gpioc->gpio_irq[i].irq;
- }
- }
- }
-
- return -ENOSYS;
-}
-
-int register_pinmux(struct pinmux_info *pip)
-{
- struct gpio_chip *chip = &pip->chip;
- int ret;
-
- pr_info("%s handling gpio %d -> %d\n",
- pip->name, pip->first_gpio, pip->last_gpio);
-
- ret = pfc_ioremap(pip);
- if (ret < 0)
- return ret;
-
- setup_data_regs(pip);
-
- chip->request = sh_gpio_request;
- chip->free = sh_gpio_free;
- chip->direction_input = sh_gpio_direction_input;
- chip->get = sh_gpio_get;
- chip->direction_output = sh_gpio_direction_output;
- chip->set = sh_gpio_set;
- chip->to_irq = sh_gpio_to_irq;
-
- WARN_ON(pip->first_gpio != 0); /* needs testing */
-
- chip->label = pip->name;
- chip->owner = THIS_MODULE;
- chip->base = pip->first_gpio;
- chip->ngpio = (pip->last_gpio - pip->first_gpio) + 1;
-
- ret = gpiochip_add(chip);
- if (ret < 0)
- pfc_iounmap(pip);
-
- return ret;
-}
-
-int unregister_pinmux(struct pinmux_info *pip)
-{
- pr_info("%s deregistering\n", pip->name);
- pfc_iounmap(pip);
- return gpiochip_remove(&pip->chip);
-}
diff --git a/drivers/sh/pfc/Kconfig b/drivers/sh/pfc/Kconfig
new file mode 100644
index 0000000..804f9ad
--- /dev/null
+++ b/drivers/sh/pfc/Kconfig
@@ -0,0 +1,26 @@
+comment "Pin function controller options"
+
+config SH_PFC
+ # XXX move off the gpio dependency
+ depends on GENERIC_GPIO
+ select GPIO_SH_PFC if ARCH_REQUIRE_GPIOLIB
+ select PINCTRL_SH_PFC
+ def_bool y
+
+#
+# Placeholder for now, rehome to drivers/pinctrl once the PFC APIs
+# have settled.
+#
+config PINCTRL_SH_PFC
+ tristate "SuperH PFC pin controller driver"
+ depends on SH_PFC
+ select PINCTRL
+ select PINMUX
+ select PINCONF
+
+config GPIO_SH_PFC
+ tristate "SuperH PFC GPIO support"
+ depends on SH_PFC && GPIOLIB
+ help
+ This enables support for GPIOs within the SoC's pin function
+ controller.
diff --git a/drivers/sh/pfc/Makefile b/drivers/sh/pfc/Makefile
new file mode 100644
index 0000000..7916027
--- /dev/null
+++ b/drivers/sh/pfc/Makefile
@@ -0,0 +1,3 @@
+obj-y += core.o
+obj-$(CONFIG_PINCTRL_SH_PFC) += pinctrl.o
+obj-$(CONFIG_GPIO_SH_PFC) += gpio.o
diff --git a/drivers/sh/pfc/core.c b/drivers/sh/pfc/core.c
new file mode 100644
index 0000000..6816937
--- /dev/null
+++ b/drivers/sh/pfc/core.c
@@ -0,0 +1,572 @@
+/*
+ * SuperH Pin Function Controller support.
+ *
+ * Copyright (C) 2008 Magnus Damm
+ * Copyright (C) 2009 - 2012 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define pr_fmt(fmt) "sh_pfc " KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sh_pfc.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/pinctrl/machine.h>
+
+static struct sh_pfc *sh_pfc __read_mostly;
+
+static inline bool sh_pfc_initialized(void)
+{
+ return !!sh_pfc;
+}
+
+static void pfc_iounmap(struct sh_pfc *pfc)
+{
+ int k;
+
+ for (k = 0; k < pfc->num_resources; k++)
+ if (pfc->window[k].virt)
+ iounmap(pfc->window[k].virt);
+
+ kfree(pfc->window);
+ pfc->window = NULL;
+}
+
+static int pfc_ioremap(struct sh_pfc *pfc)
+{
+ struct resource *res;
+ int k;
+
+ if (!pfc->num_resources)
+ return 0;
+
+ pfc->window = kzalloc(pfc->num_resources * sizeof(*pfc->window),
+ GFP_NOWAIT);
+ if (!pfc->window)
+ goto err1;
+
+ for (k = 0; k < pfc->num_resources; k++) {
+ res = pfc->resource + k;
+ WARN_ON(resource_type(res) != IORESOURCE_MEM);
+ pfc->window[k].phys = res->start;
+ pfc->window[k].size = resource_size(res);
+ pfc->window[k].virt = ioremap_nocache(res->start,
+ resource_size(res));
+ if (!pfc->window[k].virt)
+ goto err2;
+ }
+
+ return 0;
+
+err2:
+ pfc_iounmap(pfc);
+err1:
+ return -1;
+}
+
+static void __iomem *pfc_phys_to_virt(struct sh_pfc *pfc,
+ unsigned long address)
+{
+ struct pfc_window *window;
+ int k;
+
+ /* scan through physical windows and convert address */
+ for (k = 0; k < pfc->num_resources; k++) {
+ window = pfc->window + k;
+
+ if (address < window->phys)
+ continue;
+
+ if (address >= (window->phys + window->size))
+ continue;
+
+ return window->virt + (address - window->phys);
+ }
+
+ /* no windows defined, register must be 1:1 mapped virt:phys */
+ return (void __iomem *)address;
+}
+
+static int enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
+{
+ if (enum_id < r->begin)
+ return 0;
+
+ if (enum_id > r->end)
+ return 0;
+
+ return 1;
+}
+
+static unsigned long gpio_read_raw_reg(void __iomem *mapped_reg,
+ unsigned long reg_width)
+{
+ switch (reg_width) {
+ case 8:
+ return ioread8(mapped_reg);
+ case 16:
+ return ioread16(mapped_reg);
+ case 32:
+ return ioread32(mapped_reg);
+ }
+
+ BUG();
+ return 0;
+}
+
+static void gpio_write_raw_reg(void __iomem *mapped_reg,
+ unsigned long reg_width,
+ unsigned long data)
+{
+ switch (reg_width) {
+ case 8:
+ iowrite8(data, mapped_reg);
+ return;
+ case 16:
+ iowrite16(data, mapped_reg);
+ return;
+ case 32:
+ iowrite32(data, mapped_reg);
+ return;
+ }
+
+ BUG();
+}
+
+int sh_pfc_read_bit(struct pinmux_data_reg *dr, unsigned long in_pos)
+{
+ unsigned long pos;
+
+ pos = dr->reg_width - (in_pos + 1);
+
+ pr_debug("read_bit: addr = %lx, pos = %ld, "
+ "r_width = %ld\n", dr->reg, pos, dr->reg_width);
+
+ return (gpio_read_raw_reg(dr->mapped_reg, dr->reg_width) >> pos) & 1;
+}
+EXPORT_SYMBOL_GPL(sh_pfc_read_bit);
+
+void sh_pfc_write_bit(struct pinmux_data_reg *dr, unsigned long in_pos,
+ unsigned long value)
+{
+ unsigned long pos;
+
+ pos = dr->reg_width - (in_pos + 1);
+
+ pr_debug("write_bit addr = %lx, value = %d, pos = %ld, "
+ "r_width = %ld\n",
+ dr->reg, !!value, pos, dr->reg_width);
+
+ if (value)
+ set_bit(pos, &dr->reg_shadow);
+ else
+ clear_bit(pos, &dr->reg_shadow);
+
+ gpio_write_raw_reg(dr->mapped_reg, dr->reg_width, dr->reg_shadow);
+}
+EXPORT_SYMBOL_GPL(sh_pfc_write_bit);
+
+static void config_reg_helper(struct sh_pfc *pfc,
+ struct pinmux_cfg_reg *crp,
+ unsigned long in_pos,
+ void __iomem **mapped_regp,
+ unsigned long *maskp,
+ unsigned long *posp)
+{
+ int k;
+
+ *mapped_regp = pfc_phys_to_virt(pfc, crp->reg);
+
+ if (crp->field_width) {
+ *maskp = (1 << crp->field_width) - 1;
+ *posp = crp->reg_width - ((in_pos + 1) * crp->field_width);
+ } else {
+ *maskp = (1 << crp->var_field_width[in_pos]) - 1;
+ *posp = crp->reg_width;
+ for (k = 0; k <= in_pos; k++)
+ *posp -= crp->var_field_width[k];
+ }
+}
+
+static int read_config_reg(struct sh_pfc *pfc,
+ struct pinmux_cfg_reg *crp,
+ unsigned long field)
+{
+ void __iomem *mapped_reg;
+ unsigned long mask, pos;
+
+ config_reg_helper(pfc, crp, field, &mapped_reg, &mask, &pos);
+
+ pr_debug("read_reg: addr = %lx, field = %ld, "
+ "r_width = %ld, f_width = %ld\n",
+ crp->reg, field, crp->reg_width, crp->field_width);
+
+ return (gpio_read_raw_reg(mapped_reg, crp->reg_width) >> pos) & mask;
+}
+
+static void write_config_reg(struct sh_pfc *pfc,
+ struct pinmux_cfg_reg *crp,
+ unsigned long field, unsigned long value)
+{
+ void __iomem *mapped_reg;
+ unsigned long mask, pos, data;
+
+ config_reg_helper(pfc, crp, field, &mapped_reg, &mask, &pos);
+
+ pr_debug("write_reg addr = %lx, value = %ld, field = %ld, "
+ "r_width = %ld, f_width = %ld\n",
+ crp->reg, value, field, crp->reg_width, crp->field_width);
+
+ mask = ~(mask << pos);
+ value = value << pos;
+
+ data = gpio_read_raw_reg(mapped_reg, crp->reg_width);
+ data &= mask;
+ data |= value;
+
+ if (pfc->unlock_reg)
+ gpio_write_raw_reg(pfc_phys_to_virt(pfc, pfc->unlock_reg),
+ 32, ~data);
+
+ gpio_write_raw_reg(mapped_reg, crp->reg_width, data);
+}
+
+static int setup_data_reg(struct sh_pfc *pfc, unsigned gpio)
+{
+ struct pinmux_gpio *gpiop = &pfc->gpios[gpio];
+ struct pinmux_data_reg *data_reg;
+ int k, n;
+
+ if (!enum_in_range(gpiop->enum_id, &pfc->data))
+ return -1;
+
+ k = 0;
+ while (1) {
+ data_reg = pfc->data_regs + k;
+
+ if (!data_reg->reg_width)
+ break;
+
+ data_reg->mapped_reg = pfc_phys_to_virt(pfc, data_reg->reg);
+
+ for (n = 0; n < data_reg->reg_width; n++) {
+ if (data_reg->enum_ids[n] == gpiop->enum_id) {
+ gpiop->flags &= ~PINMUX_FLAG_DREG;
+ gpiop->flags |= (k << PINMUX_FLAG_DREG_SHIFT);
+ gpiop->flags &= ~PINMUX_FLAG_DBIT;
+ gpiop->flags |= (n << PINMUX_FLAG_DBIT_SHIFT);
+ return 0;
+ }
+ }
+ k++;
+ }
+
+ BUG();
+
+ return -1;
+}
+
+static void setup_data_regs(struct sh_pfc *pfc)
+{
+ struct pinmux_data_reg *drp;
+ int k;
+
+ for (k = pfc->first_gpio; k <= pfc->last_gpio; k++)
+ setup_data_reg(pfc, k);
+
+ k = 0;
+ while (1) {
+ drp = pfc->data_regs + k;
+
+ if (!drp->reg_width)
+ break;
+
+ drp->reg_shadow = gpio_read_raw_reg(drp->mapped_reg,
+ drp->reg_width);
+ k++;
+ }
+}
+
+int sh_pfc_get_data_reg(struct sh_pfc *pfc, unsigned gpio,
+ struct pinmux_data_reg **drp, int *bitp)
+{
+ struct pinmux_gpio *gpiop = &pfc->gpios[gpio];
+ int k, n;
+
+ if (!enum_in_range(gpiop->enum_id, &pfc->data))
+ return -1;
+
+ k = (gpiop->flags & PINMUX_FLAG_DREG) >> PINMUX_FLAG_DREG_SHIFT;
+ n = (gpiop->flags & PINMUX_FLAG_DBIT) >> PINMUX_FLAG_DBIT_SHIFT;
+ *drp = pfc->data_regs + k;
+ *bitp = n;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sh_pfc_get_data_reg);
+
+static int get_config_reg(struct sh_pfc *pfc, pinmux_enum_t enum_id,
+ struct pinmux_cfg_reg **crp,
+ int *fieldp, int *valuep,
+ unsigned long **cntp)
+{
+ struct pinmux_cfg_reg *config_reg;
+ unsigned long r_width, f_width, curr_width, ncomb;
+ int k, m, n, pos, bit_pos;
+
+ k = 0;
+ while (1) {
+ config_reg = pfc->cfg_regs + k;
+
+ r_width = config_reg->reg_width;
+ f_width = config_reg->field_width;
+
+ if (!r_width)
+ break;
+
+ pos = 0;
+ m = 0;
+ for (bit_pos = 0; bit_pos < r_width; bit_pos += curr_width) {
+ if (f_width)
+ curr_width = f_width;
+ else
+ curr_width = config_reg->var_field_width[m];
+
+ ncomb = 1 << curr_width;
+ for (n = 0; n < ncomb; n++) {
+ if (config_reg->enum_ids[pos + n] == enum_id) {
+ *crp = config_reg;
+ *fieldp = m;
+ *valuep = n;
+ *cntp = &config_reg->cnt[m];
+ return 0;
+ }
+ }
+ pos += ncomb;
+ m++;
+ }
+ k++;
+ }
+
+ return -1;
+}
+
+int sh_pfc_gpio_to_enum(struct sh_pfc *pfc, unsigned gpio, int pos,
+ pinmux_enum_t *enum_idp)
+{
+ pinmux_enum_t enum_id = pfc->gpios[gpio].enum_id;
+ pinmux_enum_t *data = pfc->gpio_data;
+ int k;
+
+ if (!enum_in_range(enum_id, &pfc->data)) {
+ if (!enum_in_range(enum_id, &pfc->mark)) {
+ pr_err("non data/mark enum_id for gpio %d\n", gpio);
+ return -1;
+ }
+ }
+
+ if (pos) {
+ *enum_idp = data[pos + 1];
+ return pos + 1;
+ }
+
+ for (k = 0; k < pfc->gpio_data_size; k++) {
+ if (data[k] == enum_id) {
+ *enum_idp = data[k + 1];
+ return k + 1;
+ }
+ }
+
+ pr_err("cannot locate data/mark enum_id for gpio %d\n", gpio);
+ return -1;
+}
+EXPORT_SYMBOL_GPL(sh_pfc_gpio_to_enum);
+
+int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
+ int cfg_mode)
+{
+ struct pinmux_cfg_reg *cr = NULL;
+ pinmux_enum_t enum_id;
+ struct pinmux_range *range;
+ int in_range, pos, field, value;
+ unsigned long *cntp;
+
+ switch (pinmux_type) {
+
+ case PINMUX_TYPE_FUNCTION:
+ range = NULL;
+ break;
+
+ case PINMUX_TYPE_OUTPUT:
+ range = &pfc->output;
+ break;
+
+ case PINMUX_TYPE_INPUT:
+ range = &pfc->input;
+ break;
+
+ case PINMUX_TYPE_INPUT_PULLUP:
+ range = &pfc->input_pu;
+ break;
+
+ case PINMUX_TYPE_INPUT_PULLDOWN:
+ range = &pfc->input_pd;
+ break;
+
+ default:
+ goto out_err;
+ }
+
+ pos = 0;
+ enum_id = 0;
+ field = 0;
+ value = 0;
+ while (1) {
+ pos = sh_pfc_gpio_to_enum(pfc, gpio, pos, &enum_id);
+ if (pos <= 0)
+ goto out_err;
+
+ if (!enum_id)
+ break;
+
+ /* first check if this is a function enum */
+ in_range = enum_in_range(enum_id, &pfc->function);
+ if (!in_range) {
+ /* not a function enum */
+ if (range) {
+ /*
+ * other range exists, so this pin is
+ * a regular GPIO pin that now is being
+ * bound to a specific direction.
+ *
+ * for this case we only allow function enums
+ * and the enums that match the other range.
+ */
+ in_range = enum_in_range(enum_id, range);
+
+ /*
+ * special case pass through for fixed
+ * input-only or output-only pins without
+ * function enum register association.
+ */
+ if (in_range && enum_id == range->force)
+ continue;
+ } else {
+ /*
+ * no other range exists, so this pin
+ * must then be of the function type.
+ *
+ * allow function type pins to select
+ * any combination of function/in/out
+ * in their MARK lists.
+ */
+ in_range = 1;
+ }
+ }
+
+ if (!in_range)
+ continue;
+
+ if (get_config_reg(pfc, enum_id, &cr,
+ &field, &value, &cntp) != 0)
+ goto out_err;
+
+ switch (cfg_mode) {
+ case GPIO_CFG_DRYRUN:
+ if (!*cntp ||
+ (read_config_reg(pfc, cr, field) != value))
+ continue;
+ break;
+
+ case GPIO_CFG_REQ:
+ write_config_reg(pfc, cr, field, value);
+ *cntp = *cntp + 1;
+ break;
+
+ case GPIO_CFG_FREE:
+ *cntp = *cntp - 1;
+ break;
+ }
+ }
+
+ return 0;
+ out_err:
+ return -1;
+}
+EXPORT_SYMBOL_GPL(sh_pfc_config_gpio);
+
+int register_sh_pfc(struct sh_pfc *pfc)
+{
+ int (*initroutine)(struct sh_pfc *) = NULL;
+ int ret;
+
+ /*
+ * Ensure that the type encoding fits
+ */
+ BUILD_BUG_ON(PINMUX_FLAG_TYPE > ((1 << PINMUX_FLAG_DBIT_SHIFT) - 1));
+
+ if (sh_pfc)
+ return -EBUSY;
+
+ ret = pfc_ioremap(pfc);
+ if (unlikely(ret < 0))
+ return ret;
+
+ spin_lock_init(&pfc->lock);
+
+ pinctrl_provide_dummies();
+ setup_data_regs(pfc);
+
+ sh_pfc = pfc;
+
+ /*
+ * Initialize pinctrl bindings first
+ */
+ initroutine = symbol_request(sh_pfc_register_pinctrl);
+ if (initroutine) {
+ ret = (*initroutine)(pfc);
+ symbol_put_addr(initroutine);
+
+ if (unlikely(ret != 0))
+ goto err;
+ } else {
+ pr_err("failed to initialize pinctrl bindings\n");
+ goto err;
+ }
+
+ /*
+ * Then the GPIO chip
+ */
+ initroutine = symbol_request(sh_pfc_register_gpiochip);
+ if (initroutine) {
+ ret = (*initroutine)(pfc);
+ symbol_put_addr(initroutine);
+
+ /*
+ * If the GPIO chip fails to come up we still leave the
+ * PFC state as it is, given that there are already
+ * extant users of it that have succeeded by this point.
+ */
+ if (unlikely(ret != 0)) {
+ pr_notice("failed to init GPIO chip, ignoring...\n");
+ ret = 0;
+ }
+ }
+
+ pr_info("%s support registered\n", pfc->name);
+
+ return 0;
+
+err:
+ pfc_iounmap(pfc);
+ sh_pfc = NULL;
+
+ return ret;
+}
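
For illustration, a minimal sketch of the GPIO_CFG_DRYRUN/GPIO_CFG_REQ cycle that callers of the exported sh_pfc_config_gpio() helper are expected to follow; it mirrors the pattern used by sh_pfc_config_function() in pinctrl.c below, and the function name and GPIO number here are hypothetical:

/* Sketch only: claim a pin as a dedicated output through the PFC core. */
static int example_claim_output(struct sh_pfc *pfc, unsigned gpio)
{
	/* Dry run first: no config registers are written in this pass. */
	if (sh_pfc_config_gpio(pfc, gpio, PINMUX_TYPE_OUTPUT,
			       GPIO_CFG_DRYRUN) != 0)
		return -EINVAL;

	/* Commit the configuration and bump the per-field use count. */
	if (sh_pfc_config_gpio(pfc, gpio, PINMUX_TYPE_OUTPUT,
			       GPIO_CFG_REQ) != 0)
		return -EINVAL;

	return 0;
}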
diff --git a/drivers/sh/pfc/gpio.c b/drivers/sh/pfc/gpio.c
new file mode 100644
index 0000000..62bca98
--- /dev/null
+++ b/drivers/sh/pfc/gpio.c
@@ -0,0 +1,239 @@
+/*
+ * SuperH Pin Function Controller GPIO driver.
+ *
+ * Copyright (C) 2008 Magnus Damm
+ * Copyright (C) 2009 - 2012 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define pr_fmt(fmt) "sh_pfc " KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
+
+struct sh_pfc_chip {
+ struct sh_pfc *pfc;
+ struct gpio_chip gpio_chip;
+};
+
+static struct sh_pfc_chip *gpio_to_pfc_chip(struct gpio_chip *gc)
+{
+ return container_of(gc, struct sh_pfc_chip, gpio_chip);
+}
+
+static struct sh_pfc *gpio_to_pfc(struct gpio_chip *gc)
+{
+ return gpio_to_pfc_chip(gc)->pfc;
+}
+
+static int sh_gpio_request(struct gpio_chip *gc, unsigned offset)
+{
+ return pinctrl_request_gpio(offset);
+}
+
+static void sh_gpio_free(struct gpio_chip *gc, unsigned offset)
+{
+ pinctrl_free_gpio(offset);
+}
+
+static void sh_gpio_set_value(struct sh_pfc *pfc, unsigned gpio, int value)
+{
+ struct pinmux_data_reg *dr = NULL;
+ int bit = 0;
+
+ if (!pfc || sh_pfc_get_data_reg(pfc, gpio, &dr, &bit) != 0)
+ BUG();
+ else
+ sh_pfc_write_bit(dr, bit, value);
+}
+
+static int sh_gpio_get_value(struct sh_pfc *pfc, unsigned gpio)
+{
+ struct pinmux_data_reg *dr = NULL;
+ int bit = 0;
+
+ if (!pfc || sh_pfc_get_data_reg(pfc, gpio, &dr, &bit) != 0)
+ return -EINVAL;
+
+ return sh_pfc_read_bit(dr, bit);
+}
+
+static int sh_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
+{
+ return pinctrl_gpio_direction_input(offset);
+}
+
+static int sh_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ sh_gpio_set_value(gpio_to_pfc(gc), offset, value);
+
+ return pinctrl_gpio_direction_output(offset);
+}
+
+static int sh_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ return sh_gpio_get_value(gpio_to_pfc(gc), offset);
+}
+
+static void sh_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+{
+ sh_gpio_set_value(gpio_to_pfc(gc), offset, value);
+}
+
+static int sh_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct sh_pfc *pfc = gpio_to_pfc(gc);
+ pinmux_enum_t enum_id;
+ pinmux_enum_t *enum_ids;
+ int i, k, pos;
+
+ pos = 0;
+ enum_id = 0;
+ while (1) {
+ pos = sh_pfc_gpio_to_enum(pfc, offset, pos, &enum_id);
+ if (pos <= 0 || !enum_id)
+ break;
+
+ for (i = 0; i < pfc->gpio_irq_size; i++) {
+ enum_ids = pfc->gpio_irq[i].enum_ids;
+ for (k = 0; enum_ids[k]; k++) {
+ if (enum_ids[k] == enum_id)
+ return pfc->gpio_irq[i].irq;
+ }
+ }
+ }
+
+ return -ENOSYS;
+}
+
+static void sh_pfc_gpio_setup(struct sh_pfc_chip *chip)
+{
+ struct sh_pfc *pfc = chip->pfc;
+ struct gpio_chip *gc = &chip->gpio_chip;
+
+ gc->request = sh_gpio_request;
+ gc->free = sh_gpio_free;
+ gc->direction_input = sh_gpio_direction_input;
+ gc->get = sh_gpio_get;
+ gc->direction_output = sh_gpio_direction_output;
+ gc->set = sh_gpio_set;
+ gc->to_irq = sh_gpio_to_irq;
+
+ WARN_ON(pfc->first_gpio != 0); /* needs testing */
+
+ gc->label = pfc->name;
+ gc->owner = THIS_MODULE;
+ gc->base = pfc->first_gpio;
+ gc->ngpio = (pfc->last_gpio - pfc->first_gpio) + 1;
+}
+
+int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
+{
+ struct sh_pfc_chip *chip;
+ int ret;
+
+ chip = kzalloc(sizeof(struct sh_pfc_chip), GFP_KERNEL);
+ if (unlikely(!chip))
+ return -ENOMEM;
+
+ chip->pfc = pfc;
+
+ sh_pfc_gpio_setup(chip);
+
+ ret = gpiochip_add(&chip->gpio_chip);
+ if (unlikely(ret < 0)) {
+ kfree(chip);
+ return ret;
+ }
+
+ pr_info("%s handling gpio %d -> %d\n",
+ pfc->name, pfc->first_gpio, pfc->last_gpio);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sh_pfc_register_gpiochip);
+
+static int sh_pfc_gpio_match(struct gpio_chip *gc, void *data)
+{
+ return !!strstr(gc->label, data);
+}
+
+static int __devinit sh_pfc_gpio_probe(struct platform_device *pdev)
+{
+ struct sh_pfc_chip *chip;
+ struct gpio_chip *gc;
+
+ gc = gpiochip_find("_pfc", sh_pfc_gpio_match);
+ if (unlikely(!gc)) {
+ pr_err("Cant find gpio chip\n");
+ return -ENODEV;
+ }
+
+ chip = gpio_to_pfc_chip(gc);
+ platform_set_drvdata(pdev, chip);
+
+ pr_info("attaching to GPIO chip %s\n", chip->pfc->name);
+
+ return 0;
+}
+
+static int __devexit sh_pfc_gpio_remove(struct platform_device *pdev)
+{
+ struct sh_pfc_chip *chip = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = gpiochip_remove(&chip->gpio_chip);
+ if (unlikely(ret < 0))
+ return ret;
+
+ kfree(chip);
+ return 0;
+}
+
+static struct platform_driver sh_pfc_gpio_driver = {
+ .probe = sh_pfc_gpio_probe,
+ .remove = __devexit_p(sh_pfc_gpio_remove),
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static struct platform_device sh_pfc_gpio_device = {
+ .name = KBUILD_MODNAME,
+ .id = -1,
+};
+
+static int __init sh_pfc_gpio_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&sh_pfc_gpio_driver);
+ if (likely(!rc)) {
+ rc = platform_device_register(&sh_pfc_gpio_device);
+ if (unlikely(rc))
+ platform_driver_unregister(&sh_pfc_gpio_driver);
+ }
+
+ return rc;
+}
+
+static void __exit sh_pfc_gpio_exit(void)
+{
+ platform_device_unregister(&sh_pfc_gpio_device);
+ platform_driver_unregister(&sh_pfc_gpio_driver);
+}
+
+module_init(sh_pfc_gpio_init);
+module_exit(sh_pfc_gpio_exit);
+
+MODULE_AUTHOR("Magnus Damm, Paul Mundt");
+MODULE_DESCRIPTION("GPIO driver for SuperH pin function controller");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:pfc-gpio");
diff --git a/drivers/sh/pfc/pinctrl.c b/drivers/sh/pfc/pinctrl.c
new file mode 100644
index 0000000..0802b6c
--- /dev/null
+++ b/drivers/sh/pfc/pinctrl.c
@@ -0,0 +1,530 @@
+/*
+ * SuperH Pin Function Controller pinmux support.
+ *
+ * Copyright (C) 2012 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define DRV_NAME "pinctrl-sh_pfc"
+
+#define pr_fmt(fmt) DRV_NAME " " KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sh_pfc.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+struct sh_pfc_pinctrl {
+ struct pinctrl_dev *pctl;
+ struct sh_pfc *pfc;
+
+ struct pinmux_gpio **functions;
+ unsigned int nr_functions;
+
+ struct pinctrl_pin_desc *pads;
+ unsigned int nr_pads;
+
+ spinlock_t lock;
+};
+
+static struct sh_pfc_pinctrl *sh_pfc_pmx;
+
+static int sh_pfc_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ return pmx->nr_pads;
+}
+
+static const char *sh_pfc_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ return pmx->pads[selector].name;
+}
+
+static int sh_pfc_get_group_pins(struct pinctrl_dev *pctldev, unsigned group,
+ const unsigned **pins, unsigned *num_pins)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = &pmx->pads[group].number;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static void sh_pfc_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned offset)
+{
+ seq_printf(s, "%s", DRV_NAME);
+}
+
+static struct pinctrl_ops sh_pfc_pinctrl_ops = {
+ .get_groups_count = sh_pfc_get_groups_count,
+ .get_group_name = sh_pfc_get_group_name,
+ .get_group_pins = sh_pfc_get_group_pins,
+ .pin_dbg_show = sh_pfc_pin_dbg_show,
+};
+
+static int sh_pfc_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ return pmx->nr_functions;
+}
+
+static const char *sh_pfc_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ return pmx->functions[selector]->name;
+}
+
+static int sh_pfc_get_function_groups(struct pinctrl_dev *pctldev, unsigned func,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = &pmx->functions[func]->name;
+ *num_groups = 1;
+
+ return 0;
+}
+
+static int sh_pfc_noop_enable(struct pinctrl_dev *pctldev, unsigned func,
+ unsigned group)
+{
+ return 0;
+}
+
+static void sh_pfc_noop_disable(struct pinctrl_dev *pctldev, unsigned func,
+ unsigned group)
+{
+}
+
+static inline int sh_pfc_config_function(struct sh_pfc *pfc, unsigned offset)
+{
+ if (sh_pfc_config_gpio(pfc, offset,
+ PINMUX_TYPE_FUNCTION,
+ GPIO_CFG_DRYRUN) != 0)
+ return -EINVAL;
+
+ if (sh_pfc_config_gpio(pfc, offset,
+ PINMUX_TYPE_FUNCTION,
+ GPIO_CFG_REQ) != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sh_pfc_reconfig_pin(struct sh_pfc *pfc, unsigned offset,
+ int new_type)
+{
+ unsigned long flags;
+ int pinmux_type;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&pfc->lock, flags);
+
+ pinmux_type = pfc->gpios[offset].flags & PINMUX_FLAG_TYPE;
+
+ /*
+ * See if the present config needs to first be de-configured.
+ */
+ switch (pinmux_type) {
+ case PINMUX_TYPE_GPIO:
+ break;
+ case PINMUX_TYPE_OUTPUT:
+ case PINMUX_TYPE_INPUT:
+ case PINMUX_TYPE_INPUT_PULLUP:
+ case PINMUX_TYPE_INPUT_PULLDOWN:
+ sh_pfc_config_gpio(pfc, offset, pinmux_type, GPIO_CFG_FREE);
+ break;
+ default:
+ goto err;
+ }
+
+ /*
+ * Dry run
+ */
+ if (sh_pfc_config_gpio(pfc, offset, new_type,
+ GPIO_CFG_DRYRUN) != 0)
+ goto err;
+
+ /*
+ * Request
+ */
+ if (sh_pfc_config_gpio(pfc, offset, new_type,
+ GPIO_CFG_REQ) != 0)
+ goto err;
+
+ pfc->gpios[offset].flags &= ~PINMUX_FLAG_TYPE;
+ pfc->gpios[offset].flags |= new_type;
+
+ ret = 0;
+
+err:
+ spin_unlock_irqrestore(&pfc->lock, flags);
+
+ return ret;
+}
+
+
+static int sh_pfc_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ struct sh_pfc *pfc = pmx->pfc;
+ unsigned long flags;
+ int ret, pinmux_type;
+
+ spin_lock_irqsave(&pfc->lock, flags);
+
+ pinmux_type = pfc->gpios[offset].flags & PINMUX_FLAG_TYPE;
+
+ switch (pinmux_type) {
+ case PINMUX_TYPE_FUNCTION:
+ pr_notice_once("Use of GPIO API for function requests is "
+ "deprecated, convert to pinctrl\n");
+ /* handle for now */
+ ret = sh_pfc_config_function(pfc, offset);
+ if (unlikely(ret < 0))
+ goto err;
+
+ break;
+ case PINMUX_TYPE_GPIO:
+ break;
+ default:
+ pr_err("Unsupported mux type (%d), bailing...\n", pinmux_type);
+ return -ENOTSUPP;
+ }
+
+ ret = 0;
+
+err:
+ spin_unlock_irqrestore(&pfc->lock, flags);
+
+ return ret;
+}
+
+static void sh_pfc_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ struct sh_pfc *pfc = pmx->pfc;
+ unsigned long flags;
+ int pinmux_type;
+
+ spin_lock_irqsave(&pfc->lock, flags);
+
+ pinmux_type = pfc->gpios[offset].flags & PINMUX_FLAG_TYPE;
+
+ sh_pfc_config_gpio(pfc, offset, pinmux_type, GPIO_CFG_FREE);
+
+ spin_unlock_irqrestore(&pfc->lock, flags);
+}
+
+static int sh_pfc_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset, bool input)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ int type = input ? PINMUX_TYPE_INPUT : PINMUX_TYPE_OUTPUT;
+
+ return sh_pfc_reconfig_pin(pmx->pfc, offset, type);
+}
+
+static struct pinmux_ops sh_pfc_pinmux_ops = {
+ .get_functions_count = sh_pfc_get_functions_count,
+ .get_function_name = sh_pfc_get_function_name,
+ .get_function_groups = sh_pfc_get_function_groups,
+ .enable = sh_pfc_noop_enable,
+ .disable = sh_pfc_noop_disable,
+ .gpio_request_enable = sh_pfc_gpio_request_enable,
+ .gpio_disable_free = sh_pfc_gpio_disable_free,
+ .gpio_set_direction = sh_pfc_gpio_set_direction,
+};
+
+static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *config)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ struct sh_pfc *pfc = pmx->pfc;
+
+ *config = pfc->gpios[pin].flags & PINMUX_FLAG_TYPE;
+
+ return 0;
+}
+
+static int sh_pfc_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long config)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ struct sh_pfc *pfc = pmx->pfc;
+
+ /* Validate the new type */
+ if (config >= PINMUX_FLAG_TYPE)
+ return -EINVAL;
+
+ return sh_pfc_reconfig_pin(pmx->pfc, pin, config);
+}
+
+static void sh_pfc_pinconf_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned pin)
+{
+ const char *pinmux_type_str[] = {
+ [PINMUX_TYPE_NONE] = "none",
+ [PINMUX_TYPE_FUNCTION] = "function",
+ [PINMUX_TYPE_GPIO] = "gpio",
+ [PINMUX_TYPE_OUTPUT] = "output",
+ [PINMUX_TYPE_INPUT] = "input",
+ [PINMUX_TYPE_INPUT_PULLUP] = "input bias pull up",
+ [PINMUX_TYPE_INPUT_PULLDOWN] = "input bias pull down",
+ };
+ unsigned long config;
+ int rc;
+
+ rc = sh_pfc_pinconf_get(pctldev, pin, &config);
+ if (unlikely(rc != 0))
+ return;
+
+ seq_printf(s, " %s", pinmux_type_str[config]);
+}
+
+static struct pinconf_ops sh_pfc_pinconf_ops = {
+ .pin_config_get = sh_pfc_pinconf_get,
+ .pin_config_set = sh_pfc_pinconf_set,
+ .pin_config_dbg_show = sh_pfc_pinconf_dbg_show,
+};
+
+static struct pinctrl_gpio_range sh_pfc_gpio_range = {
+ .name = DRV_NAME,
+ .id = 0,
+};
+
+static struct pinctrl_desc sh_pfc_pinctrl_desc = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pctlops = &sh_pfc_pinctrl_ops,
+ .pmxops = &sh_pfc_pinmux_ops,
+ .confops = &sh_pfc_pinconf_ops,
+};
+
+int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
+{
+ sh_pfc_pmx = kzalloc(sizeof(struct sh_pfc_pinctrl), GFP_KERNEL);
+ if (unlikely(!sh_pfc_pmx))
+ return -ENOMEM;
+
+ spin_lock_init(&sh_pfc_pmx->lock);
+
+ sh_pfc_pmx->pfc = pfc;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sh_pfc_register_pinctrl);
+
+static inline void __devinit sh_pfc_map_one_gpio(struct sh_pfc *pfc,
+ struct sh_pfc_pinctrl *pmx,
+ struct pinmux_gpio *gpio,
+ unsigned offset)
+{
+ struct pinmux_data_reg *dummy;
+ unsigned long flags;
+ int bit;
+
+ gpio->flags &= ~PINMUX_FLAG_TYPE;
+
+ if (sh_pfc_get_data_reg(pfc, offset, &dummy, &bit) == 0)
+ gpio->flags |= PINMUX_TYPE_GPIO;
+ else {
+ gpio->flags |= PINMUX_TYPE_FUNCTION;
+
+ spin_lock_irqsave(&pmx->lock, flags);
+ pmx->nr_functions++;
+ spin_unlock_irqrestore(&pmx->lock, flags);
+ }
+}
+
+/* pinmux ranges -> pinctrl pin descs */
+static int __devinit sh_pfc_map_gpios(struct sh_pfc *pfc,
+ struct sh_pfc_pinctrl *pmx)
+{
+ unsigned long flags;
+ int i;
+
+ pmx->nr_pads = pfc->last_gpio - pfc->first_gpio + 1;
+
+ pmx->pads = kmalloc(sizeof(struct pinctrl_pin_desc) * pmx->nr_pads,
+ GFP_KERNEL);
+ if (unlikely(!pmx->pads)) {
+ pmx->nr_pads = 0;
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&pfc->lock, flags);
+
+ /*
+ * We don't necessarily have a 1:1 mapping between pin and linux
+ * GPIO number, as the latter maps to the associated enum_id.
+ * Care needs to be taken to translate back to pin space when
+ * dealing with any pin configurations.
+ */
+ for (i = 0; i < pmx->nr_pads; i++) {
+ struct pinctrl_pin_desc *pin = pmx->pads + i;
+ struct pinmux_gpio *gpio = pfc->gpios + i;
+
+ pin->number = pfc->first_gpio + i;
+ pin->name = gpio->name;
+
+ /* XXX */
+ if (unlikely(!gpio->enum_id))
+ continue;
+
+ sh_pfc_map_one_gpio(pfc, pmx, gpio, i);
+ }
+
+ spin_unlock_irqrestore(&pfc->lock, flags);
+
+ sh_pfc_pinctrl_desc.pins = pmx->pads;
+ sh_pfc_pinctrl_desc.npins = pmx->nr_pads;
+
+ return 0;
+}
+
+static int __devinit sh_pfc_map_functions(struct sh_pfc *pfc,
+ struct sh_pfc_pinctrl *pmx)
+{
+ unsigned long flags;
+ int i, fn;
+
+ pmx->functions = kzalloc(pmx->nr_functions * sizeof(void *),
+ GFP_KERNEL);
+ if (unlikely(!pmx->functions))
+ return -ENOMEM;
+
+ spin_lock_irqsave(&pmx->lock, flags);
+
+ for (i = fn = 0; i < pmx->nr_pads; i++) {
+ struct pinmux_gpio *gpio = pfc->gpios + i;
+
+ if ((gpio->flags & PINMUX_FLAG_TYPE) == PINMUX_TYPE_FUNCTION)
+ pmx->functions[fn++] = gpio;
+ }
+
+ spin_unlock_irqrestore(&pmx->lock, flags);
+
+ return 0;
+}
+
+static int __devinit sh_pfc_pinctrl_probe(struct platform_device *pdev)
+{
+ struct sh_pfc *pfc;
+ int ret;
+
+ if (unlikely(!sh_pfc_pmx))
+ return -ENODEV;
+
+ pfc = sh_pfc_pmx->pfc;
+
+ ret = sh_pfc_map_gpios(pfc, sh_pfc_pmx);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = sh_pfc_map_functions(pfc, sh_pfc_pmx);
+ if (unlikely(ret != 0))
+ goto free_pads;
+
+ sh_pfc_pmx->pctl = pinctrl_register(&sh_pfc_pinctrl_desc, &pdev->dev,
+ sh_pfc_pmx);
+ if (IS_ERR(sh_pfc_pmx->pctl)) {
+ ret = PTR_ERR(sh_pfc_pmx->pctl);
+ goto free_functions;
+ }
+
+ sh_pfc_gpio_range.npins = pfc->last_gpio - pfc->first_gpio + 1;
+ sh_pfc_gpio_range.base = pfc->first_gpio;
+ sh_pfc_gpio_range.pin_base = pfc->first_gpio;
+
+ pinctrl_add_gpio_range(sh_pfc_pmx->pctl, &sh_pfc_gpio_range);
+
+ platform_set_drvdata(pdev, sh_pfc_pmx);
+
+ return 0;
+
+free_functions:
+ kfree(sh_pfc_pmx->functions);
+free_pads:
+ kfree(sh_pfc_pmx->pads);
+ kfree(sh_pfc_pmx);
+
+ return ret;
+}
+
+static int __devexit sh_pfc_pinctrl_remove(struct platform_device *pdev)
+{
+ struct sh_pfc_pinctrl *pmx = platform_get_drvdata(pdev);
+
+ pinctrl_remove_gpio_range(pmx->pctl, &sh_pfc_gpio_range);
+ pinctrl_unregister(pmx->pctl);
+
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(sh_pfc_pmx->functions);
+ kfree(sh_pfc_pmx->pads);
+ kfree(sh_pfc_pmx);
+
+ return 0;
+}
+
+static struct platform_driver sh_pfc_pinctrl_driver = {
+ .probe = sh_pfc_pinctrl_probe,
+ .remove = __devexit_p(sh_pfc_pinctrl_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static struct platform_device sh_pfc_pinctrl_device = {
+ .name = DRV_NAME,
+ .id = -1,
+};
+
+static int __init sh_pfc_pinctrl_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&sh_pfc_pinctrl_driver);
+ if (likely(!rc)) {
+ rc = platform_device_register(&sh_pfc_pinctrl_device);
+ if (unlikely(rc))
+ platform_driver_unregister(&sh_pfc_pinctrl_driver);
+ }
+
+ return rc;
+}
+
+static void __exit sh_pfc_pinctrl_exit(void)
+{
+ platform_driver_unregister(&sh_pfc_pinctrl_driver);
+}
+
+subsys_initcall(sh_pfc_pinctrl_init);
+module_exit(sh_pfc_pinctrl_exit);
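
Since function selection is now expected to go through pinctrl rather than the GPIO API (see the deprecation notice in sh_pfc_gpio_request_enable() above), a client driver would typically pick up its pin state along these lines; this is only a sketch, and the device pointer is assumed to come from the caller:

#include <linux/pinctrl/consumer.h>
#include <linux/err.h>

/* Sketch only: bind the device's default pin state via the pinctrl core,
 * which resolves it through the sh_pfc pinmux_ops registered above. */
static int example_apply_default_pins(struct device *dev)
{
	struct pinctrl *p;

	p = pinctrl_get_select_default(dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	/* Keep 'p' around and call pinctrl_put(p) on teardown. */
	return 0;
}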
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 46ef5fe..0c73dd4 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -801,7 +801,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
mcspi_dma = &mcspi->dma_channels[spi->chip_select];
if (!cs) {
- cs = devm_kzalloc(&spi->dev , sizeof *cs, GFP_KERNEL);
+ cs = kzalloc(sizeof *cs, GFP_KERNEL);
if (!cs)
return -ENOMEM;
cs->base = mcspi->base + spi->chip_select * 0x14;
@@ -842,6 +842,7 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
cs = spi->controller_state;
list_del(&cs->node);
+ kfree(cs);
}
if (spi->chip_select < spi->master->num_chipselect) {
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 972a94c..646a765 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -27,10 +27,15 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <mach/dma.h>
#include <plat/s3c64xx-spi.h>
+#define MAX_SPI_PORTS 3
+
/* Registers and bit-fields */
#define S3C64XX_SPI_CH_CFG 0x00
@@ -74,11 +79,6 @@
#define S3C64XX_SPI_SLAVE_AUTO (1<<1)
#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0)
-#define S3C64XX_SPI_ACT(c) writel(0, (c)->regs + S3C64XX_SPI_SLAVE_SEL)
-
-#define S3C64XX_SPI_DEACT(c) writel(S3C64XX_SPI_SLAVE_SIG_INACT, \
- (c)->regs + S3C64XX_SPI_SLAVE_SEL)
-
#define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
#define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4)
@@ -113,13 +113,12 @@
#define S3C64XX_SPI_FBCLK_MSK (3<<0)
-#define S3C64XX_SPI_ST_TRLCNTZ(v, i) ((((v) >> (i)->rx_lvl_offset) & \
- (((i)->fifo_lvl_mask + 1))) \
- ? 1 : 0)
-
-#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & (1 << (i)->tx_st_done)) ? 1 : 0)
-#define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask)
-#define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask)
+#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
+#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
+ (1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
+#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
+#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
+ FIFO_LVL_MASK(i))
#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
#define S3C64XX_SPI_TRAILCNT_OFF 19
@@ -135,6 +134,29 @@ struct s3c64xx_spi_dma_data {
unsigned ch;
enum dma_data_direction direction;
enum dma_ch dmach;
+ struct property *dma_prop;
+};
+
+/**
+ * struct s3c64xx_spi_port_config - SPI Controller hardware info
+ * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
+ * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
+ * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
+ * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
+ * @clk_from_cmu: True, if the controller does not include a clock mux and
+ * prescaler unit.
+ *
+ * The Samsung s3c64xx SPI controllers are used on various Samsung SoCs but
+ * differ in some aspects, such as the size of the FIFO and the SPI bus clock
+ * setup. Such differences are specified to the driver using this structure,
+ * which is provided as driver data to the driver.
+ */
+struct s3c64xx_spi_port_config {
+ int fifo_lvl_mask[MAX_SPI_PORTS];
+ int rx_lvl_offset;
+ int tx_st_done;
+ bool high_speed;
+ bool clk_from_cmu;
};
/**
@@ -175,6 +197,9 @@ struct s3c64xx_spi_driver_data {
struct s3c64xx_spi_dma_data rx_dma;
struct s3c64xx_spi_dma_data tx_dma;
struct samsung_dma_ops *ops;
+ struct s3c64xx_spi_port_config *port_conf;
+ unsigned int port_id;
+ unsigned long gpios[4];
};
static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
@@ -183,7 +208,6 @@ static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
{
- struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
unsigned long loops;
u32 val;
@@ -199,7 +223,7 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
loops = msecs_to_loops(1);
do {
val = readl(regs + S3C64XX_SPI_STATUS);
- } while (TX_FIFO_LVL(val, sci) && loops--);
+ } while (TX_FIFO_LVL(val, sdd) && loops--);
if (loops == 0)
dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
@@ -208,7 +232,7 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
loops = msecs_to_loops(1);
do {
val = readl(regs + S3C64XX_SPI_STATUS);
- if (RX_FIFO_LVL(val, sci))
+ if (RX_FIFO_LVL(val, sdd))
readl(regs + S3C64XX_SPI_RX_DATA);
else
break;
@@ -262,14 +286,24 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
unsigned len, dma_addr_t buf)
{
struct s3c64xx_spi_driver_data *sdd;
- struct samsung_dma_prep_info info;
+ struct samsung_dma_prep info;
+ struct samsung_dma_config config;
- if (dma->direction == DMA_DEV_TO_MEM)
+ if (dma->direction == DMA_DEV_TO_MEM) {
sdd = container_of((void *)dma,
struct s3c64xx_spi_driver_data, rx_dma);
- else
+ config.direction = sdd->rx_dma.direction;
+ config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
+ config.width = sdd->cur_bpw / 8;
+ sdd->ops->config(sdd->rx_dma.ch, &config);
+ } else {
sdd = container_of((void *)dma,
struct s3c64xx_spi_driver_data, tx_dma);
+ config.direction = sdd->tx_dma.direction;
+ config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
+ config.width = sdd->cur_bpw / 8;
+ sdd->ops->config(sdd->tx_dma.ch, &config);
+ }
info.cap = DMA_SLAVE;
info.len = len;
@@ -284,20 +318,17 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
{
- struct samsung_dma_info info;
+ struct samsung_dma_req req;
sdd->ops = samsung_dma_get_ops();
- info.cap = DMA_SLAVE;
- info.client = &s3c64xx_spi_dma_client;
- info.width = sdd->cur_bpw / 8;
+ req.cap = DMA_SLAVE;
+ req.client = &s3c64xx_spi_dma_client;
- info.direction = sdd->rx_dma.direction;
- info.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
- sdd->rx_dma.ch = sdd->ops->request(sdd->rx_dma.dmach, &info);
- info.direction = sdd->tx_dma.direction;
- info.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
- sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &info);
+ req.dt_dmach_prop = sdd->rx_dma.dma_prop;
+ sdd->rx_dma.ch = sdd->ops->request(sdd->rx_dma.dmach, &req);
+ req.dt_dmach_prop = sdd->tx_dma.dma_prop;
+ sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &req);
return 1;
}
@@ -306,7 +337,6 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi,
struct spi_transfer *xfer, int dma_mode)
{
- struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
u32 modecfg, chcfg;
@@ -356,7 +386,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
if (xfer->rx_buf != NULL) {
sdd->state |= RXBUSY;
- if (sci->high_speed && sdd->cur_speed >= 30000000UL
+ if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
&& !(sdd->cur_mode & SPI_CPHA))
chcfg |= S3C64XX_SPI_CH_HS_EN;
@@ -383,20 +413,19 @@ static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
/* Deselect the last toggled device */
cs = sdd->tgl_spi->controller_data;
- cs->set_level(cs->line,
- spi->mode & SPI_CS_HIGH ? 0 : 1);
+ gpio_set_value(cs->line,
+ spi->mode & SPI_CS_HIGH ? 0 : 1);
}
sdd->tgl_spi = NULL;
}
cs = spi->controller_data;
- cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
+ gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
}
static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
struct spi_transfer *xfer, int dma_mode)
{
- struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
unsigned long val;
int ms;
@@ -413,7 +442,7 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
val = msecs_to_loops(ms);
do {
status = readl(regs + S3C64XX_SPI_STATUS);
- } while (RX_FIFO_LVL(status, sci) < xfer->len && --val);
+ } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
}
if (!val)
@@ -432,8 +461,8 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
if (xfer->rx_buf == NULL) {
val = msecs_to_loops(10);
status = readl(regs + S3C64XX_SPI_STATUS);
- while ((TX_FIFO_LVL(status, sci)
- || !S3C64XX_SPI_ST_TX_DONE(status, sci))
+ while ((TX_FIFO_LVL(status, sdd)
+ || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
&& --val) {
cpu_relax();
status = readl(regs + S3C64XX_SPI_STATUS);
@@ -477,17 +506,16 @@ static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
if (sdd->tgl_spi == spi)
sdd->tgl_spi = NULL;
- cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
+ gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
}
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
- struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
u32 val;
/* Disable Clock */
- if (sci->clk_from_cmu) {
+ if (sdd->port_conf->clk_from_cmu) {
clk_disable(sdd->src_clk);
} else {
val = readl(regs + S3C64XX_SPI_CLK_CFG);
@@ -531,7 +559,7 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
writel(val, regs + S3C64XX_SPI_MODE_CFG);
- if (sci->clk_from_cmu) {
+ if (sdd->port_conf->clk_from_cmu) {
/* Configure Clock */
/* There is half-multiplier before the SPI */
clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
@@ -557,7 +585,6 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
struct spi_message *msg)
{
- struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
struct device *dev = &sdd->pdev->dev;
struct spi_transfer *xfer;
@@ -573,7 +600,7 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
/* Map until end or first fail */
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
+ if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
continue;
if (xfer->tx_buf != NULL) {
@@ -607,7 +634,6 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
struct spi_message *msg)
{
- struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
struct device *dev = &sdd->pdev->dev;
struct spi_transfer *xfer;
@@ -616,7 +642,7 @@ static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
+ if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
continue;
if (xfer->rx_buf != NULL
@@ -635,7 +661,6 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
struct spi_device *spi = msg->spi;
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
struct spi_transfer *xfer;
@@ -690,7 +715,7 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
}
/* Polling method for xfers not bigger than FIFO capacity */
- if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
+ if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
use_dma = 0;
else
use_dma = 1;
@@ -707,14 +732,15 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
enable_cs(sdd, spi);
/* Start the signals */
- S3C64XX_SPI_ACT(sdd);
+ writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
spin_unlock_irqrestore(&sdd->lock, flags);
status = wait_for_xfer(sdd, xfer, use_dma);
/* Quiesce the signals */
- S3C64XX_SPI_DEACT(sdd);
+ writel(S3C64XX_SPI_SLAVE_SIG_INACT,
+ sdd->regs + S3C64XX_SPI_SLAVE_SEL);
if (status) {
dev_err(&spi->dev, "I/O Error: "
@@ -795,6 +821,48 @@ static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
return 0;
}
+static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
+ struct s3c64xx_spi_driver_data *sdd,
+ struct spi_device *spi)
+{
+ struct s3c64xx_spi_csinfo *cs;
+ struct device_node *slave_np, *data_np;
+ u32 fb_delay = 0;
+
+ slave_np = spi->dev.of_node;
+ if (!slave_np) {
+ dev_err(&spi->dev, "device node not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ for_each_child_of_node(slave_np, data_np)
+ if (!strcmp(data_np->name, "controller-data"))
+ break;
+ if (!data_np) {
+ dev_err(&spi->dev, "child node 'controller-data' not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs) {
+ dev_err(&spi->dev, "could not allocate memory for controller"
+ " data\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ cs->line = of_get_named_gpio(data_np, "cs-gpio", 0);
+ if (!gpio_is_valid(cs->line)) {
+ dev_err(&spi->dev, "chip select gpio is not specified or "
+ "invalid\n");
+ kfree(cs);
+ return ERR_PTR(-EINVAL);
+ }
+
+ of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
+ cs->fb_delay = fb_delay;
+ return cs;
+}
+
/*
* Here we only check the validity of requested configuration
* and save the configuration in a local data-structure.
@@ -808,14 +876,31 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
struct s3c64xx_spi_info *sci;
struct spi_message *msg;
unsigned long flags;
- int err = 0;
+ int err;
- if (cs == NULL || cs->set_level == NULL) {
+ sdd = spi_master_get_devdata(spi->master);
+ if (!cs && spi->dev.of_node) {
+ cs = s3c64xx_get_slave_ctrldata(sdd, spi);
+ spi->controller_data = cs;
+ }
+
+ if (IS_ERR_OR_NULL(cs)) {
dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
return -ENODEV;
}
- sdd = spi_master_get_devdata(spi->master);
+ if (!spi_get_ctldata(spi)) {
+ err = gpio_request_one(cs->line, GPIOF_OUT_INIT_HIGH,
+ dev_name(&spi->dev));
+ if (err) {
+ dev_err(&spi->dev,
+ "Failed to get /CS gpio [%d]: %d\n",
+ cs->line, err);
+ goto err_gpio_req;
+ }
+ spi_set_ctldata(spi, cs);
+ }
+
sci = sdd->cntrlr_info;
spin_lock_irqsave(&sdd->lock, flags);
@@ -826,7 +911,8 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
dev_err(&spi->dev,
"setup: attempt while mssg in queue!\n");
spin_unlock_irqrestore(&sdd->lock, flags);
- return -EBUSY;
+ err = -EBUSY;
+ goto err_msgq;
}
}
@@ -844,7 +930,7 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
pm_runtime_get_sync(&sdd->pdev->dev);
/* Check if we can provide the requested rate */
- if (!sci->clk_from_cmu) {
+ if (!sdd->port_conf->clk_from_cmu) {
u32 psr, speed;
/* Max possible */
@@ -869,22 +955,44 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
}
speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
- if (spi->max_speed_hz >= speed)
+ if (spi->max_speed_hz >= speed) {
spi->max_speed_hz = speed;
- else
+ } else {
err = -EINVAL;
+ goto setup_exit;
+ }
}
pm_runtime_put(&sdd->pdev->dev);
+ disable_cs(sdd, spi);
+ return 0;
setup_exit:
-
/* setup() returns with device de-selected */
disable_cs(sdd, spi);
+err_msgq:
+ gpio_free(cs->line);
+ spi_set_ctldata(spi, NULL);
+
+err_gpio_req:
+ kfree(cs);
+
return err;
}
+static void s3c64xx_spi_cleanup(struct spi_device *spi)
+{
+ struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
+
+ if (cs) {
+ gpio_free(cs->line);
+ if (spi->dev.of_node)
+ kfree(cs);
+ }
+ spi_set_ctldata(spi, NULL);
+}
+
static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
{
struct s3c64xx_spi_driver_data *sdd = data;
@@ -920,12 +1028,12 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
sdd->cur_speed = 0;
- S3C64XX_SPI_DEACT(sdd);
+ writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
/* Disable Interrupts - we use Polling if not DMA mode */
writel(0, regs + S3C64XX_SPI_INT_EN);
- if (!sci->clk_from_cmu)
+ if (!sdd->port_conf->clk_from_cmu)
writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
regs + S3C64XX_SPI_CLK_CFG);
writel(0, regs + S3C64XX_SPI_MODE_CFG);
@@ -946,40 +1054,165 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
flush_fifo(sdd);
}
-static int __init s3c64xx_spi_probe(struct platform_device *pdev)
+static int __devinit s3c64xx_spi_get_dmares(
+ struct s3c64xx_spi_driver_data *sdd, bool tx)
+{
+ struct platform_device *pdev = sdd->pdev;
+ struct s3c64xx_spi_dma_data *dma_data;
+ struct property *prop;
+ struct resource *res;
+ char prop_name[15], *chan_str;
+
+ if (tx) {
+ dma_data = &sdd->tx_dma;
+ dma_data->direction = DMA_TO_DEVICE;
+ chan_str = "tx";
+ } else {
+ dma_data = &sdd->rx_dma;
+ dma_data->direction = DMA_FROM_DEVICE;
+ chan_str = "rx";
+ }
+
+ if (!sdd->pdev->dev.of_node) {
+ res = platform_get_resource(pdev, IORESOURCE_DMA, tx ? 0 : 1);
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to get SPI-%s dma "
+ "resource\n", chan_str);
+ return -ENXIO;
+ }
+ dma_data->dmach = res->start;
+ return 0;
+ }
+
+ sprintf(prop_name, "%s-dma-channel", chan_str);
+ prop = of_find_property(pdev->dev.of_node, prop_name, NULL);
+ if (!prop) {
+ dev_err(&pdev->dev, "%s dma channel property not specified\n",
+ chan_str);
+ return -ENXIO;
+ }
+
+ dma_data->dmach = DMACH_DT_PROP;
+ dma_data->dma_prop = prop;
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static int s3c64xx_spi_parse_dt_gpio(struct s3c64xx_spi_driver_data *sdd)
+{
+ struct device *dev = &sdd->pdev->dev;
+ int idx, gpio, ret;
+
+ /* find gpios for mosi, miso and clock lines */
+ for (idx = 0; idx < 3; idx++) {
+ gpio = of_get_gpio(dev->of_node, idx);
+ if (!gpio_is_valid(gpio)) {
+ dev_err(dev, "invalid gpio[%d]: %d\n", idx, gpio);
+ goto free_gpio;
+ }
+
+ ret = gpio_request(gpio, "spi-bus");
+ if (ret) {
+ dev_err(dev, "gpio [%d] request failed: %d\n",
+ gpio, ret);
+ goto free_gpio;
+ }
+ }
+ return 0;
+
+free_gpio:
+ while (--idx >= 0)
+ gpio_free(sdd->gpios[idx]);
+ return -EINVAL;
+}
+
+static void s3c64xx_spi_dt_gpio_free(struct s3c64xx_spi_driver_data *sdd)
+{
+ unsigned int idx;
+ for (idx = 0; idx < 3; idx++)
+ gpio_free(sdd->gpios[idx]);
+}
+
+static struct s3c64xx_spi_info * __devinit s3c64xx_spi_parse_dt(
+ struct device *dev)
{
- struct resource *mem_res, *dmatx_res, *dmarx_res;
- struct s3c64xx_spi_driver_data *sdd;
struct s3c64xx_spi_info *sci;
- struct spi_master *master;
- int ret, irq;
- char clk_name[16];
+ u32 temp;
- if (pdev->id < 0) {
- dev_err(&pdev->dev,
- "Invalid platform device id-%d\n", pdev->id);
- return -ENODEV;
+ sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
+ if (!sci) {
+ dev_err(dev, "memory allocation for spi_info failed\n");
+ return ERR_PTR(-ENOMEM);
}
- if (pdev->dev.platform_data == NULL) {
- dev_err(&pdev->dev, "platform_data missing!\n");
- return -ENODEV;
+ if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
+ dev_warn(dev, "spi bus clock parent not specified, using "
+ "clock at index 0 as parent\n");
+ sci->src_clk_nr = 0;
+ } else {
+ sci->src_clk_nr = temp;
}
- sci = pdev->dev.platform_data;
+ if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
+ dev_warn(dev, "number of chip select lines not specified, "
+ "assuming 1 chip select line\n");
+ sci->num_cs = 1;
+ } else {
+ sci->num_cs = temp;
+ }
+
+ return sci;
+}
+#else
+static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
+{
+ return dev->platform_data;
+}
+
+static int s3c64xx_spi_parse_dt_gpio(struct s3c64xx_spi_driver_data *sdd)
+{
+ return -EINVAL;
+}
+
+static void s3c64xx_spi_dt_gpio_free(struct s3c64xx_spi_driver_data *sdd)
+{
+}
+#endif
- /* Check for availability of necessary resource */
+static const struct of_device_id s3c64xx_spi_dt_match[];
- dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (dmatx_res == NULL) {
- dev_err(&pdev->dev, "Unable to get SPI-Tx dma resource\n");
- return -ENXIO;
+static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
+ struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(s3c64xx_spi_dt_match, pdev->dev.of_node);
+ return (struct s3c64xx_spi_port_config *)match->data;
}
+#endif
+ return (struct s3c64xx_spi_port_config *)
+ platform_get_device_id(pdev)->driver_data;
+}
- dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (dmarx_res == NULL) {
- dev_err(&pdev->dev, "Unable to get SPI-Rx dma resource\n");
- return -ENXIO;
+static int __init s3c64xx_spi_probe(struct platform_device *pdev)
+{
+ struct resource *mem_res;
+ struct s3c64xx_spi_driver_data *sdd;
+ struct s3c64xx_spi_info *sci = pdev->dev.platform_data;
+ struct spi_master *master;
+ int ret, irq;
+ char clk_name[16];
+
+ if (!sci && pdev->dev.of_node) {
+ sci = s3c64xx_spi_parse_dt(&pdev->dev);
+ if (IS_ERR(sci))
+ return PTR_ERR(sci);
+ }
+
+ if (!sci) {
+ dev_err(&pdev->dev, "platform_data missing!\n");
+ return -ENODEV;
}
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1004,19 +1237,37 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
sdd = spi_master_get_devdata(master);
+ sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
sdd->master = master;
sdd->cntrlr_info = sci;
sdd->pdev = pdev;
sdd->sfr_start = mem_res->start;
- sdd->tx_dma.dmach = dmatx_res->start;
- sdd->tx_dma.direction = DMA_MEM_TO_DEV;
- sdd->rx_dma.dmach = dmarx_res->start;
- sdd->rx_dma.direction = DMA_DEV_TO_MEM;
+ if (pdev->dev.of_node) {
+ ret = of_alias_get_id(pdev->dev.of_node, "spi");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get alias id, "
+ "errno %d\n", ret);
+ goto err0;
+ }
+ sdd->port_id = ret;
+ } else {
+ sdd->port_id = pdev->id;
+ }
sdd->cur_bpw = 8;
- master->bus_num = pdev->id;
+ ret = s3c64xx_spi_get_dmares(sdd, true);
+ if (ret)
+ goto err0;
+
+ ret = s3c64xx_spi_get_dmares(sdd, false);
+ if (ret)
+ goto err0;
+
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = sdd->port_id;
master->setup = s3c64xx_spi_setup;
+ master->cleanup = s3c64xx_spi_cleanup;
master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
master->transfer_one_message = s3c64xx_spi_transfer_one_message;
master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
@@ -1025,21 +1276,17 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- if (request_mem_region(mem_res->start,
- resource_size(mem_res), pdev->name) == NULL) {
- dev_err(&pdev->dev, "Req mem region failed\n");
- ret = -ENXIO;
- goto err0;
- }
-
- sdd->regs = ioremap(mem_res->start, resource_size(mem_res));
+ sdd->regs = devm_request_and_ioremap(&pdev->dev, mem_res);
if (sdd->regs == NULL) {
dev_err(&pdev->dev, "Unable to remap IO\n");
ret = -ENXIO;
goto err1;
}
- if (sci->cfg_gpio == NULL || sci->cfg_gpio(pdev)) {
+ if (!sci->cfg_gpio && pdev->dev.of_node) {
+ if (s3c64xx_spi_parse_dt_gpio(sdd))
+ return -EBUSY;
+ } else if (sci->cfg_gpio == NULL || sci->cfg_gpio()) {
dev_err(&pdev->dev, "Unable to config gpio\n");
ret = -EBUSY;
goto err2;
@@ -1075,7 +1322,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
}
/* Setup Default Mode */
- s3c64xx_spi_hwinit(sdd, pdev->id);
+ s3c64xx_spi_hwinit(sdd, sdd->port_id);
spin_lock_init(&sdd->lock);
init_completion(&sdd->xfer_completion);
@@ -1100,7 +1347,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d "
"with %d Slaves attached\n",
- pdev->id, master->num_chipselect);
+ sdd->port_id, master->num_chipselect);
dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
mem_res->end, mem_res->start,
sdd->rx_dma.dmach, sdd->tx_dma.dmach);
@@ -1120,10 +1367,10 @@ err5:
err4:
clk_put(sdd->clk);
err3:
+ if (!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node)
+ s3c64xx_spi_dt_gpio_free(sdd);
err2:
- iounmap((void *) sdd->regs);
err1:
- release_mem_region(mem_res->start, resource_size(mem_res));
err0:
platform_set_drvdata(pdev, NULL);
spi_master_put(master);
@@ -1135,7 +1382,6 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- struct resource *mem_res;
pm_runtime_disable(&pdev->dev);
@@ -1151,11 +1397,8 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
clk_disable(sdd->clk);
clk_put(sdd->clk);
- iounmap((void *) sdd->regs);
-
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem_res != NULL)
- release_mem_region(mem_res->start, resource_size(mem_res));
+ if (!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node)
+ s3c64xx_spi_dt_gpio_free(sdd);
platform_set_drvdata(pdev, NULL);
spi_master_put(master);
@@ -1175,6 +1418,9 @@ static int s3c64xx_spi_suspend(struct device *dev)
clk_disable(sdd->src_clk);
clk_disable(sdd->clk);
+ if (!sdd->cntrlr_info->cfg_gpio && dev->of_node)
+ s3c64xx_spi_dt_gpio_free(sdd);
+
sdd->cur_speed = 0; /* Output Clock is stopped */
return 0;
@@ -1182,18 +1428,20 @@ static int s3c64xx_spi_suspend(struct device *dev)
static int s3c64xx_spi_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
- sci->cfg_gpio(pdev);
+ if (!sci->cfg_gpio && dev->of_node)
+ s3c64xx_spi_parse_dt_gpio(sdd);
+ else
+ sci->cfg_gpio();
/* Enable the clock */
clk_enable(sdd->src_clk);
clk_enable(sdd->clk);
- s3c64xx_spi_hwinit(sdd, pdev->id);
+ s3c64xx_spi_hwinit(sdd, sdd->port_id);
spi_master_resume(master);
@@ -1231,13 +1479,89 @@ static const struct dev_pm_ops s3c64xx_spi_pm = {
s3c64xx_spi_runtime_resume, NULL)
};
+struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
+ .fifo_lvl_mask = { 0x7f },
+ .rx_lvl_offset = 13,
+ .tx_st_done = 21,
+ .high_speed = true,
+};
+
+struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
+ .fifo_lvl_mask = { 0x7f, 0x7F },
+ .rx_lvl_offset = 13,
+ .tx_st_done = 21,
+};
+
+struct s3c64xx_spi_port_config s5p64x0_spi_port_config = {
+ .fifo_lvl_mask = { 0x1ff, 0x7F },
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+};
+
+struct s3c64xx_spi_port_config s5pc100_spi_port_config = {
+ .fifo_lvl_mask = { 0x7f, 0x7F },
+ .rx_lvl_offset = 13,
+ .tx_st_done = 21,
+ .high_speed = true,
+};
+
+struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
+ .fifo_lvl_mask = { 0x1ff, 0x7F },
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+ .high_speed = true,
+};
+
+struct s3c64xx_spi_port_config exynos4_spi_port_config = {
+ .fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F },
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+ .high_speed = true,
+ .clk_from_cmu = true,
+};
+
+static struct platform_device_id s3c64xx_spi_driver_ids[] = {
+ {
+ .name = "s3c2443-spi",
+ .driver_data = (kernel_ulong_t)&s3c2443_spi_port_config,
+ }, {
+ .name = "s3c6410-spi",
+ .driver_data = (kernel_ulong_t)&s3c6410_spi_port_config,
+ }, {
+ .name = "s5p64x0-spi",
+ .driver_data = (kernel_ulong_t)&s5p64x0_spi_port_config,
+ }, {
+ .name = "s5pc100-spi",
+ .driver_data = (kernel_ulong_t)&s5pc100_spi_port_config,
+ }, {
+ .name = "s5pv210-spi",
+ .driver_data = (kernel_ulong_t)&s5pv210_spi_port_config,
+ }, {
+ .name = "exynos4210-spi",
+ .driver_data = (kernel_ulong_t)&exynos4_spi_port_config,
+ },
+ { },
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id s3c64xx_spi_dt_match[] = {
+ { .compatible = "samsung,exynos4210-spi",
+ .data = (void *)&exynos4_spi_port_config,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
+#endif /* CONFIG_OF */
+
static struct platform_driver s3c64xx_spi_driver = {
.driver = {
.name = "s3c64xx-spi",
.owner = THIS_MODULE,
.pm = &s3c64xx_spi_pm,
+ .of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
},
.remove = s3c64xx_spi_remove,
+ .id_table = s3c64xx_spi_driver_ids,
};
MODULE_ALIAS("platform:s3c64xx-spi");
diff --git a/drivers/spi/spi-tegra.c b/drivers/spi/spi-tegra.c
index ae6d78a..7f99ff3 100644
--- a/drivers/spi/spi-tegra.c
+++ b/drivers/spi/spi-tegra.c
@@ -261,7 +261,7 @@ static void spi_tegra_start_transfer(struct spi_device *spi,
clk_set_rate(tspi->clk, speed);
if (tspi->cur_speed == 0)
- clk_enable(tspi->clk);
+ clk_prepare_enable(tspi->clk);
tspi->cur_speed = speed;
@@ -373,7 +373,7 @@ static void tegra_spi_rx_dma_complete(struct tegra_dma_req *req)
spi = m->state;
spi_tegra_start_message(spi, m);
} else {
- clk_disable(tspi->clk);
+ clk_disable_unprepare(tspi->clk);
tspi->cur_speed = 0;
}
}
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index f551e53..266aa16 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -36,6 +36,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4328) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4329) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432c) },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl);
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index 266c7c5..ab4627c 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -90,6 +90,8 @@ const char *ssb_core_name(u16 coreid)
return "ARM 1176";
case SSB_DEV_ARM_7TDMI:
return "ARM 7TDMI";
+ case SSB_DEV_ARM_CM3:
+ return "ARM Cortex M3";
}
return "UNKNOWN";
}
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 1c3d638..aeac1ca 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -30,6 +30,7 @@
#include <linux/pci.h>
#include <linux/usb.h>
#include <linux/errno.h>
+#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fcntl.h>
@@ -981,6 +982,8 @@ void comedi_pci_driver_unregister(struct comedi_driver *comedi_driver,
}
EXPORT_SYMBOL_GPL(comedi_pci_driver_unregister);
+#if IS_ENABLED(CONFIG_USB)
+
static int comedi_old_usb_auto_config(struct usb_interface *intf,
struct comedi_driver *driver)
{
@@ -1043,3 +1046,5 @@ void comedi_usb_driver_unregister(struct comedi_driver *comedi_driver,
comedi_driver_unregister(comedi_driver);
}
EXPORT_SYMBOL_GPL(comedi_usb_driver_unregister);
+
+#endif
diff --git a/drivers/staging/gdm72xx/netlink_k.c b/drivers/staging/gdm72xx/netlink_k.c
index 292af0f..87c3a07 100644
--- a/drivers/staging/gdm72xx/netlink_k.c
+++ b/drivers/staging/gdm72xx/netlink_k.c
@@ -88,13 +88,15 @@ struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
void *msg, int len))
{
struct sock *sock;
+ struct netlink_kernel_cfg cfg = {
+ .input = netlink_rcv,
+ };
#if !defined(DEFINE_MUTEX)
init_MUTEX(&netlink_mutex);
#endif
- sock = netlink_kernel_create(&init_net, unit, 0, netlink_rcv, NULL,
- THIS_MODULE);
+ sock = netlink_kernel_create(&init_net, unit, THIS_MODULE, &cfg);
if (sock)
rcv_cb = cb;
@@ -104,7 +106,7 @@ struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
void netlink_exit(struct sock *sock)
{
- sock_release(sock->sk_socket);
+ netlink_kernel_release(sock);
}
int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
@@ -127,8 +129,12 @@ int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
}
seq++;
- nlh = NLMSG_PUT(skb, 0, seq, type, len);
- memcpy(NLMSG_DATA(nlh), msg, len);
+ nlh = nlmsg_put(skb, 0, seq, type, len, 0);
+ if (!nlh) {
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
+ memcpy(nlmsg_data(nlh), msg, len);
NETLINK_CB(skb).pid = 0;
NETLINK_CB(skb).dst_group = 0;
@@ -144,7 +150,5 @@ int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
}
ret = 0;
}
-
-nlmsg_failure:
return ret;
}
diff --git a/drivers/staging/iio/Documentation/device.txt b/drivers/staging/iio/Documentation/device.txt
index 0338c7c..f03fbd3 100644
--- a/drivers/staging/iio/Documentation/device.txt
+++ b/drivers/staging/iio/Documentation/device.txt
@@ -29,8 +29,6 @@ Then fill in the following:
* info->driver_module:
Set to THIS_MODULE. Used to ensure correct ownership
of various resources allocate by the core.
- * info->num_interrupt_lines:
- Number of event triggering hardware lines the device has.
* info->event_attrs:
Attributes used to enable / disable hardware events.
* info->attrs:
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index 2490dd2..8f1b3af 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -13,6 +13,7 @@ config AD7291
config AD7298
tristate "Analog Devices AD7298 ADC driver"
depends on SPI
+ select IIO_KFIFO_BUF if IIO_BUFFER
help
Say yes here to build support for Analog Devices AD7298
8 Channel ADC with temperature sensor.
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index 10ab6dc..a13afff 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -235,7 +235,8 @@ static const struct attribute_group ad7606_attribute_group_range = {
.indexed = 1, \
.channel = num, \
.address = num, \
- .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT, \
+ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.scan_index = num, \
.scan_type = IIO_ST('s', 16, 16, 0), \
}
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index 3295ea6..97ef670 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -129,6 +129,7 @@ static void send_space_homebrew(long length);
static struct lirc_serial hardware[] = {
[LIRC_HOMEBREW] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_HOMEBREW].lock),
.signal_pin = UART_MSR_DCD,
.signal_pin_change = UART_MSR_DDCD,
.on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
@@ -145,6 +146,7 @@ static struct lirc_serial hardware[] = {
},
[LIRC_IRDEO] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO].lock),
.signal_pin = UART_MSR_DSR,
.signal_pin_change = UART_MSR_DDSR,
.on = UART_MCR_OUT2,
@@ -156,6 +158,7 @@ static struct lirc_serial hardware[] = {
},
[LIRC_IRDEO_REMOTE] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IRDEO_REMOTE].lock),
.signal_pin = UART_MSR_DSR,
.signal_pin_change = UART_MSR_DDSR,
.on = (UART_MCR_RTS | UART_MCR_DTR | UART_MCR_OUT2),
@@ -167,6 +170,7 @@ static struct lirc_serial hardware[] = {
},
[LIRC_ANIMAX] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_ANIMAX].lock),
.signal_pin = UART_MSR_DCD,
.signal_pin_change = UART_MSR_DDCD,
.on = 0,
@@ -177,6 +181,7 @@ static struct lirc_serial hardware[] = {
},
[LIRC_IGOR] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_IGOR].lock),
.signal_pin = UART_MSR_DSR,
.signal_pin_change = UART_MSR_DDSR,
.on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
@@ -201,6 +206,7 @@ static struct lirc_serial hardware[] = {
* See also http://www.nslu2-linux.org for this device
*/
[LIRC_NSLU2] = {
+ .lock = __SPIN_LOCK_UNLOCKED(hardware[LIRC_NSLU2].lock),
.signal_pin = UART_MSR_CTS,
.signal_pin_change = UART_MSR_DCTS,
.on = (UART_MCR_RTS | UART_MCR_OUT2 | UART_MCR_DTR),
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 3c60088..9356886 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -675,7 +675,7 @@ static void tegra_init_i2c_slave(struct nvec_chip *nvec)
{
u32 val;
- clk_enable(nvec->i2c_clk);
+ clk_prepare_enable(nvec->i2c_clk);
tegra_periph_reset_assert(nvec->i2c_clk);
udelay(2);
@@ -695,14 +695,14 @@ static void tegra_init_i2c_slave(struct nvec_chip *nvec)
enable_irq(nvec->irq);
- clk_disable(nvec->i2c_clk);
+ clk_disable_unprepare(nvec->i2c_clk);
}
static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
{
disable_irq(nvec->irq);
writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
- clk_disable(nvec->i2c_clk);
+ clk_disable_unprepare(nvec->i2c_clk);
}
static void nvec_power_off(void)
@@ -812,7 +812,7 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev)
tegra_init_i2c_slave(nvec);
- clk_enable(i2c_clk);
+ clk_prepare_enable(i2c_clk);
/* enable event reporting */
diff --git a/drivers/staging/omapdrm/omap_drv.h b/drivers/staging/omapdrm/omap_drv.h
index f238d57..2092a91 100644
--- a/drivers/staging/omapdrm/omap_drv.h
+++ b/drivers/staging/omapdrm/omap_drv.h
@@ -25,8 +25,8 @@
#include <linux/types.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <linux/platform_data/omap_drm.h>
#include "omap_drm.h"
-#include "omap_priv.h"
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt, ##__VA_ARGS__) /* verbose debug */
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/staging/omapdrm/omap_fbdev.c
index 11acd4c..8c6ed3b 100644
--- a/drivers/staging/omapdrm/omap_fbdev.c
+++ b/drivers/staging/omapdrm/omap_fbdev.c
@@ -208,7 +208,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
*/
ret = omap_gem_get_paddr(fbdev->bo, &paddr, true);
if (ret) {
- dev_err(dev->dev, "could not map (paddr)!\n");
+ dev_err(dev->dev,
+ "could not map (paddr)! Skipping framebuffer alloc\n");
ret = -ENOMEM;
goto fail;
}
@@ -388,8 +389,11 @@ void omap_fbdev_free(struct drm_device *dev)
fbi = helper->fbdev;
- unregister_framebuffer(fbi);
- framebuffer_release(fbi);
+ /* only cleanup framebuffer if it is present */
+ if (fbi) {
+ unregister_framebuffer(fbi);
+ framebuffer_release(fbi);
+ }
drm_fb_helper_fini(helper);
diff --git a/drivers/staging/omapdrm/omap_priv.h b/drivers/staging/omapdrm/omap_priv.h
deleted file mode 100644
index ef64414..0000000
--- a/drivers/staging/omapdrm/omap_priv.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * include/drm/omap_priv.h
- *
- * Copyright (C) 2011 Texas Instruments
- * Author: Rob Clark <rob@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __OMAP_PRIV_H__
-#define __OMAP_PRIV_H__
-
-/* Non-userspace facing APIs
- */
-
-/* optional platform data to configure the default configuration of which
- * pipes/overlays/CRTCs are used.. if this is not provided, then instead the
- * first CONFIG_DRM_OMAP_NUM_CRTCS are used, and they are each connected to
- * one manager, with priority given to managers that are connected to
- * detected devices. Remaining overlays are used as video planes. This
- * should be a good default behavior for most cases, but yet there still
- * might be times when you wish to do something different.
- */
-struct omap_kms_platform_data {
- /* overlays to use as CRTCs: */
- int ovl_cnt;
- const int *ovl_ids;
-
- /* overlays to use as video planes: */
- int pln_cnt;
- const int *pln_ids;
-
- int mgr_cnt;
- const int *mgr_ids;
-
- int dev_cnt;
- const char **dev_names;
-};
-
-struct omap_drm_platform_data {
- struct omap_kms_platform_data *kms_pdata;
- struct omap_dmm_platform_data *dmm_pdata;
-};
-
-#endif /* __OMAP_DRM_H__ */
diff --git a/drivers/staging/ramster/zcache-main.c b/drivers/staging/ramster/zcache-main.c
index 4e7ef0e..d46764b 100644
--- a/drivers/staging/ramster/zcache-main.c
+++ b/drivers/staging/ramster/zcache-main.c
@@ -3002,7 +3002,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
return oid;
}
-static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_store(unsigned type, pgoff_t offset,
struct page *page)
{
u64 ind64 = (u64)offset;
@@ -3025,7 +3025,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
/* returns 0 if the page was successfully gotten from frontswap, -1 if
* was not present (should never happen!) */
-static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_load(unsigned type, pgoff_t offset,
struct page *page)
{
u64 ind64 = (u64)offset;
@@ -3080,8 +3080,8 @@ static void zcache_frontswap_init(unsigned ignored)
}
static struct frontswap_ops zcache_frontswap_ops = {
- .put_page = zcache_frontswap_put_page,
- .get_page = zcache_frontswap_get_page,
+ .store = zcache_frontswap_store,
+ .load = zcache_frontswap_load,
.invalidate_page = zcache_frontswap_flush_page,
.invalidate_area = zcache_frontswap_flush_area,
.init = zcache_frontswap_init
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 9bd18e2..69f616c 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -102,6 +102,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
/* - */
{USB_DEVICE(0x20F4, 0x646B)},
{USB_DEVICE(0x083A, 0xC512)},
+ {USB_DEVICE(0x25D4, 0x4CA1)},
+ {USB_DEVICE(0x25D4, 0x4CAB)},
/* RTL8191SU */
/* Realtek */
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 2734dac..784c796 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -1835,7 +1835,7 @@ static int zcache_frontswap_poolid = -1;
* Swizzling increases objects per swaptype, increasing tmem concurrency
* for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
* Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
- * frontswap_get_page(), but has side-effects. Hence using 8.
+ * frontswap_load(), but has side-effects. Hence using 8.
*/
#define SWIZ_BITS 8
#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
@@ -1849,7 +1849,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
return oid;
}
-static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_store(unsigned type, pgoff_t offset,
struct page *page)
{
u64 ind64 = (u64)offset;
@@ -1870,7 +1870,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
/* returns 0 if the page was successfully gotten from frontswap, -1 if
* was not present (should never happen!) */
-static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
+static int zcache_frontswap_load(unsigned type, pgoff_t offset,
struct page *page)
{
u64 ind64 = (u64)offset;
@@ -1919,8 +1919,8 @@ static void zcache_frontswap_init(unsigned ignored)
}
static struct frontswap_ops zcache_frontswap_ops = {
- .put_page = zcache_frontswap_put_page,
- .get_page = zcache_frontswap_get_page,
+ .store = zcache_frontswap_store,
+ .load = zcache_frontswap_load,
.invalidate_page = zcache_frontswap_flush_page,
.invalidate_area = zcache_frontswap_flush_area,
.init = zcache_frontswap_init
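Both the ramster and zcache hunks track the frontswap API rename from put_page/get_page to store/load; only the callback names change, not their signatures. A skeleton ops table using the new names (handler bodies elided; the registration call in the trailing comment is the usual pattern at this point and is an assumption here, not part of the change above):

    #include <linux/frontswap.h>

    static int my_store(unsigned type, pgoff_t offset, struct page *page)
    {
            /* copy/compress the page into the backend; 0 on success */
            return -1;
    }

    static int my_load(unsigned type, pgoff_t offset, struct page *page)
    {
            /* fill the page from the backend; 0 on success, -1 if absent */
            return -1;
    }

    static void my_invalidate_page(unsigned type, pgoff_t offset) { }
    static void my_invalidate_area(unsigned type) { }
    static void my_init(unsigned type) { }

    static struct frontswap_ops my_frontswap_ops = {
            .store           = my_store,
            .load            = my_load,
            .invalidate_page = my_invalidate_page,
            .invalidate_area = my_invalidate_area,
            .init            = my_init,
    };

    /* registered with, e.g., frontswap_register_ops(&my_frontswap_ops) */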
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 61648d8..9fdcb56 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -9,7 +9,8 @@ target_core_mod-y := target_core_configfs.o \
target_core_tmr.o \
target_core_tpg.o \
target_core_transport.o \
- target_core_cdb.o \
+ target_core_sbc.o \
+ target_core_spc.o \
target_core_ua.o \
target_core_rd.o \
target_core_stat.o
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index d57d10c..97c0f78 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -429,19 +429,8 @@ int iscsit_reset_np_thread(
int iscsit_del_np_comm(struct iscsi_np *np)
{
- if (!np->np_socket)
- return 0;
-
- /*
- * Some network transports allocate their own struct sock->file,
- * see if we need to free any additional allocated resources.
- */
- if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
- kfree(np->np_socket->file);
- np->np_socket->file = NULL;
- }
-
- sock_release(np->np_socket);
+ if (np->np_socket)
+ sock_release(np->np_socket);
return 0;
}
@@ -1413,8 +1402,10 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
spin_unlock_bh(&cmd->istate_lock);
iscsit_stop_dataout_timer(cmd);
- return (!ooo_cmdsn) ? transport_generic_handle_data(
- &cmd->se_cmd) : 0;
+ if (ooo_cmdsn)
+ return 0;
+ target_execute_cmd(&cmd->se_cmd);
+ return 0;
} else /* DATAOUT_CANNOT_RECOVER */
return -1;
@@ -2683,7 +2674,7 @@ static int iscsit_send_logout_response(
*/
logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
cmd->logout_cid);
- if ((logout_conn)) {
+ if (logout_conn) {
iscsit_connection_reinstatement_rcfr(logout_conn);
iscsit_dec_conn_usage_count(logout_conn);
}
@@ -4077,13 +4068,8 @@ int iscsit_close_connection(
kfree(conn->conn_ops);
conn->conn_ops = NULL;
- if (conn->sock) {
- if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
- kfree(conn->sock->file);
- conn->sock->file = NULL;
- }
+ if (conn->sock)
sock_release(conn->sock);
- }
conn->thread_set = NULL;
pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 69dc8e3..a7b25e78 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -47,28 +47,6 @@ struct lio_target_configfs_attribute {
ssize_t (*store)(void *, const char *, size_t);
};
-struct iscsi_portal_group *lio_get_tpg_from_tpg_item(
- struct config_item *item,
- struct iscsi_tiqn **tiqn_out)
-{
- struct se_portal_group *se_tpg = container_of(to_config_group(item),
- struct se_portal_group, tpg_group);
- struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
- int ret;
-
- if (!tpg) {
- pr_err("Unable to locate struct iscsi_portal_group "
- "pointer\n");
- return NULL;
- }
- ret = iscsit_get_tpg(tpg);
- if (ret < 0)
- return NULL;
-
- *tiqn_out = tpg->tpg_tiqn;
- return tpg;
-}
-
/* Start items for lio_target_portal_cit */
static ssize_t lio_target_np_show_sctp(
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 1c70144..8a908b2 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -224,7 +224,6 @@ enum iscsi_timer_flags_table {
/* Used for struct iscsi_np->np_flags */
enum np_flags_table {
NPF_IP_NETWORK = 0x00,
- NPF_SCTP_STRUCT_FILE = 0x01 /* Bugfix */
};
/* Used for struct iscsi_np->np_thread_state */
@@ -481,6 +480,7 @@ struct iscsi_tmr_req {
bool task_reassign:1;
u32 ref_cmd_sn;
u32 exp_data_sn;
+ struct iscsi_cmd *ref_cmd;
struct iscsi_conn_recovery *conn_recovery;
struct se_tmr_req *se_tmr_req;
};
@@ -503,7 +503,6 @@ struct iscsi_conn {
u16 local_port;
int net_size;
u32 auth_id;
-#define CONNFLAG_SCTP_STRUCT_FILE 0x01
u32 conn_flags;
/* Used for iscsi_tx_login_rsp() */
u32 login_itt;
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index ecdd46d..3df8a2c 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -965,8 +965,8 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
if (cmd->immediate_data) {
if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
spin_unlock_bh(&cmd->istate_lock);
- return transport_generic_handle_data(
- &cmd->se_cmd);
+ target_execute_cmd(&cmd->se_cmd);
+ return 0;
}
spin_unlock_bh(&cmd->istate_lock);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index a3656c9..0694d9b 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -518,7 +518,7 @@ int iscsi_login_post_auth_non_zero_tsih(
* initiator and release the new connection.
*/
conn_ptr = iscsit_get_conn_from_cid_rcfr(sess, cid);
- if ((conn_ptr)) {
+ if (conn_ptr) {
pr_err("Connection exists with CID %hu for %s,"
" performing connection reinstatement.\n",
conn_ptr->cid, sess->sess_ops->InitiatorName);
@@ -539,7 +539,7 @@ int iscsi_login_post_auth_non_zero_tsih(
if (sess->sess_ops->ErrorRecoveryLevel == 2) {
cr = iscsit_get_inactive_connection_recovery_entry(
sess, cid);
- if ((cr)) {
+ if (cr) {
pr_debug("Performing implicit logout"
" for connection recovery on CID: %hu\n",
conn->cid);
@@ -795,22 +795,6 @@ int iscsi_target_setup_login_socket(
}
np->np_socket = sock;
/*
- * The SCTP stack needs struct socket->file.
- */
- if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
- (np->np_network_transport == ISCSI_SCTP_UDP)) {
- if (!sock->file) {
- sock->file = kzalloc(sizeof(struct file), GFP_KERNEL);
- if (!sock->file) {
- pr_err("Unable to allocate struct"
- " file for SCTP\n");
- ret = -ENOMEM;
- goto fail;
- }
- np->np_flags |= NPF_SCTP_STRUCT_FILE;
- }
- }
- /*
* Setup the np->np_sockaddr from the passed sockaddr setup
* in iscsi_target_configfs.c code..
*/
@@ -869,21 +853,15 @@ int iscsi_target_setup_login_socket(
fail:
np->np_socket = NULL;
- if (sock) {
- if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
- kfree(sock->file);
- sock->file = NULL;
- }
-
+ if (sock)
sock_release(sock);
- }
return ret;
}
static int __iscsi_target_login_thread(struct iscsi_np *np)
{
u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0;
- int err, ret = 0, set_sctp_conn_flag, stop;
+ int err, ret = 0, stop;
struct iscsi_conn *conn = NULL;
struct iscsi_login *login;
struct iscsi_portal_group *tpg = NULL;
@@ -894,7 +872,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
struct sockaddr_in6 sock_in6;
flush_signals(current);
- set_sctp_conn_flag = 0;
sock = np->np_socket;
spin_lock_bh(&np->np_thread_lock);
@@ -917,35 +894,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
spin_unlock_bh(&np->np_thread_lock);
goto out;
}
- /*
- * The SCTP stack needs struct socket->file.
- */
- if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
- (np->np_network_transport == ISCSI_SCTP_UDP)) {
- if (!new_sock->file) {
- new_sock->file = kzalloc(
- sizeof(struct file), GFP_KERNEL);
- if (!new_sock->file) {
- pr_err("Unable to allocate struct"
- " file for SCTP\n");
- sock_release(new_sock);
- /* Get another socket */
- return 1;
- }
- set_sctp_conn_flag = 1;
- }
- }
-
iscsi_start_login_thread_timer(np);
conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
if (!conn) {
pr_err("Could not allocate memory for"
" new connection\n");
- if (set_sctp_conn_flag) {
- kfree(new_sock->file);
- new_sock->file = NULL;
- }
sock_release(new_sock);
/* Get another socket */
return 1;
@@ -955,9 +909,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
conn->conn_state = TARG_CONN_STATE_FREE;
conn->sock = new_sock;
- if (set_sctp_conn_flag)
- conn->conn_flags |= CONNFLAG_SCTP_STRUCT_FILE;
-
pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
conn->conn_state = TARG_CONN_STATE_XPT_UP;
@@ -1081,7 +1032,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
goto new_sess_out;
zero_tsih = (pdu->tsih == 0x0000);
- if ((zero_tsih)) {
+ if (zero_tsih) {
/*
* This is the leading connection of a new session.
* We wait until after authentication to check for
@@ -1205,13 +1156,8 @@ old_sess_out:
iscsi_release_param_list(conn->param_list);
conn->param_list = NULL;
}
- if (conn->sock) {
- if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
- kfree(conn->sock->file);
- conn->sock->file = NULL;
- }
+ if (conn->sock)
sock_release(conn->sock);
- }
kfree(conn);
if (tpg) {
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index ed5241e..0c4760f 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -681,7 +681,7 @@ int iscsi_update_param_value(struct iscsi_param *param, char *value)
param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
if (!param->value) {
pr_err("Unable to allocate memory for value.\n");
- return -1;
+ return -ENOMEM;
}
memcpy(param->value, value, strlen(value));
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index f4e640b..f62fe12 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -19,6 +19,7 @@
******************************************************************************/
#include <asm/unaligned.h>
+#include <scsi/scsi_device.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
@@ -61,7 +62,7 @@ u8 iscsit_tmr_abort_task(
}
se_tmr->ref_task_tag = hdr->rtt;
- se_tmr->ref_cmd = &ref_cmd->se_cmd;
+ tmr_req->ref_cmd = ref_cmd;
tmr_req->ref_cmd_sn = hdr->refcmdsn;
tmr_req->exp_data_sn = hdr->exp_datasn;
@@ -121,7 +122,7 @@ u8 iscsit_tmr_task_reassign(
struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
- int ret;
+ int ret, ref_lun;
pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x,"
" RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n",
@@ -155,9 +156,16 @@ u8 iscsit_tmr_task_reassign(
return ISCSI_TMF_RSP_REJECTED;
}
+ ref_lun = scsilun_to_int(&hdr->lun);
+ if (ref_lun != ref_cmd->se_cmd.orig_fe_lun) {
+ pr_err("Unable to perform connection recovery for"
+ " differing ref_lun: %d ref_cmd orig_fe_lun: %u\n",
+ ref_lun, ref_cmd->se_cmd.orig_fe_lun);
+ return ISCSI_TMF_RSP_REJECTED;
+ }
+
se_tmr->ref_task_tag = hdr->rtt;
- se_tmr->ref_cmd = &ref_cmd->se_cmd;
- se_tmr->ref_task_lun = get_unaligned_le64(&hdr->lun);
+ tmr_req->ref_cmd = ref_cmd;
tmr_req->ref_cmd_sn = hdr->refcmdsn;
tmr_req->exp_data_sn = hdr->exp_datasn;
tmr_req->conn_recovery = cr;
@@ -191,9 +199,7 @@ static int iscsit_task_reassign_complete_nop_out(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
- struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
- struct se_cmd *se_cmd = se_tmr->ref_cmd;
- struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_cmd *cmd = tmr_req->ref_cmd;
struct iscsi_conn_recovery *cr;
if (!cmd->cr) {
@@ -251,7 +257,8 @@ static int iscsit_task_reassign_complete_write(
pr_debug("WRITE ITT: 0x%08x: t_state: %d"
" never sent to transport\n",
cmd->init_task_tag, cmd->se_cmd.t_state);
- return transport_generic_handle_data(se_cmd);
+ target_execute_cmd(se_cmd);
+ return 0;
}
cmd->i_state = ISTATE_SEND_STATUS;
@@ -360,9 +367,7 @@ static int iscsit_task_reassign_complete_scsi_cmnd(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
- struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
- struct se_cmd *se_cmd = se_tmr->ref_cmd;
- struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_cmd *cmd = tmr_req->ref_cmd;
struct iscsi_conn_recovery *cr;
if (!cmd->cr) {
@@ -385,7 +390,7 @@ static int iscsit_task_reassign_complete_scsi_cmnd(
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
- if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+ if (cmd->se_cmd.se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
cmd->i_state = ISTATE_SEND_STATUS;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
@@ -411,17 +416,14 @@ static int iscsit_task_reassign_complete(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
- struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
- struct se_cmd *se_cmd;
struct iscsi_cmd *cmd;
int ret = 0;
- if (!se_tmr->ref_cmd) {
+ if (!tmr_req->ref_cmd) {
pr_err("TMR Request is missing a RefCmd struct iscsi_cmd.\n");
return -1;
}
- se_cmd = se_tmr->ref_cmd;
- cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ cmd = tmr_req->ref_cmd;
cmd->conn = conn;
@@ -547,9 +549,7 @@ int iscsit_task_reassign_prepare_write(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
- struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
- struct se_cmd *se_cmd = se_tmr->ref_cmd;
- struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_cmd *cmd = tmr_req->ref_cmd;
struct iscsi_pdu *pdu = NULL;
struct iscsi_r2t *r2t = NULL, *r2t_tmp;
int first_incomplete_r2t = 1, i = 0;
@@ -782,14 +782,12 @@ int iscsit_check_task_reassign_expdatasn(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
- struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
- struct se_cmd *se_cmd = se_tmr->ref_cmd;
- struct iscsi_cmd *ref_cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_cmd *ref_cmd = tmr_req->ref_cmd;
if (ref_cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD)
return 0;
- if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
+ if (ref_cmd->se_cmd.se_cmd_flags & SCF_SENT_CHECK_CONDITION)
return 0;
if (ref_cmd->data_direction == DMA_NONE)
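The TMR hunks above drop the se_tmr->ref_cmd indirection: the referenced command is now reached through a direct tmr_req->ref_cmd back-pointer instead of container_of() on the embedded se_cmd. For reference, the container_of() idiom being retired looks like this (generic sketch, hypothetical types):

    #include <linux/kernel.h>       /* container_of() */

    struct inner {
            int x;
    };

    struct outer {
            long cookie;
            struct inner member;
    };

    /* recover the enclosing object from a pointer to one of its members */
    static struct outer *outer_from_inner(struct inner *p)
    {
            return container_of(p, struct outer, member);
    }

Storing the outer pointer directly, as the new tmr_req->ref_cmd field does, avoids the extra hop and the dependency on the embedded member's type.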
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 879d8d0..a38a3f8 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -303,6 +303,7 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
{
struct iscsi_param *param;
struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+ int ret;
spin_lock(&tpg->tpg_state_lock);
if (tpg->tpg_state == TPG_STATE_ACTIVE) {
@@ -319,19 +320,19 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
if (!param) {
spin_unlock(&tpg->tpg_state_lock);
- return -ENOMEM;
+ return -EINVAL;
}
if (ISCSI_TPG_ATTRIB(tpg)->authentication) {
- if (!strcmp(param->value, NONE))
- if (iscsi_update_param_value(param, CHAP) < 0) {
- spin_unlock(&tpg->tpg_state_lock);
- return -ENOMEM;
- }
- if (iscsit_ta_authentication(tpg, 1) < 0) {
- spin_unlock(&tpg->tpg_state_lock);
- return -ENOMEM;
+ if (!strcmp(param->value, NONE)) {
+ ret = iscsi_update_param_value(param, CHAP);
+ if (ret)
+ goto err;
}
+
+ ret = iscsit_ta_authentication(tpg, 1);
+ if (ret < 0)
+ goto err;
}
tpg->tpg_state = TPG_STATE_ACTIVE;
@@ -344,6 +345,10 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
spin_unlock(&tiqn->tiqn_tpg_lock);
return 0;
+
+err:
+ spin_unlock(&tpg->tpg_state_lock);
+ return ret;
}
int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
@@ -558,7 +563,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
if ((authentication != 1) && (authentication != 0)) {
pr_err("Illegal value for authentication parameter:"
" %u, ignoring request.\n", authentication);
- return -1;
+ return -EINVAL;
}
memset(buf1, 0, sizeof(buf1));
@@ -593,7 +598,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
} else {
snprintf(buf1, sizeof(buf1), "%s", param->value);
none = strstr(buf1, NONE);
- if ((none))
+ if (none)
goto out;
strncat(buf1, ",", strlen(","));
strncat(buf1, NONE, strlen(NONE));
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 38dfac2..5491c63 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -211,12 +211,11 @@ static void tcm_loop_submission_work(struct work_struct *work)
/*
* Because some userspace code via scsi-generic do not memset their
* associated read buffers, go ahead and do that here for type
- * SCF_SCSI_CONTROL_SG_IO_CDB. Also note that this is currently
- * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
- * by target core in target_setup_cmd_from_cdb() ->
- * transport_generic_cmd_sequencer().
+ * non-data CDBs. Also note that this is currently guaranteed to be a
+ * single SGL for this case by target core in
+ * target_setup_cmd_from_cdb() -> transport_generic_cmd_sequencer().
*/
- if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
+ if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
se_cmd->data_direction == DMA_FROM_DEVICE) {
struct scatterlist *sg = scsi_sglist(sc);
unsigned char *buf = kmap(sg_page(sg)) + sg->offset;
@@ -779,7 +778,7 @@ static int tcm_loop_write_pending(struct se_cmd *se_cmd)
* We now tell TCM to add this WRITE CDB directly into the TCM storage
* object execution queue.
*/
- transport_generic_process_write(se_cmd);
+ target_execute_cmd(se_cmd);
return 0;
}
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 37c6098..39ddba5 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -587,14 +587,14 @@ static void sbp_management_request_logout(
{
struct sbp_tport *tport = agent->tport;
struct sbp_tpg *tpg = tport->tpg;
- int login_id;
+ int id;
struct sbp_login_descriptor *login;
- login_id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
+ id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
- login = sbp_login_find_by_id(tpg, login_id);
+ login = sbp_login_find_by_id(tpg, id);
if (!login) {
- pr_warn("cannot find login: %d\n", login_id);
+ pr_warn("cannot find login: %d\n", id);
req->status.status = cpu_to_be32(
STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
@@ -1219,28 +1219,14 @@ static void sbp_handle_command(struct sbp_target_request *req)
ret = sbp_fetch_command(req);
if (ret) {
pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
- req->status.status |= cpu_to_be32(
- STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
- STATUS_BLOCK_DEAD(0) |
- STATUS_BLOCK_LEN(1) |
- STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
- sbp_send_status(req);
- sbp_free_request(req);
- return;
+ goto err;
}
ret = sbp_fetch_page_table(req);
if (ret) {
pr_debug("sbp_handle_command: fetch page table failed: %d\n",
ret);
- req->status.status |= cpu_to_be32(
- STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
- STATUS_BLOCK_DEAD(0) |
- STATUS_BLOCK_LEN(1) |
- STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
- sbp_send_status(req);
- sbp_free_request(req);
- return;
+ goto err;
}
unpacked_lun = req->login->lun->unpacked_lun;
@@ -1249,9 +1235,21 @@ static void sbp_handle_command(struct sbp_target_request *req)
pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
req->orb_pointer, unpacked_lun, data_length, data_dir);
- target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
- req->sense_buf, unpacked_lun, data_length,
- MSG_SIMPLE_TAG, data_dir, 0);
+ if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
+ req->sense_buf, unpacked_lun, data_length,
+ MSG_SIMPLE_TAG, data_dir, 0))
+ goto err;
+
+ return;
+
+err:
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+ STATUS_BLOCK_DEAD(0) |
+ STATUS_BLOCK_LEN(1) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+ sbp_send_status(req);
+ sbp_free_request(req);
}
/*
@@ -1784,8 +1782,7 @@ static int sbp_write_pending(struct se_cmd *se_cmd)
return ret;
}
- transport_generic_process_write(se_cmd);
-
+ target_execute_cmd(se_cmd);
return 0;
}
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index e624b83..9179997 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -374,8 +374,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
out:
transport_kunmap_data_sg(cmd);
- target_complete_cmd(cmd, GOOD);
- return 0;
+ if (!rc)
+ target_complete_cmd(cmd, GOOD);
+ return rc;
}
static inline int core_alua_state_nonoptimized(
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 5ad9728..cf2c66f 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -300,8 +300,8 @@ int core_free_device_list_for_node(
lun = deve->se_lun;
spin_unlock_irq(&nacl->device_list_lock);
- core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
- TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+ core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
+ TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
spin_lock_irq(&nacl->device_list_lock);
}
spin_unlock_irq(&nacl->device_list_lock);
@@ -342,72 +342,46 @@ void core_update_device_list_access(
spin_unlock_irq(&nacl->device_list_lock);
}
-/* core_update_device_list_for_node():
+/* core_enable_device_list_for_node():
*
*
*/
-int core_update_device_list_for_node(
+int core_enable_device_list_for_node(
struct se_lun *lun,
struct se_lun_acl *lun_acl,
u32 mapped_lun,
u32 lun_access,
struct se_node_acl *nacl,
- struct se_portal_group *tpg,
- int enable)
+ struct se_portal_group *tpg)
{
struct se_port *port = lun->lun_sep;
- struct se_dev_entry *deve = nacl->device_list[mapped_lun];
- int trans = 0;
- /*
- * If the MappedLUN entry is being disabled, the entry in
- * port->sep_alua_list must be removed now before clearing the
- * struct se_dev_entry pointers below as logic in
- * core_alua_do_transition_tg_pt() depends on these being present.
- */
- if (!enable) {
- /*
- * deve->se_lun_acl will be NULL for demo-mode created LUNs
- * that have not been explicitly concerted to MappedLUNs ->
- * struct se_lun_acl, but we remove deve->alua_port_list from
- * port->sep_alua_list. This also means that active UAs and
- * NodeACL context specific PR metadata for demo-mode
- * MappedLUN *deve will be released below..
- */
- spin_lock_bh(&port->sep_alua_lock);
- list_del(&deve->alua_port_list);
- spin_unlock_bh(&port->sep_alua_lock);
- }
+ struct se_dev_entry *deve;
spin_lock_irq(&nacl->device_list_lock);
- if (enable) {
- /*
- * Check if the call is handling demo mode -> explict LUN ACL
- * transition. This transition must be for the same struct se_lun
- * + mapped_lun that was setup in demo mode..
- */
- if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
- if (deve->se_lun_acl != NULL) {
- pr_err("struct se_dev_entry->se_lun_acl"
- " already set for demo mode -> explict"
- " LUN ACL transition\n");
- spin_unlock_irq(&nacl->device_list_lock);
- return -EINVAL;
- }
- if (deve->se_lun != lun) {
- pr_err("struct se_dev_entry->se_lun does"
- " match passed struct se_lun for demo mode"
- " -> explict LUN ACL transition\n");
- spin_unlock_irq(&nacl->device_list_lock);
- return -EINVAL;
- }
- deve->se_lun_acl = lun_acl;
- trans = 1;
- } else {
- deve->se_lun = lun;
- deve->se_lun_acl = lun_acl;
- deve->mapped_lun = mapped_lun;
- deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
+
+ deve = nacl->device_list[mapped_lun];
+
+ /*
+ * Check if the call is handling demo mode -> explicit LUN ACL
+ * transition. This transition must be for the same struct se_lun
+ * + mapped_lun that was setup in demo mode..
+ */
+ if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+ if (deve->se_lun_acl != NULL) {
+ pr_err("struct se_dev_entry->se_lun_acl"
+ " already set for demo mode -> explict"
+ " LUN ACL transition\n");
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -EINVAL;
}
+ if (deve->se_lun != lun) {
+ pr_err("struct se_dev_entry->se_lun does"
+ " match passed struct se_lun for demo mode"
+ " -> explict LUN ACL transition\n");
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -EINVAL;
+ }
+ deve->se_lun_acl = lun_acl;
if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
@@ -417,27 +391,72 @@ int core_update_device_list_for_node(
deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
}
- if (trans) {
- spin_unlock_irq(&nacl->device_list_lock);
- return 0;
- }
- deve->creation_time = get_jiffies_64();
- deve->attach_count++;
spin_unlock_irq(&nacl->device_list_lock);
+ return 0;
+ }
- spin_lock_bh(&port->sep_alua_lock);
- list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
- spin_unlock_bh(&port->sep_alua_lock);
+ deve->se_lun = lun;
+ deve->se_lun_acl = lun_acl;
+ deve->mapped_lun = mapped_lun;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
- return 0;
+ if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
+ deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+ } else {
+ deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
}
+
+ deve->creation_time = get_jiffies_64();
+ deve->attach_count++;
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ spin_lock_bh(&port->sep_alua_lock);
+ list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
+ spin_unlock_bh(&port->sep_alua_lock);
+
+ return 0;
+}
+
+/* core_disable_device_list_for_node():
+ *
+ *
+ */
+int core_disable_device_list_for_node(
+ struct se_lun *lun,
+ struct se_lun_acl *lun_acl,
+ u32 mapped_lun,
+ u32 lun_access,
+ struct se_node_acl *nacl,
+ struct se_portal_group *tpg)
+{
+ struct se_port *port = lun->lun_sep;
+ struct se_dev_entry *deve = nacl->device_list[mapped_lun];
+
+ /*
+ * If the MappedLUN entry is being disabled, the entry in
+ * port->sep_alua_list must be removed now before clearing the
+ * struct se_dev_entry pointers below as logic in
+ * core_alua_do_transition_tg_pt() depends on these being present.
+ *
+ * deve->se_lun_acl will be NULL for demo-mode created LUNs
+ * that have not been explicitly converted to MappedLUNs ->
+ * struct se_lun_acl, but we remove deve->alua_port_list from
+ * port->sep_alua_list. This also means that active UAs and
+ * NodeACL context specific PR metadata for demo-mode
+ * MappedLUN *deve will be released below..
+ */
+ spin_lock_bh(&port->sep_alua_lock);
+ list_del(&deve->alua_port_list);
+ spin_unlock_bh(&port->sep_alua_lock);
/*
* Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
* PR operation to complete.
*/
- spin_unlock_irq(&nacl->device_list_lock);
while (atomic_read(&deve->pr_ref_count) != 0)
cpu_relax();
+
spin_lock_irq(&nacl->device_list_lock);
/*
* Disable struct se_dev_entry LUN ACL mapping
@@ -475,9 +494,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
continue;
spin_unlock_irq(&nacl->device_list_lock);
- core_update_device_list_for_node(lun, NULL,
+ core_disable_device_list_for_node(lun, NULL,
deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
- nacl, tpg, 0);
+ nacl, tpg);
spin_lock_irq(&nacl->device_list_lock);
}
@@ -715,7 +734,7 @@ void se_release_device_for_hba(struct se_device *dev)
se_dev_stop(dev);
if (dev->dev_ptr) {
- kthread_stop(dev->process_thread);
+ destroy_workqueue(dev->tmr_wq);
if (dev->transport->free_device)
dev->transport->free_device(dev->dev_ptr);
}
@@ -822,7 +841,7 @@ int se_dev_check_shutdown(struct se_device *dev)
return ret;
}
-u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
+static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
u32 tmp, aligned_max_sectors;
/*
@@ -1273,7 +1292,6 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
struct se_lun *core_dev_add_lun(
struct se_portal_group *tpg,
- struct se_hba *hba,
struct se_device *dev,
u32 lun)
{
@@ -1298,7 +1316,7 @@ struct se_lun *core_dev_add_lun(
pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
- tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
+ tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
/*
* Update LUN maps for dynamically added initiators when
* generate_node_acl is enabled.
@@ -1470,8 +1488,8 @@ int core_dev_add_initiator_node_lun_acl(
lacl->se_lun = lun;
- if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
- lun_access, nacl, tpg, 1) < 0)
+ if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
+ lun_access, nacl, tpg) < 0)
return -EINVAL;
spin_lock(&lun->lun_acl_lock);
@@ -1514,8 +1532,8 @@ int core_dev_del_initiator_node_lun_acl(
smp_mb__after_atomic_dec();
spin_unlock(&lun->lun_acl_lock);
- core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
- TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+ core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
+ TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
lacl->se_lun = NULL;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 405cc98..ea479e5 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -764,8 +764,7 @@ static int target_fabric_port_link(
goto out;
}
- lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
- lun->unpacked_lun);
+ lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun);
if (IS_ERR(lun_p)) {
pr_err("core_dev_add_lun() failed\n");
ret = PTR_ERR(lun_p);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 686dba1..9e21005 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -133,16 +133,11 @@ static struct se_device *fd_create_virtdevice(
ret = PTR_ERR(dev_p);
goto fail;
}
-
- /* O_DIRECT too? */
- flags = O_RDWR | O_CREAT | O_LARGEFILE;
-
/*
- * If fd_buffered_io=1 has not been set explicitly (the default),
- * use O_SYNC to force FILEIO writes to disk.
+ * Use O_DSYNC by default instead of O_SYNC to forgo syncing
+ * of pure timestamp updates.
*/
- if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
- flags |= O_SYNC;
+ flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
file = filp_open(dev_p, flags, 0600);
if (IS_ERR(file)) {
@@ -336,7 +331,7 @@ static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
return 1;
}
-static void fd_emulate_sync_cache(struct se_cmd *cmd)
+static int fd_execute_sync_cache(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = dev->dev_ptr;
@@ -370,7 +365,7 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd)
pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
if (immed)
- return;
+ return 0;
if (ret) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -378,28 +373,15 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd)
} else {
target_complete_cmd(cmd, SAM_STAT_GOOD);
}
-}
-
-static void fd_emulate_write_fua(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- struct fd_dev *fd_dev = dev->dev_ptr;
- loff_t start = cmd->t_task_lba *
- dev->se_sub_dev->se_dev_attrib.block_size;
- loff_t end = start + cmd->data_length;
- int ret;
-
- pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
- cmd->t_task_lba, cmd->data_length);
- ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
- if (ret != 0)
- pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
+ return 0;
}
-static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
- u32 sgl_nents, enum dma_data_direction data_direction)
+static int fd_execute_rw(struct se_cmd *cmd)
{
+ struct scatterlist *sgl = cmd->t_data_sg;
+ u32 sgl_nents = cmd->t_data_nents;
+ enum dma_data_direction data_direction = cmd->data_direction;
struct se_device *dev = cmd->se_dev;
int ret = 0;
@@ -411,19 +393,21 @@ static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
ret = fd_do_readv(cmd, sgl, sgl_nents);
} else {
ret = fd_do_writev(cmd, sgl, sgl_nents);
-
+ /*
+ * Perform implicit vfs_fsync_range() for fd_do_writev() ops
+ * for SCSI WRITEs with Forced Unit Access (FUA) set.
+ * Allow this to happen independent of WCE=0 setting.
+ */
if (ret > 0 &&
- dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
(cmd->se_cmd_flags & SCF_FUA)) {
- /*
- * We might need to be a bit smarter here
- * and return some sense data to let the initiator
- * know the FUA WRITE cache sync failed..?
- */
- fd_emulate_write_fua(cmd);
- }
+ struct fd_dev *fd_dev = dev->dev_ptr;
+ loff_t start = cmd->t_task_lba *
+ dev->se_sub_dev->se_dev_attrib.block_size;
+ loff_t end = start + cmd->data_length;
+ vfs_fsync_range(fd_dev->fd_file, start, end, 1);
+ }
}
if (ret < 0) {
@@ -442,7 +426,6 @@ enum {
static match_table_t tokens = {
{Opt_fd_dev_name, "fd_dev_name=%s"},
{Opt_fd_dev_size, "fd_dev_size=%s"},
- {Opt_fd_buffered_io, "fd_buffered_io=%d"},
{Opt_err, NULL}
};
@@ -454,7 +437,7 @@ static ssize_t fd_set_configfs_dev_params(
struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
- int ret = 0, arg, token;
+ int ret = 0, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -498,19 +481,6 @@ static ssize_t fd_set_configfs_dev_params(
" bytes\n", fd_dev->fd_dev_size);
fd_dev->fbd_flags |= FBDF_HAS_SIZE;
break;
- case Opt_fd_buffered_io:
- match_int(args, &arg);
- if (arg != 1) {
- pr_err("bogus fd_buffered_io=%d value\n", arg);
- ret = -EINVAL;
- goto out;
- }
-
- pr_debug("FILEIO: Using buffered I/O"
- " operations for struct fd_dev\n");
-
- fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
- break;
default:
break;
}
@@ -542,10 +512,8 @@ static ssize_t fd_show_configfs_dev_params(
ssize_t bl = 0;
bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
- bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
- fd_dev->fd_dev_name, fd_dev->fd_dev_size,
- (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
- "Buffered" : "Synchronous");
+ bl += sprintf(b + bl, " File: %s Size: %llu Mode: O_DSYNC\n",
+ fd_dev->fd_dev_name, fd_dev->fd_dev_size);
return bl;
}
@@ -586,6 +554,16 @@ static sector_t fd_get_blocks(struct se_device *dev)
return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
}
+static struct spc_ops fd_spc_ops = {
+ .execute_rw = fd_execute_rw,
+ .execute_sync_cache = fd_execute_sync_cache,
+};
+
+static int fd_parse_cdb(struct se_cmd *cmd)
+{
+ return sbc_parse_cdb(cmd, &fd_spc_ops);
+}
+
static struct se_subsystem_api fileio_template = {
.name = "fileio",
.owner = THIS_MODULE,
@@ -597,8 +575,7 @@ static struct se_subsystem_api fileio_template = {
.allocate_virtdevice = fd_allocate_virtdevice,
.create_virtdevice = fd_create_virtdevice,
.free_device = fd_free_device,
- .execute_cmd = fd_execute_cmd,
- .do_sync_cache = fd_emulate_sync_cache,
+ .parse_cdb = fd_parse_cdb,
.check_configfs_dev_params = fd_check_configfs_dev_params,
.set_configfs_dev_params = fd_set_configfs_dev_params,
.show_configfs_dev_params = fd_show_configfs_dev_params,
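With target_core_cdb.c split into SBC/SPC helpers, FILEIO no longer parses CDBs itself: it registers a .parse_cdb hook that forwards to sbc_parse_cdb() together with a struct spc_ops table of per-backend callbacks. A condensed sketch of that wiring, with hypothetical names and callback bodies elided:

    #include <target/target_core_base.h>
    #include <target/target_core_backend.h>

    static int my_execute_rw(struct se_cmd *cmd)
    {
            /* move data via cmd->t_data_sg, then call
             * target_complete_cmd(cmd, SAM_STAT_GOOD) on success */
            return 0;
    }

    static int my_execute_sync_cache(struct se_cmd *cmd)
    {
            /* flush the backing store and complete the command */
            return 0;
    }

    static struct spc_ops my_spc_ops = {
            .execute_rw         = my_execute_rw,
            .execute_sync_cache = my_execute_sync_cache,
    };

    /* wired up as .parse_cdb in the backend's struct se_subsystem_api */
    static int my_parse_cdb(struct se_cmd *cmd)
    {
            return sbc_parse_cdb(cmd, &my_spc_ops);
    }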
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index fbd59ef..70ce7fd 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -14,7 +14,6 @@
#define FBDF_HAS_PATH 0x01
#define FBDF_HAS_SIZE 0x02
-#define FDBD_USE_BUFFERED_IO 0x04
struct fd_dev {
u32 fbd_flags;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index fd47950..76db75e 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -40,6 +40,7 @@
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
+#include <asm/unaligned.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
@@ -96,6 +97,7 @@ static struct se_device *iblock_create_virtdevice(
struct request_queue *q;
struct queue_limits *limits;
u32 dev_flags = 0;
+ fmode_t mode;
int ret = -EINVAL;
if (!ib_dev) {
@@ -117,8 +119,11 @@ static struct se_device *iblock_create_virtdevice(
pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
ib_dev->ibd_udev_path);
- bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
- FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
+ mode = FMODE_READ|FMODE_EXCL;
+ if (!ib_dev->ibd_readonly)
+ mode |= FMODE_WRITE;
+
+ bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
if (IS_ERR(bd)) {
ret = PTR_ERR(bd);
goto failed;
@@ -292,7 +297,7 @@ static void iblock_end_io_flush(struct bio *bio, int err)
* Implement SYNCHRONIZE CACHE. Note that we can't handle lba ranges and must
* always flush the whole cache.
*/
-static void iblock_emulate_sync_cache(struct se_cmd *cmd)
+static int iblock_execute_sync_cache(struct se_cmd *cmd)
{
struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
int immed = (cmd->t_task_cdb[1] & 0x2);
@@ -311,23 +316,98 @@ static void iblock_emulate_sync_cache(struct se_cmd *cmd)
if (!immed)
bio->bi_private = cmd;
submit_bio(WRITE_FLUSH, bio);
+ return 0;
}
-static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
+static int iblock_execute_unmap(struct se_cmd *cmd)
{
+ struct se_device *dev = cmd->se_dev;
struct iblock_dev *ibd = dev->dev_ptr;
- struct block_device *bd = ibd->ibd_bd;
- int barrier = 0;
+ unsigned char *buf, *ptr = NULL;
+ sector_t lba;
+ int size = cmd->data_length;
+ u32 range;
+ int ret = 0;
+ int dl, bd_dl;
+
+ buf = transport_kmap_data_sg(cmd);
+
+ dl = get_unaligned_be16(&buf[0]);
+ bd_dl = get_unaligned_be16(&buf[2]);
+
+ size = min(size - 8, bd_dl);
+ if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* First UNMAP block descriptor starts at 8 byte offset */
+ ptr = &buf[8];
+ pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
+ " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
+
+ while (size >= 16) {
+ lba = get_unaligned_be64(&ptr[0]);
+ range = get_unaligned_be32(&ptr[8]);
+ pr_debug("UNMAP: Using lba: %llu and range: %u\n",
+ (unsigned long long)lba, range);
+
+ if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (lba + range > dev->transport->get_blocks(dev) + 1) {
+ cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
+ ret = -EINVAL;
+ goto err;
+ }
- return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
+ ret = blkdev_issue_discard(ibd->ibd_bd, lba, range,
+ GFP_KERNEL, 0);
+ if (ret < 0) {
+ pr_err("blkdev_issue_discard() failed: %d\n",
+ ret);
+ goto err;
+ }
+
+ ptr += 16;
+ size -= 16;
+ }
+
+err:
+ transport_kunmap_data_sg(cmd);
+ if (!ret)
+ target_complete_cmd(cmd, GOOD);
+ return ret;
+}
+
+static int iblock_execute_write_same(struct se_cmd *cmd)
+{
+ struct iblock_dev *ibd = cmd->se_dev->dev_ptr;
+ int ret;
+
+ ret = blkdev_issue_discard(ibd->ibd_bd, cmd->t_task_lba,
+ spc_get_write_same_sectors(cmd), GFP_KERNEL,
+ 0);
+ if (ret < 0) {
+ pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
+ return ret;
+ }
+
+ target_complete_cmd(cmd, GOOD);
+ return 0;
}
enum {
- Opt_udev_path, Opt_force, Opt_err
+ Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};
static match_table_t tokens = {
{Opt_udev_path, "udev_path=%s"},
+ {Opt_readonly, "readonly=%d"},
{Opt_force, "force=%d"},
{Opt_err, NULL}
};
@@ -340,6 +420,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, token;
+ unsigned long tmp_readonly;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -372,6 +453,22 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
ib_dev->ibd_udev_path);
ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
break;
+ case Opt_readonly:
+ arg_p = match_strdup(&args[0]);
+ if (!arg_p) {
+ ret = -ENOMEM;
+ break;
+ }
+ ret = strict_strtoul(arg_p, 0, &tmp_readonly);
+ kfree(arg_p);
+ if (ret < 0) {
+ pr_err("strict_strtoul() failed for"
+ " readonly=\n");
+ goto out;
+ }
+ ib_dev->ibd_readonly = tmp_readonly;
+ pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
+ break;
case Opt_force:
break;
default:
@@ -411,11 +508,10 @@ static ssize_t iblock_show_configfs_dev_params(
if (bd)
bl += sprintf(b + bl, "iBlock device: %s",
bdevname(bd, buf));
- if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
- bl += sprintf(b + bl, " UDEV PATH: %s\n",
+ if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH)
+ bl += sprintf(b + bl, " UDEV PATH: %s",
ibd->ibd_udev_path);
- } else
- bl += sprintf(b + bl, "\n");
+ bl += sprintf(b + bl, " readonly: %d\n", ibd->ibd_readonly);
bl += sprintf(b + bl, " ");
if (bd) {
@@ -493,9 +589,11 @@ static void iblock_submit_bios(struct bio_list *list, int rw)
blk_finish_plug(&plug);
}
-static int iblock_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
- u32 sgl_nents, enum dma_data_direction data_direction)
+static int iblock_execute_rw(struct se_cmd *cmd)
{
+ struct scatterlist *sgl = cmd->t_data_sg;
+ u32 sgl_nents = cmd->t_data_nents;
+ enum dma_data_direction data_direction = cmd->data_direction;
struct se_device *dev = cmd->se_dev;
struct iblock_req *ibr;
struct bio *bio;
@@ -642,6 +740,18 @@ static void iblock_bio_done(struct bio *bio, int err)
iblock_complete_cmd(cmd);
}
+static struct spc_ops iblock_spc_ops = {
+ .execute_rw = iblock_execute_rw,
+ .execute_sync_cache = iblock_execute_sync_cache,
+ .execute_write_same = iblock_execute_write_same,
+ .execute_unmap = iblock_execute_unmap,
+};
+
+static int iblock_parse_cdb(struct se_cmd *cmd)
+{
+ return sbc_parse_cdb(cmd, &iblock_spc_ops);
+}
+
static struct se_subsystem_api iblock_template = {
.name = "iblock",
.owner = THIS_MODULE,
@@ -653,9 +763,7 @@ static struct se_subsystem_api iblock_template = {
.allocate_virtdevice = iblock_allocate_virtdevice,
.create_virtdevice = iblock_create_virtdevice,
.free_device = iblock_free_device,
- .execute_cmd = iblock_execute_cmd,
- .do_discard = iblock_do_discard,
- .do_sync_cache = iblock_emulate_sync_cache,
+ .parse_cdb = iblock_parse_cdb,
.check_configfs_dev_params = iblock_check_configfs_dev_params,
.set_configfs_dev_params = iblock_set_configfs_dev_params,
.show_configfs_dev_params = iblock_show_configfs_dev_params,
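iblock_execute_unmap() above parses the UNMAP parameter list directly: bytes 0-1 carry the data length, bytes 2-3 the block descriptor data length, and 16-byte descriptors (8-byte LBA, 4-byte block count, 4 reserved bytes) start at offset 8. A stand-alone sketch of walking that layout (hypothetical callback; the bounds checks against device limits done in the real code are omitted):

    #include <linux/kernel.h>
    #include <asm/unaligned.h>

    struct unmap_desc {
            unsigned long long lba;
            unsigned int nr_blocks;
    };

    /* buf points at the UNMAP parameter list, len is the transfer length */
    static int for_each_unmap_desc(const unsigned char *buf, int len,
                                   int (*cb)(const struct unmap_desc *))
    {
            int bd_dl = get_unaligned_be16(&buf[2]);
            const unsigned char *ptr = &buf[8];     /* first descriptor */
            int size = min(len - 8, bd_dl);
            struct unmap_desc d;

            while (size >= 16) {
                    d.lba = get_unaligned_be64(&ptr[0]);
                    d.nr_blocks = get_unaligned_be32(&ptr[8]);
                    if (cb(&d))
                            return -1;
                    ptr += 16;
                    size -= 16;
            }
            return 0;
    }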
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 66cf7b9..533627a 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -18,6 +18,7 @@ struct iblock_dev {
u32 ibd_flags;
struct bio_set *ibd_bio_set;
struct block_device *ibd_bd;
+ bool ibd_readonly;
} ____cacheline_aligned;
#endif /* TARGET_CORE_IBLOCK_H */
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 165e824..0fd4282 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -4,25 +4,16 @@
/* target_core_alua.c */
extern struct t10_alua_lu_gp *default_lu_gp;
-/* target_core_cdb.c */
-int target_emulate_inquiry(struct se_cmd *cmd);
-int target_emulate_readcapacity(struct se_cmd *cmd);
-int target_emulate_readcapacity_16(struct se_cmd *cmd);
-int target_emulate_modesense(struct se_cmd *cmd);
-int target_emulate_request_sense(struct se_cmd *cmd);
-int target_emulate_unmap(struct se_cmd *cmd);
-int target_emulate_write_same(struct se_cmd *cmd);
-int target_emulate_synchronize_cache(struct se_cmd *cmd);
-int target_emulate_noop(struct se_cmd *cmd);
-
/* target_core_device.c */
struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
int core_free_device_list_for_node(struct se_node_acl *,
struct se_portal_group *);
void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
void core_update_device_list_access(u32, u32, struct se_node_acl *);
-int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *,
- u32, u32, struct se_node_acl *, struct se_portal_group *, int);
+int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
+ u32, u32, struct se_node_acl *, struct se_portal_group *);
+int core_disable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
+ u32, u32, struct se_node_acl *, struct se_portal_group *);
void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
int core_dev_export(struct se_device *, struct se_portal_group *,
struct se_lun *);
@@ -56,8 +47,7 @@ int se_dev_set_max_sectors(struct se_device *, u32);
int se_dev_set_fabric_max_sectors(struct se_device *, u32);
int se_dev_set_optimal_sectors(struct se_device *, u32);
int se_dev_set_block_size(struct se_device *, u32);
-struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
- struct se_device *, u32);
+struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32);
int core_dev_del_lun(struct se_portal_group *, u32);
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
@@ -104,7 +94,6 @@ void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t);
void transport_subsystem_check_init(void);
void transport_cmd_finish_abort(struct se_cmd *, int);
-void __target_remove_from_execute_list(struct se_cmd *);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
void transport_dump_dev_state(struct se_device *, char *, int *);
void transport_dump_dev_info(struct se_device *, struct se_lun *,
@@ -116,6 +105,7 @@ int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
int transport_clear_lun_from_sessions(struct se_lun *);
void transport_send_task_abort(struct se_cmd *);
+int target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
/* target_core_stat.c */
void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 8556499..1e94650 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -507,7 +507,7 @@ static int core_scsi3_pr_seq_non_holder(
* Check if write exclusive initiator ports *NOT* holding the
* WRITE_EXCLUSIVE_* reservation.
*/
- if ((we) && !(registered_nexus)) {
+ if (we && !registered_nexus) {
if (cmd->data_direction == DMA_TO_DEVICE) {
/*
* Conflict for write exclusive
@@ -2031,7 +2031,7 @@ static int __core_scsi3_write_aptpl_to_file(
if (IS_ERR(file) || !file || !file->f_dentry) {
pr_err("filp_open(%s) for APTPL metadata"
" failed\n", path);
- return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
+ return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
}
iov[0].iov_base = &buf[0];
@@ -2486,7 +2486,7 @@ static int core_scsi3_pro_reserve(
*/
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
- if ((pr_res_holder)) {
+ if (pr_res_holder) {
/*
* From spc4r17 Section 5.7.9: Reserving:
*
@@ -3818,7 +3818,7 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- ret = EINVAL;
+ ret = -EINVAL;
goto out;
}
@@ -3828,7 +3828,8 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
*/
if (!cmd->se_sess) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (cmd->data_length < 24) {
@@ -4029,7 +4030,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
spin_lock(&se_dev->dev_reservation_lock);
pr_reg = se_dev->dev_pr_res_holder;
- if ((pr_reg)) {
+ if (pr_reg) {
/*
* Set the hardcoded Additional Length
*/
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 4ce2cf6..6e32ff6 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -35,8 +35,10 @@
#include <linux/spinlock.h>
#include <linux/genhd.h>
#include <linux/cdrom.h>
-#include <linux/file.h>
+#include <linux/ratelimit.h>
#include <linux/module.h>
+#include <asm/unaligned.h>
+
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
@@ -46,12 +48,14 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
+#include "target_core_alua.h"
#include "target_core_pscsi.h"
#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
static struct se_subsystem_api pscsi_template;
+static int pscsi_execute_cmd(struct se_cmd *cmd);
static void pscsi_req_done(struct request *, int);
/* pscsi_attach_hba():
@@ -1019,9 +1023,79 @@ fail:
return -ENOMEM;
}
-static int pscsi_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
- u32 sgl_nents, enum dma_data_direction data_direction)
+/*
+ * Clear a lun set in the cdb if the initiator talking to us spoke
+ * an old standards version, as we can't assume the underlying device
+ * won't choke up on it.
+ */
+static inline void pscsi_clear_cdb_lun(unsigned char *cdb)
+{
+ switch (cdb[0]) {
+ case READ_10: /* SBC - RDProtect */
+ case READ_12: /* SBC - RDProtect */
+ case READ_16: /* SBC - RDProtect */
+ case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
+ case VERIFY: /* SBC - VRProtect */
+ case VERIFY_16: /* SBC - VRProtect */
+ case WRITE_VERIFY: /* SBC - VRProtect */
+ case WRITE_VERIFY_12: /* SBC - VRProtect */
+ case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
+ break;
+ default:
+ cdb[1] &= 0x1f; /* clear logical unit number */
+ break;
+ }
+}
+
+static int pscsi_parse_cdb(struct se_cmd *cmd)
+{
+ unsigned char *cdb = cmd->t_task_cdb;
+ unsigned int dummy_size;
+ int ret;
+
+ if (cmd->se_cmd_flags & SCF_BIDI) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -EINVAL;
+ }
+
+ pscsi_clear_cdb_lun(cdb);
+
+ /*
+ * For REPORT LUNS we always need to emulate the response; for everything
+ * else the default for pSCSI is to pass the command to the underlying
+ * LLD / physical hardware.
+ */
+ switch (cdb[0]) {
+ case REPORT_LUNS:
+ ret = spc_parse_cdb(cmd, &dummy_size);
+ if (ret)
+ return ret;
+ break;
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ case WRITE_VERIFY:
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ /* FALLTHROUGH */
+ default:
+ cmd->execute_cmd = pscsi_execute_cmd;
+ break;
+ }
+
+ return 0;
+}
+
+static int pscsi_execute_cmd(struct se_cmd *cmd)
{
+ struct scatterlist *sgl = cmd->t_data_sg;
+ u32 sgl_nents = cmd->t_data_nents;
+ enum dma_data_direction data_direction = cmd->data_direction;
struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
struct pscsi_plugin_task *pt;
struct request *req;
@@ -1042,7 +1116,7 @@ static int pscsi_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
memcpy(pt->pscsi_cdb, cmd->t_task_cdb,
scsi_command_size(cmd->t_task_cdb));
- if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
+ if (!sgl) {
req = blk_get_request(pdv->pdv_sd->request_queue,
(data_direction == DMA_TO_DEVICE),
GFP_KERNEL);
@@ -1188,7 +1262,7 @@ static struct se_subsystem_api pscsi_template = {
.create_virtdevice = pscsi_create_virtdevice,
.free_device = pscsi_free_device,
.transport_complete = pscsi_transport_complete,
- .execute_cmd = pscsi_execute_cmd,
+ .parse_cdb = pscsi_parse_cdb,
.check_configfs_dev_params = pscsi_check_configfs_dev_params,
.set_configfs_dev_params = pscsi_set_configfs_dev_params,
.show_configfs_dev_params = pscsi_show_configfs_dev_params,
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index d0ceb87..d00bbe3 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -284,9 +284,11 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return NULL;
}
-static int rd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
- u32 sgl_nents, enum dma_data_direction data_direction)
+static int rd_execute_rw(struct se_cmd *cmd)
{
+ struct scatterlist *sgl = cmd->t_data_sg;
+ u32 sgl_nents = cmd->t_data_nents;
+ enum dma_data_direction data_direction = cmd->data_direction;
struct se_device *se_dev = cmd->se_dev;
struct rd_dev *dev = se_dev->dev_ptr;
struct rd_dev_sg_table *table;
@@ -460,6 +462,15 @@ static sector_t rd_get_blocks(struct se_device *dev)
return blocks_long;
}
+static struct spc_ops rd_spc_ops = {
+ .execute_rw = rd_execute_rw,
+};
+
+static int rd_parse_cdb(struct se_cmd *cmd)
+{
+ return sbc_parse_cdb(cmd, &rd_spc_ops);
+}
+
static struct se_subsystem_api rd_mcp_template = {
.name = "rd_mcp",
.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
@@ -468,7 +479,7 @@ static struct se_subsystem_api rd_mcp_template = {
.allocate_virtdevice = rd_allocate_virtdevice,
.create_virtdevice = rd_create_virtdevice,
.free_device = rd_free_device,
- .execute_cmd = rd_execute_cmd,
+ .parse_cdb = rd_parse_cdb,
.check_configfs_dev_params = rd_check_configfs_dev_params,
.set_configfs_dev_params = rd_set_configfs_dev_params,
.show_configfs_dev_params = rd_show_configfs_dev_params,
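With this change the ramdisk backend no longer implements a monolithic execute_cmd; it hands sbc_parse_cdb an ops table whose .execute_rw callback is stored in the command and invoked later. A small stand-alone sketch of that ops-table dispatch pattern, using hypothetical names (demo_ops, demo_parse) rather than the actual target-core API:

/*
 * Illustrative sketch of the ops-table pattern: the backend supplies an
 * execute callback, the shared parser selects and stores it in the command.
 */
#include <stdio.h>

struct demo_cmd {
        unsigned char opcode;
        int (*execute)(struct demo_cmd *cmd);
};

struct demo_ops {
        int (*execute_rw)(struct demo_cmd *cmd);
};

static int demo_execute_rw(struct demo_cmd *cmd)
{
        printf("executing R/W opcode 0x%02x\n", cmd->opcode);
        return 0;
}

static const struct demo_ops demo_backend_ops = {
        .execute_rw = demo_execute_rw,
};

/* Shared parser: picks the handler; the backend only provides the table. */
static int demo_parse(struct demo_cmd *cmd, const struct demo_ops *ops)
{
        cmd->execute = ops->execute_rw;
        return 0;
}

int main(void)
{
        struct demo_cmd cmd = { .opcode = 0x28 /* READ(10) */ };

        if (demo_parse(&cmd, &demo_backend_ops) == 0)
                cmd.execute(&cmd);
        return 0;
}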
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
new file mode 100644
index 0000000..a9dd946
--- /dev/null
+++ b/drivers/target/target_core_sbc.c
@@ -0,0 +1,581 @@
+/*
+ * SCSI Block Commands (SBC) parsing and emulation.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ratelimit.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_ua.h"
+
+
+static int sbc_emulate_readcapacity(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf;
+ unsigned long long blocks_long = dev->transport->get_blocks(dev);
+ u32 blocks;
+
+ if (blocks_long >= 0x00000000ffffffff)
+ blocks = 0xffffffff;
+ else
+ blocks = (u32)blocks_long;
+
+ buf = transport_kmap_data_sg(cmd);
+
+ buf[0] = (blocks >> 24) & 0xff;
+ buf[1] = (blocks >> 16) & 0xff;
+ buf[2] = (blocks >> 8) & 0xff;
+ buf[3] = blocks & 0xff;
+ buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
+ buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
+ buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
+ buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
+
+ transport_kunmap_data_sg(cmd);
+
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+}
+
+static int sbc_emulate_readcapacity_16(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf;
+ unsigned long long blocks = dev->transport->get_blocks(dev);
+
+ buf = transport_kmap_data_sg(cmd);
+
+ buf[0] = (blocks >> 56) & 0xff;
+ buf[1] = (blocks >> 48) & 0xff;
+ buf[2] = (blocks >> 40) & 0xff;
+ buf[3] = (blocks >> 32) & 0xff;
+ buf[4] = (blocks >> 24) & 0xff;
+ buf[5] = (blocks >> 16) & 0xff;
+ buf[6] = (blocks >> 8) & 0xff;
+ buf[7] = blocks & 0xff;
+ buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
+ buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
+ buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
+ buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
+ /*
+ * Set Thin Provisioning Enable bit following sbc3r22 in section
+ * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
+ */
+ if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
+ buf[14] = 0x80;
+
+ transport_kunmap_data_sg(cmd);
+
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+}
+
+int spc_get_write_same_sectors(struct se_cmd *cmd)
+{
+ u32 num_blocks;
+
+ if (cmd->t_task_cdb[0] == WRITE_SAME)
+ num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
+ else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
+ num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
+ else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
+ num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
+
+ /*
+ * Use the explicit range when a non-zero value is supplied; otherwise
+ * calculate the remaining range based on ->get_blocks() - starting LBA.
+ */
+ if (num_blocks)
+ return num_blocks;
+
+ return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
+ cmd->t_task_lba + 1;
+}
+EXPORT_SYMBOL(spc_get_write_same_sectors);
+
+static int sbc_emulate_verify(struct se_cmd *cmd)
+{
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+}
+
+static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
+{
+ return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors;
+}
+
+static int sbc_check_valid_sectors(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ unsigned long long end_lba;
+ u32 sectors;
+
+ sectors = cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size;
+ end_lba = dev->transport->get_blocks(dev) + 1;
+
+ if (cmd->t_task_lba + sectors > end_lba) {
+ pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
+ cmd->t_task_lba, sectors, end_lba);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline u32 transport_get_sectors_6(unsigned char *cdb)
+{
+ /*
+ * Use 8-bit sector value. SBC-3 says:
+ *
+ * A TRANSFER LENGTH field set to zero specifies that 256
+ * logical blocks shall be written. Any other value
+ * specifies the number of logical blocks that shall be
+ * written.
+ */
+ return cdb[4] ? : 256;
+}
+
+static inline u32 transport_get_sectors_10(unsigned char *cdb)
+{
+ return (u32)(cdb[7] << 8) + cdb[8];
+}
+
+static inline u32 transport_get_sectors_12(unsigned char *cdb)
+{
+ return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
+}
+
+static inline u32 transport_get_sectors_16(unsigned char *cdb)
+{
+ return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
+ (cdb[12] << 8) + cdb[13];
+}
+
+/*
+ * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
+ */
+static inline u32 transport_get_sectors_32(unsigned char *cdb)
+{
+ return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
+ (cdb[30] << 8) + cdb[31];
+}
+
+static inline u32 transport_lba_21(unsigned char *cdb)
+{
+ return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
+}
+
+static inline u32 transport_lba_32(unsigned char *cdb)
+{
+ return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+}
+
+static inline unsigned long long transport_lba_64(unsigned char *cdb)
+{
+ unsigned int __v1, __v2;
+
+ __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+ __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+
+ return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+/*
+ * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
+ */
+static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
+{
+ unsigned int __v1, __v2;
+
+ __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
+ __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
+
+ return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+static int sbc_write_same_supported(struct se_device *dev,
+ unsigned char *flags)
+{
+ if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
+ pr_err("WRITE_SAME PBDATA and LBDATA"
+ " bits not supported for Block Discard"
+ " Emulation\n");
+ return -ENOSYS;
+ }
+
+ /*
+ * Currently for the emulated case we only accept
+ * tpws with the UNMAP=1 bit set.
+ */
+ if (!(flags[0] & 0x08)) {
+ pr_err("WRITE_SAME w/o UNMAP bit not"
+ " supported for Block Discard Emulation\n");
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+static void xdreadwrite_callback(struct se_cmd *cmd)
+{
+ unsigned char *buf, *addr;
+ struct scatterlist *sg;
+ unsigned int offset;
+ int i;
+ int count;
+ /*
+ * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
+ *
+ * 1) read the specified logical block(s);
+ * 2) transfer logical blocks from the data-out buffer;
+ * 3) XOR the logical blocks transferred from the data-out buffer with
+ * the logical blocks read, storing the resulting XOR data in a buffer;
+ * 4) if the DISABLE WRITE bit is set to zero, then write the logical
+ * blocks transferred from the data-out buffer; and
+ * 5) transfer the resulting XOR data to the data-in buffer.
+ */
+ buf = kmalloc(cmd->data_length, GFP_KERNEL);
+ if (!buf) {
+ pr_err("Unable to allocate xor_callback buf\n");
+ return;
+ }
+ /*
+ * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
+ * into the locally allocated *buf
+ */
+ sg_copy_to_buffer(cmd->t_data_sg,
+ cmd->t_data_nents,
+ buf,
+ cmd->data_length);
+
+ /*
+ * Now perform the XOR against the BIDI read memory located at
+ * cmd->t_mem_bidi_list
+ */
+
+ offset = 0;
+ for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
+ addr = kmap_atomic(sg_page(sg));
+ if (!addr)
+ goto out;
+
+ for (i = 0; i < sg->length; i++)
+ *(addr + sg->offset + i) ^= *(buf + offset + i);
+
+ offset += sg->length;
+ kunmap_atomic(addr);
+ }
+
+out:
+ kfree(buf);
+}
+
+int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
+{
+ struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *cdb = cmd->t_task_cdb;
+ unsigned int size;
+ u32 sectors = 0;
+ int ret;
+
+ switch (cdb[0]) {
+ case READ_6:
+ sectors = transport_get_sectors_6(cdb);
+ cmd->t_task_lba = transport_lba_21(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_cmd = ops->execute_rw;
+ break;
+ case READ_10:
+ sectors = transport_get_sectors_10(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_cmd = ops->execute_rw;
+ break;
+ case READ_12:
+ sectors = transport_get_sectors_12(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_cmd = ops->execute_rw;
+ break;
+ case READ_16:
+ sectors = transport_get_sectors_16(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_cmd = ops->execute_rw;
+ break;
+ case WRITE_6:
+ sectors = transport_get_sectors_6(cdb);
+ cmd->t_task_lba = transport_lba_21(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_cmd = ops->execute_rw;
+ break;
+ case WRITE_10:
+ case WRITE_VERIFY:
+ sectors = transport_get_sectors_10(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_cmd = ops->execute_rw;
+ break;
+ case WRITE_12:
+ sectors = transport_get_sectors_12(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_cmd = ops->execute_rw;
+ break;
+ case WRITE_16:
+ sectors = transport_get_sectors_16(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_cmd = ops->execute_rw;
+ break;
+ case XDWRITEREAD_10:
+ if ((cmd->data_direction != DMA_TO_DEVICE) ||
+ !(cmd->se_cmd_flags & SCF_BIDI))
+ goto out_invalid_cdb_field;
+ sectors = transport_get_sectors_10(cdb);
+
+ cmd->t_task_lba = transport_lba_32(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+
+ /*
+ * Setup BIDI XOR callback to be run after I/O completion.
+ */
+ cmd->execute_cmd = ops->execute_rw;
+ cmd->transport_complete_callback = &xdreadwrite_callback;
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
+ break;
+ case VARIABLE_LENGTH_CMD:
+ {
+ u16 service_action = get_unaligned_be16(&cdb[8]);
+ switch (service_action) {
+ case XDWRITEREAD_32:
+ sectors = transport_get_sectors_32(cdb);
+
+ /*
+ * Use WRITE_32 and READ_32 opcodes for the emulated
+ * XDWRITE_READ_32 logic.
+ */
+ cmd->t_task_lba = transport_lba_64_ext(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+
+ /*
+ * Setup BIDI XOR callback to be run after I/O
+ * completion.
+ */
+ cmd->execute_cmd = ops->execute_rw;
+ cmd->transport_complete_callback = &xdreadwrite_callback;
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
+ break;
+ case WRITE_SAME_32:
+ if (!ops->execute_write_same)
+ goto out_unsupported_cdb;
+
+ sectors = transport_get_sectors_32(cdb);
+ if (!sectors) {
+ pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
+ " supported\n");
+ goto out_invalid_cdb_field;
+ }
+
+ size = sbc_get_size(cmd, 1);
+ cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
+
+ if (sbc_write_same_supported(dev, &cdb[10]) < 0)
+ goto out_unsupported_cdb;
+ cmd->execute_cmd = ops->execute_write_same;
+ break;
+ default:
+ pr_err("VARIABLE_LENGTH_CMD service action"
+ " 0x%04x not supported\n", service_action);
+ goto out_unsupported_cdb;
+ }
+ break;
+ }
+ case READ_CAPACITY:
+ size = READ_CAP_LEN;
+ cmd->execute_cmd = sbc_emulate_readcapacity;
+ break;
+ case SERVICE_ACTION_IN:
+ switch (cmd->t_task_cdb[1] & 0x1f) {
+ case SAI_READ_CAPACITY_16:
+ cmd->execute_cmd = sbc_emulate_readcapacity_16;
+ break;
+ default:
+ pr_err("Unsupported SA: 0x%02x\n",
+ cmd->t_task_cdb[1] & 0x1f);
+ goto out_invalid_cdb_field;
+ }
+ size = (cdb[10] << 24) | (cdb[11] << 16) |
+ (cdb[12] << 8) | cdb[13];
+ break;
+ case SYNCHRONIZE_CACHE:
+ case SYNCHRONIZE_CACHE_16:
+ if (!ops->execute_sync_cache)
+ goto out_unsupported_cdb;
+
+ /*
+ * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
+ */
+ if (cdb[0] == SYNCHRONIZE_CACHE) {
+ sectors = transport_get_sectors_10(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ } else {
+ sectors = transport_get_sectors_16(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
+ }
+
+ size = sbc_get_size(cmd, sectors);
+
+ /*
+ * Check to ensure that LBA + Range does not extend past the end of
+ * the device for IBLOCK and FILEIO ->do_sync_cache() backend calls.
+ */
+ if (cmd->t_task_lba || sectors) {
+ if (sbc_check_valid_sectors(cmd) < 0)
+ goto out_invalid_cdb_field;
+ }
+ cmd->execute_cmd = ops->execute_sync_cache;
+ break;
+ case UNMAP:
+ if (!ops->execute_unmap)
+ goto out_unsupported_cdb;
+
+ size = get_unaligned_be16(&cdb[7]);
+ cmd->execute_cmd = ops->execute_unmap;
+ break;
+ case WRITE_SAME_16:
+ if (!ops->execute_write_same)
+ goto out_unsupported_cdb;
+
+ sectors = transport_get_sectors_16(cdb);
+ if (!sectors) {
+ pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
+ goto out_invalid_cdb_field;
+ }
+
+ size = sbc_get_size(cmd, 1);
+ cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
+
+ if (sbc_write_same_supported(dev, &cdb[1]) < 0)
+ goto out_unsupported_cdb;
+ cmd->execute_cmd = ops->execute_write_same;
+ break;
+ case WRITE_SAME:
+ if (!ops->execute_write_same)
+ goto out_unsupported_cdb;
+
+ sectors = transport_get_sectors_10(cdb);
+ if (!sectors) {
+ pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
+ goto out_invalid_cdb_field;
+ }
+
+ size = sbc_get_size(cmd, 1);
+ cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
+
+ /*
+ * Follow sbcr26 with WRITE_SAME (10) and check for the existence
+ * of byte 1 bit 3 UNMAP instead of original reserved field
+ */
+ if (sbc_write_same_supported(dev, &cdb[1]) < 0)
+ goto out_unsupported_cdb;
+ cmd->execute_cmd = ops->execute_write_same;
+ break;
+ case VERIFY:
+ size = 0;
+ cmd->execute_cmd = sbc_emulate_verify;
+ break;
+ default:
+ ret = spc_parse_cdb(cmd, &size);
+ if (ret)
+ return ret;
+ }
+
+ /* reject any command that we don't have a handler for */
+ if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
+ goto out_unsupported_cdb;
+
+ if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+ unsigned long long end_lba;
+
+ if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
+ printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
+ " big sectors %u exceeds fabric_max_sectors:"
+ " %u\n", cdb[0], sectors,
+ su_dev->se_dev_attrib.fabric_max_sectors);
+ goto out_invalid_cdb_field;
+ }
+ if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
+ printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
+ " big sectors %u exceeds backend hw_max_sectors:"
+ " %u\n", cdb[0], sectors,
+ su_dev->se_dev_attrib.hw_max_sectors);
+ goto out_invalid_cdb_field;
+ }
+
+ end_lba = dev->transport->get_blocks(dev) + 1;
+ if (cmd->t_task_lba + sectors > end_lba) {
+ pr_err("cmd exceeds last lba %llu "
+ "(lba %llu, sectors %u)\n",
+ end_lba, cmd->t_task_lba, sectors);
+ goto out_invalid_cdb_field;
+ }
+
+ size = sbc_get_size(cmd, sectors);
+ }
+
+ ret = target_cmd_size_check(cmd, size);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+
+out_unsupported_cdb:
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -EINVAL;
+out_invalid_cdb_field:
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
+}
+EXPORT_SYMBOL(sbc_parse_cdb);
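sbc_parse_cdb() pulls the transfer length and LBA out of the big-endian CDB fields and rejects ranges that run past the last LBA or exceed the fabric/backend sector limits. A stand-alone sketch of the extraction and bounds check for a READ(16) CDB, with made-up CDB bytes and device capacity:

/*
 * Stand-alone sketch of big-endian field extraction and the end-of-device
 * bound check. Not kernel code; values are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t get_be64(const unsigned char *p)
{
        uint64_t v = 0;
        int i;

        for (i = 0; i < 8; i++)
                v = (v << 8) | p[i];
        return v;
}

static uint32_t get_be32(const unsigned char *p)
{
        return ((uint32_t)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

int main(void)
{
        /* READ(16): opcode 0x88, LBA in bytes 2-9, length in bytes 10-13 */
        unsigned char cdb[16] = { 0x88, 0, 0, 0, 0, 0, 0, 0, 0x10, 0x00,
                                  0, 0, 0x00, 0x08, 0, 0 };
        uint64_t lba = get_be64(&cdb[2]);
        uint32_t sectors = get_be32(&cdb[10]);
        uint64_t dev_blocks = 8192;             /* pretend capacity in blocks */
        uint64_t end_lba = dev_blocks + 1;      /* same convention as the patch */

        printf("lba=%llu sectors=%u\n", (unsigned long long)lba, sectors);
        if (lba + sectors > end_lba)
                printf("rejected: range exceeds last LBA\n");
        else
                printf("accepted\n");
        return 0;
}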
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_spc.c
index 9888693..4c861de 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_spc.c
@@ -1,5 +1,5 @@
/*
- * CDB emulation for non-READ/WRITE commands.
+ * SCSI Primary Commands (SPC) parsing and emulation.
*
* Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
@@ -26,17 +26,21 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/unaligned.h>
+
#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_pr.h"
#include "target_core_ua.h"
-static void
-target_fill_alua_data(struct se_port *port, unsigned char *buf)
+
+static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
@@ -65,8 +69,7 @@ target_fill_alua_data(struct se_port *port, unsigned char *buf)
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
}
-static int
-target_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
+static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
{
struct se_lun *lun = cmd->se_lun;
struct se_device *dev = cmd->se_dev;
@@ -93,7 +96,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
* Enable SCCS and TPGS fields for Emulated ALUA
*/
if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
- target_fill_alua_data(lun->lun_sep, buf);
+ spc_fill_alua_data(lun->lun_sep, buf);
buf[7] = 0x2; /* CmdQue=1 */
@@ -106,8 +109,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
}
/* unit serial number */
-static int
-target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
+static int spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
u16 len = 0;
@@ -127,8 +129,8 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
-static void
-target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf)
+static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
+ unsigned char *buf)
{
unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
int cnt;
@@ -162,8 +164,7 @@ target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf)
* Device identification VPD, for a complete list of
* DESIGNATOR TYPEs see spc4r17 Table 459.
*/
-static int
-target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
+static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
struct se_lun *lun = cmd->se_lun;
@@ -220,7 +221,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
* VENDOR_SPECIFIC_IDENTIFIER and
* VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
*/
- target_parse_naa_6h_vendor_specific(dev, &buf[off]);
+ spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
len = 20;
off = (len + 4);
@@ -414,8 +415,7 @@ check_scsi_name:
}
/* Extended INQUIRY Data VPD Page */
-static int
-target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
+static int spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
buf[3] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */
@@ -428,15 +428,14 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
}
/* Block Limits VPD page */
-static int
-target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
+static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
u32 max_sectors;
int have_tp = 0;
/*
- * Following sbc3r22 section 6.5.3 Block Limits VPD page, when
+ * Following spc3r22 section 6.5.3 Block Limits VPD page, when
* emulate_tpu=1 or emulate_tpws=1 we will be expect a
* different page length for Thin Provisioning.
*/
@@ -500,8 +499,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
}
/* Block Device Characteristics VPD page */
-static int
-target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
+static int spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
@@ -513,13 +511,12 @@ target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
}
/* Thin Provisioning VPD */
-static int
-target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
+static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
/*
- * From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
+ * From spc3r22 section 6.5.4 Thin Provisioning VPD page:
*
* The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
* zero, then the page length shall be set to 0004h. If the DP bit
@@ -564,25 +561,23 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
-static int
-target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
+static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
static struct {
uint8_t page;
int (*emulate)(struct se_cmd *, unsigned char *);
} evpd_handlers[] = {
- { .page = 0x00, .emulate = target_emulate_evpd_00 },
- { .page = 0x80, .emulate = target_emulate_evpd_80 },
- { .page = 0x83, .emulate = target_emulate_evpd_83 },
- { .page = 0x86, .emulate = target_emulate_evpd_86 },
- { .page = 0xb0, .emulate = target_emulate_evpd_b0 },
- { .page = 0xb1, .emulate = target_emulate_evpd_b1 },
- { .page = 0xb2, .emulate = target_emulate_evpd_b2 },
+ { .page = 0x00, .emulate = spc_emulate_evpd_00 },
+ { .page = 0x80, .emulate = spc_emulate_evpd_80 },
+ { .page = 0x83, .emulate = spc_emulate_evpd_83 },
+ { .page = 0x86, .emulate = spc_emulate_evpd_86 },
+ { .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
+ { .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
+ { .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
};
/* supported vital product data pages */
-static int
-target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
+static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
{
int p;
@@ -601,7 +596,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
-int target_emulate_inquiry(struct se_cmd *cmd)
+static int spc_emulate_inquiry(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
@@ -643,7 +638,7 @@ int target_emulate_inquiry(struct se_cmd *cmd)
goto out;
}
- ret = target_emulate_inquiry_std(cmd, buf);
+ ret = spc_emulate_inquiry_std(cmd, buf);
goto out;
}
@@ -671,70 +666,7 @@ out:
return ret;
}
-int target_emulate_readcapacity(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- unsigned char *buf;
- unsigned long long blocks_long = dev->transport->get_blocks(dev);
- u32 blocks;
-
- if (blocks_long >= 0x00000000ffffffff)
- blocks = 0xffffffff;
- else
- blocks = (u32)blocks_long;
-
- buf = transport_kmap_data_sg(cmd);
-
- buf[0] = (blocks >> 24) & 0xff;
- buf[1] = (blocks >> 16) & 0xff;
- buf[2] = (blocks >> 8) & 0xff;
- buf[3] = blocks & 0xff;
- buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
- buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
- buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
- buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
-
- transport_kunmap_data_sg(cmd);
-
- target_complete_cmd(cmd, GOOD);
- return 0;
-}
-
-int target_emulate_readcapacity_16(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- unsigned char *buf;
- unsigned long long blocks = dev->transport->get_blocks(dev);
-
- buf = transport_kmap_data_sg(cmd);
-
- buf[0] = (blocks >> 56) & 0xff;
- buf[1] = (blocks >> 48) & 0xff;
- buf[2] = (blocks >> 40) & 0xff;
- buf[3] = (blocks >> 32) & 0xff;
- buf[4] = (blocks >> 24) & 0xff;
- buf[5] = (blocks >> 16) & 0xff;
- buf[6] = (blocks >> 8) & 0xff;
- buf[7] = blocks & 0xff;
- buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
- buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
- buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
- buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
- /*
- * Set Thin Provisioning Enable bit following sbc3r22 in section
- * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
- */
- if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
- buf[14] = 0x80;
-
- transport_kunmap_data_sg(cmd);
-
- target_complete_cmd(cmd, GOOD);
- return 0;
-}
-
-static int
-target_modesense_rwrecovery(unsigned char *p)
+static int spc_modesense_rwrecovery(unsigned char *p)
{
p[0] = 0x01;
p[1] = 0x0a;
@@ -742,8 +674,7 @@ target_modesense_rwrecovery(unsigned char *p)
return 12;
}
-static int
-target_modesense_control(struct se_device *dev, unsigned char *p)
+static int spc_modesense_control(struct se_device *dev, unsigned char *p)
{
p[0] = 0x0a;
p[1] = 0x0a;
@@ -828,8 +759,7 @@ target_modesense_control(struct se_device *dev, unsigned char *p)
return 12;
}
-static int
-target_modesense_caching(struct se_device *dev, unsigned char *p)
+static int spc_modesense_caching(struct se_device *dev, unsigned char *p)
{
p[0] = 0x08;
p[1] = 0x12;
@@ -840,8 +770,7 @@ target_modesense_caching(struct se_device *dev, unsigned char *p)
return 20;
}
-static void
-target_modesense_write_protect(unsigned char *buf, int type)
+static void spc_modesense_write_protect(unsigned char *buf, int type)
{
/*
* I believe that the WP bit (bit 7) in the mode header is the same for
@@ -856,8 +785,7 @@ target_modesense_write_protect(unsigned char *buf, int type)
}
}
-static void
-target_modesense_dpofua(unsigned char *buf, int type)
+static void spc_modesense_dpofua(unsigned char *buf, int type)
{
switch (type) {
case TYPE_DISK:
@@ -868,7 +796,7 @@ target_modesense_dpofua(unsigned char *buf, int type)
}
}
-int target_emulate_modesense(struct se_cmd *cmd)
+static int spc_emulate_modesense(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
char *cdb = cmd->t_task_cdb;
@@ -883,18 +811,18 @@ int target_emulate_modesense(struct se_cmd *cmd)
switch (cdb[2] & 0x3f) {
case 0x01:
- length = target_modesense_rwrecovery(&buf[offset]);
+ length = spc_modesense_rwrecovery(&buf[offset]);
break;
case 0x08:
- length = target_modesense_caching(dev, &buf[offset]);
+ length = spc_modesense_caching(dev, &buf[offset]);
break;
case 0x0a:
- length = target_modesense_control(dev, &buf[offset]);
+ length = spc_modesense_control(dev, &buf[offset]);
break;
case 0x3f:
- length = target_modesense_rwrecovery(&buf[offset]);
- length += target_modesense_caching(dev, &buf[offset+length]);
- length += target_modesense_control(dev, &buf[offset+length]);
+ length = spc_modesense_rwrecovery(&buf[offset]);
+ length += spc_modesense_caching(dev, &buf[offset+length]);
+ length += spc_modesense_control(dev, &buf[offset+length]);
break;
default:
pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
@@ -912,11 +840,11 @@ int target_emulate_modesense(struct se_cmd *cmd)
if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
(cmd->se_deve &&
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
- target_modesense_write_protect(&buf[3], type);
+ spc_modesense_write_protect(&buf[3], type);
if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
- target_modesense_dpofua(&buf[3], type);
+ spc_modesense_dpofua(&buf[3], type);
if ((offset + 2) > cmd->data_length)
offset = cmd->data_length;
@@ -928,11 +856,11 @@ int target_emulate_modesense(struct se_cmd *cmd)
if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
(cmd->se_deve &&
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
- target_modesense_write_protect(&buf[2], type);
+ spc_modesense_write_protect(&buf[2], type);
if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
- target_modesense_dpofua(&buf[2], type);
+ spc_modesense_dpofua(&buf[2], type);
if ((offset + 1) > cmd->data_length)
offset = cmd->data_length;
@@ -946,7 +874,7 @@ int target_emulate_modesense(struct se_cmd *cmd)
return 0;
}
-int target_emulate_request_sense(struct se_cmd *cmd)
+static int spc_emulate_request_sense(struct se_cmd *cmd)
{
unsigned char *cdb = cmd->t_task_cdb;
unsigned char *buf;
@@ -1005,126 +933,172 @@ end:
return 0;
}
-/*
- * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
- * Note this is not used for TCM/pSCSI passthrough
- */
-int target_emulate_unmap(struct se_cmd *cmd)
+static int spc_emulate_testunitready(struct se_cmd *cmd)
{
- struct se_device *dev = cmd->se_dev;
- unsigned char *buf, *ptr = NULL;
- unsigned char *cdb = &cmd->t_task_cdb[0];
- sector_t lba;
- unsigned int size = cmd->data_length, range;
- int ret = 0, offset;
- unsigned short dl, bd_dl;
-
- if (!dev->transport->do_discard) {
- pr_err("UNMAP emulation not supported for: %s\n",
- dev->transport->name);
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -ENOSYS;
- }
-
- /* First UNMAP block descriptor starts at 8 byte offset */
- offset = 8;
- size -= 8;
- dl = get_unaligned_be16(&cdb[0]);
- bd_dl = get_unaligned_be16(&cdb[2]);
-
- buf = transport_kmap_data_sg(cmd);
-
- ptr = &buf[offset];
- pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
- " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
-
- while (size) {
- lba = get_unaligned_be64(&ptr[0]);
- range = get_unaligned_be32(&ptr[8]);
- pr_debug("UNMAP: Using lba: %llu and range: %u\n",
- (unsigned long long)lba, range);
-
- ret = dev->transport->do_discard(dev, lba, range);
- if (ret < 0) {
- pr_err("blkdev_issue_discard() failed: %d\n",
- ret);
- goto err;
- }
-
- ptr += 16;
- size -= 16;
- }
-
-err:
- transport_kunmap_data_sg(cmd);
- if (!ret)
- target_complete_cmd(cmd, GOOD);
- return ret;
+ target_complete_cmd(cmd, GOOD);
+ return 0;
}
-/*
- * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
- * Note this is not used for TCM/pSCSI passthrough
- */
-int target_emulate_write_same(struct se_cmd *cmd)
+int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
struct se_device *dev = cmd->se_dev;
- sector_t range;
- sector_t lba = cmd->t_task_lba;
- u32 num_blocks;
- int ret;
-
- if (!dev->transport->do_discard) {
- pr_err("WRITE_SAME emulation not supported"
- " for: %s\n", dev->transport->name);
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -ENOSYS;
- }
-
- if (cmd->t_task_cdb[0] == WRITE_SAME)
- num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
- else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
- num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
- else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
- num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
-
- /*
- * Use the explicit range when non zero is supplied, otherwise calculate
- * the remaining range based on ->get_blocks() - starting LBA.
- */
- if (num_blocks != 0)
- range = num_blocks;
- else
- range = (dev->transport->get_blocks(dev) - lba);
-
- pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
- (unsigned long long)lba, (unsigned long long)range);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+ unsigned char *cdb = cmd->t_task_cdb;
- ret = dev->transport->do_discard(dev, lba, range);
- if (ret < 0) {
- pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
- return ret;
- }
+ switch (cdb[0]) {
+ case MODE_SELECT:
+ *size = cdb[4];
+ break;
+ case MODE_SELECT_10:
+ *size = (cdb[7] << 8) + cdb[8];
+ break;
+ case MODE_SENSE:
+ *size = cdb[4];
+ cmd->execute_cmd = spc_emulate_modesense;
+ break;
+ case MODE_SENSE_10:
+ *size = (cdb[7] << 8) + cdb[8];
+ cmd->execute_cmd = spc_emulate_modesense;
+ break;
+ case LOG_SELECT:
+ case LOG_SENSE:
+ *size = (cdb[7] << 8) + cdb[8];
+ break;
+ case PERSISTENT_RESERVE_IN:
+ if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
+ cmd->execute_cmd = target_scsi3_emulate_pr_in;
+ *size = (cdb[7] << 8) + cdb[8];
+ break;
+ case PERSISTENT_RESERVE_OUT:
+ if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
+ cmd->execute_cmd = target_scsi3_emulate_pr_out;
+ *size = (cdb[7] << 8) + cdb[8];
+ break;
+ case RELEASE:
+ case RELEASE_10:
+ if (cdb[0] == RELEASE_10)
+ *size = (cdb[7] << 8) | cdb[8];
+ else
+ *size = cmd->data_length;
+
+ if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
+ cmd->execute_cmd = target_scsi2_reservation_release;
+ break;
+ case RESERVE:
+ case RESERVE_10:
+ /*
+ * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
+ * Assume the passthrough or $FABRIC_MOD will tell us about it.
+ */
+ if (cdb[0] == RESERVE_10)
+ *size = (cdb[7] << 8) | cdb[8];
+ else
+ *size = cmd->data_length;
- target_complete_cmd(cmd, GOOD);
- return 0;
-}
+ /*
+ * Setup the legacy emulated handler for SPC-2 and
+ * >= SPC-3 compatible reservation handling (CRH=1).
+ * Otherwise, we assume the underlying SCSI logic is
+ * running in SPC_PASSTHROUGH, and wants reservation
+ * emulation disabled.
+ */
+ if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
+ cmd->execute_cmd = target_scsi2_reservation_reserve;
+ break;
+ case REQUEST_SENSE:
+ *size = cdb[4];
+ cmd->execute_cmd = spc_emulate_request_sense;
+ break;
+ case INQUIRY:
+ *size = (cdb[3] << 8) + cdb[4];
-int target_emulate_synchronize_cache(struct se_cmd *cmd)
-{
- if (!cmd->se_dev->transport->do_sync_cache) {
- pr_err("SYNCHRONIZE_CACHE emulation not supported"
- " for: %s\n", cmd->se_dev->transport->name);
+ /*
+ * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
+ * See spc4r17 section 5.3
+ */
+ if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ cmd->sam_task_attr = MSG_HEAD_TAG;
+ cmd->execute_cmd = spc_emulate_inquiry;
+ break;
+ case SECURITY_PROTOCOL_IN:
+ case SECURITY_PROTOCOL_OUT:
+ *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+ break;
+ case EXTENDED_COPY:
+ case READ_ATTRIBUTE:
+ case RECEIVE_COPY_RESULTS:
+ case WRITE_ATTRIBUTE:
+ *size = (cdb[10] << 24) | (cdb[11] << 16) |
+ (cdb[12] << 8) | cdb[13];
+ break;
+ case RECEIVE_DIAGNOSTIC:
+ case SEND_DIAGNOSTIC:
+ *size = (cdb[3] << 8) | cdb[4];
+ break;
+ case WRITE_BUFFER:
+ *size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+ break;
+ case REPORT_LUNS:
+ cmd->execute_cmd = target_report_luns;
+ *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+ /*
+ * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
+ * See spc4r17 section 5.3
+ */
+ if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ cmd->sam_task_attr = MSG_HEAD_TAG;
+ break;
+ case TEST_UNIT_READY:
+ cmd->execute_cmd = spc_emulate_testunitready;
+ *size = 0;
+ break;
+ case MAINTENANCE_IN:
+ if (dev->transport->get_device_type(dev) != TYPE_ROM) {
+ /*
+ * MAINTENANCE_IN from SCC-2
+ * Check for emulated MI_REPORT_TARGET_PGS
+ */
+ if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
+ su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+ cmd->execute_cmd =
+ target_emulate_report_target_port_groups;
+ }
+ *size = get_unaligned_be32(&cdb[6]);
+ } else {
+ /*
+ * GPCMD_SEND_KEY from multi media commands
+ */
+ *size = get_unaligned_be16(&cdb[8]);
+ }
+ break;
+ case MAINTENANCE_OUT:
+ if (dev->transport->get_device_type(dev) != TYPE_ROM) {
+ /*
+ * MAINTENANCE_OUT from SCC-2
+ * Check for emulated MO_SET_TARGET_PGS.
+ */
+ if (cdb[1] == MO_SET_TARGET_PGS &&
+ su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+ cmd->execute_cmd =
+ target_emulate_set_target_port_groups;
+ }
+ *size = get_unaligned_be32(&cdb[6]);
+ } else {
+ /*
+ * GPCMD_SEND_KEY from multi media commands
+ */
+ *size = get_unaligned_be16(&cdb[8]);
+ }
+ break;
+ default:
+ pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
+ " 0x%02x, sending CHECK_CONDITION.\n",
+ cmd->se_tfo->get_fabric_name(), cdb[0]);
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -ENOSYS;
+ return -EINVAL;
}
- cmd->se_dev->transport->do_sync_cache(cmd);
- return 0;
-}
-
-int target_emulate_noop(struct se_cmd *cmd)
-{
- target_complete_cmd(cmd, GOOD);
return 0;
}
+EXPORT_SYMBOL(spc_parse_cdb);
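spc_parse_cdb() mainly decodes the allocation or parameter-list length for each opcode and reports it back through *size so target_cmd_size_check() can reconcile it with the fabric-reported length. A stand-alone sketch of that length extraction for two opcodes; the switch is an illustrative subset, not the full kernel table:

/*
 * Stand-alone sketch of allocation-length extraction for a couple of
 * SPC opcodes. Not kernel code; opcode coverage is illustrative only.
 */
#include <stdio.h>

static unsigned int spc_alloc_len(const unsigned char *cdb)
{
        switch (cdb[0]) {
        case 0x12:      /* INQUIRY: allocation length in bytes 3-4 */
                return (cdb[3] << 8) | cdb[4];
        case 0x5a:      /* MODE SENSE(10): allocation length in bytes 7-8 */
                return (cdb[7] << 8) | cdb[8];
        default:
                return 0;
        }
}

int main(void)
{
        unsigned char inquiry[6] = { 0x12, 0, 0, 0x01, 0x00, 0 };

        printf("INQUIRY allocation length: %u\n", spc_alloc_len(inquiry));
        return 0;
}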
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 84caf1b..1c59a3c 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -295,9 +295,6 @@ static void core_tmr_drain_state_list(
list_move_tail(&cmd->state_list, &drain_task_list);
cmd->state_active = false;
-
- if (!list_empty(&cmd->execute_list))
- __target_remove_from_execute_list(cmd);
}
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
@@ -354,57 +351,6 @@ static void core_tmr_drain_state_list(
}
}
-static void core_tmr_drain_cmd_list(
- struct se_device *dev,
- struct se_cmd *prout_cmd,
- struct se_node_acl *tmr_nacl,
- int tas,
- struct list_head *preempt_and_abort_list)
-{
- LIST_HEAD(drain_cmd_list);
- struct se_queue_obj *qobj = &dev->dev_queue_obj;
- struct se_cmd *cmd, *tcmd;
- unsigned long flags;
-
- /*
- * Release all commands remaining in the per-device command queue.
- *
- * This follows the same logic as above for the state list.
- */
- spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) {
- /*
- * For PREEMPT_AND_ABORT usage, only process commands
- * with a matching reservation key.
- */
- if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
- continue;
- /*
- * Not aborting PROUT PREEMPT_AND_ABORT CDB..
- */
- if (prout_cmd == cmd)
- continue;
-
- cmd->transport_state &= ~CMD_T_QUEUED;
- atomic_dec(&qobj->queue_cnt);
- list_move_tail(&cmd->se_queue_node, &drain_cmd_list);
- }
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
- while (!list_empty(&drain_cmd_list)) {
- cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node);
- list_del_init(&cmd->se_queue_node);
-
- pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
- " %d t_fe_count: %d\n", (preempt_and_abort_list) ?
- "Preempt" : "", cmd, cmd->t_state,
- atomic_read(&cmd->t_fe_count));
-
- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
- atomic_read(&cmd->t_fe_count));
- }
-}
-
int core_tmr_lun_reset(
struct se_device *dev,
struct se_tmr_req *tmr,
@@ -447,8 +393,7 @@ int core_tmr_lun_reset(
core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
preempt_and_abort_list);
- core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas,
- preempt_and_abort_list);
+
/*
* Clear any legacy SPC-2 reservation when called during
* LOGICAL UNIT RESET
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 8bd58e2..b8628a5 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -77,8 +77,8 @@ static void core_clear_initiator_node_from_tpg(
lun = deve->se_lun;
spin_unlock_irq(&nacl->device_list_lock);
- core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
- TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+ core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
+ TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
spin_lock_irq(&nacl->device_list_lock);
}
@@ -172,8 +172,8 @@ void core_tpg_add_node_to_devs(
(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
"READ-WRITE" : "READ-ONLY");
- core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
- lun_access, acl, tpg, 1);
+ core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
+ lun_access, acl, tpg);
spin_lock(&tpg->tpg_lun_lock);
}
spin_unlock(&tpg->tpg_lun_lock);
@@ -306,10 +306,8 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
* TPG LUNs if the fabric is not explicitly asking for
* tpg_check_demo_mode_login_only() == 1.
*/
- if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) &&
- (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1))
- do { ; } while (0);
- else
+ if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
+ (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
core_tpg_add_node_to_devs(acl, tpg);
spin_lock_irq(&tpg->acl_node_lock);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index b05fdc0..0eaae23 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -66,15 +66,12 @@ struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
-static int transport_generic_write_pending(struct se_cmd *);
-static int transport_processing_thread(void *param);
-static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
struct se_device *dev);
static int transport_generic_get_mem(struct se_cmd *cmd);
+static int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
static void transport_put_cmd(struct se_cmd *cmd);
-static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void target_complete_ok_work(struct work_struct *work);
@@ -195,14 +192,6 @@ u32 scsi_get_new_index(scsi_index_t type)
return new_index;
}
-static void transport_init_queue_obj(struct se_queue_obj *qobj)
-{
- atomic_set(&qobj->queue_cnt, 0);
- INIT_LIST_HEAD(&qobj->qobj_list);
- init_waitqueue_head(&qobj->thread_wq);
- spin_lock_init(&qobj->cmd_queue_lock);
-}
-
void transport_subsystem_check_init(void)
{
int ret;
@@ -243,7 +232,6 @@ struct se_session *transport_init_session(void)
INIT_LIST_HEAD(&se_sess->sess_list);
INIT_LIST_HEAD(&se_sess->sess_acl_list);
INIT_LIST_HEAD(&se_sess->sess_cmd_list);
- INIT_LIST_HEAD(&se_sess->sess_wait_list);
spin_lock_init(&se_sess->sess_cmd_lock);
kref_init(&se_sess->sess_kref);
@@ -315,7 +303,7 @@ void transport_register_session(
}
EXPORT_SYMBOL(transport_register_session);
-static void target_release_session(struct kref *kref)
+void target_release_session(struct kref *kref)
{
struct se_session *se_sess = container_of(kref,
struct se_session, sess_kref);
@@ -332,6 +320,12 @@ EXPORT_SYMBOL(target_get_session);
void target_put_session(struct se_session *se_sess)
{
+ struct se_portal_group *tpg = se_sess->se_tpg;
+
+ if (tpg->se_tpg_tfo->put_session != NULL) {
+ tpg->se_tpg_tfo->put_session(se_sess);
+ return;
+ }
kref_put(&se_sess->sess_kref, target_release_session);
}
EXPORT_SYMBOL(target_put_session);
@@ -462,18 +456,7 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
-/* transport_cmd_check_stop():
- *
- * 'transport_off = 1' determines if CMD_T_ACTIVE should be cleared.
- * 'transport_off = 2' determines if task_dev_state should be removed.
- *
- * A non-zero u8 t_state sets cmd->t_state.
- * Returns 1 when command is stopped, else 0.
- */
-static int transport_cmd_check_stop(
- struct se_cmd *cmd,
- int transport_off,
- u8 t_state)
+static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists)
{
unsigned long flags;
@@ -487,13 +470,23 @@ static int transport_cmd_check_stop(
__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
cmd->transport_state &= ~CMD_T_ACTIVE;
- if (transport_off == 2)
+ if (remove_from_lists)
target_remove_from_state_list(cmd);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete(&cmd->transport_lun_stop_comp);
return 1;
}
+
+ if (remove_from_lists) {
+ target_remove_from_state_list(cmd);
+
+ /*
+ * Clear struct se_cmd->se_lun before the handoff to FE.
+ */
+ cmd->se_lun = NULL;
+ }
+
/*
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
@@ -503,58 +496,36 @@ static int transport_cmd_check_stop(
__func__, __LINE__,
cmd->se_tfo->get_task_tag(cmd));
- if (transport_off == 2)
- target_remove_from_state_list(cmd);
-
- /*
- * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
- * to FE.
- */
- if (transport_off == 2)
- cmd->se_lun = NULL;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete(&cmd->t_transport_stop_comp);
return 1;
}
- if (transport_off) {
- cmd->transport_state &= ~CMD_T_ACTIVE;
- if (transport_off == 2) {
- target_remove_from_state_list(cmd);
- /*
- * Clear struct se_cmd->se_lun before the transport_off == 2
- * handoff to fabric module.
- */
- cmd->se_lun = NULL;
- /*
- * Some fabric modules like tcm_loop can release
- * their internally allocated I/O reference now and
- * struct se_cmd now.
- *
- * Fabric modules are expected to return '1' here if the
- * se_cmd being passed is released at this point,
- * or zero if not being released.
- */
- if (cmd->se_tfo->check_stop_free != NULL) {
- spin_unlock_irqrestore(
- &cmd->t_state_lock, flags);
-
- return cmd->se_tfo->check_stop_free(cmd);
- }
+
+ cmd->transport_state &= ~CMD_T_ACTIVE;
+ if (remove_from_lists) {
+ /*
+ * Some fabric modules like tcm_loop can release their
+ * internally allocated I/O reference and struct se_cmd now.
+ *
+ * Fabric modules are expected to return '1' here if the
+ * se_cmd being passed is released at this point,
+ * or zero if not being released.
+ */
+ if (cmd->se_tfo->check_stop_free != NULL) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return cmd->se_tfo->check_stop_free(cmd);
}
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ }
- return 0;
- } else if (t_state)
- cmd->t_state = t_state;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
return 0;
}
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
- return transport_cmd_check_stop(cmd, 2, 0);
+ return transport_cmd_check_stop(cmd, true);
}
static void transport_lun_remove_cmd(struct se_cmd *cmd)
@@ -585,79 +556,8 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
if (transport_cmd_check_stop_to_fabric(cmd))
return;
- if (remove) {
- transport_remove_cmd_from_queue(cmd);
+ if (remove)
transport_put_cmd(cmd);
- }
-}
-
-static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
- bool at_head)
-{
- struct se_device *dev = cmd->se_dev;
- struct se_queue_obj *qobj = &dev->dev_queue_obj;
- unsigned long flags;
-
- if (t_state) {
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- cmd->t_state = t_state;
- cmd->transport_state |= CMD_T_ACTIVE;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- }
-
- spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-
- /* If the cmd is already on the list, remove it before we add it */
- if (!list_empty(&cmd->se_queue_node))
- list_del(&cmd->se_queue_node);
- else
- atomic_inc(&qobj->queue_cnt);
-
- if (at_head)
- list_add(&cmd->se_queue_node, &qobj->qobj_list);
- else
- list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
- cmd->transport_state |= CMD_T_QUEUED;
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
- wake_up_interruptible(&qobj->thread_wq);
-}
-
-static struct se_cmd *
-transport_get_cmd_from_queue(struct se_queue_obj *qobj)
-{
- struct se_cmd *cmd;
- unsigned long flags;
-
- spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- if (list_empty(&qobj->qobj_list)) {
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- return NULL;
- }
- cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
-
- cmd->transport_state &= ~CMD_T_QUEUED;
- list_del_init(&cmd->se_queue_node);
- atomic_dec(&qobj->queue_cnt);
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
- return cmd;
-}
-
-static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
-{
- struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
- unsigned long flags;
-
- spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- if (!(cmd->transport_state & CMD_T_QUEUED)) {
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- return;
- }
- cmd->transport_state &= ~CMD_T_QUEUED;
- atomic_dec(&qobj->queue_cnt);
- list_del_init(&cmd->se_queue_node);
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
}
static void target_complete_failure_work(struct work_struct *work)
@@ -736,68 +636,11 @@ static void target_add_to_state_list(struct se_cmd *cmd)
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
-static void __target_add_to_execute_list(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- bool head_of_queue = false;
-
- if (!list_empty(&cmd->execute_list))
- return;
-
- if (dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED &&
- cmd->sam_task_attr == MSG_HEAD_TAG)
- head_of_queue = true;
-
- if (head_of_queue)
- list_add(&cmd->execute_list, &dev->execute_list);
- else
- list_add_tail(&cmd->execute_list, &dev->execute_list);
-
- atomic_inc(&dev->execute_tasks);
-
- if (cmd->state_active)
- return;
-
- if (head_of_queue)
- list_add(&cmd->state_list, &dev->state_list);
- else
- list_add_tail(&cmd->state_list, &dev->state_list);
-
- cmd->state_active = true;
-}
-
-static void target_add_to_execute_list(struct se_cmd *cmd)
-{
- unsigned long flags;
- struct se_device *dev = cmd->se_dev;
-
- spin_lock_irqsave(&dev->execute_task_lock, flags);
- __target_add_to_execute_list(cmd);
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
-}
-
-void __target_remove_from_execute_list(struct se_cmd *cmd)
-{
- list_del_init(&cmd->execute_list);
- atomic_dec(&cmd->se_dev->execute_tasks);
-}
-
-static void target_remove_from_execute_list(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- unsigned long flags;
-
- if (WARN_ON(list_empty(&cmd->execute_list)))
- return;
-
- spin_lock_irqsave(&dev->execute_task_lock, flags);
- __target_remove_from_execute_list(cmd);
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
-}
-
/*
* Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
*/
+static void transport_write_pending_qf(struct se_cmd *cmd);
+static void transport_complete_qf(struct se_cmd *cmd);
static void target_qf_do_work(struct work_struct *work)
{
@@ -821,7 +664,10 @@ static void target_qf_do_work(struct work_struct *work)
(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
: "UNKNOWN");
- transport_add_cmd_to_queue(cmd, cmd->t_state, true);
+ if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
+ transport_write_pending_qf(cmd);
+ else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
+ transport_complete_qf(cmd);
}
}
@@ -868,8 +714,7 @@ void transport_dump_dev_state(
break;
}
- *bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d",
- atomic_read(&dev->execute_tasks), dev->queue_depth);
+ *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
*bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
dev->se_sub_dev->se_dev_attrib.block_size,
dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
@@ -1206,7 +1051,6 @@ struct se_device *transport_add_device_to_core_hba(
return NULL;
}
- transport_init_queue_obj(&dev->dev_queue_obj);
dev->dev_flags = device_flags;
dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
dev->dev_ptr = transport_dev;
@@ -1216,7 +1060,6 @@ struct se_device *transport_add_device_to_core_hba(
INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_sep_list);
INIT_LIST_HEAD(&dev->dev_tmr_list);
- INIT_LIST_HEAD(&dev->execute_list);
INIT_LIST_HEAD(&dev->delayed_cmd_list);
INIT_LIST_HEAD(&dev->state_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
@@ -1255,17 +1098,17 @@ struct se_device *transport_add_device_to_core_hba(
* Setup the Asymmetric Logical Unit Assignment for struct se_device
*/
if (core_setup_alua(dev, force_pt) < 0)
- goto out;
+ goto err_dev_list;
/*
* Startup the struct se_device processing thread
*/
- dev->process_thread = kthread_run(transport_processing_thread, dev,
- "LIO_%s", dev->transport->name);
- if (IS_ERR(dev->process_thread)) {
- pr_err("Unable to create kthread: LIO_%s\n",
+ dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
+ dev->transport->name);
+ if (!dev->tmr_wq) {
+ pr_err("Unable to create tmr workqueue for %s\n",
dev->transport->name);
- goto out;
+ goto err_dev_list;
}
/*
* Setup work_queue for QUEUE_FULL
@@ -1283,7 +1126,7 @@ struct se_device *transport_add_device_to_core_hba(
if (!inquiry_prod || !inquiry_rev) {
pr_err("All non TCM/pSCSI plugins require"
" INQUIRY consts\n");
- goto out;
+ goto err_wq;
}
strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
@@ -1293,9 +1136,10 @@ struct se_device *transport_add_device_to_core_hba(
scsi_dump_inquiry(dev);
return dev;
-out:
- kthread_stop(dev->process_thread);
+err_wq:
+ destroy_workqueue(dev->tmr_wq);
+err_dev_list:
spin_lock(&hba->device_lock);
list_del(&dev->dev_list);
hba->dev_count--;
@@ -1309,35 +1153,54 @@ out:
}
EXPORT_SYMBOL(transport_add_device_to_core_hba);
-/* transport_generic_prepare_cdb():
- *
- * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
- * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
- * The point of this is since we are mapping iSCSI LUNs to
- * SCSI Target IDs having a non-zero LUN in the CDB will throw the
- * devices and HBAs for a loop.
- */
-static inline void transport_generic_prepare_cdb(
- unsigned char *cdb)
+int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
- switch (cdb[0]) {
- case READ_10: /* SBC - RDProtect */
- case READ_12: /* SBC - RDProtect */
- case READ_16: /* SBC - RDProtect */
- case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
- case VERIFY: /* SBC - VRProtect */
- case VERIFY_16: /* SBC - VRProtect */
- case WRITE_VERIFY: /* SBC - VRProtect */
- case WRITE_VERIFY_12: /* SBC - VRProtect */
- case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
- break;
- default:
- cdb[1] &= 0x1f; /* clear logical unit number */
- break;
+ struct se_device *dev = cmd->se_dev;
+
+ if (cmd->unknown_data_length) {
+ cmd->data_length = size;
+ } else if (size != cmd->data_length) {
+ pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
+ " %u does not match SCSI CDB Length: %u for SAM Opcode:"
+ " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
+ cmd->data_length, size, cmd->t_task_cdb[0]);
+
+ cmd->cmd_spdtl = size;
+
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ pr_err("Rejecting underflow/overflow"
+ " WRITE data\n");
+ goto out_invalid_cdb_field;
+ }
+ /*
+ * Reject READ_* or WRITE_* with overflow/underflow for
+ * type SCF_SCSI_DATA_CDB.
+ */
+ if (dev->se_sub_dev->se_dev_attrib.block_size != 512) {
+ pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
+ " CDB on non 512-byte sector setup subsystem"
+ " plugin: %s\n", dev->transport->name);
+ /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
+ goto out_invalid_cdb_field;
+ }
+
+ if (size > cmd->data_length) {
+ cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
+ cmd->residual_count = (size - cmd->data_length);
+ } else {
+ cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+ cmd->residual_count = (cmd->data_length - size);
+ }
+ cmd->data_length = size;
}
-}
-static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
+ return 0;
+
+out_invalid_cdb_field:
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
+}
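target_cmd_size_check(), added above, compares the size implied by the CDB with the fabric-reported data length and records an overflow or underflow residual when they differ. A stand-alone sketch of that residual accounting with made-up lengths:

/*
 * Stand-alone sketch of the residual accounting: compare the CDB-derived
 * size with the fabric-reported length. Not kernel code.
 */
#include <stdio.h>

int main(void)
{
        unsigned int cdb_size = 4096;    /* size implied by the CDB */
        unsigned int data_length = 2048; /* length reported by the fabric */
        unsigned int residual;

        if (cdb_size > data_length) {
                residual = cdb_size - data_length;
                printf("OVERFLOW, residual=%u\n", residual);
        } else if (cdb_size < data_length) {
                residual = data_length - cdb_size;
                printf("UNDERFLOW, residual=%u\n", residual);
        } else {
                printf("exact match\n");
        }
        return 0;
}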
/*
* Used by fabric modules containing a local struct se_cmd within their
@@ -1355,9 +1218,7 @@ void transport_init_se_cmd(
INIT_LIST_HEAD(&cmd->se_lun_node);
INIT_LIST_HEAD(&cmd->se_delayed_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
- INIT_LIST_HEAD(&cmd->se_queue_node);
INIT_LIST_HEAD(&cmd->se_cmd_list);
- INIT_LIST_HEAD(&cmd->execute_list);
INIT_LIST_HEAD(&cmd->state_list);
init_completion(&cmd->transport_lun_fe_stop_comp);
init_completion(&cmd->transport_lun_stop_comp);
@@ -1412,9 +1273,12 @@ int target_setup_cmd_from_cdb(
struct se_cmd *cmd,
unsigned char *cdb)
{
+ struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+ u32 pr_reg_type = 0;
+ u8 alua_ascq = 0;
+ unsigned long flags;
int ret;
- transport_generic_prepare_cdb(cdb);
/*
* Ensure that the received CDB is less than the max (252 + 8) bytes
* for VARIABLE_LENGTH_CMD
@@ -1451,15 +1315,66 @@ int target_setup_cmd_from_cdb(
* Copy the original CDB into cmd->
*/
memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
+
/*
- * Setup the received CDB based on SCSI defined opcodes and
- * perform unit attention, persistent reservations and ALUA
- * checks for virtual device backends. The cmd->t_task_cdb
- * pointer is expected to be setup before we reach this point.
+ * Check for an existing UNIT ATTENTION condition
*/
- ret = transport_generic_cmd_sequencer(cmd, cdb);
+ if (core_scsi3_ua_check(cmd, cdb) < 0) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
+ return -EINVAL;
+ }
+
+ ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
+ if (ret != 0) {
+ /*
+ * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
+ * The ALUA additional sense code qualifier (ASCQ) is determined
+ * by the ALUA primary or secondary access state.
+ */
+ if (ret > 0) {
+ pr_debug("[%s]: ALUA TG Port not available, "
+ "SenseKey: NOT_READY, ASC/ASCQ: "
+ "0x04/0x%02x\n",
+ cmd->se_tfo->get_fabric_name(), alua_ascq);
+
+ transport_set_sense_codes(cmd, 0x04, alua_ascq);
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
+ return -EINVAL;
+ }
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
+ }
+
+ /*
+ * Check status for SPC-3 Persistent Reservations
+ */
+ if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) {
+ if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
+ cmd, cdb, pr_reg_type) != 0) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+ cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EBUSY;
+ }
+ /*
+ * This means the CDB is allowed for the SCSI Initiator port
+ * when said port is *NOT* holding the legacy SPC-2 or
+ * SPC-3 Persistent Reservation.
+ */
+ }
+
+ ret = cmd->se_dev->transport->parse_cdb(cmd);
if (ret < 0)
return ret;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
/*
* Check for SAM Task Attribute Emulation
*/
@@ -1497,10 +1412,9 @@ int transport_handle_cdb_direct(
return -EINVAL;
}
/*
- * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
- * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
- * in existing usage to ensure that outstanding descriptors are handled
- * correctly during shutdown via transport_wait_for_tasks()
+ * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
+ * outstanding descriptors are handled correctly during shutdown via
+ * transport_wait_for_tasks()
*
* Also, we don't take cmd->t_state_lock here as we only expect
* this to be called for initial descriptor submission.
@@ -1534,10 +1448,14 @@ EXPORT_SYMBOL(transport_handle_cdb_direct);
* @data_dir: DMA data direction
* @flags: flags for command submission from target_sc_flags_tables
*
+ * Returns non-zero to signal an active I/O shutdown failure. All other
+ * setup exceptions are reported to the initiator as a SCSI CHECK_CONDITION
+ * response, and zero is still returned here.
+ *
* This may only be called from process context, and also currently
* assumes internal allocation of fabric payload buffer by target-core.
**/
-void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
u32 data_length, int task_attr, int data_dir, int flags)
{
@@ -1563,7 +1481,9 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
* for fabrics using TARGET_SCF_ACK_KREF that expect a second
* kref_put() to happen during fabric packet acknowledgement.
*/
- target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+ rc = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+ if (rc)
+ return rc;
/*
* Signal bidirectional data payloads to target-core
*/
@@ -1576,16 +1496,13 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
transport_send_check_condition_and_sense(se_cmd,
se_cmd->scsi_sense_reason, 0);
target_put_sess_cmd(se_sess, se_cmd);
- return;
+ return 0;
}
- /*
- * Sanitize CDBs via transport_generic_cmd_sequencer() and
- * allocate the necessary tasks to complete the received CDB+data
- */
+
rc = target_setup_cmd_from_cdb(se_cmd, cdb);
if (rc != 0) {
transport_generic_request_failure(se_cmd);
- return;
+ return 0;
}
/*
@@ -1594,14 +1511,8 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
*/
core_alua_check_nonop_delay(se_cmd);
- /*
- * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
- * for immediate execution of READs, otherwise wait for
- * transport_generic_handle_data() to be called for WRITEs
- * when fabric has filled the incoming buffer.
- */
transport_handle_cdb_direct(se_cmd);
- return;
+ return 0;
}
EXPORT_SYMBOL(target_submit_cmd);
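As a usage illustration of the new return convention: a fabric driver only needs to unwind its own I/O path when target_submit_cmd() returns non-zero, i.e. when the session is already being torn down; all other setup failures are converted to CHECK_CONDITION status internally and still return zero. A hedged sketch of a hypothetical fabric receive path (the example_* names are illustrative, not from this patch):

static void example_fabric_queue_cmd(struct example_fabric_cmd *fcmd)
{
	int rc;

	rc = target_submit_cmd(&fcmd->se_cmd, fcmd->se_sess, fcmd->cdb,
			       fcmd->sense_buf, fcmd->unpacked_lun,
			       fcmd->data_length, MSG_SIMPLE_TAG,
			       fcmd->data_direction, TARGET_SCF_ACK_KREF);
	if (rc)
		/* Session teardown in progress; release the fabric command. */
		example_fabric_free_cmd(fcmd);
}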
@@ -1656,7 +1567,11 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
se_cmd->se_tmr_req->ref_task_tag = tag;
/* See target_submit_cmd for commentary */
- target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+ ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+ if (ret) {
+ core_tmr_release_req(se_cmd->se_tmr_req);
+ return ret;
+ }
ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
if (ret) {
@@ -1674,67 +1589,6 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
EXPORT_SYMBOL(target_submit_tmr);
/*
- * Used by fabric module frontends defining a TFO->new_cmd_map() caller
- * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
- * complete setup in TCM process context w/ TFO->new_cmd_map().
- */
-int transport_generic_handle_cdb_map(
- struct se_cmd *cmd)
-{
- if (!cmd->se_lun) {
- dump_stack();
- pr_err("cmd->se_lun is NULL\n");
- return -EINVAL;
- }
-
- transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
- return 0;
-}
-EXPORT_SYMBOL(transport_generic_handle_cdb_map);
-
-/* transport_generic_handle_data():
- *
- *
- */
-int transport_generic_handle_data(
- struct se_cmd *cmd)
-{
- /*
- * For the software fabric case, then we assume the nexus is being
- * failed/shutdown when signals are pending from the kthread context
- * caller, so we return a failure. For the HW target mode case running
- * in interrupt code, the signal_pending() check is skipped.
- */
- if (!in_interrupt() && signal_pending(current))
- return -EPERM;
- /*
- * If the received CDB has aleady been ABORTED by the generic
- * target engine, we now call transport_check_aborted_status()
- * to queue any delated TASK_ABORTED status for the received CDB to the
- * fabric module as we are expecting no further incoming DATA OUT
- * sequences at this point.
- */
- if (transport_check_aborted_status(cmd, 1) != 0)
- return 0;
-
- transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
- return 0;
-}
-EXPORT_SYMBOL(transport_generic_handle_data);
-
-/* transport_generic_handle_tmr():
- *
- *
- */
-int transport_generic_handle_tmr(
- struct se_cmd *cmd)
-{
- transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
- return 0;
-}
-EXPORT_SYMBOL(transport_generic_handle_tmr);
-
-/*
* If the cmd is active, request it to be stopped and sleep until it
* has completed.
*/
@@ -1791,6 +1645,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
case TCM_UNKNOWN_MODE_PAGE:
case TCM_WRITE_PROTECTED:
+ case TCM_ADDRESS_OUT_OF_RANGE:
case TCM_CHECK_CONDITION_ABORT_CMD:
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
case TCM_CHECK_CONDITION_NOT_READY:
@@ -1826,13 +1681,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
break;
}
- /*
- * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
- * make the call to transport_send_check_condition_and_sense()
- * directly. Otherwise expect the fabric to make the call to
- * transport_send_check_condition_and_sense() after handling
- * possible unsoliticied write data payloads.
- */
+
ret = transport_send_check_condition_and_sense(cmd,
cmd->scsi_sense_reason, 0);
if (ret == -EAGAIN || ret == -ENOMEM)
@@ -1850,406 +1699,123 @@ queue_full:
}
EXPORT_SYMBOL(transport_generic_request_failure);
-static inline u32 transport_lba_21(unsigned char *cdb)
-{
- return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
-}
-
-static inline u32 transport_lba_32(unsigned char *cdb)
-{
- return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
-}
-
-static inline unsigned long long transport_lba_64(unsigned char *cdb)
-{
- unsigned int __v1, __v2;
-
- __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
- __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
-
- return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
-}
-
-/*
- * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
- */
-static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
-{
- unsigned int __v1, __v2;
-
- __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
- __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
-
- return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
-}
-
-static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&se_cmd->t_state_lock, flags);
- se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-}
-
-/*
- * Called from Fabric Module context from transport_execute_tasks()
- *
- * The return of this function determins if the tasks from struct se_cmd
- * get added to the execution queue in transport_execute_tasks(),
- * or are added to the delayed or ordered lists here.
- */
-static inline int transport_execute_task_attr(struct se_cmd *cmd)
-{
- if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
- return 1;
- /*
- * Check for the existence of HEAD_OF_QUEUE, and if true return 1
- * to allow the passed struct se_cmd list of tasks to the front of the list.
- */
- if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- pr_debug("Added HEAD_OF_QUEUE for CDB:"
- " 0x%02x, se_ordered_id: %u\n",
- cmd->t_task_cdb[0],
- cmd->se_ordered_id);
- return 1;
- } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- atomic_inc(&cmd->se_dev->dev_ordered_sync);
- smp_mb__after_atomic_inc();
-
- pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
- " list, se_ordered_id: %u\n",
- cmd->t_task_cdb[0],
- cmd->se_ordered_id);
- /*
- * Add ORDERED command to tail of execution queue if
- * no other older commands exist that need to be
- * completed first.
- */
- if (!atomic_read(&cmd->se_dev->simple_cmds))
- return 1;
- } else {
- /*
- * For SIMPLE and UNTAGGED Task Attribute commands
- */
- atomic_inc(&cmd->se_dev->simple_cmds);
- smp_mb__after_atomic_inc();
- }
- /*
- * Otherwise if one or more outstanding ORDERED task attribute exist,
- * add the dormant task(s) built for the passed struct se_cmd to the
- * execution queue and become in Active state for this struct se_device.
- */
- if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
- /*
- * Otherwise, add cmd w/ tasks to delayed cmd queue that
- * will be drained upon completion of HEAD_OF_QUEUE task.
- */
- spin_lock(&cmd->se_dev->delayed_cmd_lock);
- cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
- list_add_tail(&cmd->se_delayed_node,
- &cmd->se_dev->delayed_cmd_list);
- spin_unlock(&cmd->se_dev->delayed_cmd_lock);
-
- pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
- " delayed CMD list, se_ordered_id: %u\n",
- cmd->t_task_cdb[0], cmd->sam_task_attr,
- cmd->se_ordered_id);
- /*
- * Return zero to let transport_execute_tasks() know
- * not to add the delayed tasks to the execution list.
- */
- return 0;
- }
- /*
- * Otherwise, no ORDERED task attributes exist..
- */
- return 1;
-}
-
-/*
- * Called from fabric module context in transport_generic_new_cmd() and
- * transport_generic_process_write()
- */
-static void transport_execute_tasks(struct se_cmd *cmd)
+static void __target_execute_cmd(struct se_cmd *cmd)
{
- int add_tasks;
- struct se_device *se_dev = cmd->se_dev;
- /*
- * Call transport_cmd_check_stop() to see if a fabric exception
- * has occurred that prevents execution.
- */
- if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
- /*
- * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
- * attribute for the tasks of the received struct se_cmd CDB
- */
- add_tasks = transport_execute_task_attr(cmd);
- if (add_tasks) {
- __transport_execute_tasks(se_dev, cmd);
- return;
- }
- }
- __transport_execute_tasks(se_dev, NULL);
-}
-
-static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
-{
- int error;
- struct se_cmd *cmd = NULL;
- unsigned long flags;
-
-check_depth:
- spin_lock_irq(&dev->execute_task_lock);
- if (new_cmd != NULL)
- __target_add_to_execute_list(new_cmd);
-
- if (list_empty(&dev->execute_list)) {
- spin_unlock_irq(&dev->execute_task_lock);
- return 0;
- }
- cmd = list_first_entry(&dev->execute_list, struct se_cmd, execute_list);
- __target_remove_from_execute_list(cmd);
- spin_unlock_irq(&dev->execute_task_lock);
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- cmd->transport_state |= CMD_T_BUSY;
- cmd->transport_state |= CMD_T_SENT;
+ int error = 0;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT);
+ spin_unlock_irq(&cmd->t_state_lock);
if (cmd->execute_cmd)
error = cmd->execute_cmd(cmd);
- else {
- error = dev->transport->execute_cmd(cmd, cmd->t_data_sg,
- cmd->t_data_nents, cmd->data_direction);
- }
- if (error != 0) {
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- cmd->transport_state &= ~CMD_T_BUSY;
- cmd->transport_state &= ~CMD_T_SENT;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ if (error) {
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+ spin_unlock_irq(&cmd->t_state_lock);
transport_generic_request_failure(cmd);
}
-
- new_cmd = NULL;
- goto check_depth;
-
- return 0;
}
-static inline u32 transport_get_sectors_6(
- unsigned char *cdb,
- struct se_cmd *cmd,
- int *ret)
+void target_execute_cmd(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
/*
- * Assume TYPE_DISK for non struct se_device objects.
- * Use 8-bit sector value.
+ * If the received CDB has already been aborted, stop processing it here.
*/
- if (!dev)
- goto type_disk;
-
- /*
- * Use 24-bit allocation length for TYPE_TAPE.
- */
- if (dev->transport->get_device_type(dev) == TYPE_TAPE)
- return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
-
- /*
- * Everything else assume TYPE_DISK Sector CDB location.
- * Use 8-bit sector value. SBC-3 says:
- *
- * A TRANSFER LENGTH field set to zero specifies that 256
- * logical blocks shall be written. Any other value
- * specifies the number of logical blocks that shall be
- * written.
- */
-type_disk:
- return cdb[4] ? : 256;
-}
-
-static inline u32 transport_get_sectors_10(
- unsigned char *cdb,
- struct se_cmd *cmd,
- int *ret)
-{
- struct se_device *dev = cmd->se_dev;
+ if (transport_check_aborted_status(cmd, 1))
+ return;
/*
- * Assume TYPE_DISK for non struct se_device objects.
- * Use 16-bit sector value.
+ * Determine if an IOCTL context caller is requesting the stopping of this
+ * command for LUN shutdown purposes.
*/
- if (!dev)
- goto type_disk;
+ spin_lock_irq(&cmd->t_state_lock);
+ if (cmd->transport_state & CMD_T_LUN_STOP) {
+ pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
+ __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
- /*
- * XXX_10 is not defined in SSC, throw an exception
- */
- if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
- *ret = -EINVAL;
- return 0;
+ cmd->transport_state &= ~CMD_T_ACTIVE;
+ spin_unlock_irq(&cmd->t_state_lock);
+ complete(&cmd->transport_lun_stop_comp);
+ return;
}
-
/*
- * Everything else assume TYPE_DISK Sector CDB location.
- * Use 16-bit sector value.
- */
-type_disk:
- return (u32)(cdb[7] << 8) + cdb[8];
-}
-
-static inline u32 transport_get_sectors_12(
- unsigned char *cdb,
- struct se_cmd *cmd,
- int *ret)
-{
- struct se_device *dev = cmd->se_dev;
-
- /*
- * Assume TYPE_DISK for non struct se_device objects.
- * Use 32-bit sector value.
+ * Determine if frontend context caller is requesting the stopping of
+ * this command for frontend exceptions.
*/
- if (!dev)
- goto type_disk;
+ if (cmd->transport_state & CMD_T_STOP) {
+ pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
+ __func__, __LINE__,
+ cmd->se_tfo->get_task_tag(cmd));
- /*
- * XXX_12 is not defined in SSC, throw an exception
- */
- if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
- *ret = -EINVAL;
- return 0;
+ spin_unlock_irq(&cmd->t_state_lock);
+ complete(&cmd->t_transport_stop_comp);
+ return;
}
- /*
- * Everything else assume TYPE_DISK Sector CDB location.
- * Use 32-bit sector value.
- */
-type_disk:
- return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
-}
-
-static inline u32 transport_get_sectors_16(
- unsigned char *cdb,
- struct se_cmd *cmd,
- int *ret)
-{
- struct se_device *dev = cmd->se_dev;
-
- /*
- * Assume TYPE_DISK for non struct se_device objects.
- * Use 32-bit sector value.
- */
- if (!dev)
- goto type_disk;
+ cmd->t_state = TRANSPORT_PROCESSING;
+ spin_unlock_irq(&cmd->t_state_lock);
- /*
- * Use 24-bit allocation length for TYPE_TAPE.
- */
- if (dev->transport->get_device_type(dev) == TYPE_TAPE)
- return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
+ if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+ goto execute;
-type_disk:
- return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
- (cdb[12] << 8) + cdb[13];
-}
-
-/*
- * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
- */
-static inline u32 transport_get_sectors_32(
- unsigned char *cdb,
- struct se_cmd *cmd,
- int *ret)
-{
/*
- * Assume TYPE_DISK for non struct se_device objects.
- * Use 32-bit sector value.
+ * Check for the existence of a HEAD_OF_QUEUE task attribute, which
+ * allows the command to be executed ahead of any queued commands.
*/
- return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
- (cdb[30] << 8) + cdb[31];
-
-}
+ switch (cmd->sam_task_attr) {
+ case MSG_HEAD_TAG:
+ pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
+ "se_ordered_id: %u\n",
+ cmd->t_task_cdb[0], cmd->se_ordered_id);
+ goto execute;
+ case MSG_ORDERED_TAG:
+ atomic_inc(&dev->dev_ordered_sync);
+ smp_mb__after_atomic_inc();
-static inline u32 transport_get_size(
- u32 sectors,
- unsigned char *cdb,
- struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
+ pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
+ " se_ordered_id: %u\n",
+ cmd->t_task_cdb[0], cmd->se_ordered_id);
- if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
- if (cdb[1] & 1) { /* sectors */
- return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
- } else /* bytes */
- return sectors;
+ /*
+ * Execute an ORDERED command if no other older commands
+ * exist that need to be completed first.
+ */
+ if (!atomic_read(&dev->simple_cmds))
+ goto execute;
+ break;
+ default:
+ /*
+ * For SIMPLE and UNTAGGED Task Attribute commands
+ */
+ atomic_inc(&dev->simple_cmds);
+ smp_mb__after_atomic_inc();
+ break;
}
- pr_debug("Returning block_size: %u, sectors: %u == %u for"
- " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size,
- sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors,
- dev->transport->name);
-
- return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
-}
+ if (atomic_read(&dev->dev_ordered_sync) != 0) {
+ spin_lock(&dev->delayed_cmd_lock);
+ list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
+ spin_unlock(&dev->delayed_cmd_lock);
-static void transport_xor_callback(struct se_cmd *cmd)
-{
- unsigned char *buf, *addr;
- struct scatterlist *sg;
- unsigned int offset;
- int i;
- int count;
- /*
- * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
- *
- * 1) read the specified logical block(s);
- * 2) transfer logical blocks from the data-out buffer;
- * 3) XOR the logical blocks transferred from the data-out buffer with
- * the logical blocks read, storing the resulting XOR data in a buffer;
- * 4) if the DISABLE WRITE bit is set to zero, then write the logical
- * blocks transferred from the data-out buffer; and
- * 5) transfer the resulting XOR data to the data-in buffer.
- */
- buf = kmalloc(cmd->data_length, GFP_KERNEL);
- if (!buf) {
- pr_err("Unable to allocate xor_callback buf\n");
+ pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
+ " delayed CMD list, se_ordered_id: %u\n",
+ cmd->t_task_cdb[0], cmd->sam_task_attr,
+ cmd->se_ordered_id);
return;
}
- /*
- * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
- * into the locally allocated *buf
- */
- sg_copy_to_buffer(cmd->t_data_sg,
- cmd->t_data_nents,
- buf,
- cmd->data_length);
+execute:
/*
- * Now perform the XOR against the BIDI read memory located at
- * cmd->t_mem_bidi_list
+ * Otherwise, no ORDERED task attributes exist.
*/
-
- offset = 0;
- for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
- addr = kmap_atomic(sg_page(sg));
- if (!addr)
- goto out;
-
- for (i = 0; i < sg->length; i++)
- *(addr + sg->offset + i) ^= *(buf + offset + i);
-
- offset += sg->length;
- kunmap_atomic(addr);
- }
-
-out:
- kfree(buf);
+ __target_execute_cmd(cmd);
}
+EXPORT_SYMBOL(target_execute_cmd);
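With transport_generic_handle_data() removed above, a fabric that receives WRITE payloads asynchronously is expected to kick the command back into the core once its data-out buffer is complete, presumably via the newly exported target_execute_cmd(). A minimal, hypothetical data-out completion callback (fabric names are illustrative, not part of this patch):

static void example_fabric_dataout_done(struct example_fabric_cmd *fcmd)
{
	/*
	 * The WRITE payload now sits in se_cmd->t_data_sg.  Hand the
	 * command to the core; target_execute_cmd() checks the abort,
	 * CMD_T_LUN_STOP and CMD_T_STOP states and applies the SAM task
	 * attribute rules before invoking ->execute_cmd().
	 */
	target_execute_cmd(&fcmd->se_cmd);
}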
/*
* Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
@@ -2306,737 +1872,31 @@ out:
return -1;
}
-static inline long long transport_dev_end_lba(struct se_device *dev)
-{
- return dev->transport->get_blocks(dev) + 1;
-}
-
-static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- u32 sectors;
-
- if (dev->transport->get_device_type(dev) != TYPE_DISK)
- return 0;
-
- sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
-
- if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
- pr_err("LBA: %llu Sectors: %u exceeds"
- " transport_dev_end_lba(): %llu\n",
- cmd->t_task_lba, sectors,
- transport_dev_end_lba(dev));
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
-{
- /*
- * Determine if the received WRITE_SAME is used to for direct
- * passthrough into Linux/SCSI with struct request via TCM/pSCSI
- * or we are signaling the use of internal WRITE_SAME + UNMAP=1
- * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK code.
- */
- int passthrough = (dev->transport->transport_type ==
- TRANSPORT_PLUGIN_PHBA_PDEV);
-
- if (!passthrough) {
- if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
- pr_err("WRITE_SAME PBDATA and LBDATA"
- " bits not supported for Block Discard"
- " Emulation\n");
- return -ENOSYS;
- }
- /*
- * Currently for the emulated case we only accept
- * tpws with the UNMAP=1 bit set.
- */
- if (!(flags[0] & 0x08)) {
- pr_err("WRITE_SAME w/o UNMAP bit not"
- " supported for Block Discard Emulation\n");
- return -ENOSYS;
- }
- }
-
- return 0;
-}
-
-/* transport_generic_cmd_sequencer():
- *
- * Generic Command Sequencer that should work for most DAS transport
- * drivers.
- *
- * Called from target_setup_cmd_from_cdb() in the $FABRIC_MOD
- * RX Thread.
- *
- * FIXME: Need to support other SCSI OPCODES where as well.
+/*
+ * Process all commands up to the last received ORDERED task attribute which
+ * requires another blocking boundary
*/
-static int transport_generic_cmd_sequencer(
- struct se_cmd *cmd,
- unsigned char *cdb)
+static void target_restart_delayed_cmds(struct se_device *dev)
{
- struct se_device *dev = cmd->se_dev;
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- int ret = 0, sector_ret = 0, passthrough;
- u32 sectors = 0, size = 0, pr_reg_type = 0;
- u16 service_action;
- u8 alua_ascq = 0;
- /*
- * Check for an existing UNIT ATTENTION condition
- */
- if (core_scsi3_ua_check(cmd, cdb) < 0) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
- return -EINVAL;
- }
- /*
- * Check status of Asymmetric Logical Unit Assignment port
- */
- ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
- if (ret != 0) {
- /*
- * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
- * The ALUA additional sense code qualifier (ASCQ) is determined
- * by the ALUA primary or secondary access state..
- */
- if (ret > 0) {
- pr_debug("[%s]: ALUA TG Port not available,"
- " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
- cmd->se_tfo->get_fabric_name(), alua_ascq);
-
- transport_set_sense_codes(cmd, 0x04, alua_ascq);
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
- return -EINVAL;
- }
- goto out_invalid_cdb_field;
- }
- /*
- * Check status for SPC-3 Persistent Reservations
- */
- if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
- if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
- cmd, cdb, pr_reg_type) != 0) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
- cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EBUSY;
- }
- /*
- * This means the CDB is allowed for the SCSI Initiator port
- * when said port is *NOT* holding the legacy SPC-2 or
- * SPC-3 Persistent Reservation.
- */
- }
-
- /*
- * If we operate in passthrough mode we skip most CDB emulation and
- * instead hand the commands down to the physical SCSI device.
- */
- passthrough =
- (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
-
- switch (cdb[0]) {
- case READ_6:
- sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_21(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case READ_10:
- sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_32(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case READ_12:
- sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_32(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case READ_16:
- sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_64(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case WRITE_6:
- sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_21(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case WRITE_10:
- case WRITE_VERIFY:
- sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_32(cdb);
- if (cdb[1] & 0x8)
- cmd->se_cmd_flags |= SCF_FUA;
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case WRITE_12:
- sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_32(cdb);
- if (cdb[1] & 0x8)
- cmd->se_cmd_flags |= SCF_FUA;
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case WRITE_16:
- sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_64(cdb);
- if (cdb[1] & 0x8)
- cmd->se_cmd_flags |= SCF_FUA;
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case XDWRITEREAD_10:
- if ((cmd->data_direction != DMA_TO_DEVICE) ||
- !(cmd->se_cmd_flags & SCF_BIDI))
- goto out_invalid_cdb_field;
- sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_32(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
-
- /*
- * Do now allow BIDI commands for passthrough mode.
- */
- if (passthrough)
- goto out_unsupported_cdb;
-
- /*
- * Setup BIDI XOR callback to be run after I/O completion.
- */
- cmd->transport_complete_callback = &transport_xor_callback;
- if (cdb[1] & 0x8)
- cmd->se_cmd_flags |= SCF_FUA;
- break;
- case VARIABLE_LENGTH_CMD:
- service_action = get_unaligned_be16(&cdb[8]);
- switch (service_action) {
- case XDWRITEREAD_32:
- sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- /*
- * Use WRITE_32 and READ_32 opcodes for the emulated
- * XDWRITE_READ_32 logic.
- */
- cmd->t_task_lba = transport_lba_64_ext(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
-
- /*
- * Do now allow BIDI commands for passthrough mode.
- */
- if (passthrough)
- goto out_unsupported_cdb;
-
- /*
- * Setup BIDI XOR callback to be run during after I/O
- * completion.
- */
- cmd->transport_complete_callback = &transport_xor_callback;
- if (cdb[1] & 0x8)
- cmd->se_cmd_flags |= SCF_FUA;
- break;
- case WRITE_SAME_32:
- sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
-
- if (sectors)
- size = transport_get_size(1, cdb, cmd);
- else {
- pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
- " supported\n");
- goto out_invalid_cdb_field;
- }
+ for (;;) {
+ struct se_cmd *cmd;
- cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
-
- if (target_check_write_same_discard(&cdb[10], dev) < 0)
- goto out_unsupported_cdb;
- if (!passthrough)
- cmd->execute_cmd = target_emulate_write_same;
- break;
- default:
- pr_err("VARIABLE_LENGTH_CMD service action"
- " 0x%04x not supported\n", service_action);
- goto out_unsupported_cdb;
- }
- break;
- case MAINTENANCE_IN:
- if (dev->transport->get_device_type(dev) != TYPE_ROM) {
- /* MAINTENANCE_IN from SCC-2 */
- /*
- * Check for emulated MI_REPORT_TARGET_PGS.
- */
- if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
- su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
- cmd->execute_cmd =
- target_emulate_report_target_port_groups;
- }
- size = (cdb[6] << 24) | (cdb[7] << 16) |
- (cdb[8] << 8) | cdb[9];
- } else {
- /* GPCMD_SEND_KEY from multi media commands */
- size = (cdb[8] << 8) + cdb[9];
- }
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case MODE_SELECT:
- size = cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case MODE_SELECT_10:
- size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case MODE_SENSE:
- size = cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- if (!passthrough)
- cmd->execute_cmd = target_emulate_modesense;
- break;
- case MODE_SENSE_10:
- size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- if (!passthrough)
- cmd->execute_cmd = target_emulate_modesense;
- break;
- case GPCMD_READ_BUFFER_CAPACITY:
- case GPCMD_SEND_OPC:
- case LOG_SELECT:
- case LOG_SENSE:
- size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case READ_BLOCK_LIMITS:
- size = READ_BLOCK_LEN;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case GPCMD_GET_CONFIGURATION:
- case GPCMD_READ_FORMAT_CAPACITIES:
- case GPCMD_READ_DISC_INFO:
- case GPCMD_READ_TRACK_RZONE_INFO:
- size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case PERSISTENT_RESERVE_IN:
- if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
- cmd->execute_cmd = target_scsi3_emulate_pr_in;
- size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case PERSISTENT_RESERVE_OUT:
- if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
- cmd->execute_cmd = target_scsi3_emulate_pr_out;
- size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case GPCMD_MECHANISM_STATUS:
- case GPCMD_READ_DVD_STRUCTURE:
- size = (cdb[8] << 8) + cdb[9];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case READ_POSITION:
- size = READ_POSITION_LEN;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case MAINTENANCE_OUT:
- if (dev->transport->get_device_type(dev) != TYPE_ROM) {
- /* MAINTENANCE_OUT from SCC-2
- *
- * Check for emulated MO_SET_TARGET_PGS.
- */
- if (cdb[1] == MO_SET_TARGET_PGS &&
- su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
- cmd->execute_cmd =
- target_emulate_set_target_port_groups;
- }
-
- size = (cdb[6] << 24) | (cdb[7] << 16) |
- (cdb[8] << 8) | cdb[9];
- } else {
- /* GPCMD_REPORT_KEY from multi media commands */
- size = (cdb[8] << 8) + cdb[9];
- }
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case INQUIRY:
- size = (cdb[3] << 8) + cdb[4];
- /*
- * Do implict HEAD_OF_QUEUE processing for INQUIRY.
- * See spc4r17 section 5.3
- */
- if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- cmd->sam_task_attr = MSG_HEAD_TAG;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- if (!passthrough)
- cmd->execute_cmd = target_emulate_inquiry;
- break;
- case READ_BUFFER:
- size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case READ_CAPACITY:
- size = READ_CAP_LEN;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- if (!passthrough)
- cmd->execute_cmd = target_emulate_readcapacity;
- break;
- case READ_MEDIA_SERIAL_NUMBER:
- case SECURITY_PROTOCOL_IN:
- case SECURITY_PROTOCOL_OUT:
- size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case SERVICE_ACTION_IN:
- switch (cmd->t_task_cdb[1] & 0x1f) {
- case SAI_READ_CAPACITY_16:
- if (!passthrough)
- cmd->execute_cmd =
- target_emulate_readcapacity_16;
- break;
- default:
- if (passthrough)
- break;
-
- pr_err("Unsupported SA: 0x%02x\n",
- cmd->t_task_cdb[1] & 0x1f);
- goto out_invalid_cdb_field;
- }
- /*FALLTHROUGH*/
- case ACCESS_CONTROL_IN:
- case ACCESS_CONTROL_OUT:
- case EXTENDED_COPY:
- case READ_ATTRIBUTE:
- case RECEIVE_COPY_RESULTS:
- case WRITE_ATTRIBUTE:
- size = (cdb[10] << 24) | (cdb[11] << 16) |
- (cdb[12] << 8) | cdb[13];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case RECEIVE_DIAGNOSTIC:
- case SEND_DIAGNOSTIC:
- size = (cdb[3] << 8) | cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
-/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
-#if 0
- case GPCMD_READ_CD:
- sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
- size = (2336 * sectors);
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
-#endif
- case READ_TOC:
- size = cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case REQUEST_SENSE:
- size = cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- if (!passthrough)
- cmd->execute_cmd = target_emulate_request_sense;
- break;
- case READ_ELEMENT_STATUS:
- size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case WRITE_BUFFER:
- size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case RESERVE:
- case RESERVE_10:
- /*
- * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
- * Assume the passthrough or $FABRIC_MOD will tell us about it.
- */
- if (cdb[0] == RESERVE_10)
- size = (cdb[7] << 8) | cdb[8];
- else
- size = cmd->data_length;
-
- /*
- * Setup the legacy emulated handler for SPC-2 and
- * >= SPC-3 compatible reservation handling (CRH=1)
- * Otherwise, we assume the underlying SCSI logic is
- * is running in SPC_PASSTHROUGH, and wants reservations
- * emulation disabled.
- */
- if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
- cmd->execute_cmd = target_scsi2_reservation_reserve;
- cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
- break;
- case RELEASE:
- case RELEASE_10:
- /*
- * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
- * Assume the passthrough or $FABRIC_MOD will tell us about it.
- */
- if (cdb[0] == RELEASE_10)
- size = (cdb[7] << 8) | cdb[8];
- else
- size = cmd->data_length;
-
- if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
- cmd->execute_cmd = target_scsi2_reservation_release;
- cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
- break;
- case SYNCHRONIZE_CACHE:
- case SYNCHRONIZE_CACHE_16:
- /*
- * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
- */
- if (cdb[0] == SYNCHRONIZE_CACHE) {
- sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
- cmd->t_task_lba = transport_lba_32(cdb);
- } else {
- sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
- cmd->t_task_lba = transport_lba_64(cdb);
- }
- if (sector_ret)
- goto out_unsupported_cdb;
-
- size = transport_get_size(sectors, cdb, cmd);
- cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
-
- if (passthrough)
+ spin_lock(&dev->delayed_cmd_lock);
+ if (list_empty(&dev->delayed_cmd_list)) {
+ spin_unlock(&dev->delayed_cmd_lock);
break;
-
- /*
- * Check to ensure that LBA + Range does not exceed past end of
- * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
- */
- if ((cmd->t_task_lba != 0) || (sectors != 0)) {
- if (transport_cmd_get_valid_sectors(cmd) < 0)
- goto out_invalid_cdb_field;
}
- cmd->execute_cmd = target_emulate_synchronize_cache;
- break;
- case UNMAP:
- size = get_unaligned_be16(&cdb[7]);
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- if (!passthrough)
- cmd->execute_cmd = target_emulate_unmap;
- break;
- case WRITE_SAME_16:
- sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- if (sectors)
- size = transport_get_size(1, cdb, cmd);
- else {
- pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
- goto out_invalid_cdb_field;
- }
-
- cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
-
- if (target_check_write_same_discard(&cdb[1], dev) < 0)
- goto out_unsupported_cdb;
- if (!passthrough)
- cmd->execute_cmd = target_emulate_write_same;
- break;
- case WRITE_SAME:
- sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
+ cmd = list_entry(dev->delayed_cmd_list.next,
+ struct se_cmd, se_delayed_node);
+ list_del(&cmd->se_delayed_node);
+ spin_unlock(&dev->delayed_cmd_lock);
- if (sectors)
- size = transport_get_size(1, cdb, cmd);
- else {
- pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
- goto out_invalid_cdb_field;
- }
+ __target_execute_cmd(cmd);
- cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- /*
- * Follow sbcr26 with WRITE_SAME (10) and check for the existence
- * of byte 1 bit 3 UNMAP instead of original reserved field
- */
- if (target_check_write_same_discard(&cdb[1], dev) < 0)
- goto out_unsupported_cdb;
- if (!passthrough)
- cmd->execute_cmd = target_emulate_write_same;
- break;
- case ALLOW_MEDIUM_REMOVAL:
- case ERASE:
- case REZERO_UNIT:
- case SEEK_10:
- case SPACE:
- case START_STOP:
- case TEST_UNIT_READY:
- case VERIFY:
- case WRITE_FILEMARKS:
- cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
- if (!passthrough)
- cmd->execute_cmd = target_emulate_noop;
- break;
- case GPCMD_CLOSE_TRACK:
- case INITIALIZE_ELEMENT_STATUS:
- case GPCMD_LOAD_UNLOAD:
- case GPCMD_SET_SPEED:
- case MOVE_MEDIUM:
- cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
- break;
- case REPORT_LUNS:
- cmd->execute_cmd = target_report_luns;
- size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
- /*
- * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS
- * See spc4r17 section 5.3
- */
- if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- cmd->sam_task_attr = MSG_HEAD_TAG;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case GET_EVENT_STATUS_NOTIFICATION:
- size = (cdb[7] << 8) | cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case ATA_16:
- /* Only support ATA passthrough to pSCSI backends.. */
- if (!passthrough)
- goto out_unsupported_cdb;
-
- /* T_LENGTH */
- switch (cdb[2] & 0x3) {
- case 0x0:
- sectors = 0;
- break;
- case 0x1:
- sectors = (((cdb[1] & 0x1) ? cdb[3] : 0) << 8) | cdb[4];
+ if (cmd->sam_task_attr == MSG_ORDERED_TAG)
break;
- case 0x2:
- sectors = (((cdb[1] & 0x1) ? cdb[5] : 0) << 8) | cdb[6];
- break;
- case 0x3:
- pr_err("T_LENGTH=0x3 not supported for ATA_16\n");
- goto out_invalid_cdb_field;
- }
-
- /* BYTE_BLOCK */
- if (cdb[2] & 0x4) {
- /* BLOCK T_TYPE: 512 or sector */
- size = sectors * ((cdb[2] & 0x10) ?
- dev->se_sub_dev->se_dev_attrib.block_size : 512);
- } else {
- /* BYTE */
- size = sectors;
- }
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- default:
- pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
- " 0x%02x, sending CHECK_CONDITION.\n",
- cmd->se_tfo->get_fabric_name(), cdb[0]);
- goto out_unsupported_cdb;
- }
-
- if (cmd->unknown_data_length)
- cmd->data_length = size;
-
- if (size != cmd->data_length) {
- pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
- " %u does not match SCSI CDB Length: %u for SAM Opcode:"
- " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
- cmd->data_length, size, cdb[0]);
-
- cmd->cmd_spdtl = size;
-
- if (cmd->data_direction == DMA_TO_DEVICE) {
- pr_err("Rejecting underflow/overflow"
- " WRITE data\n");
- goto out_invalid_cdb_field;
- }
- /*
- * Reject READ_* or WRITE_* with overflow/underflow for
- * type SCF_SCSI_DATA_SG_IO_CDB.
- */
- if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
- pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
- " CDB on non 512-byte sector setup subsystem"
- " plugin: %s\n", dev->transport->name);
- /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
- goto out_invalid_cdb_field;
- }
-
- if (size > cmd->data_length) {
- cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
- cmd->residual_count = (size - cmd->data_length);
- } else {
- cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
- cmd->residual_count = (cmd->data_length - size);
- }
- cmd->data_length = size;
}
-
- if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
- if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
- " big sectors %u exceeds fabric_max_sectors:"
- " %u\n", cdb[0], sectors,
- su_dev->se_dev_attrib.fabric_max_sectors);
- goto out_invalid_cdb_field;
- }
- if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
- " big sectors %u exceeds backend hw_max_sectors:"
- " %u\n", cdb[0], sectors,
- su_dev->se_dev_attrib.hw_max_sectors);
- goto out_invalid_cdb_field;
- }
- }
-
- /* reject any command that we don't have a handler for */
- if (!(passthrough || cmd->execute_cmd ||
- (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
- goto out_unsupported_cdb;
-
- transport_set_supported_SAM_opcode(cmd);
- return ret;
-
-out_unsupported_cdb:
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -EINVAL;
-out_invalid_cdb_field:
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
}
/*
@@ -3046,8 +1906,6 @@ out_invalid_cdb_field:
static void transport_complete_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct se_cmd *cmd_p, *cmd_tmp;
- int new_active_tasks = 0;
if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
atomic_dec(&dev->simple_cmds);
@@ -3069,38 +1927,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
}
- /*
- * Process all commands up to the last received
- * ORDERED task attribute which requires another blocking
- * boundary
- */
- spin_lock(&dev->delayed_cmd_lock);
- list_for_each_entry_safe(cmd_p, cmd_tmp,
- &dev->delayed_cmd_list, se_delayed_node) {
-
- list_del(&cmd_p->se_delayed_node);
- spin_unlock(&dev->delayed_cmd_lock);
- pr_debug("Calling add_tasks() for"
- " cmd_p: 0x%02x Task Attr: 0x%02x"
- " Dormant -> Active, se_ordered_id: %u\n",
- cmd_p->t_task_cdb[0],
- cmd_p->sam_task_attr, cmd_p->se_ordered_id);
-
- target_add_to_execute_list(cmd_p);
- new_active_tasks++;
-
- spin_lock(&dev->delayed_cmd_lock);
- if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
- break;
- }
- spin_unlock(&dev->delayed_cmd_lock);
- /*
- * If new tasks have become active, wake up the transport thread
- * to do the processing of the Active tasks.
- */
- if (new_active_tasks != 0)
- wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
+ target_restart_delayed_cmds(dev);
}
static void transport_complete_qf(struct se_cmd *cmd)
@@ -3359,31 +2187,27 @@ int transport_generic_map_mem_to_cmd(
if (!sgl || !sgl_count)
return 0;
- if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
- (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
- /*
- * Reject SCSI data overflow with map_mem_to_cmd() as incoming
- * scatterlists already have been set to follow what the fabric
- * passes for the original expected data transfer length.
- */
- if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
- pr_warn("Rejecting SCSI DATA overflow for fabric using"
- " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
- }
+ /*
+ * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+ * scatterlists already have been set to follow what the fabric
+ * passes for the original expected data transfer length.
+ */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ pr_warn("Rejecting SCSI DATA overflow for fabric using"
+ " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
+ }
- cmd->t_data_sg = sgl;
- cmd->t_data_nents = sgl_count;
+ cmd->t_data_sg = sgl;
+ cmd->t_data_nents = sgl_count;
- if (sgl_bidi && sgl_bidi_count) {
- cmd->t_bidi_data_sg = sgl_bidi;
- cmd->t_bidi_data_nents = sgl_bidi_count;
- }
- cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+ if (sgl_bidi && sgl_bidi_count) {
+ cmd->t_bidi_data_sg = sgl_bidi;
+ cmd->t_bidi_data_nents = sgl_bidi_count;
}
-
+ cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
return 0;
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
@@ -3455,7 +2279,7 @@ transport_generic_get_mem(struct se_cmd *cmd)
cmd->t_data_nents = nents;
sg_init_table(cmd->t_data_sg, nents);
- zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO;
+ zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_CDB ? 0 : __GFP_ZERO;
while (length) {
u32 page_len = min_t(u32, length, PAGE_SIZE);
@@ -3486,7 +2310,6 @@ out:
*/
int transport_generic_new_cmd(struct se_cmd *cmd)
{
- struct se_device *dev = cmd->se_dev;
int ret = 0;
/*
@@ -3502,8 +2325,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
}
/* Workaround for handling zero-length control CDBs */
- if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
- !cmd->data_length) {
+ if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->data_length) {
spin_lock_irq(&cmd->t_state_lock);
cmd->t_state = TRANSPORT_COMPLETE;
cmd->transport_state |= CMD_T_ACTIVE;
@@ -3521,52 +2343,45 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
return 0;
}
- if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
- struct se_dev_attrib *attr = &dev->se_sub_dev->se_dev_attrib;
-
- if (transport_cmd_get_valid_sectors(cmd) < 0)
- return -EINVAL;
-
- BUG_ON(cmd->data_length % attr->block_size);
- BUG_ON(DIV_ROUND_UP(cmd->data_length, attr->block_size) >
- attr->hw_max_sectors);
- }
-
atomic_inc(&cmd->t_fe_count);
/*
- * For WRITEs, let the fabric know its buffer is ready.
- *
- * The command will be added to the execution queue after its write
- * data has arrived.
+ * If this command is not a write, we can execute it right here;
+ * for write buffers we need to notify the fabric driver first
+ * and let it call back once the write buffers are ready.
*/
- if (cmd->data_direction == DMA_TO_DEVICE) {
- target_add_to_state_list(cmd);
- return transport_generic_write_pending(cmd);
+ target_add_to_state_list(cmd);
+ if (cmd->data_direction != DMA_TO_DEVICE) {
+ target_execute_cmd(cmd);
+ return 0;
}
- /*
- * Everything else but a WRITE, add the command to the execution queue.
- */
- transport_execute_tasks(cmd);
- return 0;
+
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->t_state = TRANSPORT_WRITE_PENDING;
+ spin_unlock_irq(&cmd->t_state_lock);
+
+ transport_cmd_check_stop(cmd, false);
+
+ ret = cmd->se_tfo->write_pending(cmd);
+ if (ret == -EAGAIN || ret == -ENOMEM)
+ goto queue_full;
+
+ if (ret < 0)
+ return ret;
+ return 1;
out_fail:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -EINVAL;
+queue_full:
+ pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
+ cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
+ transport_handle_queue_full(cmd, cmd->se_dev);
+ return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
-/* transport_generic_process_write():
- *
- *
- */
-void transport_generic_process_write(struct se_cmd *cmd)
-{
- transport_execute_tasks(cmd);
-}
-EXPORT_SYMBOL(transport_generic_process_write);
-
static void transport_write_pending_qf(struct se_cmd *cmd)
{
int ret;
@@ -3579,43 +2394,6 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
}
}
-static int transport_generic_write_pending(struct se_cmd *cmd)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- cmd->t_state = TRANSPORT_WRITE_PENDING;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- /*
- * Clear the se_cmd for WRITE_PENDING status in order to set
- * CMD_T_ACTIVE so that transport_generic_handle_data can be called
- * from HW target mode interrupt code. This is safe to be called
- * with transport_off=1 before the cmd->se_tfo->write_pending
- * because the se_cmd->se_lun pointer is not being cleared.
- */
- transport_cmd_check_stop(cmd, 1, 0);
-
- /*
- * Call the fabric write_pending function here to let the
- * frontend know that WRITE buffers are ready.
- */
- ret = cmd->se_tfo->write_pending(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM)
- goto queue_full;
- else if (ret < 0)
- return ret;
-
- return 1;
-
-queue_full:
- pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
- cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
- transport_handle_queue_full(cmd, cmd->se_dev);
- return 0;
-}
-
void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
@@ -3642,10 +2420,11 @@ EXPORT_SYMBOL(transport_generic_free_cmd);
* @se_cmd: command descriptor to add
* @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
*/
-void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
- bool ack_kref)
+static int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
+ bool ack_kref)
{
unsigned long flags;
+ int ret = 0;
kref_init(&se_cmd->cmd_kref);
/*
@@ -3659,11 +2438,17 @@ void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
}
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ if (se_sess->sess_tearing_down) {
+ ret = -ESHUTDOWN;
+ goto out;
+ }
list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
se_cmd->check_release = 1;
+
+out:
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ return ret;
}
-EXPORT_SYMBOL(target_get_sess_cmd);
static void target_release_cmd_kref(struct kref *kref)
{
@@ -3698,28 +2483,27 @@ int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
}
EXPORT_SYMBOL(target_put_sess_cmd);
-/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
- * @se_sess: session to split
+/* target_sess_cmd_list_set_waiting - Flag all commands in
+ * sess_cmd_list to complete cmd_wait_comp. Set
+ * sess_tearing_down so no more commands are queued.
+ * @se_sess: session to flag
*/
-void target_splice_sess_cmd_list(struct se_session *se_sess)
+void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
struct se_cmd *se_cmd;
unsigned long flags;
- WARN_ON(!list_empty(&se_sess->sess_wait_list));
- INIT_LIST_HEAD(&se_sess->sess_wait_list);
-
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- se_sess->sess_tearing_down = 1;
- list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
+ WARN_ON(se_sess->sess_tearing_down);
+ se_sess->sess_tearing_down = 1;
- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
+ list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list)
se_cmd->cmd_wait_set = 1;
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
-EXPORT_SYMBOL(target_splice_sess_cmd_list);
+EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
/* target_wait_for_sess_cmds - Wait for outstanding descriptors
* @se_sess: session to wait for active I/O
@@ -3733,7 +2517,7 @@ void target_wait_for_sess_cmds(
bool rc = false;
list_for_each_entry_safe(se_cmd, tmp_cmd,
- &se_sess->sess_wait_list, se_cmd_list) {
+ &se_sess->sess_cmd_list, se_cmd_list) {
list_del(&se_cmd->se_cmd_list);
pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
@@ -3785,26 +2569,20 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
cmd->se_tfo->get_task_tag(cmd));
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- transport_cmd_check_stop(cmd, 1, 0);
+ transport_cmd_check_stop(cmd, false);
return -EPERM;
}
cmd->transport_state |= CMD_T_LUN_FE_STOP;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
-
// XXX: audit task_flags checks.
spin_lock_irqsave(&cmd->t_state_lock, flags);
if ((cmd->transport_state & CMD_T_BUSY) &&
(cmd->transport_state & CMD_T_SENT)) {
if (!target_stop_cmd(cmd, &flags))
ret++;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- } else {
- spin_unlock_irqrestore(&cmd->t_state_lock,
- flags);
- target_remove_from_execute_list(cmd);
}
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
pr_debug("ConfigFS: cmd: %p stop tasks ret:"
" %d\n", cmd, ret);
@@ -3815,7 +2593,6 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
cmd->se_tfo->get_task_tag(cmd));
}
- transport_remove_cmd_from_queue(cmd);
return 0;
}
@@ -3834,11 +2611,6 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
struct se_cmd, se_lun_node);
list_del_init(&cmd->se_lun_node);
- /*
- * This will notify iscsi_target_transport.c:
- * transport_cmd_check_stop() that a LUN shutdown is in
- * progress for the iscsi_cmd_t.
- */
spin_lock(&cmd->t_state_lock);
pr_debug("SE_LUN[%d] - Setting cmd->transport"
"_lun_stop for ITT: 0x%08x\n",
@@ -3905,7 +2677,7 @@ check_cond:
spin_unlock_irqrestore(&cmd->t_state_lock,
cmd_flags);
- transport_cmd_check_stop(cmd, 1, 0);
+ transport_cmd_check_stop(cmd, false);
complete(&cmd->transport_lun_fe_stop_comp);
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
continue;
@@ -3961,10 +2733,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return false;
}
- /*
- * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
- * has been set in transport_set_supported_SAM_opcode().
- */
+
if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -4022,8 +2791,6 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
-
wait_for_completion(&cmd->t_transport_stop_comp);
spin_lock_irqsave(&cmd->t_state_lock, flags);
@@ -4206,6 +2973,15 @@ int transport_send_check_condition_and_sense(
/* WRITE PROTECTED */
buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
break;
+ case TCM_ADDRESS_OUT_OF_RANGE:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x21;
+ break;
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
/* CURRENT ERROR */
buffer[offset] = 0x70;
@@ -4306,8 +3082,9 @@ void transport_send_task_abort(struct se_cmd *cmd)
cmd->se_tfo->queue_status(cmd);
}
-static int transport_generic_do_tmr(struct se_cmd *cmd)
+static void target_tmr_work(struct work_struct *work)
{
+ struct se_cmd *cmd = container_of(work, struct se_cmd, work);
struct se_device *dev = cmd->se_dev;
struct se_tmr_req *tmr = cmd->se_tmr_req;
int ret;
@@ -4343,80 +3120,13 @@ static int transport_generic_do_tmr(struct se_cmd *cmd)
cmd->se_tfo->queue_tm_rsp(cmd);
transport_cmd_check_stop_to_fabric(cmd);
- return 0;
}
-/* transport_processing_thread():
- *
- *
- */
-static int transport_processing_thread(void *param)
+int transport_generic_handle_tmr(
+ struct se_cmd *cmd)
{
- int ret;
- struct se_cmd *cmd;
- struct se_device *dev = param;
-
- while (!kthread_should_stop()) {
- ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
- atomic_read(&dev->dev_queue_obj.queue_cnt) ||
- kthread_should_stop());
- if (ret < 0)
- goto out;
-
-get_cmd:
- cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
- if (!cmd)
- continue;
-
- switch (cmd->t_state) {
- case TRANSPORT_NEW_CMD:
- BUG();
- break;
- case TRANSPORT_NEW_CMD_MAP:
- if (!cmd->se_tfo->new_cmd_map) {
- pr_err("cmd->se_tfo->new_cmd_map is"
- " NULL for TRANSPORT_NEW_CMD_MAP\n");
- BUG();
- }
- ret = cmd->se_tfo->new_cmd_map(cmd);
- if (ret < 0) {
- transport_generic_request_failure(cmd);
- break;
- }
- ret = transport_generic_new_cmd(cmd);
- if (ret < 0) {
- transport_generic_request_failure(cmd);
- break;
- }
- break;
- case TRANSPORT_PROCESS_WRITE:
- transport_generic_process_write(cmd);
- break;
- case TRANSPORT_PROCESS_TMR:
- transport_generic_do_tmr(cmd);
- break;
- case TRANSPORT_COMPLETE_QF_WP:
- transport_write_pending_qf(cmd);
- break;
- case TRANSPORT_COMPLETE_QF_OK:
- transport_complete_qf(cmd);
- break;
- default:
- pr_err("Unknown t_state: %d for ITT: 0x%08x "
- "i_state: %d on SE LUN: %u\n",
- cmd->t_state,
- cmd->se_tfo->get_task_tag(cmd),
- cmd->se_tfo->get_cmd_state(cmd),
- cmd->se_lun->unpacked_lun);
- BUG();
- }
-
- goto get_cmd;
- }
-
-out:
- WARN_ON(!list_empty(&dev->state_list));
- WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
- dev->process_thread = NULL;
+ INIT_WORK(&cmd->work, target_tmr_work);
+ queue_work(cmd->se_dev->tmr_wq, &cmd->work);
return 0;
}
+EXPORT_SYMBOL(transport_generic_handle_tmr);
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index f03fb97..b9cb500 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -215,7 +215,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
*/
if ((ep->xid <= lport->lro_xid) &&
(fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
- if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) &&
+ if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
lport->tt.ddp_target(lport, ep->xid,
se_cmd->t_data_sg,
se_cmd->t_data_nents))
@@ -230,6 +230,8 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
+ if (cmd->aborted)
+ return ~0;
return fc_seq_exch(cmd->seq)->rxid;
}
@@ -541,9 +543,11 @@ static void ft_send_work(struct work_struct *work)
* Use a single se_cmd->cmd_kref as we expect to release se_cmd
* directly from ft_check_stop_free callback in response path.
*/
- target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
- &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
- ntohl(fcp->fc_dl), task_attr, data_dir, 0);
+ if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
+ &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
+ ntohl(fcp->fc_dl), task_attr, data_dir, 0))
+ goto err;
+
pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
return;
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 071a505..ad36ede1 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -183,6 +183,13 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
return ft_queue_status(se_cmd);
}
+static void ft_execute_work(struct work_struct *work)
+{
+ struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
+
+ target_execute_cmd(&cmd->se_cmd);
+}
+
/*
* Receive write data frame.
*/
@@ -307,8 +314,10 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
cmd->write_data_len += tlen;
}
last_frame:
- if (cmd->write_data_len == se_cmd->data_length)
- transport_generic_handle_data(se_cmd);
+ if (cmd->write_data_len == se_cmd->data_length) {
+ INIT_WORK(&cmd->work, ft_execute_work);
+ queue_work(cmd->sess->tport->tpg->workqueue, &cmd->work);
+ }
drop:
fc_frame_free(fp);
}
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index cb99da9..87901fa 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -58,7 +58,8 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
struct ft_tport *tport;
int i;
- tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
+ tport = rcu_dereference_protected(lport->prov[FC_TYPE_FCP],
+ lockdep_is_held(&ft_lport_lock));
if (tport && tport->tpg)
return tport;
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 35819e3..6cc4358 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1033,7 +1033,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_state *state,
if (!retinfo)
return -EFAULT;
memset(&tmp, 0, sizeof(tmp));
- tty_lock(tty);
+ tty_lock();
tmp.line = tty->index;
tmp.port = state->port;
tmp.flags = state->tport.flags;
@@ -1042,7 +1042,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_state *state,
tmp.close_delay = state->tport.close_delay;
tmp.closing_wait = state->tport.closing_wait;
tmp.custom_divisor = state->custom_divisor;
- tty_unlock(tty);
+ tty_unlock();
if (copy_to_user(retinfo,&tmp,sizeof(*retinfo)))
return -EFAULT;
return 0;
@@ -1059,12 +1059,12 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
return -EFAULT;
- tty_lock(tty);
+ tty_lock();
change_spd = ((new_serial.flags ^ port->flags) & ASYNC_SPD_MASK) ||
new_serial.custom_divisor != state->custom_divisor;
if (new_serial.irq || new_serial.port != state->port ||
new_serial.xmit_fifo_size != state->xmit_fifo_size) {
- tty_unlock(tty);
+ tty_unlock();
return -EINVAL;
}
@@ -1074,7 +1074,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
(new_serial.xmit_fifo_size != state->xmit_fifo_size) ||
((new_serial.flags & ~ASYNC_USR_MASK) !=
(port->flags & ~ASYNC_USR_MASK))) {
- tty_unlock(tty);
+ tty_unlock();
return -EPERM;
}
port->flags = ((port->flags & ~ASYNC_USR_MASK) |
@@ -1084,7 +1084,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
}
if (new_serial.baud_base < 9600) {
- tty_unlock(tty);
+ tty_unlock();
return -EINVAL;
}
@@ -1116,7 +1116,7 @@ check_and_exit:
}
} else
retval = startup(tty, state);
- tty_unlock(tty);
+ tty_unlock();
return retval;
}
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index 6984e1a..e61cabd 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -1599,7 +1599,7 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
* If the port is the middle of closing, bail out now
*/
if (tty_hung_up_p(filp) || (info->port.flags & ASYNC_CLOSING)) {
- wait_event_interruptible_tty(tty, info->port.close_wait,
+ wait_event_interruptible_tty(info->port.close_wait,
!(info->port.flags & ASYNC_CLOSING));
return (info->port.flags & ASYNC_HUP_NOTIFY) ? -EAGAIN: -ERESTARTSYS;
}
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index ced26c8..0d2ea0c 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -401,7 +401,7 @@ out:
}
#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_RAW
-void __init udbg_init_debug_opal(void)
+void __init udbg_init_debug_opal_raw(void)
{
u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO;
hvc_opal_privs[index] = &hvc_opal_boot_priv;
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index d3d91da..1e456dc 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -209,29 +209,28 @@ static int xen_hvm_console_init(void)
info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL | __GFP_ZERO);
if (!info)
return -ENOMEM;
- }
-
- /* already configured */
- if (info->intf != NULL)
+ } else if (info->intf != NULL) {
+ /* already configured */
return 0;
-
- r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
- if (r < 0) {
- kfree(info);
- return -ENODEV;
}
+ /*
+ * If the toolstack (or the hypervisor) hasn't set these values, the
+ * default value is 0. Even though mfn = 0 and evtchn = 0 are
+ * theoretically correct values, in practice they never are and they
+ * mean that a legacy toolstack hasn't initialized the pv console correctly.
+ */
+ r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
+ if (r < 0 || v == 0)
+ goto err;
info->evtchn = v;
- hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
- if (r < 0) {
- kfree(info);
- return -ENODEV;
- }
+ v = 0;
+ r = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
+ if (r < 0 || v == 0)
+ goto err;
mfn = v;
info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE);
- if (info->intf == NULL) {
- kfree(info);
- return -ENODEV;
- }
+ if (info->intf == NULL)
+ goto err;
info->vtermno = HVC_COOKIE;
spin_lock(&xencons_lock);
@@ -239,6 +238,9 @@ static int xen_hvm_console_init(void)
spin_unlock(&xencons_lock);
return 0;
+err:
+ kfree(info);
+ return -ENODEV;
}
static int xen_pv_console_init(void)
@@ -256,12 +258,10 @@ static int xen_pv_console_init(void)
info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL | __GFP_ZERO);
if (!info)
return -ENOMEM;
- }
-
- /* already configured */
- if (info->intf != NULL)
+ } else if (info->intf != NULL) {
+ /* already configured */
return 0;
-
+ }
info->evtchn = xen_start_info->console.domU.evtchn;
info->intf = mfn_to_virt(xen_start_info->console.domU.mfn);
info->vtermno = HVC_COOKIE;
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index 656ad93..5c6c314 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -1065,8 +1065,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
TRACE_L("read()");
- /* FIXME: should use a private lock */
- tty_lock(tty);
+ tty_lock();
pClient = findClient(pInfo, task_pid(current));
if (pClient) {
@@ -1078,7 +1077,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
goto unlock;
}
/* block until there is a message: */
- wait_event_interruptible_tty(tty, pInfo->read_wait,
+ wait_event_interruptible_tty(pInfo->read_wait,
(pMsg = remove_msg(pInfo, pClient)));
}
@@ -1108,7 +1107,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
}
ret = -EPERM;
unlock:
- tty_unlock(tty);
+ tty_unlock();
return ret;
}
@@ -1157,7 +1156,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
pHeader->locks = 0;
pHeader->owner = NULL;
- tty_lock(tty);
+ tty_lock();
pClient = findClient(pInfo, task_pid(current));
if (pClient) {
@@ -1176,7 +1175,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
add_tx_queue(pInfo, pHeader);
trigger_transmit(pInfo);
- tty_unlock(tty);
+ tty_unlock();
return 0;
}
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 65c7c62..5505ffc 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -47,7 +47,6 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
wake_up_interruptible(&tty->read_wait);
wake_up_interruptible(&tty->write_wait);
tty->packet = 0;
- /* Review - krefs on tty_link ?? */
if (!tty->link)
return;
tty->link->packet = 0;
@@ -63,9 +62,9 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
mutex_unlock(&devpts_mutex);
}
#endif
- tty_unlock(tty);
+ tty_unlock();
tty_vhangup(tty->link);
- tty_lock(tty);
+ tty_lock();
}
}
@@ -623,27 +622,26 @@ static int ptmx_open(struct inode *inode, struct file *filp)
return retval;
/* find a device that is not in use. */
- mutex_lock(&devpts_mutex);
+ tty_lock();
index = devpts_new_index(inode);
+ tty_unlock();
if (index < 0) {
retval = index;
goto err_file;
}
- mutex_unlock(&devpts_mutex);
-
mutex_lock(&tty_mutex);
+ mutex_lock(&devpts_mutex);
tty = tty_init_dev(ptm_driver, index);
+ mutex_unlock(&devpts_mutex);
+ tty_lock();
+ mutex_unlock(&tty_mutex);
if (IS_ERR(tty)) {
retval = PTR_ERR(tty);
goto out;
}
- /* The tty returned here is locked so we can safely
- drop the mutex */
- mutex_unlock(&tty_mutex);
-
set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
tty_add_file(tty, filp);
@@ -656,17 +654,16 @@ static int ptmx_open(struct inode *inode, struct file *filp)
if (retval)
goto err_release;
- tty_unlock(tty);
+ tty_unlock();
return 0;
err_release:
- tty_unlock(tty);
+ tty_unlock();
tty_release(inode, filp);
return retval;
out:
- mutex_unlock(&tty_mutex);
devpts_kill_index(inode, index);
+ tty_unlock();
err_file:
- mutex_unlock(&devpts_mutex);
tty_free_file(filp);
return retval;
}
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c
index 47d061b..6e1958a 100644
--- a/drivers/tty/serial/8250/8250.c
+++ b/drivers/tty/serial/8250/8250.c
@@ -3113,7 +3113,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
/**
* serial8250_register_8250_port - register a serial port
- * @port: serial port template
+ * @up: serial port template
*
* Configure the serial port specified by the request. If the
* port exists and is in use, it is hung up and unregistered
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 4ad721f..c17923e 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -133,6 +133,10 @@ struct pl011_dmatx_data {
struct uart_amba_port {
struct uart_port port;
struct clk *clk;
+ /* Two optional pin states - default & sleep */
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_sleep;
const struct vendor_data *vendor;
unsigned int dmacr; /* dma control reg */
unsigned int im; /* interrupt mask */
@@ -1312,6 +1316,14 @@ static int pl011_startup(struct uart_port *port)
unsigned int cr;
int retval;
+ /* Optionally enable pins to be muxed in and configured */
+ if (!IS_ERR(uap->pins_default)) {
+ retval = pinctrl_select_state(uap->pinctrl, uap->pins_default);
+ if (retval)
+ dev_err(port->dev,
+ "could not set default pins\n");
+ }
+
retval = clk_prepare(uap->clk);
if (retval)
goto out;
@@ -1420,6 +1432,7 @@ static void pl011_shutdown(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
unsigned int cr;
+ int retval;
/*
* disable all interrupts
@@ -1462,6 +1475,14 @@ static void pl011_shutdown(struct uart_port *port)
*/
clk_disable(uap->clk);
clk_unprepare(uap->clk);
+ /* Optionally let pins go into sleep states */
+ if (!IS_ERR(uap->pins_sleep)) {
+ retval = pinctrl_select_state(uap->pinctrl, uap->pins_sleep);
+ if (retval)
+ dev_err(port->dev,
+ "could not set pins to sleep state\n");
+ }
+
if (uap->port.dev->platform_data) {
struct amba_pl011_data *plat;
@@ -1792,6 +1813,14 @@ static int __init pl011_console_setup(struct console *co, char *options)
if (!uap)
return -ENODEV;
+ /* Allow pins to be muxed in and configured */
+ if (!IS_ERR(uap->pins_default)) {
+ ret = pinctrl_select_state(uap->pinctrl, uap->pins_default);
+ if (ret)
+ dev_err(uap->port.dev,
+ "could not set default pins\n");
+ }
+
ret = clk_prepare(uap->clk);
if (ret)
return ret;
@@ -1844,7 +1873,6 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
struct vendor_data *vendor = id->data;
- struct pinctrl *pinctrl;
void __iomem *base;
int i, ret;
@@ -1869,11 +1897,20 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
goto free;
}
- pinctrl = devm_pinctrl_get_select_default(&dev->dev);
- if (IS_ERR(pinctrl)) {
- ret = PTR_ERR(pinctrl);
+ uap->pinctrl = devm_pinctrl_get(&dev->dev);
+ if (IS_ERR(uap->pinctrl)) {
+ ret = PTR_ERR(uap->pinctrl);
goto unmap;
}
+ uap->pins_default = pinctrl_lookup_state(uap->pinctrl,
+ PINCTRL_STATE_DEFAULT);
+ if (IS_ERR(uap->pins_default))
+ dev_err(&dev->dev, "could not get default pinstate\n");
+
+ uap->pins_sleep = pinctrl_lookup_state(uap->pinctrl,
+ PINCTRL_STATE_SLEEP);
+ if (IS_ERR(uap->pins_sleep))
+ dev_dbg(&dev->dev, "could not get sleep pinstate\n");
uap->clk = clk_get(&dev->dev, NULL);
if (IS_ERR(uap->clk)) {
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 7264d4d..80b6b1b 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -3976,7 +3976,7 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
*/
if (tty_hung_up_p(filp) ||
(info->flags & ASYNC_CLOSING)) {
- wait_event_interruptible_tty(tty, info->close_wait,
+ wait_event_interruptible_tty(info->close_wait,
!(info->flags & ASYNC_CLOSING));
#ifdef SERIAL_DO_RESTART
if (info->flags & ASYNC_HUP_NOTIFY)
@@ -4052,9 +4052,9 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
printk("block_til_ready blocking: ttyS%d, count = %d\n",
info->line, info->count);
#endif
- tty_unlock(tty);
+ tty_unlock();
schedule();
- tty_lock(tty);
+ tty_lock();
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&info->open_wait, &wait);
@@ -4115,7 +4115,7 @@ rs_open(struct tty_struct *tty, struct file * filp)
*/
if (tty_hung_up_p(filp) ||
(info->flags & ASYNC_CLOSING)) {
- wait_event_interruptible_tty(tty, info->close_wait,
+ wait_event_interruptible_tty(info->close_wait,
!(info->flags & ASYNC_CLOSING));
#ifdef SERIAL_DO_RESTART
return ((info->flags & ASYNC_HUP_NOTIFY) ?
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 4ef7473..d5c689d6 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -169,7 +169,6 @@
#define SERIAL_IMX_MAJOR 207
#define MINOR_START 16
#define DEV_NAME "ttymxc"
-#define MAX_INTERNAL_IRQ MXC_INTERNAL_IRQS
/*
* This determines how often we check the modem status signals
@@ -741,10 +740,7 @@ static int imx_startup(struct uart_port *port)
/* do not use RTS IRQ on IrDA */
if (!USE_IRDA(sport)) {
- retval = request_irq(sport->rtsirq, imx_rtsint,
- (sport->rtsirq < MAX_INTERNAL_IRQ) ? 0 :
- IRQF_TRIGGER_FALLING |
- IRQF_TRIGGER_RISING,
+ retval = request_irq(sport->rtsirq, imx_rtsint, 0,
DRIVER_NAME, sport);
if (retval)
goto error_out3;
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 96c1cac..02da071 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -31,16 +31,19 @@
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
-#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/gpio.h>
#include <lantiq_soc.h>
#define PORT_LTQ_ASC 111
#define MAXPORTS 2
#define UART_DUMMY_UER_RX 1
-#define DRVNAME "ltq_asc"
+#define DRVNAME "lantiq,asc"
#ifdef __BIG_ENDIAN
#define LTQ_ASC_TBUF (0x0020 + 3)
#define LTQ_ASC_RBUF (0x0024 + 3)
@@ -114,6 +117,9 @@ static DEFINE_SPINLOCK(ltq_asc_lock);
struct ltq_uart_port {
struct uart_port port;
+ /* clock used to derive divider */
+ struct clk *fpiclk;
+ /* clock gating of the ASC core */
struct clk *clk;
unsigned int tx_irq;
unsigned int rx_irq;
@@ -316,7 +322,9 @@ lqasc_startup(struct uart_port *port)
struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
int retval;
- port->uartclk = clk_get_rate(ltq_port->clk);
+ if (ltq_port->clk)
+ clk_enable(ltq_port->clk);
+ port->uartclk = clk_get_rate(ltq_port->fpiclk);
ltq_w32_mask(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET),
port->membase + LTQ_ASC_CLC);
@@ -382,6 +390,8 @@ lqasc_shutdown(struct uart_port *port)
port->membase + LTQ_ASC_RXFCON);
ltq_w32_mask(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU,
port->membase + LTQ_ASC_TXFCON);
+ if (ltq_port->clk)
+ clk_disable(ltq_port->clk);
}
static void
@@ -630,7 +640,7 @@ lqasc_console_setup(struct console *co, char *options)
port = &ltq_port->port;
- port->uartclk = clk_get_rate(ltq_port->clk);
+ port->uartclk = clk_get_rate(ltq_port->fpiclk);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -668,37 +678,32 @@ static struct uart_driver lqasc_reg = {
static int __init
lqasc_probe(struct platform_device *pdev)
{
+ struct device_node *node = pdev->dev.of_node;
struct ltq_uart_port *ltq_port;
struct uart_port *port;
- struct resource *mmres, *irqres;
- int tx_irq, rx_irq, err_irq;
- struct clk *clk;
+ struct resource *mmres, irqres[3];
+ int line = 0;
int ret;
mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!mmres || !irqres)
+ ret = of_irq_to_resource_table(node, irqres, 3);
+ if (!mmres || (ret != 3)) {
+ dev_err(&pdev->dev,
+ "failed to get memory/irq for serial port\n");
return -ENODEV;
+ }
- if (pdev->id >= MAXPORTS)
- return -EBUSY;
+ /* check if this is the console port */
+ if (mmres->start != CPHYSADDR(LTQ_EARLY_ASC))
+ line = 1;
- if (lqasc_port[pdev->id] != NULL)
+ if (lqasc_port[line]) {
+ dev_err(&pdev->dev, "port %d already allocated\n", line);
return -EBUSY;
-
- clk = clk_get(&pdev->dev, "fpi");
- if (IS_ERR(clk)) {
- pr_err("failed to get fpi clk\n");
- return -ENOENT;
}
- tx_irq = platform_get_irq_byname(pdev, "tx");
- rx_irq = platform_get_irq_byname(pdev, "rx");
- err_irq = platform_get_irq_byname(pdev, "err");
- if ((tx_irq < 0) | (rx_irq < 0) | (err_irq < 0))
- return -ENODEV;
-
- ltq_port = kzalloc(sizeof(struct ltq_uart_port), GFP_KERNEL);
+ ltq_port = devm_kzalloc(&pdev->dev, sizeof(struct ltq_uart_port),
+ GFP_KERNEL);
if (!ltq_port)
return -ENOMEM;
@@ -709,19 +714,26 @@ lqasc_probe(struct platform_device *pdev)
port->ops = &lqasc_pops;
port->fifosize = 16;
port->type = PORT_LTQ_ASC,
- port->line = pdev->id;
+ port->line = line;
port->dev = &pdev->dev;
-
- port->irq = tx_irq; /* unused, just to be backward-compatibe */
+ /* unused, just to be backward-compatible */
+ port->irq = irqres[0].start;
port->mapbase = mmres->start;
- ltq_port->clk = clk;
+ ltq_port->fpiclk = clk_get_fpi();
+ if (IS_ERR(ltq_port->fpiclk)) {
+ pr_err("failed to get fpi clk\n");
+ return -ENOENT;
+ }
- ltq_port->tx_irq = tx_irq;
- ltq_port->rx_irq = rx_irq;
- ltq_port->err_irq = err_irq;
+ /* not all asc ports have clock gates, let's ignore the return code */
+ ltq_port->clk = clk_get(&pdev->dev, NULL);
- lqasc_port[pdev->id] = ltq_port;
+ ltq_port->tx_irq = irqres[0].start;
+ ltq_port->rx_irq = irqres[1].start;
+ ltq_port->err_irq = irqres[2].start;
+
+ lqasc_port[line] = ltq_port;
platform_set_drvdata(pdev, ltq_port);
ret = uart_add_one_port(&lqasc_reg, port);
@@ -729,10 +741,17 @@ lqasc_probe(struct platform_device *pdev)
return ret;
}
+static const struct of_device_id ltq_asc_match[] = {
+ { .compatible = DRVNAME },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_asc_match);
+
static struct platform_driver lqasc_driver = {
.driver = {
.name = DRVNAME,
.owner = THIS_MODULE,
+ .of_match_table = ltq_asc_match,
},
};
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index ec56d83..2e341b8 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -33,6 +33,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/of_device.h>
#include <asm/cacheflush.h>
@@ -675,6 +676,30 @@ static struct uart_driver auart_driver = {
#endif
};
+/*
+ * This function returns 1 if pdev isn't a device instantiated by dt, 0 if it
+ * could successfully get all information from dt, or a negative errno.
+ */
+static int serial_mxs_probe_dt(struct mxs_auart_port *s,
+ struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ if (!np)
+ /* no device tree device */
+ return 1;
+
+ ret = of_alias_get_id(np, "serial");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get alias id: %d\n", ret);
+ return ret;
+ }
+ s->port.line = ret;
+
+ return 0;
+}
+
static int __devinit mxs_auart_probe(struct platform_device *pdev)
{
struct mxs_auart_port *s;
@@ -689,6 +714,12 @@ static int __devinit mxs_auart_probe(struct platform_device *pdev)
goto out;
}
+ ret = serial_mxs_probe_dt(s, pdev);
+ if (ret > 0)
+ s->port.line = pdev->id < 0 ? 0 : pdev->id;
+ else if (ret < 0)
+ goto out_free;
+
pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(pinctrl)) {
ret = PTR_ERR(pinctrl);
@@ -711,7 +742,6 @@ static int __devinit mxs_auart_probe(struct platform_device *pdev)
s->port.membase = ioremap(r->start, resource_size(r));
s->port.ops = &mxs_auart_ops;
s->port.iotype = UPIO_MEM;
- s->port.line = pdev->id < 0 ? 0 : pdev->id;
s->port.fifosize = 16;
s->port.uartclk = clk_get_rate(s->clk);
s->port.type = PORT_IMX;
@@ -728,7 +758,7 @@ static int __devinit mxs_auart_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, s);
- auart_port[pdev->id] = s;
+ auart_port[s->port.line] = s;
mxs_auart_reset(&s->port);
@@ -769,12 +799,19 @@ static int __devexit mxs_auart_remove(struct platform_device *pdev)
return 0;
}
+static struct of_device_id mxs_auart_dt_ids[] = {
+ { .compatible = "fsl,imx23-auart", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_auart_dt_ids);
+
static struct platform_driver mxs_auart_driver = {
.probe = mxs_auart_probe,
.remove = __devexit_p(mxs_auart_remove),
.driver = {
.name = "mxs-auart",
.owner = THIS_MODULE,
+ .of_match_table = mxs_auart_dt_ids,
},
};
@@ -807,3 +844,4 @@ module_init(mxs_auart_init);
module_exit(mxs_auart_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Freescale MXS application uart driver");
+MODULE_ALIAS("platform:mxs-auart");
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index 0be8a2f..f76b1688 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -31,6 +31,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/major.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index 34bd345..6ae2a58 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -466,7 +466,7 @@ static void serial_txx9_break_ctl(struct uart_port *port, int break_state)
spin_unlock_irqrestore(&up->port.lock, flags);
}
-#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || (CONFIG_CONSOLE_POLL)
+#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
/*
* Wait for transmitter & holding register to empty
*/
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 4604153..1bd9163 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2179,6 +2179,16 @@ static int __devinit sci_init_single(struct platform_device *dev,
return 0;
}
+static void sci_cleanup_single(struct sci_port *port)
+{
+ sci_free_gpios(port);
+
+ clk_put(port->iclk);
+ clk_put(port->fclk);
+
+ pm_runtime_disable(port->port.dev);
+}
+
#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
static void serial_console_putchar(struct uart_port *port, int ch)
{
@@ -2360,14 +2370,10 @@ static int sci_remove(struct platform_device *dev)
cpufreq_unregister_notifier(&port->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
- sci_free_gpios(port);
-
uart_remove_one_port(&sci_uart_driver, &port->port);
- clk_put(port->iclk);
- clk_put(port->fclk);
+ sci_cleanup_single(port);
- pm_runtime_disable(&dev->dev);
return 0;
}
@@ -2385,14 +2391,20 @@ static int __devinit sci_probe_single(struct platform_device *dev,
index+1, SCI_NPORTS);
dev_notice(&dev->dev, "Consider bumping "
"CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
- return 0;
+ return -EINVAL;
}
ret = sci_init_single(dev, sciport, index, p);
if (ret)
return ret;
- return uart_add_one_port(&sci_uart_driver, &sciport->port);
+ ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
+ if (ret) {
+ sci_cleanup_single(sciport);
+ return ret;
+ }
+
+ return 0;
}
static int __devinit sci_probe(struct platform_device *dev)
@@ -2413,24 +2425,22 @@ static int __devinit sci_probe(struct platform_device *dev)
ret = sci_probe_single(dev, dev->id, p, sp);
if (ret)
- goto err_unreg;
+ return ret;
sp->freq_transition.notifier_call = sci_notifier;
ret = cpufreq_register_notifier(&sp->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
- if (unlikely(ret < 0))
- goto err_unreg;
+ if (unlikely(ret < 0)) {
+ sci_cleanup_single(sp);
+ return ret;
+ }
#ifdef CONFIG_SH_STANDARD_BIOS
sh_bios_gdb_detach();
#endif
return 0;
-
-err_unreg:
- sci_remove(dev);
- return ret;
}
static int sci_suspend(struct device *dev)
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 4001eee..92c00b2 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -57,6 +57,7 @@
#include <linux/ioport.h>
#include <linux/irqflags.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/major.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index 5ed0daa..593d40a 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -3338,9 +3338,9 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
printk("%s(%d):block_til_ready blocking on %s count=%d\n",
__FILE__,__LINE__, tty->driver->name, port->count );
- tty_unlock(tty);
+ tty_unlock();
schedule();
- tty_lock(tty);
+ tty_lock();
}
set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 45b43f1..aa1debf 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -3336,9 +3336,9 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
}
DBGINFO(("%s block_til_ready wait\n", tty->driver->name));
- tty_unlock(tty);
+ tty_unlock();
schedule();
- tty_lock(tty);
+ tty_lock();
}
set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index 4a1e4f0..a3dddc1 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -3357,9 +3357,9 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
printk("%s(%d):%s block_til_ready() count=%d\n",
__FILE__,__LINE__, tty->driver->name, port->count );
- tty_unlock(tty);
+ tty_unlock();
schedule();
- tty_lock(tty);
+ tty_lock();
}
set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 9e930c0..b425c79 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -185,7 +185,6 @@ void free_tty_struct(struct tty_struct *tty)
put_device(tty->dev);
kfree(tty->write_buf);
tty_buffer_free_all(tty);
- tty->magic = 0xDEADDEAD;
kfree(tty);
}
@@ -574,7 +573,7 @@ void __tty_hangup(struct tty_struct *tty)
}
spin_unlock(&redirect_lock);
- tty_lock(tty);
+ tty_lock();
/* some functions below drop BTM, so we need this bit */
set_bit(TTY_HUPPING, &tty->flags);
@@ -667,7 +666,7 @@ void __tty_hangup(struct tty_struct *tty)
clear_bit(TTY_HUPPING, &tty->flags);
tty_ldisc_enable(tty);
- tty_unlock(tty);
+ tty_unlock();
if (f)
fput(f);
@@ -1104,12 +1103,12 @@ void tty_write_message(struct tty_struct *tty, char *msg)
{
if (tty) {
mutex_lock(&tty->atomic_write_lock);
- tty_lock(tty);
+ tty_lock();
if (tty->ops->write && !test_bit(TTY_CLOSING, &tty->flags)) {
- tty_unlock(tty);
+ tty_unlock();
tty->ops->write(tty, msg, strlen(msg));
} else
- tty_unlock(tty);
+ tty_unlock();
tty_write_unlock(tty);
}
return;
@@ -1404,7 +1403,6 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
}
initialize_tty_struct(tty, driver, idx);
- tty_lock(tty);
retval = tty_driver_install_tty(driver, tty);
if (retval < 0)
goto err_deinit_tty;
@@ -1417,11 +1415,9 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
retval = tty_ldisc_setup(tty, tty->link);
if (retval)
goto err_release_tty;
- /* Return the tty locked so that it cannot vanish under the caller */
return tty;
err_deinit_tty:
- tty_unlock(tty);
deinitialize_tty_struct(tty);
free_tty_struct(tty);
err_module_put:
@@ -1430,7 +1426,6 @@ err_module_put:
/* call the tty release_tty routine to clean out this slot */
err_release_tty:
- tty_unlock(tty);
printk_ratelimited(KERN_INFO "tty_init_dev: ldisc open failed, "
"clearing slot %d\n", idx);
release_tty(tty, idx);
@@ -1633,7 +1628,7 @@ int tty_release(struct inode *inode, struct file *filp)
if (tty_paranoia_check(tty, inode, __func__))
return 0;
- tty_lock(tty);
+ tty_lock();
check_tty_count(tty, __func__);
__tty_fasync(-1, filp, 0);
@@ -1642,11 +1637,10 @@ int tty_release(struct inode *inode, struct file *filp)
pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER);
devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
- /* Review: parallel close */
o_tty = tty->link;
if (tty_release_checks(tty, o_tty, idx)) {
- tty_unlock(tty);
+ tty_unlock();
return 0;
}
@@ -1658,7 +1652,7 @@ int tty_release(struct inode *inode, struct file *filp)
if (tty->ops->close)
tty->ops->close(tty, filp);
- tty_unlock(tty);
+ tty_unlock();
/*
* Sanity check: if tty->count is going to zero, there shouldn't be
* any waiters on tty->read_wait or tty->write_wait. We test the
@@ -1681,7 +1675,7 @@ int tty_release(struct inode *inode, struct file *filp)
opens on /dev/tty */
mutex_lock(&tty_mutex);
- tty_lock_pair(tty, o_tty);
+ tty_lock();
tty_closing = tty->count <= 1;
o_tty_closing = o_tty &&
(o_tty->count <= (pty_master ? 1 : 0));
@@ -1712,7 +1706,7 @@ int tty_release(struct inode *inode, struct file *filp)
printk(KERN_WARNING "%s: %s: read/write wait queue active!\n",
__func__, tty_name(tty, buf));
- tty_unlock_pair(tty, o_tty);
+ tty_unlock();
mutex_unlock(&tty_mutex);
schedule();
}
@@ -1775,7 +1769,7 @@ int tty_release(struct inode *inode, struct file *filp)
/* check whether both sides are closing ... */
if (!tty_closing || (o_tty && !o_tty_closing)) {
- tty_unlock_pair(tty, o_tty);
+ tty_unlock();
return 0;
}
@@ -1788,16 +1782,14 @@ int tty_release(struct inode *inode, struct file *filp)
tty_ldisc_release(tty, o_tty);
/*
* The release_tty function takes care of the details of clearing
- * the slots and preserving the termios structure. The tty_unlock_pair
- * should be safe as we keep a kref while the tty is locked (so the
- * unlock never unlocks a freed tty).
+ * the slots and preserving the termios structure.
*/
release_tty(tty, idx);
- tty_unlock_pair(tty, o_tty);
/* Make this pty number available for reallocation */
if (devpts)
devpts_kill_index(inode, idx);
+ tty_unlock();
return 0;
}
@@ -1901,9 +1893,6 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
* Locking: tty_mutex protects tty, tty_lookup_driver and tty_init_dev.
* tty->count should protect the rest.
* ->siglock protects ->signal/->sighand
- *
- * Note: the tty_unlock/lock cases without a ref are only safe due to
- * tty_mutex
*/
static int tty_open(struct inode *inode, struct file *filp)
@@ -1927,7 +1916,8 @@ retry_open:
retval = 0;
mutex_lock(&tty_mutex);
- /* This is protected by the tty_mutex */
+ tty_lock();
+
tty = tty_open_current_tty(device, filp);
if (IS_ERR(tty)) {
retval = PTR_ERR(tty);
@@ -1948,19 +1938,17 @@ retry_open:
}
if (tty) {
- tty_lock(tty);
retval = tty_reopen(tty);
- if (retval < 0) {
- tty_unlock(tty);
+ if (retval)
tty = ERR_PTR(retval);
- }
- } else /* Returns with the tty_lock held for now */
+ } else
tty = tty_init_dev(driver, index);
mutex_unlock(&tty_mutex);
if (driver)
tty_driver_kref_put(driver);
if (IS_ERR(tty)) {
+ tty_unlock();
retval = PTR_ERR(tty);
goto err_file;
}
@@ -1989,7 +1977,7 @@ retry_open:
printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__,
retval, tty->name);
#endif
- tty_unlock(tty); /* need to call tty_release without BTM */
+ tty_unlock(); /* need to call tty_release without BTM */
tty_release(inode, filp);
if (retval != -ERESTARTSYS)
return retval;
@@ -2001,15 +1989,17 @@ retry_open:
/*
* Need to reset f_op in case a hangup happened.
*/
+ tty_lock();
if (filp->f_op == &hung_up_tty_fops)
filp->f_op = &tty_fops;
+ tty_unlock();
goto retry_open;
}
- tty_unlock(tty);
+ tty_unlock();
mutex_lock(&tty_mutex);
- tty_lock(tty);
+ tty_lock();
spin_lock_irq(&current->sighand->siglock);
if (!noctty &&
current->signal->leader &&
@@ -2017,10 +2007,11 @@ retry_open:
tty->session == NULL)
__proc_set_tty(current, tty);
spin_unlock_irq(&current->sighand->siglock);
- tty_unlock(tty);
+ tty_unlock();
mutex_unlock(&tty_mutex);
return 0;
err_unlock:
+ tty_unlock();
mutex_unlock(&tty_mutex);
/* after locks to avoid deadlock */
if (!IS_ERR_OR_NULL(driver))
@@ -2103,13 +2094,10 @@ out:
static int tty_fasync(int fd, struct file *filp, int on)
{
- struct tty_struct *tty = file_tty(filp);
int retval;
-
- tty_lock(tty);
+ tty_lock();
retval = __tty_fasync(fd, filp, on);
- tty_unlock(tty);
-
+ tty_unlock();
return retval;
}
@@ -2946,7 +2934,6 @@ void initialize_tty_struct(struct tty_struct *tty,
tty->pgrp = NULL;
tty->overrun_time = jiffies;
tty_buffer_init(tty);
- mutex_init(&tty->legacy_mutex);
mutex_init(&tty->termios_mutex);
mutex_init(&tty->ldisc_mutex);
init_waitqueue_head(&tty->write_wait);
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index ba8be39..9911eb6 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -568,7 +568,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
if (IS_ERR(new_ldisc))
return PTR_ERR(new_ldisc);
- tty_lock(tty);
+ tty_lock();
/*
* We need to look at the tty locking here for pty/tty pairs
* when both sides try to change in parallel.
@@ -582,12 +582,12 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
*/
if (tty->ldisc->ops->num == ldisc) {
- tty_unlock(tty);
+ tty_unlock();
tty_ldisc_put(new_ldisc);
return 0;
}
- tty_unlock(tty);
+ tty_unlock();
/*
* Problem: What do we do if this blocks ?
* We could deadlock here
@@ -595,7 +595,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
tty_wait_until_sent(tty, 0);
- tty_lock(tty);
+ tty_lock();
mutex_lock(&tty->ldisc_mutex);
/*
@@ -605,10 +605,10 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
while (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
mutex_unlock(&tty->ldisc_mutex);
- tty_unlock(tty);
+ tty_unlock();
wait_event(tty_ldisc_wait,
test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0);
- tty_lock(tty);
+ tty_lock();
mutex_lock(&tty->ldisc_mutex);
}
@@ -623,7 +623,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
o_ldisc = tty->ldisc;
- tty_unlock(tty);
+ tty_unlock();
/*
* Make sure we don't change while someone holds a
* reference to the line discipline. The TTY_LDISC bit
@@ -650,7 +650,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
retval = tty_ldisc_wait_idle(tty, 5 * HZ);
- tty_lock(tty);
+ tty_lock();
mutex_lock(&tty->ldisc_mutex);
/* handle wait idle failure locked */
@@ -665,7 +665,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
clear_bit(TTY_LDISC_CHANGING, &tty->flags);
mutex_unlock(&tty->ldisc_mutex);
tty_ldisc_put(new_ldisc);
- tty_unlock(tty);
+ tty_unlock();
return -EIO;
}
@@ -708,7 +708,7 @@ enable:
if (o_work)
schedule_work(&o_tty->buf.work);
mutex_unlock(&tty->ldisc_mutex);
- tty_unlock(tty);
+ tty_unlock();
return retval;
}
@@ -816,11 +816,11 @@ void tty_ldisc_hangup(struct tty_struct *tty)
* need to wait for another function taking the BTM
*/
clear_bit(TTY_LDISC, &tty->flags);
- tty_unlock(tty);
+ tty_unlock();
cancel_work_sync(&tty->buf.work);
mutex_unlock(&tty->ldisc_mutex);
retry:
- tty_lock(tty);
+ tty_lock();
mutex_lock(&tty->ldisc_mutex);
/* At this point we have a closed ldisc and we want to
@@ -831,7 +831,7 @@ retry:
if (atomic_read(&tty->ldisc->users) != 1) {
char cur_n[TASK_COMM_LEN], tty_n[64];
long timeout = 3 * HZ;
- tty_unlock(tty);
+ tty_unlock();
while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) {
timeout = MAX_SCHEDULE_TIMEOUT;
@@ -894,23 +894,6 @@ int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
tty_ldisc_enable(tty);
return 0;
}
-
-static void tty_ldisc_kill(struct tty_struct *tty)
-{
- mutex_lock(&tty->ldisc_mutex);
- /*
- * Now kill off the ldisc
- */
- tty_ldisc_close(tty, tty->ldisc);
- tty_ldisc_put(tty->ldisc);
- /* Force an oops if we mess this up */
- tty->ldisc = NULL;
-
- /* Ensure the next open requests the N_TTY ldisc */
- tty_set_termios_ldisc(tty, N_TTY);
- mutex_unlock(&tty->ldisc_mutex);
-}
-
/**
* tty_ldisc_release - release line discipline
* @tty: tty being shut down
@@ -929,19 +912,27 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
* race with the set_ldisc code path.
*/
- tty_unlock_pair(tty, o_tty);
+ tty_unlock();
tty_ldisc_halt(tty);
tty_ldisc_flush_works(tty);
- if (o_tty) {
- tty_ldisc_halt(o_tty);
- tty_ldisc_flush_works(o_tty);
- }
- tty_lock_pair(tty, o_tty);
+ tty_lock();
+ mutex_lock(&tty->ldisc_mutex);
+ /*
+ * Now kill off the ldisc
+ */
+ tty_ldisc_close(tty, tty->ldisc);
+ tty_ldisc_put(tty->ldisc);
+ /* Force an oops if we mess this up */
+ tty->ldisc = NULL;
+
+ /* Ensure the next open requests the N_TTY ldisc */
+ tty_set_termios_ldisc(tty, N_TTY);
+ mutex_unlock(&tty->ldisc_mutex);
- tty_ldisc_kill(tty);
+ /* This will need doing differently if we need to lock */
if (o_tty)
- tty_ldisc_kill(o_tty);
+ tty_ldisc_release(o_tty, NULL);
/* And the memory resources remaining (buffers, termios) will be
disposed of when the kref hits zero */
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index 69adc80..9ff986c 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -4,59 +4,29 @@
#include <linux/semaphore.h>
#include <linux/sched.h>
-/* Legacy tty mutex glue */
+/*
+ * The 'big tty mutex'
+ *
+ * This mutex is taken and released by tty_lock() and tty_unlock(),
+ * replacing the older big kernel lock.
+ * It can no longer be taken recursively, and does not get
+ * released implicitly while sleeping.
+ *
+ * Don't use in new code.
+ */
+static DEFINE_MUTEX(big_tty_mutex);
/*
* Getting the big tty mutex.
*/
-
-void __lockfunc tty_lock(struct tty_struct *tty)
+void __lockfunc tty_lock(void)
{
- if (tty->magic != TTY_MAGIC) {
- printk(KERN_ERR "L Bad %p\n", tty);
- WARN_ON(1);
- return;
- }
- tty_kref_get(tty);
- mutex_lock(&tty->legacy_mutex);
+ mutex_lock(&big_tty_mutex);
}
EXPORT_SYMBOL(tty_lock);
-void __lockfunc tty_unlock(struct tty_struct *tty)
+void __lockfunc tty_unlock(void)
{
- if (tty->magic != TTY_MAGIC) {
- printk(KERN_ERR "U Bad %p\n", tty);
- WARN_ON(1);
- return;
- }
- mutex_unlock(&tty->legacy_mutex);
- tty_kref_put(tty);
+ mutex_unlock(&big_tty_mutex);
}
EXPORT_SYMBOL(tty_unlock);
-
-/*
- * Getting the big tty mutex for a pair of ttys with lock ordering
- * On a non pty/tty pair tty2 can be NULL which is just fine.
- */
-void __lockfunc tty_lock_pair(struct tty_struct *tty,
- struct tty_struct *tty2)
-{
- if (tty < tty2) {
- tty_lock(tty);
- tty_lock(tty2);
- } else {
- if (tty2 && tty2 != tty)
- tty_lock(tty2);
- tty_lock(tty);
- }
-}
-EXPORT_SYMBOL(tty_lock_pair);
-
-void __lockfunc tty_unlock_pair(struct tty_struct *tty,
- struct tty_struct *tty2)
-{
- tty_unlock(tty);
- if (tty2 && tty2 != tty)
- tty_unlock(tty2);
-}
-EXPORT_SYMBOL(tty_unlock_pair);
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index d9cca95..bf6e238 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -230,7 +230,7 @@ int tty_port_block_til_ready(struct tty_port *port,
/* block if port is in the process of being closed */
if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
- wait_event_interruptible_tty(tty, port->close_wait,
+ wait_event_interruptible_tty(port->close_wait,
!(port->flags & ASYNC_CLOSING));
if (port->flags & ASYNC_HUP_NOTIFY)
return -EAGAIN;
@@ -296,9 +296,9 @@ int tty_port_block_til_ready(struct tty_port *port,
retval = -ERESTARTSYS;
break;
}
- tty_unlock(tty);
+ tty_unlock();
schedule();
- tty_lock(tty);
+ tty_lock();
}
finish_wait(&port->open_wait, &wait);
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index c691eea..f5ed3d7 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -46,7 +46,7 @@ obj-$(CONFIG_USB_MICROTEK) += image/
obj-$(CONFIG_USB_SERIAL) += serial/
obj-$(CONFIG_USB) += misc/
-obj-$(CONFIG_USB) += phy/
+obj-$(CONFIG_USB_COMMON) += phy/
obj-$(CONFIG_EARLY_PRINTK_DBGP) += early/
obj-$(CONFIG_USB_ATM) += atm/
diff --git a/drivers/usb/atm/xusbatm.c b/drivers/usb/atm/xusbatm.c
index 14ec9f0..b3b1bb7 100644
--- a/drivers/usb/atm/xusbatm.c
+++ b/drivers/usb/atm/xusbatm.c
@@ -20,7 +20,7 @@
******************************************************************************/
#include <linux/module.h>
-#include <linux/etherdevice.h> /* for random_ether_addr() */
+#include <linux/etherdevice.h> /* for eth_random_addr() */
#include "usbatm.h"
@@ -163,7 +163,7 @@ static int xusbatm_atm_start(struct usbatm_data *usbatm,
atm_dbg(usbatm, "%s entered\n", __func__);
/* use random MAC as we've no way to get it from the device */
- random_ether_addr(atm_dev->esi);
+ eth_random_addr(atm_dev->esi);
return 0;
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f2a120e..36a2a0b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -567,6 +567,14 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
usb_autopm_put_interface(acm->control);
+ /*
+ * Unthrottle device in case the TTY was closed while throttled.
+ */
+ spin_lock_irq(&acm->read_lock);
+ acm->throttled = 0;
+ acm->throttle_req = 0;
+ spin_unlock_irq(&acm->read_lock);
+
if (acm_submit_read_urbs(acm, GFP_KERNEL))
goto error_submit_read_urbs;
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index ea8b304..ee46927 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -55,6 +55,15 @@ static const struct usb_device_id wdm_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 9, /* NOTE: CDC ECM control interface! */
},
+ {
+ /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
+ USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = HUAWEI_VENDOR_ID,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 57, /* NOTE: CDC ECM control interface! */
+ },
{ }
};
@@ -491,6 +500,8 @@ retry:
goto retry;
}
if (!desc->reslength) { /* zero length read */
+ dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__);
+ clear_bit(WDM_READ, &desc->flags);
spin_unlock_irq(&desc->iuspin);
goto retry;
}
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 57ed9e4..622b4a4 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -493,15 +493,6 @@ static int hcd_pci_suspend_noirq(struct device *dev)
pci_save_state(pci_dev);
- /*
- * Some systems crash if an EHCI controller is in D3 during
- * a sleep transition. We have to leave such controllers in D0.
- */
- if (hcd->broken_pci_sleep) {
- dev_dbg(dev, "Staying in PCI D0\n");
- return retval;
- }
-
/* If the root hub is dead rather than suspended, disallow remote
* wakeup. usb_hc_died() should ensure that both hosts are marked as
* dying, so we only need to check the primary roothub.
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 04fb834..8fb4849 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2324,12 +2324,16 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
static int hub_port_reset(struct usb_hub *hub, int port1,
struct usb_device *udev, unsigned int delay, bool warm);
-/* Is a USB 3.0 port in the Inactive state? */
-static bool hub_port_inactive(struct usb_hub *hub, u16 portstatus)
+/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
+ * Port warm reset is required to recover
+ */
+static bool hub_port_warm_reset_required(struct usb_hub *hub, u16 portstatus)
{
return hub_is_superspeed(hub->hdev) &&
- (portstatus & USB_PORT_STAT_LINK_STATE) ==
- USB_SS_PORT_LS_SS_INACTIVE;
+ (((portstatus & USB_PORT_STAT_LINK_STATE) ==
+ USB_SS_PORT_LS_SS_INACTIVE) ||
+ ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+ USB_SS_PORT_LS_COMP_MOD));
}
static int hub_port_wait_reset(struct usb_hub *hub, int port1,
@@ -2365,7 +2369,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
*
* See https://bugzilla.kernel.org/show_bug.cgi?id=41752
*/
- if (hub_port_inactive(hub, portstatus)) {
+ if (hub_port_warm_reset_required(hub, portstatus)) {
int ret;
if ((portchange & USB_PORT_STAT_C_CONNECTION))
@@ -3379,7 +3383,7 @@ int usb_disable_lpm(struct usb_device *udev)
return 0;
udev->lpm_disable_count++;
- if ((udev->u1_params.timeout == 0 && udev->u1_params.timeout == 0))
+ if ((udev->u1_params.timeout == 0 && udev->u2_params.timeout == 0))
return 0;
/* If LPM is enabled, attempt to disable it. */
@@ -4408,9 +4412,7 @@ static void hub_events(void)
/* Warm reset a USB3 protocol port if it's in
* SS.Inactive state.
*/
- if (hub_is_superspeed(hub->hdev) &&
- (portstatus & USB_PORT_STAT_LINK_STATE)
- == USB_SS_PORT_LS_SS_INACTIVE) {
+ if (hub_port_warm_reset_required(hub, portstatus)) {
dev_dbg(hub_dev, "warm reset port %d\n", i);
hub_port_reset(hub, i, NULL,
HUB_BH_RESET_TIME, true);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index b548cf1..bdd1c67 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1838,7 +1838,6 @@ free_interfaces:
intfc = cp->intf_cache[i];
intf->altsetting = intfc->altsetting;
intf->num_altsetting = intfc->num_altsetting;
- intf->intf_assoc = find_iad(dev, cp, i);
kref_get(&intfc->ref);
alt = usb_altnum_to_altsetting(intf, 0);
@@ -1851,6 +1850,8 @@ free_interfaces:
if (!alt)
alt = &intf->altsetting[0];
+ intf->intf_assoc =
+ find_iad(dev, cp, alt->desc.bInterfaceNumber);
intf->cur_altsetting = alt;
usb_enable_interface(dev, intf, true);
intf->dev.parent = &dev->dev;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 3df1a19..ec70df7 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1091,7 +1091,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
if (r == req) {
/* wait until it is processed */
dwc3_stop_active_transfer(dwc, dep->number);
- goto out0;
+ goto out1;
}
dev_err(dwc->dev, "request %p was not queued to %s\n",
request, ep->name);
@@ -1099,6 +1099,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
goto out0;
}
+out1:
/* giveback the request */
dwc3_gadget_giveback(dep, req, -ECONNRESET);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index bddc8fd..271ca16 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -185,7 +185,7 @@ config USB_FUSB300
config USB_OMAP
tristate "OMAP USB Device Controller"
- depends on ARCH_OMAP
+ depends on ARCH_OMAP1
select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3 || MACH_OMAP_H4_OTG
select USB_OTG_UTILS if ARCH_OMAP
help
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index e23bf79..9a9bced 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -599,12 +599,6 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
spin_lock_irqsave(&ep->udc->lock, flags);
- if (ep->ep.desc) {
- spin_unlock_irqrestore(&ep->udc->lock, flags);
- DBG(DBG_ERR, "ep%d already enabled\n", ep->index);
- return -EBUSY;
- }
-
ep->ep.desc = desc;
ep->ep.maxpacket = maxpacket;
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 51881f3..b09452d 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -1596,7 +1596,7 @@ static int qe_ep_enable(struct usb_ep *_ep,
ep = container_of(_ep, struct qe_ep, ep);
/* catch various bogus parameters */
- if (!_ep || !desc || ep->ep.desc || _ep->name == ep_name[0] ||
+ if (!_ep || !desc || _ep->name == ep_name[0] ||
(desc->bDescriptorType != USB_DT_ENDPOINT))
return -EINVAL;
diff --git a/drivers/usb/gadget/fsl_qe_udc.h b/drivers/usb/gadget/fsl_qe_udc.h
index 4c07ca9..7026919 100644
--- a/drivers/usb/gadget/fsl_qe_udc.h
+++ b/drivers/usb/gadget/fsl_qe_udc.h
@@ -153,10 +153,10 @@ struct usb_ep_para{
#define USB_BUSMODE_DTB 0x02
/* Endpoint basic handle */
-#define ep_index(EP) ((EP)->desc->bEndpointAddress & 0xF)
+#define ep_index(EP) ((EP)->ep.desc->bEndpointAddress & 0xF)
#define ep_maxpacket(EP) ((EP)->ep.maxpacket)
#define ep_is_in(EP) ((ep_index(EP) == 0) ? (EP->udc->ep0_dir == \
- USB_DIR_IN) : ((EP)->desc->bEndpointAddress \
+ USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \
& USB_DIR_IN) == USB_DIR_IN)
/* ep0 transfer state */
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 2831685..bc6f9bb 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -567,7 +567,7 @@ static int fsl_ep_enable(struct usb_ep *_ep,
ep = container_of(_ep, struct fsl_ep, ep);
/* catch various bogus parameters */
- if (!_ep || !desc || ep->ep.desc
+ if (!_ep || !desc
|| (desc->bDescriptorType != USB_DT_ENDPOINT))
return -EINVAL;
@@ -2575,7 +2575,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
/* for ep0: the desc defined here;
* for other eps, gadget layer called ep_enable with defined desc
*/
- udc_controller->eps[0].desc = &fsl_ep0_desc;
+ udc_controller->eps[0].ep.desc = &fsl_ep0_desc;
udc_controller->eps[0].ep.maxpacket = USB_MAX_CTRL_PAYLOAD;
/* setup the udc->eps[] for non-control endpoints and link
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index 5cd7b7e..f61a967 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -568,10 +568,10 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
/*
* ### internal used help routines.
*/
-#define ep_index(EP) ((EP)->desc->bEndpointAddress&0xF)
+#define ep_index(EP) ((EP)->ep.desc->bEndpointAddress&0xF)
#define ep_maxpacket(EP) ((EP)->ep.maxpacket)
#define ep_is_in(EP) ( (ep_index(EP) == 0) ? (EP->udc->ep0_dir == \
- USB_DIR_IN ):((EP)->desc->bEndpointAddress \
+ USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \
& USB_DIR_IN)==USB_DIR_IN)
#define get_ep_by_pipe(udc, pipe) ((pipe == 1)? &udc->eps[0]: \
&udc->eps[pipe])
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index b241e6c..3d28fb9 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -102,7 +102,7 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
unsigned long flags;
ep = container_of(_ep, struct goku_ep, ep);
- if (!_ep || !desc || ep->ep.desc
+ if (!_ep || !desc
|| desc->bDescriptorType != USB_DT_ENDPOINT)
return -EINVAL;
dev = ep->dev;
diff --git a/drivers/usb/gadget/lpc32xx_udc.c b/drivers/usb/gadget/lpc32xx_udc.c
index 262acfd..2ab0388 100644
--- a/drivers/usb/gadget/lpc32xx_udc.c
+++ b/drivers/usb/gadget/lpc32xx_udc.c
@@ -61,6 +61,7 @@
#include <mach/irqs.h>
#include <mach/board.h>
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c
index dbcd132..117a4bb 100644
--- a/drivers/usb/gadget/mv_udc_core.c
+++ b/drivers/usb/gadget/mv_udc_core.c
@@ -464,7 +464,7 @@ static int mv_ep_enable(struct usb_ep *_ep,
ep = container_of(_ep, struct mv_ep, ep);
udc = ep->udc;
- if (!_ep || !desc || ep->ep.desc
+ if (!_ep || !desc
|| desc->bDescriptorType != USB_DT_ENDPOINT)
return -EINVAL;
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 7ba3246..89cbd2b 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -44,7 +44,8 @@
#include <asm/mach-types.h>
#include <plat/dma.h>
-#include <plat/usb.h>
+
+#include <mach/usb.h>
#include "omap_udc.h"
@@ -153,7 +154,7 @@ static int omap_ep_enable(struct usb_ep *_ep,
u16 maxp;
/* catch various bogus parameters */
- if (!_ep || !desc || ep->ep.desc
+ if (!_ep || !desc
|| desc->bDescriptorType != USB_DT_ENDPOINT
|| ep->bEndpointAddress != desc->bEndpointAddress
|| ep->maxpacket < usb_endpoint_maxp(desc)) {
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index d7c8cb3..f7ff9e8 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -218,7 +218,7 @@ static int pxa25x_ep_enable (struct usb_ep *_ep,
struct pxa25x_udc *dev;
ep = container_of (_ep, struct pxa25x_ep, ep);
- if (!_ep || !desc || ep->ep.desc || _ep->name == ep0name
+ if (!_ep || !desc || _ep->name == ep0name
|| desc->bDescriptorType != USB_DT_ENDPOINT
|| ep->bEndpointAddress != desc->bEndpointAddress
|| ep->fifo_size < usb_endpoint_maxp (desc)) {
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index 36c6836..236b271 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -760,7 +760,7 @@ static int s3c_hsudc_ep_enable(struct usb_ep *_ep,
u32 ecr = 0;
hsep = our_ep(_ep);
- if (!_ep || !desc || hsep->ep.desc || _ep->name == ep0name
+ if (!_ep || !desc || _ep->name == ep0name
|| desc->bDescriptorType != USB_DT_ENDPOINT
|| hsep->bEndpointAddress != desc->bEndpointAddress
|| ep_maxpacket(hsep) < usb_endpoint_maxp(desc))
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 3de71d3..f2e51f5 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1062,7 +1062,7 @@ static int s3c2410_udc_ep_enable(struct usb_ep *_ep,
ep = to_s3c2410_ep(_ep);
- if (!_ep || !desc || ep->ep.desc
+ if (!_ep || !desc
|| _ep->name == ep0name
|| desc->bDescriptorType != USB_DT_ENDPOINT)
return -EINVAL;
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
index c46439c..5444866 100644
--- a/drivers/usb/gadget/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/tcm_usb_gadget.c
@@ -294,7 +294,7 @@ static int bot_send_write_request(struct usbg_cmd *cmd)
pr_err("%s(%d)\n", __func__, __LINE__);
wait_for_completion(&cmd->write_complete);
- transport_generic_process_write(se_cmd);
+ target_execute_cmd(se_cmd);
cleanup:
return ret;
}
@@ -725,7 +725,7 @@ static int uasp_send_write_request(struct usbg_cmd *cmd)
}
wait_for_completion(&cmd->write_complete);
- transport_generic_process_write(se_cmd);
+ target_execute_cmd(se_cmd);
cleanup:
return ret;
}
@@ -1065,16 +1065,20 @@ static void usbg_cmd_work(struct work_struct *work)
tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
cmd->prio_attr, cmd->sense_iu.sense);
-
- transport_send_check_condition_and_sense(se_cmd,
- TCM_UNSUPPORTED_SCSI_OPCODE, 1);
- usbg_cleanup_cmd(cmd);
- return;
+ goto out;
}
- target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
+ if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
- 0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE);
+ 0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE) < 0)
+ goto out;
+
+ return;
+
+out:
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_UNSUPPORTED_SCSI_OPCODE, 1);
+ usbg_cleanup_cmd(cmd);
}
static int usbg_submit_command(struct f_uas *fu,
@@ -1177,16 +1181,20 @@ static void bot_cmd_work(struct work_struct *work)
tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
cmd->prio_attr, cmd->sense_iu.sense);
-
- transport_send_check_condition_and_sense(se_cmd,
- TCM_UNSUPPORTED_SCSI_OPCODE, 1);
- usbg_cleanup_cmd(cmd);
- return;
+ goto out;
}
- target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
+ if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
- cmd->data_len, cmd->prio_attr, dir, 0);
+ cmd->data_len, cmd->prio_attr, dir, 0) < 0)
+ goto out;
+
+ return;
+
+out:
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_UNSUPPORTED_SCSI_OPCODE, 1);
+ usbg_cleanup_cmd(cmd);
}
static int bot_submit_command(struct f_uas *fu,
@@ -1400,19 +1408,6 @@ static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg)
return 1;
}
-static int usbg_new_cmd(struct se_cmd *se_cmd)
-{
- struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
- se_cmd);
- int ret;
-
- ret = target_setup_cmd_from_cdb(se_cmd, cmd->cmd_buf);
- if (ret)
- return ret;
-
- return transport_generic_map_mem_to_cmd(se_cmd, NULL, 0, NULL, 0);
-}
-
static void usbg_cmd_release(struct kref *ref)
{
struct usbg_cmd *cmd = container_of(ref, struct usbg_cmd,
@@ -1902,7 +1897,6 @@ static struct target_core_fabric_ops usbg_ops = {
.tpg_alloc_fabric_acl = usbg_alloc_fabric_acl,
.tpg_release_fabric_acl = usbg_release_fabric_acl,
.tpg_get_inst_index = usbg_tpg_get_inst_index,
- .new_cmd_map = usbg_new_cmd,
.release_cmd = usbg_release_cmd,
.shutdown_session = usbg_shutdown_session,
.close_session = usbg_close_session,
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 47cf48b..b9e1925 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -724,7 +724,7 @@ static int get_ether_addr(const char *str, u8 *dev_addr)
if (is_valid_ether_addr(dev_addr))
return 0;
}
- random_ether_addr(dev_addr);
+ eth_random_addr(dev_addr);
return 1;
}
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 83e58df..dcfaaa9 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -308,7 +308,7 @@ config USB_OHCI_HCD
config USB_OHCI_HCD_OMAP1
bool "OHCI support for OMAP1/2 chips"
- depends on USB_OHCI_HCD && (ARCH_OMAP1 || ARCH_OMAP2)
+ depends on USB_OHCI_HCD && ARCH_OMAP1
default y
---help---
Enables support for the OHCI controller on OMAP1/2 chips.
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index b100f5f..1d9401e 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -671,7 +671,9 @@ static int ehci_init(struct usb_hcd *hcd)
hw = ehci->async->hw;
hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
+#if defined(CONFIG_PPC_PS3)
hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7)); /* I = 1 */
+#endif
hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
hw->hw_qtd_next = EHCI_LIST_END(ehci);
ehci->async->qh_state = QH_STATE_LINKED;
@@ -1347,6 +1349,11 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_msm_driver
#endif
+#ifdef CONFIG_TILE_USB
+#include "ehci-tilegx.c"
+#define PLATFORM_DRIVER ehci_hcd_tilegx_driver
+#endif
+
#ifdef CONFIG_USB_EHCI_HCD_PMC_MSP
#include "ehci-pmcmsp.c"
#define PLATFORM_DRIVER ehci_hcd_msp_driver
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index a44294d..c304354 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -43,6 +43,7 @@
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/gpio.h>
+#include <linux/clk.h>
/* EHCI Register Set */
#define EHCI_INSNREG04 (0xA0)
@@ -55,6 +56,15 @@
#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8
#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0
+/* Erratum i693 */
+static struct clk *utmi_p1_fck;
+static struct clk *utmi_p2_fck;
+static struct clk *xclk60mhsp1_ck;
+static struct clk *xclk60mhsp2_ck;
+static struct clk *usbhost_p1_fck;
+static struct clk *usbhost_p2_fck;
+static struct clk *init_60m_fclk;
+
/*-------------------------------------------------------------------------*/
static const struct hc_driver ehci_omap_hc_driver;
@@ -70,6 +80,41 @@ static inline u32 ehci_read(void __iomem *base, u32 reg)
return __raw_readl(base + reg);
}
+/* Erratum i693 workaround sequence */
+static void omap_ehci_erratum_i693(struct ehci_hcd *ehci)
+{
+ int ret = 0;
+
+ /* Switch to the internal 60 MHz clock */
+ ret = clk_set_parent(utmi_p1_fck, init_60m_fclk);
+ if (ret != 0)
+ ehci_err(ehci, "init_60m_fclk set parent"
+ "failed error:%d\n", ret);
+
+ ret = clk_set_parent(utmi_p2_fck, init_60m_fclk);
+ if (ret != 0)
+ ehci_err(ehci, "init_60m_fclk set parent"
+ "failed error:%d\n", ret);
+
+ clk_enable(usbhost_p1_fck);
+ clk_enable(usbhost_p2_fck);
+
+ /* Wait 1ms and switch back to the external clock */
+ mdelay(1);
+ ret = clk_set_parent(utmi_p1_fck, xclk60mhsp1_ck);
+ if (ret != 0)
+ ehci_err(ehci, "xclk60mhsp1_ck set parent"
+ "failed error:%d\n", ret);
+
+ ret = clk_set_parent(utmi_p2_fck, xclk60mhsp2_ck);
+ if (ret != 0)
+ ehci_err(ehci, "xclk60mhsp2_ck set parent"
+ "failed error:%d\n", ret);
+
+ clk_disable(usbhost_p1_fck);
+ clk_disable(usbhost_p2_fck);
+}
+
static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
{
struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev);
@@ -100,6 +145,50 @@ static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
}
}
+static int omap_ehci_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength
+)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ u32 __iomem *status_reg = &ehci->regs->port_status[
+ (wIndex & 0xff) - 1];
+ u32 temp;
+ unsigned long flags;
+ int retval = 0;
+
+ spin_lock_irqsave(&ehci->lock, flags);
+
+ if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
+ temp = ehci_readl(ehci, status_reg);
+ if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
+ retval = -EPIPE;
+ goto done;
+ }
+
+ temp &= ~PORT_WKCONN_E;
+ temp |= PORT_WKDISC_E | PORT_WKOC_E;
+ ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
+
+ omap_ehci_erratum_i693(ehci);
+
+ set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
+ goto done;
+ }
+
+ spin_unlock_irqrestore(&ehci->lock, flags);
+
+ /* Handle the hub control events here */
+ return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+done:
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ return retval;
+}
+
static void disable_put_regulator(
struct ehci_hcd_omap_platform_data *pdata)
{
@@ -192,14 +281,13 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
}
}
+ /* Hold PHYs in reset while initializing EHCI controller */
if (pdata->phy_reset) {
if (gpio_is_valid(pdata->reset_gpio_port[0]))
- gpio_request_one(pdata->reset_gpio_port[0],
- GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
+ gpio_set_value_cansleep(pdata->reset_gpio_port[0], 0);
if (gpio_is_valid(pdata->reset_gpio_port[1]))
- gpio_request_one(pdata->reset_gpio_port[1],
- GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
+ gpio_set_value_cansleep(pdata->reset_gpio_port[1], 0);
/* Hold the PHY in RESET for enough time till DIR is high */
udelay(10);
@@ -241,6 +329,11 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
omap_ehci->hcs_params = readl(&omap_ehci->caps->hcs_params);
ehci_reset(omap_ehci);
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (ret) {
+ dev_err(dev, "failed to add hcd with err %d\n", ret);
+ goto err_add_hcd;
+ }
if (pdata->phy_reset) {
/* Hold the PHY in RESET for enough time till
@@ -255,17 +348,79 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
gpio_set_value_cansleep(pdata->reset_gpio_port[1], 1);
}
- ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
- if (ret) {
- dev_err(dev, "failed to add hcd with err %d\n", ret);
+ /* root ports should always stay powered */
+ ehci_port_power(omap_ehci, 1);
+
+ /* get clocks */
+ utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
+ if (IS_ERR(utmi_p1_fck)) {
+ ret = PTR_ERR(utmi_p1_fck);
+ dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
goto err_add_hcd;
}
- /* root ports should always stay powered */
- ehci_port_power(omap_ehci, 1);
+ xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
+ if (IS_ERR(xclk60mhsp1_ck)) {
+ ret = PTR_ERR(xclk60mhsp1_ck);
+ dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
+ goto err_utmi_p1_fck;
+ }
+
+ utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
+ if (IS_ERR(utmi_p2_fck)) {
+ ret = PTR_ERR(utmi_p2_fck);
+ dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
+ goto err_xclk60mhsp1_ck;
+ }
+
+ xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
+ if (IS_ERR(xclk60mhsp2_ck)) {
+ ret = PTR_ERR(xclk60mhsp2_ck);
+ dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
+ goto err_utmi_p2_fck;
+ }
+
+ usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
+ if (IS_ERR(usbhost_p1_fck)) {
+ ret = PTR_ERR(usbhost_p1_fck);
+ dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
+ goto err_xclk60mhsp2_ck;
+ }
+
+ usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
+ if (IS_ERR(usbhost_p2_fck)) {
+ ret = PTR_ERR(usbhost_p2_fck);
+ dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
+ goto err_usbhost_p1_fck;
+ }
+
+ init_60m_fclk = clk_get(dev, "init_60m_fclk");
+ if (IS_ERR(init_60m_fclk)) {
+ ret = PTR_ERR(init_60m_fclk);
+ dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
+ goto err_usbhost_p2_fck;
+ }
return 0;
+err_usbhost_p2_fck:
+ clk_put(usbhost_p2_fck);
+
+err_usbhost_p1_fck:
+ clk_put(usbhost_p1_fck);
+
+err_xclk60mhsp2_ck:
+ clk_put(xclk60mhsp2_ck);
+
+err_utmi_p2_fck:
+ clk_put(utmi_p2_fck);
+
+err_xclk60mhsp1_ck:
+ clk_put(xclk60mhsp1_ck);
+
+err_utmi_p1_fck:
+ clk_put(utmi_p1_fck);
+
err_add_hcd:
disable_put_regulator(pdata);
pm_runtime_put_sync(dev);
@@ -294,6 +449,15 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
disable_put_regulator(dev->platform_data);
iounmap(hcd->regs);
usb_put_hcd(hcd);
+
+ clk_put(utmi_p1_fck);
+ clk_put(utmi_p2_fck);
+ clk_put(xclk60mhsp1_ck);
+ clk_put(xclk60mhsp2_ck);
+ clk_put(usbhost_p1_fck);
+ clk_put(usbhost_p2_fck);
+ clk_put(init_60m_fclk);
+
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
@@ -364,7 +528,7 @@ static const struct hc_driver ehci_omap_hc_driver = {
* root hub support
*/
.hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
+ .hub_control = omap_ehci_hub_control,
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index bc94d7b..1234817 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -144,14 +144,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
hcd->has_tt = 1;
tdi_reset(ehci);
}
- if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
- /* EHCI #1 or #2 on 6 Series/C200 Series chipset */
- if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
- ehci_info(ehci, "broken D3 during system sleep on ASUS\n");
- hcd->broken_pci_sleep = 1;
- device_set_wakeup_capable(&pdev->dev, false);
- }
- }
break;
case PCI_VENDOR_ID_TDI:
if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index ca819cd..e7cb392 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -126,8 +126,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
goto fail_create_hcd;
}
- if (pdev->dev.platform_data != NULL)
- pdata = pdev->dev.platform_data;
+ pdata = pdev->dev.platform_data;
/* initialize hcd */
hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev,
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 6854823..ab8a3bf 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -46,8 +46,8 @@ static void tegra_ehci_power_up(struct usb_hcd *hcd)
{
struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
- clk_enable(tegra->emc_clk);
- clk_enable(tegra->clk);
+ clk_prepare_enable(tegra->emc_clk);
+ clk_prepare_enable(tegra->clk);
tegra_usb_phy_power_on(tegra->phy);
tegra->host_resumed = 1;
}
@@ -58,8 +58,8 @@ static void tegra_ehci_power_down(struct usb_hcd *hcd)
tegra->host_resumed = 0;
tegra_usb_phy_power_off(tegra->phy);
- clk_disable(tegra->clk);
- clk_disable(tegra->emc_clk);
+ clk_disable_unprepare(tegra->clk);
+ clk_disable_unprepare(tegra->emc_clk);
}
static int tegra_ehci_internal_port_reset(
@@ -671,7 +671,7 @@ static int tegra_ehci_probe(struct platform_device *pdev)
goto fail_clk;
}
- err = clk_enable(tegra->clk);
+ err = clk_prepare_enable(tegra->clk);
if (err)
goto fail_clken;
@@ -682,7 +682,7 @@ static int tegra_ehci_probe(struct platform_device *pdev)
goto fail_emc_clk;
}
- clk_enable(tegra->emc_clk);
+ clk_prepare_enable(tegra->emc_clk);
clk_set_rate(tegra->emc_clk, 400000000);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -782,10 +782,10 @@ fail:
fail_phy:
iounmap(hcd->regs);
fail_io:
- clk_disable(tegra->emc_clk);
+ clk_disable_unprepare(tegra->emc_clk);
clk_put(tegra->emc_clk);
fail_emc_clk:
- clk_disable(tegra->clk);
+ clk_disable_unprepare(tegra->clk);
fail_clken:
clk_put(tegra->clk);
fail_clk:
@@ -820,10 +820,10 @@ static int tegra_ehci_remove(struct platform_device *pdev)
tegra_usb_phy_close(tegra->phy);
iounmap(hcd->regs);
- clk_disable(tegra->clk);
+ clk_disable_unprepare(tegra->clk);
clk_put(tegra->clk);
- clk_disable(tegra->emc_clk);
+ clk_disable_unprepare(tegra->emc_clk);
clk_put(tegra->emc_clk);
kfree(tegra);
diff --git a/drivers/usb/host/ehci-tilegx.c b/drivers/usb/host/ehci-tilegx.c
new file mode 100644
index 0000000..1d215cd
--- /dev/null
+++ b/drivers/usb/host/ehci-tilegx.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/*
+ * Tilera TILE-Gx USB EHCI host controller driver.
+ */
+
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/usb/tilegx.h>
+#include <linux/usb.h>
+
+#include <asm/homecache.h>
+
+#include <gxio/iorpc_usb_host.h>
+#include <gxio/usb_host.h>
+
+static void tilegx_start_ehc(void)
+{
+}
+
+static void tilegx_stop_ehc(void)
+{
+}
+
+static int tilegx_ehci_setup(struct usb_hcd *hcd)
+{
+ int ret = ehci_init(hcd);
+
+ /*
+ * Some drivers do:
+ *
+ * struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ * ehci->need_io_watchdog = 0;
+ *
+ * here, but since this is a new driver we're going to leave the
+ * watchdog enabled. Later we may try to turn it off and see
+ * whether we run into any problems.
+ */
+
+ return ret;
+}
+
+static const struct hc_driver ehci_tilegx_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Tile-Gx EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * Generic hardware linkage.
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+
+ /*
+ * Basic lifecycle operations.
+ */
+ .reset = tilegx_ehci_setup,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * Managing I/O requests and associated device resources.
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+
+ /*
+ * Scheduling support.
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * Root hub support.
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+static int ehci_hcd_tilegx_drv_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct tilegx_usb_platform_data *pdata = pdev->dev.platform_data;
+ pte_t pte = { 0 };
+ int my_cpu = smp_processor_id();
+ int ret;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ /*
+ * Try to initialize our GXIO context; if we can't, the device
+ * doesn't exist.
+ */
+ if (gxio_usb_host_init(&pdata->usb_ctx, pdata->dev_index, 1) != 0)
+ return -ENXIO;
+
+ hcd = usb_create_hcd(&ehci_tilegx_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ /*
+ * We don't use rsrc_start to map in our registers, but it seems like
+ * we ought to set it to something, so we use the register VA.
+ */
+ hcd->rsrc_start =
+ (ulong) gxio_usb_host_get_reg_start(&pdata->usb_ctx);
+ hcd->rsrc_len = gxio_usb_host_get_reg_len(&pdata->usb_ctx);
+ hcd->regs = gxio_usb_host_get_reg_start(&pdata->usb_ctx);
+
+ tilegx_start_ehc();
+
+ ehci = hcd_to_ehci(hcd);
+ ehci->caps = hcd->regs;
+ ehci->regs =
+ hcd->regs + HC_LENGTH(ehci, readl(&ehci->caps->hc_capbase));
+ /* cache this readonly data; minimize chip reads */
+ ehci->hcs_params = readl(&ehci->caps->hcs_params);
+
+ /* Create our IRQs and register them. */
+ pdata->irq = create_irq();
+ if (pdata->irq < 0) {
+ ret = -ENXIO;
+ goto err_no_irq;
+ }
+
+ tile_irq_activate(pdata->irq, TILE_IRQ_PERCPU);
+
+ /* Configure interrupts. */
+ ret = gxio_usb_host_cfg_interrupt(&pdata->usb_ctx,
+ cpu_x(my_cpu), cpu_y(my_cpu),
+ KERNEL_PL, pdata->irq);
+ if (ret) {
+ ret = -ENXIO;
+ goto err_have_irq;
+ }
+
+ /* Register all of our memory. */
+ pte = pte_set_home(pte, PAGE_HOME_HASH);
+ ret = gxio_usb_host_register_client_memory(&pdata->usb_ctx, pte, 0);
+ if (ret) {
+ ret = -ENXIO;
+ goto err_have_irq;
+ }
+
+ ret = usb_add_hcd(hcd, pdata->irq, IRQF_SHARED);
+ if (ret == 0) {
+ platform_set_drvdata(pdev, hcd);
+ return ret;
+ }
+
+err_have_irq:
+ destroy_irq(pdata->irq);
+err_no_irq:
+ tilegx_stop_ehc();
+ usb_put_hcd(hcd);
+ gxio_usb_host_destroy(&pdata->usb_ctx);
+ return ret;
+}
+
+static int ehci_hcd_tilegx_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct tilegx_usb_platform_data *pdata = pdev->dev.platform_data;
+
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+ tilegx_stop_ehc();
+ gxio_usb_host_destroy(&pdata->usb_ctx);
+ destroy_irq(pdata->irq);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static void ehci_hcd_tilegx_drv_shutdown(struct platform_device *pdev)
+{
+ usb_hcd_platform_shutdown(pdev);
+ ehci_hcd_tilegx_drv_remove(pdev);
+}
+
+static struct platform_driver ehci_hcd_tilegx_driver = {
+ .probe = ehci_hcd_tilegx_drv_probe,
+ .remove = ehci_hcd_tilegx_drv_remove,
+ .shutdown = ehci_hcd_tilegx_drv_shutdown,
+ .driver = {
+ .name = "tilegx-ehci",
+ .owner = THIS_MODULE,
+ }
+};
+
+MODULE_ALIAS("platform:tilegx-ehci");
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 9c2cc46..e9713d5 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -270,14 +270,12 @@ static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
*
* Properly shutdown the hcd, call driver's shutdown routine.
*/
-static int ehci_hcd_xilinx_of_shutdown(struct platform_device *op)
+static void ehci_hcd_xilinx_of_shutdown(struct platform_device *op)
{
struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
if (hcd->driver->shutdown)
hcd->driver->shutdown(hcd);
-
- return 0;
}
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index e0adf5c..2b1e8d8 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1100,6 +1100,11 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ohci_octeon_driver
#endif
+#ifdef CONFIG_TILE_USB
+#include "ohci-tilegx.c"
+#define PLATFORM_DRIVER ohci_hcd_tilegx_driver
+#endif
+
#ifdef CONFIG_USB_CNS3XXX_OHCI
#include "ohci-cns3xxx.c"
#define PLATFORM_DRIVER ohci_hcd_cns3xxx_driver
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 836772d..2f3619e 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -317,7 +317,7 @@ static int ohci_bus_resume (struct usb_hcd *hcd)
}
/* Carry out the final steps of resuming the controller device */
-static void ohci_finish_controller_resume(struct usb_hcd *hcd)
+static void __maybe_unused ohci_finish_controller_resume(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
int port;
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 9ce35d0..b02c344 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -20,14 +20,15 @@
#include <linux/clk.h>
#include <linux/gpio.h>
-#include <mach/hardware.h>
#include <asm/io.h>
#include <asm/mach-types.h>
#include <plat/mux.h>
-#include <mach/irqs.h>
#include <plat/fpga.h>
-#include <plat/usb.h>
+
+#include <mach/hardware.h>
+#include <mach/irqs.h>
+#include <mach/usb.h>
/* OMAP-1510 OHCI has its own MMU for DMA */
diff --git a/drivers/usb/host/ohci-tilegx.c b/drivers/usb/host/ohci-tilegx.c
new file mode 100644
index 0000000..1ae7b28
--- /dev/null
+++ b/drivers/usb/host/ohci-tilegx.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/*
+ * Tilera TILE-Gx USB OHCI host controller driver.
+ */
+
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/usb/tilegx.h>
+#include <linux/usb.h>
+
+#include <asm/homecache.h>
+
+#include <gxio/iorpc_usb_host.h>
+#include <gxio/usb_host.h>
+
+static void tilegx_start_ohc(void)
+{
+}
+
+static void tilegx_stop_ohc(void)
+{
+}
+
+static int tilegx_ohci_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int ret;
+
+ ret = ohci_init(ohci);
+ if (ret < 0)
+ return ret;
+
+ ret = ohci_run(ohci);
+ if (ret < 0) {
+ dev_err(hcd->self.controller, "can't start %s\n",
+ hcd->self.bus_name);
+ ohci_stop(hcd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct hc_driver ohci_tilegx_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Tile-Gx OHCI",
+ .hcd_priv_size = sizeof(struct ohci_hcd),
+
+ /*
+ * Generic hardware linkage.
+ */
+ .irq = ohci_irq,
+ .flags = HCD_MEMORY | HCD_LOCAL_MEM | HCD_USB11,
+
+ /*
+ * Basic lifecycle operations.
+ */
+ .start = tilegx_ohci_start,
+ .stop = ohci_stop,
+ .shutdown = ohci_shutdown,
+
+ /*
+ * Managing I/O requests and associated device resources.
+ */
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ /*
+ * Scheduling support.
+ */
+ .get_frame_number = ohci_get_frame,
+
+ /*
+ * Root hub support.
+ */
+ .hub_status_data = ohci_hub_status_data,
+ .hub_control = ohci_hub_control,
+ .start_port_reset = ohci_start_port_reset,
+};
+
+static int ohci_hcd_tilegx_drv_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct tilegx_usb_platform_data *pdata = pdev->dev.platform_data;
+ pte_t pte = { 0 };
+ int my_cpu = smp_processor_id();
+ int ret;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ /*
+ * Try to initialize our GXIO context; if we can't, the device
+ * doesn't exist.
+ */
+ if (gxio_usb_host_init(&pdata->usb_ctx, pdata->dev_index, 0) != 0)
+ return -ENXIO;
+
+ hcd = usb_create_hcd(&ohci_tilegx_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ /*
+ * We don't use rsrc_start to map in our registers, but it seems like
+ * we ought to set it to something, so we use the register VA.
+ */
+ hcd->rsrc_start =
+ (ulong) gxio_usb_host_get_reg_start(&pdata->usb_ctx);
+ hcd->rsrc_len = gxio_usb_host_get_reg_len(&pdata->usb_ctx);
+ hcd->regs = gxio_usb_host_get_reg_start(&pdata->usb_ctx);
+
+ tilegx_start_ohc();
+
+ /* Create our IRQs and register them. */
+ pdata->irq = create_irq();
+ if (pdata->irq < 0) {
+ ret = -ENXIO;
+ goto err_no_irq;
+ }
+
+ tile_irq_activate(pdata->irq, TILE_IRQ_PERCPU);
+
+ /* Configure interrupts. */
+ ret = gxio_usb_host_cfg_interrupt(&pdata->usb_ctx,
+ cpu_x(my_cpu), cpu_y(my_cpu),
+ KERNEL_PL, pdata->irq);
+ if (ret) {
+ ret = -ENXIO;
+ goto err_have_irq;
+ }
+
+ /* Register all of our memory. */
+ pte = pte_set_home(pte, PAGE_HOME_HASH);
+ ret = gxio_usb_host_register_client_memory(&pdata->usb_ctx, pte, 0);
+ if (ret) {
+ ret = -ENXIO;
+ goto err_have_irq;
+ }
+
+ ohci_hcd_init(hcd_to_ohci(hcd));
+
+ ret = usb_add_hcd(hcd, pdata->irq, IRQF_SHARED);
+ if (ret == 0) {
+ platform_set_drvdata(pdev, hcd);
+ return ret;
+ }
+
+err_have_irq:
+ destroy_irq(pdata->irq);
+err_no_irq:
+ tilegx_stop_ohc();
+ usb_put_hcd(hcd);
+ gxio_usb_host_destroy(&pdata->usb_ctx);
+ return ret;
+}
+
+static int ohci_hcd_tilegx_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct tilegx_usb_platform_data *pdata = pdev->dev.platform_data;
+
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+ tilegx_stop_ohc();
+ gxio_usb_host_destroy(&pdata->usb_ctx);
+ destroy_irq(pdata->irq);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static void ohci_hcd_tilegx_drv_shutdown(struct platform_device *pdev)
+{
+ usb_hcd_platform_shutdown(pdev);
+ ohci_hcd_tilegx_drv_remove(pdev);
+}
+
+static struct platform_driver ohci_hcd_tilegx_driver = {
+ .probe = ohci_hcd_tilegx_drv_probe,
+ .remove = ohci_hcd_tilegx_drv_remove,
+ .shutdown = ohci_hcd_tilegx_drv_shutdown,
+ .driver = {
+ .name = "tilegx-ohci",
+ .owner = THIS_MODULE,
+ }
+};
+
+MODULE_ALIAS("platform:tilegx-ohci");
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 2732ef6..7b01094 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -462,6 +462,42 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
}
}
+/* Updates the link status for a SuperSpeed port */
+static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
+{
+ u32 pls = status_reg & PORT_PLS_MASK;
+
+ /* resume state is an xHCI internal state.
+ * Do not report it to usb core.
+ */
+ if (pls == XDEV_RESUME)
+ return;
+
+ /* When the CAS bit is set, a warm reset
+ * should be performed on the port.
+ */
+ if (status_reg & PORT_CAS) {
+ /* The CAS bit can be set while the port is
+ * in any link state.
+ * Only roothubs have CAS bit, so we
+ * pretend to be in compliance mode
+ * unless we're already in compliance
+ * or the inactive state.
+ */
+ if (pls != USB_SS_PORT_LS_COMP_MOD &&
+ pls != USB_SS_PORT_LS_SS_INACTIVE) {
+ pls = USB_SS_PORT_LS_COMP_MOD;
+ }
+ /* Also return the connection bit -
+ * hub state machine resets port
+ * when this bit is set.
+ */
+ pls |= USB_PORT_STAT_CONNECTION;
+ }
+ /* update status field */
+ *status |= pls;
+}
+
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
@@ -606,13 +642,9 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
else
status |= USB_PORT_STAT_POWER;
}
- /* Port Link State */
+ /* Update Port Link State for SuperSpeed ports */
if (hcd->speed == HCD_USB3) {
- /* resume state is a xHCI internal state.
- * Do not report it to usb core.
- */
- if ((temp & PORT_PLS_MASK) != XDEV_RESUME)
- status |= (temp & PORT_PLS_MASK);
+ xhci_hub_report_link_state(&status, temp);
}
if (bus_state->port_c_suspend & (1 << wIndex))
status |= 1 << USB_PORT_FEAT_C_SUSPEND;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index ec4338e..77689bd 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -793,10 +793,9 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
int slot_id)
{
- struct list_head *tt;
struct list_head *tt_list_head;
- struct list_head *tt_next;
- struct xhci_tt_bw_info *tt_info;
+ struct xhci_tt_bw_info *tt_info, *next;
+ bool slot_found = false;
/* If the device never made it past the Set Address stage,
* it may not have the real_port set correctly.
@@ -808,34 +807,16 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
}
tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
- if (list_empty(tt_list_head))
- return;
-
- list_for_each(tt, tt_list_head) {
- tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list);
- if (tt_info->slot_id == slot_id)
+ list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
+ /* Multi-TT hubs will have more than one entry */
+ if (tt_info->slot_id == slot_id) {
+ slot_found = true;
+ list_del(&tt_info->tt_list);
+ kfree(tt_info);
+ } else if (slot_found) {
break;
+ }
}
- /* Cautionary measure in case the hub was disconnected before we
- * stored the TT information.
- */
- if (tt_info->slot_id != slot_id)
- return;
-
- tt_next = tt->next;
- tt_info = list_entry(tt, struct xhci_tt_bw_info,
- tt_list);
- /* Multi-TT hubs will have more than one entry */
- do {
- list_del(tt);
- kfree(tt_info);
- tt = tt_next;
- if (list_empty(tt_list_head))
- break;
- tt_next = tt->next;
- tt_info = list_entry(tt, struct xhci_tt_bw_info,
- tt_list);
- } while (tt_info->slot_id == slot_id);
}
int xhci_alloc_tt_info(struct xhci_hcd *xhci,
@@ -1791,17 +1772,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
struct dev_info *dev_info, *next;
- struct list_head *tt_list_head;
- struct list_head *tt;
- struct list_head *endpoints;
- struct list_head *ep, *q;
- struct xhci_tt_bw_info *tt_info;
- struct xhci_interval_bw_table *bwt;
- struct xhci_virt_ep *virt_ep;
-
unsigned long flags;
int size;
- int i;
+ int i, j, num_ports;
/* Free the Event Ring Segment Table and the actual Event Ring */
size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
@@ -1860,21 +1833,22 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
}
spin_unlock_irqrestore(&xhci->lock, flags);
- bwt = &xhci->rh_bw->bw_table;
- for (i = 0; i < XHCI_MAX_INTERVAL; i++) {
- endpoints = &bwt->interval_bw[i].endpoints;
- list_for_each_safe(ep, q, endpoints) {
- virt_ep = list_entry(ep, struct xhci_virt_ep, bw_endpoint_list);
- list_del(&virt_ep->bw_endpoint_list);
- kfree(virt_ep);
+ num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+ for (i = 0; i < num_ports; i++) {
+ struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
+ for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
+ struct list_head *ep = &bwt->interval_bw[j].endpoints;
+ while (!list_empty(ep))
+ list_del_init(ep->next);
}
}
- tt_list_head = &xhci->rh_bw->tts;
- list_for_each_safe(tt, q, tt_list_head) {
- tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list);
- list_del(tt);
- kfree(tt_info);
+ for (i = 0; i < num_ports; i++) {
+ struct xhci_tt_bw_info *tt, *n;
+ list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
+ list_del(&tt->tt_list);
+ kfree(tt);
+ }
}
xhci->num_usb2_ports = 0;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 23b4aef..8275645 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -885,6 +885,17 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
num_trbs_free_temp = ep_ring->num_trbs_free;
dequeue_temp = ep_ring->dequeue;
+ /* If we get two back-to-back stalls, and the first stalled transfer
+ * ends just before a link TRB, the dequeue pointer will be left on
+ * the link TRB by the code in the while loop. So we have to update
+ * the dequeue pointer one segment further, or we'll jump off
+ * the segment into la-la-land.
+ */
+ if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
+ ep_ring->deq_seg = ep_ring->deq_seg->next;
+ ep_ring->dequeue = ep_ring->deq_seg->trbs;
+ }
+
while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
/* We have more usable TRBs */
ep_ring->num_trbs_free++;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index afdc73e..a979cd0 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -795,8 +795,8 @@ int xhci_suspend(struct xhci_hcd *xhci)
command = xhci_readl(xhci, &xhci->op_regs->command);
command |= CMD_CSS;
xhci_writel(xhci, command, &xhci->op_regs->command);
- if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
- xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
+ if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) {
+ xhci_warn(xhci, "WARN: xHC save state timeout\n");
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
}
@@ -848,8 +848,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
command |= CMD_CRS;
xhci_writel(xhci, command, &xhci->op_regs->command);
if (handshake(xhci, &xhci->op_regs->status,
- STS_RESTORE, 0, 10*100)) {
- xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n");
+ STS_RESTORE, 0, 10 * 1000)) {
+ xhci_warn(xhci, "WARN: xHC restore state timeout\n");
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
}
@@ -3906,7 +3906,7 @@ static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
default:
dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
__func__);
- return -EINVAL;
+ return USB3_LPM_DISABLED;
}
if (sel <= max_sel_pel && pel <= max_sel_pel)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index de3d6e3..55c0785 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -341,7 +341,11 @@ struct xhci_op_regs {
#define PORT_PLC (1 << 22)
/* port configure error change - port failed to configure its link partner */
#define PORT_CEC (1 << 23)
-/* bit 24 reserved */
+/* Cold Attach Status - xHC can set this bit to report device attached during
+ * Sx state. A warm port reset should be performed to clear this bit and move the port
+ * to connected state.
+ */
+#define PORT_CAS (1 << 24)
/* wake on connect (enable) */
#define PORT_WKCONN_E (1 << 25)
/* wake on disconnect (enable) */
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 768b4b5..9d63ba4 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -34,6 +34,7 @@
#include <linux/dma-mapping.h>
#include <mach/cputype.h>
+#include <mach/hardware.h>
#include <asm/mach-types.h>
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h
index 046c844..371baa0 100644
--- a/drivers/usb/musb/davinci.h
+++ b/drivers/usb/musb/davinci.h
@@ -15,7 +15,7 @@
*/
/* Integrated highspeed/otg PHY */
-#define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34)
+#define USBPHY_CTL_PADDR 0x01c40034
#define USBPHY_DATAPOL BIT(11) /* (dm355) switch D+/D- */
#define USBPHY_PHYCLKGD BIT(8)
#define USBPHY_SESNDEN BIT(7) /* v(sess_end) comparator */
@@ -27,7 +27,7 @@
#define USBPHY_OTGPDWN BIT(1)
#define USBPHY_PHYPDWN BIT(0)
-#define DM355_DEEPSLEEP_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x48)
+#define DM355_DEEPSLEEP_PADDR 0x01c40048
#define DRVVBUS_FORCE BIT(2)
#define DRVVBUS_OVERRIDE BIT(1)
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index f42c29b..95918da 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1232,6 +1232,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
}
musb_ep->desc = NULL;
+ musb_ep->end_point.desc = NULL;
/* abort all pending DMA and requests */
nuke(musb_ep, -ESHUTDOWN);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index ef8d744..e090c79 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -375,11 +375,21 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
*/
if (list_empty(&qh->hep->urb_list)) {
struct list_head *head;
+ struct dma_controller *dma = musb->dma_controller;
- if (is_in)
+ if (is_in) {
ep->rx_reinit = 1;
- else
+ if (ep->rx_channel) {
+ dma->channel_release(ep->rx_channel);
+ ep->rx_channel = NULL;
+ }
+ } else {
ep->tx_reinit = 1;
+ if (ep->tx_channel) {
+ dma->channel_release(ep->tx_channel);
+ ep->tx_channel = NULL;
+ }
+ }
/* Clobber old pointers to this qh */
musb_ep_set_qh(ep, is_in, NULL);
diff --git a/drivers/usb/otg/isp1301_omap.c b/drivers/usb/otg/isp1301_omap.c
index 70cf5d7..e0558df 100644
--- a/drivers/usb/otg/isp1301_omap.c
+++ b/drivers/usb/otg/isp1301_omap.c
@@ -36,9 +36,9 @@
#include <asm/irq.h>
#include <asm/mach-types.h>
-#include <plat/usb.h>
#include <plat/mux.h>
+#include <mach/usb.h>
#ifndef DEBUG
#undef VERBOSE
diff --git a/drivers/usb/otg/twl6030-usb.c b/drivers/usb/otg/twl6030-usb.c
index d2a9a8e..0eabb04 100644
--- a/drivers/usb/otg/twl6030-usb.c
+++ b/drivers/usb/otg/twl6030-usb.c
@@ -305,9 +305,8 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
regulator_enable(twl->usb3v3);
twl->asleep = 1;
- twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_CLR, 0x1);
- twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET,
- 0x10);
+ twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_CLR);
+ twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET);
status = USB_EVENT_ID;
otg->default_a = true;
twl->phy.state = OTG_STATE_A_IDLE;
@@ -316,12 +315,10 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
atomic_notifier_call_chain(&twl->phy.notifier, status,
otg->gadget);
} else {
- twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_CLR,
- 0x10);
- twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET,
- 0x1);
+ twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR);
+ twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
}
- twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_LATCH_CLR, status);
+ twl6030_writeb(twl, TWL_MODULE_USB, status, USB_ID_INT_LATCH_CLR);
return IRQ_HANDLED;
}
@@ -343,7 +340,7 @@ static int twl6030_enable_irq(struct usb_phy *x)
{
struct twl6030_usb *twl = phy_to_twl(x);
- twl6030_writeb(twl, TWL_MODULE_USB, USB_ID_INT_EN_HI_SET, 0x1);
+ twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
twl6030_interrupt_unmask(0x05, REG_INT_MSK_LINE_C);
twl6030_interrupt_unmask(0x05, REG_INT_MSK_STS_C);
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 3cfabcb..e7cf84f 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -2,11 +2,11 @@
# Physical Layer USB driver configuration
#
comment "USB Physical Layer drivers"
- depends on USB
+ depends on USB || USB_GADGET
config USB_ISP1301
tristate "NXP ISP1301 USB transceiver support"
- depends on USB
+ depends on USB || USB_GADGET
depends on I2C
help
Say Y here to add support for the NXP ISP1301 USB transceiver driver.
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 1b19262..1e71079 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -82,6 +82,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
{ USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
{ USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
+ { USB_DEVICE(0x10C4, 0x80C4) }, /* Cygnal Integrated Products, Inc., Optris infrared thermometer */
{ USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
{ USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
{ USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */
@@ -92,6 +93,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */
{ USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
+ { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
{ USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
{ USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
@@ -133,7 +135,13 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
{ USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
+ { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
+ { USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */
{ USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */
+ { USB_DEVICE(0x166A, 0x0304) }, /* Clipsal 5000CT2 C-Bus Black and White Touchscreen */
+ { USB_DEVICE(0x166A, 0x0305) }, /* Clipsal C-5000CT2 C-Bus Spectrum Colour Touchscreen */
+ { USB_DEVICE(0x166A, 0x0401) }, /* Clipsal L51xx C-Bus Architectural Dimmer */
+ { USB_DEVICE(0x166A, 0x0101) }, /* Clipsal 5560884 C-Bus Multi-room Audio Matrix Switcher */
{ USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */
{ USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */
{ USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
@@ -145,7 +153,11 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
+ { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
+ { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
+ { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ } /* Terminating Entry */
};
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8c084ea..bc912e5 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -737,6 +737,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index f3c7c78..5661c7e 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -784,6 +784,7 @@
#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
#define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */
#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */
+#define RTSYSTEMS_RTS01_PID 0x9e57 /* USB-RTS01 Radio Cable */
/*
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 105a6d8..9b026bf 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -39,13 +39,6 @@ MODULE_PARM_DESC(product, "User specified USB idProduct");
static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */
-/* we want to look at all devices, as the vendor/product id can change
- * depending on the command line argument */
-static const struct usb_device_id generic_serial_ids[] = {
- {.driver_info = 42},
- {}
-};
-
/* All of the device info needed for the Generic Serial Converter */
struct usb_serial_driver usb_serial_generic_device = {
.driver = {
@@ -79,7 +72,8 @@ int usb_serial_generic_register(int _debug)
USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT;
/* register our generic driver with ourselves */
- retval = usb_serial_register_drivers(serial_drivers, "usbserial_generic", generic_serial_ids);
+ retval = usb_serial_register_drivers(serial_drivers,
+ "usbserial_generic", generic_device_ids);
#endif
return retval;
}
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index d0ec1aa..a71fa0a 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -309,13 +309,16 @@ static int mct_u232_set_modem_ctrl(struct usb_serial *serial,
MCT_U232_SET_REQUEST_TYPE,
0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE,
WDR_TIMEOUT);
- if (rc < 0)
- dev_err(&serial->dev->dev,
- "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
+ kfree(buf);
+
dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr);
- kfree(buf);
- return rc;
+ if (rc < 0) {
+ dev_err(&serial->dev->dev,
+ "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
+ return rc;
+ }
+ return 0;
} /* mct_u232_set_modem_ctrl */
static int mct_u232_get_modem_stat(struct usb_serial *serial,
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index 81423f7..d47eb06 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -222,14 +222,6 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
metro_priv->throttled = 0;
spin_unlock_irqrestore(&metro_priv->lock, flags);
- /*
- * Force low_latency on so that our tty_push actually forces the data
- * through, otherwise it is scheduled, and with high data rates (like
- * with OHCI) data can get lost.
- */
- if (tty)
- tty->low_latency = 1;
-
/* Clear the urb pipe. */
usb_clear_halt(serial->dev, port->interrupt_in_urb->pipe);
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 29160f8..57eca24 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -190,7 +190,7 @@
static int device_type;
-static const struct usb_device_id id_table[] __devinitconst = {
+static const struct usb_device_id id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)},
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1aae902..417ab1b 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -47,6 +47,7 @@
/* Function prototypes */
static int option_probe(struct usb_serial *serial,
const struct usb_device_id *id);
+static void option_release(struct usb_serial *serial);
static int option_send_setup(struct usb_serial_port *port);
static void option_instat_callback(struct urb *urb);
@@ -150,6 +151,7 @@ static void option_instat_callback(struct urb *urb);
#define HUAWEI_PRODUCT_E14AC 0x14AC
#define HUAWEI_PRODUCT_K3806 0x14AE
#define HUAWEI_PRODUCT_K4605 0x14C6
+#define HUAWEI_PRODUCT_K5005 0x14C8
#define HUAWEI_PRODUCT_K3770 0x14C9
#define HUAWEI_PRODUCT_K3771 0x14CA
#define HUAWEI_PRODUCT_K4510 0x14CB
@@ -234,6 +236,7 @@ static void option_instat_callback(struct urb *urb);
#define NOVATELWIRELESS_PRODUCT_G1 0xA001
#define NOVATELWIRELESS_PRODUCT_G1_M 0xA002
#define NOVATELWIRELESS_PRODUCT_G2 0xA010
+#define NOVATELWIRELESS_PRODUCT_MC551 0xB001
/* AMOI PRODUCTS */
#define AMOI_VENDOR_ID 0x1614
@@ -425,7 +428,7 @@ static void option_instat_callback(struct urb *urb);
#define SAMSUNG_VENDOR_ID 0x04e8
#define SAMSUNG_PRODUCT_GT_B3730 0x6889
-/* YUGA products www.yuga-info.com*/
+/* YUGA products www.yuga-info.com gavin.kx@qq.com */
#define YUGA_VENDOR_ID 0x257A
#define YUGA_PRODUCT_CEM600 0x1601
#define YUGA_PRODUCT_CEM610 0x1602
@@ -442,6 +445,8 @@ static void option_instat_callback(struct urb *urb);
#define YUGA_PRODUCT_CEU516 0x160C
#define YUGA_PRODUCT_CEU528 0x160D
#define YUGA_PRODUCT_CEU526 0x160F
+#define YUGA_PRODUCT_CEU881 0x161F
+#define YUGA_PRODUCT_CEU882 0x162F
#define YUGA_PRODUCT_CWM600 0x2601
#define YUGA_PRODUCT_CWM610 0x2602
@@ -457,23 +462,26 @@ static void option_instat_callback(struct urb *urb);
#define YUGA_PRODUCT_CWU518 0x260B
#define YUGA_PRODUCT_CWU516 0x260C
#define YUGA_PRODUCT_CWU528 0x260D
+#define YUGA_PRODUCT_CWU581 0x260E
#define YUGA_PRODUCT_CWU526 0x260F
-
-#define YUGA_PRODUCT_CLM600 0x2601
-#define YUGA_PRODUCT_CLM610 0x2602
-#define YUGA_PRODUCT_CLM500 0x2603
-#define YUGA_PRODUCT_CLM510 0x2604
-#define YUGA_PRODUCT_CLM800 0x2605
-#define YUGA_PRODUCT_CLM900 0x2606
-
-#define YUGA_PRODUCT_CLU718 0x2607
-#define YUGA_PRODUCT_CLU716 0x2608
-#define YUGA_PRODUCT_CLU728 0x2609
-#define YUGA_PRODUCT_CLU726 0x260A
-#define YUGA_PRODUCT_CLU518 0x260B
-#define YUGA_PRODUCT_CLU516 0x260C
-#define YUGA_PRODUCT_CLU528 0x260D
-#define YUGA_PRODUCT_CLU526 0x260F
+#define YUGA_PRODUCT_CWU582 0x261F
+#define YUGA_PRODUCT_CWU583 0x262F
+
+#define YUGA_PRODUCT_CLM600 0x3601
+#define YUGA_PRODUCT_CLM610 0x3602
+#define YUGA_PRODUCT_CLM500 0x3603
+#define YUGA_PRODUCT_CLM510 0x3604
+#define YUGA_PRODUCT_CLM800 0x3605
+#define YUGA_PRODUCT_CLM900 0x3606
+
+#define YUGA_PRODUCT_CLU718 0x3607
+#define YUGA_PRODUCT_CLU716 0x3608
+#define YUGA_PRODUCT_CLU728 0x3609
+#define YUGA_PRODUCT_CLU726 0x360A
+#define YUGA_PRODUCT_CLU518 0x360B
+#define YUGA_PRODUCT_CLU516 0x360C
+#define YUGA_PRODUCT_CLU528 0x360D
+#define YUGA_PRODUCT_CLU526 0x360F
/* Viettel products */
#define VIETTEL_VENDOR_ID 0x2262
@@ -489,6 +497,19 @@ static void option_instat_callback(struct urb *urb);
/* MediaTek products */
#define MEDIATEK_VENDOR_ID 0x0e8d
+#define MEDIATEK_PRODUCT_DC_1COM 0x00a0
+#define MEDIATEK_PRODUCT_DC_4COM 0x00a5
+#define MEDIATEK_PRODUCT_DC_5COM 0x00a4
+#define MEDIATEK_PRODUCT_7208_1COM 0x7101
+#define MEDIATEK_PRODUCT_7208_2COM 0x7102
+#define MEDIATEK_PRODUCT_FP_1COM 0x0003
+#define MEDIATEK_PRODUCT_FP_2COM 0x0023
+#define MEDIATEK_PRODUCT_FPDC_1COM 0x0043
+#define MEDIATEK_PRODUCT_FPDC_2COM 0x0033
+
+/* Cellient products */
+#define CELLIENT_VENDOR_ID 0x2692
+#define CELLIENT_PRODUCT_MEN200 0x9005
/* some devices interfaces need special handling due to a number of reasons */
enum option_blacklist_reason {
@@ -542,6 +563,10 @@ static const struct option_blacklist_info net_intf1_blacklist = {
.reserved = BIT(1),
};
+static const struct option_blacklist_info net_intf2_blacklist = {
+ .reserved = BIT(2),
+};
+
static const struct option_blacklist_info net_intf3_blacklist = {
.reserved = BIT(3),
};
@@ -666,6 +691,11 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x31) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x32) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x33) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
@@ -722,6 +752,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) },
+ /* Novatel Ovation MC551 a.k.a. Verizon USB551L */
+ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) },
{ USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) },
@@ -1080,6 +1112,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
@@ -1209,6 +1243,11 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU881) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU882) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU581) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU582) },
+ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU583) },
{ USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) },
{ USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */
@@ -1216,6 +1255,18 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) }, /* MediaTek MT6276M modem & app port */
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_1COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_1COM, 0x02, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_2COM, 0x02, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_1COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
@@ -1245,7 +1296,7 @@ static struct usb_serial_driver option_1port_device = {
.ioctl = usb_wwan_ioctl,
.attach = usb_wwan_startup,
.disconnect = usb_wwan_disconnect,
- .release = usb_wwan_release,
+ .release = option_release,
.read_int_callback = option_instat_callback,
#ifdef CONFIG_PM
.suspend = usb_wwan_suspend,
@@ -1259,35 +1310,6 @@ static struct usb_serial_driver * const serial_drivers[] = {
static bool debug;
-/* per port private data */
-
-#define N_IN_URB 4
-#define N_OUT_URB 4
-#define IN_BUFLEN 4096
-#define OUT_BUFLEN 4096
-
-struct option_port_private {
- /* Input endpoints and buffer for this port */
- struct urb *in_urbs[N_IN_URB];
- u8 *in_buffer[N_IN_URB];
- /* Output endpoints and buffer for this port */
- struct urb *out_urbs[N_OUT_URB];
- u8 *out_buffer[N_OUT_URB];
- unsigned long out_busy; /* Bit vector of URBs in use */
- int opened;
- struct usb_anchor delayed;
-
- /* Settings for the port */
- int rts_state; /* Handshaking pins (outputs) */
- int dtr_state;
- int cts_state; /* Handshaking pins (inputs) */
- int dsr_state;
- int dcd_state;
- int ri_state;
-
- unsigned long tx_start_time[N_OUT_URB];
-};
-
module_usb_serial_driver(serial_drivers, option_ids);
static bool is_blacklisted(const u8 ifnum, enum option_blacklist_reason reason,
@@ -1356,12 +1378,22 @@ static int option_probe(struct usb_serial *serial,
return 0;
}
+static void option_release(struct usb_serial *serial)
+{
+ struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
+
+ usb_wwan_release(serial);
+
+ kfree(priv);
+}
+
static void option_instat_callback(struct urb *urb)
{
int err;
int status = urb->status;
struct usb_serial_port *port = urb->context;
- struct option_port_private *portdata = usb_get_serial_port_data(port);
+ struct usb_wwan_port_private *portdata =
+ usb_get_serial_port_data(port);
dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata);
@@ -1421,7 +1453,7 @@ static int option_send_setup(struct usb_serial_port *port)
struct usb_serial *serial = port->serial;
struct usb_wwan_intf_private *intfdata =
(struct usb_wwan_intf_private *) serial->private;
- struct option_port_private *portdata;
+ struct usb_wwan_port_private *portdata;
int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
int val = 0;
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 0d5fe59..996015c 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -105,7 +105,13 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */
{USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */
{USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
+ {USB_DEVICE(0x1199, 0x9010)}, /* Sierra Wireless Gobi 3000 QDL */
+ {USB_DEVICE(0x1199, 0x9012)}, /* Sierra Wireless Gobi 3000 QDL */
{USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
+ {USB_DEVICE(0x1199, 0x9014)}, /* Sierra Wireless Gobi 3000 QDL */
+ {USB_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */
+ {USB_DEVICE(0x1199, 0x9018)}, /* Sierra Wireless Gobi 3000 QDL */
+ {USB_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
{USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */
{USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */
{ } /* Terminating entry */
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index ba54a0a..d423d36 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -294,6 +294,10 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
+ /* AT&T Direct IP LTE modems */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
+ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+ },
{ USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 6a1b609..27483f9 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -659,12 +659,14 @@ exit:
static struct usb_serial_driver *search_serial_device(
struct usb_interface *iface)
{
- const struct usb_device_id *id;
+ const struct usb_device_id *id = NULL;
struct usb_serial_driver *drv;
+ struct usb_driver *driver = to_usb_driver(iface->dev.driver);
/* Check if the usb id matches a known device */
list_for_each_entry(drv, &usb_serial_driver_list, driver_list) {
- id = get_iface_id(drv, iface);
+ if (drv->usb_driver == driver)
+ id = get_iface_id(drv, iface);
if (id)
return drv;
}
@@ -755,7 +757,7 @@ static int usb_serial_probe(struct usb_interface *interface,
if (retval) {
dbg("sub driver rejected device");
- kfree(serial);
+ usb_serial_put(serial);
module_put(type->driver.owner);
return retval;
}
@@ -827,7 +829,7 @@ static int usb_serial_probe(struct usb_interface *interface,
*/
if (num_bulk_in == 0 || num_bulk_out == 0) {
dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n");
- kfree(serial);
+ usb_serial_put(serial);
module_put(type->driver.owner);
return -ENODEV;
}
@@ -841,7 +843,7 @@ static int usb_serial_probe(struct usb_interface *interface,
if (num_ports == 0) {
dev_err(&interface->dev,
"Generic device with no bulk out, not allowed.\n");
- kfree(serial);
+ usb_serial_put(serial);
module_put(type->driver.owner);
return -EIO;
}
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index a324a5d..11418da 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -202,6 +202,12 @@ static int slave_configure(struct scsi_device *sdev)
if (us->fflags & US_FL_NO_READ_CAPACITY_16)
sdev->no_read_capacity_16 = 1;
+ /*
+ * Many devices do not respond properly to READ_CAPACITY_16.
+ * Tell the SCSI layer to try READ_CAPACITY_10 first.
+ */
+ sdev->try_rc_10_first = 1;
+
/* assume SPC3 or later devices support sense size > 18 */
if (sdev->scsi_level > SCSI_SPC_2)
us->fflags |= US_FL_SANE_SENSE;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f82a739..072cbba 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -823,14 +823,14 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
return -EFAULT;
return vhost_net_set_backend(n, backend.index, backend.fd);
case VHOST_GET_FEATURES:
- features = VHOST_FEATURES;
+ features = VHOST_NET_FEATURES;
if (copy_to_user(featurep, &features, sizeof features))
return -EFAULT;
return 0;
case VHOST_SET_FEATURES:
if (copy_from_user(&features, featurep, sizeof features))
return -EFAULT;
- if (features & ~VHOST_FEATURES)
+ if (features & ~VHOST_NET_FEATURES)
return -EOPNOTSUPP;
return vhost_net_set_features(n, features);
case VHOST_RESET_OWNER:
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 3de00d9..91d6f06 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -261,14 +261,14 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
return -EFAULT;
return vhost_test_run(n, test);
case VHOST_GET_FEATURES:
- features = VHOST_FEATURES;
+ features = VHOST_NET_FEATURES;
if (copy_to_user(featurep, &features, sizeof features))
return -EFAULT;
return 0;
case VHOST_SET_FEATURES:
if (copy_from_user(&features, featurep, sizeof features))
return -EFAULT;
- if (features & ~VHOST_FEATURES)
+ if (features & ~VHOST_NET_FEATURES)
return -EOPNOTSUPP;
return vhost_test_set_features(n, features);
case VHOST_RESET_OWNER:
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 94dbd25..ef82a0d 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -64,7 +64,7 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
return 0;
}
-static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
+void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
INIT_LIST_HEAD(&work->node);
work->fn = fn;
@@ -137,8 +137,7 @@ void vhost_poll_flush(struct vhost_poll *poll)
vhost_work_flush(poll->dev, &poll->work);
}
-static inline void vhost_work_queue(struct vhost_dev *dev,
- struct vhost_work *work)
+void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
unsigned long flags;
@@ -191,7 +190,9 @@ static int vhost_worker(void *data)
struct vhost_dev *dev = data;
struct vhost_work *work = NULL;
unsigned uninitialized_var(seq);
+ mm_segment_t oldfs = get_fs();
+ set_fs(USER_DS);
use_mm(dev->mm);
for (;;) {
@@ -229,6 +230,7 @@ static int vhost_worker(void *data)
}
unuse_mm(dev->mm);
+ set_fs(oldfs);
return 0;
}
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 8de1fd5..1125af3 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -43,6 +43,9 @@ struct vhost_poll {
struct vhost_dev *dev;
};
+void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
+void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
+
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
unsigned long mask, struct vhost_dev *dev);
void vhost_poll_start(struct vhost_poll *poll, struct file *file);
@@ -201,7 +204,8 @@ enum {
VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
(1ULL << VIRTIO_RING_F_EVENT_IDX) |
- (1ULL << VHOST_F_LOG_ALL) |
+ (1ULL << VHOST_F_LOG_ALL),
+ VHOST_NET_FEATURES = VHOST_FEATURES |
(1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
(1ULL << VIRTIO_NET_F_MRG_RXBUF),
};
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index a290be5..0217f74 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2210,7 +2210,7 @@ config FB_XILINX
config FB_COBALT
tristate "Cobalt server LCD frame buffer support"
- depends on FB && MIPS_COBALT
+ depends on FB && (MIPS_COBALT || MIPS_SEAD3)
config FB_SH7760
bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
@@ -2382,6 +2382,39 @@ config FB_BROADSHEET
and could also have been called by other names when coupled with
a bridge adapter.
+config FB_AUO_K190X
+ tristate "AUO-K190X EPD controller support"
+ depends on FB
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ select FB_DEFERRED_IO
+ help
+ Provides support for epaper controllers from the K190X series
+ of AUO. These controllers can be used to drive epaper displays
+ from Sipix.
+
+ This option enables the common support, shared by the individual
+ controller drivers. You will also have to enable the driver
+ for the controller type used in your device.
+
+config FB_AUO_K1900
+ tristate "AUO-K1900 EPD controller support"
+ depends on FB && FB_AUO_K190X
+ help
+ This driver implements support for the AUO K1900 epd-controller.
+ This controller can drive Sipix epaper displays but can only do
+ serial updates, reducing the number of possible frames per second.
+
+config FB_AUO_K1901
+ tristate "AUO-K1901 EPD controller support"
+ depends on FB && FB_AUO_K190X
+ help
+ This driver implements support for the AUO K1901 epd-controller.
+ This controller can drive Sipix epaper displays and supports
+ concurrent updates, making higher frames per second possible.
+
config FB_JZ4740
tristate "JZ4740 LCD framebuffer support"
depends on FB && MACH_JZ4740
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 9356add..ee8dafb 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -118,6 +118,9 @@ obj-$(CONFIG_FB_PMAGB_B) += pmagb-b-fb.o
obj-$(CONFIG_FB_MAXINE) += maxinefb.o
obj-$(CONFIG_FB_METRONOME) += metronomefb.o
obj-$(CONFIG_FB_BROADSHEET) += broadsheetfb.o
+obj-$(CONFIG_FB_AUO_K190X) += auo_k190x.o
+obj-$(CONFIG_FB_AUO_K1900) += auo_k1900fb.o
+obj-$(CONFIG_FB_AUO_K1901) += auo_k1901fb.o
obj-$(CONFIG_FB_S1D13XXX) += s1d13xxxfb.o
obj-$(CONFIG_FB_SH7760) += sh7760fb.o
obj-$(CONFIG_FB_IMX) += imxfb.o
diff --git a/drivers/video/auo_k1900fb.c b/drivers/video/auo_k1900fb.c
new file mode 100644
index 0000000..c36cf96
--- /dev/null
+++ b/drivers/video/auo_k1900fb.c
@@ -0,0 +1,198 @@
+/*
+ * auo_k1900fb.c -- FB driver for AUO-K1900 controllers
+ *
+ * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on broadsheetfb.c
+ *
+ * Copyright (C) 2008, Jaya Kumar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
+ *
+ * This driver is written to be used with the AUO-K1900 display controller.
+ *
+ * It is intended to be architecture independent. A board specific driver
+ * must be used to perform all the physical IO interactions.
+ *
+ * The controller supports different update modes:
+ * mode0+1 16 step gray (4bit)
+ * mode2 4 step gray (2bit) - FIXME: add strange refresh
+ * mode3 2 step gray (1bit) - FIXME: add strange refresh
+ * mode4 handwriting mode (strange behaviour)
+ * mode5 automatic selection of update mode
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/pm_runtime.h>
+
+#include <video/auo_k190xfb.h>
+
+#include "auo_k190x.h"
+
+/*
+ * AUO-K1900 specific commands
+ */
+
+#define AUOK1900_CMD_PARTIALDISP 0x1001
+#define AUOK1900_CMD_ROTATION 0x1006
+#define AUOK1900_CMD_LUT_STOP 0x1009
+
+#define AUOK1900_INIT_TEMP_AVERAGE (1 << 13)
+#define AUOK1900_INIT_ROTATE(_x) ((_x & 0x3) << 10)
+#define AUOK1900_INIT_RESOLUTION(_res) ((_res & 0x7) << 2)
+
+static void auok1900_init(struct auok190xfb_par *par)
+{
+ struct auok190x_board *board = par->board;
+ u16 init_param = 0;
+
+ init_param |= AUOK1900_INIT_TEMP_AVERAGE;
+ init_param |= AUOK1900_INIT_ROTATE(par->rotation);
+ init_param |= AUOK190X_INIT_INVERSE_WHITE;
+ init_param |= AUOK190X_INIT_FORMAT0;
+ init_param |= AUOK1900_INIT_RESOLUTION(par->resolution);
+ init_param |= AUOK190X_INIT_SHIFT_RIGHT;
+
+ auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param);
+
+ /* let the controller finish */
+ board->wait_for_rdy(par);
+}
+
+static void auok1900_update_region(struct auok190xfb_par *par, int mode,
+ u16 y1, u16 y2)
+{
+ struct device *dev = par->info->device;
+ unsigned char *buf = (unsigned char *)par->info->screen_base;
+ int xres = par->info->var.xres;
+ u16 args[4];
+
+ pm_runtime_get_sync(dev);
+
+ mutex_lock(&(par->io_lock));
+
+ /* y1 and y2 must be a multiple of 2 so drop the lowest bit */
+ y1 &= 0xfffe;
+ y2 &= 0xfffe;
+
+ dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n",
+ 1, y1+1, xres, y2-y1, mode);
+
+	/* FIXME: handle different partial update modes */
+ args[0] = mode | 1;
+ args[1] = y1 + 1;
+ args[2] = xres;
+ args[3] = y2 - y1;
+ buf += y1 * xres;
+ auok190x_send_cmdargs_pixels(par, AUOK1900_CMD_PARTIALDISP, 4, args,
+ ((y2 - y1) * xres)/2, (u16 *) buf);
+ auok190x_send_command(par, AUOK190X_CMD_DATA_STOP);
+
+ par->update_cnt++;
+
+ mutex_unlock(&(par->io_lock));
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
+
+static void auok1900fb_dpy_update_pages(struct auok190xfb_par *par,
+ u16 y1, u16 y2)
+{
+ int mode;
+
+ if (par->update_mode < 0) {
+ mode = AUOK190X_UPDATE_MODE(1);
+ par->last_mode = -1;
+ } else {
+ mode = AUOK190X_UPDATE_MODE(par->update_mode);
+ par->last_mode = par->update_mode;
+ }
+
+ if (par->flash)
+ mode |= AUOK190X_UPDATE_NONFLASH;
+
+ auok1900_update_region(par, mode, y1, y2);
+}
+
+static void auok1900fb_dpy_update(struct auok190xfb_par *par)
+{
+ int mode;
+
+ if (par->update_mode < 0) {
+ mode = AUOK190X_UPDATE_MODE(0);
+ par->last_mode = -1;
+ } else {
+ mode = AUOK190X_UPDATE_MODE(par->update_mode);
+ par->last_mode = par->update_mode;
+ }
+
+ if (par->flash)
+ mode |= AUOK190X_UPDATE_NONFLASH;
+
+ auok1900_update_region(par, mode, 0, par->info->var.yres);
+ par->update_cnt = 0;
+}
+
+static bool auok1900fb_need_refresh(struct auok190xfb_par *par)
+{
+ return (par->update_cnt > 10);
+}
+
+static int __devinit auok1900fb_probe(struct platform_device *pdev)
+{
+ struct auok190x_init_data init;
+ struct auok190x_board *board;
+
+ /* pick up board specific routines */
+ board = pdev->dev.platform_data;
+ if (!board)
+ return -EINVAL;
+
+ /* fill temporary init struct for common init */
+ init.id = "auo_k1900fb";
+ init.board = board;
+ init.update_partial = auok1900fb_dpy_update_pages;
+ init.update_all = auok1900fb_dpy_update;
+ init.need_refresh = auok1900fb_need_refresh;
+ init.init = auok1900_init;
+
+ return auok190x_common_probe(pdev, &init);
+}
+
+static int __devexit auok1900fb_remove(struct platform_device *pdev)
+{
+ return auok190x_common_remove(pdev);
+}
+
+static struct platform_driver auok1900fb_driver = {
+ .probe = auok1900fb_probe,
+ .remove = __devexit_p(auok1900fb_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "auo_k1900fb",
+ .pm = &auok190x_pm,
+ },
+};
+module_platform_driver(auok1900fb_driver);
+
+MODULE_DESCRIPTION("framebuffer driver for the AUO-K1900 EPD controller");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/auo_k1901fb.c b/drivers/video/auo_k1901fb.c
new file mode 100644
index 0000000..1c054c1
--- /dev/null
+++ b/drivers/video/auo_k1901fb.c
@@ -0,0 +1,251 @@
+/*
+ * auo_k1901fb.c -- FB driver for AUO-K1901 controllers
+ *
+ * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on broadsheetfb.c
+ *
+ * Copyright (C) 2008, Jaya Kumar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
+ *
+ * This driver is written to be used with the AUO-K1901 display controller.
+ *
+ * It is intended to be architecture independent. A board specific driver
+ * must be used to perform all the physical IO interactions.
+ *
+ * The controller supports different update modes:
+ * mode0+1 16 step gray (4bit)
+ * mode2+3 4 step gray (2bit)
+ * mode4+5 2 step gray (1bit)
+ * - mode4 is described as "without LUT"
+ * mode7 automatic selection of update mode
+ *
+ * The most interesting difference from the K1900 is the ability to do screen
+ * updates in an asynchronous fashion. Where the K1900 needs to wait for the
+ * current update to complete, the K1901 can already accept further updates.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/pm_runtime.h>
+
+#include <video/auo_k190xfb.h>
+
+#include "auo_k190x.h"
+
+/*
+ * AUO-K1901 specific commands
+ */
+
+#define AUOK1901_CMD_LUT_INTERFACE 0x0005
+#define AUOK1901_CMD_DMA_START 0x1001
+#define AUOK1901_CMD_CURSOR_START 0x1007
+#define AUOK1901_CMD_CURSOR_STOP AUOK190X_CMD_DATA_STOP
+#define AUOK1901_CMD_DDMA_START 0x1009
+
+#define AUOK1901_INIT_GATE_PULSE_LOW (0 << 14)
+#define AUOK1901_INIT_GATE_PULSE_HIGH (1 << 14)
+#define AUOK1901_INIT_SINGLE_GATE (0 << 13)
+#define AUOK1901_INIT_DOUBLE_GATE (1 << 13)
+
+/* Bits to pixels
+ * Mode 15-12 11-8 7-4 3-0
+ * format2 2 T 1 T
+ * format3 1 T 2 T
+ * format4 T 2 T 1
+ * format5 T 1 T 2
+ *
+ * halftone modes:
+ * format6 2 2 1 1
+ * format7 1 1 2 2
+ */
+#define AUOK1901_INIT_FORMAT2 (1 << 7)
+#define AUOK1901_INIT_FORMAT3 ((1 << 7) | (1 << 6))
+#define AUOK1901_INIT_FORMAT4 (1 << 8)
+#define AUOK1901_INIT_FORMAT5 ((1 << 8) | (1 << 6))
+#define AUOK1901_INIT_FORMAT6 ((1 << 8) | (1 << 7))
+#define AUOK1901_INIT_FORMAT7 ((1 << 8) | (1 << 7) | (1 << 6))
+
+/* res[4] to bit 10
+ * res[3-0] to bits 5-2
+ */
+#define AUOK1901_INIT_RESOLUTION(_res) (((_res & (1 << 4)) << 6) \
+ | ((_res & 0xf) << 2))
+
+/*
+ * portrait / landscape orientation in AUOK1901_CMD_DMA_START
+ */
+#define AUOK1901_DMA_ROTATE90(_rot) ((_rot & 1) << 13)
+
+/*
+ * equivalent to 1 << 11; the ~ is needed to get the same rotation as the K1900
+ */
+#define AUOK1901_DDMA_ROTATE180(_rot) ((~_rot & 2) << 10)
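To make the bit layout of the three macros above concrete, here is a minimal standalone sketch (not part of the patch); INIT_RESOLUTION, DMA_ROTATE90 and DDMA_ROTATE180 are shortened, hypothetical copies of the definitions above and the printed values are only illustrative:

#include <stdio.h>

/* shortened copies of the AUOK1901_* macros above, for illustration only */
#define INIT_RESOLUTION(res)	((((res) & (1 << 4)) << 6) | (((res) & 0xf) << 2))
#define DMA_ROTATE90(rot)	(((rot) & 1) << 13)
#define DDMA_ROTATE180(rot)	((~(rot) & 2) << 10)

int main(void)
{
	unsigned int rot;

	/* res[4] lands in bit 10, res[3:0] in bits 5:2 -> 0x040c for 0x13 */
	printf("resolution 0x13 -> init bits 0x%04x\n", INIT_RESOLUTION(0x13u));

	/* bit 13 follows rot[0]; bit 11 is set only while rot[1] is clear */
	for (rot = 0; rot < 4; rot++)
		printf("rot %u -> DMA 0x%04x, DDMA 0x%04x\n", rot,
		       DMA_ROTATE90(rot), DDMA_ROTATE180(rot));
	return 0;
}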
+
+static void auok1901_init(struct auok190xfb_par *par)
+{
+ struct auok190x_board *board = par->board;
+ u16 init_param = 0;
+
+ init_param |= AUOK190X_INIT_INVERSE_WHITE;
+ init_param |= AUOK190X_INIT_FORMAT0;
+ init_param |= AUOK1901_INIT_RESOLUTION(par->resolution);
+ init_param |= AUOK190X_INIT_SHIFT_LEFT;
+
+ auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param);
+
+ /* let the controller finish */
+ board->wait_for_rdy(par);
+}
+
+static void auok1901_update_region(struct auok190xfb_par *par, int mode,
+ u16 y1, u16 y2)
+{
+ struct device *dev = par->info->device;
+ unsigned char *buf = (unsigned char *)par->info->screen_base;
+ int xres = par->info->var.xres;
+ u16 args[5];
+
+ pm_runtime_get_sync(dev);
+
+ mutex_lock(&(par->io_lock));
+
+ /* y1 and y2 must be a multiple of 2 so drop the lowest bit */
+ y1 &= 0xfffe;
+ y2 &= 0xfffe;
+
+ dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n",
+ 1, y1+1, xres, y2-y1, mode);
+
+ /* K1901: first transfer the region data */
+ args[0] = AUOK1901_DMA_ROTATE90(par->rotation) | 1;
+ args[1] = y1 + 1;
+ args[2] = xres;
+ args[3] = y2 - y1;
+ buf += y1 * xres;
+ auok190x_send_cmdargs_pixels_nowait(par, AUOK1901_CMD_DMA_START, 4,
+ args, ((y2 - y1) * xres)/2,
+ (u16 *) buf);
+ auok190x_send_command_nowait(par, AUOK190X_CMD_DATA_STOP);
+
+ /* K1901: second tell the controller to update the region with mode */
+ args[0] = mode | AUOK1901_DDMA_ROTATE180(par->rotation);
+ args[1] = 1;
+ args[2] = y1 + 1;
+ args[3] = xres;
+ args[4] = y2 - y1;
+ auok190x_send_cmdargs_nowait(par, AUOK1901_CMD_DDMA_START, 5, args);
+
+ par->update_cnt++;
+
+ mutex_unlock(&(par->io_lock));
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
+
+static void auok1901fb_dpy_update_pages(struct auok190xfb_par *par,
+ u16 y1, u16 y2)
+{
+ int mode;
+
+ if (par->update_mode < 0) {
+ mode = AUOK190X_UPDATE_MODE(1);
+ par->last_mode = -1;
+ } else {
+ mode = AUOK190X_UPDATE_MODE(par->update_mode);
+ par->last_mode = par->update_mode;
+ }
+
+ if (par->flash)
+ mode |= AUOK190X_UPDATE_NONFLASH;
+
+ auok1901_update_region(par, mode, y1, y2);
+}
+
+static void auok1901fb_dpy_update(struct auok190xfb_par *par)
+{
+ int mode;
+
+	/* When doing full updates, wait for the controller to be ready.
+	 * This will hopefully catch some hangs of the K1901.
+ */
+ par->board->wait_for_rdy(par);
+
+ if (par->update_mode < 0) {
+ mode = AUOK190X_UPDATE_MODE(0);
+ par->last_mode = -1;
+ } else {
+ mode = AUOK190X_UPDATE_MODE(par->update_mode);
+ par->last_mode = par->update_mode;
+ }
+
+ if (par->flash)
+ mode |= AUOK190X_UPDATE_NONFLASH;
+
+ auok1901_update_region(par, mode, 0, par->info->var.yres);
+ par->update_cnt = 0;
+}
+
+static bool auok1901fb_need_refresh(struct auok190xfb_par *par)
+{
+ return (par->update_cnt > 10);
+}
+
+static int __devinit auok1901fb_probe(struct platform_device *pdev)
+{
+ struct auok190x_init_data init;
+ struct auok190x_board *board;
+
+ /* pick up board specific routines */
+ board = pdev->dev.platform_data;
+ if (!board)
+ return -EINVAL;
+
+ /* fill temporary init struct for common init */
+ init.id = "auo_k1901fb";
+ init.board = board;
+ init.update_partial = auok1901fb_dpy_update_pages;
+ init.update_all = auok1901fb_dpy_update;
+ init.need_refresh = auok1901fb_need_refresh;
+ init.init = auok1901_init;
+
+ return auok190x_common_probe(pdev, &init);
+}
+
+static int __devexit auok1901fb_remove(struct platform_device *pdev)
+{
+ return auok190x_common_remove(pdev);
+}
+
+static struct platform_driver auok1901fb_driver = {
+ .probe = auok1901fb_probe,
+ .remove = __devexit_p(auok1901fb_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "auo_k1901fb",
+ .pm = &auok190x_pm,
+ },
+};
+module_platform_driver(auok1901fb_driver);
+
+MODULE_DESCRIPTION("framebuffer driver for the AUO-K1901 EPD controller");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/auo_k190x.c b/drivers/video/auo_k190x.c
new file mode 100644
index 0000000..77da6a2
--- /dev/null
+++ b/drivers/video/auo_k190x.c
@@ -0,0 +1,1046 @@
+/*
+ * Common code for AUO-K190X framebuffer drivers
+ *
+ * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/pm_runtime.h>
+#include <linux/fb.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/auo_k190xfb.h>
+
+#include "auo_k190x.h"
+
+struct panel_info {
+ int w;
+ int h;
+};
+
+/* table of panel specific parameters to be indexed into by the board drivers */
+static struct panel_info panel_table[] = {
+ /* standard 6" */
+ [AUOK190X_RESOLUTION_800_600] = {
+ .w = 800,
+ .h = 600,
+ },
+ /* standard 9" */
+ [AUOK190X_RESOLUTION_1024_768] = {
+ .w = 1024,
+ .h = 768,
+ },
+};
+
+/*
+ * private I80 interface to the board driver
+ */
+
+static void auok190x_issue_data(struct auok190xfb_par *par, u16 data)
+{
+ par->board->set_ctl(par, AUOK190X_I80_WR, 0);
+ par->board->set_hdb(par, data);
+ par->board->set_ctl(par, AUOK190X_I80_WR, 1);
+}
+
+static void auok190x_issue_cmd(struct auok190xfb_par *par, u16 data)
+{
+ par->board->set_ctl(par, AUOK190X_I80_DC, 0);
+ auok190x_issue_data(par, data);
+ par->board->set_ctl(par, AUOK190X_I80_DC, 1);
+}
+
+static int auok190x_issue_pixels(struct auok190xfb_par *par, int size,
+ u16 *data)
+{
+ struct device *dev = par->info->device;
+ int i;
+ u16 tmp;
+
+ if (size & 3) {
+ dev_err(dev, "issue_pixels: size %d must be a multiple of 4\n",
+ size);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < (size >> 1); i++) {
+ par->board->set_ctl(par, AUOK190X_I80_WR, 0);
+
+ /* simple reduction of 8bit staticgray to 4bit gray
+ * combines 4 * 4bit pixel values into a 16bit value
+ */
+ tmp = (data[2*i] & 0xF0) >> 4;
+ tmp |= (data[2*i] & 0xF000) >> 8;
+ tmp |= (data[2*i+1] & 0xF0) << 4;
+ tmp |= (data[2*i+1] & 0xF000);
+
+ par->board->set_hdb(par, tmp);
+ par->board->set_ctl(par, AUOK190X_I80_WR, 1);
+ }
+
+ return 0;
+}
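As a standalone illustration of the nibble shuffling in auok190x_issue_pixels() above (not part of the patch; pack_four_pixels is a hypothetical helper and the framebuffer bytes are assumed to be read as little-endian 16-bit words):

#include <stdint.h>
#include <stdio.h>

/* pack four 8-bit gray pixels, handed over as two 16-bit words, into one
 * 16-bit word of four 4-bit grays, mirroring the shifts used above */
static uint16_t pack_four_pixels(uint16_t lo, uint16_t hi)
{
	uint16_t tmp;

	tmp  = (lo & 0x00F0) >> 4;	/* pixel 0 high nibble -> bits 3:0   */
	tmp |= (lo & 0xF000) >> 8;	/* pixel 1 high nibble -> bits 7:4   */
	tmp |= (hi & 0x00F0) << 4;	/* pixel 2 high nibble -> bits 11:8  */
	tmp |= (hi & 0xF000);		/* pixel 3 high nibble -> bits 15:12 */
	return tmp;
}

int main(void)
{
	/* pixels 0x12, 0x34, 0x56, 0x78 become the word 0x7531 */
	printf("0x%04x\n", pack_four_pixels(0x3412, 0x7856));
	return 0;
}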
+
+static u16 auok190x_read_data(struct auok190xfb_par *par)
+{
+ u16 data;
+
+ par->board->set_ctl(par, AUOK190X_I80_OE, 0);
+ data = par->board->get_hdb(par);
+ par->board->set_ctl(par, AUOK190X_I80_OE, 1);
+
+ return data;
+}
+
+/*
+ * Command interface for the controller drivers
+ */
+
+void auok190x_send_command_nowait(struct auok190xfb_par *par, u16 data)
+{
+ par->board->set_ctl(par, AUOK190X_I80_CS, 0);
+ auok190x_issue_cmd(par, data);
+ par->board->set_ctl(par, AUOK190X_I80_CS, 1);
+}
+EXPORT_SYMBOL_GPL(auok190x_send_command_nowait);
+
+void auok190x_send_cmdargs_nowait(struct auok190xfb_par *par, u16 cmd,
+ int argc, u16 *argv)
+{
+ int i;
+
+ par->board->set_ctl(par, AUOK190X_I80_CS, 0);
+ auok190x_issue_cmd(par, cmd);
+
+ for (i = 0; i < argc; i++)
+ auok190x_issue_data(par, argv[i]);
+ par->board->set_ctl(par, AUOK190X_I80_CS, 1);
+}
+EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_nowait);
+
+int auok190x_send_command(struct auok190xfb_par *par, u16 data)
+{
+ int ret;
+
+ ret = par->board->wait_for_rdy(par);
+ if (ret)
+ return ret;
+
+ auok190x_send_command_nowait(par, data);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_send_command);
+
+int auok190x_send_cmdargs(struct auok190xfb_par *par, u16 cmd,
+ int argc, u16 *argv)
+{
+ int ret;
+
+ ret = par->board->wait_for_rdy(par);
+ if (ret)
+ return ret;
+
+ auok190x_send_cmdargs_nowait(par, cmd, argc, argv);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_send_cmdargs);
+
+int auok190x_read_cmdargs(struct auok190xfb_par *par, u16 cmd,
+ int argc, u16 *argv)
+{
+ int i, ret;
+
+ ret = par->board->wait_for_rdy(par);
+ if (ret)
+ return ret;
+
+ par->board->set_ctl(par, AUOK190X_I80_CS, 0);
+ auok190x_issue_cmd(par, cmd);
+
+ for (i = 0; i < argc; i++)
+ argv[i] = auok190x_read_data(par);
+ par->board->set_ctl(par, AUOK190X_I80_CS, 1);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_read_cmdargs);
+
+void auok190x_send_cmdargs_pixels_nowait(struct auok190xfb_par *par, u16 cmd,
+ int argc, u16 *argv, int size, u16 *data)
+{
+ int i;
+
+ par->board->set_ctl(par, AUOK190X_I80_CS, 0);
+
+ auok190x_issue_cmd(par, cmd);
+
+ for (i = 0; i < argc; i++)
+ auok190x_issue_data(par, argv[i]);
+
+ auok190x_issue_pixels(par, size, data);
+
+ par->board->set_ctl(par, AUOK190X_I80_CS, 1);
+}
+EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_pixels_nowait);
+
+int auok190x_send_cmdargs_pixels(struct auok190xfb_par *par, u16 cmd,
+ int argc, u16 *argv, int size, u16 *data)
+{
+ int ret;
+
+ ret = par->board->wait_for_rdy(par);
+ if (ret)
+ return ret;
+
+ auok190x_send_cmdargs_pixels_nowait(par, cmd, argc, argv, size, data);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_pixels);
+
+/*
+ * fbdefio callbacks - common on both controllers.
+ */
+
+static void auok190xfb_dpy_first_io(struct fb_info *info)
+{
+ /* tell runtime-pm that we wish to use the device in a short time */
+ pm_runtime_get(info->device);
+}
+
+/* this is called back from the deferred io workqueue */
+static void auok190xfb_dpy_deferred_io(struct fb_info *info,
+ struct list_head *pagelist)
+{
+ struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct auok190xfb_par *par = info->par;
+ u16 yres = info->var.yres;
+ u16 xres = info->var.xres;
+ u16 y1 = 0, h = 0;
+ int prev_index = -1;
+ struct page *cur;
+ int h_inc;
+ int threshold;
+
+ if (!list_empty(pagelist))
+		/* the device resume should've been requested through first_io;
+		 * if the resume has not finished yet, wait for it.
+ */
+ pm_runtime_barrier(info->device);
+ else
+ /* We reached this via the fsync or some other way.
+ * In either case the first_io function did not run,
+ * so we runtime_resume the device here synchronously.
+ */
+ pm_runtime_get_sync(info->device);
+
+ /* Do a full screen update every n updates to prevent
+ * excessive darkening of the Sipix display.
+ * If we do this, there is no need to walk the pages.
+ */
+ if (par->need_refresh(par)) {
+ par->update_all(par);
+ goto out;
+ }
+
+ /* height increment is fixed per page */
+ h_inc = DIV_ROUND_UP(PAGE_SIZE , xres);
+
+ /* calculate number of pages from pixel height */
+ threshold = par->consecutive_threshold / h_inc;
+ if (threshold < 1)
+ threshold = 1;
+
+ /* walk the written page list and swizzle the data */
+ list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+ if (prev_index < 0) {
+ /* just starting so assign first page */
+ y1 = (cur->index << PAGE_SHIFT) / xres;
+ h = h_inc;
+ } else if ((cur->index - prev_index) <= threshold) {
+ /* page is within our threshold for single updates */
+ h += h_inc * (cur->index - prev_index);
+ } else {
+ /* page not consecutive, issue previous update first */
+ par->update_partial(par, y1, y1 + h);
+
+ /* start over with our non consecutive page */
+ y1 = (cur->index << PAGE_SHIFT) / xres;
+ h = h_inc;
+ }
+ prev_index = cur->index;
+ }
+
+ /* if we still have any pages to update we do so now */
+ if (h >= yres)
+		/* it's a full screen update, just do it */
+ par->update_all(par);
+ else
+ par->update_partial(par, y1, min((u16) (y1 + h), yres));
+
+out:
+ pm_runtime_mark_last_busy(info->device);
+ pm_runtime_put_autosuspend(info->device);
+}
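A rough standalone sketch (not part of the patch) of the page-to-row arithmetic used by the walk above; PAGE_SIZE_BYTES and XRES are assumed example values for a 4096-byte page and an 800-pixel-wide 8bpp framebuffer:

#include <stdio.h>

#define PAGE_SIZE_BYTES	4096	/* assumed PAGE_SIZE */
#define XRES		800	/* assumed panel width at 8 bits per pixel */

int main(void)
{
	/* rows covered per page, as in DIV_ROUND_UP(PAGE_SIZE, xres) above */
	int h_inc = (PAGE_SIZE_BYTES + XRES - 1) / XRES;
	int index;

	for (index = 0; index < 4; index++)
		printf("page %d -> first row %d, spans %d rows\n",
		       index, (index * PAGE_SIZE_BYTES) / XRES, h_inc);
	return 0;
}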
+
+/*
+ * framebuffer operations
+ */
+
+/*
+ * this is the slow path from userspace. they can seek and write to
+ * the fb. it's inefficient to do anything less than a full screen draw
+ */
+static ssize_t auok190xfb_write(struct fb_info *info, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct auok190xfb_par *par = info->par;
+ unsigned long p = *ppos;
+ void *dst;
+ int err = 0;
+ unsigned long total_size;
+
+ if (info->state != FBINFO_STATE_RUNNING)
+ return -EPERM;
+
+ total_size = info->fix.smem_len;
+
+ if (p > total_size)
+ return -EFBIG;
+
+ if (count > total_size) {
+ err = -EFBIG;
+ count = total_size;
+ }
+
+ if (count + p > total_size) {
+ if (!err)
+ err = -ENOSPC;
+
+ count = total_size - p;
+ }
+
+ dst = (void *)(info->screen_base + p);
+
+ if (copy_from_user(dst, buf, count))
+ err = -EFAULT;
+
+ if (!err)
+ *ppos += count;
+
+ par->update_all(par);
+
+ return (err) ? err : count;
+}
+
+static void auok190xfb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
+{
+ struct auok190xfb_par *par = info->par;
+
+ sys_fillrect(info, rect);
+
+ par->update_all(par);
+}
+
+static void auok190xfb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct auok190xfb_par *par = info->par;
+
+ sys_copyarea(info, area);
+
+ par->update_all(par);
+}
+
+static void auok190xfb_imageblit(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct auok190xfb_par *par = info->par;
+
+ sys_imageblit(info, image);
+
+ par->update_all(par);
+}
+
+static int auok190xfb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ if (info->var.xres != var->xres || info->var.yres != var->yres ||
+ info->var.xres_virtual != var->xres_virtual ||
+ info->var.yres_virtual != var->yres_virtual) {
+ pr_info("%s: Resolution not supported: X%u x Y%u\n",
+ __func__, var->xres, var->yres);
+ return -EINVAL;
+ }
+
+ /*
+ * Memory limit
+ */
+
+ if ((info->fix.line_length * var->yres_virtual) > info->fix.smem_len) {
+ pr_info("%s: Memory Limit requested yres_virtual = %u\n",
+ __func__, var->yres_virtual);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static struct fb_ops auok190xfb_ops = {
+ .owner = THIS_MODULE,
+ .fb_read = fb_sys_read,
+ .fb_write = auok190xfb_write,
+ .fb_fillrect = auok190xfb_fillrect,
+ .fb_copyarea = auok190xfb_copyarea,
+ .fb_imageblit = auok190xfb_imageblit,
+ .fb_check_var = auok190xfb_check_var,
+};
+
+/*
+ * Controller-functions common to both K1900 and K1901
+ */
+
+static int auok190x_read_temperature(struct auok190xfb_par *par)
+{
+ struct device *dev = par->info->device;
+ u16 data[4];
+ int temp;
+
+ pm_runtime_get_sync(dev);
+
+ mutex_lock(&(par->io_lock));
+
+ auok190x_read_cmdargs(par, AUOK190X_CMD_READ_VERSION, 4, data);
+
+ mutex_unlock(&(par->io_lock));
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+	/* sanitize and split off half-degrees for now */
+ temp = ((data[0] & AUOK190X_VERSION_TEMP_MASK) >> 1);
+
+ /* handle positive and negative temperatures */
+ if (temp >= 201)
+ return (255 - temp + 1) * (-1);
+ else
+ return temp;
+}
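For reference, a minimal sketch (not part of the patch) of the same temperature decoding with two example raw values; decode_temp is a hypothetical stand-in for the logic above:

#include <stdio.h>

/* halve the 9-bit field (dropping the half-degree bit) and treat values of
 * 201 and above as negative, exactly as the function above does */
static int decode_temp(unsigned int raw)
{
	int temp = (raw & 0x1ff) >> 1;

	if (temp >= 201)
		return (255 - temp + 1) * (-1);
	return temp;
}

int main(void)
{
	printf("%d\n", decode_temp(50));	/* 50 >> 1 = 25 degrees */
	printf("%d\n", decode_temp(500));	/* 500 >> 1 = 250 -> -6 */
	return 0;
}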
+
+static void auok190x_identify(struct auok190xfb_par *par)
+{
+ struct device *dev = par->info->device;
+ u16 data[4];
+
+ pm_runtime_get_sync(dev);
+
+ mutex_lock(&(par->io_lock));
+
+ auok190x_read_cmdargs(par, AUOK190X_CMD_READ_VERSION, 4, data);
+
+ mutex_unlock(&(par->io_lock));
+
+ par->epd_type = data[1] & AUOK190X_VERSION_TEMP_MASK;
+
+ par->panel_size_int = AUOK190X_VERSION_SIZE_INT(data[2]);
+ par->panel_size_float = AUOK190X_VERSION_SIZE_FLOAT(data[2]);
+ par->panel_model = AUOK190X_VERSION_MODEL(data[2]);
+
+ par->tcon_version = AUOK190X_VERSION_TCON(data[3]);
+ par->lut_version = AUOK190X_VERSION_LUT(data[3]);
+
+ dev_dbg(dev, "panel %d.%din, model 0x%x, EPD 0x%x TCON-rev 0x%x, LUT-rev 0x%x",
+ par->panel_size_int, par->panel_size_float, par->panel_model,
+ par->epd_type, par->tcon_version, par->lut_version);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
+
+/*
+ * Sysfs functions
+ */
+
+static ssize_t update_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct auok190xfb_par *par = info->par;
+
+ return sprintf(buf, "%d\n", par->update_mode);
+}
+
+static ssize_t update_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct auok190xfb_par *par = info->par;
+ int mode, ret;
+
+ ret = kstrtoint(buf, 10, &mode);
+ if (ret)
+ return ret;
+
+ par->update_mode = mode;
+
+ /* if we enter a better mode, do a full update */
+ if (par->last_mode > 1 && mode < par->last_mode)
+ par->update_all(par);
+
+ return count;
+}
+
+static ssize_t flash_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct auok190xfb_par *par = info->par;
+
+ return sprintf(buf, "%d\n", par->flash);
+}
+
+static ssize_t flash_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct auok190xfb_par *par = info->par;
+ int flash, ret;
+
+ ret = kstrtoint(buf, 10, &flash);
+ if (ret)
+ return ret;
+
+ if (flash > 0)
+ par->flash = 1;
+ else
+ par->flash = 0;
+
+ return count;
+}
+
+static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct fb_info *info = dev_get_drvdata(dev);
+ struct auok190xfb_par *par = info->par;
+ int temp;
+
+ temp = auok190x_read_temperature(par);
+ return sprintf(buf, "%d\n", temp);
+}
+
+static DEVICE_ATTR(update_mode, 0644, update_mode_show, update_mode_store);
+static DEVICE_ATTR(flash, 0644, flash_show, flash_store);
+static DEVICE_ATTR(temp, 0644, temp_show, NULL);
+
+static struct attribute *auok190x_attributes[] = {
+ &dev_attr_update_mode.attr,
+ &dev_attr_flash.attr,
+ &dev_attr_temp.attr,
+ NULL
+};
+
+static const struct attribute_group auok190x_attr_group = {
+ .attrs = auok190x_attributes,
+};
+
+static int auok190x_power(struct auok190xfb_par *par, bool on)
+{
+ struct auok190x_board *board = par->board;
+ int ret;
+
+ if (on) {
+		/* We should maintain POWER up for at least 80ms before setting
+		 * RST_N and SLP_N high (TCON spec 20100803_v35 p59)
+ */
+ ret = regulator_enable(par->regulator);
+ if (ret)
+ return ret;
+
+ msleep(200);
+ gpio_set_value(board->gpio_nrst, 1);
+ gpio_set_value(board->gpio_nsleep, 1);
+ msleep(200);
+ } else {
+ regulator_disable(par->regulator);
+ gpio_set_value(board->gpio_nrst, 0);
+ gpio_set_value(board->gpio_nsleep, 0);
+ }
+
+ return 0;
+}
+
+/*
+ * Recovery - powercycle the controller
+ */
+
+static void auok190x_recover(struct auok190xfb_par *par)
+{
+ auok190x_power(par, 0);
+ msleep(100);
+ auok190x_power(par, 1);
+
+ par->init(par);
+
+ /* wait for init to complete */
+ par->board->wait_for_rdy(par);
+}
+
+/*
+ * Power-management
+ */
+
+#ifdef CONFIG_PM
+static int auok190x_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fb_info *info = platform_get_drvdata(pdev);
+ struct auok190xfb_par *par = info->par;
+ struct auok190x_board *board = par->board;
+ u16 standby_param;
+
+ /* take and keep the lock until we are resumed, as the controller
+ * will never reach the non-busy state when in standby mode
+ */
+ mutex_lock(&(par->io_lock));
+
+ if (par->standby) {
+ dev_warn(dev, "already in standby, runtime-pm pairing mismatch\n");
+ mutex_unlock(&(par->io_lock));
+ return 0;
+ }
+
+	/* according to runtime_pm.txt, runtime_suspend only means that the
+	 * device will not process data and will not communicate with the CPU.
+	 * As we hold the lock, this stays true even without standby.
+ */
+ if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
+ dev_dbg(dev, "runtime suspend without standby\n");
+ goto finish;
+ } else if (board->quirks & AUOK190X_QUIRK_STANDBYPARAM) {
+ /* for some TCON versions STANDBY expects a parameter (0) but
+		 * it is not yet clear which TCON versions actually need it.
+ */
+ dev_dbg(dev, "runtime suspend with additional empty param\n");
+ standby_param = 0;
+ auok190x_send_cmdargs(par, AUOK190X_CMD_STANDBY, 1,
+ &standby_param);
+ } else {
+ dev_dbg(dev, "runtime suspend without param\n");
+ auok190x_send_command(par, AUOK190X_CMD_STANDBY);
+ }
+
+ msleep(64);
+
+finish:
+ par->standby = 1;
+
+ return 0;
+}
+
+static int auok190x_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fb_info *info = platform_get_drvdata(pdev);
+ struct auok190xfb_par *par = info->par;
+ struct auok190x_board *board = par->board;
+
+ if (!par->standby) {
+ dev_warn(dev, "not in standby, runtime-pm pairing mismatch\n");
+ return 0;
+ }
+
+ if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
+ dev_dbg(dev, "runtime resume without standby\n");
+ } else {
+ /* when in standby, controller is always busy
+ * and only accepts the wakeup command
+ */
+ dev_dbg(dev, "runtime resume from standby\n");
+ auok190x_send_command_nowait(par, AUOK190X_CMD_WAKEUP);
+
+ msleep(160);
+
+ /* wait for the controller to be ready and release the lock */
+ board->wait_for_rdy(par);
+ }
+
+ par->standby = 0;
+
+ mutex_unlock(&(par->io_lock));
+
+ return 0;
+}
+
+static int auok190x_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fb_info *info = platform_get_drvdata(pdev);
+ struct auok190xfb_par *par = info->par;
+ struct auok190x_board *board = par->board;
+ int ret;
+
+ dev_dbg(dev, "suspend\n");
+ if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
+ /* suspend via powering off the ic */
+ dev_dbg(dev, "suspend with broken standby\n");
+
+ auok190x_power(par, 0);
+ } else {
+ dev_dbg(dev, "suspend using sleep\n");
+
+ /* the sleep state can only be entered from the standby state.
+ * pm_runtime_get_noresume gets called before the suspend call.
+		 * So the device's usage count is >0 but it is not necessarily
+ * active.
+ */
+ if (!pm_runtime_status_suspended(dev)) {
+ ret = auok190x_runtime_suspend(dev);
+ if (ret < 0) {
+ dev_err(dev, "auok190x_runtime_suspend failed with %d\n",
+ ret);
+ return ret;
+ }
+ par->manual_standby = 1;
+ }
+
+ gpio_direction_output(board->gpio_nsleep, 0);
+ }
+
+ msleep(100);
+
+ return 0;
+}
+
+static int auok190x_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fb_info *info = platform_get_drvdata(pdev);
+ struct auok190xfb_par *par = info->par;
+ struct auok190x_board *board = par->board;
+
+ dev_dbg(dev, "resume\n");
+ if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
+ dev_dbg(dev, "resume with broken standby\n");
+
+ auok190x_power(par, 1);
+
+ par->init(par);
+ } else {
+ dev_dbg(dev, "resume from sleep\n");
+
+ /* device should be in runtime suspend when we were suspended
+ * and pm_runtime_put_sync gets called after this function.
+ * So there is no need to touch the standby mode here at all.
+ */
+ gpio_direction_output(board->gpio_nsleep, 1);
+ msleep(100);
+
+ /* an additional init call seems to be necessary after sleep */
+ auok190x_runtime_resume(dev);
+ par->init(par);
+
+		/* if we were runtime-suspended before, suspend again */
+ if (!par->manual_standby)
+ auok190x_runtime_suspend(dev);
+ else
+ par->manual_standby = 0;
+ }
+
+ return 0;
+}
+#endif
+
+const struct dev_pm_ops auok190x_pm = {
+ SET_RUNTIME_PM_OPS(auok190x_runtime_suspend, auok190x_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(auok190x_suspend, auok190x_resume)
+};
+EXPORT_SYMBOL_GPL(auok190x_pm);
+
+/*
+ * Common probe and remove code
+ */
+
+int __devinit auok190x_common_probe(struct platform_device *pdev,
+ struct auok190x_init_data *init)
+{
+ struct auok190x_board *board = init->board;
+ struct auok190xfb_par *par;
+ struct fb_info *info;
+ struct panel_info *panel;
+ int videomemorysize, ret;
+ unsigned char *videomemory;
+
+ /* check board contents */
+ if (!board->init || !board->cleanup || !board->wait_for_rdy
+ || !board->set_ctl || !board->set_hdb || !board->get_hdb
+ || !board->setup_irq)
+ return -EINVAL;
+
+ info = framebuffer_alloc(sizeof(struct auok190xfb_par), &pdev->dev);
+ if (!info)
+ return -ENOMEM;
+
+ par = info->par;
+ par->info = info;
+ par->board = board;
+ par->recover = auok190x_recover;
+ par->update_partial = init->update_partial;
+ par->update_all = init->update_all;
+ par->need_refresh = init->need_refresh;
+ par->init = init->init;
+
+ /* init update modes */
+ par->update_cnt = 0;
+ par->update_mode = -1;
+ par->last_mode = -1;
+ par->flash = 0;
+
+ par->regulator = regulator_get(info->device, "vdd");
+ if (IS_ERR(par->regulator)) {
+ ret = PTR_ERR(par->regulator);
+ dev_err(info->device, "Failed to get regulator: %d\n", ret);
+ goto err_reg;
+ }
+
+ ret = board->init(par);
+ if (ret) {
+ dev_err(info->device, "board init failed, %d\n", ret);
+ goto err_board;
+ }
+
+ ret = gpio_request(board->gpio_nsleep, "AUOK190x sleep");
+ if (ret) {
+ dev_err(info->device, "could not request sleep gpio, %d\n",
+ ret);
+ goto err_gpio1;
+ }
+
+ ret = gpio_direction_output(board->gpio_nsleep, 0);
+ if (ret) {
+ dev_err(info->device, "could not set sleep gpio, %d\n", ret);
+ goto err_gpio2;
+ }
+
+ ret = gpio_request(board->gpio_nrst, "AUOK190x reset");
+ if (ret) {
+ dev_err(info->device, "could not request reset gpio, %d\n",
+ ret);
+ goto err_gpio2;
+ }
+
+ ret = gpio_direction_output(board->gpio_nrst, 0);
+ if (ret) {
+ dev_err(info->device, "could not set reset gpio, %d\n", ret);
+ goto err_gpio3;
+ }
+
+ ret = auok190x_power(par, 1);
+ if (ret) {
+ dev_err(info->device, "could not power on the device, %d\n",
+ ret);
+ goto err_gpio3;
+ }
+
+ mutex_init(&par->io_lock);
+
+ init_waitqueue_head(&par->waitq);
+
+ ret = par->board->setup_irq(par->info);
+ if (ret) {
+ dev_err(info->device, "could not setup ready-irq, %d\n", ret);
+ goto err_irq;
+ }
+
+ /* wait for init to complete */
+ par->board->wait_for_rdy(par);
+
+ /*
+ * From here on the controller can talk to us
+ */
+
+ /* initialise fix, var, resolution and rotation */
+
+ strlcpy(info->fix.id, init->id, 16);
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR;
+ info->fix.xpanstep = 0;
+ info->fix.ypanstep = 0;
+ info->fix.ywrapstep = 0;
+ info->fix.accel = FB_ACCEL_NONE;
+
+ info->var.bits_per_pixel = 8;
+ info->var.grayscale = 1;
+ info->var.red.length = 8;
+ info->var.green.length = 8;
+ info->var.blue.length = 8;
+
+ panel = &panel_table[board->resolution];
+
+ /* if 90 degree rotation, switch width and height */
+ if (board->rotation & 1) {
+ info->var.xres = panel->h;
+ info->var.yres = panel->w;
+ info->var.xres_virtual = panel->h;
+ info->var.yres_virtual = panel->w;
+ info->fix.line_length = panel->h;
+ } else {
+ info->var.xres = panel->w;
+ info->var.yres = panel->h;
+ info->var.xres_virtual = panel->w;
+ info->var.yres_virtual = panel->h;
+ info->fix.line_length = panel->w;
+ }
+
+ par->resolution = board->resolution;
+ par->rotation = board->rotation;
+
+ /* videomemory handling */
+
+ videomemorysize = roundup((panel->w * panel->h), PAGE_SIZE);
+ videomemory = vmalloc(videomemorysize);
+ if (!videomemory) {
+ ret = -ENOMEM;
+ goto err_irq;
+ }
+
+ memset(videomemory, 0, videomemorysize);
+ info->screen_base = (char *)videomemory;
+ info->fix.smem_len = videomemorysize;
+
+ info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB;
+ info->fbops = &auok190xfb_ops;
+
+ /* deferred io init */
+
+ info->fbdefio = devm_kzalloc(info->device,
+ sizeof(struct fb_deferred_io),
+ GFP_KERNEL);
+ if (!info->fbdefio) {
+ dev_err(info->device, "Failed to allocate memory\n");
+ ret = -ENOMEM;
+ goto err_defio;
+ }
+
+	dev_dbg(info->device, "targeting %d frames per second\n", board->fps);
+ info->fbdefio->delay = HZ / board->fps;
+ info->fbdefio->first_io = auok190xfb_dpy_first_io,
+ info->fbdefio->deferred_io = auok190xfb_dpy_deferred_io,
+ fb_deferred_io_init(info);
+
+ /* color map */
+
+ ret = fb_alloc_cmap(&info->cmap, 256, 0);
+ if (ret < 0) {
+ dev_err(info->device, "Failed to allocate colormap\n");
+ goto err_cmap;
+ }
+
+ /* controller init */
+
+ par->consecutive_threshold = 100;
+ par->init(par);
+ auok190x_identify(par);
+
+ platform_set_drvdata(pdev, info);
+
+ ret = register_framebuffer(info);
+ if (ret < 0)
+ goto err_regfb;
+
+ ret = sysfs_create_group(&info->device->kobj, &auok190x_attr_group);
+ if (ret)
+ goto err_sysfs;
+
+ dev_info(info->device, "fb%d: %dx%d using %dK of video memory\n",
+ info->node, info->var.xres, info->var.yres,
+ videomemorysize >> 10);
+
+ /* increase autosuspend_delay when we use alternative methods
+ * for runtime_pm
+ */
+ par->autosuspend_delay = (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN)
+ ? 1000 : 200;
+
+ pm_runtime_set_active(info->device);
+ pm_runtime_enable(info->device);
+ pm_runtime_set_autosuspend_delay(info->device, par->autosuspend_delay);
+ pm_runtime_use_autosuspend(info->device);
+
+ return 0;
+
+err_sysfs:
+ unregister_framebuffer(info);
+err_regfb:
+ fb_dealloc_cmap(&info->cmap);
+err_cmap:
+ fb_deferred_io_cleanup(info);
+ kfree(info->fbdefio);
+err_defio:
+ vfree((void *)info->screen_base);
+err_irq:
+ auok190x_power(par, 0);
+err_gpio3:
+ gpio_free(board->gpio_nrst);
+err_gpio2:
+ gpio_free(board->gpio_nsleep);
+err_gpio1:
+ board->cleanup(par);
+err_board:
+ regulator_put(par->regulator);
+err_reg:
+ framebuffer_release(info);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(auok190x_common_probe);
+
+int __devexit auok190x_common_remove(struct platform_device *pdev)
+{
+ struct fb_info *info = platform_get_drvdata(pdev);
+ struct auok190xfb_par *par = info->par;
+ struct auok190x_board *board = par->board;
+
+ pm_runtime_disable(info->device);
+
+ sysfs_remove_group(&info->device->kobj, &auok190x_attr_group);
+
+ unregister_framebuffer(info);
+
+ fb_dealloc_cmap(&info->cmap);
+
+ fb_deferred_io_cleanup(info);
+ kfree(info->fbdefio);
+
+ vfree((void *)info->screen_base);
+
+ auok190x_power(par, 0);
+
+ gpio_free(board->gpio_nrst);
+ gpio_free(board->gpio_nsleep);
+
+ board->cleanup(par);
+
+ regulator_put(par->regulator);
+
+ framebuffer_release(info);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(auok190x_common_remove);
+
+MODULE_DESCRIPTION("Common code for AUO-K190X controllers");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/auo_k190x.h b/drivers/video/auo_k190x.h
new file mode 100644
index 0000000..e35af1f
--- /dev/null
+++ b/drivers/video/auo_k190x.h
@@ -0,0 +1,129 @@
+/*
+ * Private common definitions for AUO-K190X framebuffer drivers
+ *
+ * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * I80 interface specific defines
+ */
+
+#define AUOK190X_I80_CS 0x01
+#define AUOK190X_I80_DC 0x02
+#define AUOK190X_I80_WR 0x03
+#define AUOK190X_I80_OE 0x04
+
+/*
+ * AUOK190x commands, common to both controllers
+ */
+
+#define AUOK190X_CMD_INIT 0x0000
+#define AUOK190X_CMD_STANDBY 0x0001
+#define AUOK190X_CMD_WAKEUP 0x0002
+#define AUOK190X_CMD_TCON_RESET 0x0003
+#define AUOK190X_CMD_DATA_STOP 0x1002
+#define AUOK190X_CMD_LUT_START 0x1003
+#define AUOK190X_CMD_DISP_REFRESH 0x1004
+#define AUOK190X_CMD_DISP_RESET 0x1005
+#define AUOK190X_CMD_PRE_DISPLAY_START 0x100D
+#define AUOK190X_CMD_PRE_DISPLAY_STOP 0x100F
+#define AUOK190X_CMD_FLASH_W 0x2000
+#define AUOK190X_CMD_FLASH_E 0x2001
+#define AUOK190X_CMD_FLASH_STS 0x2002
+#define AUOK190X_CMD_FRAMERATE 0x3000
+#define AUOK190X_CMD_READ_VERSION 0x4000
+#define AUOK190X_CMD_READ_STATUS 0x4001
+#define AUOK190X_CMD_READ_LUT 0x4003
+#define AUOK190X_CMD_DRIVERTIMING 0x5000
+#define AUOK190X_CMD_LBALANCE 0x5001
+#define AUOK190X_CMD_AGINGMODE 0x6000
+#define AUOK190X_CMD_AGINGEXIT 0x6001
+
+/*
+ * Common settings for AUOK190X_CMD_INIT
+ */
+
+#define AUOK190X_INIT_DATA_FILTER (0 << 12)
+#define AUOK190X_INIT_DATA_BYPASS (1 << 12)
+#define AUOK190X_INIT_INVERSE_WHITE (0 << 9)
+#define AUOK190X_INIT_INVERSE_BLACK (1 << 9)
+#define AUOK190X_INIT_SCAN_DOWN (0 << 1)
+#define AUOK190X_INIT_SCAN_UP (1 << 1)
+#define AUOK190X_INIT_SHIFT_LEFT (0 << 0)
+#define AUOK190X_INIT_SHIFT_RIGHT (1 << 0)
+
+/* Common bits to pixels
+ * Mode 15-12 11-8 7-4 3-0
+ * format0 4 3 2 1
+ * format1 3 4 1 2
+ */
+
+#define AUOK190X_INIT_FORMAT0 0
+#define AUOK190X_INIT_FORMAT1 (1 << 6)
+
+/*
+ * settings for AUOK190X_CMD_RESET
+ */
+
+#define AUOK190X_RESET_TCON (0 << 0)
+#define AUOK190X_RESET_NORMAL (1 << 0)
+#define AUOK190X_RESET_PON (1 << 1)
+
+/*
+ * AUOK190X_CMD_VERSION
+ */
+
+#define AUOK190X_VERSION_TEMP_MASK (0x1ff)
+#define AUOK190X_VERSION_EPD_MASK (0xff)
+#define AUOK190X_VERSION_SIZE_INT(_val) ((_val & 0xfc00) >> 10)
+#define AUOK190X_VERSION_SIZE_FLOAT(_val) ((_val & 0x3c0) >> 6)
+#define AUOK190X_VERSION_MODEL(_val) (_val & 0x3f)
+#define AUOK190X_VERSION_LUT(_val) (_val & 0xff)
+#define AUOK190X_VERSION_TCON(_val) ((_val & 0xff00) >> 8)
+
+/*
+ * update modes for CMD_PARTIALDISP on K1900 and CMD_DDMA on K1901
+ */
+
+#define AUOK190X_UPDATE_MODE(_res) ((_res & 0x7) << 12)
+#define AUOK190X_UPDATE_NONFLASH (1 << 15)
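As a small illustration (not part of the patch) of how these two bits are combined into the mode word by the K1900/K1901 dpy_update paths, with UPDATE_MODE and UPDATE_NONFLASH as hypothetical copies of the macros above:

#include <stdio.h>

#define UPDATE_MODE(m)	(((m) & 0x7) << 12)	/* copy of AUOK190X_UPDATE_MODE */
#define UPDATE_NONFLASH	(1 << 15)		/* copy of AUOK190X_UPDATE_NONFLASH */

int main(void)
{
	int mode = UPDATE_MODE(1);	/* e.g. a 16-step gray partial update */

	mode |= UPDATE_NONFLASH;	/* OR-ed in when par->flash is set */
	printf("mode word 0x%04x\n", (unsigned int)mode);	/* 0x9000 */
	return 0;
}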
+
+/*
+ * track panel specific parameters for common init
+ */
+
+struct auok190x_init_data {
+ char *id;
+ struct auok190x_board *board;
+
+ void (*update_partial)(struct auok190xfb_par *par, u16 y1, u16 y2);
+ void (*update_all)(struct auok190xfb_par *par);
+ bool (*need_refresh)(struct auok190xfb_par *par);
+ void (*init)(struct auok190xfb_par *par);
+};
+
+
+extern void auok190x_send_command_nowait(struct auok190xfb_par *par, u16 data);
+extern int auok190x_send_command(struct auok190xfb_par *par, u16 data);
+extern void auok190x_send_cmdargs_nowait(struct auok190xfb_par *par, u16 cmd,
+ int argc, u16 *argv);
+extern int auok190x_send_cmdargs(struct auok190xfb_par *par, u16 cmd,
+ int argc, u16 *argv);
+extern void auok190x_send_cmdargs_pixels_nowait(struct auok190xfb_par *par,
+ u16 cmd, int argc, u16 *argv,
+ int size, u16 *data);
+extern int auok190x_send_cmdargs_pixels(struct auok190xfb_par *par, u16 cmd,
+ int argc, u16 *argv, int size,
+ u16 *data);
+extern int auok190x_read_cmdargs(struct auok190xfb_par *par, u16 cmd,
+ int argc, u16 *argv);
+
+extern int auok190x_common_probe(struct platform_device *pdev,
+ struct auok190x_init_data *init);
+extern int auok190x_common_remove(struct platform_device *pdev);
+
+extern const struct dev_pm_ops auok190x_pm;
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index af16884..2979292 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -88,7 +88,7 @@ config LCD_PLATFORM
config LCD_TOSA
tristate "Sharp SL-6000 LCD Driver"
- depends on SPI && MACH_TOSA
+ depends on I2C && SPI && MACH_TOSA
help
If you have a Sharp SL-6000 Zaurus say Y to enable a driver
for its LCD.
@@ -184,6 +184,18 @@ config BACKLIGHT_GENERIC
known as the Corgi backlight driver. If you have a Sharp Zaurus
SL-C7xx, SL-Cxx00 or SL-6000x say y.
+config BACKLIGHT_LM3533
+ tristate "Backlight Driver for LM3533"
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on MFD_LM3533
+ help
+ Say Y to enable the backlight driver for National Semiconductor / TI
+ LM3533 Lighting Power chips.
+
+ The backlights can be controlled directly, through PWM input, or by
+ the ambient-light-sensor interface. The chip supports 256 brightness
+ levels.
+
config BACKLIGHT_LOCOMO
tristate "Sharp LOCOMO LCD/Backlight Driver"
depends on SHARP_LOCOMO
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 36855ae..a2ac9cf 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_BACKLIGHT_EP93XX) += ep93xx_bl.o
obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o
obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o
obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
+obj-$(CONFIG_BACKLIGHT_LM3533) += lm3533_bl.o
obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
obj-$(CONFIG_BACKLIGHT_LP855X) += lp855x_bl.o
obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index 4911ea7..df5db99 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -160,7 +160,7 @@ static ssize_t adp5520_store(struct device *dev, const char *buf,
unsigned long val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -214,7 +214,7 @@ static ssize_t adp5520_bl_daylight_max_store(struct device *dev,
struct adp5520_bl *data = dev_get_drvdata(dev);
int ret;
- ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+ ret = kstrtoul(buf, 10, &data->cached_daylight_max);
if (ret < 0)
return ret;
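[Editor's sketch] The hunks above swap the deprecated strict_strtoul() for kstrtoul(). For reference, a generic sysfs store handler using the new helper looks roughly like this; the attribute and handler names are made up:

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	unsigned long val;
	int ret;

	/* kstrtoul() returns 0 on success or a negative errno on bad input */
	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;

	/* ... program 'val' into the hardware here ... */

	return count;
}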
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 550dbf0..77d1fdb 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -222,7 +222,8 @@ static int __devinit adp8860_led_probe(struct i2c_client *client)
struct led_info *cur_led;
int ret, i;
- led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL);
+ led = devm_kzalloc(&client->dev, sizeof(*led) * pdata->num_leds,
+ GFP_KERNEL);
if (led == NULL) {
dev_err(&client->dev, "failed to alloc memory\n");
return -ENOMEM;
@@ -236,7 +237,7 @@ static int __devinit adp8860_led_probe(struct i2c_client *client)
if (ret) {
dev_err(&client->dev, "failed to write\n");
- goto err_free;
+ return ret;
}
for (i = 0; i < pdata->num_leds; ++i) {
@@ -291,9 +292,6 @@ static int __devinit adp8860_led_probe(struct i2c_client *client)
cancel_work_sync(&led[i].work);
}
- err_free:
- kfree(led);
-
return ret;
}
@@ -309,7 +307,6 @@ static int __devexit adp8860_led_remove(struct i2c_client *client)
cancel_work_sync(&data->led[i].work);
}
- kfree(data->led);
return 0;
}
#else
@@ -451,7 +448,7 @@ static ssize_t adp8860_store(struct device *dev, const char *buf,
unsigned long val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -501,7 +498,7 @@ static ssize_t adp8860_bl_l1_daylight_max_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct adp8860_bl *data = dev_get_drvdata(dev);
- int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+ int ret = kstrtoul(buf, 10, &data->cached_daylight_max);
if (ret)
return ret;
@@ -608,7 +605,7 @@ static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev,
uint8_t reg_val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -675,13 +672,13 @@ static int __devinit adp8860_probe(struct i2c_client *client,
return -EINVAL;
}
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
ret = adp8860_read(client, ADP8860_MFDVID, &reg_val);
if (ret < 0)
- goto out2;
+ return ret;
switch (ADP8860_MANID(reg_val)) {
case ADP8863_MANUFID:
@@ -694,8 +691,7 @@ static int __devinit adp8860_probe(struct i2c_client *client,
break;
default:
dev_err(&client->dev, "failed to probe\n");
- ret = -ENODEV;
- goto out2;
+ return -ENODEV;
}
/* It's confirmed that the DEVID field is actually a REVID */
@@ -717,8 +713,7 @@ static int __devinit adp8860_probe(struct i2c_client *client,
&client->dev, data, &adp8860_bl_ops, &props);
if (IS_ERR(bl)) {
dev_err(&client->dev, "failed to register backlight\n");
- ret = PTR_ERR(bl);
- goto out2;
+ return PTR_ERR(bl);
}
bl->props.brightness = ADP8860_MAX_BRIGHTNESS;
@@ -756,8 +751,6 @@ out:
&adp8860_bl_attr_group);
out1:
backlight_device_unregister(bl);
-out2:
- kfree(data);
return ret;
}
@@ -776,7 +769,6 @@ static int __devexit adp8860_remove(struct i2c_client *client)
&adp8860_bl_attr_group);
backlight_device_unregister(data->bl);
- kfree(data);
return 0;
}
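[Editor's sketch] The adp8860 changes above (and most of the conversions that follow) rely on managed allocations. A bare-bones sketch of the pattern, with made-up names:

struct example_data {
	int dummy;
};

static int example_probe(struct i2c_client *client)
{
	struct example_data *data;

	/* freed automatically if probe fails or the driver is unbound */
	data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* later errors can simply 'return ret;' -- no kfree()/goto needed */
	return 0;
}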
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 9be58c6..edf7f91 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -244,8 +244,8 @@ static int __devinit adp8870_led_probe(struct i2c_client *client)
struct led_info *cur_led;
int ret, i;
-
- led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
+ led = devm_kzalloc(&client->dev, pdata->num_leds * sizeof(*led),
+ GFP_KERNEL);
if (led == NULL) {
dev_err(&client->dev, "failed to alloc memory\n");
return -ENOMEM;
@@ -253,17 +253,17 @@ static int __devinit adp8870_led_probe(struct i2c_client *client)
ret = adp8870_write(client, ADP8870_ISCLAW, pdata->led_fade_law);
if (ret)
- goto err_free;
+ return ret;
ret = adp8870_write(client, ADP8870_ISCT1,
(pdata->led_on_time & 0x3) << 6);
if (ret)
- goto err_free;
+ return ret;
ret = adp8870_write(client, ADP8870_ISCF,
FADE_VAL(pdata->led_fade_in, pdata->led_fade_out));
if (ret)
- goto err_free;
+ return ret;
for (i = 0; i < pdata->num_leds; ++i) {
cur_led = &pdata->leds[i];
@@ -317,9 +317,6 @@ static int __devinit adp8870_led_probe(struct i2c_client *client)
cancel_work_sync(&led[i].work);
}
- err_free:
- kfree(led);
-
return ret;
}
@@ -335,7 +332,6 @@ static int __devexit adp8870_led_remove(struct i2c_client *client)
cancel_work_sync(&data->led[i].work);
}
- kfree(data->led);
return 0;
}
#else
@@ -572,7 +568,7 @@ static ssize_t adp8870_store(struct device *dev, const char *buf,
unsigned long val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -652,7 +648,7 @@ static ssize_t adp8870_bl_l1_daylight_max_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct adp8870_bl *data = dev_get_drvdata(dev);
- int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+ int ret = kstrtoul(buf, 10, &data->cached_daylight_max);
if (ret)
return ret;
@@ -794,7 +790,7 @@ static ssize_t adp8870_bl_ambient_light_zone_store(struct device *dev,
uint8_t reg_val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -874,7 +870,7 @@ static int __devinit adp8870_probe(struct i2c_client *client,
return -ENODEV;
}
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
@@ -894,8 +890,7 @@ static int __devinit adp8870_probe(struct i2c_client *client,
&client->dev, data, &adp8870_bl_ops, &props);
if (IS_ERR(bl)) {
dev_err(&client->dev, "failed to register backlight\n");
- ret = PTR_ERR(bl);
- goto out2;
+ return PTR_ERR(bl);
}
data->bl = bl;
@@ -930,8 +925,6 @@ out:
&adp8870_bl_attr_group);
out1:
backlight_device_unregister(bl);
-out2:
- kfree(data);
return ret;
}
@@ -950,7 +943,6 @@ static int __devexit adp8870_remove(struct i2c_client *client)
&adp8870_bl_attr_group);
backlight_device_unregister(data->bl);
- kfree(data);
return 0;
}
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index 7bdadc7..3729238 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -482,7 +482,7 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
struct backlight_device *bd = NULL;
struct backlight_properties props;
- lcd = kzalloc(sizeof(struct ams369fg06), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct ams369fg06), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
@@ -492,7 +492,7 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0) {
dev_err(&spi->dev, "spi setup failed.\n");
- goto out_free_lcd;
+ return ret;
}
lcd->spi = spi;
@@ -501,15 +501,13 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
lcd->lcd_pd = spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL\n");
- goto out_free_lcd;
+ return -EFAULT;
}
ld = lcd_device_register("ams369fg06", &spi->dev, lcd,
&ams369fg06_lcd_ops);
- if (IS_ERR(ld)) {
- ret = PTR_ERR(ld);
- goto out_free_lcd;
- }
+ if (IS_ERR(ld))
+ return PTR_ERR(ld);
lcd->ld = ld;
@@ -547,8 +545,6 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
out_lcd_unregister:
lcd_device_unregister(ld);
-out_free_lcd:
- kfree(lcd);
return ret;
}
@@ -559,7 +555,6 @@ static int __devexit ams369fg06_remove(struct spi_device *spi)
ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
backlight_device_unregister(lcd->bd);
lcd_device_unregister(lcd->ld);
- kfree(lcd);
return 0;
}
@@ -619,7 +614,6 @@ static void ams369fg06_shutdown(struct spi_device *spi)
static struct spi_driver ams369fg06_driver = {
.driver = {
.name = "ams369fg06",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ams369fg06_probe,
diff --git a/drivers/video/backlight/apple_bl.c b/drivers/video/backlight/apple_bl.c
index a523b25..9dc73ac 100644
--- a/drivers/video/backlight/apple_bl.c
+++ b/drivers/video/backlight/apple_bl.c
@@ -16,6 +16,8 @@
* get at the firmware code in order to figure out what it's actually doing.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -25,6 +27,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/atomic.h>
+#include <linux/apple_bl.h>
static struct backlight_device *apple_backlight_device;
@@ -39,8 +42,6 @@ struct hw_data {
static const struct hw_data *hw_data;
-#define DRIVER "apple_backlight: "
-
/* Module parameters. */
static int debug;
module_param_named(debug, debug, int, 0644);
@@ -60,8 +61,7 @@ static int intel_chipset_send_intensity(struct backlight_device *bd)
int intensity = bd->props.brightness;
if (debug)
- printk(KERN_DEBUG DRIVER "setting brightness to %d\n",
- intensity);
+ pr_debug("setting brightness to %d\n", intensity);
intel_chipset_set_brightness(intensity);
return 0;
@@ -76,8 +76,7 @@ static int intel_chipset_get_intensity(struct backlight_device *bd)
intensity = inb(0xb3) >> 4;
if (debug)
- printk(KERN_DEBUG DRIVER "read brightness of %d\n",
- intensity);
+ pr_debug("read brightness of %d\n", intensity);
return intensity;
}
@@ -107,8 +106,7 @@ static int nvidia_chipset_send_intensity(struct backlight_device *bd)
int intensity = bd->props.brightness;
if (debug)
- printk(KERN_DEBUG DRIVER "setting brightness to %d\n",
- intensity);
+ pr_debug("setting brightness to %d\n", intensity);
nvidia_chipset_set_brightness(intensity);
return 0;
@@ -123,8 +121,7 @@ static int nvidia_chipset_get_intensity(struct backlight_device *bd)
intensity = inb(0x52f) >> 4;
if (debug)
- printk(KERN_DEBUG DRIVER "read brightness of %d\n",
- intensity);
+ pr_debug("read brightness of %d\n", intensity);
return intensity;
}
@@ -149,7 +146,7 @@ static int __devinit apple_bl_add(struct acpi_device *dev)
host = pci_get_bus_and_slot(0, 0);
if (!host) {
- printk(KERN_ERR DRIVER "unable to find PCI host\n");
+ pr_err("unable to find PCI host\n");
return -ENODEV;
}
@@ -161,7 +158,7 @@ static int __devinit apple_bl_add(struct acpi_device *dev)
pci_dev_put(host);
if (!hw_data) {
- printk(KERN_ERR DRIVER "unknown hardware\n");
+ pr_err("unknown hardware\n");
return -ENODEV;
}
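[Editor's sketch] apple_bl above drops its hand-rolled DRIVER prefix in favour of pr_fmt(), the same pattern several later files in this diff adopt. A minimal sketch (assumes a normal kernel object, so KBUILD_MODNAME is provided by kbuild):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must come before the includes */

#include <linux/kernel.h>
#include <linux/printk.h>

static void example_report(void)
{
	/* expands to printk(KERN_ERR "<modname>: unable to find PCI host\n") */
	pr_err("unable to find PCI host\n");
}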
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index bf5b1ec..297db2f 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -5,6 +5,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
@@ -123,7 +125,7 @@ static ssize_t backlight_store_power(struct device *dev,
rc = -ENXIO;
mutex_lock(&bd->ops_lock);
if (bd->ops) {
- pr_debug("backlight: set power to %lu\n", power);
+ pr_debug("set power to %lu\n", power);
if (bd->props.power != power) {
bd->props.power = power;
backlight_update_status(bd);
@@ -161,8 +163,7 @@ static ssize_t backlight_store_brightness(struct device *dev,
if (brightness > bd->props.max_brightness)
rc = -EINVAL;
else {
- pr_debug("backlight: set brightness to %lu\n",
- brightness);
+ pr_debug("set brightness to %lu\n", brightness);
bd->props.brightness = brightness;
backlight_update_status(bd);
rc = count;
@@ -378,8 +379,8 @@ static int __init backlight_class_init(void)
{
backlight_class = class_create(THIS_MODULE, "backlight");
if (IS_ERR(backlight_class)) {
- printk(KERN_WARNING "Unable to create backlight class; errno = %ld\n",
- PTR_ERR(backlight_class));
+ pr_warn("Unable to create backlight class; errno = %ld\n",
+ PTR_ERR(backlight_class));
return PTR_ERR(backlight_class);
}
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 6dab13f..23d7326 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -544,7 +544,7 @@ static int __devinit corgi_lcd_probe(struct spi_device *spi)
return -EINVAL;
}
- lcd = kzalloc(sizeof(struct corgi_lcd), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct corgi_lcd), GFP_KERNEL);
if (!lcd) {
dev_err(&spi->dev, "failed to allocate memory\n");
return -ENOMEM;
@@ -554,10 +554,9 @@ static int __devinit corgi_lcd_probe(struct spi_device *spi)
lcd->lcd_dev = lcd_device_register("corgi_lcd", &spi->dev,
lcd, &corgi_lcd_ops);
- if (IS_ERR(lcd->lcd_dev)) {
- ret = PTR_ERR(lcd->lcd_dev);
- goto err_free_lcd;
- }
+ if (IS_ERR(lcd->lcd_dev))
+ return PTR_ERR(lcd->lcd_dev);
+
lcd->power = FB_BLANK_POWERDOWN;
lcd->mode = (pdata) ? pdata->init_mode : CORGI_LCD_MODE_VGA;
@@ -591,8 +590,6 @@ err_unregister_bl:
backlight_device_unregister(lcd->bl_dev);
err_unregister_lcd:
lcd_device_unregister(lcd->lcd_dev);
-err_free_lcd:
- kfree(lcd);
return ret;
}
@@ -613,7 +610,6 @@ static int __devexit corgi_lcd_remove(struct spi_device *spi)
corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN);
lcd_device_unregister(lcd->lcd_dev);
- kfree(lcd);
return 0;
}
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index 22489eb..37bae80 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -27,6 +27,8 @@
* Alan Hourihane <alanh-at-tungstengraphics-dot-com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -180,14 +182,13 @@ static int cr_backlight_probe(struct platform_device *pdev)
lpc_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
CRVML_DEVICE_LPC, NULL);
if (!lpc_dev) {
- printk("INTEL CARILLO RANCH LPC not found.\n");
+ pr_err("INTEL CARILLO RANCH LPC not found.\n");
return -ENODEV;
}
pci_read_config_byte(lpc_dev, CRVML_REG_GPIOEN, &dev_en);
if (!(dev_en & CRVML_GPIOEN_BIT)) {
- printk(KERN_ERR
- "Carillo Ranch GPIO device was not enabled.\n");
+ pr_err("Carillo Ranch GPIO device was not enabled.\n");
pci_dev_put(lpc_dev);
return -ENODEV;
}
@@ -270,7 +271,7 @@ static int __init cr_backlight_init(void)
return PTR_ERR(crp);
}
- printk("Carillo Ranch Backlight Driver Initialized.\n");
+ pr_info("Carillo Ranch Backlight Driver Initialized.\n");
return 0;
}
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 30e1968..573c7ec 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -136,6 +136,7 @@ static int da903x_backlight_probe(struct platform_device *pdev)
da903x_write(data->da903x_dev, DA9034_WLED_CONTROL2,
DA9034_WLED_ISET(pdata->output_current));
+ memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = max_brightness;
bl = backlight_device_register(pdev->name, data->da903x_dev, data,
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index 9ce6170..8c660fc 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -106,7 +108,7 @@ static int genericbl_probe(struct platform_device *pdev)
generic_backlight_device = bd;
- printk("Generic Backlight Driver Initialized.\n");
+ pr_info("Generic Backlight Driver Initialized.\n");
return 0;
}
@@ -120,7 +122,7 @@ static int genericbl_remove(struct platform_device *pdev)
backlight_device_unregister(bd);
- printk("Generic Backlight Driver Unloaded\n");
+ pr_info("Generic Backlight Driver Unloaded\n");
return 0;
}
diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c
index 5118a9f..9327cd1 100644
--- a/drivers/video/backlight/ili9320.c
+++ b/drivers/video/backlight/ili9320.c
@@ -220,7 +220,7 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
 /* allocate and initialise our state */
- ili = kzalloc(sizeof(struct ili9320), GFP_KERNEL);
+ ili = devm_kzalloc(&spi->dev, sizeof(struct ili9320), GFP_KERNEL);
if (ili == NULL) {
dev_err(dev, "no memory for device\n");
return -ENOMEM;
@@ -240,8 +240,7 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
lcd = lcd_device_register("ili9320", dev, ili, &ili9320_ops);
if (IS_ERR(lcd)) {
dev_err(dev, "failed to register lcd device\n");
- ret = PTR_ERR(lcd);
- goto err_free;
+ return PTR_ERR(lcd);
}
ili->lcd = lcd;
@@ -259,20 +258,16 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
err_unregister:
lcd_device_unregister(lcd);
- err_free:
- kfree(ili);
-
return ret;
}
EXPORT_SYMBOL_GPL(ili9320_probe_spi);
-int __devexit ili9320_remove(struct ili9320 *ili)
+int ili9320_remove(struct ili9320 *ili)
{
ili9320_power(ili, FB_BLANK_POWERDOWN);
lcd_device_unregister(ili->lcd);
- kfree(ili);
return 0;
}
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index 2f8af5d..16f593b 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/backlight.h>
#include <linux/device.h>
#include <linux/fb.h>
@@ -38,7 +40,7 @@ static int jornada_bl_get_brightness(struct backlight_device *bd)
ret = jornada_ssp_byte(GETBRIGHTNESS);
if (jornada_ssp_byte(GETBRIGHTNESS) != TXDUMMY) {
- printk(KERN_ERR "bl : get brightness timeout\n");
+ pr_err("get brightness timeout\n");
jornada_ssp_end();
return -ETIMEDOUT;
} else /* exchange txdummy for value */
@@ -59,7 +61,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
if ((bd->props.power != FB_BLANK_UNBLANK) || (bd->props.fb_blank != FB_BLANK_UNBLANK)) {
ret = jornada_ssp_byte(BRIGHTNESSOFF);
if (ret != TXDUMMY) {
- printk(KERN_INFO "bl : brightness off timeout\n");
+ pr_info("brightness off timeout\n");
/* turn off backlight */
PPSR &= ~PPC_LDD1;
PPDR |= PPC_LDD1;
@@ -70,7 +72,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
/* send command to our mcu */
if (jornada_ssp_byte(SETBRIGHTNESS) != TXDUMMY) {
- printk(KERN_INFO "bl : failed to set brightness\n");
+ pr_info("failed to set brightness\n");
ret = -ETIMEDOUT;
goto out;
}
@@ -81,7 +83,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
but due to physical layout it is equal to 0, so we simply
invert the value (MAX VALUE - NEW VALUE). */
if (jornada_ssp_byte(BL_MAX_BRIGHT - bd->props.brightness) != TXDUMMY) {
- printk(KERN_ERR "bl : set brightness failed\n");
+ pr_err("set brightness failed\n");
ret = -ETIMEDOUT;
}
@@ -113,7 +115,7 @@ static int jornada_bl_probe(struct platform_device *pdev)
if (IS_ERR(bd)) {
ret = PTR_ERR(bd);
- printk(KERN_ERR "bl : failed to register device, err=%x\n", ret);
+ pr_err("failed to register device, err=%x\n", ret);
return ret;
}
@@ -125,7 +127,7 @@ static int jornada_bl_probe(struct platform_device *pdev)
jornada_bl_update_status(bd);
platform_set_drvdata(pdev, bd);
- printk(KERN_INFO "HP Jornada 700 series backlight driver\n");
+ pr_info("HP Jornada 700 series backlight driver\n");
return 0;
}
diff --git a/drivers/video/backlight/jornada720_lcd.c b/drivers/video/backlight/jornada720_lcd.c
index 22d231a..635b305 100644
--- a/drivers/video/backlight/jornada720_lcd.c
+++ b/drivers/video/backlight/jornada720_lcd.c
@@ -9,6 +9,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/device.h>
#include <linux/fb.h>
#include <linux/kernel.h>
@@ -44,7 +46,7 @@ static int jornada_lcd_get_contrast(struct lcd_device *dev)
jornada_ssp_start();
if (jornada_ssp_byte(GETCONTRAST) != TXDUMMY) {
- printk(KERN_ERR "lcd: get contrast failed\n");
+ pr_err("get contrast failed\n");
jornada_ssp_end();
return -ETIMEDOUT;
} else {
@@ -65,7 +67,7 @@ static int jornada_lcd_set_contrast(struct lcd_device *dev, int value)
/* push the new value */
if (jornada_ssp_byte(value) != TXDUMMY) {
- printk(KERN_ERR "lcd : set contrast failed\n");
+ pr_err("set contrast failed\n");
jornada_ssp_end();
return -ETIMEDOUT;
}
@@ -103,7 +105,7 @@ static int jornada_lcd_probe(struct platform_device *pdev)
if (IS_ERR(lcd_device)) {
ret = PTR_ERR(lcd_device);
- printk(KERN_ERR "lcd : failed to register device\n");
+ pr_err("failed to register device\n");
return ret;
}
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 6022b67..40f606a 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -11,6 +11,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/delay.h>
@@ -159,7 +161,8 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
return -EINVAL;
}
- priv = kzalloc(sizeof(struct l4f00242t03_priv), GFP_KERNEL);
+ priv = devm_kzalloc(&spi->dev, sizeof(struct l4f00242t03_priv),
+ GFP_KERNEL);
if (priv == NULL) {
dev_err(&spi->dev, "No memory for this device.\n");
@@ -177,7 +180,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
if (ret) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 reset gpio.\n");
- goto err;
+ return ret;
}
ret = gpio_request_one(pdata->data_enable_gpio, GPIOF_OUT_INIT_LOW,
@@ -185,7 +188,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
if (ret) {
dev_err(&spi->dev,
"Unable to get the lcd l4f00242t03 data en gpio.\n");
- goto err2;
+ goto err;
}
priv->io_reg = regulator_get(&spi->dev, "vdd");
@@ -193,7 +196,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
ret = PTR_ERR(priv->io_reg);
dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
__func__);
- goto err3;
+ goto err2;
}
priv->core_reg = regulator_get(&spi->dev, "vcore");
@@ -201,14 +204,14 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
ret = PTR_ERR(priv->core_reg);
dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
__func__);
- goto err4;
+ goto err3;
}
priv->ld = lcd_device_register("l4f00242t03",
&spi->dev, priv, &l4f_ops);
if (IS_ERR(priv->ld)) {
ret = PTR_ERR(priv->ld);
- goto err5;
+ goto err4;
}
/* Init the LCD */
@@ -220,16 +223,14 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
return 0;
-err5:
- regulator_put(priv->core_reg);
err4:
- regulator_put(priv->io_reg);
+ regulator_put(priv->core_reg);
err3:
- gpio_free(pdata->data_enable_gpio);
+ regulator_put(priv->io_reg);
err2:
- gpio_free(pdata->reset_gpio);
+ gpio_free(pdata->data_enable_gpio);
err:
- kfree(priv);
+ gpio_free(pdata->reset_gpio);
return ret;
}
@@ -250,8 +251,6 @@ static int __devexit l4f00242t03_remove(struct spi_device *spi)
regulator_put(priv->io_reg);
regulator_put(priv->core_reg);
- kfree(priv);
-
return 0;
}
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 79c1b0d..a5d0d02 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -5,6 +5,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
@@ -32,6 +34,8 @@ static int fb_notifier_callback(struct notifier_block *self,
case FB_EVENT_BLANK:
case FB_EVENT_MODE_CHANGE:
case FB_EVENT_MODE_CHANGE_ALL:
+ case FB_EARLY_EVENT_BLANK:
+ case FB_R_EARLY_EVENT_BLANK:
break;
default:
return 0;
@@ -46,6 +50,14 @@ static int fb_notifier_callback(struct notifier_block *self,
if (event == FB_EVENT_BLANK) {
if (ld->ops->set_power)
ld->ops->set_power(ld, *(int *)evdata->data);
+ } else if (event == FB_EARLY_EVENT_BLANK) {
+ if (ld->ops->early_set_power)
+ ld->ops->early_set_power(ld,
+ *(int *)evdata->data);
+ } else if (event == FB_R_EARLY_EVENT_BLANK) {
+ if (ld->ops->r_early_set_power)
+ ld->ops->r_early_set_power(ld,
+ *(int *)evdata->data);
} else {
if (ld->ops->set_mode)
ld->ops->set_mode(ld, evdata->data);
@@ -106,7 +118,7 @@ static ssize_t lcd_store_power(struct device *dev,
mutex_lock(&ld->ops_lock);
if (ld->ops && ld->ops->set_power) {
- pr_debug("lcd: set power to %lu\n", power);
+ pr_debug("set power to %lu\n", power);
ld->ops->set_power(ld, power);
rc = count;
}
@@ -142,7 +154,7 @@ static ssize_t lcd_store_contrast(struct device *dev,
mutex_lock(&ld->ops_lock);
if (ld->ops && ld->ops->set_contrast) {
- pr_debug("lcd: set contrast to %lu\n", contrast);
+ pr_debug("set contrast to %lu\n", contrast);
ld->ops->set_contrast(ld, contrast);
rc = count;
}
@@ -253,8 +265,8 @@ static int __init lcd_class_init(void)
{
lcd_class = class_create(THIS_MODULE, "lcd");
if (IS_ERR(lcd_class)) {
- printk(KERN_WARNING "Unable to create backlight class; errno = %ld\n",
- PTR_ERR(lcd_class));
+ pr_warn("Unable to create backlight class; errno = %ld\n",
+ PTR_ERR(lcd_class));
return PTR_ERR(lcd_class);
}
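[Editor's sketch] The lcd class change above forwards the new FB_EARLY_EVENT_BLANK/FB_R_EARLY_EVENT_BLANK notifications to two new callbacks. A sketch of an lcd_ops using them; the early_set_power/r_early_set_power members are added elsewhere in this series, and everything else here is illustrative:

#include <linux/lcd.h>

static int example_set_power(struct lcd_device *ld, int power)
{
	/* ordinary FB_EVENT_BLANK handling */
	return 0;
}

static int example_early_set_power(struct lcd_device *ld, int power)
{
	/* runs on FB_EARLY_EVENT_BLANK, before the framebuffer is blanked */
	return 0;
}

static struct lcd_ops example_lcd_ops = {
	.set_power		= example_set_power,
	.early_set_power	= example_early_set_power,
};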
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
index efd352b..58f517f 100644
--- a/drivers/video/backlight/ld9040.c
+++ b/drivers/video/backlight/ld9040.c
@@ -707,7 +707,7 @@ static int ld9040_probe(struct spi_device *spi)
struct backlight_device *bd = NULL;
struct backlight_properties props;
- lcd = kzalloc(sizeof(struct ld9040), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct ld9040), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
@@ -717,7 +717,7 @@ static int ld9040_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0) {
dev_err(&spi->dev, "spi setup failed.\n");
- goto out_free_lcd;
+ return ret;
}
lcd->spi = spi;
@@ -726,7 +726,7 @@ static int ld9040_probe(struct spi_device *spi)
lcd->lcd_pd = spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL.\n");
- goto out_free_lcd;
+ return -EFAULT;
}
mutex_init(&lcd->lock);
@@ -734,13 +734,13 @@ static int ld9040_probe(struct spi_device *spi)
ret = regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
if (ret) {
dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
- goto out_free_lcd;
+ return ret;
}
ld = lcd_device_register("ld9040", &spi->dev, lcd, &ld9040_lcd_ops);
if (IS_ERR(ld)) {
ret = PTR_ERR(ld);
- goto out_free_lcd;
+ goto out_free_regulator;
}
lcd->ld = ld;
@@ -782,10 +782,9 @@ static int ld9040_probe(struct spi_device *spi)
out_unregister_lcd:
lcd_device_unregister(lcd->ld);
-out_free_lcd:
+out_free_regulator:
regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
- kfree(lcd);
return ret;
}
@@ -797,7 +796,6 @@ static int __devexit ld9040_remove(struct spi_device *spi)
backlight_device_unregister(lcd->bd);
lcd_device_unregister(lcd->ld);
regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
- kfree(lcd);
return 0;
}
@@ -846,7 +844,6 @@ static void ld9040_shutdown(struct spi_device *spi)
static struct spi_driver ld9040_driver = {
.driver = {
.name = "ld9040",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ld9040_probe,
diff --git a/drivers/video/backlight/lm3533_bl.c b/drivers/video/backlight/lm3533_bl.c
new file mode 100644
index 0000000..bebeb63
--- /dev/null
+++ b/drivers/video/backlight/lm3533_bl.c
@@ -0,0 +1,423 @@
+/*
+ * lm3533-bl.c -- LM3533 Backlight driver
+ *
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * Author: Johan Hovold <jhovold@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/lm3533.h>
+
+
+#define LM3533_HVCTRLBANK_COUNT 2
+#define LM3533_BL_MAX_BRIGHTNESS 255
+
+#define LM3533_REG_CTRLBANK_AB_BCONF 0x1a
+
+
+struct lm3533_bl {
+ struct lm3533 *lm3533;
+ struct lm3533_ctrlbank cb;
+ struct backlight_device *bd;
+ int id;
+};
+
+
+static inline int lm3533_bl_get_ctrlbank_id(struct lm3533_bl *bl)
+{
+ return bl->id;
+}
+
+static int lm3533_bl_update_status(struct backlight_device *bd)
+{
+ struct lm3533_bl *bl = bl_get_data(bd);
+ int brightness = bd->props.brightness;
+
+ if (bd->props.power != FB_BLANK_UNBLANK)
+ brightness = 0;
+ if (bd->props.fb_blank != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ return lm3533_ctrlbank_set_brightness(&bl->cb, (u8)brightness);
+}
+
+static int lm3533_bl_get_brightness(struct backlight_device *bd)
+{
+ struct lm3533_bl *bl = bl_get_data(bd);
+ u8 val;
+ int ret;
+
+ ret = lm3533_ctrlbank_get_brightness(&bl->cb, &val);
+ if (ret)
+ return ret;
+
+ return val;
+}
+
+static const struct backlight_ops lm3533_bl_ops = {
+ .get_brightness = lm3533_bl_get_brightness,
+ .update_status = lm3533_bl_update_status,
+};
+
+static ssize_t show_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", bl->id);
+}
+
+static ssize_t show_als_channel(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ unsigned channel = lm3533_bl_get_ctrlbank_id(bl);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", channel);
+}
+
+static ssize_t show_als_en(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ int ctrlbank = lm3533_bl_get_ctrlbank_id(bl);
+ u8 val;
+ u8 mask;
+ bool enable;
+ int ret;
+
+ ret = lm3533_read(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, &val);
+ if (ret)
+ return ret;
+
+ mask = 1 << (2 * ctrlbank);
+ enable = val & mask;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", enable);
+}
+
+static ssize_t store_als_en(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ int ctrlbank = lm3533_bl_get_ctrlbank_id(bl);
+ int enable;
+ u8 val;
+ u8 mask;
+ int ret;
+
+ if (kstrtoint(buf, 0, &enable))
+ return -EINVAL;
+
+ mask = 1 << (2 * ctrlbank);
+
+ if (enable)
+ val = mask;
+ else
+ val = 0;
+
+ ret = lm3533_update(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, val,
+ mask);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t show_linear(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ u8 val;
+ u8 mask;
+ int linear;
+ int ret;
+
+ ret = lm3533_read(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, &val);
+ if (ret)
+ return ret;
+
+ mask = 1 << (2 * lm3533_bl_get_ctrlbank_id(bl) + 1);
+
+ if (val & mask)
+ linear = 1;
+ else
+ linear = 0;
+
+ return scnprintf(buf, PAGE_SIZE, "%x\n", linear);
+}
+
+static ssize_t store_linear(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ unsigned long linear;
+ u8 mask;
+ u8 val;
+ int ret;
+
+ if (kstrtoul(buf, 0, &linear))
+ return -EINVAL;
+
+ mask = 1 << (2 * lm3533_bl_get_ctrlbank_id(bl) + 1);
+
+ if (linear)
+ val = mask;
+ else
+ val = 0;
+
+ ret = lm3533_update(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, val,
+ mask);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static ssize_t show_pwm(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ u8 val;
+ int ret;
+
+ ret = lm3533_ctrlbank_get_pwm(&bl->cb, &val);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t store_pwm(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ u8 val;
+ int ret;
+
+ if (kstrtou8(buf, 0, &val))
+ return -EINVAL;
+
+ ret = lm3533_ctrlbank_set_pwm(&bl->cb, val);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+static LM3533_ATTR_RO(als_channel);
+static LM3533_ATTR_RW(als_en);
+static LM3533_ATTR_RO(id);
+static LM3533_ATTR_RW(linear);
+static LM3533_ATTR_RW(pwm);
+
+static struct attribute *lm3533_bl_attributes[] = {
+ &dev_attr_als_channel.attr,
+ &dev_attr_als_en.attr,
+ &dev_attr_id.attr,
+ &dev_attr_linear.attr,
+ &dev_attr_pwm.attr,
+ NULL,
+};
+
+static umode_t lm3533_bl_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct lm3533_bl *bl = dev_get_drvdata(dev);
+ umode_t mode = attr->mode;
+
+ if (attr == &dev_attr_als_channel.attr ||
+ attr == &dev_attr_als_en.attr) {
+ if (!bl->lm3533->have_als)
+ mode = 0;
+ }
+
+ return mode;
+}
+
+static struct attribute_group lm3533_bl_attribute_group = {
+ .is_visible = lm3533_bl_attr_is_visible,
+ .attrs = lm3533_bl_attributes
+};
+
+static int __devinit lm3533_bl_setup(struct lm3533_bl *bl,
+ struct lm3533_bl_platform_data *pdata)
+{
+ int ret;
+
+ ret = lm3533_ctrlbank_set_max_current(&bl->cb, pdata->max_current);
+ if (ret)
+ return ret;
+
+ return lm3533_ctrlbank_set_pwm(&bl->cb, pdata->pwm);
+}
+
+static int __devinit lm3533_bl_probe(struct platform_device *pdev)
+{
+ struct lm3533 *lm3533;
+ struct lm3533_bl_platform_data *pdata;
+ struct lm3533_bl *bl;
+ struct backlight_device *bd;
+ struct backlight_properties props;
+ int ret;
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533 = dev_get_drvdata(pdev->dev.parent);
+ if (!lm3533)
+ return -EINVAL;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ if (pdev->id < 0 || pdev->id >= LM3533_HVCTRLBANK_COUNT) {
+ dev_err(&pdev->dev, "illegal backlight id %d\n", pdev->id);
+ return -EINVAL;
+ }
+
+ bl = kzalloc(sizeof(*bl), GFP_KERNEL);
+ if (!bl) {
+ dev_err(&pdev->dev,
+ "failed to allocate memory for backlight\n");
+ return -ENOMEM;
+ }
+
+ bl->lm3533 = lm3533;
+ bl->id = pdev->id;
+
+ bl->cb.lm3533 = lm3533;
+ bl->cb.id = lm3533_bl_get_ctrlbank_id(bl);
+ bl->cb.dev = NULL; /* until registered */
+
+ memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = LM3533_BL_MAX_BRIGHTNESS;
+ props.brightness = pdata->default_brightness;
+ bd = backlight_device_register(pdata->name, pdev->dev.parent, bl,
+ &lm3533_bl_ops, &props);
+ if (IS_ERR(bd)) {
+ dev_err(&pdev->dev, "failed to register backlight device\n");
+ ret = PTR_ERR(bd);
+ goto err_free;
+ }
+
+ bl->bd = bd;
+ bl->cb.dev = &bl->bd->dev;
+
+ platform_set_drvdata(pdev, bl);
+
+ ret = sysfs_create_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to create sysfs attributes\n");
+ goto err_unregister;
+ }
+
+ backlight_update_status(bd);
+
+ ret = lm3533_bl_setup(bl, pdata);
+ if (ret)
+ goto err_sysfs_remove;
+
+ ret = lm3533_ctrlbank_enable(&bl->cb);
+ if (ret)
+ goto err_sysfs_remove;
+
+ return 0;
+
+err_sysfs_remove:
+ sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
+err_unregister:
+ backlight_device_unregister(bd);
+err_free:
+ kfree(bl);
+
+ return ret;
+}
+
+static int __devexit lm3533_bl_remove(struct platform_device *pdev)
+{
+ struct lm3533_bl *bl = platform_get_drvdata(pdev);
+ struct backlight_device *bd = bl->bd;
+
+ dev_dbg(&bd->dev, "%s\n", __func__);
+
+ bd->props.power = FB_BLANK_POWERDOWN;
+ bd->props.brightness = 0;
+
+ lm3533_ctrlbank_disable(&bl->cb);
+ sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
+ backlight_device_unregister(bd);
+ kfree(bl);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int lm3533_bl_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct lm3533_bl *bl = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ return lm3533_ctrlbank_disable(&bl->cb);
+}
+
+static int lm3533_bl_resume(struct platform_device *pdev)
+{
+ struct lm3533_bl *bl = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ return lm3533_ctrlbank_enable(&bl->cb);
+}
+#else
+#define lm3533_bl_suspend NULL
+#define lm3533_bl_resume NULL
+#endif
+
+static void lm3533_bl_shutdown(struct platform_device *pdev)
+{
+ struct lm3533_bl *bl = platform_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ lm3533_ctrlbank_disable(&bl->cb);
+}
+
+static struct platform_driver lm3533_bl_driver = {
+ .driver = {
+ .name = "lm3533-backlight",
+ .owner = THIS_MODULE,
+ },
+ .probe = lm3533_bl_probe,
+ .remove = __devexit_p(lm3533_bl_remove),
+ .shutdown = lm3533_bl_shutdown,
+ .suspend = lm3533_bl_suspend,
+ .resume = lm3533_bl_resume,
+};
+module_platform_driver(lm3533_bl_driver);
+
+MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
+MODULE_DESCRIPTION("LM3533 Backlight driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lm3533-backlight");
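[Editor's sketch] For context, the new driver expects its parent lm3533 MFD to register an "lm3533-backlight" platform device (id 0 or 1) carrying lm3533_bl_platform_data. A board-side sketch, with field names taken from how the driver above uses them; the values and the exact structure layout (which lives in the MFD header) are assumptions:

#include <linux/mfd/lm3533.h>

static struct lm3533_bl_platform_data example_bl_pdata = {
	.name			= "lcd-backlight",
	.default_brightness	= 200,	/* out of LM3533_BL_MAX_BRIGHTNESS (255) */
	.max_current		= 5000,	/* board-specific; see the MFD header for units */
	.pwm			= 0,	/* PWM-input zones disabled */
};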
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index 4161f9e..a9f2c36 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -168,7 +168,8 @@ static int __devinit lms283gf05_probe(struct spi_device *spi)
goto err;
}
- st = kzalloc(sizeof(struct lms283gf05_state), GFP_KERNEL);
+ st = devm_kzalloc(&spi->dev, sizeof(struct lms283gf05_state),
+ GFP_KERNEL);
if (st == NULL) {
dev_err(&spi->dev, "No memory for device state\n");
ret = -ENOMEM;
@@ -178,7 +179,7 @@ static int __devinit lms283gf05_probe(struct spi_device *spi)
ld = lcd_device_register("lms283gf05", &spi->dev, st, &lms_ops);
if (IS_ERR(ld)) {
ret = PTR_ERR(ld);
- goto err2;
+ goto err;
}
st->spi = spi;
@@ -193,8 +194,6 @@ static int __devinit lms283gf05_probe(struct spi_device *spi)
return 0;
-err2:
- kfree(st);
err:
if (pdata != NULL)
gpio_free(pdata->reset_gpio);
@@ -212,8 +211,6 @@ static int __devexit lms283gf05_remove(struct spi_device *spi)
if (pdata != NULL)
gpio_free(pdata->reset_gpio);
- kfree(st);
-
return 0;
}
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index 333949f..6c0f1ac 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -232,23 +232,20 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
struct lcd_device *ld;
int ret;
- lcd = kzalloc(sizeof(struct ltv350qv), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct ltv350qv), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
lcd->spi = spi;
lcd->power = FB_BLANK_POWERDOWN;
- lcd->buffer = kzalloc(8, GFP_KERNEL);
- if (!lcd->buffer) {
- ret = -ENOMEM;
- goto out_free_lcd;
- }
+ lcd->buffer = devm_kzalloc(&spi->dev, 8, GFP_KERNEL);
+ if (!lcd->buffer)
+ return -ENOMEM;
ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops);
- if (IS_ERR(ld)) {
- ret = PTR_ERR(ld);
- goto out_free_buffer;
- }
+ if (IS_ERR(ld))
+ return PTR_ERR(ld);
+
lcd->ld = ld;
ret = ltv350qv_power(lcd, FB_BLANK_UNBLANK);
@@ -261,10 +258,6 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
out_unregister:
lcd_device_unregister(ld);
-out_free_buffer:
- kfree(lcd->buffer);
-out_free_lcd:
- kfree(lcd);
return ret;
}
@@ -274,8 +267,6 @@ static int __devexit ltv350qv_remove(struct spi_device *spi)
ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
lcd_device_unregister(lcd->ld);
- kfree(lcd->buffer);
- kfree(lcd);
return 0;
}
@@ -310,7 +301,6 @@ static void ltv350qv_shutdown(struct spi_device *spi)
static struct spi_driver ltv350qv_driver = {
.driver = {
.name = "ltv350qv",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index 0175bfb..bfdc5fb 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -18,6 +18,8 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -168,7 +170,7 @@ static int omapbl_probe(struct platform_device *pdev)
dev->props.brightness = pdata->default_intensity;
omapbl_update_status(dev);
- printk(KERN_INFO "OMAP LCD backlight initialised\n");
+ pr_info("OMAP LCD backlight initialised\n");
return 0;
}
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
index c65853c..c092159 100644
--- a/drivers/video/backlight/pcf50633-backlight.c
+++ b/drivers/video/backlight/pcf50633-backlight.c
@@ -111,6 +111,7 @@ static int __devinit pcf50633_bl_probe(struct platform_device *pdev)
if (!pcf_bl)
return -ENOMEM;
+ memset(&bl_props, 0, sizeof(bl_props));
bl_props.type = BACKLIGHT_RAW;
bl_props.max_brightness = 0x3f;
bl_props.power = FB_BLANK_UNBLANK;
diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
index 6af183d..69b35f0 100644
--- a/drivers/video/backlight/progear_bl.c
+++ b/drivers/video/backlight/progear_bl.c
@@ -15,6 +15,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -68,13 +70,13 @@ static int progearbl_probe(struct platform_device *pdev)
pmu_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, NULL);
if (!pmu_dev) {
- printk("ALI M7101 PMU not found.\n");
+ pr_err("ALI M7101 PMU not found.\n");
return -ENODEV;
}
sb_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
if (!sb_dev) {
- printk("ALI 1533 SB not found.\n");
+ pr_err("ALI 1533 SB not found.\n");
ret = -ENODEV;
goto put_pmu;
}
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index e264f55..6437ae4 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -741,7 +741,7 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
struct backlight_device *bd = NULL;
struct backlight_properties props;
- lcd = kzalloc(sizeof(struct s6e63m0), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct s6e63m0), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
@@ -751,7 +751,7 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0) {
dev_err(&spi->dev, "spi setup failed.\n");
- goto out_free_lcd;
+ return ret;
}
lcd->spi = spi;
@@ -760,14 +760,12 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
lcd->lcd_pd = (struct lcd_platform_data *)spi->dev.platform_data;
if (!lcd->lcd_pd) {
dev_err(&spi->dev, "platform data is NULL.\n");
- goto out_free_lcd;
+ return -EFAULT;
}
ld = lcd_device_register("s6e63m0", &spi->dev, lcd, &s6e63m0_lcd_ops);
- if (IS_ERR(ld)) {
- ret = PTR_ERR(ld);
- goto out_free_lcd;
- }
+ if (IS_ERR(ld))
+ return PTR_ERR(ld);
lcd->ld = ld;
@@ -824,8 +822,6 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
out_lcd_unregister:
lcd_device_unregister(ld);
-out_free_lcd:
- kfree(lcd);
return ret;
}
@@ -838,7 +834,6 @@ static int __devexit s6e63m0_remove(struct spi_device *spi)
device_remove_file(&spi->dev, &dev_attr_gamma_mode);
backlight_device_unregister(lcd->bd);
lcd_device_unregister(lcd->ld);
- kfree(lcd);
return 0;
}
@@ -899,7 +894,6 @@ static void s6e63m0_shutdown(struct spi_device *spi)
static struct spi_driver s6e63m0_driver = {
.driver = {
.name = "s6e63m0",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = s6e63m0_probe,
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 2368b8e..02444d0 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -349,7 +349,7 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
if (err)
return err;
- lcd = kzalloc(sizeof(struct tdo24m), GFP_KERNEL);
+ lcd = devm_kzalloc(&spi->dev, sizeof(struct tdo24m), GFP_KERNEL);
if (!lcd)
return -ENOMEM;
@@ -357,11 +357,9 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
lcd->power = FB_BLANK_POWERDOWN;
lcd->mode = MODE_VGA; /* default to VGA */
- lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
- if (lcd->buf == NULL) {
- kfree(lcd);
+ lcd->buf = devm_kzalloc(&spi->dev, TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
+ if (lcd->buf == NULL)
return -ENOMEM;
- }
m = &lcd->msg;
x = &lcd->xfer;
@@ -383,15 +381,13 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
break;
default:
dev_err(&spi->dev, "Unsupported model");
- goto out_free;
+ return -EINVAL;
}
lcd->lcd_dev = lcd_device_register("tdo24m", &spi->dev,
lcd, &tdo24m_ops);
- if (IS_ERR(lcd->lcd_dev)) {
- err = PTR_ERR(lcd->lcd_dev);
- goto out_free;
- }
+ if (IS_ERR(lcd->lcd_dev))
+ return PTR_ERR(lcd->lcd_dev);
dev_set_drvdata(&spi->dev, lcd);
err = tdo24m_power(lcd, FB_BLANK_UNBLANK);
@@ -402,9 +398,6 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
out_unregister:
lcd_device_unregister(lcd->lcd_dev);
-out_free:
- kfree(lcd->buf);
- kfree(lcd);
return err;
}
@@ -414,8 +407,6 @@ static int __devexit tdo24m_remove(struct spi_device *spi)
tdo24m_power(lcd, FB_BLANK_POWERDOWN);
lcd_device_unregister(lcd->lcd_dev);
- kfree(lcd->buf);
- kfree(lcd);
return 0;
}
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 2b241ab..0d54e60 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -82,8 +82,11 @@ static int __devinit tosa_bl_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct backlight_properties props;
- struct tosa_bl_data *data = kzalloc(sizeof(struct tosa_bl_data), GFP_KERNEL);
+ struct tosa_bl_data *data;
int ret = 0;
+
+ data = devm_kzalloc(&client->dev, sizeof(struct tosa_bl_data),
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -92,7 +95,7 @@ static int __devinit tosa_bl_probe(struct i2c_client *client,
ret = gpio_request(TOSA_GPIO_BL_C20MA, "backlight");
if (ret) {
dev_dbg(&data->bl->dev, "Unable to request gpio!\n");
- goto err_gpio_bl;
+ return ret;
}
ret = gpio_direction_output(TOSA_GPIO_BL_C20MA, 0);
if (ret)
@@ -122,8 +125,6 @@ err_reg:
data->bl = NULL;
err_gpio_dir:
gpio_free(TOSA_GPIO_BL_C20MA);
-err_gpio_bl:
- kfree(data);
return ret;
}
@@ -136,8 +137,6 @@ static int __devexit tosa_bl_remove(struct i2c_client *client)
gpio_free(TOSA_GPIO_BL_C20MA);
- kfree(data);
-
return 0;
}
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 2231aec..47823b8 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -174,7 +174,8 @@ static int __devinit tosa_lcd_probe(struct spi_device *spi)
int ret;
struct tosa_lcd_data *data;
- data = kzalloc(sizeof(struct tosa_lcd_data), GFP_KERNEL);
+ data = devm_kzalloc(&spi->dev, sizeof(struct tosa_lcd_data),
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -187,7 +188,7 @@ static int __devinit tosa_lcd_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0)
- goto err_spi;
+ return ret;
data->spi = spi;
dev_set_drvdata(&spi->dev, data);
@@ -224,8 +225,6 @@ err_gpio_dir:
gpio_free(TOSA_GPIO_TG_ON);
err_gpio_tg:
dev_set_drvdata(&spi->dev, NULL);
-err_spi:
- kfree(data);
return ret;
}
@@ -242,7 +241,6 @@ static int __devexit tosa_lcd_remove(struct spi_device *spi)
gpio_free(TOSA_GPIO_TG_ON);
dev_set_drvdata(&spi->dev, NULL);
- kfree(data);
return 0;
}
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index 5d365de..9e5517a 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -194,6 +194,7 @@ static int wm831x_backlight_probe(struct platform_device *pdev)
data->current_brightness = 0;
data->isink_reg = isink_reg;
+ memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = max_isel;
bl = backlight_device_register("wm831x", &pdev->dev, data,
diff --git a/drivers/video/bfin_adv7393fb.c b/drivers/video/bfin_adv7393fb.c
index 633e9fe..d0f121b 100644
--- a/drivers/video/bfin_adv7393fb.c
+++ b/drivers/video/bfin_adv7393fb.c
@@ -353,18 +353,16 @@ adv7393_read_proc(char *page, char **start, off_t off,
static int
adv7393_write_proc(struct file *file, const char __user * buffer,
- unsigned long count, void *data)
+ size_t count, void *data)
{
struct adv7393fb_device *fbdev = data;
- char line[8];
unsigned int val;
int ret;
- ret = copy_from_user(line, buffer, count);
+ ret = kstrtouint_from_user(buffer, count, 0, &val);
if (ret)
return -EFAULT;
- val = simple_strtoul(line, NULL, 0);
adv7393_write(fbdev->client, val >> 8, val & 0xff);
return count;
@@ -414,14 +412,14 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
if (ret) {
dev_err(&client->dev, "PPI0_FS3 GPIO request failed\n");
ret = -EBUSY;
- goto out_8;
+ goto free_fbdev;
}
}
if (peripheral_request_list(ppi_pins, DRIVER_NAME)) {
dev_err(&client->dev, "requesting PPI peripheral failed\n");
ret = -EFAULT;
- goto out_8;
+ goto free_gpio;
}
fbdev->fb_mem =
@@ -432,7 +430,7 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
dev_err(&client->dev, "couldn't allocate dma buffer (%d bytes)\n",
(u32) fbdev->fb_len);
ret = -ENOMEM;
- goto out_7;
+ goto free_ppi_pins;
}
fbdev->info.screen_base = (void *)fbdev->fb_mem;
@@ -464,27 +462,27 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
if (!fbdev->info.pseudo_palette) {
dev_err(&client->dev, "failed to allocate pseudo_palette\n");
ret = -ENOMEM;
- goto out_6;
+ goto free_fb_mem;
}
if (fb_alloc_cmap(&fbdev->info.cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
dev_err(&client->dev, "failed to allocate colormap (%d entries)\n",
BFIN_LCD_NBR_PALETTE_ENTRIES);
ret = -EFAULT;
- goto out_5;
+ goto free_palette;
}
if (request_dma(CH_PPI, "BF5xx_PPI_DMA") < 0) {
dev_err(&client->dev, "unable to request PPI DMA\n");
ret = -EFAULT;
- goto out_4;
+ goto free_cmap;
}
if (request_irq(IRQ_PPI_ERROR, ppi_irq_error, 0,
"PPI ERROR", fbdev) < 0) {
dev_err(&client->dev, "unable to request PPI ERROR IRQ\n");
ret = -EFAULT;
- goto out_3;
+ goto free_ch_ppi;
}
fbdev->open = 0;
@@ -494,14 +492,14 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
if (ret) {
dev_err(&client->dev, "i2c attach: init error\n");
- goto out_1;
+ goto free_irq_ppi;
}
if (register_framebuffer(&fbdev->info) < 0) {
dev_err(&client->dev, "unable to register framebuffer\n");
ret = -EFAULT;
- goto out_1;
+ goto free_irq_ppi;
}
dev_info(&client->dev, "fb%d: %s frame buffer device\n",
@@ -512,7 +510,7 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
if (!entry) {
dev_err(&client->dev, "unable to create /proc entry\n");
ret = -EFAULT;
- goto out_0;
+ goto free_fb;
}
entry->read_proc = adv7393_read_proc;
@@ -521,22 +519,25 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
return 0;
- out_0:
+free_fb:
unregister_framebuffer(&fbdev->info);
- out_1:
+free_irq_ppi:
free_irq(IRQ_PPI_ERROR, fbdev);
- out_3:
+free_ch_ppi:
free_dma(CH_PPI);
- out_4:
- dma_free_coherent(NULL, fbdev->fb_len, fbdev->fb_mem,
- fbdev->dma_handle);
- out_5:
+free_cmap:
fb_dealloc_cmap(&fbdev->info.cmap);
- out_6:
+free_palette:
kfree(fbdev->info.pseudo_palette);
- out_7:
+free_fb_mem:
+ dma_free_coherent(NULL, fbdev->fb_len, fbdev->fb_mem,
+ fbdev->dma_handle);
+free_ppi_pins:
peripheral_free_list(ppi_pins);
- out_8:
+free_gpio:
+ if (ANOMALY_05000400)
+ gpio_free(P_IDENT(P_PPI0_FS3));
+free_fbdev:
kfree(fbdev);
return ret;
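[Editor's sketch] The bfin_adv7393fb write handler above now parses the user buffer with kstrtouint_from_user() instead of copying it and calling simple_strtoul(). A stripped-down sketch of that helper in a write handler, with invented names:

static ssize_t example_write(struct file *file, const char __user *buffer,
			     size_t count, void *data)
{
	unsigned int val;
	int ret;

	/* copies from user space and parses in one step; returns 0 on success */
	ret = kstrtouint_from_user(buffer, count, 0, &val);
	if (ret)
		return ret;

	/* ... use 'val' ... */
	return count;
}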
diff --git a/drivers/video/broadsheetfb.c b/drivers/video/broadsheetfb.c
index 377dde3..c95b417 100644
--- a/drivers/video/broadsheetfb.c
+++ b/drivers/video/broadsheetfb.c
@@ -1211,7 +1211,7 @@ static int __devexit broadsheetfb_remove(struct platform_device *dev)
static struct platform_driver broadsheetfb_driver = {
.probe = broadsheetfb_probe,
- .remove = broadsheetfb_remove,
+ .remove = __devexit_p(broadsheetfb_remove),
.driver = {
.owner = THIS_MODULE,
.name = "broadsheetfb",
diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c
index f56699d..eae46f6 100644
--- a/drivers/video/cobalt_lcdfb.c
+++ b/drivers/video/cobalt_lcdfb.c
@@ -1,7 +1,8 @@
/*
- * Cobalt server LCD frame buffer driver.
+ * Cobalt/SEAD3 LCD frame buffer driver.
*
* Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2012 MIPS Technologies, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -62,6 +63,7 @@
#define LCD_CUR_POS(x) ((x) & LCD_CUR_POS_MASK)
#define LCD_TEXT_POS(x) ((x) | LCD_TEXT_MODE)
+#ifdef CONFIG_MIPS_COBALT
static inline void lcd_write_control(struct fb_info *info, u8 control)
{
writel((u32)control << 24, info->screen_base);
@@ -81,6 +83,47 @@ static inline u8 lcd_read_data(struct fb_info *info)
{
return readl(info->screen_base + LCD_DATA_REG_OFFSET) >> 24;
}
+#else
+
+#define LCD_CTL 0x00
+#define LCD_DATA 0x08
+#define CPLD_STATUS 0x10
+#define CPLD_DATA 0x18
+
+static inline void cpld_wait(struct fb_info *info)
+{
+ do {
+ } while (readl(info->screen_base + CPLD_STATUS) & 1);
+}
+
+static inline void lcd_write_control(struct fb_info *info, u8 control)
+{
+ cpld_wait(info);
+ writel(control, info->screen_base + LCD_CTL);
+}
+
+static inline u8 lcd_read_control(struct fb_info *info)
+{
+ cpld_wait(info);
+ readl(info->screen_base + LCD_CTL);
+ cpld_wait(info);
+ return readl(info->screen_base + CPLD_DATA) & 0xff;
+}
+
+static inline void lcd_write_data(struct fb_info *info, u8 data)
+{
+ cpld_wait(info);
+ writel(data, info->screen_base + LCD_DATA);
+}
+
+static inline u8 lcd_read_data(struct fb_info *info)
+{
+ cpld_wait(info);
+ readl(info->screen_base + LCD_DATA);
+ cpld_wait(info);
+ return readl(info->screen_base + CPLD_DATA) & 0xff;
+}
+#endif
static int lcd_busy_wait(struct fb_info *info)
{
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index c2d11fe..e2c96d0 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -224,5 +224,19 @@ config FONT_10x18
big letters. It fits between the sun 12x22 and the normal 8x16 font.
If other fonts are too big or too small for you, say Y, otherwise say N.
+config FONT_AUTOSELECT
+ def_bool y
+ depends on FRAMEBUFFER_CONSOLE || SGI_NEWPORT_CONSOLE || STI_CONSOLE || USB_SISUSBVGA_CON
+ depends on !FONT_8x8
+ depends on !FONT_6x11
+ depends on !FONT_7x14
+ depends on !FONT_PEARL_8x8
+ depends on !FONT_ACORN_8x8
+ depends on !FONT_MINI_4x6
+ depends on !FONT_SUN8x16
+ depends on !FONT_SUN12x22
+ depends on !FONT_10x18
+ select FONT_8x16
+
endmenu
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index f8babbe..345d962 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -507,16 +507,16 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
err = fb_alloc_cmap(&info->cmap, 256, 0);
if (err)
- goto failed;
+ goto failed_cmap;
err = ep93xxfb_alloc_videomem(info);
if (err)
- goto failed;
+ goto failed_videomem;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
err = -ENXIO;
- goto failed;
+ goto failed_resource;
}
/*
@@ -532,7 +532,7 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
fbi->mmio_base = ioremap(res->start, resource_size(res));
if (!fbi->mmio_base) {
err = -ENXIO;
- goto failed;
+ goto failed_resource;
}
strcpy(info->fix.id, pdev->name);
@@ -553,24 +553,24 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
if (err == 0) {
dev_err(info->dev, "No suitable video mode found\n");
err = -EINVAL;
- goto failed;
+ goto failed_mode;
}
if (mach_info->setup) {
err = mach_info->setup(pdev);
if (err)
- return err;
+ goto failed_mode;
}
err = ep93xxfb_check_var(&info->var, info);
if (err)
- goto failed;
+ goto failed_check;
fbi->clk = clk_get(info->dev, NULL);
if (IS_ERR(fbi->clk)) {
err = PTR_ERR(fbi->clk);
fbi->clk = NULL;
- goto failed;
+ goto failed_check;
}
ep93xxfb_set_par(info);
@@ -585,15 +585,17 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
return 0;
failed:
- if (fbi->clk)
- clk_put(fbi->clk);
- if (fbi->mmio_base)
- iounmap(fbi->mmio_base);
- ep93xxfb_dealloc_videomem(info);
- if (&info->cmap)
- fb_dealloc_cmap(&info->cmap);
+ clk_put(fbi->clk);
+failed_check:
if (fbi->mach_info->teardown)
fbi->mach_info->teardown(pdev);
+failed_mode:
+ iounmap(fbi->mmio_base);
+failed_resource:
+ ep93xxfb_dealloc_videomem(info);
+failed_videomem:
+ fb_dealloc_cmap(&info->cmap);
+failed_cmap:
kfree(info);
platform_set_drvdata(pdev, NULL);
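[Editor's sketch] The ep93xx-fb rework above replaces a single catch-all "failed:" label with one label per acquired resource, released in reverse order of acquisition. The general shape of that convention, using hypothetical helpers:

static int grab_first(void)  { return 0; }	/* hypothetical resource helpers */
static int grab_second(void) { return 0; }
static void drop_first(void) { }

static int example_probe(void)
{
	int err;

	err = grab_first();
	if (err)
		return err;

	err = grab_second();
	if (err)
		goto err_drop_first;	/* only undo what already succeeded */

	return 0;

err_drop_first:
	drop_first();
	return err;
}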
diff --git a/drivers/video/exynos/exynos_dp_core.c b/drivers/video/exynos/exynos_dp_core.c
index 2a4481c..a36b2d2 100644
--- a/drivers/video/exynos/exynos_dp_core.c
+++ b/drivers/video/exynos/exynos_dp_core.c
@@ -21,14 +21,14 @@
#include <video/exynos_dp.h>
-#include <plat/cpu.h>
-
#include "exynos_dp_core.h"
static int exynos_dp_init_dp(struct exynos_dp_device *dp)
{
exynos_dp_reset(dp);
+ exynos_dp_swreset(dp);
+
/* SW defined function Normal operation */
exynos_dp_enable_sw_function(dp);
@@ -478,7 +478,7 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
int lane_count;
u8 buf[5];
- u8 *adjust_request;
+ u8 adjust_request[2];
u8 voltage_swing;
u8 pre_emphasis;
u8 training_lane;
@@ -493,8 +493,8 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
/* set training pattern 2 for EQ */
exynos_dp_set_training_pattern(dp, TRAINING_PTN2);
- adjust_request = link_status + (DPCD_ADDR_ADJUST_REQUEST_LANE0_1
- - DPCD_ADDR_LANE0_1_STATUS);
+ adjust_request[0] = link_status[4];
+ adjust_request[1] = link_status[5];
exynos_dp_get_adjust_train(dp, adjust_request);
@@ -566,7 +566,7 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
u8 buf[5];
u32 reg;
- u8 *adjust_request;
+ u8 adjust_request[2];
udelay(400);
@@ -575,8 +575,8 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
lane_count = dp->link_train.lane_count;
if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) {
- adjust_request = link_status + (DPCD_ADDR_ADJUST_REQUEST_LANE0_1
- - DPCD_ADDR_LANE0_1_STATUS);
+ adjust_request[0] = link_status[4];
+ adjust_request[1] = link_status[5];
if (exynos_dp_channel_eq_ok(link_status, lane_count) == 0) {
/* traing pattern Set to Normal */
@@ -770,7 +770,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp,
return -ETIMEDOUT;
}
- mdelay(100);
+ udelay(1);
}
/* Set to use the register calculated M/N video */
@@ -804,7 +804,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp,
return -ETIMEDOUT;
}
- mdelay(100);
+ mdelay(1);
}
if (retval != 0)
@@ -860,7 +860,8 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
return -EINVAL;
}
- dp = kzalloc(sizeof(struct exynos_dp_device), GFP_KERNEL);
+ dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
+ GFP_KERNEL);
if (!dp) {
dev_err(&pdev->dev, "no memory for device data\n");
return -ENOMEM;
@@ -871,8 +872,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
dp->clock = clk_get(&pdev->dev, "dp");
if (IS_ERR(dp->clock)) {
dev_err(&pdev->dev, "failed to get clock\n");
- ret = PTR_ERR(dp->clock);
- goto err_dp;
+ return PTR_ERR(dp->clock);
}
clk_enable(dp->clock);
@@ -884,35 +884,25 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
goto err_clock;
}
- res = request_mem_region(res->start, resource_size(res),
- dev_name(&pdev->dev));
- if (!res) {
- dev_err(&pdev->dev, "failed to request registers region\n");
- ret = -EINVAL;
- goto err_clock;
- }
-
- dp->res = res;
-
- dp->reg_base = ioremap(res->start, resource_size(res));
+ dp->reg_base = devm_request_and_ioremap(&pdev->dev, res);
if (!dp->reg_base) {
dev_err(&pdev->dev, "failed to ioremap\n");
ret = -ENOMEM;
- goto err_req_region;
+ goto err_clock;
}
dp->irq = platform_get_irq(pdev, 0);
if (!dp->irq) {
dev_err(&pdev->dev, "failed to get irq\n");
ret = -ENODEV;
- goto err_ioremap;
+ goto err_clock;
}
- ret = request_irq(dp->irq, exynos_dp_irq_handler, 0,
- "exynos-dp", dp);
+ ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, 0,
+ "exynos-dp", dp);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
- goto err_ioremap;
+ goto err_clock;
}
dp->video_info = pdata->video_info;
@@ -924,7 +914,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
ret = exynos_dp_detect_hpd(dp);
if (ret) {
dev_err(&pdev->dev, "unable to detect hpd\n");
- goto err_irq;
+ goto err_clock;
}
exynos_dp_handle_edid(dp);
@@ -933,7 +923,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
dp->video_info->link_rate);
if (ret) {
dev_err(&pdev->dev, "unable to do link train\n");
- goto err_irq;
+ goto err_clock;
}
exynos_dp_enable_scramble(dp, 1);
@@ -947,23 +937,15 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
ret = exynos_dp_config_video(dp, dp->video_info);
if (ret) {
dev_err(&pdev->dev, "unable to config video\n");
- goto err_irq;
+ goto err_clock;
}
platform_set_drvdata(pdev, dp);
return 0;
-err_irq:
- free_irq(dp->irq, dp);
-err_ioremap:
- iounmap(dp->reg_base);
-err_req_region:
- release_mem_region(res->start, resource_size(res));
err_clock:
clk_put(dp->clock);
-err_dp:
- kfree(dp);
return ret;
}
@@ -976,16 +958,9 @@ static int __devexit exynos_dp_remove(struct platform_device *pdev)
if (pdata && pdata->phy_exit)
pdata->phy_exit();
- free_irq(dp->irq, dp);
- iounmap(dp->reg_base);
-
clk_disable(dp->clock);
clk_put(dp->clock);
- release_mem_region(dp->res->start, resource_size(dp->res));
-
- kfree(dp);
-
return 0;
}
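The probe/remove rework above switches to managed (devm_*) resources, which is why the err_irq/err_ioremap/err_req_region labels and the matching cleanup in remove() disappear: everything obtained through devm_* is released automatically when the device is unbound. A rough sketch of the pattern with a made-up bar driver, not the exynos_dp code:

/* bar_device and bar_irq_handler are placeholders for illustration. */
struct bar_device {
	void *priv;
};

static irqreturn_t bar_irq_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int bar_probe(struct platform_device *pdev)
{
	struct bar_device *bar;
	int irq, ret;

	/* freed automatically when the device is unbound */
	bar = devm_kzalloc(&pdev->dev, sizeof(*bar), GFP_KERNEL);
	if (!bar)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* released automatically as well: no free_irq() in remove() */
	ret = devm_request_irq(&pdev->dev, irq, bar_irq_handler, 0,
			       dev_name(&pdev->dev), bar);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, bar);
	return 0;
}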
diff --git a/drivers/video/exynos/exynos_dp_core.h b/drivers/video/exynos/exynos_dp_core.h
index 90ceaca..1e0f998 100644
--- a/drivers/video/exynos/exynos_dp_core.h
+++ b/drivers/video/exynos/exynos_dp_core.h
@@ -26,7 +26,6 @@ struct link_train {
struct exynos_dp_device {
struct device *dev;
- struct resource *res;
struct clk *clock;
unsigned int irq;
void __iomem *reg_base;
@@ -39,8 +38,10 @@ struct exynos_dp_device {
void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable);
void exynos_dp_stop_video(struct exynos_dp_device *dp);
void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable);
+void exynos_dp_init_analog_param(struct exynos_dp_device *dp);
void exynos_dp_init_interrupt(struct exynos_dp_device *dp);
void exynos_dp_reset(struct exynos_dp_device *dp);
+void exynos_dp_swreset(struct exynos_dp_device *dp);
void exynos_dp_config_interrupt(struct exynos_dp_device *dp);
u32 exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp);
void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable);
diff --git a/drivers/video/exynos/exynos_dp_reg.c b/drivers/video/exynos/exynos_dp_reg.c
index 7ddd67d..bcb0e3a 100644
--- a/drivers/video/exynos/exynos_dp_reg.c
+++ b/drivers/video/exynos/exynos_dp_reg.c
@@ -16,8 +16,6 @@
#include <video/exynos_dp.h>
-#include <plat/cpu.h>
-
#include "exynos_dp_core.h"
#include "exynos_dp_reg.h"
@@ -65,6 +63,28 @@ void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable)
writel(reg, dp->reg_base + EXYNOS_DP_LANE_MAP);
}
+void exynos_dp_init_analog_param(struct exynos_dp_device *dp)
+{
+ u32 reg;
+
+ reg = TX_TERMINAL_CTRL_50_OHM;
+ writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_1);
+
+ reg = SEL_24M | TX_DVDD_BIT_1_0625V;
+ writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_2);
+
+ reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO;
+ writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_3);
+
+ reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM |
+ TX_CUR1_2X | TX_CUR_8_MA;
+ writel(reg, dp->reg_base + EXYNOS_DP_PLL_FILTER_CTL_1);
+
+ reg = CH3_AMP_400_MV | CH2_AMP_400_MV |
+ CH1_AMP_400_MV | CH0_AMP_400_MV;
+ writel(reg, dp->reg_base + EXYNOS_DP_TX_AMP_TUNING_CTL);
+}
+
void exynos_dp_init_interrupt(struct exynos_dp_device *dp)
{
/* Set interrupt pin assertion polarity as high */
@@ -89,8 +109,6 @@ void exynos_dp_reset(struct exynos_dp_device *dp)
{
u32 reg;
- writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET);
-
exynos_dp_stop_video(dp);
exynos_dp_enable_video_mute(dp, 0);
@@ -131,9 +149,15 @@ void exynos_dp_reset(struct exynos_dp_device *dp)
writel(0x00000101, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
+ exynos_dp_init_analog_param(dp);
exynos_dp_init_interrupt(dp);
}
+void exynos_dp_swreset(struct exynos_dp_device *dp)
+{
+ writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET);
+}
+
void exynos_dp_config_interrupt(struct exynos_dp_device *dp)
{
u32 reg;
@@ -271,6 +295,7 @@ void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp,
void exynos_dp_init_analog_func(struct exynos_dp_device *dp)
{
u32 reg;
+ int timeout_loop = 0;
exynos_dp_set_analog_power_down(dp, POWER_ALL, 0);
@@ -282,9 +307,19 @@ void exynos_dp_init_analog_func(struct exynos_dp_device *dp)
writel(reg, dp->reg_base + EXYNOS_DP_DEBUG_CTL);
/* Power up PLL */
- if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED)
+ if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
exynos_dp_set_pll_power_down(dp, 0);
+ while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
+ timeout_loop++;
+ if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
+ dev_err(dp->dev, "failed to get pll lock status\n");
+ return;
+ }
+ usleep_range(10, 20);
+ }
+ }
+
/* Enable Serdes FIFO function and Link symbol clock domain module */
reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2);
reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N
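The PLL power-up change in this file bounds the wait for lock with a retry counter and usleep_range() instead of assuming the PLL locks instantly. The same poll-with-timeout shape in isolation, with placeholder register accessors:

#define FOO_PLL_TIMEOUT_LOOPS	100

/* foo_pll_is_locked() is a hypothetical status-register read. */
static int foo_wait_pll_locked(struct foo_device *foo)
{
	int tries = 0;

	while (!foo_pll_is_locked(foo)) {
		if (++tries > FOO_PLL_TIMEOUT_LOOPS)
			return -ETIMEDOUT;
		/* sleep 10-20 us between polls instead of busy-waiting */
		usleep_range(10, 20);
	}

	return 0;
}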
diff --git a/drivers/video/exynos/exynos_dp_reg.h b/drivers/video/exynos/exynos_dp_reg.h
index 42f608e..125b27c 100644
--- a/drivers/video/exynos/exynos_dp_reg.h
+++ b/drivers/video/exynos/exynos_dp_reg.h
@@ -24,6 +24,12 @@
#define EXYNOS_DP_LANE_MAP 0x35C
+#define EXYNOS_DP_ANALOG_CTL_1 0x370
+#define EXYNOS_DP_ANALOG_CTL_2 0x374
+#define EXYNOS_DP_ANALOG_CTL_3 0x378
+#define EXYNOS_DP_PLL_FILTER_CTL_1 0x37C
+#define EXYNOS_DP_TX_AMP_TUNING_CTL 0x380
+
#define EXYNOS_DP_AUX_HW_RETRY_CTL 0x390
#define EXYNOS_DP_COMMON_INT_STA_1 0x3C4
@@ -166,6 +172,29 @@
#define LANE0_MAP_LOGIC_LANE_2 (0x2 << 0)
#define LANE0_MAP_LOGIC_LANE_3 (0x3 << 0)
+/* EXYNOS_DP_ANALOG_CTL_1 */
+#define TX_TERMINAL_CTRL_50_OHM (0x1 << 4)
+
+/* EXYNOS_DP_ANALOG_CTL_2 */
+#define SEL_24M (0x1 << 3)
+#define TX_DVDD_BIT_1_0625V (0x4 << 0)
+
+/* EXYNOS_DP_ANALOG_CTL_3 */
+#define DRIVE_DVDD_BIT_1_0625V (0x4 << 5)
+#define VCO_BIT_600_MICRO (0x5 << 0)
+
+/* EXYNOS_DP_PLL_FILTER_CTL_1 */
+#define PD_RING_OSC (0x1 << 6)
+#define AUX_TERMINAL_CTRL_50_OHM (0x2 << 4)
+#define TX_CUR1_2X (0x1 << 2)
+#define TX_CUR_8_MA (0x2 << 0)
+
+/* EXYNOS_DP_TX_AMP_TUNING_CTL */
+#define CH3_AMP_400_MV (0x0 << 24)
+#define CH2_AMP_400_MV (0x0 << 16)
+#define CH1_AMP_400_MV (0x0 << 8)
+#define CH0_AMP_400_MV (0x0 << 0)
+
/* EXYNOS_DP_AUX_HW_RETRY_CTL */
#define AUX_BIT_PERIOD_EXPECTED_DELAY(x) (((x) & 0x7) << 8)
#define AUX_HW_RETRY_INTERVAL_MASK (0x3 << 3)
diff --git a/drivers/video/exynos/exynos_mipi_dsi.c b/drivers/video/exynos/exynos_mipi_dsi.c
index 115e1b0..9908e75 100644
--- a/drivers/video/exynos/exynos_mipi_dsi.c
+++ b/drivers/video/exynos/exynos_mipi_dsi.c
@@ -58,7 +58,7 @@ static struct mipi_dsim_platform_data *to_dsim_plat(struct platform_device
}
static struct regulator_bulk_data supplies[] = {
- { .supply = "vdd10", },
+ { .supply = "vdd11", },
{ .supply = "vdd18", },
};
@@ -102,6 +102,8 @@ static void exynos_mipi_update_cfg(struct mipi_dsim_device *dsim)
/* set display timing. */
exynos_mipi_dsi_set_display_mode(dsim, dsim->dsim_config);
+ exynos_mipi_dsi_init_interrupt(dsim);
+
/*
* data from Display controller(FIMD) is transferred in video mode
* but in case of command mode, all settings are updated to registers.
@@ -413,27 +415,30 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
goto err_platform_get_irq;
}
+ init_completion(&dsim_wr_comp);
+ init_completion(&dsim_rd_comp);
+ platform_set_drvdata(pdev, dsim);
+
ret = request_irq(dsim->irq, exynos_mipi_dsi_interrupt_handler,
- IRQF_SHARED, pdev->name, dsim);
+ IRQF_SHARED, dev_name(&pdev->dev), dsim);
if (ret != 0) {
dev_err(&pdev->dev, "failed to request dsim irq\n");
ret = -EINVAL;
goto err_bind;
}
- init_completion(&dsim_wr_comp);
- init_completion(&dsim_rd_comp);
-
- /* enable interrupt */
+ /* enable interrupts */
exynos_mipi_dsi_init_interrupt(dsim);
/* initialize mipi-dsi client(lcd panel). */
if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->probe)
dsim_ddi->dsim_lcd_drv->probe(dsim_ddi->dsim_lcd_dev);
- /* in case that mipi got enabled at bootloader. */
- if (dsim_pd->enabled)
- goto out;
+ /* in case mipi-dsi has been enabled by bootloader */
+ if (dsim_pd->enabled) {
+ exynos_mipi_regulator_enable(dsim);
+ goto done;
+ }
/* lcd panel power on. */
if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->power_on)
@@ -453,12 +458,11 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
dsim->suspended = false;
-out:
+done:
platform_set_drvdata(pdev, dsim);
- dev_dbg(&pdev->dev, "mipi-dsi driver(%s mode) has been probed.\n",
- (dsim_config->e_interface == DSIM_COMMAND) ?
- "CPU" : "RGB");
+ dev_dbg(&pdev->dev, "%s() completed successfully (%s mode)\n", __func__,
+ dsim_config->e_interface == DSIM_COMMAND ? "CPU" : "RGB");
return 0;
@@ -515,10 +519,10 @@ static int __devexit exynos_mipi_dsi_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int exynos_mipi_dsi_suspend(struct platform_device *pdev,
- pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int exynos_mipi_dsi_suspend(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
@@ -544,8 +548,9 @@ static int exynos_mipi_dsi_suspend(struct platform_device *pdev,
return 0;
}
-static int exynos_mipi_dsi_resume(struct platform_device *pdev)
+static int exynos_mipi_dsi_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
@@ -577,19 +582,19 @@ static int exynos_mipi_dsi_resume(struct platform_device *pdev)
return 0;
}
-#else
-#define exynos_mipi_dsi_suspend NULL
-#define exynos_mipi_dsi_resume NULL
#endif
+static const struct dev_pm_ops exynos_mipi_dsi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(exynos_mipi_dsi_suspend, exynos_mipi_dsi_resume)
+};
+
static struct platform_driver exynos_mipi_dsi_driver = {
.probe = exynos_mipi_dsi_probe,
.remove = __devexit_p(exynos_mipi_dsi_remove),
- .suspend = exynos_mipi_dsi_suspend,
- .resume = exynos_mipi_dsi_resume,
.driver = {
.name = "exynos-mipi-dsim",
.owner = THIS_MODULE,
+ .pm = &exynos_mipi_dsi_pm_ops,
},
};
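The last hunk above drops the legacy platform .suspend/.resume callbacks in favour of dev_pm_ops guarded by CONFIG_PM_SLEEP. The bare shape of that conversion, with placeholder callbacks and driver name:

#ifdef CONFIG_PM_SLEEP
static int baz_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int baz_resume(struct device *dev)
{
	/* reprogram the hardware */
	return 0;
}
#endif

static const struct dev_pm_ops baz_pm_ops = {
	/* expands to nothing when CONFIG_PM_SLEEP is disabled */
	SET_SYSTEM_SLEEP_PM_OPS(baz_suspend, baz_resume)
};

static struct platform_driver baz_driver = {
	.driver = {
		.name	= "baz",
		.owner	= THIS_MODULE,
		.pm	= &baz_pm_ops,
	},
};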
diff --git a/drivers/video/exynos/exynos_mipi_dsi_common.c b/drivers/video/exynos/exynos_mipi_dsi_common.c
index 14909c1..47b533a 100644
--- a/drivers/video/exynos/exynos_mipi_dsi_common.c
+++ b/drivers/video/exynos/exynos_mipi_dsi_common.c
@@ -76,33 +76,25 @@ static unsigned int dpll_table[15] = {
irqreturn_t exynos_mipi_dsi_interrupt_handler(int irq, void *dev_id)
{
- unsigned int intsrc = 0;
- unsigned int intmsk = 0;
- struct mipi_dsim_device *dsim = NULL;
-
- dsim = dev_id;
- if (!dsim) {
- dev_dbg(dsim->dev, KERN_ERR "%s:error: wrong parameter\n",
- __func__);
- return IRQ_HANDLED;
+ struct mipi_dsim_device *dsim = dev_id;
+ unsigned int intsrc, intmsk;
+
+ if (dsim == NULL) {
+ pr_err("%s: wrong parameter\n", __func__);
+ return IRQ_NONE;
}
intsrc = exynos_mipi_dsi_read_interrupt(dsim);
intmsk = exynos_mipi_dsi_read_interrupt_mask(dsim);
+ intmsk = ~intmsk & intsrc;
- intmsk = ~(intmsk) & intsrc;
-
- switch (intmsk) {
- case INTMSK_RX_DONE:
+ if (intsrc & INTMSK_RX_DONE) {
complete(&dsim_rd_comp);
dev_dbg(dsim->dev, "MIPI INTMSK_RX_DONE\n");
- break;
- case INTMSK_FIFO_EMPTY:
+ }
+ if (intsrc & INTMSK_FIFO_EMPTY) {
complete(&dsim_wr_comp);
dev_dbg(dsim->dev, "MIPI INTMSK_FIFO_EMPTY\n");
- break;
- default:
- break;
}
exynos_mipi_dsi_clear_interrupt(dsim, intmsk);
@@ -738,11 +730,11 @@ int exynos_mipi_dsi_set_display_mode(struct mipi_dsim_device *dsim,
if (dsim_config->auto_vertical_cnt == 0) {
exynos_mipi_dsi_set_main_disp_vporch(dsim,
dsim_config->cmd_allow,
- timing->upper_margin,
- timing->lower_margin);
+ timing->lower_margin,
+ timing->upper_margin);
exynos_mipi_dsi_set_main_disp_hporch(dsim,
- timing->left_margin,
- timing->right_margin);
+ timing->right_margin,
+ timing->left_margin);
exynos_mipi_dsi_set_main_disp_sync_area(dsim,
timing->vsync_len,
timing->hsync_len);
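The interrupt-handler rewrite at the top of this file tests each unmasked status bit on its own, since several can be pending in one invocation, instead of switch-ing on the combined value. A generic handler along those lines; the bit names and accessors are invented, and returning IRQ_NONE when no status bit is set is the usual extra step for shared lines rather than something this hunk does:

/* QUX_IRQ_* masks and the qux_*() register accessors are hypothetical. */
struct qux_device {
	struct completion rx_done;
	struct completion tx_done;
};

static irqreturn_t qux_irq_handler(int irq, void *dev_id)
{
	struct qux_device *qux = dev_id;
	u32 status;

	status = qux_read_irq_status(qux);
	if (!status)
		return IRQ_NONE;	/* not ours: the line is shared */

	if (status & QUX_IRQ_RX_DONE)
		complete(&qux->rx_done);

	if (status & QUX_IRQ_TX_EMPTY)
		complete(&qux->tx_done);

	qux_clear_irq_status(qux, status);	/* ack only what we saw */

	return IRQ_HANDLED;
}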
diff --git a/drivers/video/exynos/s6e8ax0.c b/drivers/video/exynos/s6e8ax0.c
index 4aa9ac6..05d080b 100644
--- a/drivers/video/exynos/s6e8ax0.c
+++ b/drivers/video/exynos/s6e8ax0.c
@@ -293,9 +293,20 @@ static void s6e8ax0_panel_cond(struct s6e8ax0 *lcd)
0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0,
0xc8, 0x08, 0x48, 0xc1, 0x00, 0xc1, 0xff, 0xff, 0xc8
};
+ static const unsigned char data_to_send_panel_reverse[] = {
+ 0xf8, 0x19, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00, 0x3c, 0x7d,
+ 0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00, 0x00, 0x20, 0x04, 0x08,
+ 0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0,
+ 0xc1, 0x01, 0x41, 0xc1, 0x00, 0xc1, 0xf6, 0xf6, 0xc1
+ };
- ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
- data_to_send, ARRAY_SIZE(data_to_send));
+ if (lcd->dsim_dev->panel_reverse)
+ ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
+ data_to_send_panel_reverse,
+ ARRAY_SIZE(data_to_send_panel_reverse));
+ else
+ ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
+ data_to_send, ARRAY_SIZE(data_to_send));
}
static void s6e8ax0_display_cond(struct s6e8ax0 *lcd)
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index c27e153..1ddeb11 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -23,7 +23,7 @@
#include <linux/rmap.h>
#include <linux/pagemap.h>
-struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
+static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
void *screen_base = (void __force *) info->screen_base;
struct page *page;
@@ -107,6 +107,10 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
/* protect against the workqueue changing the page list */
mutex_lock(&fbdefio->lock);
+ /* first write in this cycle, notify the driver */
+ if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
+ fbdefio->first_io(info);
+
/*
* We want the page to remain locked from ->page_mkwrite until
* the PTE is marked dirty to avoid page_mkclean() being called
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index c6ce416..0dff12a 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1046,20 +1046,29 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
int
fb_blank(struct fb_info *info, int blank)
{
- int ret = -EINVAL;
+ struct fb_event event;
+ int ret = -EINVAL, early_ret;
if (blank > FB_BLANK_POWERDOWN)
blank = FB_BLANK_POWERDOWN;
+ event.info = info;
+ event.data = &blank;
+
+ early_ret = fb_notifier_call_chain(FB_EARLY_EVENT_BLANK, &event);
+
if (info->fbops->fb_blank)
ret = info->fbops->fb_blank(blank, info);
- if (!ret) {
- struct fb_event event;
-
- event.info = info;
- event.data = &blank;
+ if (!ret)
fb_notifier_call_chain(FB_EVENT_BLANK, &event);
+ else {
+ /*
+ * if fb_blank() failed, revert the effects of
+ * the early blank event.
+ */
+ if (!early_ret)
+ fb_notifier_call_chain(FB_R_EARLY_EVENT_BLANK, &event);
}
return ret;
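The fb_blank() change notifies listeners twice: an early event before the blank is attempted, then either the final event on success or a revert event on failure so early listeners can undo whatever they prepared. The control flow in isolation, with hypothetical foo_notify()/foo_apply() helpers and FOO_* event codes:

static int foo_set_state(struct foo_device *foo, int new_state)
{
	int ret, early_ret;

	/* tell listeners what is about to happen; they may refuse */
	early_ret = foo_notify(foo, FOO_EARLY_EVENT, &new_state);

	ret = foo_apply(foo, new_state);
	if (!ret) {
		/* success: the change is final */
		foo_notify(foo, FOO_EVENT, &new_state);
	} else if (!early_ret) {
		/* failure: let the early listeners roll back */
		foo_notify(foo, FOO_REVERT_EARLY_EVENT, &new_state);
	}

	return ret;
}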
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index 67afa9c..a55e366 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -80,6 +80,8 @@ EXPORT_SYMBOL(framebuffer_alloc);
*/
void framebuffer_release(struct fb_info *info)
{
+ if (!info)
+ return;
kfree(info->apertures);
kfree(info);
}
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index 6af3f16..458c006 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -834,7 +834,6 @@ static void update_lcdc(struct fb_info *info)
diu_ops.set_pixel_clock(var->pixclock);
out_be32(&hw->syn_pol, 0); /* SYNC SIGNALS POLARITY */
- out_be32(&hw->thresholds, 0x00037800); /* The Thresholds */
out_be32(&hw->int_status, 0); /* INTERRUPT STATUS */
out_be32(&hw->plut, 0x01F5F666);
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index 02fd226..bdcbfba 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -680,6 +680,7 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev,
+ dinfo->fb.size);
if (!dinfo->aperture.virtual) {
ERR_MSG("Cannot remap FB region.\n");
+ agp_backend_release(bridge);
cleanup(dinfo);
return -ENODEV;
}
@@ -689,6 +690,7 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev,
INTEL_REG_SIZE);
if (!dinfo->mmio_base) {
ERR_MSG("Cannot remap MMIO region.\n");
+ agp_backend_release(bridge);
cleanup(dinfo);
return -ENODEV;
}
diff --git a/drivers/video/matrox/matroxfb_maven.c b/drivers/video/matrox/matroxfb_maven.c
index 31b8f67..217678e 100644
--- a/drivers/video/matrox/matroxfb_maven.c
+++ b/drivers/video/matrox/matroxfb_maven.c
@@ -1243,6 +1243,7 @@ static int maven_probe(struct i2c_client *client,
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_WORD_DATA |
I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_NOSTART |
I2C_FUNC_PROTOCOL_MANGLING))
goto ERROR0;
if (!(data = kzalloc(sizeof(*data), GFP_KERNEL))) {
diff --git a/drivers/video/mb862xx/mb862xx-i2c.c b/drivers/video/mb862xx/mb862xx-i2c.c
index 273769b..c87e17a 100644
--- a/drivers/video/mb862xx/mb862xx-i2c.c
+++ b/drivers/video/mb862xx/mb862xx-i2c.c
@@ -68,7 +68,7 @@ static int mb862xx_i2c_read_byte(struct i2c_adapter *adap, u8 *byte, int last)
return 1;
}
-void mb862xx_i2c_stop(struct i2c_adapter *adap)
+static void mb862xx_i2c_stop(struct i2c_adapter *adap)
{
struct mb862xxfb_par *par = adap->algo_data;
diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/mb862xx/mb862xxfbdrv.c
index 11a7a33..00ce1f3 100644
--- a/drivers/video/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/mb862xx/mb862xxfbdrv.c
@@ -579,7 +579,7 @@ static ssize_t mb862xxfb_show_dispregs(struct device *dev,
static DEVICE_ATTR(dispregs, 0444, mb862xxfb_show_dispregs, NULL);
-irqreturn_t mb862xx_intr(int irq, void *dev_id)
+static irqreturn_t mb862xx_intr(int irq, void *dev_id)
{
struct mb862xxfb_par *par = (struct mb862xxfb_par *) dev_id;
unsigned long reg_ist, mask;
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c
index 55bf619..85e4f44 100644
--- a/drivers/video/mbx/mbxfb.c
+++ b/drivers/video/mbx/mbxfb.c
@@ -950,7 +950,7 @@ static int __devinit mbxfb_probe(struct platform_device *dev)
mfbi->fb_virt_addr = ioremap_nocache(mfbi->fb_phys_addr,
res_size(mfbi->fb_req));
- if (!mfbi->reg_virt_addr) {
+ if (!mfbi->fb_virt_addr) {
dev_err(&dev->dev, "failed to ioremap frame buffer\n");
ret = -EINVAL;
goto err4;
@@ -1045,7 +1045,7 @@ static int __devexit mbxfb_remove(struct platform_device *dev)
static struct platform_driver mbxfb_driver = {
.probe = mbxfb_probe,
- .remove = mbxfb_remove,
+ .remove = __devexit_p(mbxfb_remove),
.suspend = mbxfb_suspend,
.resume = mbxfb_resume,
.driver = {
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index 6c6bc57..49619b4 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -41,12 +41,14 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/pinctrl/consumer.h>
-#include <mach/mxsfb.h>
+#include <linux/mxsfb.h>
#define REG_SET 4
#define REG_CLR 8
@@ -750,16 +752,43 @@ static void __devexit mxsfb_free_videomem(struct mxsfb_info *host)
}
}
+static struct platform_device_id mxsfb_devtype[] = {
+ {
+ .name = "imx23-fb",
+ .driver_data = MXSFB_V3,
+ }, {
+ .name = "imx28-fb",
+ .driver_data = MXSFB_V4,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, mxsfb_devtype);
+
+static const struct of_device_id mxsfb_dt_ids[] = {
+ { .compatible = "fsl,imx23-lcdif", .data = &mxsfb_devtype[0], },
+ { .compatible = "fsl,imx28-lcdif", .data = &mxsfb_devtype[1], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxsfb_dt_ids);
+
static int __devinit mxsfb_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id =
+ of_match_device(mxsfb_dt_ids, &pdev->dev);
struct mxsfb_platform_data *pdata = pdev->dev.platform_data;
struct resource *res;
struct mxsfb_info *host;
struct fb_info *fb_info;
struct fb_modelist *modelist;
struct pinctrl *pinctrl;
+ int panel_enable;
+ enum of_gpio_flags flags;
int i, ret;
+ if (of_id)
+ pdev->id_entry = of_id->data;
+
if (!pdata) {
dev_err(&pdev->dev, "No platformdata. Giving up\n");
return -ENODEV;
@@ -807,6 +836,22 @@ static int __devinit mxsfb_probe(struct platform_device *pdev)
goto error_getclock;
}
+ panel_enable = of_get_named_gpio_flags(pdev->dev.of_node,
+ "panel-enable-gpios", 0, &flags);
+ if (gpio_is_valid(panel_enable)) {
+ unsigned long f = GPIOF_OUT_INIT_HIGH;
+ if (flags == OF_GPIO_ACTIVE_LOW)
+ f = GPIOF_OUT_INIT_LOW;
+ ret = devm_gpio_request_one(&pdev->dev, panel_enable,
+ f, "panel-enable");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to request gpio %d: %d\n",
+ panel_enable, ret);
+ goto error_panel_enable;
+ }
+ }
+
fb_info->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
if (!fb_info->pseudo_palette) {
ret = -ENOMEM;
@@ -854,6 +899,7 @@ error_register:
error_init_fb:
kfree(fb_info->pseudo_palette);
error_pseudo_pallette:
+error_panel_enable:
clk_put(host->clk);
error_getclock:
error_getpin:
@@ -889,25 +935,26 @@ static int __devexit mxsfb_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_device_id mxsfb_devtype[] = {
- {
- .name = "imx23-fb",
- .driver_data = MXSFB_V3,
- }, {
- .name = "imx28-fb",
- .driver_data = MXSFB_V4,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(platform, mxsfb_devtype);
+static void mxsfb_shutdown(struct platform_device *pdev)
+{
+ struct fb_info *fb_info = platform_get_drvdata(pdev);
+ struct mxsfb_info *host = to_imxfb_host(fb_info);
+
+ /*
+ * Force stop the LCD controller as keeping it running during reboot
+ * might interfere with the BootROM's boot mode pads sampling.
+ */
+ writel(CTRL_RUN, host->base + LCDC_CTRL + REG_CLR);
+}
static struct platform_driver mxsfb_driver = {
.probe = mxsfb_probe,
.remove = __devexit_p(mxsfb_remove),
+ .shutdown = mxsfb_shutdown,
.id_table = mxsfb_devtype,
.driver = {
.name = DRIVER_NAME,
+ .of_match_table = mxsfb_dt_ids,
},
};
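The device-tree support added above pairs an of_device_id table with the existing platform_device_id table: each compatible string's .data points at the matching legacy id entry, and the probe copies it into pdev->id_entry so the rest of the driver keeps reading driver_data the same way for DT and non-DT boots. A stripped-down version of that pairing with placeholder names:

static const struct platform_device_id quux_devtype[] = {
	{ .name = "quux-v1", .driver_data = 1 },
	{ .name = "quux-v2", .driver_data = 2 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, quux_devtype);

static const struct of_device_id quux_dt_ids[] = {
	{ .compatible = "vendor,quux-v1", .data = &quux_devtype[0] },
	{ .compatible = "vendor,quux-v2", .data = &quux_devtype[1] },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, quux_dt_ids);

static int quux_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
		of_match_device(quux_dt_ids, &pdev->dev);

	/*
	 * DT boot: borrow the matching legacy id so later code can keep
	 * using pdev->id_entry->driver_data for the hardware variant.
	 */
	if (of_id)
		pdev->id_entry = of_id->data;

	return 0;
}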
diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
index 1e7536d..b48f95f 100644
--- a/drivers/video/omap/Kconfig
+++ b/drivers/video/omap/Kconfig
@@ -39,14 +39,6 @@ config FB_OMAP_LCD_MIPID
the Mobile Industry Processor Interface DBI-C/DCS
specification. (Supported LCDs: Philips LPH8923, Sharp LS041Y3)
-config FB_OMAP_BOOTLOADER_INIT
- bool "Check bootloader initialization"
- depends on FB_OMAP
- help
- Say Y here if you want to enable checking if the bootloader has
- already initialized the display controller. In this case the
- driver will skip the initialization.
-
config FB_OMAP_CONSISTENT_DMA_SIZE
int "Consistent DMA memory size (MB)"
depends on FB_OMAP
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index d26f37a..ad741c3 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -532,6 +532,7 @@ static int acx_panel_probe(struct omap_dss_device *dssdev)
/*------- Backlight control --------*/
+ memset(&props, 0, sizeof(props));
props.fb_blank = FB_BLANK_UNBLANK;
props.power = FB_BLANK_UNBLANK;
props.type = BACKLIGHT_RAW;
@@ -738,12 +739,6 @@ static void acx_panel_set_timings(struct omap_dss_device *dssdev,
}
}
-static void acx_panel_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- *timings = dssdev->panel.timings;
-}
-
static int acx_panel_check_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
@@ -761,7 +756,6 @@ static struct omap_dss_driver acx_panel_driver = {
.resume = acx_panel_resume,
.set_timings = acx_panel_set_timings,
- .get_timings = acx_panel_get_timings,
.check_timings = acx_panel_check_timings,
.get_recommended_bpp = acx_get_recommended_bpp,
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
index 30fe4df..e42f9dc 100644
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -386,6 +386,106 @@ static struct panel_config generic_dpi_panels[] = {
.name = "innolux_at080tn52",
},
+
+ /* Mitsubishi AA084SB01 */
+ {
+ {
+ .x_res = 800,
+ .y_res = 600,
+ .pixel_clock = 40000,
+
+ .hsw = 1,
+ .hfp = 254,
+ .hbp = 1,
+
+ .vsw = 1,
+ .vfp = 26,
+ .vbp = 1,
+ },
+ .config = OMAP_DSS_LCD_TFT,
+ .name = "mitsubishi_aa084sb01",
+ },
+ /* EDT ET0500G0DH6 */
+ {
+ {
+ .x_res = 800,
+ .y_res = 480,
+ .pixel_clock = 33260,
+
+ .hsw = 128,
+ .hfp = 216,
+ .hbp = 40,
+
+ .vsw = 2,
+ .vfp = 35,
+ .vbp = 10,
+ },
+ .config = OMAP_DSS_LCD_TFT,
+ .name = "edt_et0500g0dh6",
+ },
+
+ /* Prime-View PD050VL1 */
+ {
+ {
+ .x_res = 640,
+ .y_res = 480,
+
+ .pixel_clock = 25000,
+
+ .hsw = 96,
+ .hfp = 18,
+ .hbp = 46,
+
+ .vsw = 2,
+ .vfp = 10,
+ .vbp = 33,
+ },
+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
+ .name = "primeview_pd050vl1",
+ },
+
+ /* Prime-View PM070WL4 */
+ {
+ {
+ .x_res = 800,
+ .y_res = 480,
+
+ .pixel_clock = 32000,
+
+ .hsw = 128,
+ .hfp = 42,
+ .hbp = 86,
+
+ .vsw = 2,
+ .vfp = 10,
+ .vbp = 33,
+ },
+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
+ .name = "primeview_pm070wl4",
+ },
+
+ /* Prime-View PD104SLF */
+ {
+ {
+ .x_res = 800,
+ .y_res = 600,
+
+ .pixel_clock = 40000,
+
+ .hsw = 128,
+ .hfp = 42,
+ .hbp = 86,
+
+ .vsw = 4,
+ .vfp = 1,
+ .vbp = 23,
+ },
+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
+ .name = "primeview_pd104slf",
+ },
};
struct panel_drv_data {
@@ -549,12 +649,6 @@ static void generic_dpi_panel_set_timings(struct omap_dss_device *dssdev,
dpi_set_timings(dssdev, timings);
}
-static void generic_dpi_panel_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- *timings = dssdev->panel.timings;
-}
-
static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
@@ -571,7 +665,6 @@ static struct omap_dss_driver dpi_driver = {
.resume = generic_dpi_panel_resume,
.set_timings = generic_dpi_panel_set_timings,
- .get_timings = generic_dpi_panel_get_timings,
.check_timings = generic_dpi_panel_check_timings,
.driver = {
diff --git a/drivers/video/omap2/displays/panel-n8x0.c b/drivers/video/omap2/displays/panel-n8x0.c
index dc9408d..4a34cdc 100644
--- a/drivers/video/omap2/displays/panel-n8x0.c
+++ b/drivers/video/omap2/displays/panel-n8x0.c
@@ -610,12 +610,6 @@ static int n8x0_panel_resume(struct omap_dss_device *dssdev)
return 0;
}
-static void n8x0_panel_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- *timings = dssdev->panel.timings;
-}
-
static void n8x0_panel_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
@@ -678,8 +672,6 @@ static struct omap_dss_driver n8x0_panel_driver = {
.get_resolution = n8x0_panel_get_resolution,
.get_recommended_bpp = omapdss_default_get_recommended_bpp,
- .get_timings = n8x0_panel_get_timings,
-
.driver = {
.name = "n8x0_panel",
.owner = THIS_MODULE,
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index b2dd88b..901576e 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -30,7 +30,6 @@
#include <linux/gpio.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
-#include <linux/regulator/consumer.h>
#include <linux/mutex.h>
#include <video/omapdss.h>
@@ -55,73 +54,6 @@ static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable);
static int taal_panel_reset(struct omap_dss_device *dssdev);
-struct panel_regulator {
- struct regulator *regulator;
- const char *name;
- int min_uV;
- int max_uV;
-};
-
-static void free_regulators(struct panel_regulator *regulators, int n)
-{
- int i;
-
- for (i = 0; i < n; i++) {
- /* disable/put in reverse order */
- regulator_disable(regulators[n - i - 1].regulator);
- regulator_put(regulators[n - i - 1].regulator);
- }
-}
-
-static int init_regulators(struct omap_dss_device *dssdev,
- struct panel_regulator *regulators, int n)
-{
- int r, i, v;
-
- for (i = 0; i < n; i++) {
- struct regulator *reg;
-
- reg = regulator_get(&dssdev->dev, regulators[i].name);
- if (IS_ERR(reg)) {
- dev_err(&dssdev->dev, "failed to get regulator %s\n",
- regulators[i].name);
- r = PTR_ERR(reg);
- goto err;
- }
-
- /* FIXME: better handling of fixed vs. variable regulators */
- v = regulator_get_voltage(reg);
- if (v < regulators[i].min_uV || v > regulators[i].max_uV) {
- r = regulator_set_voltage(reg, regulators[i].min_uV,
- regulators[i].max_uV);
- if (r) {
- dev_err(&dssdev->dev,
- "failed to set regulator %s voltage\n",
- regulators[i].name);
- regulator_put(reg);
- goto err;
- }
- }
-
- r = regulator_enable(reg);
- if (r) {
- dev_err(&dssdev->dev, "failed to enable regulator %s\n",
- regulators[i].name);
- regulator_put(reg);
- goto err;
- }
-
- regulators[i].regulator = reg;
- }
-
- return 0;
-
-err:
- free_regulators(regulators, i);
-
- return r;
-}
-
/**
* struct panel_config - panel configuration
* @name: panel name
@@ -150,8 +82,6 @@ struct panel_config {
unsigned int low;
} reset_sequence;
- struct panel_regulator *regulators;
- int num_regulators;
};
enum {
@@ -577,12 +507,6 @@ static const struct backlight_ops taal_bl_ops = {
.update_status = taal_bl_update_status,
};
-static void taal_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- *timings = dssdev->panel.timings;
-}
-
static void taal_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
@@ -602,7 +526,7 @@ static ssize_t taal_num_errors_show(struct device *dev,
{
struct omap_dss_device *dssdev = to_dss_device(dev);
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- u8 errors;
+ u8 errors = 0;
int r;
mutex_lock(&td->lock);
@@ -977,11 +901,6 @@ static int taal_probe(struct omap_dss_device *dssdev)
atomic_set(&td->do_update, 0);
- r = init_regulators(dssdev, panel_config->regulators,
- panel_config->num_regulators);
- if (r)
- goto err_reg;
-
td->workqueue = create_singlethread_workqueue("taal_esd");
if (td->workqueue == NULL) {
dev_err(&dssdev->dev, "can't create ESD workqueue\n");
@@ -1087,8 +1006,6 @@ err_bl:
err_rst_gpio:
destroy_workqueue(td->workqueue);
err_wq:
- free_regulators(panel_config->regulators, panel_config->num_regulators);
-err_reg:
kfree(td);
err:
return r;
@@ -1125,9 +1042,6 @@ static void __exit taal_remove(struct omap_dss_device *dssdev)
/* reset, to be sure that the panel is in a valid state */
taal_hw_reset(dssdev);
- free_regulators(td->panel_config->regulators,
- td->panel_config->num_regulators);
-
if (gpio_is_valid(panel_data->reset_gpio))
gpio_free(panel_data->reset_gpio);
@@ -1909,8 +1823,6 @@ static struct omap_dss_driver taal_driver = {
.run_test = taal_run_test,
.memory_read = taal_memory_read,
- .get_timings = taal_get_timings,
-
.driver = {
.name = "taal",
.owner = THIS_MODULE,
diff --git a/drivers/video/omap2/displays/panel-tfp410.c b/drivers/video/omap2/displays/panel-tfp410.c
index 52637fa..bff306e 100644
--- a/drivers/video/omap2/displays/panel-tfp410.c
+++ b/drivers/video/omap2/displays/panel-tfp410.c
@@ -47,13 +47,9 @@ struct panel_drv_data {
struct mutex lock;
int pd_gpio;
-};
-static inline struct tfp410_platform_data
-*get_pdata(const struct omap_dss_device *dssdev)
-{
- return dssdev->data;
-}
+ struct i2c_adapter *i2c_adapter;
+};
static int tfp410_power_on(struct omap_dss_device *dssdev)
{
@@ -68,7 +64,7 @@ static int tfp410_power_on(struct omap_dss_device *dssdev)
goto err0;
if (gpio_is_valid(ddata->pd_gpio))
- gpio_set_value(ddata->pd_gpio, 1);
+ gpio_set_value_cansleep(ddata->pd_gpio, 1);
return 0;
err0:
@@ -83,18 +79,18 @@ static void tfp410_power_off(struct omap_dss_device *dssdev)
return;
if (gpio_is_valid(ddata->pd_gpio))
- gpio_set_value(ddata->pd_gpio, 0);
+ gpio_set_value_cansleep(ddata->pd_gpio, 0);
omapdss_dpi_display_disable(dssdev);
}
static int tfp410_probe(struct omap_dss_device *dssdev)
{
- struct tfp410_platform_data *pdata = get_pdata(dssdev);
struct panel_drv_data *ddata;
int r;
+ int i2c_bus_num;
- ddata = kzalloc(sizeof(*ddata), GFP_KERNEL);
+ ddata = devm_kzalloc(&dssdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
@@ -104,10 +100,15 @@ static int tfp410_probe(struct omap_dss_device *dssdev)
ddata->dssdev = dssdev;
mutex_init(&ddata->lock);
- if (pdata)
+ if (dssdev->data) {
+ struct tfp410_platform_data *pdata = dssdev->data;
+
ddata->pd_gpio = pdata->power_down_gpio;
- else
+ i2c_bus_num = pdata->i2c_bus_num;
+ } else {
ddata->pd_gpio = -1;
+ i2c_bus_num = -1;
+ }
if (gpio_is_valid(ddata->pd_gpio)) {
r = gpio_request_one(ddata->pd_gpio, GPIOF_OUT_INIT_LOW,
@@ -115,13 +116,31 @@ static int tfp410_probe(struct omap_dss_device *dssdev)
if (r) {
dev_err(&dssdev->dev, "Failed to request PD GPIO %d\n",
ddata->pd_gpio);
- ddata->pd_gpio = -1;
+ return r;
}
}
+ if (i2c_bus_num != -1) {
+ struct i2c_adapter *adapter;
+
+ adapter = i2c_get_adapter(i2c_bus_num);
+ if (!adapter) {
+ dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n",
+ i2c_bus_num);
+ r = -EINVAL;
+ goto err_i2c;
+ }
+
+ ddata->i2c_adapter = adapter;
+ }
+
dev_set_drvdata(&dssdev->dev, ddata);
return 0;
+err_i2c:
+ if (gpio_is_valid(ddata->pd_gpio))
+ gpio_free(ddata->pd_gpio);
+ return r;
}
static void __exit tfp410_remove(struct omap_dss_device *dssdev)
@@ -130,14 +149,15 @@ static void __exit tfp410_remove(struct omap_dss_device *dssdev)
mutex_lock(&ddata->lock);
+ if (ddata->i2c_adapter)
+ i2c_put_adapter(ddata->i2c_adapter);
+
if (gpio_is_valid(ddata->pd_gpio))
gpio_free(ddata->pd_gpio);
dev_set_drvdata(&dssdev->dev, NULL);
mutex_unlock(&ddata->lock);
-
- kfree(ddata);
}
static int tfp410_enable(struct omap_dss_device *dssdev)
@@ -269,27 +289,17 @@ static int tfp410_read_edid(struct omap_dss_device *dssdev,
u8 *edid, int len)
{
struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
- struct tfp410_platform_data *pdata = get_pdata(dssdev);
- struct i2c_adapter *adapter;
int r, l, bytes_read;
mutex_lock(&ddata->lock);
- if (pdata->i2c_bus_num == 0) {
+ if (!ddata->i2c_adapter) {
r = -ENODEV;
goto err;
}
- adapter = i2c_get_adapter(pdata->i2c_bus_num);
- if (!adapter) {
- dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n",
- pdata->i2c_bus_num);
- r = -EINVAL;
- goto err;
- }
-
l = min(EDID_LENGTH, len);
- r = tfp410_ddc_read(adapter, edid, l, 0);
+ r = tfp410_ddc_read(ddata->i2c_adapter, edid, l, 0);
if (r)
goto err;
@@ -299,7 +309,7 @@ static int tfp410_read_edid(struct omap_dss_device *dssdev,
if (len > EDID_LENGTH && edid[0x7e] > 0) {
l = min(EDID_LENGTH, len - EDID_LENGTH);
- r = tfp410_ddc_read(adapter, edid + EDID_LENGTH,
+ r = tfp410_ddc_read(ddata->i2c_adapter, edid + EDID_LENGTH,
l, EDID_LENGTH);
if (r)
goto err;
@@ -319,21 +329,15 @@ err:
static bool tfp410_detect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
- struct tfp410_platform_data *pdata = get_pdata(dssdev);
- struct i2c_adapter *adapter;
unsigned char out;
int r;
mutex_lock(&ddata->lock);
- if (pdata->i2c_bus_num == 0)
- goto out;
-
- adapter = i2c_get_adapter(pdata->i2c_bus_num);
- if (!adapter)
+ if (!ddata->i2c_adapter)
goto out;
- r = tfp410_ddc_read(adapter, &out, 1, 0);
+ r = tfp410_ddc_read(ddata->i2c_adapter, &out, 1, 0);
mutex_unlock(&ddata->lock);
diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
index 32f3fcd..4b6448b 100644
--- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
@@ -272,13 +272,16 @@ static const struct omap_video_timings tpo_td043_timings = {
static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043)
{
int nreset_gpio = tpo_td043->nreset_gpio;
+ int r;
if (tpo_td043->powered_on)
return 0;
- regulator_enable(tpo_td043->vcc_reg);
+ r = regulator_enable(tpo_td043->vcc_reg);
+ if (r != 0)
+ return r;
- /* wait for regulator to stabilize */
+ /* wait for panel to stabilize */
msleep(160);
if (gpio_is_valid(nreset_gpio))
@@ -470,6 +473,18 @@ static void tpo_td043_remove(struct omap_dss_device *dssdev)
gpio_free(nreset_gpio);
}
+static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ dpi_set_timings(dssdev, timings);
+}
+
+static int tpo_td043_check_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ return dpi_check_timings(dssdev, timings);
+}
+
static struct omap_dss_driver tpo_td043_driver = {
.probe = tpo_td043_probe,
.remove = tpo_td043_remove,
@@ -481,6 +496,9 @@ static struct omap_dss_driver tpo_td043_driver = {
.set_mirror = tpo_td043_set_hmirror,
.get_mirror = tpo_td043_get_hmirror,
+ .set_timings = tpo_td043_set_timings,
+ .check_timings = tpo_td043_check_timings,
+
.driver = {
.name = "tpo_td043mtea1_panel",
.owner = THIS_MODULE,
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 7be7c06..43324e5 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -68,6 +68,10 @@ config OMAP4_DSS_HDMI
HDMI Interface. This adds the High Definition Multimedia Interface.
See http://www.hdmi.org/ for HDMI specification.
+config OMAP4_DSS_HDMI_AUDIO
+ bool
+ depends on OMAP4_DSS_HDMI
+
config OMAP2_DSS_SDI
bool "SDI support"
depends on ARCH_OMAP3
@@ -90,15 +94,6 @@ config OMAP2_DSS_DSI
See http://www.mipi.org/ for DSI spesifications.
-config OMAP2_DSS_FAKE_VSYNC
- bool "Fake VSYNC irq from manual update displays"
- default n
- help
- If this is selected, DSI will generate a fake DISPC VSYNC interrupt
- when DSI has sent a frame. This is only needed with DSI or RFBI
- displays using manual mode, and you want VSYNC to, for example,
- time animation.
-
config OMAP2_DSS_MIN_FCK_PER_PCK
int "Minimum FCK/PCK ratio (for scaling)"
range 0 32
diff --git a/drivers/video/omap2/dss/apply.c b/drivers/video/omap2/dss/apply.c
index b10b3bc..ab22cc2 100644
--- a/drivers/video/omap2/dss/apply.c
+++ b/drivers/video/omap2/dss/apply.c
@@ -99,6 +99,11 @@ struct mgr_priv_data {
/* If true, a display is enabled using this manager */
bool enabled;
+
+ bool extra_info_dirty;
+ bool shadow_extra_info_dirty;
+
+ struct omap_video_timings timings;
};
static struct {
@@ -176,7 +181,7 @@ static bool mgr_manual_update(struct omap_overlay_manager *mgr)
}
static int dss_check_settings_low(struct omap_overlay_manager *mgr,
- struct omap_dss_device *dssdev, bool applying)
+ bool applying)
{
struct omap_overlay_info *oi;
struct omap_overlay_manager_info *mi;
@@ -187,6 +192,9 @@ static int dss_check_settings_low(struct omap_overlay_manager *mgr,
mp = get_mgr_priv(mgr);
+ if (!mp->enabled)
+ return 0;
+
if (applying && mp->user_info_dirty)
mi = &mp->user_info;
else
@@ -206,26 +214,24 @@ static int dss_check_settings_low(struct omap_overlay_manager *mgr,
ois[ovl->id] = oi;
}
- return dss_mgr_check(mgr, dssdev, mi, ois);
+ return dss_mgr_check(mgr, mi, &mp->timings, ois);
}
/*
* check manager and overlay settings using overlay_info from data->info
*/
-static int dss_check_settings(struct omap_overlay_manager *mgr,
- struct omap_dss_device *dssdev)
+static int dss_check_settings(struct omap_overlay_manager *mgr)
{
- return dss_check_settings_low(mgr, dssdev, false);
+ return dss_check_settings_low(mgr, false);
}
/*
* check manager and overlay settings using overlay_info from ovl->info if
* dirty and from data->info otherwise
*/
-static int dss_check_settings_apply(struct omap_overlay_manager *mgr,
- struct omap_dss_device *dssdev)
+static int dss_check_settings_apply(struct omap_overlay_manager *mgr)
{
- return dss_check_settings_low(mgr, dssdev, true);
+ return dss_check_settings_low(mgr, true);
}
static bool need_isr(void)
@@ -261,6 +267,20 @@ static bool need_isr(void)
if (mp->shadow_info_dirty)
return true;
+ /*
+ * NOTE: we don't check extra_info flags for disabled
+ * managers; once the manager is enabled, the extra_info
+ * related manager changes will be taken in by the HW.
+ */
+
+ /* to write new values to registers */
+ if (mp->extra_info_dirty)
+ return true;
+
+ /* to set GO bit */
+ if (mp->shadow_extra_info_dirty)
+ return true;
+
list_for_each_entry(ovl, &mgr->overlays, list) {
struct ovl_priv_data *op;
@@ -305,7 +325,7 @@ static bool need_go(struct omap_overlay_manager *mgr)
mp = get_mgr_priv(mgr);
- if (mp->shadow_info_dirty)
+ if (mp->shadow_info_dirty || mp->shadow_extra_info_dirty)
return true;
list_for_each_entry(ovl, &mgr->overlays, list) {
@@ -320,20 +340,16 @@ static bool need_go(struct omap_overlay_manager *mgr)
/* returns true if an extra_info field is currently being updated */
static bool extra_info_update_ongoing(void)
{
- const int num_ovls = omap_dss_get_num_overlays();
- struct ovl_priv_data *op;
- struct omap_overlay *ovl;
- struct mgr_priv_data *mp;
+ const int num_mgrs = dss_feat_get_num_mgrs();
int i;
- for (i = 0; i < num_ovls; ++i) {
- ovl = omap_dss_get_overlay(i);
- op = get_ovl_priv(ovl);
-
- if (!ovl->manager)
- continue;
+ for (i = 0; i < num_mgrs; ++i) {
+ struct omap_overlay_manager *mgr;
+ struct omap_overlay *ovl;
+ struct mgr_priv_data *mp;
- mp = get_mgr_priv(ovl->manager);
+ mgr = omap_dss_get_overlay_manager(i);
+ mp = get_mgr_priv(mgr);
if (!mp->enabled)
continue;
@@ -341,8 +357,15 @@ static bool extra_info_update_ongoing(void)
if (!mp->updating)
continue;
- if (op->extra_info_dirty || op->shadow_extra_info_dirty)
+ if (mp->extra_info_dirty || mp->shadow_extra_info_dirty)
return true;
+
+ list_for_each_entry(ovl, &mgr->overlays, list) {
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+
+ if (op->extra_info_dirty || op->shadow_extra_info_dirty)
+ return true;
+ }
}
return false;
@@ -525,11 +548,13 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl)
oi = &op->info;
+ mp = get_mgr_priv(ovl->manager);
+
replication = dss_use_replication(ovl->manager->device, oi->color_mode);
ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;
- r = dispc_ovl_setup(ovl->id, oi, ilace, replication);
+ r = dispc_ovl_setup(ovl->id, oi, ilace, replication, &mp->timings);
if (r) {
/*
* We can't do much here, as this function can be called from
@@ -543,8 +568,6 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl)
return;
}
- mp = get_mgr_priv(ovl->manager);
-
op->info_dirty = false;
if (mp->updating)
op->shadow_info_dirty = true;
@@ -601,6 +624,22 @@ static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
}
}
+static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+
+ DSSDBGF("%d", mgr->id);
+
+ if (!mp->extra_info_dirty)
+ return;
+
+ dispc_mgr_set_timings(mgr->id, &mp->timings);
+
+ mp->extra_info_dirty = false;
+ if (mp->updating)
+ mp->shadow_extra_info_dirty = true;
+}
+
static void dss_write_regs_common(void)
{
const int num_mgrs = omap_dss_get_num_overlay_managers();
@@ -646,7 +685,7 @@ static void dss_write_regs(void)
if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
continue;
- r = dss_check_settings(mgr, mgr->device);
+ r = dss_check_settings(mgr);
if (r) {
DSSERR("cannot write registers for manager %s: "
"illegal configuration\n", mgr->name);
@@ -654,6 +693,7 @@ static void dss_write_regs(void)
}
dss_mgr_write_regs(mgr);
+ dss_mgr_write_regs_extra(mgr);
}
}
@@ -693,6 +733,7 @@ static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr)
mp = get_mgr_priv(mgr);
mp->shadow_info_dirty = false;
+ mp->shadow_extra_info_dirty = false;
list_for_each_entry(ovl, &mgr->overlays, list) {
op = get_ovl_priv(ovl);
@@ -711,7 +752,7 @@ void dss_mgr_start_update(struct omap_overlay_manager *mgr)
WARN_ON(mp->updating);
- r = dss_check_settings(mgr, mgr->device);
+ r = dss_check_settings(mgr);
if (r) {
DSSERR("cannot start manual update: illegal configuration\n");
spin_unlock_irqrestore(&data_lock, flags);
@@ -719,6 +760,7 @@ void dss_mgr_start_update(struct omap_overlay_manager *mgr)
}
dss_mgr_write_regs(mgr);
+ dss_mgr_write_regs_extra(mgr);
dss_write_regs_common();
@@ -857,7 +899,7 @@ int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
spin_lock_irqsave(&data_lock, flags);
- r = dss_check_settings_apply(mgr, mgr->device);
+ r = dss_check_settings_apply(mgr);
if (r) {
spin_unlock_irqrestore(&data_lock, flags);
DSSERR("failed to apply settings: illegal configuration.\n");
@@ -918,16 +960,13 @@ static void dss_ovl_setup_fifo(struct omap_overlay *ovl,
bool use_fifo_merge)
{
struct ovl_priv_data *op = get_ovl_priv(ovl);
- struct omap_dss_device *dssdev;
u32 fifo_low, fifo_high;
if (!op->enabled && !op->enabling)
return;
- dssdev = ovl->manager->device;
-
dispc_ovl_compute_fifo_thresholds(ovl->id, &fifo_low, &fifo_high,
- use_fifo_merge);
+ use_fifo_merge, ovl_manual_update(ovl));
dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high);
}
@@ -1050,7 +1089,7 @@ int dss_mgr_enable(struct omap_overlay_manager *mgr)
mp->enabled = true;
- r = dss_check_settings(mgr, mgr->device);
+ r = dss_check_settings(mgr);
if (r) {
DSSERR("failed to enable manager %d: check_settings failed\n",
mgr->id);
@@ -1225,6 +1264,35 @@ err:
return r;
}
+static void dss_apply_mgr_timings(struct omap_overlay_manager *mgr,
+ struct omap_video_timings *timings)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+
+ mp->timings = *timings;
+ mp->extra_info_dirty = true;
+}
+
+void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
+ struct omap_video_timings *timings)
+{
+ unsigned long flags;
+
+ mutex_lock(&apply_lock);
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ dss_apply_mgr_timings(mgr, timings);
+
+ dss_write_regs();
+ dss_set_go_bits();
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ wait_pending_extra_info_updates();
+
+ mutex_unlock(&apply_lock);
+}
int dss_ovl_set_info(struct omap_overlay *ovl,
struct omap_overlay_info *info)
@@ -1393,7 +1461,7 @@ int dss_ovl_enable(struct omap_overlay *ovl)
op->enabling = true;
- r = dss_check_settings(ovl->manager, ovl->manager->device);
+ r = dss_check_settings(ovl->manager);
if (r) {
DSSERR("failed to enable overlay %d: check_settings failed\n",
ovl->id);
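The manager-timings handling added in this file reuses the two-flag shadow-register scheme already used for overlay info: one flag says new values have not been written to the registers yet, the other says they were written but the hardware has not latched them at the next GO/vsync yet. A compact sketch of that bookkeeping with generic names, not the omapdss ones:

struct shadowed_cfg {
	u32 pending;		/* latest value requested by software */
	bool dirty;		/* pending not yet written to a register */
	bool shadow_dirty;	/* written, but HW has not latched it yet */
};

/* hw_program_register() stands in for the real register write. */
static void cfg_write_regs(struct shadowed_cfg *sc, bool hw_updating)
{
	if (!sc->dirty)
		return;

	hw_program_register(sc->pending);

	sc->dirty = false;
	if (hw_updating)
		sc->shadow_dirty = true;	/* cleared once HW takes it */
}

static void cfg_hw_latched(struct shadowed_cfg *sc)
{
	/* called from the vsync/frame-done interrupt */
	sc->shadow_dirty = false;
}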
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index e8a1207..58bd9c2 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -32,6 +32,7 @@
#include <linux/io.h>
#include <linux/device.h>
#include <linux/regulator/consumer.h>
+#include <linux/suspend.h>
#include <video/omapdss.h>
@@ -43,6 +44,8 @@ static struct {
struct regulator *vdds_dsi_reg;
struct regulator *vdds_sdi_reg;
+
+ const char *default_display_name;
} core;
static char *def_disp_name;
@@ -54,9 +57,6 @@ bool dss_debug;
module_param_named(debug, dss_debug, bool, 0644);
#endif
-static int omap_dss_register_device(struct omap_dss_device *);
-static void omap_dss_unregister_device(struct omap_dss_device *);
-
/* REGULATORS */
struct regulator *dss_get_vdds_dsi(void)
@@ -87,6 +87,51 @@ struct regulator *dss_get_vdds_sdi(void)
return reg;
}
+int dss_get_ctx_loss_count(struct device *dev)
+{
+ struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
+ int cnt;
+
+ if (!board_data->get_context_loss_count)
+ return -ENOENT;
+
+ cnt = board_data->get_context_loss_count(dev);
+
+ WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
+
+ return cnt;
+}
+
+int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask)
+{
+ struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
+
+ if (!board_data->dsi_enable_pads)
+ return -ENOENT;
+
+ return board_data->dsi_enable_pads(dsi_id, lane_mask);
+}
+
+void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask)
+{
+ struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
+
+ if (!board_data->dsi_enable_pads)
+ return;
+
+ return board_data->dsi_disable_pads(dsi_id, lane_mask);
+}
+
+int dss_set_min_bus_tput(struct device *dev, unsigned long tput)
+{
+ struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
+
+ if (pdata->set_min_bus_tput)
+ return pdata->set_min_bus_tput(dev, tput);
+ else
+ return 0;
+}
+
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
static int dss_debug_show(struct seq_file *s, void *unused)
{
@@ -121,34 +166,6 @@ static int dss_initialize_debugfs(void)
debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
&dss_debug_dump_clocks, &dss_debug_fops);
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- debugfs_create_file("dispc_irq", S_IRUGO, dss_debugfs_dir,
- &dispc_dump_irqs, &dss_debug_fops);
-#endif
-
-#if defined(CONFIG_OMAP2_DSS_DSI) && defined(CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS)
- dsi_create_debugfs_files_irq(dss_debugfs_dir, &dss_debug_fops);
-#endif
-
- debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir,
- &dss_dump_regs, &dss_debug_fops);
- debugfs_create_file("dispc", S_IRUGO, dss_debugfs_dir,
- &dispc_dump_regs, &dss_debug_fops);
-#ifdef CONFIG_OMAP2_DSS_RFBI
- debugfs_create_file("rfbi", S_IRUGO, dss_debugfs_dir,
- &rfbi_dump_regs, &dss_debug_fops);
-#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_create_debugfs_files_reg(dss_debugfs_dir, &dss_debug_fops);
-#endif
-#ifdef CONFIG_OMAP2_DSS_VENC
- debugfs_create_file("venc", S_IRUGO, dss_debugfs_dir,
- &venc_dump_regs, &dss_debug_fops);
-#endif
-#ifdef CONFIG_OMAP4_DSS_HDMI
- debugfs_create_file("hdmi", S_IRUGO, dss_debugfs_dir,
- &hdmi_dump_regs, &dss_debug_fops);
-#endif
return 0;
}
@@ -157,6 +174,19 @@ static void dss_uninitialize_debugfs(void)
if (dss_debugfs_dir)
debugfs_remove_recursive(dss_debugfs_dir);
}
+
+int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
+{
+ struct dentry *d;
+
+ d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
+ write, &dss_debug_fops);
+
+ if (IS_ERR(d))
+ return PTR_ERR(d);
+
+ return 0;
+}
#else /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
static inline int dss_initialize_debugfs(void)
{
@@ -165,14 +195,39 @@ static inline int dss_initialize_debugfs(void)
static inline void dss_uninitialize_debugfs(void)
{
}
+int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
+{
+ return 0;
+}
#endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
/* PLATFORM DEVICE */
-static int omap_dss_probe(struct platform_device *pdev)
+static int omap_dss_pm_notif(struct notifier_block *b, unsigned long v, void *d)
+{
+ DSSDBG("pm notif %lu\n", v);
+
+ switch (v) {
+ case PM_SUSPEND_PREPARE:
+ DSSDBG("suspending displays\n");
+ return dss_suspend_all_devices();
+
+ case PM_POST_SUSPEND:
+ DSSDBG("resuming displays\n");
+ return dss_resume_all_devices();
+
+ default:
+ return 0;
+ }
+}
+
+static struct notifier_block omap_dss_pm_notif_block = {
+ .notifier_call = omap_dss_pm_notif,
+};
+
+static int __init omap_dss_probe(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
int r;
- int i;
core.pdev = pdev;
@@ -187,28 +242,15 @@ static int omap_dss_probe(struct platform_device *pdev)
if (r)
goto err_debugfs;
- for (i = 0; i < pdata->num_devices; ++i) {
- struct omap_dss_device *dssdev = pdata->devices[i];
-
- r = omap_dss_register_device(dssdev);
- if (r) {
- DSSERR("device %d %s register failed %d\n", i,
- dssdev->name ?: "unnamed", r);
+ if (def_disp_name)
+ core.default_display_name = def_disp_name;
+ else if (pdata->default_device)
+ core.default_display_name = pdata->default_device->name;
- while (--i >= 0)
- omap_dss_unregister_device(pdata->devices[i]);
-
- goto err_register;
- }
-
- if (def_disp_name && strcmp(def_disp_name, dssdev->name) == 0)
- pdata->default_device = dssdev;
- }
+ register_pm_notifier(&omap_dss_pm_notif_block);
return 0;
-err_register:
- dss_uninitialize_debugfs();
err_debugfs:
return r;
@@ -216,17 +258,13 @@ err_debugfs:
static int omap_dss_remove(struct platform_device *pdev)
{
- struct omap_dss_board_info *pdata = pdev->dev.platform_data;
- int i;
+ unregister_pm_notifier(&omap_dss_pm_notif_block);
dss_uninitialize_debugfs();
dss_uninit_overlays(pdev);
dss_uninit_overlay_managers(pdev);
- for (i = 0; i < pdata->num_devices; ++i)
- omap_dss_unregister_device(pdata->devices[i]);
-
return 0;
}
@@ -236,26 +274,9 @@ static void omap_dss_shutdown(struct platform_device *pdev)
dss_disable_all_devices();
}
-static int omap_dss_suspend(struct platform_device *pdev, pm_message_t state)
-{
- DSSDBG("suspend %d\n", state.event);
-
- return dss_suspend_all_devices();
-}
-
-static int omap_dss_resume(struct platform_device *pdev)
-{
- DSSDBG("resume\n");
-
- return dss_resume_all_devices();
-}
-
static struct platform_driver omap_dss_driver = {
- .probe = omap_dss_probe,
.remove = omap_dss_remove,
.shutdown = omap_dss_shutdown,
- .suspend = omap_dss_suspend,
- .resume = omap_dss_resume,
.driver = {
.name = "omapdss",
.owner = THIS_MODULE,
@@ -326,7 +347,6 @@ static int dss_driver_probe(struct device *dev)
int r;
struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver);
struct omap_dss_device *dssdev = to_dss_device(dev);
- struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
bool force;
DSSDBG("driver_probe: dev %s/%s, drv %s\n",
@@ -335,7 +355,8 @@ static int dss_driver_probe(struct device *dev)
dss_init_device(core.pdev, dssdev);
- force = pdata->default_device == dssdev;
+ force = core.default_display_name &&
+ strcmp(core.default_display_name, dssdev->name) == 0;
dss_recheck_connections(dssdev, force);
r = dssdrv->probe(dssdev);
@@ -381,6 +402,8 @@ int omap_dss_register_driver(struct omap_dss_driver *dssdriver)
if (dssdriver->get_recommended_bpp == NULL)
dssdriver->get_recommended_bpp =
omapdss_default_get_recommended_bpp;
+ if (dssdriver->get_timings == NULL)
+ dssdriver->get_timings = omapdss_default_get_timings;
return driver_register(&dssdriver->driver);
}
@@ -427,27 +450,38 @@ static void omap_dss_dev_release(struct device *dev)
reset_device(dev, 0);
}
-static int omap_dss_register_device(struct omap_dss_device *dssdev)
+int omap_dss_register_device(struct omap_dss_device *dssdev,
+ struct device *parent, int disp_num)
{
- static int dev_num;
-
WARN_ON(!dssdev->driver_name);
reset_device(&dssdev->dev, 1);
dssdev->dev.bus = &dss_bus_type;
- dssdev->dev.parent = &dss_bus;
+ dssdev->dev.parent = parent;
dssdev->dev.release = omap_dss_dev_release;
- dev_set_name(&dssdev->dev, "display%d", dev_num++);
+ dev_set_name(&dssdev->dev, "display%d", disp_num);
return device_register(&dssdev->dev);
}
-static void omap_dss_unregister_device(struct omap_dss_device *dssdev)
+void omap_dss_unregister_device(struct omap_dss_device *dssdev)
{
device_unregister(&dssdev->dev);
}
+static int dss_unregister_dss_dev(struct device *dev, void *data)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ omap_dss_unregister_device(dssdev);
+ return 0;
+}
+
+void omap_dss_unregister_child_devices(struct device *parent)
+{
+ device_for_each_child(parent, NULL, dss_unregister_dss_dev);
+}
+
/* BUS */
-static int omap_dss_bus_register(void)
+static int __init omap_dss_bus_register(void)
{
int r;
@@ -469,12 +503,56 @@ static int omap_dss_bus_register(void)
}
/* INIT */
+static int (*dss_output_drv_reg_funcs[])(void) __initdata = {
+#ifdef CONFIG_OMAP2_DSS_DPI
+ dpi_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_SDI
+ sdi_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_RFBI
+ rfbi_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+ venc_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_init_platform_driver,
+#endif
+#ifdef CONFIG_OMAP4_DSS_HDMI
+ hdmi_init_platform_driver,
+#endif
+};
+
+static void (*dss_output_drv_unreg_funcs[])(void) __exitdata = {
+#ifdef CONFIG_OMAP2_DSS_DPI
+ dpi_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_SDI
+ sdi_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_RFBI
+ rfbi_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_VENC
+ venc_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_uninit_platform_driver,
+#endif
+#ifdef CONFIG_OMAP4_DSS_HDMI
+ hdmi_uninit_platform_driver,
+#endif
+};
+
+static bool dss_output_drv_loaded[ARRAY_SIZE(dss_output_drv_reg_funcs)];
static int __init omap_dss_register_drivers(void)
{
int r;
+ int i;
- r = platform_driver_register(&omap_dss_driver);
+ r = platform_driver_probe(&omap_dss_driver, omap_dss_probe);
if (r)
return r;
@@ -490,40 +568,18 @@ static int __init omap_dss_register_drivers(void)
goto err_dispc;
}
- r = rfbi_init_platform_driver();
- if (r) {
- DSSERR("Failed to initialize rfbi platform driver\n");
- goto err_rfbi;
- }
-
- r = venc_init_platform_driver();
- if (r) {
- DSSERR("Failed to initialize venc platform driver\n");
- goto err_venc;
- }
-
- r = dsi_init_platform_driver();
- if (r) {
- DSSERR("Failed to initialize DSI platform driver\n");
- goto err_dsi;
- }
-
- r = hdmi_init_platform_driver();
- if (r) {
- DSSERR("Failed to initialize hdmi\n");
- goto err_hdmi;
+ /*
+ * It is ok if an output driver fails to register. This can happen, for
+ * example, when the SoC has no such output (e.g. SDI on OMAP4).
+ */
+ for (i = 0; i < ARRAY_SIZE(dss_output_drv_reg_funcs); ++i) {
+ r = dss_output_drv_reg_funcs[i]();
+ if (r == 0)
+ dss_output_drv_loaded[i] = true;
}
return 0;
-err_hdmi:
- dsi_uninit_platform_driver();
-err_dsi:
- venc_uninit_platform_driver();
-err_venc:
- rfbi_uninit_platform_driver();
-err_rfbi:
- dispc_uninit_platform_driver();
err_dispc:
dss_uninit_platform_driver();
err_dss:
@@ -534,10 +590,13 @@ err_dss:
static void __exit omap_dss_unregister_drivers(void)
{
- hdmi_uninit_platform_driver();
- dsi_uninit_platform_driver();
- venc_uninit_platform_driver();
- rfbi_uninit_platform_driver();
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dss_output_drv_unreg_funcs); ++i) {
+ if (dss_output_drv_loaded[i])
+ dss_output_drv_unreg_funcs[i]();
+ }
+
dispc_uninit_platform_driver();
dss_uninit_platform_driver();
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index ee30937..397d4ee 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -131,23 +131,6 @@ static inline u32 dispc_read_reg(const u16 idx)
return __raw_readl(dispc.base + idx);
}
-static int dispc_get_ctx_loss_count(void)
-{
- struct device *dev = &dispc.pdev->dev;
- struct omap_display_platform_data *pdata = dev->platform_data;
- struct omap_dss_board_info *board_data = pdata->board_data;
- int cnt;
-
- if (!board_data->get_context_loss_count)
- return -ENOENT;
-
- cnt = board_data->get_context_loss_count(dev);
-
- WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
-
- return cnt;
-}
-
#define SR(reg) \
dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
#define RR(reg) \
@@ -251,7 +234,7 @@ static void dispc_save_context(void)
if (dss_has_feature(FEAT_CORE_CLK_DIV))
SR(DIVISOR);
- dispc.ctx_loss_cnt = dispc_get_ctx_loss_count();
+ dispc.ctx_loss_cnt = dss_get_ctx_loss_count(&dispc.pdev->dev);
dispc.ctx_valid = true;
DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt);
@@ -266,7 +249,7 @@ static void dispc_restore_context(void)
if (!dispc.ctx_valid)
return;
- ctx = dispc_get_ctx_loss_count();
+ ctx = dss_get_ctx_loss_count(&dispc.pdev->dev);
if (ctx >= 0 && ctx == dispc.ctx_loss_cnt)
return;
@@ -401,7 +384,7 @@ void dispc_runtime_put(void)
DSSDBG("dispc_runtime_put\n");
r = pm_runtime_put_sync(&dispc.pdev->dev);
- WARN_ON(r < 0);
+ WARN_ON(r < 0 && r != -ENOSYS);
}
static inline bool dispc_mgr_is_lcd(enum omap_channel channel)
@@ -413,14 +396,6 @@ static inline bool dispc_mgr_is_lcd(enum omap_channel channel)
return false;
}
-static struct omap_dss_device *dispc_mgr_get_device(enum omap_channel channel)
-{
- struct omap_overlay_manager *mgr =
- omap_dss_get_overlay_manager(channel);
-
- return mgr ? mgr->device : NULL;
-}
-
u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
{
switch (channel) {
@@ -432,6 +407,7 @@ u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
return DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
default:
BUG();
+ return 0;
}
}
@@ -446,6 +422,7 @@ u32 dispc_mgr_get_framedone_irq(enum omap_channel channel)
return 0;
default:
BUG();
+ return 0;
}
}
@@ -764,7 +741,7 @@ static void dispc_ovl_set_color_mode(enum omap_plane plane,
case OMAP_DSS_COLOR_XRGB16_1555:
m = 0xf; break;
default:
- BUG(); break;
+ BUG(); return;
}
} else {
switch (color_mode) {
@@ -801,13 +778,25 @@ static void dispc_ovl_set_color_mode(enum omap_plane plane,
case OMAP_DSS_COLOR_XRGB16_1555:
m = 0xf; break;
default:
- BUG(); break;
+ BUG(); return;
}
}
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
}
+static void dispc_ovl_configure_burst_type(enum omap_plane plane,
+ enum omap_dss_rotation_type rotation_type)
+{
+ if (dss_has_feature(FEAT_BURST_2D) == 0)
+ return;
+
+ if (rotation_type == OMAP_DSS_ROT_TILER)
+ REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 1, 29, 29);
+ else
+ REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 0, 29, 29);
+}
+
void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel)
{
int shift;
@@ -845,6 +834,7 @@ void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel)
break;
default:
BUG();
+ return;
}
val = FLD_MOD(val, chan, shift, shift);
@@ -872,6 +862,7 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane)
break;
default:
BUG();
+ return 0;
}
val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
@@ -983,20 +974,13 @@ static void dispc_ovl_enable_replication(enum omap_plane plane, bool enable)
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, shift, shift);
}
-void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height)
+static void dispc_mgr_set_size(enum omap_channel channel, u16 width,
+ u16 height)
{
u32 val;
- BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
- val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
- dispc_write_reg(DISPC_SIZE_MGR(channel), val);
-}
-void dispc_set_digit_size(u16 width, u16 height)
-{
- u32 val;
- BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
- dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT), val);
+ dispc_write_reg(DISPC_SIZE_MGR(channel), val);
}
static void dispc_read_plane_fifo_sizes(void)
@@ -1063,7 +1047,8 @@ void dispc_enable_fifomerge(bool enable)
}
void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
- u32 *fifo_low, u32 *fifo_high, bool use_fifomerge)
+ u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
+ bool manual_update)
{
/*
* All sizes are in bytes. Both the buffer and burst are made of
@@ -1091,7 +1076,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
* combined fifo size
*/
- if (dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) {
+ if (manual_update && dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) {
*fifo_low = ovl_fifo_size - burst_size * 2;
*fifo_high = total_fifo_size - burst_size;
} else {
@@ -1185,6 +1170,94 @@ static void dispc_ovl_set_scale_param(enum omap_plane plane,
dispc_ovl_set_fir(plane, fir_hinc, fir_vinc, color_comp);
}
+static void dispc_ovl_set_accu_uv(enum omap_plane plane,
+ u16 orig_width, u16 orig_height, u16 out_width, u16 out_height,
+ bool ilace, enum omap_color_mode color_mode, u8 rotation)
+{
+ int h_accu2_0, h_accu2_1;
+ int v_accu2_0, v_accu2_1;
+ int chroma_hinc, chroma_vinc;
+ int idx;
+
+ struct accu {
+ s8 h0_m, h0_n;
+ s8 h1_m, h1_n;
+ s8 v0_m, v0_n;
+ s8 v1_m, v1_n;
+ };
+
+ const struct accu *accu_table;
+ const struct accu *accu_val;
+
+ static const struct accu accu_nv12[4] = {
+ { 0, 1, 0, 1 , -1, 2, 0, 1 },
+ { 1, 2, -3, 4 , 0, 1, 0, 1 },
+ { -1, 1, 0, 1 , -1, 2, 0, 1 },
+ { -1, 2, -1, 2 , -1, 1, 0, 1 },
+ };
+
+ static const struct accu accu_nv12_ilace[4] = {
+ { 0, 1, 0, 1 , -3, 4, -1, 4 },
+ { -1, 4, -3, 4 , 0, 1, 0, 1 },
+ { -1, 1, 0, 1 , -1, 4, -3, 4 },
+ { -3, 4, -3, 4 , -1, 1, 0, 1 },
+ };
+
+ static const struct accu accu_yuv[4] = {
+ { 0, 1, 0, 1, 0, 1, 0, 1 },
+ { 0, 1, 0, 1, 0, 1, 0, 1 },
+ { -1, 1, 0, 1, 0, 1, 0, 1 },
+ { 0, 1, 0, 1, -1, 1, 0, 1 },
+ };
+
+ switch (rotation) {
+ case OMAP_DSS_ROT_0:
+ idx = 0;
+ break;
+ case OMAP_DSS_ROT_90:
+ idx = 1;
+ break;
+ case OMAP_DSS_ROT_180:
+ idx = 2;
+ break;
+ case OMAP_DSS_ROT_270:
+ idx = 3;
+ break;
+ default:
+ BUG();
+ return;
+ }
+
+ switch (color_mode) {
+ case OMAP_DSS_COLOR_NV12:
+ if (ilace)
+ accu_table = accu_nv12_ilace;
+ else
+ accu_table = accu_nv12;
+ break;
+ case OMAP_DSS_COLOR_YUV2:
+ case OMAP_DSS_COLOR_UYVY:
+ accu_table = accu_yuv;
+ break;
+ default:
+ BUG();
+ return;
+ }
+
+ accu_val = &accu_table[idx];
+
+ chroma_hinc = 1024 * orig_width / out_width;
+ chroma_vinc = 1024 * orig_height / out_height;
+
+ h_accu2_0 = (accu_val->h0_m * chroma_hinc / accu_val->h0_n) % 1024;
+ h_accu2_1 = (accu_val->h1_m * chroma_hinc / accu_val->h1_n) % 1024;
+ v_accu2_0 = (accu_val->v0_m * chroma_vinc / accu_val->v0_n) % 1024;
+ v_accu2_1 = (accu_val->v1_m * chroma_vinc / accu_val->v1_n) % 1024;
+
+ dispc_ovl_set_vid_accu2_0(plane, h_accu2_0, v_accu2_0);
+ dispc_ovl_set_vid_accu2_1(plane, h_accu2_1, v_accu2_1);
+}
+
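/*
 * Worked example (illustrative numbers only): for NV12 rotated by 90
 * degrees the table entry is { 1, 2, -3, 4, 0, 1, 0, 1 }. With no chroma
 * scaling, chroma_hinc = 1024 * orig_width / out_width = 1024, so
 *
 *	h_accu2_0 = ( 1 * 1024 / 2) % 1024 =  512
 *	h_accu2_1 = (-3 * 1024 / 4) % 1024 = -768
 *	v_accu2_0 = v_accu2_1 = 0
 *
 * i.e. the chroma sampling point is shifted by a rotation-dependent
 * fraction of a pixel instead of the old hard-coded 0x80 accumulators.
 */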
static void dispc_ovl_set_scaling_common(enum omap_plane plane,
u16 orig_width, u16 orig_height,
u16 out_width, u16 out_height,
@@ -1258,6 +1331,10 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 8, 8);
return;
}
+
+ dispc_ovl_set_accu_uv(plane, orig_width, orig_height, out_width,
+ out_height, ilace, color_mode, rotation);
+
switch (color_mode) {
case OMAP_DSS_COLOR_NV12:
/* UV is subsampled by 2 vertically */
@@ -1280,6 +1357,7 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
break;
default:
BUG();
+ return;
}
if (out_width != orig_width)
@@ -1297,9 +1375,6 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5);
/* set V scaling */
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_y ? 1 : 0, 6, 6);
-
- dispc_ovl_set_vid_accu2_0(plane, 0x80, 0);
- dispc_ovl_set_vid_accu2_1(plane, 0x80, 0);
}
static void dispc_ovl_set_scaling(enum omap_plane plane,
@@ -1410,6 +1485,7 @@ static int color_mode_to_bpp(enum omap_color_mode color_mode)
return 32;
default:
BUG();
+ return 0;
}
}
@@ -1423,6 +1499,7 @@ static s32 pixinc(int pixels, u8 ps)
return 1 - (-pixels + 1) * ps;
else
BUG();
+ return 0;
}
static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
@@ -1431,7 +1508,7 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
enum omap_color_mode color_mode, bool fieldmode,
unsigned int field_offset,
unsigned *offset0, unsigned *offset1,
- s32 *row_inc, s32 *pix_inc)
+ s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
{
u8 ps;
@@ -1477,10 +1554,10 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
else
*offset0 = 0;
- *row_inc = pixinc(1 + (screen_width - width) +
- (fieldmode ? screen_width : 0),
- ps);
- *pix_inc = pixinc(1, ps);
+ *row_inc = pixinc(1 +
+ (y_predecim * screen_width - x_predecim * width) +
+ (fieldmode ? screen_width : 0), ps);
+ *pix_inc = pixinc(x_predecim, ps);
break;
case OMAP_DSS_ROT_0 + 4:
@@ -1498,14 +1575,15 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
*offset0 = field_offset * screen_width * ps;
else
*offset0 = 0;
- *row_inc = pixinc(1 - (screen_width + width) -
- (fieldmode ? screen_width : 0),
- ps);
- *pix_inc = pixinc(1, ps);
+ *row_inc = pixinc(1 -
+ (y_predecim * screen_width + x_predecim * width) -
+ (fieldmode ? screen_width : 0), ps);
+ *pix_inc = pixinc(x_predecim, ps);
break;
default:
BUG();
+ return;
}
}
@@ -1515,7 +1593,7 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
enum omap_color_mode color_mode, bool fieldmode,
unsigned int field_offset,
unsigned *offset0, unsigned *offset1,
- s32 *row_inc, s32 *pix_inc)
+ s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
{
u8 ps;
u16 fbw, fbh;
@@ -1557,10 +1635,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
*offset0 = *offset1 + field_offset * screen_width * ps;
else
*offset0 = *offset1;
- *row_inc = pixinc(1 + (screen_width - fbw) +
- (fieldmode ? screen_width : 0),
- ps);
- *pix_inc = pixinc(1, ps);
+ *row_inc = pixinc(1 +
+ (y_predecim * screen_width - fbw * x_predecim) +
+ (fieldmode ? screen_width : 0), ps);
+ if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+ color_mode == OMAP_DSS_COLOR_UYVY)
+ *pix_inc = pixinc(x_predecim, 2 * ps);
+ else
+ *pix_inc = pixinc(x_predecim, ps);
break;
case OMAP_DSS_ROT_90:
*offset1 = screen_width * (fbh - 1) * ps;
@@ -1568,9 +1650,9 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
*offset0 = *offset1 + field_offset * ps;
else
*offset0 = *offset1;
- *row_inc = pixinc(screen_width * (fbh - 1) + 1 +
- (fieldmode ? 1 : 0), ps);
- *pix_inc = pixinc(-screen_width, ps);
+ *row_inc = pixinc(screen_width * (fbh * x_predecim - 1) +
+ y_predecim + (fieldmode ? 1 : 0), ps);
+ *pix_inc = pixinc(-x_predecim * screen_width, ps);
break;
case OMAP_DSS_ROT_180:
*offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps;
@@ -1579,10 +1661,13 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
else
*offset0 = *offset1;
*row_inc = pixinc(-1 -
- (screen_width - fbw) -
- (fieldmode ? screen_width : 0),
- ps);
- *pix_inc = pixinc(-1, ps);
+ (y_predecim * screen_width - fbw * x_predecim) -
+ (fieldmode ? screen_width : 0), ps);
+ if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+ color_mode == OMAP_DSS_COLOR_UYVY)
+ *pix_inc = pixinc(-x_predecim, 2 * ps);
+ else
+ *pix_inc = pixinc(-x_predecim, ps);
break;
case OMAP_DSS_ROT_270:
*offset1 = (fbw - 1) * ps;
@@ -1590,9 +1675,9 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
*offset0 = *offset1 - field_offset * ps;
else
*offset0 = *offset1;
- *row_inc = pixinc(-screen_width * (fbh - 1) - 1 -
- (fieldmode ? 1 : 0), ps);
- *pix_inc = pixinc(screen_width, ps);
+ *row_inc = pixinc(-screen_width * (fbh * x_predecim - 1) -
+ y_predecim - (fieldmode ? 1 : 0), ps);
+ *pix_inc = pixinc(x_predecim * screen_width, ps);
break;
/* mirroring */
@@ -1602,10 +1687,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
*offset0 = *offset1 + field_offset * screen_width * ps;
else
*offset0 = *offset1;
- *row_inc = pixinc(screen_width * 2 - 1 +
+ *row_inc = pixinc(y_predecim * screen_width * 2 - 1 +
(fieldmode ? screen_width : 0),
ps);
- *pix_inc = pixinc(-1, ps);
+ if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+ color_mode == OMAP_DSS_COLOR_UYVY)
+ *pix_inc = pixinc(-x_predecim, 2 * ps);
+ else
+ *pix_inc = pixinc(-x_predecim, ps);
break;
case OMAP_DSS_ROT_90 + 4:
@@ -1614,10 +1703,10 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
*offset0 = *offset1 + field_offset * ps;
else
*offset0 = *offset1;
- *row_inc = pixinc(-screen_width * (fbh - 1) + 1 +
- (fieldmode ? 1 : 0),
+ *row_inc = pixinc(-screen_width * (fbh * x_predecim - 1) +
+ y_predecim + (fieldmode ? 1 : 0),
ps);
- *pix_inc = pixinc(screen_width, ps);
+ *pix_inc = pixinc(x_predecim * screen_width, ps);
break;
case OMAP_DSS_ROT_180 + 4:
@@ -1626,10 +1715,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
*offset0 = *offset1 - field_offset * screen_width * ps;
else
*offset0 = *offset1;
- *row_inc = pixinc(1 - screen_width * 2 -
+ *row_inc = pixinc(1 - y_predecim * screen_width * 2 -
(fieldmode ? screen_width : 0),
ps);
- *pix_inc = pixinc(1, ps);
+ if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+ color_mode == OMAP_DSS_COLOR_UYVY)
+ *pix_inc = pixinc(x_predecim, 2 * ps);
+ else
+ *pix_inc = pixinc(x_predecim, ps);
break;
case OMAP_DSS_ROT_270 + 4:
@@ -1638,34 +1731,130 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
*offset0 = *offset1 - field_offset * ps;
else
*offset0 = *offset1;
- *row_inc = pixinc(screen_width * (fbh - 1) - 1 -
- (fieldmode ? 1 : 0),
+ *row_inc = pixinc(screen_width * (fbh * x_predecim - 1) -
+ y_predecim - (fieldmode ? 1 : 0),
ps);
- *pix_inc = pixinc(-screen_width, ps);
+ *pix_inc = pixinc(-x_predecim * screen_width, ps);
break;
default:
BUG();
+ return;
}
}
-static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width,
+static void calc_tiler_rotation_offset(u16 screen_width, u16 width,
+ enum omap_color_mode color_mode, bool fieldmode,
+ unsigned int field_offset, unsigned *offset0, unsigned *offset1,
+ s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
+{
+ u8 ps;
+
+ switch (color_mode) {
+ case OMAP_DSS_COLOR_CLUT1:
+ case OMAP_DSS_COLOR_CLUT2:
+ case OMAP_DSS_COLOR_CLUT4:
+ case OMAP_DSS_COLOR_CLUT8:
+ BUG();
+ return;
+ default:
+ ps = color_mode_to_bpp(color_mode) / 8;
+ break;
+ }
+
+ DSSDBG("scrw %d, width %d\n", screen_width, width);
+
+ /*
+ * field 0 = even field = bottom field
+ * field 1 = odd field = top field
+ */
+ *offset1 = 0;
+ if (field_offset)
+ *offset0 = *offset1 + field_offset * screen_width * ps;
+ else
+ *offset0 = *offset1;
+ *row_inc = pixinc(1 + (y_predecim * screen_width - width * x_predecim) +
+ (fieldmode ? screen_width : 0), ps);
+ if (color_mode == OMAP_DSS_COLOR_YUV2 ||
+ color_mode == OMAP_DSS_COLOR_UYVY)
+ *pix_inc = pixinc(x_predecim, 2 * ps);
+ else
+ *pix_inc = pixinc(x_predecim, ps);
+}
+
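/*
 * Worked example (illustrative numbers only): a 1920-pixel-wide ARGB32
 * surface (screen_width = 1920, ps = 4) predecimated by 2 in both
 * directions, so that width = 960 is passed in, gives via pixinc():
 *
 *	pix_inc = pixinc(2, 4)                    = 1 + (2 - 1) * 4 = 5
 *	row_inc = pixinc(1 + (2*1920 - 960*2), 4) = 1 + 1920 * 4    = 7681
 *
 * i.e. DMA skips every other pixel and every other line of the source.
 */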
+/*
+ * This function is used to avoid SYNC_LOST errors on OMAP3, which are
+ * caused by some undocumented horizontal position and timing related
+ * limitations.
+ */
+static int check_horiz_timing_omap3(enum omap_channel channel,
+ const struct omap_video_timings *t, u16 pos_x,
+ u16 width, u16 height, u16 out_width, u16 out_height)
+{
+ int DS = DIV_ROUND_UP(height, out_height);
+ unsigned long nonactive, lclk, pclk;
+ static const u8 limits[3] = { 8, 10, 20 };
+ u64 val, blank;
+ int i;
+
+ nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width;
+ pclk = dispc_mgr_pclk_rate(channel);
+ if (dispc_mgr_is_lcd(channel))
+ lclk = dispc_mgr_lclk_rate(channel);
+ else
+ lclk = dispc_fclk_rate();
+
+ i = 0;
+ if (out_height < height)
+ i++;
+ if (out_width < width)
+ i++;
+ blank = div_u64((u64)(t->hbp + t->hsw + t->hfp) * lclk, pclk);
+ DSSDBG("blanking period + ppl = %llu (limit = %u)\n", blank, limits[i]);
+ if (blank <= limits[i])
+ return -EINVAL;
+
+ /*
+ * Pixel data must be ready before the visible display point is reached.
+ * So at least DS - 2 lines must already have been fetched by DISPC
+ * during the (nonactive - pos_x) period.
+ */
+ val = div_u64((u64)(nonactive - pos_x) * lclk, pclk);
+ DSSDBG("(nonactive - pos_x) * pcd = %llu max(0, DS - 2) * width = %d\n",
+ val, max(0, DS - 2) * width);
+ if (val < max(0, DS - 2) * width)
+ return -EINVAL;
+
+ /*
+ * All lines need to be refilled during the nonactive period, as only
+ * one line can be loaded during the active period. So at least
+ * DS - 1 lines must be loaded during the nonactive period.
+ */
+ val = div_u64((u64)nonactive * lclk, pclk);
+ DSSDBG("nonactive * pcd = %llu, max(0, DS - 1) * width = %d\n",
+ val, max(0, DS - 1) * width);
+ if (val < max(0, DS - 1) * width)
+ return -EINVAL;
+
+ return 0;
+}
+
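/*
 * Worked example (illustrative numbers only): downscaling 720 lines to 360
 * gives DS = DIV_ROUND_UP(720, 360) = 2, so the last check above reduces to
 *
 *	nonactive * lclk / pclk >= (DS - 1) * width = width
 *
 * i.e. the horizontal blanking (plus any leftover active pixels) must be
 * long enough, in functional-clock cycles, to fetch one full input line;
 * otherwise the downscale is rejected and the caller falls back to a
 * higher predecimation factor.
 */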
+static unsigned long calc_core_clk_five_taps(enum omap_channel channel,
+ const struct omap_video_timings *mgr_timings, u16 width,
u16 height, u16 out_width, u16 out_height,
enum omap_color_mode color_mode)
{
- u32 fclk = 0;
+ u32 core_clk = 0;
u64 tmp, pclk = dispc_mgr_pclk_rate(channel);
if (height <= out_height && width <= out_width)
return (unsigned long) pclk;
if (height > out_height) {
- struct omap_dss_device *dssdev = dispc_mgr_get_device(channel);
- unsigned int ppl = dssdev->panel.timings.x_res;
+ unsigned int ppl = mgr_timings->x_res;
tmp = pclk * height * out_width;
do_div(tmp, 2 * out_height * ppl);
- fclk = tmp;
+ core_clk = tmp;
if (height > 2 * out_height) {
if (ppl == out_width)
@@ -1673,23 +1862,23 @@ static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width,
tmp = pclk * (height - 2 * out_height) * out_width;
do_div(tmp, 2 * out_height * (ppl - out_width));
- fclk = max(fclk, (u32) tmp);
+ core_clk = max_t(u32, core_clk, tmp);
}
}
if (width > out_width) {
tmp = pclk * width;
do_div(tmp, out_width);
- fclk = max(fclk, (u32) tmp);
+ core_clk = max_t(u32, core_clk, tmp);
if (color_mode == OMAP_DSS_COLOR_RGB24U)
- fclk <<= 1;
+ core_clk <<= 1;
}
- return fclk;
+ return core_clk;
}
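/*
 * Worked example (illustrative numbers only): vertically downscaling a
 * 1280x720 source to 1280x360 on a 74.25 MHz pixel clock with ppl = 1280
 * needs, from the first term above,
 *
 *	core_clk = pclk * height * out_width / (2 * out_height * ppl)
 *	         = 74.25 MHz * 720 * 1280 / (2 * 360 * 1280)
 *	         = 74.25 MHz
 *
 * so a clean 2x vertical five-tap downscale at full output width needs a
 * core clock of at least the pixel clock; the caller compares the result
 * against dispc_core_clk_rate().
 */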
-static unsigned long calc_fclk(enum omap_channel channel, u16 width,
+static unsigned long calc_core_clk(enum omap_channel channel, u16 width,
u16 height, u16 out_width, u16 out_height)
{
unsigned int hf, vf;
@@ -1730,15 +1919,20 @@ static unsigned long calc_fclk(enum omap_channel channel, u16 width,
}
static int dispc_ovl_calc_scaling(enum omap_plane plane,
- enum omap_channel channel, u16 width, u16 height,
- u16 out_width, u16 out_height,
- enum omap_color_mode color_mode, bool *five_taps)
+ enum omap_channel channel,
+ const struct omap_video_timings *mgr_timings,
+ u16 width, u16 height, u16 out_width, u16 out_height,
+ enum omap_color_mode color_mode, bool *five_taps,
+ int *x_predecim, int *y_predecim, u16 pos_x)
{
struct omap_overlay *ovl = omap_dss_get_overlay(plane);
const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
const int maxsinglelinewidth =
dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
- unsigned long fclk = 0;
+ const int max_decim_limit = 16;
+ unsigned long core_clk = 0;
+ int decim_x, decim_y, error, min_factor;
+ u16 in_width, in_height, in_width_max = 0;
if (width == out_width && height == out_height)
return 0;
@@ -1746,64 +1940,154 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
return -EINVAL;
- if (out_width < width / maxdownscale ||
- out_width > width * 8)
+ *x_predecim = max_decim_limit;
+ *y_predecim = max_decim_limit;
+
+ if (color_mode == OMAP_DSS_COLOR_CLUT1 ||
+ color_mode == OMAP_DSS_COLOR_CLUT2 ||
+ color_mode == OMAP_DSS_COLOR_CLUT4 ||
+ color_mode == OMAP_DSS_COLOR_CLUT8) {
+ *x_predecim = 1;
+ *y_predecim = 1;
+ *five_taps = false;
+ return 0;
+ }
+
+ decim_x = DIV_ROUND_UP(DIV_ROUND_UP(width, out_width), maxdownscale);
+ decim_y = DIV_ROUND_UP(DIV_ROUND_UP(height, out_height), maxdownscale);
+
+ min_factor = min(decim_x, decim_y);
+
+ if (decim_x > *x_predecim || out_width > width * 8)
return -EINVAL;
- if (out_height < height / maxdownscale ||
- out_height > height * 8)
+ if (decim_y > *y_predecim || out_height > height * 8)
return -EINVAL;
if (cpu_is_omap24xx()) {
- if (width > maxsinglelinewidth)
- DSSERR("Cannot scale max input width exceeded");
*five_taps = false;
- fclk = calc_fclk(channel, width, height, out_width,
- out_height);
+
+ do {
+ in_height = DIV_ROUND_UP(height, decim_y);
+ in_width = DIV_ROUND_UP(width, decim_x);
+ core_clk = calc_core_clk(channel, in_width, in_height,
+ out_width, out_height);
+ error = (in_width > maxsinglelinewidth || !core_clk ||
+ core_clk > dispc_core_clk_rate());
+ if (error) {
+ if (decim_x == decim_y) {
+ decim_x = min_factor;
+ decim_y++;
+ } else {
+ swap(decim_x, decim_y);
+ if (decim_x < decim_y)
+ decim_x++;
+ }
+ }
+ } while (decim_x <= *x_predecim && decim_y <= *y_predecim &&
+ error);
+
+ if (in_width > maxsinglelinewidth) {
+ DSSERR("Cannot scale max input width exceeded");
+ return -EINVAL;
+ }
} else if (cpu_is_omap34xx()) {
- if (width > (maxsinglelinewidth * 2)) {
+
+ do {
+ in_height = DIV_ROUND_UP(height, decim_y);
+ in_width = DIV_ROUND_UP(width, decim_x);
+ core_clk = calc_core_clk_five_taps(channel, mgr_timings,
+ in_width, in_height, out_width, out_height,
+ color_mode);
+
+ error = check_horiz_timing_omap3(channel, mgr_timings,
+ pos_x, in_width, in_height, out_width,
+ out_height);
+
+ if (in_width > maxsinglelinewidth)
+ if (in_height > out_height &&
+ in_height < out_height * 2)
+ *five_taps = false;
+ if (!*five_taps)
+ core_clk = calc_core_clk(channel, in_width,
+ in_height, out_width, out_height);
+ error = (error || in_width > maxsinglelinewidth * 2 ||
+ (in_width > maxsinglelinewidth && *five_taps) ||
+ !core_clk || core_clk > dispc_core_clk_rate());
+ if (error) {
+ if (decim_x == decim_y) {
+ decim_x = min_factor;
+ decim_y++;
+ } else {
+ swap(decim_x, decim_y);
+ if (decim_x < decim_y)
+ decim_x++;
+ }
+ }
+ } while (decim_x <= *x_predecim && decim_y <= *y_predecim
+ && error);
+
+ if (check_horiz_timing_omap3(channel, mgr_timings, pos_x, width,
+ height, out_width, out_height)) {
+ DSSERR("horizontal timing too tight\n");
+ return -EINVAL;
+ }
+
+ if (in_width > (maxsinglelinewidth * 2)) {
DSSERR("Cannot setup scaling");
DSSERR("width exceeds maximum width possible");
return -EINVAL;
}
- fclk = calc_fclk_five_taps(channel, width, height, out_width,
- out_height, color_mode);
- if (width > maxsinglelinewidth) {
- if (height > out_height && height < out_height * 2)
- *five_taps = false;
- else {
- DSSERR("cannot setup scaling with five taps");
- return -EINVAL;
- }
+
+ if (in_width > maxsinglelinewidth && *five_taps) {
+ DSSERR("cannot setup scaling with five taps");
+ return -EINVAL;
}
- if (!*five_taps)
- fclk = calc_fclk(channel, width, height, out_width,
- out_height);
} else {
- if (width > maxsinglelinewidth) {
+ int decim_x_min = decim_x;
+ in_height = DIV_ROUND_UP(height, decim_y);
+ in_width_max = dispc_core_clk_rate() /
+ DIV_ROUND_UP(dispc_mgr_pclk_rate(channel),
+ out_width);
+ decim_x = DIV_ROUND_UP(width, in_width_max);
+
+ decim_x = decim_x > decim_x_min ? decim_x : decim_x_min;
+ if (decim_x > *x_predecim)
+ return -EINVAL;
+
+ do {
+ in_width = DIV_ROUND_UP(width, decim_x);
+ } while (decim_x <= *x_predecim &&
+ in_width > maxsinglelinewidth && decim_x++);
+
+ if (in_width > maxsinglelinewidth) {
DSSERR("Cannot scale width exceeds max line width");
return -EINVAL;
}
- fclk = calc_fclk(channel, width, height, out_width,
- out_height);
+
+ core_clk = calc_core_clk(channel, in_width, in_height,
+ out_width, out_height);
}
- DSSDBG("required fclk rate = %lu Hz\n", fclk);
- DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate());
+ DSSDBG("required core clk rate = %lu Hz\n", core_clk);
+ DSSDBG("current core clk rate = %lu Hz\n", dispc_core_clk_rate());
- if (!fclk || fclk > dispc_fclk_rate()) {
+ if (!core_clk || core_clk > dispc_core_clk_rate()) {
DSSERR("failed to set up scaling, "
- "required fclk rate = %lu Hz, "
- "current fclk rate = %lu Hz\n",
- fclk, dispc_fclk_rate());
+ "required core clk rate = %lu Hz, "
+ "current core clk rate = %lu Hz\n",
+ core_clk, dispc_core_clk_rate());
return -EINVAL;
}
+ *x_predecim = decim_x;
+ *y_predecim = decim_y;
return 0;
}
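/*
 * Note on the predecimation search above: starting from the minimum
 * factors, each failed attempt bumps (decim_x, decim_y) roughly in order
 * of increasing total decimation, e.g. starting from (1, 1):
 *
 *	(1, 1) -> (1, 2) -> (2, 1) -> (2, 2) -> (1, 3) -> (3, 1) -> ...
 *
 * until a combination satisfies the line-width, core-clock and (on OMAP3)
 * horizontal-timing constraints, or the limits are exceeded and the setup
 * is rejected with -EINVAL.
 */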
int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
- bool ilace, bool replication)
+ bool ilace, bool replication,
+ const struct omap_video_timings *mgr_timings)
{
struct omap_overlay *ovl = omap_dss_get_overlay(plane);
bool five_taps = true;
@@ -1814,8 +2098,11 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
s32 pix_inc;
u16 frame_height = oi->height;
unsigned int field_offset = 0;
- u16 outw, outh;
+ u16 in_height = oi->height;
+ u16 in_width = oi->width;
+ u16 out_width, out_height;
enum omap_channel channel;
+ int x_predecim = 1, y_predecim = 1;
channel = dispc_ovl_get_channel_out(plane);
@@ -1829,32 +2116,35 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
if (oi->paddr == 0)
return -EINVAL;
- outw = oi->out_width == 0 ? oi->width : oi->out_width;
- outh = oi->out_height == 0 ? oi->height : oi->out_height;
+ out_width = oi->out_width == 0 ? oi->width : oi->out_width;
+ out_height = oi->out_height == 0 ? oi->height : oi->out_height;
- if (ilace && oi->height == outh)
+ if (ilace && oi->height == out_height)
fieldmode = 1;
if (ilace) {
if (fieldmode)
- oi->height /= 2;
+ in_height /= 2;
oi->pos_y /= 2;
- outh /= 2;
+ out_height /= 2;
DSSDBG("adjusting for ilace: height %d, pos_y %d, "
"out_height %d\n",
- oi->height, oi->pos_y, outh);
+ in_height, oi->pos_y, out_height);
}
if (!dss_feat_color_mode_supported(plane, oi->color_mode))
return -EINVAL;
- r = dispc_ovl_calc_scaling(plane, channel, oi->width, oi->height,
- outw, outh, oi->color_mode,
- &five_taps);
+ r = dispc_ovl_calc_scaling(plane, channel, mgr_timings, in_width,
+ in_height, out_width, out_height, oi->color_mode,
+ &five_taps, &x_predecim, &y_predecim, oi->pos_x);
if (r)
return r;
+ in_width = DIV_ROUND_UP(in_width, x_predecim);
+ in_height = DIV_ROUND_UP(in_height, y_predecim);
+
if (oi->color_mode == OMAP_DSS_COLOR_YUV2 ||
oi->color_mode == OMAP_DSS_COLOR_UYVY ||
oi->color_mode == OMAP_DSS_COLOR_NV12)
@@ -1868,32 +2158,46 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
* so the integer part must be added to the base address of the
* bottom field.
*/
- if (!oi->height || oi->height == outh)
+ if (!in_height || in_height == out_height)
field_offset = 0;
else
- field_offset = oi->height / outh / 2;
+ field_offset = in_height / out_height / 2;
}
/* Fields are independent but interleaved in memory. */
if (fieldmode)
field_offset = 1;
- if (oi->rotation_type == OMAP_DSS_ROT_DMA)
+ offset0 = 0;
+ offset1 = 0;
+ row_inc = 0;
+ pix_inc = 0;
+
+ if (oi->rotation_type == OMAP_DSS_ROT_TILER)
+ calc_tiler_rotation_offset(oi->screen_width, in_width,
+ oi->color_mode, fieldmode, field_offset,
+ &offset0, &offset1, &row_inc, &pix_inc,
+ x_predecim, y_predecim);
+ else if (oi->rotation_type == OMAP_DSS_ROT_DMA)
calc_dma_rotation_offset(oi->rotation, oi->mirror,
- oi->screen_width, oi->width, frame_height,
+ oi->screen_width, in_width, frame_height,
oi->color_mode, fieldmode, field_offset,
- &offset0, &offset1, &row_inc, &pix_inc);
+ &offset0, &offset1, &row_inc, &pix_inc,
+ x_predecim, y_predecim);
else
calc_vrfb_rotation_offset(oi->rotation, oi->mirror,
- oi->screen_width, oi->width, frame_height,
+ oi->screen_width, in_width, frame_height,
oi->color_mode, fieldmode, field_offset,
- &offset0, &offset1, &row_inc, &pix_inc);
+ &offset0, &offset1, &row_inc, &pix_inc,
+ x_predecim, y_predecim);
DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n",
offset0, offset1, row_inc, pix_inc);
dispc_ovl_set_color_mode(plane, oi->color_mode);
+ dispc_ovl_configure_burst_type(plane, oi->rotation_type);
+
dispc_ovl_set_ba0(plane, oi->paddr + offset0);
dispc_ovl_set_ba1(plane, oi->paddr + offset1);
@@ -1906,19 +2210,18 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
dispc_ovl_set_row_inc(plane, row_inc);
dispc_ovl_set_pix_inc(plane, pix_inc);
- DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, oi->width,
- oi->height, outw, outh);
+ DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, in_width,
+ in_height, out_width, out_height);
dispc_ovl_set_pos(plane, oi->pos_x, oi->pos_y);
- dispc_ovl_set_pic_size(plane, oi->width, oi->height);
+ dispc_ovl_set_pic_size(plane, in_width, in_height);
if (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) {
- dispc_ovl_set_scaling(plane, oi->width, oi->height,
- outw, outh,
- ilace, five_taps, fieldmode,
+ dispc_ovl_set_scaling(plane, in_width, in_height, out_width,
+ out_height, ilace, five_taps, fieldmode,
oi->color_mode, oi->rotation);
- dispc_ovl_set_vid_size(plane, outw, outh);
+ dispc_ovl_set_vid_size(plane, out_width, out_height);
dispc_ovl_set_vid_color_conv(plane, cconv);
}
@@ -2087,8 +2390,10 @@ bool dispc_mgr_is_enabled(enum omap_channel channel)
return !!REG_GET(DISPC_CONTROL, 1, 1);
else if (channel == OMAP_DSS_CHANNEL_LCD2)
return !!REG_GET(DISPC_CONTROL2, 0, 0);
- else
+ else {
BUG();
+ return false;
+ }
}
void dispc_mgr_enable(enum omap_channel channel, bool enable)
@@ -2285,6 +2590,12 @@ void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable)
REG_FLD_MOD(DISPC_CONTROL, enable, 11, 11);
}
+static bool _dispc_mgr_size_ok(u16 width, u16 height)
+{
+ return width <= dss_feat_get_param_max(FEAT_PARAM_MGR_WIDTH) &&
+ height <= dss_feat_get_param_max(FEAT_PARAM_MGR_HEIGHT);
+}
+
static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
int vsw, int vfp, int vbp)
{
@@ -2309,11 +2620,20 @@ static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
return true;
}
-bool dispc_lcd_timings_ok(struct omap_video_timings *timings)
+bool dispc_mgr_timings_ok(enum omap_channel channel,
+ const struct omap_video_timings *timings)
{
- return _dispc_lcd_timings_ok(timings->hsw, timings->hfp,
- timings->hbp, timings->vsw,
- timings->vfp, timings->vbp);
+ bool timings_ok;
+
+ timings_ok = _dispc_mgr_size_ok(timings->x_res, timings->y_res);
+
+ if (dispc_mgr_is_lcd(channel))
+ timings_ok = timings_ok && _dispc_lcd_timings_ok(timings->hsw,
+ timings->hfp, timings->hbp,
+ timings->vsw, timings->vfp,
+ timings->vbp);
+
+ return timings_ok;
}
static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
@@ -2340,37 +2660,45 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
}
/* change name to mode? */
-void dispc_mgr_set_lcd_timings(enum omap_channel channel,
+void dispc_mgr_set_timings(enum omap_channel channel,
struct omap_video_timings *timings)
{
unsigned xtot, ytot;
unsigned long ht, vt;
+ struct omap_video_timings t = *timings;
+
+ DSSDBG("channel %d xres %u yres %u\n", channel, t.x_res, t.y_res);
- if (!_dispc_lcd_timings_ok(timings->hsw, timings->hfp,
- timings->hbp, timings->vsw,
- timings->vfp, timings->vbp))
+ if (!dispc_mgr_timings_ok(channel, &t)) {
BUG();
+ return;
+ }
+
+ if (dispc_mgr_is_lcd(channel)) {
+ _dispc_mgr_set_lcd_timings(channel, t.hsw, t.hfp, t.hbp, t.vsw,
+ t.vfp, t.vbp);
+
+ xtot = t.x_res + t.hfp + t.hsw + t.hbp;
+ ytot = t.y_res + t.vfp + t.vsw + t.vbp;
- _dispc_mgr_set_lcd_timings(channel, timings->hsw, timings->hfp,
- timings->hbp, timings->vsw, timings->vfp,
- timings->vbp);
+ ht = (timings->pixel_clock * 1000) / xtot;
+ vt = (timings->pixel_clock * 1000) / xtot / ytot;
- dispc_mgr_set_lcd_size(channel, timings->x_res, timings->y_res);
+ DSSDBG("pck %u\n", timings->pixel_clock);
+ DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
+ t.hsw, t.hfp, t.hbp, t.vsw, t.vfp, t.vbp);
- xtot = timings->x_res + timings->hfp + timings->hsw + timings->hbp;
- ytot = timings->y_res + timings->vfp + timings->vsw + timings->vbp;
+ DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
+ } else {
+ enum dss_hdmi_venc_clk_source_select source;
- ht = (timings->pixel_clock * 1000) / xtot;
- vt = (timings->pixel_clock * 1000) / xtot / ytot;
+ source = dss_get_hdmi_venc_clk_source();
- DSSDBG("channel %d xres %u yres %u\n", channel, timings->x_res,
- timings->y_res);
- DSSDBG("pck %u\n", timings->pixel_clock);
- DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
- timings->hsw, timings->hfp, timings->hbp,
- timings->vsw, timings->vfp, timings->vbp);
+ if (source == DSS_VENC_TV_CLK)
+ t.y_res /= 2;
+ }
- DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
+ dispc_mgr_set_size(channel, t.x_res, t.y_res);
}
static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
@@ -2411,6 +2739,7 @@ unsigned long dispc_fclk_rate(void)
break;
default:
BUG();
+ return 0;
}
return r;
@@ -2441,6 +2770,7 @@ unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
break;
default:
BUG();
+ return 0;
}
return r / lcd;
@@ -2462,20 +2792,35 @@ unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
return r / pcd;
} else {
- struct omap_dss_device *dssdev =
- dispc_mgr_get_device(channel);
+ enum dss_hdmi_venc_clk_source_select source;
- switch (dssdev->type) {
- case OMAP_DISPLAY_TYPE_VENC:
+ source = dss_get_hdmi_venc_clk_source();
+
+ switch (source) {
+ case DSS_VENC_TV_CLK:
return venc_get_pixel_clock();
- case OMAP_DISPLAY_TYPE_HDMI:
+ case DSS_HDMI_M_PCLK:
return hdmi_get_pixel_clock();
default:
BUG();
+ return 0;
}
}
}
+unsigned long dispc_core_clk_rate(void)
+{
+ int lcd;
+ unsigned long fclk = dispc_fclk_rate();
+
+ if (dss_has_feature(FEAT_CORE_CLK_DIV))
+ lcd = REG_GET(DISPC_DIVISOR, 23, 16);
+ else
+ lcd = REG_GET(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD), 23, 16);
+
+ return fclk / lcd;
+}
+
void dispc_dump_clocks(struct seq_file *s)
{
int lcd, pcd;
@@ -2588,7 +2933,7 @@ void dispc_dump_irqs(struct seq_file *s)
}
#endif
-void dispc_dump_regs(struct seq_file *s)
+static void dispc_dump_regs(struct seq_file *s)
{
int i, j;
const char *mgr_names[] = {
@@ -3247,27 +3592,6 @@ int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
return 0;
}
-#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
-void dispc_fake_vsync_irq(void)
-{
- u32 irqstatus = DISPC_IRQ_VSYNC;
- int i;
-
- WARN_ON(!in_interrupt());
-
- for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
- struct omap_dispc_isr_data *isr_data;
- isr_data = &dispc.registered_isr[i];
-
- if (!isr_data->isr)
- continue;
-
- if (isr_data->mask & irqstatus)
- isr_data->isr(isr_data->arg, irqstatus);
- }
-}
-#endif
-
static void _omap_dispc_initialize_irq(void)
{
unsigned long flags;
@@ -3330,7 +3654,7 @@ static void _omap_dispc_initial_config(void)
}
/* DISPC HW IP initialisation */
-static int omap_dispchw_probe(struct platform_device *pdev)
+static int __init omap_dispchw_probe(struct platform_device *pdev)
{
u32 rev;
int r = 0;
@@ -3399,6 +3723,11 @@ static int omap_dispchw_probe(struct platform_device *pdev)
dispc_runtime_put();
+ dss_debugfs_create_file("dispc", dispc_dump_regs);
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ dss_debugfs_create_file("dispc_irq", dispc_dump_irqs);
+#endif
return 0;
err_runtime_get:
@@ -3407,7 +3736,7 @@ err_runtime_get:
return r;
}
-static int omap_dispchw_remove(struct platform_device *pdev)
+static int __exit omap_dispchw_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
@@ -3419,19 +3748,12 @@ static int omap_dispchw_remove(struct platform_device *pdev)
static int dispc_runtime_suspend(struct device *dev)
{
dispc_save_context();
- dss_runtime_put();
return 0;
}
static int dispc_runtime_resume(struct device *dev)
{
- int r;
-
- r = dss_runtime_get();
- if (r < 0)
- return r;
-
dispc_restore_context();
return 0;
@@ -3443,8 +3765,7 @@ static const struct dev_pm_ops dispc_pm_ops = {
};
static struct platform_driver omap_dispchw_driver = {
- .probe = omap_dispchw_probe,
- .remove = omap_dispchw_remove,
+ .remove = __exit_p(omap_dispchw_remove),
.driver = {
.name = "omapdss_dispc",
.owner = THIS_MODULE,
@@ -3452,12 +3773,12 @@ static struct platform_driver omap_dispchw_driver = {
},
};
-int dispc_init_platform_driver(void)
+int __init dispc_init_platform_driver(void)
{
- return platform_driver_register(&omap_dispchw_driver);
+ return platform_driver_probe(&omap_dispchw_driver, omap_dispchw_probe);
}
-void dispc_uninit_platform_driver(void)
+void __exit dispc_uninit_platform_driver(void)
{
- return platform_driver_unregister(&omap_dispchw_driver);
+ platform_driver_unregister(&omap_dispchw_driver);
}
diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h
index 5836bd1..f278080 100644
--- a/drivers/video/omap2/dss/dispc.h
+++ b/drivers/video/omap2/dss/dispc.h
@@ -120,6 +120,7 @@ static inline u16 DISPC_DEFAULT_COLOR(enum omap_channel channel)
return 0x03AC;
default:
BUG();
+ return 0;
}
}
@@ -134,6 +135,7 @@ static inline u16 DISPC_TRANS_COLOR(enum omap_channel channel)
return 0x03B0;
default:
BUG();
+ return 0;
}
}
@@ -144,10 +146,12 @@ static inline u16 DISPC_TIMING_H(enum omap_channel channel)
return 0x0064;
case OMAP_DSS_CHANNEL_DIGIT:
BUG();
+ return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x0400;
default:
BUG();
+ return 0;
}
}
@@ -158,10 +162,12 @@ static inline u16 DISPC_TIMING_V(enum omap_channel channel)
return 0x0068;
case OMAP_DSS_CHANNEL_DIGIT:
BUG();
+ return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x0404;
default:
BUG();
+ return 0;
}
}
@@ -172,10 +178,12 @@ static inline u16 DISPC_POL_FREQ(enum omap_channel channel)
return 0x006C;
case OMAP_DSS_CHANNEL_DIGIT:
BUG();
+ return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x0408;
default:
BUG();
+ return 0;
}
}
@@ -186,10 +194,12 @@ static inline u16 DISPC_DIVISORo(enum omap_channel channel)
return 0x0070;
case OMAP_DSS_CHANNEL_DIGIT:
BUG();
+ return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x040C;
default:
BUG();
+ return 0;
}
}
@@ -205,6 +215,7 @@ static inline u16 DISPC_SIZE_MGR(enum omap_channel channel)
return 0x03CC;
default:
BUG();
+ return 0;
}
}
@@ -215,10 +226,12 @@ static inline u16 DISPC_DATA_CYCLE1(enum omap_channel channel)
return 0x01D4;
case OMAP_DSS_CHANNEL_DIGIT:
BUG();
+ return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03C0;
default:
BUG();
+ return 0;
}
}
@@ -229,10 +242,12 @@ static inline u16 DISPC_DATA_CYCLE2(enum omap_channel channel)
return 0x01D8;
case OMAP_DSS_CHANNEL_DIGIT:
BUG();
+ return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03C4;
default:
BUG();
+ return 0;
}
}
@@ -243,10 +258,12 @@ static inline u16 DISPC_DATA_CYCLE3(enum omap_channel channel)
return 0x01DC;
case OMAP_DSS_CHANNEL_DIGIT:
BUG();
+ return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03C8;
default:
BUG();
+ return 0;
}
}
@@ -257,10 +274,12 @@ static inline u16 DISPC_CPR_COEF_R(enum omap_channel channel)
return 0x0220;
case OMAP_DSS_CHANNEL_DIGIT:
BUG();
+ return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03BC;
default:
BUG();
+ return 0;
}
}
@@ -271,10 +290,12 @@ static inline u16 DISPC_CPR_COEF_G(enum omap_channel channel)
return 0x0224;
case OMAP_DSS_CHANNEL_DIGIT:
BUG();
+ return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03B8;
default:
BUG();
+ return 0;
}
}
@@ -285,10 +306,12 @@ static inline u16 DISPC_CPR_COEF_B(enum omap_channel channel)
return 0x0228;
case OMAP_DSS_CHANNEL_DIGIT:
BUG();
+ return 0;
case OMAP_DSS_CHANNEL_LCD2:
return 0x03B4;
default:
BUG();
+ return 0;
}
}
@@ -306,6 +329,7 @@ static inline u16 DISPC_OVL_BASE(enum omap_plane plane)
return 0x0300;
default:
BUG();
+ return 0;
}
}
@@ -321,6 +345,7 @@ static inline u16 DISPC_BA0_OFFSET(enum omap_plane plane)
return 0x0008;
default:
BUG();
+ return 0;
}
}
@@ -335,6 +360,7 @@ static inline u16 DISPC_BA1_OFFSET(enum omap_plane plane)
return 0x000C;
default:
BUG();
+ return 0;
}
}
@@ -343,6 +369,7 @@ static inline u16 DISPC_BA0_UV_OFFSET(enum omap_plane plane)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
return 0x0544;
case OMAP_DSS_VIDEO2:
@@ -351,6 +378,7 @@ static inline u16 DISPC_BA0_UV_OFFSET(enum omap_plane plane)
return 0x0310;
default:
BUG();
+ return 0;
}
}
@@ -359,6 +387,7 @@ static inline u16 DISPC_BA1_UV_OFFSET(enum omap_plane plane)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
return 0x0548;
case OMAP_DSS_VIDEO2:
@@ -367,6 +396,7 @@ static inline u16 DISPC_BA1_UV_OFFSET(enum omap_plane plane)
return 0x0314;
default:
BUG();
+ return 0;
}
}
@@ -381,6 +411,7 @@ static inline u16 DISPC_POS_OFFSET(enum omap_plane plane)
return 0x009C;
default:
BUG();
+ return 0;
}
}
@@ -395,6 +426,7 @@ static inline u16 DISPC_SIZE_OFFSET(enum omap_plane plane)
return 0x00A8;
default:
BUG();
+ return 0;
}
}
@@ -410,6 +442,7 @@ static inline u16 DISPC_ATTR_OFFSET(enum omap_plane plane)
return 0x0070;
default:
BUG();
+ return 0;
}
}
@@ -418,6 +451,7 @@ static inline u16 DISPC_ATTR2_OFFSET(enum omap_plane plane)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
return 0x0568;
case OMAP_DSS_VIDEO2:
@@ -426,6 +460,7 @@ static inline u16 DISPC_ATTR2_OFFSET(enum omap_plane plane)
return 0x032C;
default:
BUG();
+ return 0;
}
}
@@ -441,6 +476,7 @@ static inline u16 DISPC_FIFO_THRESH_OFFSET(enum omap_plane plane)
return 0x008C;
default:
BUG();
+ return 0;
}
}
@@ -456,6 +492,7 @@ static inline u16 DISPC_FIFO_SIZE_STATUS_OFFSET(enum omap_plane plane)
return 0x0088;
default:
BUG();
+ return 0;
}
}
@@ -471,6 +508,7 @@ static inline u16 DISPC_ROW_INC_OFFSET(enum omap_plane plane)
return 0x00A4;
default:
BUG();
+ return 0;
}
}
@@ -486,6 +524,7 @@ static inline u16 DISPC_PIX_INC_OFFSET(enum omap_plane plane)
return 0x0098;
default:
BUG();
+ return 0;
}
}
@@ -498,8 +537,10 @@ static inline u16 DISPC_WINDOW_SKIP_OFFSET(enum omap_plane plane)
case OMAP_DSS_VIDEO2:
case OMAP_DSS_VIDEO3:
BUG();
+ return 0;
default:
BUG();
+ return 0;
}
}
@@ -512,8 +553,10 @@ static inline u16 DISPC_TABLE_BA_OFFSET(enum omap_plane plane)
case OMAP_DSS_VIDEO2:
case OMAP_DSS_VIDEO3:
BUG();
+ return 0;
default:
BUG();
+ return 0;
}
}
@@ -522,6 +565,7 @@ static inline u16 DISPC_FIR_OFFSET(enum omap_plane plane)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0024;
@@ -529,6 +573,7 @@ static inline u16 DISPC_FIR_OFFSET(enum omap_plane plane)
return 0x0090;
default:
BUG();
+ return 0;
}
}
@@ -537,6 +582,7 @@ static inline u16 DISPC_FIR2_OFFSET(enum omap_plane plane)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
return 0x0580;
case OMAP_DSS_VIDEO2:
@@ -545,6 +591,7 @@ static inline u16 DISPC_FIR2_OFFSET(enum omap_plane plane)
return 0x0424;
default:
BUG();
+ return 0;
}
}
@@ -553,6 +600,7 @@ static inline u16 DISPC_PIC_SIZE_OFFSET(enum omap_plane plane)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0028;
@@ -560,6 +608,7 @@ static inline u16 DISPC_PIC_SIZE_OFFSET(enum omap_plane plane)
return 0x0094;
default:
BUG();
+ return 0;
}
}
@@ -569,6 +618,7 @@ static inline u16 DISPC_ACCU0_OFFSET(enum omap_plane plane)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x002C;
@@ -576,6 +626,7 @@ static inline u16 DISPC_ACCU0_OFFSET(enum omap_plane plane)
return 0x0000;
default:
BUG();
+ return 0;
}
}
@@ -584,6 +635,7 @@ static inline u16 DISPC_ACCU2_0_OFFSET(enum omap_plane plane)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
return 0x0584;
case OMAP_DSS_VIDEO2:
@@ -592,6 +644,7 @@ static inline u16 DISPC_ACCU2_0_OFFSET(enum omap_plane plane)
return 0x0428;
default:
BUG();
+ return 0;
}
}
@@ -600,6 +653,7 @@ static inline u16 DISPC_ACCU1_OFFSET(enum omap_plane plane)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0030;
@@ -607,6 +661,7 @@ static inline u16 DISPC_ACCU1_OFFSET(enum omap_plane plane)
return 0x0004;
default:
BUG();
+ return 0;
}
}
@@ -615,6 +670,7 @@ static inline u16 DISPC_ACCU2_1_OFFSET(enum omap_plane plane)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
return 0x0588;
case OMAP_DSS_VIDEO2:
@@ -623,6 +679,7 @@ static inline u16 DISPC_ACCU2_1_OFFSET(enum omap_plane plane)
return 0x042C;
default:
BUG();
+ return 0;
}
}
@@ -632,6 +689,7 @@ static inline u16 DISPC_FIR_COEF_H_OFFSET(enum omap_plane plane, u16 i)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0034 + i * 0x8;
@@ -639,6 +697,7 @@ static inline u16 DISPC_FIR_COEF_H_OFFSET(enum omap_plane plane, u16 i)
return 0x0010 + i * 0x8;
default:
BUG();
+ return 0;
}
}
@@ -648,6 +707,7 @@ static inline u16 DISPC_FIR_COEF_H2_OFFSET(enum omap_plane plane, u16 i)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
return 0x058C + i * 0x8;
case OMAP_DSS_VIDEO2:
@@ -656,6 +716,7 @@ static inline u16 DISPC_FIR_COEF_H2_OFFSET(enum omap_plane plane, u16 i)
return 0x0430 + i * 0x8;
default:
BUG();
+ return 0;
}
}
@@ -665,6 +726,7 @@ static inline u16 DISPC_FIR_COEF_HV_OFFSET(enum omap_plane plane, u16 i)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
return 0x0038 + i * 0x8;
@@ -672,6 +734,7 @@ static inline u16 DISPC_FIR_COEF_HV_OFFSET(enum omap_plane plane, u16 i)
return 0x0014 + i * 0x8;
default:
BUG();
+ return 0;
}
}
@@ -681,6 +744,7 @@ static inline u16 DISPC_FIR_COEF_HV2_OFFSET(enum omap_plane plane, u16 i)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
return 0x0590 + i * 8;
case OMAP_DSS_VIDEO2:
@@ -689,6 +753,7 @@ static inline u16 DISPC_FIR_COEF_HV2_OFFSET(enum omap_plane plane, u16 i)
return 0x0434 + i * 0x8;
default:
BUG();
+ return 0;
}
}
@@ -698,12 +763,14 @@ static inline u16 DISPC_CONV_COEF_OFFSET(enum omap_plane plane, u16 i)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
case OMAP_DSS_VIDEO2:
case OMAP_DSS_VIDEO3:
return 0x0074 + i * 0x4;
default:
BUG();
+ return 0;
}
}
@@ -713,6 +780,7 @@ static inline u16 DISPC_FIR_COEF_V_OFFSET(enum omap_plane plane, u16 i)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
return 0x0124 + i * 0x4;
case OMAP_DSS_VIDEO2:
@@ -721,6 +789,7 @@ static inline u16 DISPC_FIR_COEF_V_OFFSET(enum omap_plane plane, u16 i)
return 0x0050 + i * 0x4;
default:
BUG();
+ return 0;
}
}
@@ -730,6 +799,7 @@ static inline u16 DISPC_FIR_COEF_V2_OFFSET(enum omap_plane plane, u16 i)
switch (plane) {
case OMAP_DSS_GFX:
BUG();
+ return 0;
case OMAP_DSS_VIDEO1:
return 0x05CC + i * 0x4;
case OMAP_DSS_VIDEO2:
@@ -738,6 +808,7 @@ static inline u16 DISPC_FIR_COEF_V2_OFFSET(enum omap_plane plane, u16 i)
return 0x0470 + i * 0x4;
default:
BUG();
+ return 0;
}
}
@@ -754,6 +825,7 @@ static inline u16 DISPC_PRELOAD_OFFSET(enum omap_plane plane)
return 0x00A0;
default:
BUG();
+ return 0;
}
}
#endif
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index 4424c19..2490106 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -304,10 +304,18 @@ int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
return 24;
default:
BUG();
+ return 0;
}
}
EXPORT_SYMBOL(omapdss_default_get_recommended_bpp);
+void omapdss_default_get_timings(struct omap_dss_device *dssdev,
+ struct omap_video_timings *timings)
+{
+ *timings = dssdev->panel.timings;
+}
+EXPORT_SYMBOL(omapdss_default_get_timings);
+
/* Checks if replication logic should be used. Only use for active matrix,
* when overlay is in RGB12U or RGB16 mode, and LCD interface is
* 18bpp or 24bpp */
@@ -340,6 +348,7 @@ bool dss_use_replication(struct omap_dss_device *dssdev,
break;
default:
BUG();
+ return false;
}
return bpp > 16;
@@ -352,46 +361,6 @@ void dss_init_device(struct platform_device *pdev,
int i;
int r;
- switch (dssdev->type) {
-#ifdef CONFIG_OMAP2_DSS_DPI
- case OMAP_DISPLAY_TYPE_DPI:
- r = dpi_init_display(dssdev);
- break;
-#endif
-#ifdef CONFIG_OMAP2_DSS_RFBI
- case OMAP_DISPLAY_TYPE_DBI:
- r = rfbi_init_display(dssdev);
- break;
-#endif
-#ifdef CONFIG_OMAP2_DSS_VENC
- case OMAP_DISPLAY_TYPE_VENC:
- r = venc_init_display(dssdev);
- break;
-#endif
-#ifdef CONFIG_OMAP2_DSS_SDI
- case OMAP_DISPLAY_TYPE_SDI:
- r = sdi_init_display(dssdev);
- break;
-#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
- case OMAP_DISPLAY_TYPE_DSI:
- r = dsi_init_display(dssdev);
- break;
-#endif
- case OMAP_DISPLAY_TYPE_HDMI:
- r = hdmi_init_display(dssdev);
- break;
- default:
- DSSERR("Support for display '%s' not compiled in.\n",
- dssdev->name);
- return;
- }
-
- if (r) {
- DSSERR("failed to init display %s\n", dssdev->name);
- return;
- }
-
/* create device sysfs files */
i = 0;
while ((attr = display_sysfs_attrs[i++]) != NULL) {
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index faaf305..8c2056c 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -156,7 +156,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
t->pixel_clock = pck;
}
- dispc_mgr_set_lcd_timings(dssdev->manager->id, t);
+ dss_mgr_set_timings(dssdev->manager, t);
return 0;
}
@@ -202,10 +202,6 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
goto err_reg_enable;
}
- r = dss_runtime_get();
- if (r)
- goto err_get_dss;
-
r = dispc_runtime_get();
if (r)
goto err_get_dispc;
@@ -244,8 +240,6 @@ err_dsi_pll_init:
err_get_dsi:
dispc_runtime_put();
err_get_dispc:
- dss_runtime_put();
-err_get_dss:
if (cpu_is_omap34xx())
regulator_disable(dpi.vdds_dsi_reg);
err_reg_enable:
@@ -266,7 +260,6 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
}
dispc_runtime_put();
- dss_runtime_put();
if (cpu_is_omap34xx())
regulator_disable(dpi.vdds_dsi_reg);
@@ -283,21 +276,15 @@ void dpi_set_timings(struct omap_dss_device *dssdev,
DSSDBG("dpi_set_timings\n");
dssdev->panel.timings = *timings;
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
- r = dss_runtime_get();
- if (r)
- return;
-
r = dispc_runtime_get();
- if (r) {
- dss_runtime_put();
+ if (r)
return;
- }
dpi_set_mode(dssdev);
- dispc_mgr_go(dssdev->manager->id);
dispc_runtime_put();
- dss_runtime_put();
+ } else {
+ dss_mgr_set_timings(dssdev->manager, timings);
}
}
EXPORT_SYMBOL(dpi_set_timings);
@@ -312,7 +299,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
unsigned long pck;
struct dispc_clock_info dispc_cinfo;
- if (!dispc_lcd_timings_ok(timings))
+ if (dss_mgr_check_timings(dssdev->manager, timings))
return -EINVAL;
if (timings->pixel_clock == 0)
@@ -352,7 +339,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
}
EXPORT_SYMBOL(dpi_check_timings);
-int dpi_init_display(struct omap_dss_device *dssdev)
+static int __init dpi_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("init_display\n");
@@ -378,12 +365,58 @@ int dpi_init_display(struct omap_dss_device *dssdev)
return 0;
}
-int dpi_init(void)
+static void __init dpi_probe_pdata(struct platform_device *pdev)
{
+ struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+ int i, r;
+
+ for (i = 0; i < pdata->num_devices; ++i) {
+ struct omap_dss_device *dssdev = pdata->devices[i];
+
+ if (dssdev->type != OMAP_DISPLAY_TYPE_DPI)
+ continue;
+
+ r = dpi_init_display(dssdev);
+ if (r) {
+ DSSERR("device %s init failed: %d\n", dssdev->name, r);
+ continue;
+ }
+
+ r = omap_dss_register_device(dssdev, &pdev->dev, i);
+ if (r)
+ DSSERR("device %s register failed: %d\n",
+ dssdev->name, r);
+ }
+}
+
+static int __init omap_dpi_probe(struct platform_device *pdev)
+{
+ dpi_probe_pdata(pdev);
+
+ return 0;
+}
+
+static int __exit omap_dpi_remove(struct platform_device *pdev)
+{
+ omap_dss_unregister_child_devices(&pdev->dev);
+
return 0;
}
-void dpi_exit(void)
+static struct platform_driver omap_dpi_driver = {
+ .remove = __exit_p(omap_dpi_remove),
+ .driver = {
+ .name = "omapdss_dpi",
+ .owner = THIS_MODULE,
+ },
+};
+
+int __init dpi_init_platform_driver(void)
{
+ return platform_driver_probe(&omap_dpi_driver, omap_dpi_probe);
}
+void __exit dpi_uninit_platform_driver(void)
+{
+ platform_driver_unregister(&omap_dpi_driver);
+}
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 210a3c4..14ce8cc 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -256,14 +256,13 @@ struct dsi_data {
struct platform_device *pdev;
void __iomem *base;
+ int module_id;
+
int irq;
struct clk *dss_clk;
struct clk *sys_clk;
- int (*enable_pads)(int dsi_id, unsigned lane_mask);
- void (*disable_pads)(int dsi_id, unsigned lane_mask);
-
struct dsi_clock_info current_cinfo;
bool vdds_dsi_enabled;
@@ -361,11 +360,6 @@ struct platform_device *dsi_get_dsidev_from_id(int module)
return dsi_pdev_map[module];
}
-static inline int dsi_get_dsidev_id(struct platform_device *dsidev)
-{
- return dsidev->id;
-}
-
static inline void dsi_write_reg(struct platform_device *dsidev,
const struct dsi_reg idx, u32 val)
{
@@ -452,6 +446,7 @@ u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
return 16;
default:
BUG();
+ return 0;
}
}
@@ -1080,7 +1075,7 @@ void dsi_runtime_put(struct platform_device *dsidev)
DSSDBG("dsi_runtime_put\n");
r = pm_runtime_put_sync(&dsi->pdev->dev);
- WARN_ON(r < 0);
+ WARN_ON(r < 0 && r != -ENOSYS);
}
/* source clock for DSI PLL. this could also be PCLKFREE */
@@ -1184,10 +1179,9 @@ static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
{
unsigned long r;
- int dsi_module = dsi_get_dsidev_id(dsidev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) {
+ if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) {
/* DSI FCLK source is DSS_CLK_FCK */
r = clk_get_rate(dsi->dss_clk);
} else {
@@ -1279,10 +1273,9 @@ static int dsi_pll_power(struct platform_device *dsidev,
}
/* calculate clock rates using dividers in cinfo */
-static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
+static int dsi_calc_clock_rates(struct platform_device *dsidev,
struct dsi_clock_info *cinfo)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
@@ -1297,21 +1290,8 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
if (cinfo->regm_dsi > dsi->regm_dsi_max)
return -EINVAL;
- if (cinfo->use_sys_clk) {
- cinfo->clkin = clk_get_rate(dsi->sys_clk);
- /* XXX it is unclear if highfreq should be used
- * with DSS_SYS_CLK source also */
- cinfo->highfreq = 0;
- } else {
- cinfo->clkin = dispc_mgr_pclk_rate(dssdev->manager->id);
-
- if (cinfo->clkin < 32000000)
- cinfo->highfreq = 0;
- else
- cinfo->highfreq = 1;
- }
-
- cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
+ cinfo->clkin = clk_get_rate(dsi->sys_clk);
+ cinfo->fint = cinfo->clkin / cinfo->regn;
if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
return -EINVAL;
@@ -1378,27 +1358,21 @@ retry:
memset(&cur, 0, sizeof(cur));
cur.clkin = dss_sys_clk;
- cur.use_sys_clk = 1;
- cur.highfreq = 0;
- /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
- /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
+ /* 0.75MHz < Fint = clkin / regn < 2.1MHz */
/* To reduce PLL lock time, keep Fint high (around 2 MHz) */
for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
- if (cur.highfreq == 0)
- cur.fint = cur.clkin / cur.regn;
- else
- cur.fint = cur.clkin / (2 * cur.regn);
+ cur.fint = cur.clkin / cur.regn;
if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
continue;
- /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
+ /* DSIPHY(MHz) = (2 * regm / regn) * clkin */
for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
unsigned long a, b;
a = 2 * cur.regm * (cur.clkin/1000);
- b = cur.regn * (cur.highfreq + 1);
+ b = cur.regn;
cur.clkin4ddr = a / b * 1000;
if (cur.clkin4ddr > 1800 * 1000 * 1000)
@@ -1486,9 +1460,7 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
DSSDBGF();
- dsi->current_cinfo.use_sys_clk = cinfo->use_sys_clk;
- dsi->current_cinfo.highfreq = cinfo->highfreq;
-
+ dsi->current_cinfo.clkin = cinfo->clkin;
dsi->current_cinfo.fint = cinfo->fint;
dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk =
@@ -1503,17 +1475,13 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
DSSDBG("DSI Fint %ld\n", cinfo->fint);
- DSSDBG("clkin (%s) rate %ld, highfreq %d\n",
- cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree",
- cinfo->clkin,
- cinfo->highfreq);
+ DSSDBG("clkin rate %ld\n", cinfo->clkin);
/* DSIPHY == CLKIN4DDR */
- DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu / %d = %lu\n",
+ DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu = %lu\n",
cinfo->regm,
cinfo->regn,
cinfo->clkin,
- cinfo->highfreq + 1,
cinfo->clkin4ddr);
DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
@@ -1568,10 +1536,6 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
if (dss_has_feature(FEAT_DSI_PLL_FREQSEL))
l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
- l = FLD_MOD(l, cinfo->use_sys_clk ? 0 : 1,
- 11, 11); /* DSI_PLL_CLKSEL */
- l = FLD_MOD(l, cinfo->highfreq,
- 12, 12); /* DSI_PLL_HIGHFREQ */
l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
@@ -1716,7 +1680,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct dsi_clock_info *cinfo = &dsi->current_cinfo;
enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
- int dsi_module = dsi_get_dsidev_id(dsidev);
+ int dsi_module = dsi->module_id;
dispc_clk_src = dss_get_dispc_clk_source();
dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
@@ -1726,8 +1690,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
- seq_printf(s, "dsi pll source = %s\n",
- cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree");
+ seq_printf(s, "dsi pll clkin\t%lu\n", cinfo->clkin);
seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
@@ -1789,7 +1752,6 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
struct dsi_irq_stats stats;
- int dsi_module = dsi_get_dsidev_id(dsidev);
spin_lock_irqsave(&dsi->irq_stats_lock, flags);
@@ -1806,7 +1768,7 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
#define PIS(x) \
seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
- seq_printf(s, "-- DSI%d interrupts --\n", dsi_module + 1);
+ seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
PIS(VC0);
PIS(VC1);
PIS(VC2);
@@ -1886,22 +1848,6 @@ static void dsi2_dump_irqs(struct seq_file *s)
dsi_dump_dsidev_irqs(dsidev, s);
}
-
-void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
- const struct file_operations *debug_fops)
-{
- struct platform_device *dsidev;
-
- dsidev = dsi_get_dsidev_from_id(0);
- if (dsidev)
- debugfs_create_file("dsi1_irqs", S_IRUGO, debugfs_dir,
- &dsi1_dump_irqs, debug_fops);
-
- dsidev = dsi_get_dsidev_from_id(1);
- if (dsidev)
- debugfs_create_file("dsi2_irqs", S_IRUGO, debugfs_dir,
- &dsi2_dump_irqs, debug_fops);
-}
#endif
static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
@@ -2002,21 +1948,6 @@ static void dsi2_dump_regs(struct seq_file *s)
dsi_dump_dsidev_regs(dsidev, s);
}
-void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
- const struct file_operations *debug_fops)
-{
- struct platform_device *dsidev;
-
- dsidev = dsi_get_dsidev_from_id(0);
- if (dsidev)
- debugfs_create_file("dsi1_regs", S_IRUGO, debugfs_dir,
- &dsi1_dump_regs, debug_fops);
-
- dsidev = dsi_get_dsidev_from_id(1);
- if (dsidev)
- debugfs_create_file("dsi2_regs", S_IRUGO, debugfs_dir,
- &dsi2_dump_regs, debug_fops);
-}
enum dsi_cio_power_state {
DSI_COMPLEXIO_POWER_OFF = 0x0,
DSI_COMPLEXIO_POWER_ON = 0x1,
@@ -2073,6 +2004,7 @@ static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
return 1365 * 3; /* 1365x24 bits */
default:
BUG();
+ return 0;
}
}
@@ -2337,7 +2269,7 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
DSSDBGF();
- r = dsi->enable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
+ r = dss_dsi_enable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
if (r)
return r;
@@ -2447,7 +2379,7 @@ err_cio_pwr:
dsi_cio_disable_lane_override(dsidev);
err_scp_clk_dom:
dsi_disable_scp_clk(dsidev);
- dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
+ dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
return r;
}
@@ -2461,7 +2393,7 @@ static void dsi_cio_uninit(struct omap_dss_device *dssdev)
dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
dsi_disable_scp_clk(dsidev);
- dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
+ dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
}
static void dsi_config_tx_fifo(struct platform_device *dsidev,
@@ -2485,6 +2417,7 @@ static void dsi_config_tx_fifo(struct platform_device *dsidev,
if (add + size > 4) {
DSSERR("Illegal FIFO configuration\n");
BUG();
+ return;
}
v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
@@ -2517,6 +2450,7 @@ static void dsi_config_rx_fifo(struct platform_device *dsidev,
if (add + size > 4) {
DSSERR("Illegal FIFO configuration\n");
BUG();
+ return;
}
v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
@@ -2658,6 +2592,7 @@ static int dsi_sync_vc(struct platform_device *dsidev, int channel)
return dsi_sync_vc_l4(dsidev, channel);
default:
BUG();
+ return -EINVAL;
}
}
@@ -3226,6 +3161,7 @@ static int dsi_vc_generic_send_read_request(struct omap_dss_device *dssdev,
data = reqdata[0] | (reqdata[1] << 8);
} else {
BUG();
+ return -EINVAL;
}
r = dsi_vc_send_short(dsidev, channel, data_type, data, 0);
@@ -3340,7 +3276,6 @@ static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
goto err;
}
- BUG();
err:
DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel,
type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS");
@@ -3735,6 +3670,186 @@ static void dsi_config_blanking_modes(struct omap_dss_device *dssdev)
dsi_write_reg(dsidev, DSI_CTRL, r);
}
+/*
+ * According to section 'HS Command Mode Interleaving' in OMAP TRM, Scenario 3
+ * results in maximum transition time for data and clock lanes to enter and
+ * exit HS mode. Hence, this is the scenario where the least amount of command
+ * mode data can be interleaved. We program the minimum number of TXBYTECLKHS
+ * clock cycles that can be used to interleave command mode data in HS so that
+ * all scenarios are satisfied.
+ */
+static int dsi_compute_interleave_hs(int blank, bool ddr_alwon, int enter_hs,
+ int exit_hs, int exiths_clk, int ddr_pre, int ddr_post)
+{
+ int transition;
+
+ /*
+ * If DDR_CLK_ALWAYS_ON is set, we need to consider HS mode transition
+ * time of data lanes only; if it isn't set, we need to consider HS
+ * transition time of both data and clock lanes. HS transition time
+ * of Scenario 3 is considered.
+ */
+ if (ddr_alwon) {
+ transition = enter_hs + exit_hs + max(enter_hs, 2) + 1;
+ } else {
+ int trans1, trans2;
+ trans1 = ddr_pre + enter_hs + exit_hs + max(enter_hs, 2) + 1;
+ trans2 = ddr_pre + enter_hs + exiths_clk + ddr_post + ddr_pre +
+ enter_hs + 1;
+ transition = max(trans1, trans2);
+ }
+
+ return blank > transition ? blank - transition : 0;
+}
+
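As a worked instance of the formula in the comment above (the latencies below are assumed values for illustration, not taken from the patch):

/*
 * Assume enter_hs_mode_lat = 7, exit_hs_mode_lat = 14 and a blanking
 * interval of 400 TXBYTECLKHS with DDR_CLK_ALWAYS_ON set:
 *
 *   transition = enter_hs + exit_hs + max(enter_hs, 2) + 1
 *              = 7 + 14 + 7 + 1 = 29 TXBYTECLKHS
 *
 *   interleave = blank - transition = 400 - 29
 *              = 371 TXBYTECLKHS left for HS command mode data
 */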
+/*
+ * According to section 'LP Command Mode Interleaving' in OMAP TRM, Scenario 1
+ * results in maximum transition time for data lanes to enter and exit LP mode.
+ * Hence, this is the scenario where the least amount of command mode data can
+ * be interleaved. We program the minimum number of bytes that can be
+ * interleaved in LP so that all scenarios are satisfied.
+ */
+static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
+ int lp_clk_div, int tdsi_fclk)
+{
+ int trans_lp; /* time required for a LP transition, in TXBYTECLKHS */
+ int tlp_avail; /* time left for interleaving commands, in CLKIN4DDR */
+ int ttxclkesc; /* period of LP transmit escape clock, in CLKIN4DDR */
+ int thsbyte_clk = 16; /* Period of TXBYTECLKHS clock, in CLKIN4DDR */
+ int lp_inter; /* cmd mode data that can be interleaved, in bytes */
+
+ /* maximum LP transition time according to Scenario 1 */
+ trans_lp = exit_hs + max(enter_hs, 2) + 1;
+
+ /* CLKIN4DDR = 16 * TXBYTECLKHS */
+ tlp_avail = thsbyte_clk * (blank - trans_lp);
+
+ ttxclkesc = tdsi_fclk * lp_clk_div;
+
+ lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc -
+ 26) / 16;
+
+ return max(lp_inter, 0);
+}
+
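A corresponding worked instance for the LP path, again with assumed input values:

/*
 * Assume blank = 400 TXBYTECLKHS, enter_hs = 7, exit_hs = 14,
 * lp_clk_div = 10 and tdsi_fclk = 5:
 *
 *   trans_lp  = exit_hs + max(enter_hs, 2) + 1 = 14 + 7 + 1 = 22
 *   tlp_avail = 16 * (400 - 22) = 6048 CLKIN4DDR cycles
 *   ttxclkesc = 5 * 10 = 50 CLKIN4DDR cycles
 *   lp_inter  = ((6048 - 8 * 16 - 5 * 5) / 50 - 26) / 16
 *             = (117 - 26) / 16 = 5 bytes of interleaved LP data
 */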
+static void dsi_config_cmd_mode_interleaving(struct omap_dss_device *dssdev)
+{
+ struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ int blanking_mode;
+ int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
+ int hsa, hfp, hbp, width_bytes, bllp, lp_clk_div;
+ int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
+ int tclk_trail, ths_exit, exiths_clk;
+ bool ddr_alwon;
+ struct omap_video_timings *timings = &dssdev->panel.timings;
+ int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
+ int ndl = dsi->num_lanes_used - 1;
+ int dsi_fclk_hsdiv = dssdev->clocks.dsi.regm_dsi + 1;
+ int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
+ int hfp_interleave_hs = 0, hfp_interleave_lp = 0;
+ int hbp_interleave_hs = 0, hbp_interleave_lp = 0;
+ int bl_interleave_hs = 0, bl_interleave_lp = 0;
+ u32 r;
+
+ r = dsi_read_reg(dsidev, DSI_CTRL);
+ blanking_mode = FLD_GET(r, 20, 20);
+ hfp_blanking_mode = FLD_GET(r, 21, 21);
+ hbp_blanking_mode = FLD_GET(r, 22, 22);
+ hsa_blanking_mode = FLD_GET(r, 23, 23);
+
+ r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
+ hbp = FLD_GET(r, 11, 0);
+ hfp = FLD_GET(r, 23, 12);
+ hsa = FLD_GET(r, 31, 24);
+
+ r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
+ ddr_clk_post = FLD_GET(r, 7, 0);
+ ddr_clk_pre = FLD_GET(r, 15, 8);
+
+ r = dsi_read_reg(dsidev, DSI_VM_TIMING7);
+ exit_hs_mode_lat = FLD_GET(r, 15, 0);
+ enter_hs_mode_lat = FLD_GET(r, 31, 16);
+
+ r = dsi_read_reg(dsidev, DSI_CLK_CTRL);
+ lp_clk_div = FLD_GET(r, 12, 0);
+ ddr_alwon = FLD_GET(r, 13, 13);
+
+ r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
+ ths_exit = FLD_GET(r, 7, 0);
+
+ r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
+ tclk_trail = FLD_GET(r, 15, 8);
+
+ exiths_clk = ths_exit + tclk_trail;
+
+ width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
+ bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl);
+
+ if (!hsa_blanking_mode) {
+ hsa_interleave_hs = dsi_compute_interleave_hs(hsa, ddr_alwon,
+ enter_hs_mode_lat, exit_hs_mode_lat,
+ exiths_clk, ddr_clk_pre, ddr_clk_post);
+ hsa_interleave_lp = dsi_compute_interleave_lp(hsa,
+ enter_hs_mode_lat, exit_hs_mode_lat,
+ lp_clk_div, dsi_fclk_hsdiv);
+ }
+
+ if (!hfp_blanking_mode) {
+ hfp_interleave_hs = dsi_compute_interleave_hs(hfp, ddr_alwon,
+ enter_hs_mode_lat, exit_hs_mode_lat,
+ exiths_clk, ddr_clk_pre, ddr_clk_post);
+ hfp_interleave_lp = dsi_compute_interleave_lp(hfp,
+ enter_hs_mode_lat, exit_hs_mode_lat,
+ lp_clk_div, dsi_fclk_hsdiv);
+ }
+
+ if (!hbp_blanking_mode) {
+ hbp_interleave_hs = dsi_compute_interleave_hs(hbp, ddr_alwon,
+ enter_hs_mode_lat, exit_hs_mode_lat,
+ exiths_clk, ddr_clk_pre, ddr_clk_post);
+
+ hbp_interleave_lp = dsi_compute_interleave_lp(hbp,
+ enter_hs_mode_lat, exit_hs_mode_lat,
+ lp_clk_div, dsi_fclk_hsdiv);
+ }
+
+ if (!blanking_mode) {
+ bl_interleave_hs = dsi_compute_interleave_hs(bllp, ddr_alwon,
+ enter_hs_mode_lat, exit_hs_mode_lat,
+ exiths_clk, ddr_clk_pre, ddr_clk_post);
+
+ bl_interleave_lp = dsi_compute_interleave_lp(bllp,
+ enter_hs_mode_lat, exit_hs_mode_lat,
+ lp_clk_div, dsi_fclk_hsdiv);
+ }
+
+ DSSDBG("DSI HS interleaving(TXBYTECLKHS) HSA %d, HFP %d, HBP %d, BLLP %d\n",
+ hsa_interleave_hs, hfp_interleave_hs, hbp_interleave_hs,
+ bl_interleave_hs);
+
+ DSSDBG("DSI LP interleaving(bytes) HSA %d, HFP %d, HBP %d, BLLP %d\n",
+ hsa_interleave_lp, hfp_interleave_lp, hbp_interleave_lp,
+ bl_interleave_lp);
+
+ r = dsi_read_reg(dsidev, DSI_VM_TIMING4);
+ r = FLD_MOD(r, hsa_interleave_hs, 23, 16);
+ r = FLD_MOD(r, hfp_interleave_hs, 15, 8);
+ r = FLD_MOD(r, hbp_interleave_hs, 7, 0);
+ dsi_write_reg(dsidev, DSI_VM_TIMING4, r);
+
+ r = dsi_read_reg(dsidev, DSI_VM_TIMING5);
+ r = FLD_MOD(r, hsa_interleave_lp, 23, 16);
+ r = FLD_MOD(r, hfp_interleave_lp, 15, 8);
+ r = FLD_MOD(r, hbp_interleave_lp, 7, 0);
+ dsi_write_reg(dsidev, DSI_VM_TIMING5, r);
+
+ r = dsi_read_reg(dsidev, DSI_VM_TIMING6);
+ r = FLD_MOD(r, bl_interleave_hs, 31, 15);
+ r = FLD_MOD(r, bl_interleave_lp, 16, 0);
+ dsi_write_reg(dsidev, DSI_VM_TIMING6, r);
+}
+
static int dsi_proto_config(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -3769,6 +3884,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
break;
default:
BUG();
+ return -EINVAL;
}
r = dsi_read_reg(dsidev, DSI_CTRL);
@@ -3793,6 +3909,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) {
dsi_config_vp_sync_events(dssdev);
dsi_config_blanking_modes(dssdev);
+ dsi_config_cmd_mode_interleaving(dssdev);
}
dsi_vc_initial_config(dsidev, 0);
@@ -4008,6 +4125,7 @@ int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
break;
default:
BUG();
+ return -EINVAL;
};
dsi_if_enable(dsidev, false);
@@ -4192,10 +4310,6 @@ static void dsi_framedone_irq_callback(void *data, u32 mask)
__cancel_delayed_work(&dsi->framedone_timeout_work);
dsi_handle_framedone(dsidev, 0);
-
-#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
- dispc_fake_vsync_irq();
-#endif
}
int omap_dsi_update(struct omap_dss_device *dssdev, int channel,
@@ -4259,13 +4373,12 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
dispc_mgr_enable_stallmode(dssdev->manager->id, true);
dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 1);
- dispc_mgr_set_lcd_timings(dssdev->manager->id, &timings);
+ dss_mgr_set_timings(dssdev->manager, &timings);
} else {
dispc_mgr_enable_stallmode(dssdev->manager->id, false);
dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 0);
- dispc_mgr_set_lcd_timings(dssdev->manager->id,
- &dssdev->panel.timings);
+ dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
}
dispc_mgr_set_lcd_display_type(dssdev->manager->id,
@@ -4294,13 +4407,11 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
struct dsi_clock_info cinfo;
int r;
- /* we always use DSS_CLK_SYSCK as input clock */
- cinfo.use_sys_clk = true;
cinfo.regn = dssdev->clocks.dsi.regn;
cinfo.regm = dssdev->clocks.dsi.regm;
cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc;
cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi;
- r = dsi_calc_clock_rates(dssdev, &cinfo);
+ r = dsi_calc_clock_rates(dsidev, &cinfo);
if (r) {
DSSERR("Failed to calc dsi clocks\n");
return r;
@@ -4345,7 +4456,7 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- int dsi_module = dsi_get_dsidev_id(dsidev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
r = dsi_pll_init(dsidev, true, true);
@@ -4357,7 +4468,7 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
goto err1;
dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
- dss_select_dsi_clk_source(dsi_module, dssdev->clocks.dsi.dsi_fclk_src);
+ dss_select_dsi_clk_source(dsi->module_id, dssdev->clocks.dsi.dsi_fclk_src);
dss_select_lcd_clk_source(dssdev->manager->id,
dssdev->clocks.dispc.channel.lcd_clk_src);
@@ -4396,7 +4507,7 @@ err3:
dsi_cio_uninit(dssdev);
err2:
dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
- dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
+ dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK);
err1:
@@ -4410,7 +4521,6 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- int dsi_module = dsi_get_dsidev_id(dsidev);
if (enter_ulps && !dsi->ulps_enabled)
dsi_enter_ulps(dsidev);
@@ -4423,7 +4533,7 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
dsi_vc_enable(dsidev, 3, 0);
dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
- dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK);
+ dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK);
dsi_cio_uninit(dssdev);
dsi_pll_uninit(dsidev, disconnect_lanes);
@@ -4527,7 +4637,7 @@ int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
}
EXPORT_SYMBOL(omapdss_dsi_enable_te);
-int dsi_init_display(struct omap_dss_device *dssdev)
+static int __init dsi_init_display(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -4680,13 +4790,39 @@ static void dsi_put_clocks(struct platform_device *dsidev)
clk_put(dsi->sys_clk);
}
+static void __init dsi_probe_pdata(struct platform_device *dsidev)
+{
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct omap_dss_board_info *pdata = dsidev->dev.platform_data;
+ int i, r;
+
+ for (i = 0; i < pdata->num_devices; ++i) {
+ struct omap_dss_device *dssdev = pdata->devices[i];
+
+ if (dssdev->type != OMAP_DISPLAY_TYPE_DSI)
+ continue;
+
+ if (dssdev->phy.dsi.module != dsi->module_id)
+ continue;
+
+ r = dsi_init_display(dssdev);
+ if (r) {
+ DSSERR("device %s init failed: %d\n", dssdev->name, r);
+ continue;
+ }
+
+ r = omap_dss_register_device(dssdev, &dsidev->dev, i);
+ if (r)
+ DSSERR("device %s register failed: %d\n",
+ dssdev->name, r);
+ }
+}
+
/* DSI1 HW IP initialisation */
-static int omap_dsihw_probe(struct platform_device *dsidev)
+static int __init omap_dsihw_probe(struct platform_device *dsidev)
{
- struct omap_display_platform_data *dss_plat_data;
- struct omap_dss_board_info *board_info;
u32 rev;
- int r, i, dsi_module = dsi_get_dsidev_id(dsidev);
+ int r, i;
struct resource *dsi_mem;
struct dsi_data *dsi;
@@ -4694,15 +4830,11 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
if (!dsi)
return -ENOMEM;
+ dsi->module_id = dsidev->id;
dsi->pdev = dsidev;
- dsi_pdev_map[dsi_module] = dsidev;
+ dsi_pdev_map[dsi->module_id] = dsidev;
dev_set_drvdata(&dsidev->dev, dsi);
- dss_plat_data = dsidev->dev.platform_data;
- board_info = dss_plat_data->board_data;
- dsi->enable_pads = board_info->dsi_enable_pads;
- dsi->disable_pads = board_info->dsi_disable_pads;
-
spin_lock_init(&dsi->irq_lock);
spin_lock_init(&dsi->errors_lock);
dsi->errors = 0;
@@ -4780,8 +4912,21 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
else
dsi->num_lanes_supported = 3;
+ dsi_probe_pdata(dsidev);
+
dsi_runtime_put(dsidev);
+ if (dsi->module_id == 0)
+ dss_debugfs_create_file("dsi1_regs", dsi1_dump_regs);
+ else if (dsi->module_id == 1)
+ dss_debugfs_create_file("dsi2_regs", dsi2_dump_regs);
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ if (dsi->module_id == 0)
+ dss_debugfs_create_file("dsi1_irqs", dsi1_dump_irqs);
+ else if (dsi->module_id == 1)
+ dss_debugfs_create_file("dsi2_irqs", dsi2_dump_irqs);
+#endif
return 0;
err_runtime_get:
@@ -4790,12 +4935,14 @@ err_runtime_get:
return r;
}
-static int omap_dsihw_remove(struct platform_device *dsidev)
+static int __exit omap_dsihw_remove(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
WARN_ON(dsi->scp_clk_refcount > 0);
+ omap_dss_unregister_child_devices(&dsidev->dev);
+
pm_runtime_disable(&dsidev->dev);
dsi_put_clocks(dsidev);
@@ -4816,7 +4963,6 @@ static int omap_dsihw_remove(struct platform_device *dsidev)
static int dsi_runtime_suspend(struct device *dev)
{
dispc_runtime_put();
- dss_runtime_put();
return 0;
}
@@ -4825,20 +4971,11 @@ static int dsi_runtime_resume(struct device *dev)
{
int r;
- r = dss_runtime_get();
- if (r)
- goto err_get_dss;
-
r = dispc_runtime_get();
if (r)
- goto err_get_dispc;
+ return r;
return 0;
-
-err_get_dispc:
- dss_runtime_put();
-err_get_dss:
- return r;
}
static const struct dev_pm_ops dsi_pm_ops = {
@@ -4847,8 +4984,7 @@ static const struct dev_pm_ops dsi_pm_ops = {
};
static struct platform_driver omap_dsihw_driver = {
- .probe = omap_dsihw_probe,
- .remove = omap_dsihw_remove,
+ .remove = __exit_p(omap_dsihw_remove),
.driver = {
.name = "omapdss_dsi",
.owner = THIS_MODULE,
@@ -4856,12 +4992,12 @@ static struct platform_driver omap_dsihw_driver = {
},
};
-int dsi_init_platform_driver(void)
+int __init dsi_init_platform_driver(void)
{
- return platform_driver_register(&omap_dsihw_driver);
+ return platform_driver_probe(&omap_dsihw_driver, omap_dsihw_probe);
}
-void dsi_uninit_platform_driver(void)
+void __exit dsi_uninit_platform_driver(void)
{
- return platform_driver_unregister(&omap_dsihw_driver);
+ platform_driver_unregister(&omap_dsihw_driver);
}
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index bd2d5e1..d2b5719 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -62,6 +62,9 @@ struct dss_reg {
#define REG_FLD_MOD(idx, val, start, end) \
dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
+static int dss_runtime_get(void);
+static void dss_runtime_put(void);
+
static struct {
struct platform_device *pdev;
void __iomem *base;
@@ -277,7 +280,7 @@ void dss_dump_clocks(struct seq_file *s)
dss_runtime_put();
}
-void dss_dump_regs(struct seq_file *s)
+static void dss_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
@@ -322,6 +325,7 @@ void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
break;
default:
BUG();
+ return;
}
dss_feat_get_reg_field(FEAT_REG_DISPC_CLK_SWITCH, &start, &end);
@@ -335,7 +339,7 @@ void dss_select_dsi_clk_source(int dsi_module,
enum omap_dss_clk_source clk_src)
{
struct platform_device *dsidev;
- int b;
+ int b, pos;
switch (clk_src) {
case OMAP_DSS_CLK_SRC_FCK:
@@ -355,9 +359,11 @@ void dss_select_dsi_clk_source(int dsi_module,
break;
default:
BUG();
+ return;
}
- REG_FLD_MOD(DSS_CONTROL, b, 1, 1); /* DSI_CLK_SWITCH */
+ pos = dsi_module == 0 ? 1 : 10;
+ REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* DSIx_CLK_SWITCH */
dss.dsi_clk_source[dsi_module] = clk_src;
}
@@ -389,6 +395,7 @@ void dss_select_lcd_clk_source(enum omap_channel channel,
break;
default:
BUG();
+ return;
}
pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 12;
@@ -706,7 +713,7 @@ static void dss_put_clocks(void)
clk_put(dss.dss_clk);
}
-int dss_runtime_get(void)
+static int dss_runtime_get(void)
{
int r;
@@ -717,14 +724,14 @@ int dss_runtime_get(void)
return r < 0 ? r : 0;
}
-void dss_runtime_put(void)
+static void dss_runtime_put(void)
{
int r;
DSSDBG("dss_runtime_put\n");
r = pm_runtime_put_sync(&dss.pdev->dev);
- WARN_ON(r < 0);
+ WARN_ON(r < 0 && r != -ENOSYS && r != -EBUSY);
}
/* DEBUGFS */
@@ -740,7 +747,7 @@ void dss_debug_dump_clocks(struct seq_file *s)
#endif
/* DSS HW IP initialisation */
-static int omap_dsshw_probe(struct platform_device *pdev)
+static int __init omap_dsshw_probe(struct platform_device *pdev)
{
struct resource *dss_mem;
u32 rev;
@@ -785,40 +792,24 @@ static int omap_dsshw_probe(struct platform_device *pdev)
dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
- r = dpi_init();
- if (r) {
- DSSERR("Failed to initialize DPI\n");
- goto err_dpi;
- }
-
- r = sdi_init();
- if (r) {
- DSSERR("Failed to initialize SDI\n");
- goto err_sdi;
- }
-
rev = dss_read_reg(DSS_REVISION);
printk(KERN_INFO "OMAP DSS rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
dss_runtime_put();
+ dss_debugfs_create_file("dss", dss_dump_regs);
+
return 0;
-err_sdi:
- dpi_exit();
-err_dpi:
- dss_runtime_put();
+
err_runtime_get:
pm_runtime_disable(&pdev->dev);
dss_put_clocks();
return r;
}
-static int omap_dsshw_remove(struct platform_device *pdev)
+static int __exit omap_dsshw_remove(struct platform_device *pdev)
{
- dpi_exit();
- sdi_exit();
-
pm_runtime_disable(&pdev->dev);
dss_put_clocks();
@@ -829,11 +820,24 @@ static int omap_dsshw_remove(struct platform_device *pdev)
static int dss_runtime_suspend(struct device *dev)
{
dss_save_context();
+ dss_set_min_bus_tput(dev, 0);
return 0;
}
static int dss_runtime_resume(struct device *dev)
{
+ int r;
+ /*
+ * Set an arbitrarily high tput request to ensure OPP100.
+ * What we should really do is to make a request to stay in OPP100,
+ * without any tput requirements, but that is not currently possible
+ * via the PM layer.
+ */
+
+ r = dss_set_min_bus_tput(dev, 1000000000);
+ if (r)
+ return r;
+
dss_restore_context();
return 0;
}
@@ -844,8 +848,7 @@ static const struct dev_pm_ops dss_pm_ops = {
};
static struct platform_driver omap_dsshw_driver = {
- .probe = omap_dsshw_probe,
- .remove = omap_dsshw_remove,
+ .remove = __exit_p(omap_dsshw_remove),
.driver = {
.name = "omapdss_dss",
.owner = THIS_MODULE,
@@ -853,12 +856,12 @@ static struct platform_driver omap_dsshw_driver = {
},
};
-int dss_init_platform_driver(void)
+int __init dss_init_platform_driver(void)
{
- return platform_driver_register(&omap_dsshw_driver);
+ return platform_driver_probe(&omap_dsshw_driver, omap_dsshw_probe);
}
void dss_uninit_platform_driver(void)
{
- return platform_driver_unregister(&omap_dsshw_driver);
+ platform_driver_unregister(&omap_dsshw_driver);
}
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index d4b3dff..dd1092c 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -150,9 +150,6 @@ struct dsi_clock_info {
u16 regm_dsi; /* OMAP3: REGM4
* OMAP4: REGM5 */
u16 lp_clk_div;
-
- u8 highfreq;
- bool use_sys_clk;
};
struct seq_file;
@@ -162,6 +159,16 @@ struct platform_device;
struct bus_type *dss_get_bus(void);
struct regulator *dss_get_vdds_dsi(void);
struct regulator *dss_get_vdds_sdi(void);
+int dss_get_ctx_loss_count(struct device *dev);
+int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask);
+void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask);
+int dss_set_min_bus_tput(struct device *dev, unsigned long tput);
+int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *));
+
+int omap_dss_register_device(struct omap_dss_device *dssdev,
+ struct device *parent, int disp_num);
+void omap_dss_unregister_device(struct omap_dss_device *dssdev);
+void omap_dss_unregister_child_devices(struct device *parent);
/* apply */
void dss_apply_init(void);
@@ -179,6 +186,9 @@ void dss_mgr_get_info(struct omap_overlay_manager *mgr,
int dss_mgr_set_device(struct omap_overlay_manager *mgr,
struct omap_dss_device *dssdev);
int dss_mgr_unset_device(struct omap_overlay_manager *mgr);
+void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
+ struct omap_video_timings *timings);
+const struct omap_video_timings *dss_mgr_get_timings(struct omap_overlay_manager *mgr);
bool dss_ovl_is_enabled(struct omap_overlay *ovl);
int dss_ovl_enable(struct omap_overlay *ovl);
@@ -208,9 +218,11 @@ int dss_init_overlay_managers(struct platform_device *pdev);
void dss_uninit_overlay_managers(struct platform_device *pdev);
int dss_mgr_simple_check(struct omap_overlay_manager *mgr,
const struct omap_overlay_manager_info *info);
+int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
+ const struct omap_video_timings *timings);
int dss_mgr_check(struct omap_overlay_manager *mgr,
- struct omap_dss_device *dssdev,
struct omap_overlay_manager_info *info,
+ const struct omap_video_timings *mgr_timings,
struct omap_overlay_info **overlay_infos);
/* overlay */
@@ -220,22 +232,18 @@ void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr);
void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
int dss_ovl_simple_check(struct omap_overlay *ovl,
const struct omap_overlay_info *info);
-int dss_ovl_check(struct omap_overlay *ovl,
- struct omap_overlay_info *info, struct omap_dss_device *dssdev);
+int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
+ const struct omap_video_timings *mgr_timings);
/* DSS */
-int dss_init_platform_driver(void);
+int dss_init_platform_driver(void) __init;
void dss_uninit_platform_driver(void);
-int dss_runtime_get(void);
-void dss_runtime_put(void);
-
void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
void dss_dump_clocks(struct seq_file *s);
-void dss_dump_regs(struct seq_file *s);
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
void dss_debug_dump_clocks(struct seq_file *s);
#endif
@@ -265,19 +273,8 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
struct dispc_clock_info *dispc_cinfo);
/* SDI */
-#ifdef CONFIG_OMAP2_DSS_SDI
-int sdi_init(void);
-void sdi_exit(void);
-int sdi_init_display(struct omap_dss_device *display);
-#else
-static inline int sdi_init(void)
-{
- return 0;
-}
-static inline void sdi_exit(void)
-{
-}
-#endif
+int sdi_init_platform_driver(void) __init;
+void sdi_uninit_platform_driver(void) __exit;
/* DSI */
#ifdef CONFIG_OMAP2_DSS_DSI
@@ -285,19 +282,14 @@ static inline void sdi_exit(void)
struct dentry;
struct file_operations;
-int dsi_init_platform_driver(void);
-void dsi_uninit_platform_driver(void);
+int dsi_init_platform_driver(void) __init;
+void dsi_uninit_platform_driver(void) __exit;
int dsi_runtime_get(struct platform_device *dsidev);
void dsi_runtime_put(struct platform_device *dsidev);
void dsi_dump_clocks(struct seq_file *s);
-void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
- const struct file_operations *debug_fops);
-void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
- const struct file_operations *debug_fops);
-int dsi_init_display(struct omap_dss_device *display);
void dsi_irq_handler(void);
u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt);
@@ -314,13 +306,6 @@ void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev);
void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev);
struct platform_device *dsi_get_dsidev_from_id(int module);
#else
-static inline int dsi_init_platform_driver(void)
-{
- return 0;
-}
-static inline void dsi_uninit_platform_driver(void)
-{
-}
static inline int dsi_runtime_get(struct platform_device *dsidev)
{
return 0;
@@ -377,28 +362,14 @@ static inline struct platform_device *dsi_get_dsidev_from_id(int module)
#endif
/* DPI */
-#ifdef CONFIG_OMAP2_DSS_DPI
-int dpi_init(void);
-void dpi_exit(void);
-int dpi_init_display(struct omap_dss_device *dssdev);
-#else
-static inline int dpi_init(void)
-{
- return 0;
-}
-static inline void dpi_exit(void)
-{
-}
-#endif
+int dpi_init_platform_driver(void) __init;
+void dpi_uninit_platform_driver(void) __exit;
/* DISPC */
-int dispc_init_platform_driver(void);
-void dispc_uninit_platform_driver(void);
+int dispc_init_platform_driver(void) __init;
+void dispc_uninit_platform_driver(void) __exit;
void dispc_dump_clocks(struct seq_file *s);
-void dispc_dump_irqs(struct seq_file *s);
-void dispc_dump_regs(struct seq_file *s);
void dispc_irq_handler(void);
-void dispc_fake_vsync_irq(void);
int dispc_runtime_get(void);
void dispc_runtime_put(void);
@@ -409,12 +380,12 @@ void dispc_disable_sidle(void);
void dispc_lcd_enable_signal_polarity(bool act_high);
void dispc_lcd_enable_signal(bool enable);
void dispc_pck_free_enable(bool enable);
-void dispc_set_digit_size(u16 width, u16 height);
void dispc_enable_fifomerge(bool enable);
void dispc_enable_gamma_table(bool enable);
void dispc_set_loadmode(enum omap_dss_load_mode mode);
-bool dispc_lcd_timings_ok(struct omap_video_timings *timings);
+bool dispc_mgr_timings_ok(enum omap_channel channel,
+ const struct omap_video_timings *timings);
unsigned long dispc_fclk_rate(void);
void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
struct dispc_clock_info *cinfo);
@@ -424,15 +395,16 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high);
void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
- u32 *fifo_low, u32 *fifo_high, bool use_fifomerge);
+ u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
+ bool manual_update);
int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
- bool ilace, bool replication);
+ bool ilace, bool replication,
+ const struct omap_video_timings *mgr_timings);
int dispc_ovl_enable(enum omap_plane plane, bool enable);
void dispc_ovl_set_channel_out(enum omap_plane plane,
enum omap_channel channel);
void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable);
-void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height);
u32 dispc_mgr_get_vsync_irq(enum omap_channel channel);
u32 dispc_mgr_get_framedone_irq(enum omap_channel channel);
bool dispc_mgr_go_busy(enum omap_channel channel);
@@ -445,12 +417,13 @@ void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable);
void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines);
void dispc_mgr_set_lcd_display_type(enum omap_channel channel,
enum omap_lcd_display_type type);
-void dispc_mgr_set_lcd_timings(enum omap_channel channel,
+void dispc_mgr_set_timings(enum omap_channel channel,
struct omap_video_timings *timings);
void dispc_mgr_set_pol_freq(enum omap_channel channel,
enum omap_panel_config config, u8 acbi, u8 acb);
unsigned long dispc_mgr_lclk_rate(enum omap_channel channel);
unsigned long dispc_mgr_pclk_rate(enum omap_channel channel);
+unsigned long dispc_core_clk_rate(void);
int dispc_mgr_set_clock_div(enum omap_channel channel,
struct dispc_clock_info *cinfo);
int dispc_mgr_get_clock_div(enum omap_channel channel,
@@ -460,19 +433,10 @@ void dispc_mgr_setup(enum omap_channel channel,
/* VENC */
#ifdef CONFIG_OMAP2_DSS_VENC
-int venc_init_platform_driver(void);
-void venc_uninit_platform_driver(void);
-void venc_dump_regs(struct seq_file *s);
-int venc_init_display(struct omap_dss_device *display);
+int venc_init_platform_driver(void) __init;
+void venc_uninit_platform_driver(void) __exit;
unsigned long venc_get_pixel_clock(void);
#else
-static inline int venc_init_platform_driver(void)
-{
- return 0;
-}
-static inline void venc_uninit_platform_driver(void)
-{
-}
static inline unsigned long venc_get_pixel_clock(void)
{
WARN("%s: VENC not compiled in, returning pclk as 0\n", __func__);
@@ -482,23 +446,10 @@ static inline unsigned long venc_get_pixel_clock(void)
/* HDMI */
#ifdef CONFIG_OMAP4_DSS_HDMI
-int hdmi_init_platform_driver(void);
-void hdmi_uninit_platform_driver(void);
-int hdmi_init_display(struct omap_dss_device *dssdev);
+int hdmi_init_platform_driver(void) __init;
+void hdmi_uninit_platform_driver(void) __exit;
unsigned long hdmi_get_pixel_clock(void);
-void hdmi_dump_regs(struct seq_file *s);
#else
-static inline int hdmi_init_display(struct omap_dss_device *dssdev)
-{
- return 0;
-}
-static inline int hdmi_init_platform_driver(void)
-{
- return 0;
-}
-static inline void hdmi_uninit_platform_driver(void)
-{
-}
static inline unsigned long hdmi_get_pixel_clock(void)
{
WARN("%s: HDMI not compiled in, returning pclk as 0\n", __func__);
@@ -514,22 +465,18 @@ int omapdss_hdmi_read_edid(u8 *buf, int len);
bool omapdss_hdmi_detect(void);
int hdmi_panel_init(void);
void hdmi_panel_exit(void);
+#ifdef CONFIG_OMAP4_DSS_HDMI_AUDIO
+int hdmi_audio_enable(void);
+void hdmi_audio_disable(void);
+int hdmi_audio_start(void);
+void hdmi_audio_stop(void);
+bool hdmi_mode_has_audio(void);
+int hdmi_audio_config(struct omap_dss_audio *audio);
+#endif
/* RFBI */
-#ifdef CONFIG_OMAP2_DSS_RFBI
-int rfbi_init_platform_driver(void);
-void rfbi_uninit_platform_driver(void);
-void rfbi_dump_regs(struct seq_file *s);
-int rfbi_init_display(struct omap_dss_device *display);
-#else
-static inline int rfbi_init_platform_driver(void)
-{
- return 0;
-}
-static inline void rfbi_uninit_platform_driver(void)
-{
-}
-#endif
+int rfbi_init_platform_driver(void) __init;
+void rfbi_uninit_platform_driver(void) __exit;
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index ce14aa6..9387097 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -52,6 +52,8 @@ struct omap_dss_features {
const char * const *clksrc_names;
const struct dss_param_range *dss_params;
+ const enum omap_dss_rotation_type supported_rotation_types;
+
const u32 buffer_size_unit;
const u32 burst_size_unit;
};
@@ -311,6 +313,8 @@ static const struct dss_param_range omap2_dss_param_range[] = {
* scaler cannot scale a image with width more than 768.
*/
[FEAT_PARAM_LINEWIDTH] = { 1, 768 },
+ [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
+ [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
};
static const struct dss_param_range omap3_dss_param_range[] = {
@@ -324,6 +328,8 @@ static const struct dss_param_range omap3_dss_param_range[] = {
[FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1},
[FEAT_PARAM_DOWNSCALE] = { 1, 4 },
[FEAT_PARAM_LINEWIDTH] = { 1, 1024 },
+ [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
+ [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
};
static const struct dss_param_range omap4_dss_param_range[] = {
@@ -337,6 +343,8 @@ static const struct dss_param_range omap4_dss_param_range[] = {
[FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
[FEAT_PARAM_DOWNSCALE] = { 1, 4 },
[FEAT_PARAM_LINEWIDTH] = { 1, 2048 },
+ [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
+ [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
};
static const enum dss_feat_id omap2_dss_feat_list[] = {
@@ -399,6 +407,7 @@ static const enum dss_feat_id omap4430_es1_0_dss_feat_list[] = {
FEAT_FIR_COEF_V,
FEAT_ALPHA_FREE_ZORDER,
FEAT_FIFO_MERGE,
+ FEAT_BURST_2D,
};
static const enum dss_feat_id omap4430_es2_0_1_2_dss_feat_list[] = {
@@ -416,6 +425,7 @@ static const enum dss_feat_id omap4430_es2_0_1_2_dss_feat_list[] = {
FEAT_FIR_COEF_V,
FEAT_ALPHA_FREE_ZORDER,
FEAT_FIFO_MERGE,
+ FEAT_BURST_2D,
};
static const enum dss_feat_id omap4_dss_feat_list[] = {
@@ -434,6 +444,7 @@ static const enum dss_feat_id omap4_dss_feat_list[] = {
FEAT_FIR_COEF_V,
FEAT_ALPHA_FREE_ZORDER,
FEAT_FIFO_MERGE,
+ FEAT_BURST_2D,
};
/* OMAP2 DSS Features */
@@ -451,6 +462,7 @@ static const struct omap_dss_features omap2_dss_features = {
.overlay_caps = omap2_dss_overlay_caps,
.clksrc_names = omap2_dss_clk_source_names,
.dss_params = omap2_dss_param_range,
+ .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
.buffer_size_unit = 1,
.burst_size_unit = 8,
};
@@ -470,6 +482,7 @@ static const struct omap_dss_features omap3430_dss_features = {
.overlay_caps = omap3430_dss_overlay_caps,
.clksrc_names = omap3_dss_clk_source_names,
.dss_params = omap3_dss_param_range,
+ .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
.buffer_size_unit = 1,
.burst_size_unit = 8,
};
@@ -488,6 +501,7 @@ static const struct omap_dss_features omap3630_dss_features = {
.overlay_caps = omap3630_dss_overlay_caps,
.clksrc_names = omap3_dss_clk_source_names,
.dss_params = omap3_dss_param_range,
+ .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
.buffer_size_unit = 1,
.burst_size_unit = 8,
};
@@ -508,6 +522,7 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = {
.overlay_caps = omap4_dss_overlay_caps,
.clksrc_names = omap4_dss_clk_source_names,
.dss_params = omap4_dss_param_range,
+ .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
.buffer_size_unit = 16,
.burst_size_unit = 16,
};
@@ -527,6 +542,7 @@ static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = {
.overlay_caps = omap4_dss_overlay_caps,
.clksrc_names = omap4_dss_clk_source_names,
.dss_params = omap4_dss_param_range,
+ .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
.buffer_size_unit = 16,
.burst_size_unit = 16,
};
@@ -546,6 +562,7 @@ static const struct omap_dss_features omap4_dss_features = {
.overlay_caps = omap4_dss_overlay_caps,
.clksrc_names = omap4_dss_clk_source_names,
.dss_params = omap4_dss_param_range,
+ .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
.buffer_size_unit = 16,
.burst_size_unit = 16,
};
@@ -562,13 +579,17 @@ static const struct ti_hdmi_ip_ops omap4_hdmi_functions = {
.pll_enable = ti_hdmi_4xxx_pll_enable,
.pll_disable = ti_hdmi_4xxx_pll_disable,
.video_enable = ti_hdmi_4xxx_wp_video_start,
+ .video_disable = ti_hdmi_4xxx_wp_video_stop,
.dump_wrapper = ti_hdmi_4xxx_wp_dump,
.dump_core = ti_hdmi_4xxx_core_dump,
.dump_pll = ti_hdmi_4xxx_pll_dump,
.dump_phy = ti_hdmi_4xxx_phy_dump,
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
.audio_enable = ti_hdmi_4xxx_wp_audio_enable,
+ .audio_disable = ti_hdmi_4xxx_wp_audio_disable,
+ .audio_start = ti_hdmi_4xxx_audio_start,
+ .audio_stop = ti_hdmi_4xxx_audio_stop,
+ .audio_config = ti_hdmi_4xxx_audio_config,
#endif
};
@@ -662,6 +683,11 @@ void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end)
*end = omap_current_dss_features->reg_fields[id].end;
}
+bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type)
+{
+ return omap_current_dss_features->supported_rotation_types & rot_type;
+}
+
void dss_features_init(void)
{
if (cpu_is_omap24xx())
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index c332e7d..bdf469f0 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -62,6 +62,7 @@ enum dss_feat_id {
FEAT_FIFO_MERGE,
/* An unknown HW bug causing the normal FIFO thresholds not to work */
FEAT_OMAP3_DSI_FIFO_BUG,
+ FEAT_BURST_2D,
};
/* DSS register field id */
@@ -91,6 +92,8 @@ enum dss_range_param {
FEAT_PARAM_DSIPLL_LPDIV,
FEAT_PARAM_DOWNSCALE,
FEAT_PARAM_LINEWIDTH,
+ FEAT_PARAM_MGR_WIDTH,
+ FEAT_PARAM_MGR_HEIGHT,
};
/* DSS Feature Functions */
@@ -108,6 +111,8 @@ const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id);
u32 dss_feat_get_buffer_size_unit(void); /* in bytes */
u32 dss_feat_get_burst_size_unit(void); /* in bytes */
+bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type);
+
bool dss_has_feature(enum dss_feat_id id);
void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
void dss_features_init(void);
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index c4b4f69..26a2430 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -33,12 +33,6 @@
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <video/omapdss.h>
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-#include <sound/soc.h>
-#include <sound/pcm_params.h>
-#include "ti_hdmi_4xxx_ip.h"
-#endif
#include "ti_hdmi.h"
#include "dss.h"
@@ -63,7 +57,6 @@
static struct {
struct mutex lock;
- struct omap_display_platform_data *pdata;
struct platform_device *pdev;
struct hdmi_ip_data ip_data;
@@ -130,25 +123,12 @@ static int hdmi_runtime_get(void)
DSSDBG("hdmi_runtime_get\n");
- /*
- * HACK: Add dss_runtime_get() to ensure DSS clock domain is enabled.
- * This should be removed later.
- */
- r = dss_runtime_get();
- if (r < 0)
- goto err_get_dss;
-
r = pm_runtime_get_sync(&hdmi.pdev->dev);
WARN_ON(r < 0);
if (r < 0)
- goto err_get_hdmi;
+ return r;
return 0;
-
-err_get_hdmi:
- dss_runtime_put();
-err_get_dss:
- return r;
}
static void hdmi_runtime_put(void)
@@ -158,16 +138,10 @@ static void hdmi_runtime_put(void)
DSSDBG("hdmi_runtime_put\n");
r = pm_runtime_put_sync(&hdmi.pdev->dev);
- WARN_ON(r < 0);
-
- /*
- * HACK: This is added to complement the dss_runtime_get() call in
- * hdmi_runtime_get(). This should be removed later.
- */
- dss_runtime_put();
+ WARN_ON(r < 0 && r != -ENOSYS);
}
-int hdmi_init_display(struct omap_dss_device *dssdev)
+static int __init hdmi_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("init_display\n");
@@ -344,7 +318,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
hdmi_compute_pll(dssdev, phy, &hdmi.ip_data.pll_data);
- hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
+ hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
/* config the PLL and PHY hdmi_set_pll_pwrfirst */
r = hdmi.ip_data.ops->pll_enable(&hdmi.ip_data);
@@ -376,10 +350,11 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
dispc_enable_gamma_table(0);
/* tv size */
- dispc_set_digit_size(dssdev->panel.timings.x_res,
- dssdev->panel.timings.y_res);
+ dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
- hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 1);
+ r = hdmi.ip_data.ops->video_enable(&hdmi.ip_data);
+ if (r)
+ goto err_vid_enable;
r = dss_mgr_enable(dssdev->manager);
if (r)
@@ -388,7 +363,8 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
return 0;
err_mgr_enable:
- hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
+ hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
+err_vid_enable:
hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
err:
@@ -400,7 +376,7 @@ static void hdmi_power_off(struct omap_dss_device *dssdev)
{
dss_mgr_disable(dssdev->manager);
- hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
+ hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
hdmi_runtime_put();
@@ -436,10 +412,12 @@ void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev)
r = hdmi_power_on(dssdev);
if (r)
DSSERR("failed to power on device\n");
+ } else {
+ dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
}
}
-void hdmi_dump_regs(struct seq_file *s)
+static void hdmi_dump_regs(struct seq_file *s)
{
mutex_lock(&hdmi.lock);
@@ -555,248 +533,201 @@ void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev)
mutex_unlock(&hdmi.lock);
}
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-
-static int hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
+static int hdmi_get_clocks(struct platform_device *pdev)
{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_codec *codec = rtd->codec;
- struct platform_device *pdev = to_platform_device(codec->dev);
- struct hdmi_ip_data *ip_data = snd_soc_codec_get_drvdata(codec);
- int err = 0;
+ struct clk *clk;
- if (!(ip_data->ops) && !(ip_data->ops->audio_enable)) {
- dev_err(&pdev->dev, "Cannot enable/disable audio\n");
- return -ENODEV;
+ clk = clk_get(&pdev->dev, "sys_clk");
+ if (IS_ERR(clk)) {
+ DSSERR("can't get sys_clk\n");
+ return PTR_ERR(clk);
}
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- ip_data->ops->audio_enable(ip_data, true);
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- ip_data->ops->audio_enable(ip_data, false);
- break;
- default:
- err = -EINVAL;
- }
- return err;
-}
-
-static int hdmi_audio_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_codec *codec = rtd->codec;
- struct hdmi_ip_data *ip_data = snd_soc_codec_get_drvdata(codec);
- struct hdmi_audio_format audio_format;
- struct hdmi_audio_dma audio_dma;
- struct hdmi_core_audio_config core_cfg;
- struct hdmi_core_infoframe_audio aud_if_cfg;
- int err, n, cts;
- enum hdmi_core_audio_sample_freq sample_freq;
-
- switch (params_format(params)) {
- case SNDRV_PCM_FORMAT_S16_LE:
- core_cfg.i2s_cfg.word_max_length =
- HDMI_AUDIO_I2S_MAX_WORD_20BITS;
- core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_16_BITS;
- core_cfg.i2s_cfg.in_length_bits =
- HDMI_AUDIO_I2S_INPUT_LENGTH_16;
- core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
- audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
- audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
- audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
- audio_dma.transfer_size = 0x10;
- break;
- case SNDRV_PCM_FORMAT_S24_LE:
- core_cfg.i2s_cfg.word_max_length =
- HDMI_AUDIO_I2S_MAX_WORD_24BITS;
- core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_24_BITS;
- core_cfg.i2s_cfg.in_length_bits =
- HDMI_AUDIO_I2S_INPUT_LENGTH_24;
- audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
- audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS;
- audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
- core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
- audio_dma.transfer_size = 0x20;
- break;
- default:
+ hdmi.sys_clk = clk;
+
+ return 0;
+}
+
+static void hdmi_put_clocks(void)
+{
+ if (hdmi.sys_clk)
+ clk_put(hdmi.sys_clk);
+}
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts)
+{
+ u32 deep_color;
+ bool deep_color_correct = false;
+ u32 pclk = hdmi.ip_data.cfg.timings.pixel_clock;
+
+ if (n == NULL || cts == NULL)
return -EINVAL;
- }
- switch (params_rate(params)) {
+ /* TODO: When implemented, query deep color mode here. */
+ deep_color = 100;
+
+ /*
+ * When using deep color, the default N value (as in the HDMI
+ * specification) yields a non-integer CTS. Hence, we
+ * modify it while keeping the restrictions described in
+ * section 7.2.1 of the HDMI 1.4a specification.
+ */
+ switch (sample_freq) {
case 32000:
- sample_freq = HDMI_AUDIO_FS_32000;
+ case 48000:
+ case 96000:
+ case 192000:
+ if (deep_color == 125)
+ if (pclk == 27027 || pclk == 74250)
+ deep_color_correct = true;
+ if (deep_color == 150)
+ if (pclk == 27027)
+ deep_color_correct = true;
break;
case 44100:
- sample_freq = HDMI_AUDIO_FS_44100;
- break;
- case 48000:
- sample_freq = HDMI_AUDIO_FS_48000;
+ case 88200:
+ case 176400:
+ if (deep_color == 125)
+ if (pclk == 27027)
+ deep_color_correct = true;
break;
default:
return -EINVAL;
}
- err = hdmi_config_audio_acr(ip_data, params_rate(params), &n, &cts);
- if (err < 0)
- return err;
-
- /* Audio wrapper config */
- audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
- audio_format.active_chnnls_msk = 0x03;
- audio_format.type = HDMI_AUDIO_TYPE_LPCM;
- audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
- /* Disable start/stop signals of IEC 60958 blocks */
- audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF;
+ if (deep_color_correct) {
+ switch (sample_freq) {
+ case 32000:
+ *n = 8192;
+ break;
+ case 44100:
+ *n = 12544;
+ break;
+ case 48000:
+ *n = 8192;
+ break;
+ case 88200:
+ *n = 25088;
+ break;
+ case 96000:
+ *n = 16384;
+ break;
+ case 176400:
+ *n = 50176;
+ break;
+ case 192000:
+ *n = 32768;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (sample_freq) {
+ case 32000:
+ *n = 4096;
+ break;
+ case 44100:
+ *n = 6272;
+ break;
+ case 48000:
+ *n = 6144;
+ break;
+ case 88200:
+ *n = 12544;
+ break;
+ case 96000:
+ *n = 12288;
+ break;
+ case 176400:
+ *n = 25088;
+ break;
+ case 192000:
+ *n = 24576;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
+ *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
- audio_dma.block_size = 0xC0;
- audio_dma.mode = HDMI_AUDIO_TRANSF_DMA;
- audio_dma.fifo_threshold = 0x20; /* in number of samples */
+ return 0;
+}
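A worked instance of the calculation above for a common case (values chosen for illustration):

/*
 * Example: 48 kHz audio, pclk = 74250 (74.25 MHz), no deep color
 * (deep_color = 100):
 *
 *   N   = 6144
 *   CTS = 74250 * (6144 / 128) * 100 / (48000 / 10)
 *       = 74250 * 48 * 100 / 4800 = 74250
 *
 * which matches the N/CTS pair recommended by the HDMI specification
 * for 48 kHz audio on a 74.25 MHz TMDS clock.
 */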
- hdmi_wp_audio_config_dma(ip_data, &audio_dma);
- hdmi_wp_audio_config_format(ip_data, &audio_format);
+int hdmi_audio_enable(void)
+{
+ DSSDBG("audio_enable\n");
- /*
- * I2S config
- */
- core_cfg.i2s_cfg.en_high_bitrate_aud = false;
- /* Only used with high bitrate audio */
- core_cfg.i2s_cfg.cbit_order = false;
- /* Serial data and word select should change on sck rising edge */
- core_cfg.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
- core_cfg.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
- /* Set I2S word select polarity */
- core_cfg.i2s_cfg.ws_polarity = HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT;
- core_cfg.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
- /* Set serial data to word select shift. See Phillips spec. */
- core_cfg.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
- /* Enable one of the four available serial data channels */
- core_cfg.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
-
- /* Core audio config */
- core_cfg.freq_sample = sample_freq;
- core_cfg.n = n;
- core_cfg.cts = cts;
- if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
- core_cfg.aud_par_busclk = 0;
- core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
- core_cfg.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
- } else {
- core_cfg.aud_par_busclk = (((128 * 31) - 1) << 8);
- core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
- core_cfg.use_mclk = true;
- }
+ return hdmi.ip_data.ops->audio_enable(&hdmi.ip_data);
+}
- if (core_cfg.use_mclk)
- core_cfg.mclk_mode = HDMI_AUDIO_MCLK_128FS;
- core_cfg.layout = HDMI_AUDIO_LAYOUT_2CH;
- core_cfg.en_spdif = false;
- /* Use sample frequency from channel status word */
- core_cfg.fs_override = true;
- /* Enable ACR packets */
- core_cfg.en_acr_pkt = true;
- /* Disable direct streaming digital audio */
- core_cfg.en_dsd_audio = false;
- /* Use parallel audio interface */
- core_cfg.en_parallel_aud_input = true;
-
- hdmi_core_audio_config(ip_data, &core_cfg);
+void hdmi_audio_disable(void)
+{
+ DSSDBG("audio_disable\n");
- /*
- * Configure packet
- * info frame audio see doc CEA861-D page 74
- */
- aud_if_cfg.db1_coding_type = HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM;
- aud_if_cfg.db1_channel_count = 2;
- aud_if_cfg.db2_sample_freq = HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM;
- aud_if_cfg.db2_sample_size = HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM;
- aud_if_cfg.db4_channel_alloc = 0x00;
- aud_if_cfg.db5_downmix_inh = false;
- aud_if_cfg.db5_lsv = 0;
-
- hdmi_core_audio_infoframe_config(ip_data, &aud_if_cfg);
- return 0;
+ hdmi.ip_data.ops->audio_disable(&hdmi.ip_data);
}
-static int hdmi_audio_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
+int hdmi_audio_start(void)
{
- if (!hdmi.ip_data.cfg.cm.mode) {
- pr_err("Current video settings do not support audio.\n");
- return -EIO;
- }
- return 0;
+ DSSDBG("audio_start\n");
+
+ return hdmi.ip_data.ops->audio_start(&hdmi.ip_data);
}
-static int hdmi_audio_codec_probe(struct snd_soc_codec *codec)
+void hdmi_audio_stop(void)
{
- struct hdmi_ip_data *priv = &hdmi.ip_data;
+ DSSDBG("audio_stop\n");
- snd_soc_codec_set_drvdata(codec, priv);
- return 0;
+ hdmi.ip_data.ops->audio_stop(&hdmi.ip_data);
}
-static struct snd_soc_codec_driver hdmi_audio_codec_drv = {
- .probe = hdmi_audio_codec_probe,
-};
+bool hdmi_mode_has_audio(void)
+{
+ if (hdmi.ip_data.cfg.cm.mode == HDMI_HDMI)
+ return true;
+ else
+ return false;
+}
-static struct snd_soc_dai_ops hdmi_audio_codec_ops = {
- .hw_params = hdmi_audio_hw_params,
- .trigger = hdmi_audio_trigger,
- .startup = hdmi_audio_startup,
-};
+int hdmi_audio_config(struct omap_dss_audio *audio)
+{
+ return hdmi.ip_data.ops->audio_config(&hdmi.ip_data, audio);
+}
-static struct snd_soc_dai_driver hdmi_codec_dai_drv = {
- .name = "hdmi-audio-codec",
- .playback = {
- .channels_min = 2,
- .channels_max = 2,
- .rates = SNDRV_PCM_RATE_32000 |
- SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE |
- SNDRV_PCM_FMTBIT_S24_LE,
- },
- .ops = &hdmi_audio_codec_ops,
-};
#endif
-static int hdmi_get_clocks(struct platform_device *pdev)
+static void __init hdmi_probe_pdata(struct platform_device *pdev)
{
- struct clk *clk;
+ struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+ int r, i;
- clk = clk_get(&pdev->dev, "sys_clk");
- if (IS_ERR(clk)) {
- DSSERR("can't get sys_clk\n");
- return PTR_ERR(clk);
- }
+ for (i = 0; i < pdata->num_devices; ++i) {
+ struct omap_dss_device *dssdev = pdata->devices[i];
- hdmi.sys_clk = clk;
+ if (dssdev->type != OMAP_DISPLAY_TYPE_HDMI)
+ continue;
- return 0;
-}
+ r = hdmi_init_display(dssdev);
+ if (r) {
+ DSSERR("device %s init failed: %d\n", dssdev->name, r);
+ continue;
+ }
-static void hdmi_put_clocks(void)
-{
- if (hdmi.sys_clk)
- clk_put(hdmi.sys_clk);
+ r = omap_dss_register_device(dssdev, &pdev->dev, i);
+ if (r)
+ DSSERR("device %s register failed: %d\n",
+ dssdev->name, r);
+ }
}
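
hdmi_probe_pdata() walks the platform data attached to the omapdss HDMI platform device and registers every HDMI display it finds. A board file would supply that data roughly as in the sketch below; the names are assumptions for illustration only, and the fields are the usual omap_dss_board_info / omap_dss_device ones.

	#include <linux/kernel.h>
	#include <video/omapdss.h>

	/* hypothetical board data, not part of this patch */
	static struct omap_dss_device board_hdmi_device = {
		.name		= "hdmi",
		.type		= OMAP_DISPLAY_TYPE_HDMI,
		.driver_name	= "hdmi_panel",
	};

	static struct omap_dss_device *board_dss_devices[] = {
		&board_hdmi_device,
	};

	static struct omap_dss_board_info board_dss_data = {
		.num_devices	= ARRAY_SIZE(board_dss_devices),
		.devices	= board_dss_devices,
		.default_device	= &board_hdmi_device,
	};
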
/* HDMI HW IP initialisation */
-static int omapdss_hdmihw_probe(struct platform_device *pdev)
+static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
{
struct resource *hdmi_mem;
int r;
- hdmi.pdata = pdev->dev.platform_data;
hdmi.pdev = pdev;
mutex_init(&hdmi.lock);
@@ -830,28 +761,18 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
hdmi_panel_init();
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
+ dss_debugfs_create_file("hdmi", hdmi_dump_regs);
+
+ hdmi_probe_pdata(pdev);
- /* Register ASoC codec DAI */
- r = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv,
- &hdmi_codec_dai_drv, 1);
- if (r) {
- DSSERR("can't register ASoC HDMI audio codec\n");
- return r;
- }
-#endif
return 0;
}
-static int omapdss_hdmihw_remove(struct platform_device *pdev)
+static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
{
- hdmi_panel_exit();
+ omap_dss_unregister_child_devices(&pdev->dev);
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
- snd_soc_unregister_codec(&pdev->dev);
-#endif
+ hdmi_panel_exit();
pm_runtime_disable(&pdev->dev);
@@ -867,7 +788,6 @@ static int hdmi_runtime_suspend(struct device *dev)
clk_disable(hdmi.sys_clk);
dispc_runtime_put();
- dss_runtime_put();
return 0;
}
@@ -876,23 +796,13 @@ static int hdmi_runtime_resume(struct device *dev)
{
int r;
- r = dss_runtime_get();
- if (r < 0)
- goto err_get_dss;
-
r = dispc_runtime_get();
if (r < 0)
- goto err_get_dispc;
-
+ return r;
clk_enable(hdmi.sys_clk);
return 0;
-
-err_get_dispc:
- dss_runtime_put();
-err_get_dss:
- return r;
}
static const struct dev_pm_ops hdmi_pm_ops = {
@@ -901,8 +811,7 @@ static const struct dev_pm_ops hdmi_pm_ops = {
};
static struct platform_driver omapdss_hdmihw_driver = {
- .probe = omapdss_hdmihw_probe,
- .remove = omapdss_hdmihw_remove,
+ .remove = __exit_p(omapdss_hdmihw_remove),
.driver = {
.name = "omapdss_hdmi",
.owner = THIS_MODULE,
@@ -910,12 +819,12 @@ static struct platform_driver omapdss_hdmihw_driver = {
},
};
-int hdmi_init_platform_driver(void)
+int __init hdmi_init_platform_driver(void)
{
- return platform_driver_register(&omapdss_hdmihw_driver);
+ return platform_driver_probe(&omapdss_hdmihw_driver, omapdss_hdmihw_probe);
}
-void hdmi_uninit_platform_driver(void)
+void __exit hdmi_uninit_platform_driver(void)
{
- return platform_driver_unregister(&omapdss_hdmihw_driver);
+ platform_driver_unregister(&omapdss_hdmihw_driver);
}
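
The registration change above, platform_driver_probe() plus __init/__exit annotations, follows a common kernel pattern. The sketch below uses generic foo_* names purely for illustration and is not taken from this patch: passing the probe routine to platform_driver_probe() instead of storing it in the platform_driver structure lets the probe code be marked __init and discarded after boot, while only the __exit-wrapped remove callback is kept.

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int __init foo_probe(struct platform_device *pdev)
	{
		/* one-time setup; this code may be freed once boot completes */
		return 0;
	}

	static int __exit foo_remove(struct platform_device *pdev)
	{
		return 0;
	}

	static struct platform_driver foo_driver = {
		/* no .probe here: __init code must not be referenced at runtime */
		.remove	= __exit_p(foo_remove),
		.driver	= {
			.name	= "foo",
			.owner	= THIS_MODULE,
		},
	};

	static int __init foo_init(void)
	{
		/* the probe callback is only used during this call */
		return platform_driver_probe(&foo_driver, foo_probe);
	}

	static void __exit foo_exit(void)
	{
		platform_driver_unregister(&foo_driver);
	}

	module_init(foo_init);
	module_exit(foo_exit);
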
diff --git a/drivers/video/omap2/dss/hdmi_panel.c b/drivers/video/omap2/dss/hdmi_panel.c
index 533d5dc..1179e3c 100644
--- a/drivers/video/omap2/dss/hdmi_panel.c
+++ b/drivers/video/omap2/dss/hdmi_panel.c
@@ -30,7 +30,12 @@
#include "dss.h"
static struct {
- struct mutex hdmi_lock;
+ /* This protects the panel ops, mainly when accessing the HDMI IP. */
+ struct mutex lock;
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+ /* This protects the audio ops, specifically. */
+ spinlock_t audio_lock;
+#endif
} hdmi;
@@ -54,12 +59,168 @@ static void hdmi_panel_remove(struct omap_dss_device *dssdev)
}
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+static int hdmi_panel_audio_enable(struct omap_dss_device *dssdev)
+{
+ unsigned long flags;
+ int r;
+
+ mutex_lock(&hdmi.lock);
+ spin_lock_irqsave(&hdmi.audio_lock, flags);
+
+ /* enable audio only if the display is active and supports audio */
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE ||
+ !hdmi_mode_has_audio()) {
+ DSSERR("audio not supported or display is off\n");
+ r = -EPERM;
+ goto err;
+ }
+
+ r = hdmi_audio_enable();
+
+ if (!r)
+ dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
+
+err:
+ spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+static void hdmi_panel_audio_disable(struct omap_dss_device *dssdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi.audio_lock, flags);
+
+ hdmi_audio_disable();
+
+ dssdev->audio_state = OMAP_DSS_AUDIO_DISABLED;
+
+ spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+}
+
+static int hdmi_panel_audio_start(struct omap_dss_device *dssdev)
+{
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(&hdmi.audio_lock, flags);
+ /*
+	 * No need to check the panel state. It was checked when transitioning
+ * to AUDIO_ENABLED.
+ */
+ if (dssdev->audio_state != OMAP_DSS_AUDIO_ENABLED) {
+ DSSERR("audio start from invalid state\n");
+ r = -EPERM;
+ goto err;
+ }
+
+ r = hdmi_audio_start();
+
+ if (!r)
+ dssdev->audio_state = OMAP_DSS_AUDIO_PLAYING;
+
+err:
+ spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+ return r;
+}
+
+static void hdmi_panel_audio_stop(struct omap_dss_device *dssdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi.audio_lock, flags);
+
+ hdmi_audio_stop();
+ dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
+
+ spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+}
+
+static bool hdmi_panel_audio_supported(struct omap_dss_device *dssdev)
+{
+ bool r = false;
+
+ mutex_lock(&hdmi.lock);
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ goto err;
+
+ if (!hdmi_mode_has_audio())
+ goto err;
+
+ r = true;
+err:
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+static int hdmi_panel_audio_config(struct omap_dss_device *dssdev,
+ struct omap_dss_audio *audio)
+{
+ unsigned long flags;
+ int r;
+
+ mutex_lock(&hdmi.lock);
+ spin_lock_irqsave(&hdmi.audio_lock, flags);
+
+ /* config audio only if the display is active and supports audio */
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE ||
+ !hdmi_mode_has_audio()) {
+ DSSERR("audio not supported or display is off\n");
+ r = -EPERM;
+ goto err;
+ }
+
+ r = hdmi_audio_config(audio);
+
+ if (!r)
+ dssdev->audio_state = OMAP_DSS_AUDIO_CONFIGURED;
+
+err:
+ spin_unlock_irqrestore(&hdmi.audio_lock, flags);
+ mutex_unlock(&hdmi.lock);
+ return r;
+}
+
+#else
+static int hdmi_panel_audio_enable(struct omap_dss_device *dssdev)
+{
+ return -EPERM;
+}
+
+static void hdmi_panel_audio_disable(struct omap_dss_device *dssdev)
+{
+}
+
+static int hdmi_panel_audio_start(struct omap_dss_device *dssdev)
+{
+ return -EPERM;
+}
+
+static void hdmi_panel_audio_stop(struct omap_dss_device *dssdev)
+{
+}
+
+static bool hdmi_panel_audio_supported(struct omap_dss_device *dssdev)
+{
+ return false;
+}
+
+static int hdmi_panel_audio_config(struct omap_dss_device *dssdev,
+ struct omap_dss_audio *audio)
+{
+ return -EPERM;
+}
+#endif
+
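
Read together, the state checks in the audio ops above imply a particular call order for any audio user of this panel driver. The fragment below is only an illustrative reading of that state machine; the caller, the audio variable and the omitted error handling are assumptions, not code added by this patch.

	/* hypothetical caller; dssdev and audio are assumed to exist */
	if (dssdev->driver->audio_supported(dssdev)) {
		dssdev->driver->audio_config(dssdev, &audio);	/* -> AUDIO_CONFIGURED */
		dssdev->driver->audio_enable(dssdev);		/* -> AUDIO_ENABLED */
		dssdev->driver->audio_start(dssdev);		/* -> AUDIO_PLAYING */
		/* ... stream runs ... */
		dssdev->driver->audio_stop(dssdev);		/* back to AUDIO_ENABLED */
		dssdev->driver->audio_disable(dssdev);		/* -> AUDIO_DISABLED */
	}
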
static int hdmi_panel_enable(struct omap_dss_device *dssdev)
{
int r = 0;
DSSDBG("ENTER hdmi_panel_enable\n");
- mutex_lock(&hdmi.hdmi_lock);
+ mutex_lock(&hdmi.lock);
if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
r = -EINVAL;
@@ -75,40 +236,52 @@ static int hdmi_panel_enable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
err:
- mutex_unlock(&hdmi.hdmi_lock);
+ mutex_unlock(&hdmi.lock);
return r;
}
static void hdmi_panel_disable(struct omap_dss_device *dssdev)
{
- mutex_lock(&hdmi.hdmi_lock);
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ mutex_lock(&hdmi.lock);
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ /*
+ * TODO: notify audio users that the display was disabled. For
+ * now, disable audio locally to not break our audio state
+ * machine.
+ */
+ hdmi_panel_audio_disable(dssdev);
omapdss_hdmi_display_disable(dssdev);
+ }
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
- mutex_unlock(&hdmi.hdmi_lock);
+ mutex_unlock(&hdmi.lock);
}
static int hdmi_panel_suspend(struct omap_dss_device *dssdev)
{
int r = 0;
- mutex_lock(&hdmi.hdmi_lock);
+ mutex_lock(&hdmi.lock);
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
r = -EINVAL;
goto err;
}
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+ /*
+ * TODO: notify audio users that the display was suspended. For now,
+ * disable audio locally to not break our audio state machine.
+ */
+ hdmi_panel_audio_disable(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
omapdss_hdmi_display_disable(dssdev);
err:
- mutex_unlock(&hdmi.hdmi_lock);
+ mutex_unlock(&hdmi.lock);
return r;
}
@@ -117,7 +290,7 @@ static int hdmi_panel_resume(struct omap_dss_device *dssdev)
{
int r = 0;
- mutex_lock(&hdmi.hdmi_lock);
+ mutex_lock(&hdmi.lock);
if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
r = -EINVAL;
@@ -129,11 +302,12 @@ static int hdmi_panel_resume(struct omap_dss_device *dssdev)
DSSERR("failed to power on\n");
goto err;
}
+ /* TODO: notify audio users that the panel resumed. */
dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
err:
- mutex_unlock(&hdmi.hdmi_lock);
+ mutex_unlock(&hdmi.lock);
return r;
}
@@ -141,11 +315,11 @@ err:
static void hdmi_get_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
- mutex_lock(&hdmi.hdmi_lock);
+ mutex_lock(&hdmi.lock);
*timings = dssdev->panel.timings;
- mutex_unlock(&hdmi.hdmi_lock);
+ mutex_unlock(&hdmi.lock);
}
static void hdmi_set_timings(struct omap_dss_device *dssdev,
@@ -153,12 +327,18 @@ static void hdmi_set_timings(struct omap_dss_device *dssdev,
{
DSSDBG("hdmi_set_timings\n");
- mutex_lock(&hdmi.hdmi_lock);
+ mutex_lock(&hdmi.lock);
+
+ /*
+ * TODO: notify audio users that there was a timings change. For
+ * now, disable audio locally to not break our audio state machine.
+ */
+ hdmi_panel_audio_disable(dssdev);
dssdev->panel.timings = *timings;
omapdss_hdmi_display_set_timing(dssdev);
- mutex_unlock(&hdmi.hdmi_lock);
+ mutex_unlock(&hdmi.lock);
}
static int hdmi_check_timings(struct omap_dss_device *dssdev,
@@ -168,11 +348,11 @@ static int hdmi_check_timings(struct omap_dss_device *dssdev,
DSSDBG("hdmi_check_timings\n");
- mutex_lock(&hdmi.hdmi_lock);
+ mutex_lock(&hdmi.lock);
r = omapdss_hdmi_display_check_timing(dssdev, timings);
- mutex_unlock(&hdmi.hdmi_lock);
+ mutex_unlock(&hdmi.lock);
return r;
}
@@ -180,7 +360,7 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev, u8 *buf, int len)
{
int r;
- mutex_lock(&hdmi.hdmi_lock);
+ mutex_lock(&hdmi.lock);
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
r = omapdss_hdmi_display_enable(dssdev);
@@ -194,7 +374,7 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev, u8 *buf, int len)
dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
omapdss_hdmi_display_disable(dssdev);
err:
- mutex_unlock(&hdmi.hdmi_lock);
+ mutex_unlock(&hdmi.lock);
return r;
}
@@ -203,7 +383,7 @@ static bool hdmi_detect(struct omap_dss_device *dssdev)
{
int r;
- mutex_lock(&hdmi.hdmi_lock);
+ mutex_lock(&hdmi.lock);
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
r = omapdss_hdmi_display_enable(dssdev);
@@ -217,7 +397,7 @@ static bool hdmi_detect(struct omap_dss_device *dssdev)
dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
omapdss_hdmi_display_disable(dssdev);
err:
- mutex_unlock(&hdmi.hdmi_lock);
+ mutex_unlock(&hdmi.lock);
return r;
}
@@ -234,6 +414,12 @@ static struct omap_dss_driver hdmi_driver = {
.check_timings = hdmi_check_timings,
.read_edid = hdmi_read_edid,
.detect = hdmi_detect,
+ .audio_enable = hdmi_panel_audio_enable,
+ .audio_disable = hdmi_panel_audio_disable,
+ .audio_start = hdmi_panel_audio_start,
+ .audio_stop = hdmi_panel_audio_stop,
+ .audio_supported = hdmi_panel_audio_supported,
+ .audio_config = hdmi_panel_audio_config,
.driver = {
.name = "hdmi_panel",
.owner = THIS_MODULE,
@@ -242,7 +428,11 @@ static struct omap_dss_driver hdmi_driver = {
int hdmi_panel_init(void)
{
- mutex_init(&hdmi.hdmi_lock);
+ mutex_init(&hdmi.lock);
+
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+ spin_lock_init(&hdmi.audio_lock);
+#endif
omap_dss_register_driver(&hdmi_driver);
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index e736460..0cbcde4 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -654,9 +654,20 @@ static int dss_mgr_check_zorder(struct omap_overlay_manager *mgr,
return 0;
}
+int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
+ const struct omap_video_timings *timings)
+{
+ if (!dispc_mgr_timings_ok(mgr->id, timings)) {
+ DSSERR("check_manager: invalid timings\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int dss_mgr_check(struct omap_overlay_manager *mgr,
- struct omap_dss_device *dssdev,
struct omap_overlay_manager_info *info,
+ const struct omap_video_timings *mgr_timings,
struct omap_overlay_info **overlay_infos)
{
struct omap_overlay *ovl;
@@ -668,6 +679,10 @@ int dss_mgr_check(struct omap_overlay_manager *mgr,
return r;
}
+ r = dss_mgr_check_timings(mgr, mgr_timings);
+ if (r)
+ return r;
+
list_for_each_entry(ovl, &mgr->overlays, list) {
struct omap_overlay_info *oi;
int r;
@@ -677,7 +692,7 @@ int dss_mgr_check(struct omap_overlay_manager *mgr,
if (oi == NULL)
continue;
- r = dss_ovl_check(ovl, oi, dssdev);
+ r = dss_ovl_check(ovl, oi, mgr_timings);
if (r)
return r;
}
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 6e82181..b0ba60f 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -628,19 +628,23 @@ int dss_ovl_simple_check(struct omap_overlay *ovl,
return -EINVAL;
}
+ if (dss_feat_rotation_type_supported(info->rotation_type) == 0) {
+ DSSERR("check_overlay: rotation type %d not supported\n",
+ info->rotation_type);
+ return -EINVAL;
+ }
+
return 0;
}
-int dss_ovl_check(struct omap_overlay *ovl,
- struct omap_overlay_info *info, struct omap_dss_device *dssdev)
+int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
+ const struct omap_video_timings *mgr_timings)
{
u16 outw, outh;
u16 dw, dh;
- if (dssdev == NULL)
- return 0;
-
- dssdev->driver->get_resolution(dssdev, &dw, &dh);
+ dw = mgr_timings->x_res;
+ dh = mgr_timings->y_res;
if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
outw = info->width;
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index 788a0ef..7985fa1 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -141,7 +141,7 @@ static void rfbi_runtime_put(void)
DSSDBG("rfbi_runtime_put\n");
r = pm_runtime_put_sync(&rfbi.pdev->dev);
- WARN_ON(r < 0);
+ WARN_ON(r < 0 && r != -ENOSYS);
}
void rfbi_bus_lock(void)
@@ -304,13 +304,23 @@ static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
u16 height, void (*callback)(void *data), void *data)
{
u32 l;
+ struct omap_video_timings timings = {
+ .hsw = 1,
+ .hfp = 1,
+ .hbp = 1,
+ .vsw = 1,
+ .vfp = 0,
+ .vbp = 0,
+ .x_res = width,
+ .y_res = height,
+ };
/*BUG_ON(callback == 0);*/
BUG_ON(rfbi.framedone_callback != NULL);
DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
- dispc_mgr_set_lcd_size(dssdev->manager->id, width, height);
+ dss_mgr_set_timings(dssdev->manager, &timings);
dispc_mgr_enable(dssdev->manager->id, true);
@@ -766,6 +776,16 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
u16 *x, u16 *y, u16 *w, u16 *h)
{
u16 dw, dh;
+ struct omap_video_timings timings = {
+ .hsw = 1,
+ .hfp = 1,
+ .hbp = 1,
+ .vsw = 1,
+ .vfp = 0,
+ .vbp = 0,
+ .x_res = *w,
+ .y_res = *h,
+ };
dssdev->driver->get_resolution(dssdev, &dw, &dh);
@@ -784,7 +804,7 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
if (*w == 0 || *h == 0)
return -EINVAL;
- dispc_mgr_set_lcd_size(dssdev->manager->id, *w, *h);
+ dss_mgr_set_timings(dssdev->manager, &timings);
return 0;
}
@@ -799,7 +819,7 @@ int omap_rfbi_update(struct omap_dss_device *dssdev,
}
EXPORT_SYMBOL(omap_rfbi_update);
-void rfbi_dump_regs(struct seq_file *s)
+static void rfbi_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r))
@@ -900,15 +920,39 @@ void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev)
}
EXPORT_SYMBOL(omapdss_rfbi_display_disable);
-int rfbi_init_display(struct omap_dss_device *dssdev)
+static int __init rfbi_init_display(struct omap_dss_device *dssdev)
{
rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev;
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
return 0;
}
+static void __init rfbi_probe_pdata(struct platform_device *pdev)
+{
+ struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+ int i, r;
+
+ for (i = 0; i < pdata->num_devices; ++i) {
+ struct omap_dss_device *dssdev = pdata->devices[i];
+
+ if (dssdev->type != OMAP_DISPLAY_TYPE_DBI)
+ continue;
+
+ r = rfbi_init_display(dssdev);
+ if (r) {
+ DSSERR("device %s init failed: %d\n", dssdev->name, r);
+ continue;
+ }
+
+ r = omap_dss_register_device(dssdev, &pdev->dev, i);
+ if (r)
+ DSSERR("device %s register failed: %d\n",
+ dssdev->name, r);
+ }
+}
+
/* RFBI HW IP initialisation */
-static int omap_rfbihw_probe(struct platform_device *pdev)
+static int __init omap_rfbihw_probe(struct platform_device *pdev)
{
u32 rev;
struct resource *rfbi_mem;
@@ -956,6 +1000,10 @@ static int omap_rfbihw_probe(struct platform_device *pdev)
rfbi_runtime_put();
+ dss_debugfs_create_file("rfbi", rfbi_dump_regs);
+
+ rfbi_probe_pdata(pdev);
+
return 0;
err_runtime_get:
@@ -963,8 +1011,9 @@ err_runtime_get:
return r;
}
-static int omap_rfbihw_remove(struct platform_device *pdev)
+static int __exit omap_rfbihw_remove(struct platform_device *pdev)
{
+ omap_dss_unregister_child_devices(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
@@ -972,7 +1021,6 @@ static int omap_rfbihw_remove(struct platform_device *pdev)
static int rfbi_runtime_suspend(struct device *dev)
{
dispc_runtime_put();
- dss_runtime_put();
return 0;
}
@@ -981,20 +1029,11 @@ static int rfbi_runtime_resume(struct device *dev)
{
int r;
- r = dss_runtime_get();
- if (r < 0)
- goto err_get_dss;
-
r = dispc_runtime_get();
if (r < 0)
- goto err_get_dispc;
+ return r;
return 0;
-
-err_get_dispc:
- dss_runtime_put();
-err_get_dss:
- return r;
}
static const struct dev_pm_ops rfbi_pm_ops = {
@@ -1003,8 +1042,7 @@ static const struct dev_pm_ops rfbi_pm_ops = {
};
static struct platform_driver omap_rfbihw_driver = {
- .probe = omap_rfbihw_probe,
- .remove = omap_rfbihw_remove,
+ .remove = __exit_p(omap_rfbihw_remove),
.driver = {
.name = "omapdss_rfbi",
.owner = THIS_MODULE,
@@ -1012,12 +1050,12 @@ static struct platform_driver omap_rfbihw_driver = {
},
};
-int rfbi_init_platform_driver(void)
+int __init rfbi_init_platform_driver(void)
{
- return platform_driver_register(&omap_rfbihw_driver);
+ return platform_driver_probe(&omap_rfbihw_driver, omap_rfbihw_probe);
}
-void rfbi_uninit_platform_driver(void)
+void __exit rfbi_uninit_platform_driver(void)
{
- return platform_driver_unregister(&omap_rfbihw_driver);
+ platform_driver_unregister(&omap_rfbihw_driver);
}
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 8266ca0..3a43dc2 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -24,6 +24,7 @@
#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/export.h>
+#include <linux/platform_device.h>
#include <video/omapdss.h>
#include "dss.h"
@@ -71,10 +72,6 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_reg_enable;
- r = dss_runtime_get();
- if (r)
- goto err_get_dss;
-
r = dispc_runtime_get();
if (r)
goto err_get_dispc;
@@ -107,7 +104,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
}
- dispc_mgr_set_lcd_timings(dssdev->manager->id, t);
+ dss_mgr_set_timings(dssdev->manager, t);
r = dss_set_clock_div(&dss_cinfo);
if (r)
@@ -137,8 +134,6 @@ err_set_dss_clock_div:
err_calc_clock_div:
dispc_runtime_put();
err_get_dispc:
- dss_runtime_put();
-err_get_dss:
regulator_disable(sdi.vdds_sdi_reg);
err_reg_enable:
omap_dss_stop_device(dssdev);
@@ -154,7 +149,6 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
dss_sdi_disable();
dispc_runtime_put();
- dss_runtime_put();
regulator_disable(sdi.vdds_sdi_reg);
@@ -162,7 +156,7 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
}
EXPORT_SYMBOL(omapdss_sdi_display_disable);
-int sdi_init_display(struct omap_dss_device *dssdev)
+static int __init sdi_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("SDI init\n");
@@ -182,11 +176,58 @@ int sdi_init_display(struct omap_dss_device *dssdev)
return 0;
}
-int sdi_init(void)
+static void __init sdi_probe_pdata(struct platform_device *pdev)
+{
+ struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+ int i, r;
+
+ for (i = 0; i < pdata->num_devices; ++i) {
+ struct omap_dss_device *dssdev = pdata->devices[i];
+
+ if (dssdev->type != OMAP_DISPLAY_TYPE_SDI)
+ continue;
+
+ r = sdi_init_display(dssdev);
+ if (r) {
+ DSSERR("device %s init failed: %d\n", dssdev->name, r);
+ continue;
+ }
+
+ r = omap_dss_register_device(dssdev, &pdev->dev, i);
+ if (r)
+ DSSERR("device %s register failed: %d\n",
+ dssdev->name, r);
+ }
+}
+
+static int __init omap_sdi_probe(struct platform_device *pdev)
{
+ sdi_probe_pdata(pdev);
+
+ return 0;
+}
+
+static int __exit omap_sdi_remove(struct platform_device *pdev)
+{
+ omap_dss_unregister_child_devices(&pdev->dev);
+
return 0;
}
-void sdi_exit(void)
+static struct platform_driver omap_sdi_driver = {
+ .remove = __exit_p(omap_sdi_remove),
+ .driver = {
+ .name = "omapdss_sdi",
+ .owner = THIS_MODULE,
+ },
+};
+
+int __init sdi_init_platform_driver(void)
+{
+ return platform_driver_probe(&omap_sdi_driver, omap_sdi_probe);
+}
+
+void __exit sdi_uninit_platform_driver(void)
{
+ platform_driver_unregister(&omap_sdi_driver);
}
diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h
index 1f58b84..e734cb4 100644
--- a/drivers/video/omap2/dss/ti_hdmi.h
+++ b/drivers/video/omap2/dss/ti_hdmi.h
@@ -96,7 +96,9 @@ struct ti_hdmi_ip_ops {
void (*pll_disable)(struct hdmi_ip_data *ip_data);
- void (*video_enable)(struct hdmi_ip_data *ip_data, bool start);
+ int (*video_enable)(struct hdmi_ip_data *ip_data);
+
+ void (*video_disable)(struct hdmi_ip_data *ip_data);
void (*dump_wrapper)(struct hdmi_ip_data *ip_data, struct seq_file *s);
@@ -106,9 +108,17 @@ struct ti_hdmi_ip_ops {
void (*dump_phy)(struct hdmi_ip_data *ip_data, struct seq_file *s);
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
- void (*audio_enable)(struct hdmi_ip_data *ip_data, bool start);
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+ int (*audio_enable)(struct hdmi_ip_data *ip_data);
+
+ void (*audio_disable)(struct hdmi_ip_data *ip_data);
+
+ int (*audio_start)(struct hdmi_ip_data *ip_data);
+
+ void (*audio_stop)(struct hdmi_ip_data *ip_data);
+
+ int (*audio_config)(struct hdmi_ip_data *ip_data,
+ struct omap_dss_audio *audio);
#endif
};
@@ -173,7 +183,8 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data);
void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data);
int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data, u8 *edid, int len);
bool ti_hdmi_4xxx_detect(struct hdmi_ip_data *ip_data);
-void ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data, bool start);
+int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data);
+void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data);
int ti_hdmi_4xxx_pll_enable(struct hdmi_ip_data *ip_data);
void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data);
void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data);
@@ -181,8 +192,13 @@ void ti_hdmi_4xxx_wp_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
void ti_hdmi_4xxx_pll_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-void ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable);
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts);
+int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data);
+void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data);
+int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data);
+void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data);
+int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
+ struct omap_dss_audio *audio);
#endif
#endif
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
index bfe6fe6..4dae1b2 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
@@ -29,9 +29,14 @@
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/gpio.h>
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+#include <sound/asound.h>
+#include <sound/asoundef.h>
+#endif
#include "ti_hdmi_4xxx_ip.h"
#include "dss.h"
+#include "dss_features.h"
static inline void hdmi_write_reg(void __iomem *base_addr,
const u16 idx, u32 val)
@@ -298,9 +303,9 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
r = request_threaded_irq(gpio_to_irq(ip_data->hpd_gpio),
- NULL, hpd_irq_handler,
- IRQF_DISABLED | IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING, "hpd", ip_data);
+ NULL, hpd_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT, "hpd", ip_data);
if (r) {
DSSERR("HPD IRQ request failed\n");
hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
@@ -699,9 +704,15 @@ static void hdmi_wp_init(struct omap_video_timings *timings,
}
-void ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data, bool start)
+int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data)
+{
+ REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, true, 31, 31);
+ return 0;
+}
+
+void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data)
{
- REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, start, 31, 31);
+ REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, false, 31, 31);
}
static void hdmi_wp_video_init_format(struct hdmi_video_format *video_fmt,
@@ -886,10 +897,12 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
#define CORE_REG(i, name) name(i)
#define DUMPCORE(r) seq_printf(s, "%-35s %08x\n", #r,\
- hdmi_read_reg(hdmi_pll_base(ip_data), r))
-#define DUMPCOREAV(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \
+ hdmi_read_reg(hdmi_core_sys_base(ip_data), r))
+#define DUMPCOREAV(r) seq_printf(s, "%-35s %08x\n", #r,\
+ hdmi_read_reg(hdmi_av_base(ip_data), r))
+#define DUMPCOREAV2(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \
(i < 10) ? 32 - strlen(#r) : 31 - strlen(#r), " ", \
- hdmi_read_reg(hdmi_pll_base(ip_data), CORE_REG(i, r)))
+ hdmi_read_reg(hdmi_av_base(ip_data), CORE_REG(i, r)))
DUMPCORE(HDMI_CORE_SYS_VND_IDL);
DUMPCORE(HDMI_CORE_SYS_DEV_IDL);
@@ -898,6 +911,13 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
DUMPCORE(HDMI_CORE_SYS_SRST);
DUMPCORE(HDMI_CORE_CTRL1);
DUMPCORE(HDMI_CORE_SYS_SYS_STAT);
+ DUMPCORE(HDMI_CORE_SYS_DE_DLY);
+ DUMPCORE(HDMI_CORE_SYS_DE_CTRL);
+ DUMPCORE(HDMI_CORE_SYS_DE_TOP);
+ DUMPCORE(HDMI_CORE_SYS_DE_CNTL);
+ DUMPCORE(HDMI_CORE_SYS_DE_CNTH);
+ DUMPCORE(HDMI_CORE_SYS_DE_LINL);
+ DUMPCORE(HDMI_CORE_SYS_DE_LINH_1);
DUMPCORE(HDMI_CORE_SYS_VID_ACEN);
DUMPCORE(HDMI_CORE_SYS_VID_MODE);
DUMPCORE(HDMI_CORE_SYS_INTR_STATE);
@@ -907,102 +927,91 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
DUMPCORE(HDMI_CORE_SYS_INTR4);
DUMPCORE(HDMI_CORE_SYS_UMASK1);
DUMPCORE(HDMI_CORE_SYS_TMDS_CTRL);
- DUMPCORE(HDMI_CORE_SYS_DE_DLY);
- DUMPCORE(HDMI_CORE_SYS_DE_CTRL);
- DUMPCORE(HDMI_CORE_SYS_DE_TOP);
- DUMPCORE(HDMI_CORE_SYS_DE_CNTL);
- DUMPCORE(HDMI_CORE_SYS_DE_CNTH);
- DUMPCORE(HDMI_CORE_SYS_DE_LINL);
- DUMPCORE(HDMI_CORE_SYS_DE_LINH_1);
- DUMPCORE(HDMI_CORE_DDC_CMD);
- DUMPCORE(HDMI_CORE_DDC_STATUS);
DUMPCORE(HDMI_CORE_DDC_ADDR);
+ DUMPCORE(HDMI_CORE_DDC_SEGM);
DUMPCORE(HDMI_CORE_DDC_OFFSET);
DUMPCORE(HDMI_CORE_DDC_COUNT1);
DUMPCORE(HDMI_CORE_DDC_COUNT2);
+ DUMPCORE(HDMI_CORE_DDC_STATUS);
+ DUMPCORE(HDMI_CORE_DDC_CMD);
DUMPCORE(HDMI_CORE_DDC_DATA);
- DUMPCORE(HDMI_CORE_DDC_SEGM);
- DUMPCORE(HDMI_CORE_AV_HDMI_CTRL);
- DUMPCORE(HDMI_CORE_AV_DPD);
- DUMPCORE(HDMI_CORE_AV_PB_CTRL1);
- DUMPCORE(HDMI_CORE_AV_PB_CTRL2);
- DUMPCORE(HDMI_CORE_AV_AVI_TYPE);
- DUMPCORE(HDMI_CORE_AV_AVI_VERS);
- DUMPCORE(HDMI_CORE_AV_AVI_LEN);
- DUMPCORE(HDMI_CORE_AV_AVI_CHSUM);
+ DUMPCOREAV(HDMI_CORE_AV_ACR_CTRL);
+ DUMPCOREAV(HDMI_CORE_AV_FREQ_SVAL);
+ DUMPCOREAV(HDMI_CORE_AV_N_SVAL1);
+ DUMPCOREAV(HDMI_CORE_AV_N_SVAL2);
+ DUMPCOREAV(HDMI_CORE_AV_N_SVAL3);
+ DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL1);
+ DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL2);
+ DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL3);
+ DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL1);
+ DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL2);
+ DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL3);
+ DUMPCOREAV(HDMI_CORE_AV_AUD_MODE);
+ DUMPCOREAV(HDMI_CORE_AV_SPDIF_CTRL);
+ DUMPCOREAV(HDMI_CORE_AV_HW_SPDIF_FS);
+ DUMPCOREAV(HDMI_CORE_AV_SWAP_I2S);
+ DUMPCOREAV(HDMI_CORE_AV_SPDIF_ERTH);
+ DUMPCOREAV(HDMI_CORE_AV_I2S_IN_MAP);
+ DUMPCOREAV(HDMI_CORE_AV_I2S_IN_CTRL);
+ DUMPCOREAV(HDMI_CORE_AV_I2S_CHST0);
+ DUMPCOREAV(HDMI_CORE_AV_I2S_CHST1);
+ DUMPCOREAV(HDMI_CORE_AV_I2S_CHST2);
+ DUMPCOREAV(HDMI_CORE_AV_I2S_CHST4);
+ DUMPCOREAV(HDMI_CORE_AV_I2S_CHST5);
+ DUMPCOREAV(HDMI_CORE_AV_ASRC);
+ DUMPCOREAV(HDMI_CORE_AV_I2S_IN_LEN);
+ DUMPCOREAV(HDMI_CORE_AV_HDMI_CTRL);
+ DUMPCOREAV(HDMI_CORE_AV_AUDO_TXSTAT);
+ DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_1);
+ DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_2);
+ DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_3);
+ DUMPCOREAV(HDMI_CORE_AV_TEST_TXCTRL);
+ DUMPCOREAV(HDMI_CORE_AV_DPD);
+ DUMPCOREAV(HDMI_CORE_AV_PB_CTRL1);
+ DUMPCOREAV(HDMI_CORE_AV_PB_CTRL2);
+ DUMPCOREAV(HDMI_CORE_AV_AVI_TYPE);
+ DUMPCOREAV(HDMI_CORE_AV_AVI_VERS);
+ DUMPCOREAV(HDMI_CORE_AV_AVI_LEN);
+ DUMPCOREAV(HDMI_CORE_AV_AVI_CHSUM);
for (i = 0; i < HDMI_CORE_AV_AVI_DBYTE_NELEMS; i++)
- DUMPCOREAV(i, HDMI_CORE_AV_AVI_DBYTE);
+ DUMPCOREAV2(i, HDMI_CORE_AV_AVI_DBYTE);
+
+ DUMPCOREAV(HDMI_CORE_AV_SPD_TYPE);
+ DUMPCOREAV(HDMI_CORE_AV_SPD_VERS);
+ DUMPCOREAV(HDMI_CORE_AV_SPD_LEN);
+ DUMPCOREAV(HDMI_CORE_AV_SPD_CHSUM);
for (i = 0; i < HDMI_CORE_AV_SPD_DBYTE_NELEMS; i++)
- DUMPCOREAV(i, HDMI_CORE_AV_SPD_DBYTE);
+ DUMPCOREAV2(i, HDMI_CORE_AV_SPD_DBYTE);
+
+ DUMPCOREAV(HDMI_CORE_AV_AUDIO_TYPE);
+ DUMPCOREAV(HDMI_CORE_AV_AUDIO_VERS);
+ DUMPCOREAV(HDMI_CORE_AV_AUDIO_LEN);
+ DUMPCOREAV(HDMI_CORE_AV_AUDIO_CHSUM);
for (i = 0; i < HDMI_CORE_AV_AUD_DBYTE_NELEMS; i++)
- DUMPCOREAV(i, HDMI_CORE_AV_AUD_DBYTE);
+ DUMPCOREAV2(i, HDMI_CORE_AV_AUD_DBYTE);
+
+ DUMPCOREAV(HDMI_CORE_AV_MPEG_TYPE);
+ DUMPCOREAV(HDMI_CORE_AV_MPEG_VERS);
+ DUMPCOREAV(HDMI_CORE_AV_MPEG_LEN);
+ DUMPCOREAV(HDMI_CORE_AV_MPEG_CHSUM);
for (i = 0; i < HDMI_CORE_AV_MPEG_DBYTE_NELEMS; i++)
- DUMPCOREAV(i, HDMI_CORE_AV_MPEG_DBYTE);
+ DUMPCOREAV2(i, HDMI_CORE_AV_MPEG_DBYTE);
for (i = 0; i < HDMI_CORE_AV_GEN_DBYTE_NELEMS; i++)
- DUMPCOREAV(i, HDMI_CORE_AV_GEN_DBYTE);
+ DUMPCOREAV2(i, HDMI_CORE_AV_GEN_DBYTE);
+
+ DUMPCOREAV(HDMI_CORE_AV_CP_BYTE1);
for (i = 0; i < HDMI_CORE_AV_GEN2_DBYTE_NELEMS; i++)
- DUMPCOREAV(i, HDMI_CORE_AV_GEN2_DBYTE);
-
- DUMPCORE(HDMI_CORE_AV_ACR_CTRL);
- DUMPCORE(HDMI_CORE_AV_FREQ_SVAL);
- DUMPCORE(HDMI_CORE_AV_N_SVAL1);
- DUMPCORE(HDMI_CORE_AV_N_SVAL2);
- DUMPCORE(HDMI_CORE_AV_N_SVAL3);
- DUMPCORE(HDMI_CORE_AV_CTS_SVAL1);
- DUMPCORE(HDMI_CORE_AV_CTS_SVAL2);
- DUMPCORE(HDMI_CORE_AV_CTS_SVAL3);
- DUMPCORE(HDMI_CORE_AV_CTS_HVAL1);
- DUMPCORE(HDMI_CORE_AV_CTS_HVAL2);
- DUMPCORE(HDMI_CORE_AV_CTS_HVAL3);
- DUMPCORE(HDMI_CORE_AV_AUD_MODE);
- DUMPCORE(HDMI_CORE_AV_SPDIF_CTRL);
- DUMPCORE(HDMI_CORE_AV_HW_SPDIF_FS);
- DUMPCORE(HDMI_CORE_AV_SWAP_I2S);
- DUMPCORE(HDMI_CORE_AV_SPDIF_ERTH);
- DUMPCORE(HDMI_CORE_AV_I2S_IN_MAP);
- DUMPCORE(HDMI_CORE_AV_I2S_IN_CTRL);
- DUMPCORE(HDMI_CORE_AV_I2S_CHST0);
- DUMPCORE(HDMI_CORE_AV_I2S_CHST1);
- DUMPCORE(HDMI_CORE_AV_I2S_CHST2);
- DUMPCORE(HDMI_CORE_AV_I2S_CHST4);
- DUMPCORE(HDMI_CORE_AV_I2S_CHST5);
- DUMPCORE(HDMI_CORE_AV_ASRC);
- DUMPCORE(HDMI_CORE_AV_I2S_IN_LEN);
- DUMPCORE(HDMI_CORE_AV_HDMI_CTRL);
- DUMPCORE(HDMI_CORE_AV_AUDO_TXSTAT);
- DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_1);
- DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_2);
- DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_3);
- DUMPCORE(HDMI_CORE_AV_TEST_TXCTRL);
- DUMPCORE(HDMI_CORE_AV_DPD);
- DUMPCORE(HDMI_CORE_AV_PB_CTRL1);
- DUMPCORE(HDMI_CORE_AV_PB_CTRL2);
- DUMPCORE(HDMI_CORE_AV_AVI_TYPE);
- DUMPCORE(HDMI_CORE_AV_AVI_VERS);
- DUMPCORE(HDMI_CORE_AV_AVI_LEN);
- DUMPCORE(HDMI_CORE_AV_AVI_CHSUM);
- DUMPCORE(HDMI_CORE_AV_SPD_TYPE);
- DUMPCORE(HDMI_CORE_AV_SPD_VERS);
- DUMPCORE(HDMI_CORE_AV_SPD_LEN);
- DUMPCORE(HDMI_CORE_AV_SPD_CHSUM);
- DUMPCORE(HDMI_CORE_AV_AUDIO_TYPE);
- DUMPCORE(HDMI_CORE_AV_AUDIO_VERS);
- DUMPCORE(HDMI_CORE_AV_AUDIO_LEN);
- DUMPCORE(HDMI_CORE_AV_AUDIO_CHSUM);
- DUMPCORE(HDMI_CORE_AV_MPEG_TYPE);
- DUMPCORE(HDMI_CORE_AV_MPEG_VERS);
- DUMPCORE(HDMI_CORE_AV_MPEG_LEN);
- DUMPCORE(HDMI_CORE_AV_MPEG_CHSUM);
- DUMPCORE(HDMI_CORE_AV_CP_BYTE1);
- DUMPCORE(HDMI_CORE_AV_CEC_ADDR_ID);
+ DUMPCOREAV2(i, HDMI_CORE_AV_GEN2_DBYTE);
+
+ DUMPCOREAV(HDMI_CORE_AV_CEC_ADDR_ID);
}
void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
@@ -1016,9 +1025,8 @@ void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL);
}
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
+#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
+static void ti_hdmi_4xxx_wp_audio_config_format(struct hdmi_ip_data *ip_data,
struct hdmi_audio_format *aud_fmt)
{
u32 r;
@@ -1037,7 +1045,7 @@ void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG, r);
}
-void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
+static void ti_hdmi_4xxx_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
struct hdmi_audio_dma *aud_dma)
{
u32 r;
@@ -1055,7 +1063,7 @@ void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL, r);
}
-void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
+static void ti_hdmi_4xxx_core_audio_config(struct hdmi_ip_data *ip_data,
struct hdmi_core_audio_config *cfg)
{
u32 r;
@@ -1106,27 +1114,33 @@ void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
REG_FLD_MOD(av_base, HDMI_CORE_AV_SPDIF_CTRL,
cfg->fs_override, 1, 1);
- /* I2S parameters */
- REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_CHST4,
- cfg->freq_sample, 3, 0);
-
+ /*
+ * Set IEC-60958-3 channel status word. It is passed to the IP
+ * just as it is received. The user of the driver is responsible
+ * for its contents.
+ */
+ hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST0,
+ cfg->iec60958_cfg->status[0]);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST1,
+ cfg->iec60958_cfg->status[1]);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST2,
+ cfg->iec60958_cfg->status[2]);
+ /* yes, this is correct: status[3] goes to CHST4 register */
+ hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST4,
+ cfg->iec60958_cfg->status[3]);
+ /* yes, this is correct: status[4] goes to CHST5 register */
+ hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5,
+ cfg->iec60958_cfg->status[4]);
+
+ /* set I2S parameters */
r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL);
- r = FLD_MOD(r, cfg->i2s_cfg.en_high_bitrate_aud, 7, 7);
r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6);
- r = FLD_MOD(r, cfg->i2s_cfg.cbit_order, 5, 5);
r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4);
- r = FLD_MOD(r, cfg->i2s_cfg.ws_polarity, 3, 3);
r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2);
r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1);
r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0);
hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL, r);
- r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_CHST5);
- r = FLD_MOD(r, cfg->freq_sample, 7, 4);
- r = FLD_MOD(r, cfg->i2s_cfg.word_length, 3, 1);
- r = FLD_MOD(r, cfg->i2s_cfg.word_max_length, 0, 0);
- hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5, r);
-
REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_IN_LEN,
cfg->i2s_cfg.in_length_bits, 3, 0);
@@ -1138,12 +1152,19 @@ void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2);
r = FLD_MOD(r, cfg->en_spdif, 1, 1);
hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_MODE, r);
+
+ /* Audio channel mappings */
+ /* TODO: Make channel mapping dynamic. For now, map channels
+ * in the ALSA order: FL/FR/RL/RR/C/LFE/SL/SR. Remapping is needed as
+ * HDMI speaker order is different. See CEA-861 Section 6.6.2.
+ */
+ hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_MAP, 0x78);
+ REG_FLD_MOD(av_base, HDMI_CORE_AV_SWAP_I2S, 1, 5, 5);
}
-void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
- struct hdmi_core_infoframe_audio *info_aud)
+static void ti_hdmi_4xxx_core_audio_infoframe_cfg(struct hdmi_ip_data *ip_data,
+ struct snd_cea_861_aud_if *info_aud)
{
- u8 val;
u8 sum = 0, checksum = 0;
void __iomem *av_base = hdmi_av_base(ip_data);
@@ -1157,24 +1178,23 @@ void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_LEN, 0x0a);
sum += 0x84 + 0x001 + 0x00a;
- val = (info_aud->db1_coding_type << 4)
- | (info_aud->db1_channel_count - 1);
- hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0), val);
- sum += val;
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0),
+ info_aud->db1_ct_cc);
+ sum += info_aud->db1_ct_cc;
- val = (info_aud->db2_sample_freq << 2) | info_aud->db2_sample_size;
- hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1), val);
- sum += val;
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1),
+ info_aud->db2_sf_ss);
+ sum += info_aud->db2_sf_ss;
- hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), 0x00);
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), info_aud->db3);
+ sum += info_aud->db3;
- val = info_aud->db4_channel_alloc;
- hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), val);
- sum += val;
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), info_aud->db4_ca);
+ sum += info_aud->db4_ca;
- val = (info_aud->db5_downmix_inh << 7) | (info_aud->db5_lsv << 3);
- hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4), val);
- sum += val;
+ hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4),
+ info_aud->db5_dminh_lsv);
+ sum += info_aud->db5_dminh_lsv;
hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(5), 0x00);
hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(6), 0x00);
@@ -1192,70 +1212,212 @@ void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
*/
}
-int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data,
- u32 sample_freq, u32 *n, u32 *cts)
+int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
+ struct omap_dss_audio *audio)
{
- u32 r;
- u32 deep_color = 0;
- u32 pclk = ip_data->cfg.timings.pixel_clock;
-
- if (n == NULL || cts == NULL)
+ struct hdmi_audio_format audio_format;
+ struct hdmi_audio_dma audio_dma;
+ struct hdmi_core_audio_config core;
+ int err, n, cts, channel_count;
+ unsigned int fs_nr;
+ bool word_length_16b = false;
+
+ if (!audio || !audio->iec || !audio->cea || !ip_data)
return -EINVAL;
+
+ core.iec60958_cfg = audio->iec;
/*
- * Obtain current deep color configuration. This needed
- * to calculate the TMDS clock based on the pixel clock.
+ * In the IEC-60958 status word, check if the audio sample word length
+	 * is 16-bit, as several optimizations can be performed in that case.
*/
- r = REG_GET(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, 1, 0);
- switch (r) {
- case 1: /* No deep color selected */
- deep_color = 100;
+ if (!(audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24))
+ if (audio->iec->status[4] & IEC958_AES4_CON_WORDLEN_20_16)
+ word_length_16b = true;
+
+	/* I2S configuration. See Philips' specification */
+ if (word_length_16b)
+ core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
+ else
+ core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
+ /*
+	 * The I2S input word length is twice the length given in the IEC-60958
+ * status word. If the word size is greater than
+ * 20 bits, increment by one.
+ */
+ core.i2s_cfg.in_length_bits = audio->iec->status[4]
+ & IEC958_AES4_CON_WORDLEN;
+ if (audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24)
+ core.i2s_cfg.in_length_bits++;
+ core.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
+ core.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
+ core.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
+ core.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
+
+ /* convert sample frequency to a number */
+ switch (audio->iec->status[3] & IEC958_AES3_CON_FS) {
+ case IEC958_AES3_CON_FS_32000:
+ fs_nr = 32000;
+ break;
+ case IEC958_AES3_CON_FS_44100:
+ fs_nr = 44100;
+ break;
+ case IEC958_AES3_CON_FS_48000:
+ fs_nr = 48000;
break;
- case 2: /* 10-bit deep color selected */
- deep_color = 125;
+ case IEC958_AES3_CON_FS_88200:
+ fs_nr = 88200;
break;
- case 3: /* 12-bit deep color selected */
- deep_color = 150;
+ case IEC958_AES3_CON_FS_96000:
+ fs_nr = 96000;
+ break;
+ case IEC958_AES3_CON_FS_176400:
+ fs_nr = 176400;
+ break;
+ case IEC958_AES3_CON_FS_192000:
+ fs_nr = 192000;
break;
default:
return -EINVAL;
}
- switch (sample_freq) {
- case 32000:
- if ((deep_color == 125) && ((pclk == 54054)
- || (pclk == 74250)))
- *n = 8192;
- else
- *n = 4096;
+	err = hdmi_compute_acr(fs_nr, &n, &cts);
+	if (err)
+		return err;
+
+ /* Audio clock regeneration settings */
+ core.n = n;
+ core.cts = cts;
+ if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
+ core.aud_par_busclk = 0;
+ core.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
+ core.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
+ } else {
+ core.aud_par_busclk = (((128 * 31) - 1) << 8);
+ core.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
+ core.use_mclk = true;
+ }
+
+ if (core.use_mclk)
+ core.mclk_mode = HDMI_AUDIO_MCLK_128FS;
+
+ /* Audio channels settings */
+ channel_count = (audio->cea->db1_ct_cc &
+ CEA861_AUDIO_INFOFRAME_DB1CC) + 1;
+
+ switch (channel_count) {
+ case 2:
+ audio_format.active_chnnls_msk = 0x03;
+ break;
+ case 3:
+ audio_format.active_chnnls_msk = 0x07;
+ break;
+ case 4:
+ audio_format.active_chnnls_msk = 0x0f;
+ break;
+ case 5:
+ audio_format.active_chnnls_msk = 0x1f;
break;
- case 44100:
- *n = 6272;
+ case 6:
+ audio_format.active_chnnls_msk = 0x3f;
break;
- case 48000:
- if ((deep_color == 125) && ((pclk == 54054)
- || (pclk == 74250)))
- *n = 8192;
- else
- *n = 6144;
+ case 7:
+ audio_format.active_chnnls_msk = 0x7f;
+ break;
+ case 8:
+ audio_format.active_chnnls_msk = 0xff;
break;
default:
- *n = 0;
return -EINVAL;
}
- /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
- *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
+ /*
+ * the HDMI IP needs to enable four stereo channels when transmitting
+ * more than 2 audio channels
+ */
+ if (channel_count == 2) {
+ audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
+ core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
+ core.layout = HDMI_AUDIO_LAYOUT_2CH;
+ } else {
+ audio_format.stereo_channels = HDMI_AUDIO_STEREO_FOURCHANNELS;
+ core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN |
+ HDMI_AUDIO_I2S_SD1_EN | HDMI_AUDIO_I2S_SD2_EN |
+ HDMI_AUDIO_I2S_SD3_EN;
+ core.layout = HDMI_AUDIO_LAYOUT_8CH;
+ }
+
+ core.en_spdif = false;
+ /* use sample frequency from channel status word */
+ core.fs_override = true;
+ /* enable ACR packets */
+ core.en_acr_pkt = true;
+ /* disable direct streaming digital audio */
+ core.en_dsd_audio = false;
+ /* use parallel audio interface */
+ core.en_parallel_aud_input = true;
+
+ /* DMA settings */
+ if (word_length_16b)
+ audio_dma.transfer_size = 0x10;
+ else
+ audio_dma.transfer_size = 0x20;
+ audio_dma.block_size = 0xC0;
+ audio_dma.mode = HDMI_AUDIO_TRANSF_DMA;
+ audio_dma.fifo_threshold = 0x20; /* in number of samples */
+
+ /* audio FIFO format settings */
+ if (word_length_16b) {
+ audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
+ audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
+ audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
+ } else {
+ audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
+ audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS;
+ audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
+ }
+ audio_format.type = HDMI_AUDIO_TYPE_LPCM;
+ audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
+ /* disable start/stop signals of IEC 60958 blocks */
+ audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_ON;
+
+	/* configure DMA and audio FIFO format */
+ ti_hdmi_4xxx_wp_audio_config_dma(ip_data, &audio_dma);
+ ti_hdmi_4xxx_wp_audio_config_format(ip_data, &audio_format);
+
+	/* configure the core */
+ ti_hdmi_4xxx_core_audio_config(ip_data, &core);
+
+	/* configure CEA 861 audio infoframe */
+ ti_hdmi_4xxx_core_audio_infoframe_cfg(ip_data, audio->cea);
return 0;
}
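
ti_hdmi_4xxx_audio_config() derives nearly all of its settings from the IEC-60958 channel status word and the CEA-861 audio infoframe supplied by the caller. The fragment below is a hypothetical illustration of how a caller might populate struct omap_dss_audio for 48 kHz, 16-bit stereo PCM using the standard ALSA IEC958_AES* definitions; the chosen values are assumptions made for the example, not taken from this patch.

	#include <sound/asound.h>
	#include <sound/asoundef.h>
	#include <video/omapdss.h>

	/* iec and cea are assumed to be zero-initialized by the caller */
	static void example_fill_audio(struct omap_dss_audio *audio,
			struct snd_aes_iec958 *iec,
			struct snd_cea_861_aud_if *cea)
	{
		/* consumer PCM, copying permitted, no pre-emphasis */
		iec->status[0] = IEC958_AES0_CON_NOT_COPYRIGHT;
		/* 48 kHz: maps to fs_nr = 48000 in the switch above */
		iec->status[3] = IEC958_AES3_CON_FS_48000;
		/* 16-bit words: makes word_length_16b true in the code above */
		iec->status[4] = IEC958_AES4_CON_WORDLEN_20_16;

		/* 2 channels (CC = count - 1), coding type/size from the stream */
		cea->db1_ct_cc = 1;
		cea->db2_sf_ss = 0;
		cea->db4_ca = 0;		/* FL/FR */
		cea->db5_dminh_lsv = 0;

		audio->iec = iec;
		audio->cea = cea;
	}
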
-void ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable)
+int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data)
+{
+ REG_FLD_MOD(hdmi_wp_base(ip_data),
+ HDMI_WP_AUDIO_CTRL, true, 31, 31);
+ return 0;
+}
+
+void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data)
+{
+ REG_FLD_MOD(hdmi_wp_base(ip_data),
+ HDMI_WP_AUDIO_CTRL, false, 31, 31);
+}
+
+int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data)
{
REG_FLD_MOD(hdmi_av_base(ip_data),
- HDMI_CORE_AV_AUD_MODE, enable, 0, 0);
+ HDMI_CORE_AV_AUD_MODE, true, 0, 0);
REG_FLD_MOD(hdmi_wp_base(ip_data),
- HDMI_WP_AUDIO_CTRL, enable, 31, 31);
+ HDMI_WP_AUDIO_CTRL, true, 30, 30);
+ return 0;
+}
+
+void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data)
+{
+ REG_FLD_MOD(hdmi_av_base(ip_data),
+ HDMI_CORE_AV_AUD_MODE, false, 0, 0);
REG_FLD_MOD(hdmi_wp_base(ip_data),
- HDMI_WP_AUDIO_CTRL, enable, 30, 30);
+ HDMI_WP_AUDIO_CTRL, false, 30, 30);
}
#endif
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
index a14d1a0..8366ae1 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
@@ -24,11 +24,6 @@
#include <linux/string.h>
#include <video/omapdss.h>
#include "ti_hdmi.h"
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-#include <sound/soc.h>
-#include <sound/pcm_params.h>
-#endif
/* HDMI Wrapper */
@@ -57,6 +52,13 @@
#define HDMI_CORE_SYS_SRST 0x14
#define HDMI_CORE_CTRL1 0x20
#define HDMI_CORE_SYS_SYS_STAT 0x24
+#define HDMI_CORE_SYS_DE_DLY 0xC8
+#define HDMI_CORE_SYS_DE_CTRL 0xCC
+#define HDMI_CORE_SYS_DE_TOP 0xD0
+#define HDMI_CORE_SYS_DE_CNTL 0xD8
+#define HDMI_CORE_SYS_DE_CNTH 0xDC
+#define HDMI_CORE_SYS_DE_LINL 0xE0
+#define HDMI_CORE_SYS_DE_LINH_1 0xE4
#define HDMI_CORE_SYS_VID_ACEN 0x124
#define HDMI_CORE_SYS_VID_MODE 0x128
#define HDMI_CORE_SYS_INTR_STATE 0x1C0
@@ -66,50 +68,24 @@
#define HDMI_CORE_SYS_INTR4 0x1D0
#define HDMI_CORE_SYS_UMASK1 0x1D4
#define HDMI_CORE_SYS_TMDS_CTRL 0x208
-#define HDMI_CORE_SYS_DE_DLY 0xC8
-#define HDMI_CORE_SYS_DE_CTRL 0xCC
-#define HDMI_CORE_SYS_DE_TOP 0xD0
-#define HDMI_CORE_SYS_DE_CNTL 0xD8
-#define HDMI_CORE_SYS_DE_CNTH 0xDC
-#define HDMI_CORE_SYS_DE_LINL 0xE0
-#define HDMI_CORE_SYS_DE_LINH_1 0xE4
+
#define HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC 0x1
#define HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC 0x1
-#define HDMI_CORE_CTRL1_BSEL_24BITBUS 0x1
+#define HDMI_CORE_CTRL1_BSEL_24BITBUS 0x1
#define HDMI_CORE_CTRL1_EDGE_RISINGEDGE 0x1
/* HDMI DDC E-DID */
-#define HDMI_CORE_DDC_CMD 0x3CC
-#define HDMI_CORE_DDC_STATUS 0x3C8
#define HDMI_CORE_DDC_ADDR 0x3B4
+#define HDMI_CORE_DDC_SEGM 0x3B8
#define HDMI_CORE_DDC_OFFSET 0x3BC
#define HDMI_CORE_DDC_COUNT1 0x3C0
#define HDMI_CORE_DDC_COUNT2 0x3C4
+#define HDMI_CORE_DDC_STATUS 0x3C8
+#define HDMI_CORE_DDC_CMD 0x3CC
#define HDMI_CORE_DDC_DATA 0x3D0
-#define HDMI_CORE_DDC_SEGM 0x3B8
/* HDMI IP Core Audio Video */
-#define HDMI_CORE_AV_HDMI_CTRL 0xBC
-#define HDMI_CORE_AV_DPD 0xF4
-#define HDMI_CORE_AV_PB_CTRL1 0xF8
-#define HDMI_CORE_AV_PB_CTRL2 0xFC
-#define HDMI_CORE_AV_AVI_TYPE 0x100
-#define HDMI_CORE_AV_AVI_VERS 0x104
-#define HDMI_CORE_AV_AVI_LEN 0x108
-#define HDMI_CORE_AV_AVI_CHSUM 0x10C
-#define HDMI_CORE_AV_AVI_DBYTE(n) (n * 4 + 0x110)
-#define HDMI_CORE_AV_AVI_DBYTE_NELEMS 15
-#define HDMI_CORE_AV_SPD_DBYTE(n) (n * 4 + 0x190)
-#define HDMI_CORE_AV_SPD_DBYTE_NELEMS 27
-#define HDMI_CORE_AV_AUD_DBYTE(n) (n * 4 + 0x210)
-#define HDMI_CORE_AV_AUD_DBYTE_NELEMS 10
-#define HDMI_CORE_AV_MPEG_DBYTE(n) (n * 4 + 0x290)
-#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS 27
-#define HDMI_CORE_AV_GEN_DBYTE(n) (n * 4 + 0x300)
-#define HDMI_CORE_AV_GEN_DBYTE_NELEMS 31
-#define HDMI_CORE_AV_GEN2_DBYTE(n) (n * 4 + 0x380)
-#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS 31
#define HDMI_CORE_AV_ACR_CTRL 0x4
#define HDMI_CORE_AV_FREQ_SVAL 0x8
#define HDMI_CORE_AV_N_SVAL1 0xC
@@ -148,25 +124,39 @@
#define HDMI_CORE_AV_AVI_VERS 0x104
#define HDMI_CORE_AV_AVI_LEN 0x108
#define HDMI_CORE_AV_AVI_CHSUM 0x10C
+#define HDMI_CORE_AV_AVI_DBYTE(n) (n * 4 + 0x110)
#define HDMI_CORE_AV_SPD_TYPE 0x180
#define HDMI_CORE_AV_SPD_VERS 0x184
#define HDMI_CORE_AV_SPD_LEN 0x188
#define HDMI_CORE_AV_SPD_CHSUM 0x18C
+#define HDMI_CORE_AV_SPD_DBYTE(n) (n * 4 + 0x190)
#define HDMI_CORE_AV_AUDIO_TYPE 0x200
#define HDMI_CORE_AV_AUDIO_VERS 0x204
#define HDMI_CORE_AV_AUDIO_LEN 0x208
#define HDMI_CORE_AV_AUDIO_CHSUM 0x20C
+#define HDMI_CORE_AV_AUD_DBYTE(n) (n * 4 + 0x210)
#define HDMI_CORE_AV_MPEG_TYPE 0x280
#define HDMI_CORE_AV_MPEG_VERS 0x284
#define HDMI_CORE_AV_MPEG_LEN 0x288
#define HDMI_CORE_AV_MPEG_CHSUM 0x28C
+#define HDMI_CORE_AV_MPEG_DBYTE(n) (n * 4 + 0x290)
+#define HDMI_CORE_AV_GEN_DBYTE(n) (n * 4 + 0x300)
#define HDMI_CORE_AV_CP_BYTE1 0x37C
+#define HDMI_CORE_AV_GEN2_DBYTE(n) (n * 4 + 0x380)
#define HDMI_CORE_AV_CEC_ADDR_ID 0x3FC
+
#define HDMI_CORE_AV_SPD_DBYTE_ELSIZE 0x4
#define HDMI_CORE_AV_GEN2_DBYTE_ELSIZE 0x4
#define HDMI_CORE_AV_MPEG_DBYTE_ELSIZE 0x4
#define HDMI_CORE_AV_GEN_DBYTE_ELSIZE 0x4
+#define HDMI_CORE_AV_AVI_DBYTE_NELEMS 15
+#define HDMI_CORE_AV_SPD_DBYTE_NELEMS 27
+#define HDMI_CORE_AV_AUD_DBYTE_NELEMS 10
+#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS 27
+#define HDMI_CORE_AV_GEN_DBYTE_NELEMS 31
+#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS 31
+
/* PLL */
#define PLLCTRL_PLL_CONTROL 0x0
@@ -284,35 +274,6 @@ enum hdmi_core_infoframe {
HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
HDMI_INFOFRAME_AVI_DB5PR_10 = 9,
- HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM = 0,
- HDMI_INFOFRAME_AUDIO_DB1CT_IEC60958 = 1,
- HDMI_INFOFRAME_AUDIO_DB1CT_AC3 = 2,
- HDMI_INFOFRAME_AUDIO_DB1CT_MPEG1 = 3,
- HDMI_INFOFRAME_AUDIO_DB1CT_MP3 = 4,
- HDMI_INFOFRAME_AUDIO_DB1CT_MPEG2_MULTICH = 5,
- HDMI_INFOFRAME_AUDIO_DB1CT_AAC = 6,
- HDMI_INFOFRAME_AUDIO_DB1CT_DTS = 7,
- HDMI_INFOFRAME_AUDIO_DB1CT_ATRAC = 8,
- HDMI_INFOFRAME_AUDIO_DB1CT_ONEBIT = 9,
- HDMI_INFOFRAME_AUDIO_DB1CT_DOLBY_DIGITAL_PLUS = 10,
- HDMI_INFOFRAME_AUDIO_DB1CT_DTS_HD = 11,
- HDMI_INFOFRAME_AUDIO_DB1CT_MAT = 12,
- HDMI_INFOFRAME_AUDIO_DB1CT_DST = 13,
- HDMI_INFOFRAME_AUDIO_DB1CT_WMA_PRO = 14,
- HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM = 0,
- HDMI_INFOFRAME_AUDIO_DB2SF_32000 = 1,
- HDMI_INFOFRAME_AUDIO_DB2SF_44100 = 2,
- HDMI_INFOFRAME_AUDIO_DB2SF_48000 = 3,
- HDMI_INFOFRAME_AUDIO_DB2SF_88200 = 4,
- HDMI_INFOFRAME_AUDIO_DB2SF_96000 = 5,
- HDMI_INFOFRAME_AUDIO_DB2SF_176400 = 6,
- HDMI_INFOFRAME_AUDIO_DB2SF_192000 = 7,
- HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM = 0,
- HDMI_INFOFRAME_AUDIO_DB2SS_16BIT = 1,
- HDMI_INFOFRAME_AUDIO_DB2SS_20BIT = 2,
- HDMI_INFOFRAME_AUDIO_DB2SS_24BIT = 3,
- HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PERMITTED = 0,
- HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PROHIBITED = 1
};
enum hdmi_packing_mode {
@@ -322,17 +283,6 @@ enum hdmi_packing_mode {
HDMI_PACK_ALREADYPACKED = 7
};
-enum hdmi_core_audio_sample_freq {
- HDMI_AUDIO_FS_32000 = 0x3,
- HDMI_AUDIO_FS_44100 = 0x0,
- HDMI_AUDIO_FS_48000 = 0x2,
- HDMI_AUDIO_FS_88200 = 0x8,
- HDMI_AUDIO_FS_96000 = 0xA,
- HDMI_AUDIO_FS_176400 = 0xC,
- HDMI_AUDIO_FS_192000 = 0xE,
- HDMI_AUDIO_FS_NOT_INDICATED = 0x1
-};
-
enum hdmi_core_audio_layout {
HDMI_AUDIO_LAYOUT_2CH = 0,
HDMI_AUDIO_LAYOUT_8CH = 1
@@ -387,37 +337,12 @@ enum hdmi_audio_blk_strt_end_sig {
};
enum hdmi_audio_i2s_config {
- HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT = 0,
- HDMI_AUDIO_I2S_WS_POLARIT_YLOW_IS_RIGHT = 1,
HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0,
HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1,
- HDMI_AUDIO_I2S_MAX_WORD_20BITS = 0,
- HDMI_AUDIO_I2S_MAX_WORD_24BITS = 1,
- HDMI_AUDIO_I2S_CHST_WORD_NOT_SPECIFIED = 0,
- HDMI_AUDIO_I2S_CHST_WORD_16_BITS = 1,
- HDMI_AUDIO_I2S_CHST_WORD_17_BITS = 6,
- HDMI_AUDIO_I2S_CHST_WORD_18_BITS = 2,
- HDMI_AUDIO_I2S_CHST_WORD_19_BITS = 4,
- HDMI_AUDIO_I2S_CHST_WORD_20_BITS_20MAX = 5,
- HDMI_AUDIO_I2S_CHST_WORD_20_BITS_24MAX = 1,
- HDMI_AUDIO_I2S_CHST_WORD_21_BITS = 6,
- HDMI_AUDIO_I2S_CHST_WORD_22_BITS = 2,
- HDMI_AUDIO_I2S_CHST_WORD_23_BITS = 4,
- HDMI_AUDIO_I2S_CHST_WORD_24_BITS = 5,
HDMI_AUDIO_I2S_SCK_EDGE_FALLING = 0,
HDMI_AUDIO_I2S_SCK_EDGE_RISING = 1,
HDMI_AUDIO_I2S_VBIT_FOR_PCM = 0,
HDMI_AUDIO_I2S_VBIT_FOR_COMPRESSED = 1,
- HDMI_AUDIO_I2S_INPUT_LENGTH_NA = 0,
- HDMI_AUDIO_I2S_INPUT_LENGTH_16 = 2,
- HDMI_AUDIO_I2S_INPUT_LENGTH_17 = 12,
- HDMI_AUDIO_I2S_INPUT_LENGTH_18 = 4,
- HDMI_AUDIO_I2S_INPUT_LENGTH_19 = 8,
- HDMI_AUDIO_I2S_INPUT_LENGTH_20 = 10,
- HDMI_AUDIO_I2S_INPUT_LENGTH_21 = 13,
- HDMI_AUDIO_I2S_INPUT_LENGTH_22 = 5,
- HDMI_AUDIO_I2S_INPUT_LENGTH_23 = 9,
- HDMI_AUDIO_I2S_INPUT_LENGTH_24 = 11,
HDMI_AUDIO_I2S_FIRST_BIT_SHIFT = 0,
HDMI_AUDIO_I2S_FIRST_BIT_NO_SHIFT = 1,
HDMI_AUDIO_I2S_SD0_EN = 1,
@@ -446,20 +371,6 @@ struct hdmi_core_video_config {
enum hdmi_core_tclkselclkmult tclk_sel_clkmult;
};
-/*
- * Refer to section 8.2 in HDMI 1.3 specification for
- * details about infoframe databytes
- */
-struct hdmi_core_infoframe_audio {
- u8 db1_coding_type;
- u8 db1_channel_count;
- u8 db2_sample_freq;
- u8 db2_sample_size;
- u8 db4_channel_alloc;
- bool db5_downmix_inh;
- u8 db5_lsv; /* Level shift values for downmix */
-};
-
struct hdmi_core_packet_enable_repeat {
u32 audio_pkt;
u32 audio_pkt_repeat;
@@ -496,15 +407,10 @@ struct hdmi_audio_dma {
};
struct hdmi_core_audio_i2s_config {
- u8 word_max_length;
- u8 word_length;
u8 in_length_bits;
u8 justification;
- u8 en_high_bitrate_aud;
u8 sck_edge_mode;
- u8 cbit_order;
u8 vbit;
- u8 ws_polarity;
u8 direction;
u8 shift;
u8 active_sds;
@@ -512,7 +418,7 @@ struct hdmi_core_audio_i2s_config {
struct hdmi_core_audio_config {
struct hdmi_core_audio_i2s_config i2s_cfg;
- enum hdmi_core_audio_sample_freq freq_sample;
+ struct snd_aes_iec958 *iec60958_cfg;
bool fs_override;
u32 n;
u32 cts;
@@ -527,17 +433,4 @@ struct hdmi_core_audio_config {
bool en_spdif;
};
-#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
- defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data,
- u32 sample_freq, u32 *n, u32 *cts);
-void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
- struct hdmi_core_infoframe_audio *info_aud);
-void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
- struct hdmi_core_audio_config *cfg);
-void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
- struct hdmi_audio_dma *aud_dma);
-void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
- struct hdmi_audio_format *aud_fmt);
-#endif
#endif
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 9c3daf7..3907c8b 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -402,7 +402,7 @@ static void venc_runtime_put(void)
DSSDBG("venc_runtime_put\n");
r = pm_runtime_put_sync(&venc.pdev->dev);
- WARN_ON(r < 0);
+ WARN_ON(r < 0 && r != -ENOSYS);
}
static const struct venc_config *venc_timings_to_config(
@@ -415,6 +415,7 @@ static const struct venc_config *venc_timings_to_config(
return &venc_config_ntsc_trm;
BUG();
+ return NULL;
}
static int venc_power_on(struct omap_dss_device *dssdev)
@@ -440,10 +441,11 @@ static int venc_power_on(struct omap_dss_device *dssdev)
venc_write_reg(VENC_OUTPUT_CONTROL, l);
- dispc_set_digit_size(dssdev->panel.timings.x_res,
- dssdev->panel.timings.y_res/2);
+ dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
- regulator_enable(venc.vdda_dac_reg);
+ r = regulator_enable(venc.vdda_dac_reg);
+ if (r)
+ goto err;
if (dssdev->platform_enable)
dssdev->platform_enable(dssdev);
@@ -485,16 +487,68 @@ unsigned long venc_get_pixel_clock(void)
return 13500000;
}
+static ssize_t display_output_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ const char *ret;
+
+ switch (dssdev->phy.venc.type) {
+ case OMAP_DSS_VENC_TYPE_COMPOSITE:
+ ret = "composite";
+ break;
+ case OMAP_DSS_VENC_TYPE_SVIDEO:
+ ret = "svideo";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ret);
+}
+
+static ssize_t display_output_type_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct omap_dss_device *dssdev = to_dss_device(dev);
+ enum omap_dss_venc_type new_type;
+
+ if (sysfs_streq("composite", buf))
+ new_type = OMAP_DSS_VENC_TYPE_COMPOSITE;
+ else if (sysfs_streq("svideo", buf))
+ new_type = OMAP_DSS_VENC_TYPE_SVIDEO;
+ else
+ return -EINVAL;
+
+ mutex_lock(&venc.venc_lock);
+
+ if (dssdev->phy.venc.type != new_type) {
+ dssdev->phy.venc.type = new_type;
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ venc_power_off(dssdev);
+ venc_power_on(dssdev);
+ }
+ }
+
+ mutex_unlock(&venc.venc_lock);
+
+ return size;
+}
+
+static DEVICE_ATTR(output_type, S_IRUGO | S_IWUSR,
+ display_output_type_show, display_output_type_store);
+
/* driver */
static int venc_panel_probe(struct omap_dss_device *dssdev)
{
dssdev->panel.timings = omap_dss_pal_timings;
- return 0;
+ return device_create_file(&dssdev->dev, &dev_attr_output_type);
}
static void venc_panel_remove(struct omap_dss_device *dssdev)
{
+ device_remove_file(&dssdev->dev, &dev_attr_output_type);
}
static int venc_panel_enable(struct omap_dss_device *dssdev)
@@ -577,12 +631,6 @@ static int venc_panel_resume(struct omap_dss_device *dssdev)
return venc_panel_enable(dssdev);
}
-static void venc_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- *timings = dssdev->panel.timings;
-}
-
static void venc_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
@@ -597,6 +645,8 @@ static void venc_set_timings(struct omap_dss_device *dssdev,
/* turn the venc off and on to get new timings to use */
venc_panel_disable(dssdev);
venc_panel_enable(dssdev);
+ } else {
+ dss_mgr_set_timings(dssdev->manager, timings);
}
}
@@ -661,7 +711,6 @@ static struct omap_dss_driver venc_driver = {
.get_resolution = omapdss_default_get_resolution,
.get_recommended_bpp = omapdss_default_get_recommended_bpp,
- .get_timings = venc_get_timings,
.set_timings = venc_set_timings,
.check_timings = venc_check_timings,
@@ -675,7 +724,7 @@ static struct omap_dss_driver venc_driver = {
};
/* driver end */
-int venc_init_display(struct omap_dss_device *dssdev)
+static int __init venc_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("init_display\n");
@@ -695,7 +744,7 @@ int venc_init_display(struct omap_dss_device *dssdev)
return 0;
}
-void venc_dump_regs(struct seq_file *s)
+static void venc_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r))
@@ -779,8 +828,32 @@ static void venc_put_clocks(void)
clk_put(venc.tv_dac_clk);
}
+static void __init venc_probe_pdata(struct platform_device *pdev)
+{
+ struct omap_dss_board_info *pdata = pdev->dev.platform_data;
+ int r, i;
+
+ for (i = 0; i < pdata->num_devices; ++i) {
+ struct omap_dss_device *dssdev = pdata->devices[i];
+
+ if (dssdev->type != OMAP_DISPLAY_TYPE_VENC)
+ continue;
+
+ r = venc_init_display(dssdev);
+ if (r) {
+ DSSERR("device %s init failed: %d\n", dssdev->name, r);
+ continue;
+ }
+
+ r = omap_dss_register_device(dssdev, &pdev->dev, i);
+ if (r)
+ DSSERR("device %s register failed: %d\n",
+ dssdev->name, r);
+ }
+}
+
/* VENC HW IP initialisation */
-static int omap_venchw_probe(struct platform_device *pdev)
+static int __init omap_venchw_probe(struct platform_device *pdev)
{
u8 rev_id;
struct resource *venc_mem;
@@ -824,6 +897,10 @@ static int omap_venchw_probe(struct platform_device *pdev)
if (r)
goto err_reg_panel_driver;
+ dss_debugfs_create_file("venc", venc_dump_regs);
+
+ venc_probe_pdata(pdev);
+
return 0;
err_reg_panel_driver:
@@ -833,12 +910,15 @@ err_runtime_get:
return r;
}
-static int omap_venchw_remove(struct platform_device *pdev)
+static int __exit omap_venchw_remove(struct platform_device *pdev)
{
+ omap_dss_unregister_child_devices(&pdev->dev);
+
if (venc.vdda_dac_reg != NULL) {
regulator_put(venc.vdda_dac_reg);
venc.vdda_dac_reg = NULL;
}
+
omap_dss_unregister_driver(&venc_driver);
pm_runtime_disable(&pdev->dev);
@@ -853,7 +933,6 @@ static int venc_runtime_suspend(struct device *dev)
clk_disable(venc.tv_dac_clk);
dispc_runtime_put();
- dss_runtime_put();
return 0;
}
@@ -862,23 +941,14 @@ static int venc_runtime_resume(struct device *dev)
{
int r;
- r = dss_runtime_get();
- if (r < 0)
- goto err_get_dss;
-
r = dispc_runtime_get();
if (r < 0)
- goto err_get_dispc;
+ return r;
if (venc.tv_dac_clk)
clk_enable(venc.tv_dac_clk);
return 0;
-
-err_get_dispc:
- dss_runtime_put();
-err_get_dss:
- return r;
}
static const struct dev_pm_ops venc_pm_ops = {
@@ -887,8 +957,7 @@ static const struct dev_pm_ops venc_pm_ops = {
};
static struct platform_driver omap_venchw_driver = {
- .probe = omap_venchw_probe,
- .remove = omap_venchw_remove,
+ .remove = __exit_p(omap_venchw_remove),
.driver = {
.name = "omapdss_venc",
.owner = THIS_MODULE,
@@ -896,18 +965,18 @@ static struct platform_driver omap_venchw_driver = {
},
};
-int venc_init_platform_driver(void)
+int __init venc_init_platform_driver(void)
{
if (cpu_is_omap44xx())
return 0;
- return platform_driver_register(&omap_venchw_driver);
+ return platform_driver_probe(&omap_venchw_driver, omap_venchw_probe);
}
-void venc_uninit_platform_driver(void)
+void __exit venc_uninit_platform_driver(void)
{
if (cpu_is_omap44xx())
return;
- return platform_driver_unregister(&omap_venchw_driver);
+ platform_driver_unregister(&omap_venchw_driver);
}
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index 6a09ef8..c6cf372 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -70,7 +70,7 @@ static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
DBG("omapfb_setup_plane\n");
- if (ofbi->num_overlays != 1) {
+ if (ofbi->num_overlays == 0) {
r = -EINVAL;
goto out;
}
@@ -185,7 +185,7 @@ static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
{
struct omapfb_info *ofbi = FB2OFB(fbi);
- if (ofbi->num_overlays != 1) {
+ if (ofbi->num_overlays == 0) {
memset(pi, 0, sizeof(*pi));
} else {
struct omap_overlay *ovl;
@@ -225,6 +225,9 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
down_write_nested(&rg->lock, rg->id);
atomic_inc(&rg->lock_count);
+ if (rg->size == size && rg->type == mi->type)
+ goto out;
+
if (atomic_read(&rg->map_count)) {
r = -EBUSY;
goto out;
@@ -247,12 +250,10 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
}
}
- if (rg->size != size || rg->type != mi->type) {
- r = omapfb_realloc_fbmem(fbi, size, mi->type);
- if (r) {
- dev_err(fbdev->dev, "realloc fbmem failed\n");
- goto out;
- }
+ r = omapfb_realloc_fbmem(fbi, size, mi->type);
+ if (r) {
+ dev_err(fbdev->dev, "realloc fbmem failed\n");
+ goto out;
}
out:
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index b00db40..3450ea0 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -179,6 +179,7 @@ static unsigned omapfb_get_vrfb_offset(const struct omapfb_info *ofbi, int rot)
break;
default:
BUG();
+ return 0;
}
offset *= vrfb->bytespp;
@@ -1502,7 +1503,7 @@ static int omapfb_parse_vram_param(const char *param, int max_entries,
fbnum = simple_strtoul(p, &p, 10);
- if (p == param)
+ if (p == start)
return -EINVAL;
if (*p != ':')
@@ -2307,7 +2308,7 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
return 0;
}
-static int omapfb_probe(struct platform_device *pdev)
+static int __init omapfb_probe(struct platform_device *pdev)
{
struct omapfb2_device *fbdev = NULL;
int r = 0;
@@ -2448,7 +2449,7 @@ err0:
return r;
}
-static int omapfb_remove(struct platform_device *pdev)
+static int __exit omapfb_remove(struct platform_device *pdev)
{
struct omapfb2_device *fbdev = platform_get_drvdata(pdev);
@@ -2462,8 +2463,7 @@ static int omapfb_remove(struct platform_device *pdev)
}
static struct platform_driver omapfb_driver = {
- .probe = omapfb_probe,
- .remove = omapfb_remove,
+ .remove = __exit_p(omapfb_remove),
.driver = {
.name = "omapfb",
.owner = THIS_MODULE,
@@ -2474,7 +2474,7 @@ static int __init omapfb_init(void)
{
DBG("omapfb_init\n");
- if (platform_driver_register(&omapfb_driver)) {
+ if (platform_driver_probe(&omapfb_driver, omapfb_probe)) {
printk(KERN_ERR "failed to register omapfb driver\n");
return -ENODEV;
}
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
index c0bdc9b..30361a0 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -166,6 +166,7 @@ static inline struct omapfb_display_data *get_display_data(
/* This should never happen */
BUG();
+ return NULL;
}
static inline void omapfb_lock(struct omapfb2_device *fbdev)
diff --git a/drivers/video/omap2/vrfb.c b/drivers/video/omap2/vrfb.c
index 4e5b960..7e99022 100644
--- a/drivers/video/omap2/vrfb.c
+++ b/drivers/video/omap2/vrfb.c
@@ -179,8 +179,10 @@ void omap_vrfb_setup(struct vrfb *vrfb, unsigned long paddr,
pixel_size_exp = 2;
else if (bytespp == 2)
pixel_size_exp = 1;
- else
+ else {
BUG();
+ return;
+ }
vrfb_width = ALIGN(width * bytespp, VRFB_PAGE_WIDTH) / bytespp;
vrfb_height = ALIGN(height, VRFB_PAGE_HEIGHT);
diff --git a/drivers/video/pxa3xx-gcu.c b/drivers/video/pxa3xx-gcu.c
index 1d71c08..0b4ae0c 100644
--- a/drivers/video/pxa3xx-gcu.c
+++ b/drivers/video/pxa3xx-gcu.c
@@ -316,12 +316,9 @@ pxa3xx_gcu_wait_idle(struct pxa3xx_gcu_priv *priv)
ret = wait_event_interruptible_timeout(priv->wait_idle,
!priv->shared->hw_running, HZ*4);
- if (ret < 0)
+ if (ret != 0)
break;
- if (ret > 0)
- continue;
-
if (gc_readl(priv, REG_GCRBEXHR) == rbexhr &&
priv->shared->num_interrupts == num) {
QERROR("TIMEOUT");
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index e5331fc..69bf9d0 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -47,7 +47,7 @@
#ifdef CONFIG_FB_S3C_DEBUG_REGWRITE
#undef writel
#define writel(v, r) do { \
- printk(KERN_DEBUG "%s: %08x => %p\n", __func__, (unsigned int)v, r); \
+ pr_debug("%s: %08x => %p\n", __func__, (unsigned int)v, r); \
__raw_writel(v, r); \
} while (0)
#endif /* FB_S3C_DEBUG_REGWRITE */
@@ -361,7 +361,7 @@ static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk)
result = (unsigned int)tmp / 1000;
dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n",
- pixclk, clk, result, clk / result);
+ pixclk, clk, result, result ? clk / result : clk);
return result;
}
@@ -495,7 +495,6 @@ static int s3c_fb_set_par(struct fb_info *info)
u32 alpha = 0;
u32 data;
u32 pagewidth;
- int clkdiv;
dev_dbg(sfb->dev, "setting framebuffer parameters\n");
@@ -532,48 +531,9 @@ static int s3c_fb_set_par(struct fb_info *info)
/* disable the window whilst we update it */
writel(0, regs + WINCON(win_no));
- /* use platform specified window as the basis for the lcd timings */
-
- if (win_no == sfb->pdata->default_win) {
- clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock);
-
- data = sfb->pdata->vidcon0;
- data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
-
- if (clkdiv > 1)
- data |= VIDCON0_CLKVAL_F(clkdiv-1) | VIDCON0_CLKDIR;
- else
- data &= ~VIDCON0_CLKDIR; /* 1:1 clock */
-
- /* write the timing data to the panel */
-
- if (sfb->variant.is_2443)
- data |= (1 << 5);
-
- writel(data, regs + VIDCON0);
-
+ if (!sfb->output_on)
s3c_fb_enable(sfb, 1);
- data = VIDTCON0_VBPD(var->upper_margin - 1) |
- VIDTCON0_VFPD(var->lower_margin - 1) |
- VIDTCON0_VSPW(var->vsync_len - 1);
-
- writel(data, regs + sfb->variant.vidtcon);
-
- data = VIDTCON1_HBPD(var->left_margin - 1) |
- VIDTCON1_HFPD(var->right_margin - 1) |
- VIDTCON1_HSPW(var->hsync_len - 1);
-
- /* VIDTCON1 */
- writel(data, regs + sfb->variant.vidtcon + 4);
-
- data = VIDTCON2_LINEVAL(var->yres - 1) |
- VIDTCON2_HOZVAL(var->xres - 1) |
- VIDTCON2_LINEVAL_E(var->yres - 1) |
- VIDTCON2_HOZVAL_E(var->xres - 1);
- writel(data, regs + sfb->variant.vidtcon + 8);
- }
-
/* write the buffer address */
/* start and end registers stride is 8 */
@@ -839,6 +799,7 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
struct s3c_fb *sfb = win->parent;
unsigned int index = win->index;
u32 wincon;
+ u32 output_on = sfb->output_on;
dev_dbg(sfb->dev, "blank mode %d\n", blank_mode);
@@ -877,34 +838,18 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
shadow_protect_win(win, 1);
writel(wincon, sfb->regs + sfb->variant.wincon + (index * 4));
- shadow_protect_win(win, 0);
/* Check the enabled state to see if we need to be running the
* main LCD interface, as if there are no active windows then
* it is highly likely that we also do not need to output
* anything.
*/
-
- /* We could do something like the following code, but the current
- * system of using framebuffer events means that we cannot make
- * the distinction between just window 0 being inactive and all
- * the windows being down.
- *
- * s3c_fb_enable(sfb, sfb->enabled ? 1 : 0);
- */
-
- /* we're stuck with this until we can do something about overriding
- * the power control using the blanking event for a single fb.
- */
- if (index == sfb->pdata->default_win) {
- shadow_protect_win(win, 1);
- s3c_fb_enable(sfb, blank_mode != FB_BLANK_POWERDOWN ? 1 : 0);
- shadow_protect_win(win, 0);
- }
+ s3c_fb_enable(sfb, sfb->enabled ? 1 : 0);
+ shadow_protect_win(win, 0);
pm_runtime_put_sync(sfb->dev);
- return 0;
+ return output_on == sfb->output_on;
}
/**
@@ -1111,7 +1056,7 @@ static struct fb_ops s3c_fb_ops = {
*
* Calculate the pixel clock when none has been given through platform data.
*/
-static void __devinit s3c_fb_missing_pixclock(struct fb_videomode *mode)
+static void s3c_fb_missing_pixclock(struct fb_videomode *mode)
{
u64 pixclk = 1000000000000ULL;
u32 div;
@@ -1144,11 +1089,11 @@ static int __devinit s3c_fb_alloc_memory(struct s3c_fb *sfb,
dev_dbg(sfb->dev, "allocating memory for display\n");
- real_size = windata->win_mode.xres * windata->win_mode.yres;
+ real_size = windata->xres * windata->yres;
virt_size = windata->virtual_x * windata->virtual_y;
dev_dbg(sfb->dev, "real_size=%u (%u.%u), virt_size=%u (%u.%u)\n",
- real_size, windata->win_mode.xres, windata->win_mode.yres,
+ real_size, windata->xres, windata->yres,
virt_size, windata->virtual_x, windata->virtual_y);
size = (real_size > virt_size) ? real_size : virt_size;
@@ -1230,7 +1175,7 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
struct s3c_fb_win **res)
{
struct fb_var_screeninfo *var;
- struct fb_videomode *initmode;
+ struct fb_videomode initmode;
struct s3c_fb_pd_win *windata;
struct s3c_fb_win *win;
struct fb_info *fbinfo;
@@ -1251,11 +1196,11 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
}
windata = sfb->pdata->win[win_no];
- initmode = &windata->win_mode;
+ initmode = *sfb->pdata->vtiming;
WARN_ON(windata->max_bpp == 0);
- WARN_ON(windata->win_mode.xres == 0);
- WARN_ON(windata->win_mode.yres == 0);
+ WARN_ON(windata->xres == 0);
+ WARN_ON(windata->yres == 0);
win = fbinfo->par;
*res = win;
@@ -1294,7 +1239,9 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
}
/* setup the initial video mode from the window */
- fb_videomode_to_var(&fbinfo->var, initmode);
+ initmode.xres = windata->xres;
+ initmode.yres = windata->yres;
+ fb_videomode_to_var(&fbinfo->var, &initmode);
fbinfo->fix.type = FB_TYPE_PACKED_PIXELS;
fbinfo->fix.accel = FB_ACCEL_NONE;
@@ -1339,6 +1286,53 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
}
/**
+ * s3c_fb_set_rgb_timing() - set video timing for the RGB interface.
+ * @sfb: The base resources for the hardware.
+ *
+ * Set the horizontal and vertical LCD RGB interface timing.
+ */
+static void s3c_fb_set_rgb_timing(struct s3c_fb *sfb)
+{
+ struct fb_videomode *vmode = sfb->pdata->vtiming;
+ void __iomem *regs = sfb->regs;
+ int clkdiv;
+ u32 data;
+
+ if (!vmode->pixclock)
+ s3c_fb_missing_pixclock(vmode);
+
+ clkdiv = s3c_fb_calc_pixclk(sfb, vmode->pixclock);
+
+ data = sfb->pdata->vidcon0;
+ data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
+
+ if (clkdiv > 1)
+ data |= VIDCON0_CLKVAL_F(clkdiv-1) | VIDCON0_CLKDIR;
+ else
+ data &= ~VIDCON0_CLKDIR; /* 1:1 clock */
+
+ if (sfb->variant.is_2443)
+ data |= (1 << 5);
+ writel(data, regs + VIDCON0);
+
+ data = VIDTCON0_VBPD(vmode->upper_margin - 1) |
+ VIDTCON0_VFPD(vmode->lower_margin - 1) |
+ VIDTCON0_VSPW(vmode->vsync_len - 1);
+ writel(data, regs + sfb->variant.vidtcon);
+
+ data = VIDTCON1_HBPD(vmode->left_margin - 1) |
+ VIDTCON1_HFPD(vmode->right_margin - 1) |
+ VIDTCON1_HSPW(vmode->hsync_len - 1);
+ writel(data, regs + sfb->variant.vidtcon + 4);
+
+ data = VIDTCON2_LINEVAL(vmode->yres - 1) |
+ VIDTCON2_HOZVAL(vmode->xres - 1) |
+ VIDTCON2_LINEVAL_E(vmode->yres - 1) |
+ VIDTCON2_HOZVAL_E(vmode->xres - 1);
+ writel(data, regs + sfb->variant.vidtcon + 8);
+}
+
+/**
* s3c_fb_clear_win() - clear hardware window registers.
* @sfb: The base resources for the hardware.
* @win: The window to process.
@@ -1354,8 +1348,14 @@ static void s3c_fb_clear_win(struct s3c_fb *sfb, int win)
writel(0, regs + VIDOSD_A(win, sfb->variant));
writel(0, regs + VIDOSD_B(win, sfb->variant));
writel(0, regs + VIDOSD_C(win, sfb->variant));
- reg = readl(regs + SHADOWCON);
- writel(reg & ~SHADOWCON_WINx_PROTECT(win), regs + SHADOWCON);
+
+ if (sfb->variant.has_shadowcon) {
+ reg = readl(sfb->regs + SHADOWCON);
+ reg &= ~(SHADOWCON_WINx_PROTECT(win) |
+ SHADOWCON_CHx_ENABLE(win) |
+ SHADOWCON_CHx_LOCAL_ENABLE(win));
+ writel(reg, sfb->regs + SHADOWCON);
+ }
}
static int __devinit s3c_fb_probe(struct platform_device *pdev)
@@ -1481,15 +1481,14 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
writel(0xffffff, regs + WKEYCON1);
}
+ s3c_fb_set_rgb_timing(sfb);
+
/* we have the register setup, start allocating framebuffers */
for (win = 0; win < fbdrv->variant.nr_windows; win++) {
if (!pd->win[win])
continue;
- if (!pd->win[win]->win_mode.pixclock)
- s3c_fb_missing_pixclock(&pd->win[win]->win_mode);
-
ret = s3c_fb_probe_win(sfb, win, fbdrv->win[win],
&sfb->windows[win]);
if (ret < 0) {
@@ -1564,6 +1563,8 @@ static int s3c_fb_suspend(struct device *dev)
struct s3c_fb_win *win;
int win_no;
+ pm_runtime_get_sync(sfb->dev);
+
for (win_no = S3C_FB_MAX_WIN - 1; win_no >= 0; win_no--) {
win = sfb->windows[win_no];
if (!win)
@@ -1577,6 +1578,9 @@ static int s3c_fb_suspend(struct device *dev)
clk_disable(sfb->lcd_clk);
clk_disable(sfb->bus_clk);
+
+ pm_runtime_put_sync(sfb->dev);
+
return 0;
}
@@ -1589,6 +1593,8 @@ static int s3c_fb_resume(struct device *dev)
int win_no;
u32 reg;
+ pm_runtime_get_sync(sfb->dev);
+
clk_enable(sfb->bus_clk);
if (!sfb->variant.has_clksel)
@@ -1623,6 +1629,8 @@ static int s3c_fb_resume(struct device *dev)
shadow_protect_win(win, 0);
}
+ s3c_fb_set_rgb_timing(sfb);
+
/* restore framebuffers */
for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) {
win = sfb->windows[win_no];
@@ -1633,6 +1641,8 @@ static int s3c_fb_resume(struct device *dev)
s3c_fb_set_par(win->fbinfo);
}
+ pm_runtime_put_sync(sfb->dev);
+
return 0;
}
#endif
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index b4c2e21..0d0f52c 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -1351,7 +1351,7 @@ static void savagefb_set_par_int(struct savagefb_par *par, struct savage_reg *r
/* following part not present in X11 driver */
cr67 = vga_in8(0x3d5, par) & 0xf;
vga_out8(0x3d5, 0x50 | cr67, par);
- udelay(10000);
+ mdelay(10);
vga_out8(0x3d4, 0x67, par);
/* end of part */
vga_out8(0x3d5, reg->CR67 & ~0x0c, par);
@@ -1904,11 +1904,11 @@ static int savage_init_hw(struct savagefb_par *par)
vga_out8(0x3d4, 0x66, par);
cr66 = vga_in8(0x3d5, par);
vga_out8(0x3d5, cr66 | 0x02, par);
- udelay(10000);
+ mdelay(10);
vga_out8(0x3d4, 0x66, par);
vga_out8(0x3d5, cr66 & ~0x02, par); /* clear reset flag */
- udelay(10000);
+ mdelay(10);
/*
@@ -1918,11 +1918,11 @@ static int savage_init_hw(struct savagefb_par *par)
vga_out8(0x3d4, 0x3f, par);
cr3f = vga_in8(0x3d5, par);
vga_out8(0x3d5, cr3f | 0x08, par);
- udelay(10000);
+ mdelay(10);
vga_out8(0x3d4, 0x3f, par);
vga_out8(0x3d5, cr3f & ~0x08, par); /* clear reset flags */
- udelay(10000);
+ mdelay(10);
/* Savage ramdac speeds */
par->numClocks = 4;
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index eafb19d..930e550 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -31,6 +31,7 @@
#include "sh_mobile_lcdcfb.h"
+/* HDMI Core Control Register (HTOP0) */
#define HDMI_SYSTEM_CTRL 0x00 /* System control */
#define HDMI_L_R_DATA_SWAP_CTRL_RPKT 0x01 /* L/R data swap control,
bits 19..16 of 20-bit N for Audio Clock Regeneration packet */
@@ -201,6 +202,68 @@
#define HDMI_REVISION_ID 0xF1 /* Revision ID */
#define HDMI_TEST_MODE 0xFE /* Test mode */
+/* HDMI Control Register (HTOP1) */
+#define HDMI_HTOP1_TEST_MODE 0x0000 /* Test mode */
+#define HDMI_HTOP1_VIDEO_INPUT 0x0008 /* VideoInput */
+#define HDMI_HTOP1_CORE_RSTN 0x000C /* CoreResetn */
+#define HDMI_HTOP1_PLLBW 0x0018 /* PLLBW */
+#define HDMI_HTOP1_CLK_TO_PHY 0x001C /* Clk to Phy */
+#define HDMI_HTOP1_VIDEO_INPUT2 0x0020 /* VideoInput2 */
+#define HDMI_HTOP1_TISEMP0_1 0x0024 /* tisemp0-1 */
+#define HDMI_HTOP1_TISEMP2_C 0x0028 /* tisemp2-c */
+#define HDMI_HTOP1_TISIDRV 0x002C /* tisidrv */
+#define HDMI_HTOP1_TISEN 0x0034 /* tisen */
+#define HDMI_HTOP1_TISDREN 0x0038 /* tisdren */
+#define HDMI_HTOP1_CISRANGE 0x003C /* cisrange */
+#define HDMI_HTOP1_ENABLE_SELECTOR 0x0040 /* Enable Selector */
+#define HDMI_HTOP1_MACRO_RESET 0x0044 /* Macro reset */
+#define HDMI_HTOP1_PLL_CALIBRATION 0x0048 /* PLL calibration */
+#define HDMI_HTOP1_RE_CALIBRATION 0x004C /* Re-calibration */
+#define HDMI_HTOP1_CURRENT 0x0050 /* Current */
+#define HDMI_HTOP1_PLL_LOCK_DETECT 0x0054 /* PLL lock detect */
+#define HDMI_HTOP1_PHY_TEST_MODE 0x0058 /* PHY Test Mode */
+#define HDMI_HTOP1_CLK_SET 0x0080 /* Clock Set */
+#define HDMI_HTOP1_DDC_FAIL_SAFE 0x0084 /* DDC fail safe */
+#define HDMI_HTOP1_PRBS 0x0088 /* PRBS */
+#define HDMI_HTOP1_EDID_AINC_CONTROL 0x008C /* EDID ainc Control */
+#define HDMI_HTOP1_HTOP_DCL_MODE		0x00FC /* Deep Color Mode */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF0 0x0100 /* Deep Color:FRC COEF0 */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF1 0x0104 /* Deep Color:FRC COEF1 */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF2 0x0108 /* Deep Color:FRC COEF2 */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF3 0x010C /* Deep Color:FRC COEF3 */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF0_C 0x0110 /* Deep Color:FRC COEF0C */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF1_C 0x0114 /* Deep Color:FRC COEF1C */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF2_C 0x0118 /* Deep Color:FRC COEF2C */
+#define HDMI_HTOP1_HTOP_DCL_FRC_COEF3_C 0x011C /* Deep Color:FRC COEF3C */
+#define HDMI_HTOP1_HTOP_DCL_FRC_MODE 0x0120 /* Deep Color:FRC Mode */
+#define HDMI_HTOP1_HTOP_DCL_RECT_START1 0x0124 /* Deep Color:Rect Start1 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE1 0x0128 /* Deep Color:Rect Size1 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_START2 0x012C /* Deep Color:Rect Start2 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE2 0x0130 /* Deep Color:Rect Size2 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_START3 0x0134 /* Deep Color:Rect Start3 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE3 0x0138 /* Deep Color:Rect Size3 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_START4 0x013C /* Deep Color:Rect Start4 */
+#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE4 0x0140 /* Deep Color:Rect Size4 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_1 0x0144 /* Deep Color:Fil Para Y1_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_2 0x0148 /* Deep Color:Fil Para Y1_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_1 0x014C /* Deep Color:Fil Para CB1_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_2 0x0150 /* Deep Color:Fil Para CB1_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_1 0x0154 /* Deep Color:Fil Para CR1_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_2 0x0158 /* Deep Color:Fil Para CR1_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_1 0x015C /* Deep Color:Fil Para Y2_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_2 0x0160 /* Deep Color:Fil Para Y2_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_1 0x0164 /* Deep Color:Fil Para CB2_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_2 0x0168 /* Deep Color:Fil Para CB2_2 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_1 0x016C /* Deep Color:Fil Para CR2_1 */
+#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_2 0x0170 /* Deep Color:Fil Para CR2_2 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_Y1 0x0174 /* Deep Color:Cor Para Y1 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CB1 0x0178 /* Deep Color:Cor Para CB1 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CR1 0x017C /* Deep Color:Cor Para CR1 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_Y2 0x0180 /* Deep Color:Cor Para Y2 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CB2 0x0184 /* Deep Color:Cor Para CB2 */
+#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CR2 0x0188 /* Deep Color:Cor Para CR2 */
+#define HDMI_HTOP1_EDID_DATA_READ 0x0200 /* EDID Data Read 128Byte:0x03FC */
+
enum hotplug_state {
HDMI_HOTPLUG_DISCONNECTED,
HDMI_HOTPLUG_CONNECTED,
@@ -211,6 +274,7 @@ struct sh_hdmi {
struct sh_mobile_lcdc_entity entity;
void __iomem *base;
+ void __iomem *htop1;
enum hotplug_state hp_state; /* hot-plug status */
u8 preprogrammed_vic; /* use a pre-programmed VIC or
the external mode */
@@ -222,20 +286,66 @@ struct sh_hdmi {
struct delayed_work edid_work;
struct fb_videomode mode;
struct fb_monspecs monspec;
+
+ /* register access functions */
+ void (*write)(struct sh_hdmi *hdmi, u8 data, u8 reg);
+ u8 (*read)(struct sh_hdmi *hdmi, u8 reg);
};
#define entity_to_sh_hdmi(e) container_of(e, struct sh_hdmi, entity)
-static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg)
+static void __hdmi_write8(struct sh_hdmi *hdmi, u8 data, u8 reg)
{
iowrite8(data, hdmi->base + reg);
}
-static u8 hdmi_read(struct sh_hdmi *hdmi, u8 reg)
+static u8 __hdmi_read8(struct sh_hdmi *hdmi, u8 reg)
{
return ioread8(hdmi->base + reg);
}
+static void __hdmi_write32(struct sh_hdmi *hdmi, u8 data, u8 reg)
+{
+ iowrite32((u32)data, hdmi->base + (reg * 4));
+ udelay(100);
+}
+
+static u8 __hdmi_read32(struct sh_hdmi *hdmi, u8 reg)
+{
+ return (u8)ioread32(hdmi->base + (reg * 4));
+}
+
+static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg)
+{
+ hdmi->write(hdmi, data, reg);
+}
+
+static u8 hdmi_read(struct sh_hdmi *hdmi, u8 reg)
+{
+ return hdmi->read(hdmi, reg);
+}
+
+static void hdmi_bit_set(struct sh_hdmi *hdmi, u8 mask, u8 data, u8 reg)
+{
+ u8 val = hdmi_read(hdmi, reg);
+
+ val &= ~mask;
+ val |= (data & mask);
+
+ hdmi_write(hdmi, val, reg);
+}
+
+static void hdmi_htop1_write(struct sh_hdmi *hdmi, u32 data, u32 reg)
+{
+ iowrite32(data, hdmi->htop1 + reg);
+ udelay(100);
+}
+
+static u32 hdmi_htop1_read(struct sh_hdmi *hdmi, u32 reg)
+{
+ return ioread32(hdmi->htop1 + reg);
+}
+
/*
* HDMI sound
*/
@@ -693,11 +803,11 @@ static void sh_hdmi_configure(struct sh_hdmi *hdmi)
msleep(10);
/* PS mode b->d, reset PLLA and PLLB */
- hdmi_write(hdmi, 0x4C, HDMI_SYSTEM_CTRL);
+ hdmi_bit_set(hdmi, 0xFC, 0x4C, HDMI_SYSTEM_CTRL);
udelay(10);
- hdmi_write(hdmi, 0x40, HDMI_SYSTEM_CTRL);
+ hdmi_bit_set(hdmi, 0xFC, 0x40, HDMI_SYSTEM_CTRL);
}
static unsigned long sh_hdmi_rate_error(struct sh_hdmi *hdmi,
@@ -746,7 +856,9 @@ static int sh_hdmi_read_edid(struct sh_hdmi *hdmi, unsigned long *hdmi_rate,
/* Read EDID */
dev_dbg(hdmi->dev, "Read back EDID code:");
for (i = 0; i < 128; i++) {
- edid[i] = hdmi_read(hdmi, HDMI_EDID_KSV_FIFO_ACCESS_WINDOW);
+ edid[i] = (hdmi->htop1) ?
+ (u8)hdmi_htop1_read(hdmi, HDMI_HTOP1_EDID_DATA_READ + (i * 4)) :
+ hdmi_read(hdmi, HDMI_EDID_KSV_FIFO_ACCESS_WINDOW);
#ifdef DEBUG
if ((i % 16) == 0) {
printk(KERN_CONT "\n");
@@ -917,13 +1029,13 @@ static irqreturn_t sh_hdmi_hotplug(int irq, void *dev_id)
u8 status1, status2, mask1, mask2;
/* mode_b and PLLA and PLLB reset */
- hdmi_write(hdmi, 0x2C, HDMI_SYSTEM_CTRL);
+ hdmi_bit_set(hdmi, 0xFC, 0x2C, HDMI_SYSTEM_CTRL);
/* How long shall reset be held? */
udelay(10);
/* mode_b and PLLA and PLLB reset release */
- hdmi_write(hdmi, 0x20, HDMI_SYSTEM_CTRL);
+ hdmi_bit_set(hdmi, 0xFC, 0x20, HDMI_SYSTEM_CTRL);
status1 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_1);
status2 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_2);
@@ -1001,7 +1113,7 @@ static int sh_hdmi_display_on(struct sh_mobile_lcdc_entity *entity)
*/
if (hdmi->hp_state == HDMI_HOTPLUG_EDID_DONE) {
/* PS mode d->e. All functions are active */
- hdmi_write(hdmi, 0x80, HDMI_SYSTEM_CTRL);
+ hdmi_bit_set(hdmi, 0xFC, 0x80, HDMI_SYSTEM_CTRL);
dev_dbg(hdmi->dev, "HDMI running\n");
}
@@ -1016,7 +1128,7 @@ static void sh_hdmi_display_off(struct sh_mobile_lcdc_entity *entity)
dev_dbg(hdmi->dev, "%s(%p)\n", __func__, hdmi);
/* PS mode e->a */
- hdmi_write(hdmi, 0x10, HDMI_SYSTEM_CTRL);
+ hdmi_bit_set(hdmi, 0xFC, 0x10, HDMI_SYSTEM_CTRL);
}
static const struct sh_mobile_lcdc_entity_ops sh_hdmi_ops = {
@@ -1110,10 +1222,58 @@ out:
dev_dbg(hdmi->dev, "%s(%p): end\n", __func__, hdmi);
}
+static void sh_hdmi_htop1_init(struct sh_hdmi *hdmi)
+{
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_MODE);
+ hdmi_htop1_write(hdmi, 0x0000000b, 0x0010);
+ hdmi_htop1_write(hdmi, 0x00006710, HDMI_HTOP1_HTOP_DCL_FRC_MODE);
+ hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_1);
+ hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_2);
+ hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_1);
+ hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_2);
+ hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_1);
+ hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_2);
+ hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_1);
+ hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_2);
+ hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_1);
+ hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_2);
+ hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_1);
+ hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_2);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_Y1);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CB1);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CR1);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_Y2);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CB2);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CR2);
+ hdmi_htop1_write(hdmi, 0x00000008, HDMI_HTOP1_CURRENT);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_TISEMP0_1);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_TISEMP2_C);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_PHY_TEST_MODE);
+ hdmi_htop1_write(hdmi, 0x00000081, HDMI_HTOP1_TISIDRV);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_PLLBW);
+ hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISEN);
+ hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISDREN);
+ hdmi_htop1_write(hdmi, 0x00000003, HDMI_HTOP1_ENABLE_SELECTOR);
+ hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_MACRO_RESET);
+ hdmi_htop1_write(hdmi, 0x00000016, HDMI_HTOP1_CISRANGE);
+ msleep(100);
+ hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_ENABLE_SELECTOR);
+ msleep(100);
+ hdmi_htop1_write(hdmi, 0x00000003, HDMI_HTOP1_ENABLE_SELECTOR);
+ hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_MACRO_RESET);
+ hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISEN);
+ hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISDREN);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_VIDEO_INPUT);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_CLK_TO_PHY);
+ hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_VIDEO_INPUT2);
+ hdmi_htop1_write(hdmi, 0x0000000a, HDMI_HTOP1_CLK_SET);
+}
+
static int __init sh_hdmi_probe(struct platform_device *pdev)
{
struct sh_mobile_hdmi_info *pdata = pdev->dev.platform_data;
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ struct resource *htop1_res;
int irq = platform_get_irq(pdev, 0), ret;
struct sh_hdmi *hdmi;
long rate;
@@ -1121,6 +1281,15 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
if (!res || !pdata || irq < 0)
return -ENODEV;
+ htop1_res = NULL;
+ if (pdata->flags & HDMI_HAS_HTOP1) {
+ htop1_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!htop1_res) {
+ dev_err(&pdev->dev, "htop1 needs register base\n");
+ return -EINVAL;
+ }
+ }
+
hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
if (!hdmi) {
dev_err(&pdev->dev, "Cannot allocate device data\n");
@@ -1138,6 +1307,15 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
goto egetclk;
}
+ /* select register access functions */
+ if (pdata->flags & HDMI_32BIT_REG) {
+ hdmi->write = __hdmi_write32;
+ hdmi->read = __hdmi_read32;
+ } else {
+ hdmi->write = __hdmi_write8;
+ hdmi->read = __hdmi_read8;
+ }
+
/* An arbitrary relaxed pixclock just to get things started: from standard 480p */
rate = clk_round_rate(hdmi->hdmi_clk, PICOS2KHZ(37037));
if (rate > 0)
@@ -1176,6 +1354,24 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
+ /* init interrupt polarity */
+ if (pdata->flags & HDMI_OUTPUT_PUSH_PULL)
+ hdmi_bit_set(hdmi, 0x02, 0x02, HDMI_SYSTEM_CTRL);
+
+ if (pdata->flags & HDMI_OUTPUT_POLARITY_HI)
+ hdmi_bit_set(hdmi, 0x01, 0x01, HDMI_SYSTEM_CTRL);
+
+ /* enable htop1 register if needed */
+ if (htop1_res) {
+ hdmi->htop1 = ioremap(htop1_res->start, resource_size(htop1_res));
+ if (!hdmi->htop1) {
+ dev_err(&pdev->dev, "control register region already claimed\n");
+ ret = -ENOMEM;
+ goto emap_htop1;
+ }
+ sh_hdmi_htop1_init(hdmi);
+ }
+
/* Product and revision IDs are 0 in sh-mobile version */
dev_info(&pdev->dev, "Detected HDMI controller 0x%x:0x%x\n",
hdmi_read(hdmi, HDMI_PRODUCT_ID), hdmi_read(hdmi, HDMI_REVISION_ID));
@@ -1199,6 +1395,9 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
ecodec:
free_irq(irq, hdmi);
ereqirq:
+ if (hdmi->htop1)
+ iounmap(hdmi->htop1);
+emap_htop1:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
iounmap(hdmi->base);
@@ -1230,6 +1429,8 @@ static int __exit sh_hdmi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
clk_disable(hdmi->hdmi_clk);
clk_put(hdmi->hdmi_clk);
+ if (hdmi->htop1)
+ iounmap(hdmi->htop1);
iounmap(hdmi->base);
release_mem_region(res->start, resource_size(res));
kfree(hdmi);
diff --git a/drivers/video/sis/init.h b/drivers/video/sis/init.h
index aff7384..85d6738 100644
--- a/drivers/video/sis/init.h
+++ b/drivers/video/sis/init.h
@@ -105,51 +105,6 @@ static const unsigned short ModeIndex_1920x1440[] = {0x68, 0x69, 0x00, 0x6b};
static const unsigned short ModeIndex_300_2048x1536[]= {0x6c, 0x6d, 0x00, 0x00};
static const unsigned short ModeIndex_310_2048x1536[]= {0x6c, 0x6d, 0x00, 0x6e};
-static const unsigned short SiS_DRAMType[17][5]={
- {0x0C,0x0A,0x02,0x40,0x39},
- {0x0D,0x0A,0x01,0x40,0x48},
- {0x0C,0x09,0x02,0x20,0x35},
- {0x0D,0x09,0x01,0x20,0x44},
- {0x0C,0x08,0x02,0x10,0x31},
- {0x0D,0x08,0x01,0x10,0x40},
- {0x0C,0x0A,0x01,0x20,0x34},
- {0x0C,0x09,0x01,0x08,0x32},
- {0x0B,0x08,0x02,0x08,0x21},
- {0x0C,0x08,0x01,0x08,0x30},
- {0x0A,0x08,0x02,0x04,0x11},
- {0x0B,0x0A,0x01,0x10,0x28},
- {0x09,0x08,0x02,0x02,0x01},
- {0x0B,0x09,0x01,0x08,0x24},
- {0x0B,0x08,0x01,0x04,0x20},
- {0x0A,0x08,0x01,0x02,0x10},
- {0x09,0x08,0x01,0x01,0x00}
-};
-
-static const unsigned short SiS_SDRDRAM_TYPE[13][5] =
-{
- { 2,12, 9,64,0x35},
- { 1,13, 9,64,0x44},
- { 2,12, 8,32,0x31},
- { 2,11, 9,32,0x25},
- { 1,12, 9,32,0x34},
- { 1,13, 8,32,0x40},
- { 2,11, 8,16,0x21},
- { 1,12, 8,16,0x30},
- { 1,11, 9,16,0x24},
- { 1,11, 8, 8,0x20},
- { 2, 9, 8, 4,0x01},
- { 1,10, 8, 4,0x10},
- { 1, 9, 8, 2,0x00}
-};
-
-static const unsigned short SiS_DDRDRAM_TYPE[4][5] =
-{
- { 2,12, 9,64,0x35},
- { 2,12, 8,32,0x31},
- { 2,11, 8,16,0x21},
- { 2, 9, 8, 4,0x01}
-};
-
static const unsigned char SiS_MDA_DAC[] =
{
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 078ca21..a7a48db 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -4222,6 +4222,26 @@ sisfb_post_300_buswidth(struct sis_video_info *ivideo)
return 1; /* 32bit */
}
+static const unsigned short __devinitconst SiS_DRAMType[17][5] = {
+ {0x0C,0x0A,0x02,0x40,0x39},
+ {0x0D,0x0A,0x01,0x40,0x48},
+ {0x0C,0x09,0x02,0x20,0x35},
+ {0x0D,0x09,0x01,0x20,0x44},
+ {0x0C,0x08,0x02,0x10,0x31},
+ {0x0D,0x08,0x01,0x10,0x40},
+ {0x0C,0x0A,0x01,0x20,0x34},
+ {0x0C,0x09,0x01,0x08,0x32},
+ {0x0B,0x08,0x02,0x08,0x21},
+ {0x0C,0x08,0x01,0x08,0x30},
+ {0x0A,0x08,0x02,0x04,0x11},
+ {0x0B,0x0A,0x01,0x10,0x28},
+ {0x09,0x08,0x02,0x02,0x01},
+ {0x0B,0x09,0x01,0x08,0x24},
+ {0x0B,0x08,0x01,0x04,0x20},
+ {0x0A,0x08,0x01,0x02,0x10},
+ {0x09,0x08,0x01,0x01,0x00}
+};
+
static int __devinit
sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth,
int PseudoRankCapacity, int PseudoAdrPinCount,
@@ -4231,27 +4251,8 @@ sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth
unsigned short sr14;
unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid;
unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage;
- static const unsigned short SiS_DRAMType[17][5] = {
- {0x0C,0x0A,0x02,0x40,0x39},
- {0x0D,0x0A,0x01,0x40,0x48},
- {0x0C,0x09,0x02,0x20,0x35},
- {0x0D,0x09,0x01,0x20,0x44},
- {0x0C,0x08,0x02,0x10,0x31},
- {0x0D,0x08,0x01,0x10,0x40},
- {0x0C,0x0A,0x01,0x20,0x34},
- {0x0C,0x09,0x01,0x08,0x32},
- {0x0B,0x08,0x02,0x08,0x21},
- {0x0C,0x08,0x01,0x08,0x30},
- {0x0A,0x08,0x02,0x04,0x11},
- {0x0B,0x0A,0x01,0x10,0x28},
- {0x09,0x08,0x02,0x02,0x01},
- {0x0B,0x09,0x01,0x08,0x24},
- {0x0B,0x08,0x01,0x04,0x20},
- {0x0A,0x08,0x01,0x02,0x10},
- {0x09,0x08,0x01,0x01,0x00}
- };
- for(k = 0; k <= 16; k++) {
+ for(k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) {
RankCapacity = buswidth * SiS_DRAMType[k][3];
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c
index 30f7a81..5b6abc6 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/skeletonfb.c
@@ -1036,6 +1036,6 @@ static void __exit xxxfb_exit(void)
*/
module_init(xxxfb_init);
-module_exit(xxxfb_remove);
+module_exit(xxxfb_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
index 538e5ef..26f8642 100644
--- a/drivers/video/smscufx.c
+++ b/drivers/video/smscufx.c
@@ -846,7 +846,7 @@ static void ufx_raw_rect(struct ufx_data *dev, u16 *cmd, int x, int y,
}
}
-int ufx_handle_damage(struct ufx_data *dev, int x, int y,
+static int ufx_handle_damage(struct ufx_data *dev, int x, int y,
int width, int height)
{
size_t packed_line_len = ALIGN((width * 2), 4);
@@ -1083,7 +1083,7 @@ static int ufx_ops_open(struct fb_info *info, int user)
struct fb_deferred_io *fbdefio;
- fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+ fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
if (fbdefio) {
fbdefio->delay = UFX_DEFIO_WRITE_DELAY;
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 7af1e81..8af6414 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -893,7 +893,7 @@ static int dlfb_ops_open(struct fb_info *info, int user)
struct fb_deferred_io *fbdefio;
- fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+ fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
if (fbdefio) {
fbdefio->delay = DL_DEFIO_WRITE_DELAY;
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 0c88375..c80e770 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -1276,17 +1276,12 @@ static int viafb_dfph_proc_open(struct inode *inode, struct file *file)
static ssize_t viafb_dfph_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
- char buf[20];
- u8 reg_val = 0;
- unsigned long length;
- if (count < 1)
- return -EINVAL;
- length = count > 20 ? 20 : count;
- if (copy_from_user(&buf[0], buffer, length))
- return -EFAULT;
- buf[length - 1] = '\0'; /*Ensure end string */
- if (kstrtou8(buf, 0, &reg_val) < 0)
- return -EINVAL;
+ int err;
+ u8 reg_val;
+ err = kstrtou8_from_user(buffer, count, 0, &reg_val);
+ if (err)
+ return err;
+
viafb_write_reg_mask(CR97, VIACR, reg_val, 0x0f);
return count;
}
@@ -1316,17 +1311,12 @@ static int viafb_dfpl_proc_open(struct inode *inode, struct file *file)
static ssize_t viafb_dfpl_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
- char buf[20];
- u8 reg_val = 0;
- unsigned long length;
- if (count < 1)
- return -EINVAL;
- length = count > 20 ? 20 : count;
- if (copy_from_user(&buf[0], buffer, length))
- return -EFAULT;
- buf[length - 1] = '\0'; /*Ensure end string */
- if (kstrtou8(buf, 0, &reg_val) < 0)
- return -EINVAL;
+ int err;
+ u8 reg_val;
+ err = kstrtou8_from_user(buffer, count, 0, &reg_val);
+ if (err)
+ return err;
+
viafb_write_reg_mask(CR99, VIACR, reg_val, 0x0f);
return count;
}
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index bfbc15c..0908e60 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -47,7 +47,7 @@ struct virtio_balloon
struct task_struct *thread;
/* Waiting for host to ack the pages we released. */
- struct completion acked;
+ wait_queue_head_t acked;
/* Number of balloon pages we've told the Host we're not using. */
unsigned int num_pages;
@@ -89,29 +89,25 @@ static struct page *balloon_pfn_to_page(u32 pfn)
static void balloon_ack(struct virtqueue *vq)
{
- struct virtio_balloon *vb;
- unsigned int len;
+ struct virtio_balloon *vb = vq->vdev->priv;
- vb = virtqueue_get_buf(vq, &len);
- if (vb)
- complete(&vb->acked);
+ wake_up(&vb->acked);
}
static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
{
struct scatterlist sg;
+ unsigned int len;
sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);
- init_completion(&vb->acked);
-
/* We should always be able to add one buffer to an empty queue. */
if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
BUG();
virtqueue_kick(vq);
/* When host has read buffer, this completes via balloon_ack */
- wait_for_completion(&vb->acked);
+ wait_event(vb->acked, virtqueue_get_buf(vq, &len));
}
static void set_page_pfns(u32 pfns[], struct page *page)
@@ -231,12 +227,8 @@ static void update_balloon_stats(struct virtio_balloon *vb)
*/
static void stats_request(struct virtqueue *vq)
{
- struct virtio_balloon *vb;
- unsigned int len;
+ struct virtio_balloon *vb = vq->vdev->priv;
- vb = virtqueue_get_buf(vq, &len);
- if (!vb)
- return;
vb->need_stats_update = 1;
wake_up(&vb->config_change);
}
@@ -245,11 +237,14 @@ static void stats_handle_request(struct virtio_balloon *vb)
{
struct virtqueue *vq;
struct scatterlist sg;
+ unsigned int len;
vb->need_stats_update = 0;
update_balloon_stats(vb);
vq = vb->stats_vq;
+ if (!virtqueue_get_buf(vq, &len))
+ return;
sg_init_one(&sg, vb->stats, sizeof(vb->stats));
if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
BUG();
@@ -358,6 +353,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
INIT_LIST_HEAD(&vb->pages);
vb->num_pages = 0;
init_waitqueue_head(&vb->config_change);
+ init_waitqueue_head(&vb->acked);
vb->vdev = vdev;
vb->need_stats_update = 0;
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 979d6ee..5ceb1cd 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -60,7 +60,7 @@ config W1_MASTER_GPIO
config HDQ_MASTER_OMAP
tristate "OMAP HDQ driver"
- depends on SOC_OMAP2430 || ARCH_OMAP3
+ depends on ARCH_OMAP2PLUS
help
Say Y here if you want support for the 1-wire or HDQ Interface
on an OMAP processor.
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 5ef385b..291897c 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -1,7 +1,7 @@
/*
* drivers/w1/masters/omap_hdq.c
*
- * Copyright (C) 2007 Texas Instruments, Inc.
+ * Copyright (C) 2007,2012 Texas Instruments, Inc.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
@@ -14,9 +14,9 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <linux/clk.h>
#include <linux/io.h>
#include <linux/sched.h>
+#include <linux/pm_runtime.h>
#include <asm/irq.h>
#include <mach/hardware.h>
@@ -61,8 +61,6 @@ struct hdq_data {
/* lock status update */
struct mutex hdq_mutex;
int hdq_usecount;
- struct clk *hdq_ick;
- struct clk *hdq_fck;
u8 hdq_irqstatus;
/* device lock */
spinlock_t hdq_spinlock;
@@ -102,20 +100,20 @@ static struct w1_bus_master omap_w1_master = {
/* HDQ register I/O routines */
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
- return __raw_readb(hdq_data->hdq_base + offset);
+ return __raw_readl(hdq_data->hdq_base + offset);
}
static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
- __raw_writeb(val, hdq_data->hdq_base + offset);
+ __raw_writel(val, hdq_data->hdq_base + offset);
}
static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
u8 val, u8 mask)
{
- u8 new_val = (__raw_readb(hdq_data->hdq_base + offset) & ~mask)
+ u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
| (val & mask);
- __raw_writeb(new_val, hdq_data->hdq_base + offset);
+ __raw_writel(new_val, hdq_data->hdq_base + offset);
return new_val;
}
@@ -419,17 +417,8 @@ static int omap_hdq_get(struct hdq_data *hdq_data)
hdq_data->hdq_usecount++;
try_module_get(THIS_MODULE);
if (1 == hdq_data->hdq_usecount) {
- if (clk_enable(hdq_data->hdq_ick)) {
- dev_dbg(hdq_data->dev, "Can not enable ick\n");
- ret = -ENODEV;
- goto clk_err;
- }
- if (clk_enable(hdq_data->hdq_fck)) {
- dev_dbg(hdq_data->dev, "Can not enable fck\n");
- clk_disable(hdq_data->hdq_ick);
- ret = -ENODEV;
- goto clk_err;
- }
+
+ pm_runtime_get_sync(hdq_data->dev);
/* make sure HDQ is out of reset */
if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
@@ -450,9 +439,6 @@ static int omap_hdq_get(struct hdq_data *hdq_data)
}
}
-clk_err:
- clk_put(hdq_data->hdq_ick);
- clk_put(hdq_data->hdq_fck);
out:
mutex_unlock(&hdq_data->hdq_mutex);
rtn:
@@ -475,10 +461,8 @@ static int omap_hdq_put(struct hdq_data *hdq_data)
} else {
hdq_data->hdq_usecount--;
module_put(THIS_MODULE);
- if (0 == hdq_data->hdq_usecount) {
- clk_disable(hdq_data->hdq_ick);
- clk_disable(hdq_data->hdq_fck);
- }
+ if (0 == hdq_data->hdq_usecount)
+ pm_runtime_put_sync(hdq_data->dev);
}
mutex_unlock(&hdq_data->hdq_mutex);
@@ -591,35 +575,11 @@ static int __devinit omap_hdq_probe(struct platform_device *pdev)
goto err_ioremap;
}
- /* get interface & functional clock objects */
- hdq_data->hdq_ick = clk_get(&pdev->dev, "ick");
- if (IS_ERR(hdq_data->hdq_ick)) {
- dev_dbg(&pdev->dev, "Can't get HDQ ick clock object\n");
- ret = PTR_ERR(hdq_data->hdq_ick);
- goto err_ick;
- }
-
- hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");
- if (IS_ERR(hdq_data->hdq_fck)) {
- dev_dbg(&pdev->dev, "Can't get HDQ fck clock object\n");
- ret = PTR_ERR(hdq_data->hdq_fck);
- goto err_fck;
- }
-
hdq_data->hdq_usecount = 0;
mutex_init(&hdq_data->hdq_mutex);
- if (clk_enable(hdq_data->hdq_ick)) {
- dev_dbg(&pdev->dev, "Can not enable ick\n");
- ret = -ENODEV;
- goto err_intfclk;
- }
-
- if (clk_enable(hdq_data->hdq_fck)) {
- dev_dbg(&pdev->dev, "Can not enable fck\n");
- ret = -ENODEV;
- goto err_fnclk;
- }
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
@@ -641,9 +601,7 @@ static int __devinit omap_hdq_probe(struct platform_device *pdev)
omap_hdq_break(hdq_data);
- /* don't clock the HDQ until it is needed */
- clk_disable(hdq_data->hdq_ick);
- clk_disable(hdq_data->hdq_fck);
+ pm_runtime_put_sync(&pdev->dev);
omap_w1_master.data = hdq_data;
@@ -655,20 +613,11 @@ static int __devinit omap_hdq_probe(struct platform_device *pdev)
return 0;
-err_w1:
err_irq:
- clk_disable(hdq_data->hdq_fck);
-
-err_fnclk:
- clk_disable(hdq_data->hdq_ick);
-
-err_intfclk:
- clk_put(hdq_data->hdq_fck);
-
-err_fck:
- clk_put(hdq_data->hdq_ick);
+ pm_runtime_put_sync(&pdev->dev);
+err_w1:
+ pm_runtime_disable(&pdev->dev);
-err_ick:
iounmap(hdq_data->hdq_base);
err_ioremap:
@@ -696,8 +645,7 @@ static int omap_hdq_remove(struct platform_device *pdev)
mutex_unlock(&hdq_data->hdq_mutex);
/* remove module dependency */
- clk_put(hdq_data->hdq_ick);
- clk_put(hdq_data->hdq_fck);
+ pm_runtime_disable(&pdev->dev);
free_irq(INT_24XX_HDQ_IRQ, hdq_data);
platform_set_drvdata(pdev, NULL);
iounmap(hdq_data->hdq_base);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index d92d748..53d7571 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -64,6 +64,18 @@ config SOFT_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called softdog.
+config DA9052_WATCHDOG
+ tristate "Dialog DA9052 Watchdog"
+ depends on PMIC_DA9052
+ select WATCHDOG_CORE
+ help
+	  Support for the watchdog in the DA9052 PMIC. A watchdog trigger
+	  causes a system reset.
+
+ Say Y here to include support for the DA9052 watchdog.
+ Alternatively say M to compile the driver as a module,
+ which will be called da9052_wdt.
+
config WM831X_WATCHDOG
tristate "WM831x watchdog"
depends on MFD_WM831X
@@ -87,6 +99,7 @@ config WM8350_WATCHDOG
config ARM_SP805_WATCHDOG
tristate "ARM SP805 Watchdog"
depends on ARM_AMBA
+ select WATCHDOG_CORE
help
ARM Primecell SP805 Watchdog timer. This will reboot your system when
the timeout is reached.
@@ -266,6 +279,7 @@ config DAVINCI_WATCHDOG
config ORION_WATCHDOG
tristate "Orion watchdog"
depends on ARCH_ORION5X || ARCH_KIRKWOOD
+ select WATCHDOG_CORE
help
 	  Say Y here to include support for the watchdog timer
in the Marvell Orion5x and Kirkwood ARM SoCs.
@@ -565,6 +579,7 @@ config INTEL_SCU_WATCHDOG
config ITCO_WDT
tristate "Intel TCO Timer/Watchdog"
depends on (X86 || IA64) && PCI
+ select WATCHDOG_CORE
select LPC_ICH
---help---
Hardware driver for the intel TCO timer based watchdog devices.
@@ -1102,10 +1117,10 @@ config BOOKE_WDT
config BOOKE_WDT_DEFAULT_TIMEOUT
int "PowerPC Book-E Watchdog Timer Default Timeout"
depends on BOOKE_WDT
- default 38 if FSL_BOOKE
- range 0 63 if FSL_BOOKE
- default 3 if !FSL_BOOKE
- range 0 3 if !FSL_BOOKE
+ default 38 if PPC_FSL_BOOK3E
+ range 0 63 if PPC_FSL_BOOK3E
+ default 3 if !PPC_FSL_BOOK3E
+ range 0 3 if !PPC_FSL_BOOK3E
help
Select the default watchdog timer period to be used by the PowerPC
Book-E watchdog driver. A watchdog "event" occurs when the bit
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 442bfbe..572b39b 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -163,6 +163,7 @@ obj-$(CONFIG_WATCHDOG_CP1XXX) += cpwd.o
obj-$(CONFIG_XEN_WDT) += xen_wdt.o
# Architecture Independent
+obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
obj-$(CONFIG_MAX63XX_WATCHDOG) += max63xx_wdt.o
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index 8379dc3..551880b 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -302,7 +302,7 @@ static void bcm63xx_wdt_shutdown(struct platform_device *pdev)
bcm63xx_wdt_pause();
}
-static struct platform_driver bcm63xx_wdt = {
+static struct platform_driver bcm63xx_wdt_driver = {
.probe = bcm63xx_wdt_probe,
.remove = __devexit_p(bcm63xx_wdt_remove),
.shutdown = bcm63xx_wdt_shutdown,
@@ -312,7 +312,7 @@ static struct platform_driver bcm63xx_wdt = {
}
};
-module_platform_driver(bcm63xx_wdt);
+module_platform_driver(bcm63xx_wdt_driver);
MODULE_AUTHOR("Miguel Gaio <miguel.gaio@efixo.com>");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index ce0ab44..3fe82d0 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -37,7 +37,7 @@
u32 booke_wdt_enabled;
u32 booke_wdt_period = CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT;
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_FSL_BOOK3E
#define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15))
#define WDTP_MASK (WDTP(0x3f))
#else
@@ -190,7 +190,7 @@ static long booke_wdt_ioctl(struct file *file,
case WDIOC_SETTIMEOUT:
if (get_user(tmp, p))
return -EFAULT;
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_FSL_BOOK3E
/* period of 1 gives the largest possible timeout */
if (tmp > period_to_sec(1))
return -EINVAL;
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index 6876430..cb5da5c 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -263,6 +263,7 @@ static int __exit coh901327_remove(struct platform_device *pdev)
watchdog_unregister_device(&coh901327_wdt);
coh901327_disable();
free_irq(irq, pdev);
+ clk_unprepare(clk);
clk_put(clk);
iounmap(virtbase);
release_mem_region(phybase, physize);
@@ -300,9 +301,9 @@ static int __init coh901327_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "could not get clock\n");
goto out_no_clk;
}
- ret = clk_enable(clk);
+ ret = clk_prepare_enable(clk);
if (ret) {
- dev_err(&pdev->dev, "could not enable clock\n");
+ dev_err(&pdev->dev, "could not prepare and enable clock\n");
goto out_no_clk_enable;
}
@@ -369,7 +370,7 @@ static int __init coh901327_probe(struct platform_device *pdev)
out_no_wdog:
free_irq(irq, pdev);
out_no_irq:
- clk_disable(clk);
+ clk_disable_unprepare(clk);
out_no_clk_enable:
clk_put(clk);
out_no_clk:
diff --git a/drivers/watchdog/da9052_wdt.c b/drivers/watchdog/da9052_wdt.c
new file mode 100644
index 0000000..3f75129
--- /dev/null
+++ b/drivers/watchdog/da9052_wdt.c
@@ -0,0 +1,251 @@
+/*
+ * System monitoring driver for DA9052 PMICs.
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: Anthony Olech <Anthony.Olech@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+#include <linux/watchdog.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/da9052.h>
+
+#define DA9052_DEF_TIMEOUT 4
+#define DA9052_TWDMIN 256
+
+struct da9052_wdt_data {
+ struct watchdog_device wdt;
+ struct da9052 *da9052;
+ struct kref kref;
+ unsigned long jpast;
+};
+
+static const struct {
+ u8 reg_val;
+ int time; /* Seconds */
+} da9052_wdt_maps[] = {
+ { 1, 2 },
+ { 2, 4 },
+ { 3, 8 },
+ { 4, 16 },
+ { 5, 32 },
+	{ 5, 33 }, /* Actual time 32.768s so include both 32s and 33s */
+ { 6, 65 },
+	{ 6, 66 }, /* Actual time 65.536s so include both 65s and 66s */
+ { 7, 131 },
+};
+
+
+static void da9052_wdt_release_resources(struct kref *r)
+{
+ struct da9052_wdt_data *driver_data =
+ container_of(r, struct da9052_wdt_data, kref);
+
+ kfree(driver_data);
+}
+
+static int da9052_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int timeout)
+{
+ struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+ struct da9052 *da9052 = driver_data->da9052;
+ int ret, i;
+
+ /*
+ * Disable the Watchdog timer before setting
+ * new time out.
+ */
+ ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+ DA9052_CONTROLD_TWDSCALE, 0);
+ if (ret < 0) {
+ dev_err(da9052->dev, "Failed to disable watchdog bit, %d\n",
+ ret);
+ return ret;
+ }
+ if (timeout) {
+ /*
+ * To change the timeout, da9052 needs to
+ * be disabled for at least 150 us.
+ */
+ udelay(150);
+
+ /* Set the desired timeout */
+ for (i = 0; i < ARRAY_SIZE(da9052_wdt_maps); i++)
+ if (da9052_wdt_maps[i].time == timeout)
+ break;
+
+ if (i == ARRAY_SIZE(da9052_wdt_maps))
+ ret = -EINVAL;
+ else
+ ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+ DA9052_CONTROLD_TWDSCALE,
+ da9052_wdt_maps[i].reg_val);
+ if (ret < 0) {
+ dev_err(da9052->dev,
+ "Failed to update timescale bit, %d\n", ret);
+ return ret;
+ }
+
+ wdt_dev->timeout = timeout;
+ driver_data->jpast = jiffies;
+ }
+
+ return 0;
+}
+
+static void da9052_wdt_ref(struct watchdog_device *wdt_dev)
+{
+ struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+
+ kref_get(&driver_data->kref);
+}
+
+static void da9052_wdt_unref(struct watchdog_device *wdt_dev)
+{
+ struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+
+ kref_put(&driver_data->kref, da9052_wdt_release_resources);
+}
+
+static int da9052_wdt_start(struct watchdog_device *wdt_dev)
+{
+ return da9052_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
+}
+
+static int da9052_wdt_stop(struct watchdog_device *wdt_dev)
+{
+ return da9052_wdt_set_timeout(wdt_dev, 0);
+}
+
+static int da9052_wdt_ping(struct watchdog_device *wdt_dev)
+{
+ struct da9052_wdt_data *driver_data = watchdog_get_drvdata(wdt_dev);
+ struct da9052 *da9052 = driver_data->da9052;
+ unsigned long msec, jnow = jiffies;
+ int ret;
+
+ /*
+ * The watchdog window has a minimum period, TWDMIN. A write to the
+ * watchdog before this period has elapsed causes an error.
+ */
+ msec = (jnow - driver_data->jpast) * 1000/HZ;
+ if (msec < DA9052_TWDMIN)
+ mdelay(msec);
+
+ /* Reset the watchdog timer */
+ ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+ DA9052_CONTROLD_WATCHDOG, 1 << 7);
+ if (ret < 0)
+ goto err_strobe;
+
+ /*
+ * FIXME: Reset the watchdog core, in general PMIC
+ * is supposed to do this
+ */
+ ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+ DA9052_CONTROLD_WATCHDOG, 0 << 7);
+err_strobe:
+ return ret;
+}
+
+static struct watchdog_info da9052_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = "DA9052 Watchdog",
+};
+
+static const struct watchdog_ops da9052_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = da9052_wdt_start,
+ .stop = da9052_wdt_stop,
+ .ping = da9052_wdt_ping,
+ .set_timeout = da9052_wdt_set_timeout,
+ .ref = da9052_wdt_ref,
+ .unref = da9052_wdt_unref,
+};
+
+
+static int __devinit da9052_wdt_probe(struct platform_device *pdev)
+{
+ struct da9052 *da9052 = dev_get_drvdata(pdev->dev.parent);
+ struct da9052_wdt_data *driver_data;
+ struct watchdog_device *da9052_wdt;
+ int ret;
+
+ driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
+ GFP_KERNEL);
+ if (!driver_data) {
+ dev_err(da9052->dev, "Unable to alloacate watchdog device\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ driver_data->da9052 = da9052;
+
+ da9052_wdt = &driver_data->wdt;
+
+ da9052_wdt->timeout = DA9052_DEF_TIMEOUT;
+ da9052_wdt->info = &da9052_wdt_info;
+ da9052_wdt->ops = &da9052_wdt_ops;
+ watchdog_set_drvdata(da9052_wdt, driver_data);
+
+ kref_init(&driver_data->kref);
+
+ ret = da9052_reg_update(da9052, DA9052_CONTROL_D_REG,
+ DA9052_CONTROLD_TWDSCALE, 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to disable watchdog bits, %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = watchdog_register_device(&driver_data->wdt);
+ if (ret != 0) {
+ dev_err(da9052->dev, "watchdog_register_device() failed: %d\n",
+ ret);
+ goto err;
+ }
+
+ dev_set_drvdata(&pdev->dev, driver_data);
+err:
+ return ret;
+}
+
+static int __devexit da9052_wdt_remove(struct platform_device *pdev)
+{
+ struct da9052_wdt_data *driver_data = dev_get_drvdata(&pdev->dev);
+
+ watchdog_unregister_device(&driver_data->wdt);
+ kref_put(&driver_data->kref, da9052_wdt_release_resources);
+
+ return 0;
+}
+
+static struct platform_driver da9052_wdt_driver = {
+ .probe = da9052_wdt_probe,
+ .remove = __devexit_p(da9052_wdt_remove),
+ .driver = {
+ .name = "da9052-watchdog",
+ },
+};
+
+module_platform_driver(da9052_wdt_driver);
+
+MODULE_AUTHOR("Anthony Olech <Anthony.Olech@diasemi.com>");
+MODULE_DESCRIPTION("DA9052 SM Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9052-watchdog");
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index c65b0a5..016bd93 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -56,6 +56,7 @@
#define SIO_F71858_ID 0x0507 /* Chipset ID */
#define SIO_F71862_ID 0x0601 /* Chipset ID */
#define SIO_F71869_ID 0x0814 /* Chipset ID */
+#define SIO_F71869A_ID 0x1007 /* Chipset ID */
#define SIO_F71882_ID 0x0541 /* Chipset ID */
#define SIO_F71889_ID 0x0723 /* Chipset ID */
@@ -195,7 +196,7 @@ static inline int superio_enter(int base)
return -EBUSY;
}
- /* according to the datasheet the key must be send twice! */
+ /* according to the datasheet the key must be sent twice! */
outb(SIO_UNLOCK_KEY, base);
outb(SIO_UNLOCK_KEY, base);
@@ -756,6 +757,7 @@ static int __init f71808e_find(int sioaddr)
err = f71862fg_pin_configure(0); /* validate module parameter */
break;
case SIO_F71869_ID:
+ case SIO_F71869A_ID:
watchdog.type = f71869;
break;
case SIO_F71882_ID:
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 2b76381..1eff743 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -146,7 +146,7 @@ struct cmn_registers {
} __attribute__((packed));
static unsigned int hpwdt_nmi_decoding;
-static unsigned int allow_kdump;
+static unsigned int allow_kdump = 1;
static unsigned int is_icru;
static DEFINE_SPINLOCK(rom_lock);
static void *cru_rom_addr;
@@ -756,6 +756,8 @@ error:
static void hpwdt_exit_nmi_decoding(void)
{
unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
+ unregister_nmi_handler(NMI_SERR, "hpwdt");
+ unregister_nmi_handler(NMI_IO_CHECK, "hpwdt");
if (cru_rom_addr)
iounmap(cru_rom_addr);
}
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 741528b..ceed39f 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -47,7 +47,7 @@
/* Module and version information */
#define DRV_NAME "iTCO_wdt"
-#define DRV_VERSION "1.07"
+#define DRV_VERSION "1.10"
/* Includes */
#include <linux/module.h> /* For module specific items */
@@ -88,8 +88,6 @@
#define TCOv2_TMR (TCOBASE + 0x12) /* TCOv2 Timer Initial Value */
/* internal variables */
-static unsigned long is_active;
-static char expect_release;
static struct { /* this is private data for the iTCO_wdt device */
/* TCO version/generation */
unsigned int iTCO_version;
@@ -106,12 +104,12 @@ static struct { /* this is private data for the iTCO_wdt device */
} iTCO_wdt_private;
/* module parameters */
-#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat */
-static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
+#define WATCHDOG_TIMEOUT 30 /* 30 sec default heartbeat */
+static int heartbeat = WATCHDOG_TIMEOUT; /* in seconds */
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat, "Watchdog timeout in seconds. "
"5..76 (TCO v1) or 3..614 (TCO v2), default="
- __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
+ __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
@@ -178,13 +176,13 @@ static int iTCO_wdt_unset_NO_REBOOT_bit(void)
return ret; /* returns: 0 = OK, -EIO = Error */
}
-static int iTCO_wdt_start(void)
+static int iTCO_wdt_start(struct watchdog_device *wd_dev)
{
unsigned int val;
spin_lock(&iTCO_wdt_private.io_lock);
- iTCO_vendor_pre_start(iTCO_wdt_private.smi_res, heartbeat);
+ iTCO_vendor_pre_start(iTCO_wdt_private.smi_res, wd_dev->timeout);
/* disable chipset's NO_REBOOT bit */
if (iTCO_wdt_unset_NO_REBOOT_bit()) {
@@ -212,7 +210,7 @@ static int iTCO_wdt_start(void)
return 0;
}
-static int iTCO_wdt_stop(void)
+static int iTCO_wdt_stop(struct watchdog_device *wd_dev)
{
unsigned int val;
@@ -236,11 +234,11 @@ static int iTCO_wdt_stop(void)
return 0;
}
-static int iTCO_wdt_keepalive(void)
+static int iTCO_wdt_ping(struct watchdog_device *wd_dev)
{
spin_lock(&iTCO_wdt_private.io_lock);
- iTCO_vendor_pre_keepalive(iTCO_wdt_private.smi_res, heartbeat);
+ iTCO_vendor_pre_keepalive(iTCO_wdt_private.smi_res, wd_dev->timeout);
/* Reload the timer by writing to the TCO Timer Counter register */
if (iTCO_wdt_private.iTCO_version == 2)
@@ -257,7 +255,7 @@ static int iTCO_wdt_keepalive(void)
return 0;
}
-static int iTCO_wdt_set_heartbeat(int t)
+static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)
{
unsigned int val16;
unsigned char val8;
@@ -304,14 +302,15 @@ static int iTCO_wdt_set_heartbeat(int t)
return -EINVAL;
}
- heartbeat = t;
+ wd_dev->timeout = t;
return 0;
}
-static int iTCO_wdt_get_timeleft(int *time_left)
+static unsigned int iTCO_wdt_get_timeleft(struct watchdog_device *wd_dev)
{
unsigned int val16;
unsigned char val8;
+ unsigned int time_left = 0;
/* read the TCO Timer */
if (iTCO_wdt_private.iTCO_version == 2) {
@@ -320,7 +319,7 @@ static int iTCO_wdt_get_timeleft(int *time_left)
val16 &= 0x3ff;
spin_unlock(&iTCO_wdt_private.io_lock);
- *time_left = (val16 * 6) / 10;
+ time_left = (val16 * 6) / 10;
} else if (iTCO_wdt_private.iTCO_version == 1) {
spin_lock(&iTCO_wdt_private.io_lock);
val8 = inb(TCO_RLD);
@@ -329,156 +328,35 @@ static int iTCO_wdt_get_timeleft(int *time_left)
val8 += (inb(TCOv1_TMR) & 0x3f);
spin_unlock(&iTCO_wdt_private.io_lock);
- *time_left = (val8 * 6) / 10;
- } else
- return -EINVAL;
- return 0;
-}
-
-/*
- * /dev/watchdog handling
- */
-
-static int iTCO_wdt_open(struct inode *inode, struct file *file)
-{
- /* /dev/watchdog can only be opened once */
- if (test_and_set_bit(0, &is_active))
- return -EBUSY;
-
- /*
- * Reload and activate timer
- */
- iTCO_wdt_start();
- return nonseekable_open(inode, file);
-}
-
-static int iTCO_wdt_release(struct inode *inode, struct file *file)
-{
- /*
- * Shut off the timer.
- */
- if (expect_release == 42) {
- iTCO_wdt_stop();
- } else {
- pr_crit("Unexpected close, not stopping watchdog!\n");
- iTCO_wdt_keepalive();
- }
- clear_bit(0, &is_active);
- expect_release = 0;
- return 0;
-}
-
-static ssize_t iTCO_wdt_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- /* See if we got the magic character 'V' and reload the timer */
- if (len) {
- if (!nowayout) {
- size_t i;
-
- /* note: just in case someone wrote the magic
- character five months ago... */
- expect_release = 0;
-
- /* scan to see whether or not we got the
- magic character */
- for (i = 0; i != len; i++) {
- char c;
- if (get_user(c, data + i))
- return -EFAULT;
- if (c == 'V')
- expect_release = 42;
- }
- }
-
- /* someone wrote to us, we should reload the timer */
- iTCO_wdt_keepalive();
- }
- return len;
-}
-
-static long iTCO_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int new_options, retval = -EINVAL;
- int new_heartbeat;
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- static const struct watchdog_info ident = {
- .options = WDIOF_SETTIMEOUT |
- WDIOF_KEEPALIVEPING |
- WDIOF_MAGICCLOSE,
- .firmware_version = 0,
- .identity = DRV_NAME,
- };
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
-
- case WDIOC_SETOPTIONS:
- {
- if (get_user(new_options, p))
- return -EFAULT;
-
- if (new_options & WDIOS_DISABLECARD) {
- iTCO_wdt_stop();
- retval = 0;
- }
- if (new_options & WDIOS_ENABLECARD) {
- iTCO_wdt_keepalive();
- iTCO_wdt_start();
- retval = 0;
- }
- return retval;
- }
- case WDIOC_KEEPALIVE:
- iTCO_wdt_keepalive();
- return 0;
-
- case WDIOC_SETTIMEOUT:
- {
- if (get_user(new_heartbeat, p))
- return -EFAULT;
- if (iTCO_wdt_set_heartbeat(new_heartbeat))
- return -EINVAL;
- iTCO_wdt_keepalive();
- /* Fall */
- }
- case WDIOC_GETTIMEOUT:
- return put_user(heartbeat, p);
- case WDIOC_GETTIMELEFT:
- {
- int time_left;
- if (iTCO_wdt_get_timeleft(&time_left))
- return -EINVAL;
- return put_user(time_left, p);
- }
- default:
- return -ENOTTY;
+ time_left = (val8 * 6) / 10;
}
+ return time_left;
}
/*
* Kernel Interfaces
*/
-static const struct file_operations iTCO_wdt_fops = {
+static const struct watchdog_info ident = {
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+ .firmware_version = 0,
+ .identity = DRV_NAME,
+};
+
+static const struct watchdog_ops iTCO_wdt_ops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = iTCO_wdt_write,
- .unlocked_ioctl = iTCO_wdt_ioctl,
- .open = iTCO_wdt_open,
- .release = iTCO_wdt_release,
+ .start = iTCO_wdt_start,
+ .stop = iTCO_wdt_stop,
+ .ping = iTCO_wdt_ping,
+ .set_timeout = iTCO_wdt_set_timeout,
+ .get_timeleft = iTCO_wdt_get_timeleft,
};
-static struct miscdevice iTCO_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &iTCO_wdt_fops,
+static struct watchdog_device iTCO_wdt_watchdog_dev = {
+ .info = &ident,
+ .ops = &iTCO_wdt_ops,
};
/*
@@ -489,10 +367,10 @@ static void __devexit iTCO_wdt_cleanup(void)
{
/* Stop the timer before we leave */
if (!nowayout)
- iTCO_wdt_stop();
+ iTCO_wdt_stop(&iTCO_wdt_watchdog_dev);
/* Deregister */
- misc_deregister(&iTCO_wdt_miscdev);
+ watchdog_unregister_device(&iTCO_wdt_watchdog_dev);
/* release resources */
release_region(iTCO_wdt_private.tco_res->start,
@@ -575,7 +453,7 @@ static int __devinit iTCO_wdt_probe(struct platform_device *dev)
if (!request_region(iTCO_wdt_private.smi_res->start,
resource_size(iTCO_wdt_private.smi_res), dev->name)) {
pr_err("I/O address 0x%04llx already in use, device disabled\n",
- SMI_EN);
+ (u64)SMI_EN);
ret = -EBUSY;
goto unmap_gcs;
}
@@ -592,33 +470,38 @@ static int __devinit iTCO_wdt_probe(struct platform_device *dev)
if (!request_region(iTCO_wdt_private.tco_res->start,
resource_size(iTCO_wdt_private.tco_res), dev->name)) {
pr_err("I/O address 0x%04llx already in use, device disabled\n",
- TCOBASE);
+ (u64)TCOBASE);
ret = -EBUSY;
goto unreg_smi;
}
pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04llx)\n",
- ich_info->name, ich_info->iTCO_version, TCOBASE);
+ ich_info->name, ich_info->iTCO_version, (u64)TCOBASE);
/* Clear out the (probably old) status */
outw(0x0008, TCO1_STS); /* Clear the Time Out Status bit */
outw(0x0002, TCO2_STS); /* Clear SECOND_TO_STS bit */
outw(0x0004, TCO2_STS); /* Clear BOOT_STS bit */
+ iTCO_wdt_watchdog_dev.bootstatus = 0;
+ iTCO_wdt_watchdog_dev.timeout = WATCHDOG_TIMEOUT;
+ watchdog_set_nowayout(&iTCO_wdt_watchdog_dev, nowayout);
+ iTCO_wdt_watchdog_dev.parent = dev->dev.parent;
+
/* Make sure the watchdog is not running */
- iTCO_wdt_stop();
+ iTCO_wdt_stop(&iTCO_wdt_watchdog_dev);
/* Check that the heartbeat value is within its range;
if not, reset to the default */
- if (iTCO_wdt_set_heartbeat(heartbeat)) {
- iTCO_wdt_set_heartbeat(WATCHDOG_HEARTBEAT);
- pr_info("timeout value out of range, using %d\n", heartbeat);
+ if (iTCO_wdt_set_timeout(&iTCO_wdt_watchdog_dev, heartbeat)) {
+ iTCO_wdt_set_timeout(&iTCO_wdt_watchdog_dev, WATCHDOG_TIMEOUT);
+ pr_info("timeout value out of range, using %d\n",
+ WATCHDOG_TIMEOUT);
}
- ret = misc_register(&iTCO_wdt_miscdev);
+ ret = watchdog_register_device(&iTCO_wdt_watchdog_dev);
if (ret != 0) {
- pr_err("cannot register miscdev on minor=%d (err=%d)\n",
- WATCHDOG_MINOR, ret);
+ pr_err("cannot register watchdog device (err=%d)\n", ret);
goto unreg_tco;
}
@@ -659,7 +542,7 @@ static int __devexit iTCO_wdt_remove(struct platform_device *dev)
static void iTCO_wdt_shutdown(struct platform_device *dev)
{
- iTCO_wdt_stop();
+ iTCO_wdt_stop(NULL);
}
static struct platform_driver iTCO_wdt_driver = {
@@ -699,3 +582,4 @@ MODULE_DESCRIPTION("Intel TCO WatchDog Timer Driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/watchdog/ie6xx_wdt.c b/drivers/watchdog/ie6xx_wdt.c
index 5f0d776..8f541b9 100644
--- a/drivers/watchdog/ie6xx_wdt.c
+++ b/drivers/watchdog/ie6xx_wdt.c
@@ -232,7 +232,7 @@ static void __devinit ie6xx_wdt_debugfs_init(void)
S_IFREG | S_IRUGO, NULL, NULL, &ie6xx_wdt_dbg_operations);
}
-static void __devexit ie6xx_wdt_debugfs_exit(void)
+static void ie6xx_wdt_debugfs_exit(void)
{
debugfs_remove(ie6xx_wdt_data.debugfs);
}
@@ -242,7 +242,7 @@ static void __devinit ie6xx_wdt_debugfs_init(void)
{
}
-static void __devexit ie6xx_wdt_debugfs_exit(void)
+static void ie6xx_wdt_debugfs_exit(void)
{
}
#endif
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index a9593a3..2e74c3a 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -13,14 +13,15 @@
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
-#include <linux/platform_device.h>
+#include <linux/of_platform.h>
#include <linux/uaccess.h>
#include <linux/clk.h>
#include <linux/io.h>
-#include <lantiq.h>
+#include <lantiq_soc.h>
-/* Section 3.4 of the datasheet
+/*
+ * Section 3.4 of the datasheet
* The password sequence protects the WDT control register from unintended
* write actions, which might cause malfunction of the WDT.
*
@@ -70,7 +71,8 @@ ltq_wdt_disable(void)
{
/* write the first password magic */
ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
- /* write the second password magic with no config
+ /*
+ * write the second password magic with no config
* this turns the watchdog off
*/
ltq_w32(LTQ_WDT_PW2, ltq_wdt_membase + LTQ_WDT_CR);
@@ -184,7 +186,7 @@ static struct miscdevice ltq_wdt_miscdev = {
.fops = &ltq_wdt_fops,
};
-static int __init
+static int __devinit
ltq_wdt_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -194,28 +196,27 @@ ltq_wdt_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot obtain I/O memory region");
return -ENOENT;
}
- res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), dev_name(&pdev->dev));
- if (!res) {
- dev_err(&pdev->dev, "cannot request I/O memory region");
- return -EBUSY;
- }
- ltq_wdt_membase = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
+
+ ltq_wdt_membase = devm_request_and_ioremap(&pdev->dev, res);
if (!ltq_wdt_membase) {
dev_err(&pdev->dev, "cannot remap I/O memory region\n");
return -ENOMEM;
}
/* we do not need to enable the clock as it is always running */
- clk = clk_get(&pdev->dev, "io");
- WARN_ON(!clk);
+ clk = clk_get_io();
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Failed to get clock\n");
+ return -ENOENT;
+ }
ltq_io_region_clk_rate = clk_get_rate(clk);
clk_put(clk);
+ /* find out if the watchdog caused the last reboot */
if (ltq_reset_cause() == LTQ_RST_CAUSE_WDTRST)
ltq_wdt_bootstatus = WDIOF_CARDRESET;
+ dev_info(&pdev->dev, "Init done\n");
return misc_register(&ltq_wdt_miscdev);
}
@@ -227,33 +228,26 @@ ltq_wdt_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ltq_wdt_match[] = {
+ { .compatible = "lantiq,wdt" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ltq_wdt_match);
static struct platform_driver ltq_wdt_driver = {
+ .probe = ltq_wdt_probe,
.remove = __devexit_p(ltq_wdt_remove),
.driver = {
- .name = "ltq_wdt",
+ .name = "wdt",
.owner = THIS_MODULE,
+ .of_match_table = ltq_wdt_match,
},
};
-static int __init
-init_ltq_wdt(void)
-{
- return platform_driver_probe(&ltq_wdt_driver, ltq_wdt_probe);
-}
-
-static void __exit
-exit_ltq_wdt(void)
-{
- return platform_driver_unregister(&ltq_wdt_driver);
-}
-
-module_init(init_ltq_wdt);
-module_exit(exit_ltq_wdt);
+module_platform_driver(ltq_wdt_driver);
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
-
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Lantiq SoC Watchdog");
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index 55d2f66..294fb4e 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -297,7 +297,7 @@ static int __devinit xwdt_probe(struct platform_device *pdev)
no_timeout = 0;
- pfreq = (u32 *)of_get_property(pdev->dev.of_node->parent,
+ pfreq = (u32 *)of_get_property(pdev->dev.of_node,
"clock-frequency", NULL);
if (pfreq == NULL) {
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 8285d65..fceec4f 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -126,8 +126,6 @@ static void omap_wdt_set_timeout(struct omap_wdt_dev *wdev)
u32 pre_margin = GET_WLDR_VAL(timer_margin);
void __iomem *base = wdev->base;
- pm_runtime_get_sync(wdev->dev);
-
/* just count up at 32 KHz */
while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04)
cpu_relax();
@@ -135,8 +133,6 @@ static void omap_wdt_set_timeout(struct omap_wdt_dev *wdev)
__raw_writel(pre_margin, base + OMAP_WATCHDOG_LDR);
while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04)
cpu_relax();
-
- pm_runtime_put_sync(wdev->dev);
}
/*
@@ -166,8 +162,6 @@ static int omap_wdt_open(struct inode *inode, struct file *file)
omap_wdt_ping(wdev); /* trigger loading of new timeout value */
omap_wdt_enable(wdev);
- pm_runtime_put_sync(wdev->dev);
-
return nonseekable_open(inode, file);
}
@@ -179,8 +173,6 @@ static int omap_wdt_release(struct inode *inode, struct file *file)
* Shut off the timer unless NOWAYOUT is defined.
*/
#ifndef CONFIG_WATCHDOG_NOWAYOUT
- pm_runtime_get_sync(wdev->dev);
-
omap_wdt_disable(wdev);
pm_runtime_put_sync(wdev->dev);
@@ -199,11 +191,9 @@ static ssize_t omap_wdt_write(struct file *file, const char __user *data,
/* Refresh LOAD_TIME. */
if (len) {
- pm_runtime_get_sync(wdev->dev);
spin_lock(&wdt_lock);
omap_wdt_ping(wdev);
spin_unlock(&wdt_lock);
- pm_runtime_put_sync(wdev->dev);
}
return len;
}
@@ -236,18 +226,15 @@ static long omap_wdt_ioctl(struct file *file, unsigned int cmd,
(int __user *)arg);
return put_user(0, (int __user *)arg);
case WDIOC_KEEPALIVE:
- pm_runtime_get_sync(wdev->dev);
spin_lock(&wdt_lock);
omap_wdt_ping(wdev);
spin_unlock(&wdt_lock);
- pm_runtime_put_sync(wdev->dev);
return 0;
case WDIOC_SETTIMEOUT:
if (get_user(new_margin, (int __user *)arg))
return -EFAULT;
omap_wdt_adjust_timeout(new_margin);
- pm_runtime_get_sync(wdev->dev);
spin_lock(&wdt_lock);
omap_wdt_disable(wdev);
omap_wdt_set_timeout(wdev);
@@ -255,7 +242,6 @@ static long omap_wdt_ioctl(struct file *file, unsigned int cmd,
omap_wdt_ping(wdev);
spin_unlock(&wdt_lock);
- pm_runtime_put_sync(wdev->dev);
/* Fall */
case WDIOC_GETTIMEOUT:
return put_user(timer_margin, (int __user *)arg);
@@ -363,7 +349,6 @@ static void omap_wdt_shutdown(struct platform_device *pdev)
struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
if (wdev->omap_wdt_users) {
- pm_runtime_get_sync(wdev->dev);
omap_wdt_disable(wdev);
pm_runtime_put_sync(wdev->dev);
}
@@ -403,7 +388,6 @@ static int omap_wdt_suspend(struct platform_device *pdev, pm_message_t state)
struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
if (wdev->omap_wdt_users) {
- pm_runtime_get_sync(wdev->dev);
omap_wdt_disable(wdev);
pm_runtime_put_sync(wdev->dev);
}
@@ -419,7 +403,6 @@ static int omap_wdt_resume(struct platform_device *pdev)
pm_runtime_get_sync(wdev->dev);
omap_wdt_enable(wdev);
omap_wdt_ping(wdev);
- pm_runtime_put_sync(wdev->dev);
}
return 0;
@@ -430,6 +413,12 @@ static int omap_wdt_resume(struct platform_device *pdev)
#define omap_wdt_resume NULL
#endif
+static const struct of_device_id omap_wdt_of_match[] = {
+ { .compatible = "ti,omap3-wdt", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_wdt_of_match);
+
static struct platform_driver omap_wdt_driver = {
.probe = omap_wdt_probe,
.remove = __devexit_p(omap_wdt_remove),
@@ -439,6 +428,7 @@ static struct platform_driver omap_wdt_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "omap_wdt",
+ .of_match_table = omap_wdt_of_match,
},
};
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 0f57369..a73bea4 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -16,22 +16,21 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#include <linux/init.h>
-#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/clk.h>
+#include <linux/err.h>
#include <mach/bridge-regs.h>
/*
* Watchdog timer block registers.
*/
#define TIMER_CTRL 0x0000
-#define WDT_EN 0x0010
+#define WDT_EN 0x0010
#define WDT_VAL 0x0024
#define WDT_MAX_CYCLE_COUNT 0xffffffff
@@ -44,27 +43,27 @@ static unsigned int wdt_max_duration; /* (seconds) */
static struct clk *clk;
static unsigned int wdt_tclk;
static void __iomem *wdt_reg;
-static unsigned long wdt_status;
static DEFINE_SPINLOCK(wdt_lock);
-static void orion_wdt_ping(void)
+static int orion_wdt_ping(struct watchdog_device *wdt_dev)
{
spin_lock(&wdt_lock);
/* Reload watchdog duration */
- writel(wdt_tclk * heartbeat, wdt_reg + WDT_VAL);
+ writel(wdt_tclk * wdt_dev->timeout, wdt_reg + WDT_VAL);
spin_unlock(&wdt_lock);
+ return 0;
}
-static void orion_wdt_enable(void)
+static int orion_wdt_start(struct watchdog_device *wdt_dev)
{
u32 reg;
spin_lock(&wdt_lock);
/* Set watchdog duration */
- writel(wdt_tclk * heartbeat, wdt_reg + WDT_VAL);
+ writel(wdt_tclk * wdt_dev->timeout, wdt_reg + WDT_VAL);
/* Clear watchdog timer interrupt */
reg = readl(BRIDGE_CAUSE);
@@ -82,9 +81,10 @@ static void orion_wdt_enable(void)
writel(reg, RSTOUTn_MASK);
spin_unlock(&wdt_lock);
+ return 0;
}
-static void orion_wdt_disable(void)
+static int orion_wdt_stop(struct watchdog_device *wdt_dev)
{
u32 reg;
@@ -101,139 +101,44 @@ static void orion_wdt_disable(void)
writel(reg, wdt_reg + TIMER_CTRL);
spin_unlock(&wdt_lock);
+ return 0;
}
-static int orion_wdt_get_timeleft(int *time_left)
+static unsigned int orion_wdt_get_timeleft(struct watchdog_device *wdt_dev)
{
+ unsigned int time_left;
+
spin_lock(&wdt_lock);
- *time_left = readl(wdt_reg + WDT_VAL) / wdt_tclk;
+ time_left = readl(wdt_reg + WDT_VAL) / wdt_tclk;
spin_unlock(&wdt_lock);
- return 0;
-}
-static int orion_wdt_open(struct inode *inode, struct file *file)
-{
- if (test_and_set_bit(WDT_IN_USE, &wdt_status))
- return -EBUSY;
- clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
- orion_wdt_enable();
- return nonseekable_open(inode, file);
+ return time_left;
}
-static ssize_t orion_wdt_write(struct file *file, const char *data,
- size_t len, loff_t *ppos)
+static int orion_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int timeout)
{
- if (len) {
- if (!nowayout) {
- size_t i;
-
- clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
- for (i = 0; i != len; i++) {
- char c;
-
- if (get_user(c, data + i))
- return -EFAULT;
- if (c == 'V')
- set_bit(WDT_OK_TO_CLOSE, &wdt_status);
- }
- }
- orion_wdt_ping();
- }
- return len;
-}
-
-static int orion_wdt_settimeout(int new_time)
-{
- if ((new_time <= 0) || (new_time > wdt_max_duration))
- return -EINVAL;
-
- /* Set new watchdog time to be used when
- * orion_wdt_enable() or orion_wdt_ping() is called. */
- heartbeat = new_time;
+ wdt_dev->timeout = timeout;
return 0;
}
-static const struct watchdog_info ident = {
- .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT |
- WDIOF_KEEPALIVEPING,
- .identity = "Orion Watchdog",
+static const struct watchdog_info orion_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .identity = "Orion Watchdog",
};
-static long orion_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int ret = -ENOTTY;
- int time;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- ret = copy_to_user((struct watchdog_info *)arg, &ident,
- sizeof(ident)) ? -EFAULT : 0;
- break;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- ret = put_user(0, (int *)arg);
- break;
-
- case WDIOC_KEEPALIVE:
- orion_wdt_ping();
- ret = 0;
- break;
-
- case WDIOC_SETTIMEOUT:
- ret = get_user(time, (int *)arg);
- if (ret)
- break;
-
- if (orion_wdt_settimeout(time)) {
- ret = -EINVAL;
- break;
- }
- orion_wdt_ping();
- /* Fall through */
-
- case WDIOC_GETTIMEOUT:
- ret = put_user(heartbeat, (int *)arg);
- break;
-
- case WDIOC_GETTIMELEFT:
- if (orion_wdt_get_timeleft(&time)) {
- ret = -EINVAL;
- break;
- }
- ret = put_user(time, (int *)arg);
- break;
- }
- return ret;
-}
-
-static int orion_wdt_release(struct inode *inode, struct file *file)
-{
- if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
- orion_wdt_disable();
- else
- pr_crit("Device closed unexpectedly - timer will not stop\n");
- clear_bit(WDT_IN_USE, &wdt_status);
- clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
-
- return 0;
-}
-
-
-static const struct file_operations orion_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = orion_wdt_write,
- .unlocked_ioctl = orion_wdt_ioctl,
- .open = orion_wdt_open,
- .release = orion_wdt_release,
+static const struct watchdog_ops orion_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = orion_wdt_start,
+ .stop = orion_wdt_stop,
+ .ping = orion_wdt_ping,
+ .set_timeout = orion_wdt_set_timeout,
+ .get_timeleft = orion_wdt_get_timeleft,
};
-static struct miscdevice orion_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &orion_wdt_fops,
+static struct watchdog_device orion_wdt = {
+ .info = &orion_wdt_info,
+ .ops = &orion_wdt_ops,
};
static int __devinit orion_wdt_probe(struct platform_device *pdev)
@@ -241,29 +146,34 @@ static int __devinit orion_wdt_probe(struct platform_device *pdev)
struct resource *res;
int ret;
- clk = clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
- printk(KERN_ERR "Orion Watchdog missing clock\n");
+ dev_err(&pdev->dev, "Orion Watchdog missing clock\n");
return -ENODEV;
}
clk_prepare_enable(clk);
wdt_tclk = clk_get_rate(clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- wdt_reg = ioremap(res->start, resource_size(res));
-
- if (orion_wdt_miscdev.parent)
- return -EBUSY;
- orion_wdt_miscdev.parent = &pdev->dev;
+ wdt_reg = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!wdt_reg)
+ return -ENOMEM;
wdt_max_duration = WDT_MAX_CYCLE_COUNT / wdt_tclk;
- if (orion_wdt_settimeout(heartbeat))
+
+ if ((heartbeat < 1) || (heartbeat > wdt_max_duration))
heartbeat = wdt_max_duration;
- ret = misc_register(&orion_wdt_miscdev);
- if (ret)
+ orion_wdt.timeout = heartbeat;
+ orion_wdt.min_timeout = 1;
+ orion_wdt.max_timeout = wdt_max_duration;
+
+ watchdog_set_nowayout(&orion_wdt, nowayout);
+ ret = watchdog_register_device(&orion_wdt);
+ if (ret) {
+ clk_disable_unprepare(clk);
return ret;
+ }
pr_info("Initial timeout %d sec%s\n",
heartbeat, nowayout ? ", nowayout" : "");
@@ -272,27 +182,14 @@ static int __devinit orion_wdt_probe(struct platform_device *pdev)
static int __devexit orion_wdt_remove(struct platform_device *pdev)
{
- int ret;
-
- if (test_bit(WDT_IN_USE, &wdt_status)) {
- orion_wdt_disable();
- clear_bit(WDT_IN_USE, &wdt_status);
- }
-
- ret = misc_deregister(&orion_wdt_miscdev);
- if (!ret)
- orion_wdt_miscdev.parent = NULL;
-
+ watchdog_unregister_device(&orion_wdt);
clk_disable_unprepare(clk);
- clk_put(clk);
-
- return ret;
+ return 0;
}
static void orion_wdt_shutdown(struct platform_device *pdev)
{
- if (test_bit(WDT_IN_USE, &wdt_status))
- orion_wdt_disable();
+ orion_wdt_stop(&orion_wdt);
}
static struct platform_driver orion_wdt_driver = {
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 200ece5..9245b4d 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -519,21 +519,7 @@ static struct platform_driver s3c2410wdt_driver = {
},
};
-
-static int __init watchdog_init(void)
-{
- pr_info("S3C2410 Watchdog Timer, (c) 2004 Simtec Electronics\n");
-
- return platform_driver_register(&s3c2410wdt_driver);
-}
-
-static void __exit watchdog_exit(void)
-{
- platform_driver_unregister(&s3c2410wdt_driver);
-}
-
-module_init(watchdog_init);
-module_exit(watchdog_exit);
+module_platform_driver(s3c2410wdt_driver);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>, "
"Dimitry Andric <dimitry.andric@tomtom.com>");
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index f847700..9681ada 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -136,6 +136,8 @@ static void sch311x_wdt_set_timeout(int t)
static void sch311x_wdt_start(void)
{
+ unsigned char t;
+
spin_lock(&sch311x_wdt_data.io_lock);
/* set watchdog's timeout */
@@ -149,7 +151,8 @@ static void sch311x_wdt_start(void)
* Bit 4-6 (Reserved)
* Bit 7, Output Type: 0 = Push Pull Bit, 1 = Open Drain
*/
- outb(0x0e, sch311x_wdt_data.runtime_reg + GP60);
+ t = inb(sch311x_wdt_data.runtime_reg + GP60);
+ outb((t & ~0x0d) | 0x0c, sch311x_wdt_data.runtime_reg + GP60);
spin_unlock(&sch311x_wdt_data.io_lock);
@@ -157,10 +160,13 @@ static void sch311x_wdt_start(void)
static void sch311x_wdt_stop(void)
{
+ unsigned char t;
+
spin_lock(&sch311x_wdt_data.io_lock);
/* stop the watchdog */
- outb(0x01, sch311x_wdt_data.runtime_reg + GP60);
+ t = inb(sch311x_wdt_data.runtime_reg + GP60);
+ outb((t & ~0x0d) | 0x01, sch311x_wdt_data.runtime_reg + GP60);
/* disable timeout by setting it to 0 */
sch311x_wdt_set_timeout(0);
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index bbb170e..e4841c3 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -4,7 +4,7 @@
* Watchdog driver for ARM SP805 watchdog module
*
* Copyright (C) 2010 ST Microelectronics
- * Viresh Kumar<viresh.kumar@st.com>
+ * Viresh Kumar <viresh.linux@gmail.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2 or later. This program is licensed "as is" without any
@@ -16,20 +16,17 @@
#include <linux/amba/bus.h>
#include <linux/bitops.h>
#include <linux/clk.h>
-#include <linux/fs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/math64.h>
-#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/uaccess.h>
#include <linux/watchdog.h>
/* default timeout in seconds */
@@ -56,6 +53,7 @@
/**
* struct sp805_wdt: sp805 wdt device structure
+ * @wdd: instance of struct watchdog_device
* @lock: spin lock protecting dev structure and io access
* @base: base address of wdt
* @clk: clock structure of wdt
@@ -65,24 +63,24 @@
* @timeout: current programmed timeout
*/
struct sp805_wdt {
+ struct watchdog_device wdd;
spinlock_t lock;
void __iomem *base;
struct clk *clk;
struct amba_device *adev;
- unsigned long status;
- #define WDT_BUSY 0
- #define WDT_CAN_BE_CLOSED 1
unsigned int load_val;
unsigned int timeout;
};
-/* local variables */
-static struct sp805_wdt *wdt;
static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout,
+ "Set to 1 to keep watchdog running after device release");
/* This routine finds the load value that will reset the system in the required timeout */
-static void wdt_setload(unsigned int timeout)
+static int wdt_setload(struct watchdog_device *wdd, unsigned int timeout)
{
+ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
u64 load, rate;
rate = clk_get_rate(wdt->clk);
@@ -103,11 +101,14 @@ static void wdt_setload(unsigned int timeout)
/* roundup timeout to closest positive integer value */
wdt->timeout = div_u64((load + 1) * 2 + (rate / 2), rate);
spin_unlock(&wdt->lock);
+
+ return 0;
}
/* returns number of seconds left for reset to occur */
-static u32 wdt_timeleft(void)
+static unsigned int wdt_timeleft(struct watchdog_device *wdd)
{
+ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
u64 load, rate;
rate = clk_get_rate(wdt->clk);
@@ -123,166 +124,96 @@ static u32 wdt_timeleft(void)
return div_u64(load, rate);
}
-/* enables watchdog timers reset */
-static void wdt_enable(void)
+static int wdt_config(struct watchdog_device *wdd, bool ping)
{
- spin_lock(&wdt->lock);
+ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
+ int ret;
- writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
- writel_relaxed(wdt->load_val, wdt->base + WDTLOAD);
- writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
- writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL);
- writel_relaxed(LOCK, wdt->base + WDTLOCK);
+ if (!ping) {
+ ret = clk_prepare(wdt->clk);
+ if (ret) {
+ dev_err(&wdt->adev->dev, "clock prepare fail");
+ return ret;
+ }
- /* Flush posted writes. */
- readl_relaxed(wdt->base + WDTLOCK);
- spin_unlock(&wdt->lock);
-}
+ ret = clk_enable(wdt->clk);
+ if (ret) {
+ dev_err(&wdt->adev->dev, "clock enable fail");
+ clk_unprepare(wdt->clk);
+ return ret;
+ }
+ }
-/* disables watchdog timers reset */
-static void wdt_disable(void)
-{
spin_lock(&wdt->lock);
writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
- writel_relaxed(0, wdt->base + WDTCONTROL);
+ writel_relaxed(wdt->load_val, wdt->base + WDTLOAD);
+
+ if (!ping) {
+ writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
+ writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base +
+ WDTCONTROL);
+ }
+
writel_relaxed(LOCK, wdt->base + WDTLOCK);
/* Flush posted writes. */
readl_relaxed(wdt->base + WDTLOCK);
spin_unlock(&wdt->lock);
+
+ return 0;
}
-static ssize_t sp805_wdt_write(struct file *file, const char *data,
- size_t len, loff_t *ppos)
+static int wdt_ping(struct watchdog_device *wdd)
{
- if (len) {
- if (!nowayout) {
- size_t i;
-
- clear_bit(WDT_CAN_BE_CLOSED, &wdt->status);
-
- for (i = 0; i != len; i++) {
- char c;
-
- if (get_user(c, data + i))
- return -EFAULT;
- /* Check for Magic Close character */
- if (c == 'V') {
- set_bit(WDT_CAN_BE_CLOSED,
- &wdt->status);
- break;
- }
- }
- }
- wdt_enable();
- }
- return len;
+ return wdt_config(wdd, true);
}
-static const struct watchdog_info ident = {
- .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
- .identity = MODULE_NAME,
-};
-
-static long sp805_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+/* enables watchdog timers reset */
+static int wdt_enable(struct watchdog_device *wdd)
{
- int ret = -ENOTTY;
- unsigned int timeout;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- ret = copy_to_user((struct watchdog_info *)arg, &ident,
- sizeof(ident)) ? -EFAULT : 0;
- break;
-
- case WDIOC_GETSTATUS:
- ret = put_user(0, (int *)arg);
- break;
-
- case WDIOC_KEEPALIVE:
- wdt_enable();
- ret = 0;
- break;
-
- case WDIOC_SETTIMEOUT:
- ret = get_user(timeout, (unsigned int *)arg);
- if (ret)
- break;
-
- wdt_setload(timeout);
-
- wdt_enable();
- /* Fall through */
-
- case WDIOC_GETTIMEOUT:
- ret = put_user(wdt->timeout, (unsigned int *)arg);
- break;
- case WDIOC_GETTIMELEFT:
- ret = put_user(wdt_timeleft(), (unsigned int *)arg);
- break;
- }
- return ret;
+ return wdt_config(wdd, false);
}
-static int sp805_wdt_open(struct inode *inode, struct file *file)
+/* disables watchdog timers reset */
+static int wdt_disable(struct watchdog_device *wdd)
{
- int ret = 0;
-
- if (test_and_set_bit(WDT_BUSY, &wdt->status))
- return -EBUSY;
-
- ret = clk_enable(wdt->clk);
- if (ret) {
- dev_err(&wdt->adev->dev, "clock enable fail");
- goto err;
- }
-
- wdt_enable();
+ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd);
- /* can not be closed, once enabled */
- clear_bit(WDT_CAN_BE_CLOSED, &wdt->status);
- return nonseekable_open(inode, file);
+ spin_lock(&wdt->lock);
-err:
- clear_bit(WDT_BUSY, &wdt->status);
- return ret;
-}
+ writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
+ writel_relaxed(0, wdt->base + WDTCONTROL);
+ writel_relaxed(LOCK, wdt->base + WDTLOCK);
-static int sp805_wdt_release(struct inode *inode, struct file *file)
-{
- if (!test_bit(WDT_CAN_BE_CLOSED, &wdt->status)) {
- clear_bit(WDT_BUSY, &wdt->status);
- dev_warn(&wdt->adev->dev, "Device closed unexpectedly\n");
- return 0;
- }
+ /* Flush posted writes. */
+ readl_relaxed(wdt->base + WDTLOCK);
+ spin_unlock(&wdt->lock);
- wdt_disable();
clk_disable(wdt->clk);
- clear_bit(WDT_BUSY, &wdt->status);
+ clk_unprepare(wdt->clk);
return 0;
}
-static const struct file_operations sp805_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = sp805_wdt_write,
- .unlocked_ioctl = sp805_wdt_ioctl,
- .open = sp805_wdt_open,
- .release = sp805_wdt_release,
+static const struct watchdog_info wdt_info = {
+ .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
+ .identity = MODULE_NAME,
};
-static struct miscdevice sp805_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &sp805_wdt_fops,
+static const struct watchdog_ops wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = wdt_enable,
+ .stop = wdt_disable,
+ .ping = wdt_ping,
+ .set_timeout = wdt_setload,
+ .get_timeleft = wdt_timeleft,
};
static int __devinit
sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
{
+ struct sp805_wdt *wdt;
int ret = 0;
if (!devm_request_mem_region(&adev->dev, adev->res.start,
@@ -315,19 +246,26 @@ sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
}
wdt->adev = adev;
+ wdt->wdd.info = &wdt_info;
+ wdt->wdd.ops = &wdt_ops;
+
spin_lock_init(&wdt->lock);
- wdt_setload(DEFAULT_TIMEOUT);
+ watchdog_set_nowayout(&wdt->wdd, nowayout);
+ watchdog_set_drvdata(&wdt->wdd, wdt);
+ wdt_setload(&wdt->wdd, DEFAULT_TIMEOUT);
- ret = misc_register(&sp805_wdt_miscdev);
- if (ret < 0) {
- dev_warn(&adev->dev, "cannot register misc device\n");
- goto err_misc_register;
+ ret = watchdog_register_device(&wdt->wdd);
+ if (ret) {
+ dev_err(&adev->dev, "watchdog_register_device() failed: %d\n",
+ ret);
+ goto err_register;
}
+ amba_set_drvdata(adev, wdt);
dev_info(&adev->dev, "registration successful\n");
return 0;
-err_misc_register:
+err_register:
clk_put(wdt->clk);
err:
dev_err(&adev->dev, "Probe Failed!!!\n");
@@ -336,7 +274,11 @@ err:
static int __devexit sp805_wdt_remove(struct amba_device *adev)
{
- misc_deregister(&sp805_wdt_miscdev);
+ struct sp805_wdt *wdt = amba_get_drvdata(adev);
+
+ watchdog_unregister_device(&wdt->wdd);
+ amba_set_drvdata(adev, NULL);
+ watchdog_set_drvdata(&wdt->wdd, NULL);
clk_put(wdt->clk);
return 0;
@@ -345,28 +287,22 @@ static int __devexit sp805_wdt_remove(struct amba_device *adev)
#ifdef CONFIG_PM
static int sp805_wdt_suspend(struct device *dev)
{
- if (test_bit(WDT_BUSY, &wdt->status)) {
- wdt_disable();
- clk_disable(wdt->clk);
- }
+ struct sp805_wdt *wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&wdt->wdd))
+ return wdt_disable(&wdt->wdd);
return 0;
}
static int sp805_wdt_resume(struct device *dev)
{
- int ret = 0;
+ struct sp805_wdt *wdt = dev_get_drvdata(dev);
- if (test_bit(WDT_BUSY, &wdt->status)) {
- ret = clk_enable(wdt->clk);
- if (ret) {
- dev_err(dev, "clock enable fail");
- return ret;
- }
- wdt_enable();
- }
+ if (watchdog_active(&wdt->wdd))
+ return wdt_enable(&wdt->wdd);
- return ret;
+ return 0;
}
#endif /* CONFIG_PM */
@@ -395,11 +331,6 @@ static struct amba_driver sp805_wdt_driver = {
module_amba_driver(sp805_wdt_driver);
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout,
- "Set to 1 to keep watchdog running after device release");
-
-MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_DESCRIPTION("ARM SP805 Watchdog Driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
index 5603e31..aa50da3 100644
--- a/drivers/watchdog/via_wdt.c
+++ b/drivers/watchdog/via_wdt.c
@@ -91,7 +91,7 @@ static inline void wdt_reset(void)
static void wdt_timer_tick(unsigned long data)
{
if (time_before(jiffies, next_heartbeat) ||
- (!test_bit(WDOG_ACTIVE, &wdt_dev.status))) {
+ (!watchdog_active(&wdt_dev))) {
wdt_reset();
mod_timer(&timer, jiffies + WDT_HEARTBEAT);
} else
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 14d768b..6aa46a9 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -34,8 +34,13 @@
#include <linux/kernel.h> /* For printk/panic/... */
#include <linux/watchdog.h> /* For watchdog specific items */
#include <linux/init.h> /* For __init/__exit/... */
+#include <linux/idr.h> /* For ida_* macros */
+#include <linux/err.h> /* For IS_ERR macros */
-#include "watchdog_dev.h" /* For watchdog_dev_register/... */
+#include "watchdog_core.h" /* For watchdog_dev_register/... */
+
+static DEFINE_IDA(watchdog_ida);
+static struct class *watchdog_class;
/**
* watchdog_register_device() - register a watchdog device
@@ -49,7 +54,7 @@
*/
int watchdog_register_device(struct watchdog_device *wdd)
{
- int ret;
+ int ret, id, devno;
if (wdd == NULL || wdd->info == NULL || wdd->ops == NULL)
return -EINVAL;
@@ -74,10 +79,38 @@ int watchdog_register_device(struct watchdog_device *wdd)
* corrupted in a later stage then we expect a kernel panic!
*/
- /* We only support 1 watchdog device via the /dev/watchdog interface */
+ mutex_init(&wdd->lock);
+ id = ida_simple_get(&watchdog_ida, 0, MAX_DOGS, GFP_KERNEL);
+ if (id < 0)
+ return id;
+ wdd->id = id;
+
ret = watchdog_dev_register(wdd);
if (ret) {
- pr_err("error registering /dev/watchdog (err=%d)\n", ret);
+ ida_simple_remove(&watchdog_ida, id);
+ if (!(id == 0 && ret == -EBUSY))
+ return ret;
+
+ /* Retry in case a legacy watchdog module exists */
+ id = ida_simple_get(&watchdog_ida, 1, MAX_DOGS, GFP_KERNEL);
+ if (id < 0)
+ return id;
+ wdd->id = id;
+
+ ret = watchdog_dev_register(wdd);
+ if (ret) {
+ ida_simple_remove(&watchdog_ida, id);
+ return ret;
+ }
+ }
+
+ devno = wdd->cdev.dev;
+ wdd->dev = device_create(watchdog_class, wdd->parent, devno,
+ NULL, "watchdog%d", wdd->id);
+ if (IS_ERR(wdd->dev)) {
+ watchdog_dev_unregister(wdd);
+ ida_simple_remove(&watchdog_ida, id);
+ ret = PTR_ERR(wdd->dev);
return ret;
}
@@ -95,6 +128,7 @@ EXPORT_SYMBOL_GPL(watchdog_register_device);
void watchdog_unregister_device(struct watchdog_device *wdd)
{
int ret;
+ int devno = wdd->cdev.dev;
if (wdd == NULL)
return;
@@ -102,9 +136,41 @@ void watchdog_unregister_device(struct watchdog_device *wdd)
ret = watchdog_dev_unregister(wdd);
if (ret)
pr_err("error unregistering /dev/watchdog (err=%d)\n", ret);
+ device_destroy(watchdog_class, devno);
+ ida_simple_remove(&watchdog_ida, wdd->id);
+ wdd->dev = NULL;
}
EXPORT_SYMBOL_GPL(watchdog_unregister_device);
+static int __init watchdog_init(void)
+{
+ int err;
+
+ watchdog_class = class_create(THIS_MODULE, "watchdog");
+ if (IS_ERR(watchdog_class)) {
+ pr_err("couldn't create class\n");
+ return PTR_ERR(watchdog_class);
+ }
+
+ err = watchdog_dev_init();
+ if (err < 0) {
+ class_destroy(watchdog_class);
+ return err;
+ }
+
+ return 0;
+}
+
+static void __exit watchdog_exit(void)
+{
+ watchdog_dev_exit();
+ class_destroy(watchdog_class);
+ ida_destroy(&watchdog_ida);
+}
+
+subsys_initcall(watchdog_init);
+module_exit(watchdog_exit);
+
MODULE_AUTHOR("Alan Cox <alan@lxorguk.ukuu.org.uk>");
MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
MODULE_DESCRIPTION("WatchDog Timer Driver Core");
diff --git a/drivers/watchdog/watchdog_dev.h b/drivers/watchdog/watchdog_core.h
index bc7612b..6c95141 100644
--- a/drivers/watchdog/watchdog_dev.h
+++ b/drivers/watchdog/watchdog_core.h
@@ -26,8 +26,12 @@
* This material is provided "AS-IS" and at no charge.
*/
+#define MAX_DOGS 32 /* Maximum number of watchdog devices */
+
/*
* Functions/procedures to be called by the core
*/
-int watchdog_dev_register(struct watchdog_device *);
-int watchdog_dev_unregister(struct watchdog_device *);
+extern int watchdog_dev_register(struct watchdog_device *);
+extern int watchdog_dev_unregister(struct watchdog_device *);
+extern int __init watchdog_dev_init(void);
+extern void __exit watchdog_dev_exit(void);
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 8558da9..ef8edec 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -42,10 +42,12 @@
#include <linux/init.h> /* For __init/__exit/... */
#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
-/* make sure we only register one /dev/watchdog device */
-static unsigned long watchdog_dev_busy;
+#include "watchdog_core.h"
+
+/* the dev_t structure to store the dynamically allocated watchdog devices */
+static dev_t watchdog_devt;
/* the watchdog device behind /dev/watchdog */
-static struct watchdog_device *wdd;
+static struct watchdog_device *old_wdd;
/*
* watchdog_ping: ping the watchdog.
@@ -59,13 +61,26 @@ static struct watchdog_device *wdd;
static int watchdog_ping(struct watchdog_device *wddev)
{
- if (test_bit(WDOG_ACTIVE, &wddev->status)) {
- if (wddev->ops->ping)
- return wddev->ops->ping(wddev); /* ping the watchdog */
- else
- return wddev->ops->start(wddev); /* restart watchdog */
+ int err = 0;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_ping;
}
- return 0;
+
+ if (!watchdog_active(wddev))
+ goto out_ping;
+
+ if (wddev->ops->ping)
+ err = wddev->ops->ping(wddev); /* ping the watchdog */
+ else
+ err = wddev->ops->start(wddev); /* restart watchdog */
+
+out_ping:
+ mutex_unlock(&wddev->lock);
+ return err;
}
/*
@@ -79,16 +94,25 @@ static int watchdog_ping(struct watchdog_device *wddev)
static int watchdog_start(struct watchdog_device *wddev)
{
- int err;
+ int err = 0;
- if (!test_bit(WDOG_ACTIVE, &wddev->status)) {
- err = wddev->ops->start(wddev);
- if (err < 0)
- return err;
+ mutex_lock(&wddev->lock);
- set_bit(WDOG_ACTIVE, &wddev->status);
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_start;
}
- return 0;
+
+ if (watchdog_active(wddev))
+ goto out_start;
+
+ err = wddev->ops->start(wddev);
+ if (err == 0)
+ set_bit(WDOG_ACTIVE, &wddev->status);
+
+out_start:
+ mutex_unlock(&wddev->lock);
+ return err;
}
/*
@@ -103,22 +127,155 @@ static int watchdog_start(struct watchdog_device *wddev)
static int watchdog_stop(struct watchdog_device *wddev)
{
- int err = -EBUSY;
+ int err = 0;
- if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {
- pr_info("%s: nowayout prevents watchdog to be stopped!\n",
- wddev->info->identity);
- return err;
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_stop;
}
- if (test_bit(WDOG_ACTIVE, &wddev->status)) {
- err = wddev->ops->stop(wddev);
- if (err < 0)
- return err;
+ if (!watchdog_active(wddev))
+ goto out_stop;
+ if (test_bit(WDOG_NO_WAY_OUT, &wddev->status)) {
+ dev_info(wddev->dev, "nowayout prevents watchdog being stopped!\n");
+ err = -EBUSY;
+ goto out_stop;
+ }
+
+ err = wddev->ops->stop(wddev);
+ if (err == 0)
clear_bit(WDOG_ACTIVE, &wddev->status);
+
+out_stop:
+ mutex_unlock(&wddev->lock);
+ return err;
+}
+
+/*
+ * watchdog_get_status: wrapper to get the watchdog status
+ * @wddev: the watchdog device to get the status from
+ * @status: the status of the watchdog device
+ *
+ * Get the watchdog's status flags.
+ */
+
+static int watchdog_get_status(struct watchdog_device *wddev,
+ unsigned int *status)
+{
+ int err = 0;
+
+ *status = 0;
+ if (!wddev->ops->status)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_status;
}
- return 0;
+
+ *status = wddev->ops->status(wddev);
+
+out_status:
+ mutex_unlock(&wddev->lock);
+ return err;
+}
+
+/*
+ * watchdog_set_timeout: set the watchdog timer timeout
+ * @wddev: the watchdog device to set the timeout for
+ * @timeout: timeout to set in seconds
+ */
+
+static int watchdog_set_timeout(struct watchdog_device *wddev,
+ unsigned int timeout)
+{
+ int err;
+
+ if ((wddev->ops->set_timeout == NULL) ||
+ !(wddev->info->options & WDIOF_SETTIMEOUT))
+ return -EOPNOTSUPP;
+
+ if ((wddev->max_timeout != 0) &&
+ (timeout < wddev->min_timeout || timeout > wddev->max_timeout))
+ return -EINVAL;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_timeout;
+ }
+
+ err = wddev->ops->set_timeout(wddev, timeout);
+
+out_timeout:
+ mutex_unlock(&wddev->lock);
+ return err;
+}
+
+/*
+ * watchdog_get_timeleft: wrapper to get the time left before a reboot
+ * @wddev: the watchdog device to get the remaining time from
+ * @timeleft: the time that's left
+ *
+ * Get the time before a watchdog will reboot (if not pinged).
+ */
+
+static int watchdog_get_timeleft(struct watchdog_device *wddev,
+ unsigned int *timeleft)
+{
+ int err = 0;
+
+ *timeleft = 0;
+ if (!wddev->ops->get_timeleft)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_timeleft;
+ }
+
+ *timeleft = wddev->ops->get_timeleft(wddev);
+
+out_timeleft:
+ mutex_unlock(&wddev->lock);
+ return err;
+}
+
+/*
+ * watchdog_ioctl_op: call the watchdog drivers ioctl op if defined
+ * @wddev: the watchdog device to do the ioctl on
+ * @cmd: watchdog command
+ * @arg: argument pointer
+ */
+
+static int watchdog_ioctl_op(struct watchdog_device *wddev, unsigned int cmd,
+ unsigned long arg)
+{
+ int err;
+
+ if (!wddev->ops->ioctl)
+ return -ENOIOCTLCMD;
+
+ mutex_lock(&wddev->lock);
+
+ if (test_bit(WDOG_UNREGISTERED, &wddev->status)) {
+ err = -ENODEV;
+ goto out_ioctl;
+ }
+
+ err = wddev->ops->ioctl(wddev, cmd, arg);
+
+out_ioctl:
+ mutex_unlock(&wddev->lock);
+ return err;
}
/*
@@ -136,6 +293,7 @@ static int watchdog_stop(struct watchdog_device *wddev)
static ssize_t watchdog_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
+ struct watchdog_device *wdd = file->private_data;
size_t i;
char c;
@@ -175,23 +333,24 @@ static ssize_t watchdog_write(struct file *file, const char __user *data,
static long watchdog_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
+ struct watchdog_device *wdd = file->private_data;
void __user *argp = (void __user *)arg;
int __user *p = argp;
unsigned int val;
int err;
- if (wdd->ops->ioctl) {
- err = wdd->ops->ioctl(wdd, cmd, arg);
- if (err != -ENOIOCTLCMD)
- return err;
- }
+ err = watchdog_ioctl_op(wdd, cmd, arg);
+ if (err != -ENOIOCTLCMD)
+ return err;
switch (cmd) {
case WDIOC_GETSUPPORT:
return copy_to_user(argp, wdd->info,
sizeof(struct watchdog_info)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
- val = wdd->ops->status ? wdd->ops->status(wdd) : 0;
+ err = watchdog_get_status(wdd, &val);
+ if (err == -ENODEV)
+ return err;
return put_user(val, p);
case WDIOC_GETBOOTSTATUS:
return put_user(wdd->bootstatus, p);
@@ -215,15 +374,9 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
watchdog_ping(wdd);
return 0;
case WDIOC_SETTIMEOUT:
- if ((wdd->ops->set_timeout == NULL) ||
- !(wdd->info->options & WDIOF_SETTIMEOUT))
- return -EOPNOTSUPP;
if (get_user(val, p))
return -EFAULT;
- if ((wdd->max_timeout != 0) &&
- (val < wdd->min_timeout || val > wdd->max_timeout))
- return -EINVAL;
- err = wdd->ops->set_timeout(wdd, val);
+ err = watchdog_set_timeout(wdd, val);
if (err < 0)
return err;
/* If the watchdog is active then we send a keepalive ping
@@ -237,21 +390,21 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
return -EOPNOTSUPP;
return put_user(wdd->timeout, p);
case WDIOC_GETTIMELEFT:
- if (!wdd->ops->get_timeleft)
- return -EOPNOTSUPP;
-
- return put_user(wdd->ops->get_timeleft(wdd), p);
+ err = watchdog_get_timeleft(wdd, &val);
+ if (err)
+ return err;
+ return put_user(val, p);
default:
return -ENOTTY;
}
}
/*
- * watchdog_open: open the /dev/watchdog device.
+ * watchdog_open: open the /dev/watchdog* devices.
* @inode: inode of device
* @file: file handle to device
*
- * When the /dev/watchdog device gets opened, we start the watchdog.
+ * When the /dev/watchdog* device gets opened, we start the watchdog.
* Watch out: the /dev/watchdog device is single open, so we make sure
* it can only be opened once.
*/
@@ -259,6 +412,13 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd,
static int watchdog_open(struct inode *inode, struct file *file)
{
int err = -EBUSY;
+ struct watchdog_device *wdd;
+
+ /* Get the corresponding watchdog device */
+ if (imajor(inode) == MISC_MAJOR)
+ wdd = old_wdd;
+ else
+ wdd = container_of(inode->i_cdev, struct watchdog_device, cdev);
/* the watchdog is single open! */
if (test_and_set_bit(WDOG_DEV_OPEN, &wdd->status))
@@ -275,6 +435,11 @@ static int watchdog_open(struct inode *inode, struct file *file)
if (err < 0)
goto out_mod;
+ file->private_data = wdd;
+
+ if (wdd->ops->ref)
+ wdd->ops->ref(wdd);
+
/* dev/watchdog is a virtual (and thus non-seekable) filesystem */
return nonseekable_open(inode, file);
@@ -286,9 +451,9 @@ out:
}
/*
- * watchdog_release: release the /dev/watchdog device.
- * @inode: inode of device
- * @file: file handle to device
+ * watchdog_release: release the watchdog device.
+ * @inode: inode of device
+ * @file: file handle to device
*
* This is the code for when /dev/watchdog gets closed. We will only
* stop the watchdog when we have received the magic char (and nowayout
@@ -297,6 +462,7 @@ out:
static int watchdog_release(struct inode *inode, struct file *file)
{
+ struct watchdog_device *wdd = file->private_data;
int err = -EBUSY;
/*
@@ -310,7 +476,10 @@ static int watchdog_release(struct inode *inode, struct file *file)
/* If the watchdog was not stopped, send a keepalive ping */
if (err < 0) {
- pr_crit("%s: watchdog did not stop!\n", wdd->info->identity);
+ mutex_lock(&wdd->lock);
+ if (!test_bit(WDOG_UNREGISTERED, &wdd->status))
+ dev_crit(wdd->dev, "watchdog did not stop!\n");
+ mutex_unlock(&wdd->lock);
watchdog_ping(wdd);
}
@@ -320,6 +489,10 @@ static int watchdog_release(struct inode *inode, struct file *file)
/* make sure that /dev/watchdog can be re-opened */
clear_bit(WDOG_DEV_OPEN, &wdd->status);
+ /* Note wdd may be gone after this, do not use after this! */
+ if (wdd->ops->unref)
+ wdd->ops->unref(wdd);
+
return 0;
}
@@ -338,62 +511,92 @@ static struct miscdevice watchdog_miscdev = {
};
/*
- * watchdog_dev_register:
+ * watchdog_dev_register: register a watchdog device
* @watchdog: watchdog device
*
- * Register a watchdog device as /dev/watchdog. /dev/watchdog
- * is actually a miscdevice and thus we set it up like that.
+ * Register a watchdog device including handling the legacy
+ * /dev/watchdog node. /dev/watchdog is actually a miscdevice and
+ * thus we set it up like that.
*/
int watchdog_dev_register(struct watchdog_device *watchdog)
{
- int err;
-
- /* Only one device can register for /dev/watchdog */
- if (test_and_set_bit(0, &watchdog_dev_busy)) {
- pr_err("only one watchdog can use /dev/watchdog\n");
- return -EBUSY;
+ int err, devno;
+
+ if (watchdog->id == 0) {
+ watchdog_miscdev.parent = watchdog->parent;
+ err = misc_register(&watchdog_miscdev);
+ if (err != 0) {
+ pr_err("%s: cannot register miscdev on minor=%d (err=%d).\n",
+ watchdog->info->identity, WATCHDOG_MINOR, err);
+ if (err == -EBUSY)
+ pr_err("%s: a legacy watchdog module is probably present.\n",
+ watchdog->info->identity);
+ return err;
+ }
+ old_wdd = watchdog;
}
- wdd = watchdog;
-
- err = misc_register(&watchdog_miscdev);
- if (err != 0) {
- pr_err("%s: cannot register miscdev on minor=%d (err=%d)\n",
- watchdog->info->identity, WATCHDOG_MINOR, err);
- goto out;
+ /* Fill in the data structures */
+ devno = MKDEV(MAJOR(watchdog_devt), watchdog->id);
+ cdev_init(&watchdog->cdev, &watchdog_fops);
+ watchdog->cdev.owner = watchdog->ops->owner;
+
+ /* Add the device */
+ err = cdev_add(&watchdog->cdev, devno, 1);
+ if (err) {
+ pr_err("watchdog%d unable to add device %d:%d\n",
+ watchdog->id, MAJOR(watchdog_devt), watchdog->id);
+ if (watchdog->id == 0) {
+ misc_deregister(&watchdog_miscdev);
+ old_wdd = NULL;
+ }
}
-
- return 0;
-
-out:
- wdd = NULL;
- clear_bit(0, &watchdog_dev_busy);
return err;
}
/*
- * watchdog_dev_unregister:
+ * watchdog_dev_unregister: unregister a watchdog device
* @watchdog: watchdog device
*
- * Deregister the /dev/watchdog device.
+ * Unregister the watchdog and if needed the legacy /dev/watchdog device.
*/
int watchdog_dev_unregister(struct watchdog_device *watchdog)
{
- /* Check that a watchdog device was registered in the past */
- if (!test_bit(0, &watchdog_dev_busy) || !wdd)
- return -ENODEV;
-
- /* We can only unregister the watchdog device that was registered */
- if (watchdog != wdd) {
- pr_err("%s: watchdog was not registered as /dev/watchdog\n",
- watchdog->info->identity);
- return -ENODEV;
+ mutex_lock(&watchdog->lock);
+ set_bit(WDOG_UNREGISTERED, &watchdog->status);
+ mutex_unlock(&watchdog->lock);
+
+ cdev_del(&watchdog->cdev);
+ if (watchdog->id == 0) {
+ misc_deregister(&watchdog_miscdev);
+ old_wdd = NULL;
}
-
- misc_deregister(&watchdog_miscdev);
- wdd = NULL;
- clear_bit(0, &watchdog_dev_busy);
return 0;
}
+
+/*
+ * watchdog_dev_init: init dev part of watchdog core
+ *
+ * Allocate a range of chardev nodes to use for watchdog devices
+ */
+
+int __init watchdog_dev_init(void)
+{
+ int err = alloc_chrdev_region(&watchdog_devt, 0, MAX_DOGS, "watchdog");
+ if (err < 0)
+ pr_err("watchdog: unable to allocate char dev region\n");
+ return err;
+}
+
+/*
+ * watchdog_dev_exit: exit dev part of watchdog core
+ *
+ * Release the range of chardev nodes used for watchdog devices
+ */
+
+void __exit watchdog_dev_exit(void)
+{
+ unregister_chrdev_region(watchdog_devt, MAX_DOGS);
+}
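
To round out the picture: a hardware driver never calls watchdog_dev_register() itself; it fills in a struct watchdog_device and registers it through watchdog_register_device() in watchdog_core.c (not shown in this patch). A minimal, hypothetical driver sketch, with the hardware accesses stubbed out:

/* Hypothetical minimal driver; real hardware accesses are stubbed out. */
#include <linux/module.h>
#include <linux/watchdog.h>

static int my_wdt_start(struct watchdog_device *wdd)
{
	/* kick/start the hardware timer here */
	return 0;
}

static int my_wdt_stop(struct watchdog_device *wdd)
{
	/* stop the hardware timer here */
	return 0;
}

static const struct watchdog_info my_wdt_info = {
	.options  = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
	.identity = "my_wdt",
};

static const struct watchdog_ops my_wdt_ops = {
	.owner = THIS_MODULE,
	.start = my_wdt_start,
	.stop  = my_wdt_stop,
};

static struct watchdog_device my_wdt_dev = {
	.info        = &my_wdt_info,
	.ops         = &my_wdt_ops,
	.min_timeout = 1,
	.max_timeout = 255,
	.timeout     = 30,
};

static int __init my_wdt_init(void)
{
	return watchdog_register_device(&my_wdt_dev);
}

static void __exit my_wdt_exit(void)
{
	watchdog_unregister_device(&my_wdt_dev);
}

module_init(my_wdt_init);
module_exit(my_wdt_exit);
MODULE_LICENSE("GPL");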
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 8d2501e..d4dffcd 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -196,4 +196,12 @@ config XEN_ACPI_PROCESSOR
called xen_acpi_processor. If you do not know what to choose, select
M here. If the CPUFREQ drivers are built in, select Y here.
+config XEN_MCE_LOG
+ bool "Xen platform mcelog"
+ depends on XEN_DOM0 && X86_64 && X86_MCE
+ default n
+ help
+ Allow the kernel to fetch MCE errors from the Xen platform and
+ convert them into the Linux mcelog format for mcelog tools.
+
endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index fc34886..d80bea5 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -17,7 +17,9 @@ obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
obj-$(CONFIG_XEN_PVHVM) += platform-pci.o
obj-$(CONFIG_XEN_TMEM) += tmem.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
+obj-$(CONFIG_XEN_DOM0) += pcpu.o
obj-$(CONFIG_XEN_DOM0) += pci.o acpi.o
+obj-$(CONFIG_XEN_MCE_LOG) += mcelog.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o
obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 6908e4c..7595581 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -827,6 +827,9 @@ int bind_evtchn_to_irq(unsigned int evtchn)
handle_edge_irq, "event");
xen_irq_info_evtchn_init(irq, evtchn);
+ } else {
+ struct irq_info *info = info_for_irq(irq);
+ WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
}
out:
@@ -862,6 +865,9 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
bind_evtchn_to_cpu(evtchn, cpu);
+ } else {
+ struct irq_info *info = info_for_irq(irq);
+ WARN_ON(info == NULL || info->type != IRQT_IPI);
}
out:
@@ -939,6 +945,9 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
xen_irq_info_virq_init(cpu, irq, evtchn, virq);
bind_evtchn_to_cpu(evtchn, cpu);
+ } else {
+ struct irq_info *info = info_for_irq(irq);
+ WARN_ON(info == NULL || info->type != IRQT_VIRQ);
}
out:
diff --git a/drivers/xen/mcelog.c b/drivers/xen/mcelog.c
new file mode 100644
index 0000000..8feee08
--- /dev/null
+++ b/drivers/xen/mcelog.c
@@ -0,0 +1,414 @@
+/******************************************************************************
+ * mcelog.c
+ * Driver for receiving and transferring machine check error information
+ *
+ * Copyright (c) 2012 Intel Corporation
+ * Author: Liu, Jinsong <jinsong.liu@intel.com>
+ * Author: Jiang, Yunhong <yunhong.jiang@intel.com>
+ * Author: Ke, Liping <liping.ke@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/capability.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+
+#include <xen/interface/xen.h>
+#include <xen/events.h>
+#include <xen/interface/vcpu.h>
+#include <xen/xen.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+
+#define XEN_MCELOG "xen_mcelog: "
+
+static struct mc_info g_mi;
+static struct mcinfo_logical_cpu *g_physinfo;
+static uint32_t ncpus;
+
+static DEFINE_MUTEX(mcelog_lock);
+
+static struct xen_mce_log xen_mcelog = {
+ .signature = XEN_MCE_LOG_SIGNATURE,
+ .len = XEN_MCE_LOG_LEN,
+ .recordlen = sizeof(struct xen_mce),
+};
+
+static DEFINE_SPINLOCK(xen_mce_chrdev_state_lock);
+static int xen_mce_chrdev_open_count; /* #times opened */
+static int xen_mce_chrdev_open_exclu; /* already open exclusive? */
+
+static DECLARE_WAIT_QUEUE_HEAD(xen_mce_chrdev_wait);
+
+static int xen_mce_chrdev_open(struct inode *inode, struct file *file)
+{
+ spin_lock(&xen_mce_chrdev_state_lock);
+
+ if (xen_mce_chrdev_open_exclu ||
+ (xen_mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
+ spin_unlock(&xen_mce_chrdev_state_lock);
+
+ return -EBUSY;
+ }
+
+ if (file->f_flags & O_EXCL)
+ xen_mce_chrdev_open_exclu = 1;
+ xen_mce_chrdev_open_count++;
+
+ spin_unlock(&xen_mce_chrdev_state_lock);
+
+ return nonseekable_open(inode, file);
+}
+
+static int xen_mce_chrdev_release(struct inode *inode, struct file *file)
+{
+ spin_lock(&xen_mce_chrdev_state_lock);
+
+ xen_mce_chrdev_open_count--;
+ xen_mce_chrdev_open_exclu = 0;
+
+ spin_unlock(&xen_mce_chrdev_state_lock);
+
+ return 0;
+}
+
+static ssize_t xen_mce_chrdev_read(struct file *filp, char __user *ubuf,
+ size_t usize, loff_t *off)
+{
+ char __user *buf = ubuf;
+ unsigned num;
+ int i, err;
+
+ mutex_lock(&mcelog_lock);
+
+ num = xen_mcelog.next;
+
+ /* Only supports full reads right now */
+ err = -EINVAL;
+ if (*off != 0 || usize < XEN_MCE_LOG_LEN*sizeof(struct xen_mce))
+ goto out;
+
+ err = 0;
+ for (i = 0; i < num; i++) {
+ struct xen_mce *m = &xen_mcelog.entry[i];
+
+ err |= copy_to_user(buf, m, sizeof(*m));
+ buf += sizeof(*m);
+ }
+
+ memset(xen_mcelog.entry, 0, num * sizeof(struct xen_mce));
+ xen_mcelog.next = 0;
+
+ if (err)
+ err = -EFAULT;
+
+out:
+ mutex_unlock(&mcelog_lock);
+
+ return err ? err : buf - ubuf;
+}
+
+static unsigned int xen_mce_chrdev_poll(struct file *file, poll_table *wait)
+{
+ poll_wait(file, &xen_mce_chrdev_wait, wait);
+
+ if (xen_mcelog.next)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static long xen_mce_chrdev_ioctl(struct file *f, unsigned int cmd,
+ unsigned long arg)
+{
+ int __user *p = (int __user *)arg;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ switch (cmd) {
+ case MCE_GET_RECORD_LEN:
+ return put_user(sizeof(struct xen_mce), p);
+ case MCE_GET_LOG_LEN:
+ return put_user(XEN_MCE_LOG_LEN, p);
+ case MCE_GETCLEAR_FLAGS: {
+ unsigned flags;
+
+ do {
+ flags = xen_mcelog.flags;
+ } while (cmpxchg(&xen_mcelog.flags, flags, 0) != flags);
+
+ return put_user(flags, p);
+ }
+ default:
+ return -ENOTTY;
+ }
+}
+
+static const struct file_operations xen_mce_chrdev_ops = {
+ .open = xen_mce_chrdev_open,
+ .release = xen_mce_chrdev_release,
+ .read = xen_mce_chrdev_read,
+ .poll = xen_mce_chrdev_poll,
+ .unlocked_ioctl = xen_mce_chrdev_ioctl,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice xen_mce_chrdev_device = {
+ MISC_MCELOG_MINOR,
+ "mcelog",
+ &xen_mce_chrdev_ops,
+};
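
Because the device deliberately mimics the native x86 /dev/mcelog interface, existing mcelog tooling should work unchanged. As a rough user-space illustration (not part of this patch): the MCE_GET_* ioctls are assumed to come from the architecture's <asm/mce.h>, matching the commands handled in xen_mce_chrdev_ioctl() below, and error handling is abbreviated:

/* Rough user-space sketch: poll /dev/mcelog and read the whole log. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/mce.h>	/* assumed source of MCE_GET_RECORD_LEN, MCE_GET_LOG_LEN */

int main(void)
{
	int fd = open("/dev/mcelog", O_RDONLY);
	int rec_len = 0, log_len = 0;
	struct pollfd pfd;
	char *buf;
	ssize_t n;

	if (fd < 0) {
		perror("open /dev/mcelog");
		return 1;
	}

	ioctl(fd, MCE_GET_RECORD_LEN, &rec_len);
	ioctl(fd, MCE_GET_LOG_LEN, &log_len);

	/* The driver only accepts reads covering the full log. */
	buf = malloc((size_t)rec_len * log_len);
	if (!buf)
		return 1;

	pfd.fd = fd;
	pfd.events = POLLIN;
	poll(&pfd, 1, -1);	/* wait for new records */

	n = read(fd, buf, (size_t)rec_len * log_len);
	printf("read %zd bytes (%zd records)\n", n, rec_len ? n / rec_len : 0);

	free(buf);
	close(fd);
	return 0;
}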
+
+/*
+ * Caller should hold the mcelog_lock
+ */
+static void xen_mce_log(struct xen_mce *mce)
+{
+ unsigned entry;
+
+ entry = xen_mcelog.next;
+
+ /*
+ * When the buffer fills up, discard new entries.
+ * Assume that the earlier errors are the more
+ * interesting ones:
+ */
+ if (entry >= XEN_MCE_LOG_LEN) {
+ set_bit(XEN_MCE_OVERFLOW,
+ (unsigned long *)&xen_mcelog.flags);
+ return;
+ }
+
+ memcpy(xen_mcelog.entry + entry, mce, sizeof(struct xen_mce));
+
+ xen_mcelog.next++;
+}
+
+static int convert_log(struct mc_info *mi)
+{
+ struct mcinfo_common *mic;
+ struct mcinfo_global *mc_global;
+ struct mcinfo_bank *mc_bank;
+ struct xen_mce m;
+ uint32_t i;
+
+ mic = NULL;
+ x86_mcinfo_lookup(&mic, mi, MC_TYPE_GLOBAL);
+ if (unlikely(!mic)) {
+ pr_warning(XEN_MCELOG "Failed to find global error info\n");
+ return -ENODEV;
+ }
+
+ memset(&m, 0, sizeof(struct xen_mce));
+
+ mc_global = (struct mcinfo_global *)mic;
+ m.mcgstatus = mc_global->mc_gstatus;
+ m.apicid = mc_global->mc_apicid;
+
+ for (i = 0; i < ncpus; i++)
+ if (g_physinfo[i].mc_apicid == m.apicid)
+ break;
+ if (unlikely(i == ncpus)) {
+ pr_warning(XEN_MCELOG "Failed to match cpu with apicid %d\n",
+ m.apicid);
+ return -ENODEV;
+ }
+
+ m.socketid = g_physinfo[i].mc_chipid;
+ m.cpu = m.extcpu = g_physinfo[i].mc_cpunr;
+ m.cpuvendor = (__u8)g_physinfo[i].mc_vendor;
+ m.mcgcap = g_physinfo[i].mc_msrvalues[__MC_MSR_MCGCAP].value;
+
+ mic = NULL;
+ x86_mcinfo_lookup(&mic, mi, MC_TYPE_BANK);
+ if (unlikely(!mic)) {
+ pr_warning(XEN_MCELOG "Failed to find bank error info\n");
+ return -ENODEV;
+ }
+
+ do {
+ if ((!mic) || (mic->size == 0) ||
+ (mic->type != MC_TYPE_GLOBAL &&
+ mic->type != MC_TYPE_BANK &&
+ mic->type != MC_TYPE_EXTENDED &&
+ mic->type != MC_TYPE_RECOVERY))
+ break;
+
+ if (mic->type == MC_TYPE_BANK) {
+ mc_bank = (struct mcinfo_bank *)mic;
+ m.misc = mc_bank->mc_misc;
+ m.status = mc_bank->mc_status;
+ m.addr = mc_bank->mc_addr;
+ m.tsc = mc_bank->mc_tsc;
+ m.bank = mc_bank->mc_bank;
+ m.finished = 1;
+ /*log this record*/
+ xen_mce_log(&m);
+ }
+ mic = x86_mcinfo_next(mic);
+ } while (1);
+
+ return 0;
+}
+
+static int mc_queue_handle(uint32_t flags)
+{
+ struct xen_mc mc_op;
+ int ret = 0;
+
+ mc_op.cmd = XEN_MC_fetch;
+ mc_op.interface_version = XEN_MCA_INTERFACE_VERSION;
+ set_xen_guest_handle(mc_op.u.mc_fetch.data, &g_mi);
+ do {
+ mc_op.u.mc_fetch.flags = flags;
+ ret = HYPERVISOR_mca(&mc_op);
+ if (ret) {
+ pr_err(XEN_MCELOG "Failed to fetch %s error log\n",
+ (flags == XEN_MC_URGENT) ?
+ "urgent" : "nonurgent");
+ break;
+ }
+
+ if (mc_op.u.mc_fetch.flags & XEN_MC_NODATA ||
+ mc_op.u.mc_fetch.flags & XEN_MC_FETCHFAILED)
+ break;
+ else {
+ ret = convert_log(&g_mi);
+ if (ret)
+ pr_warning(XEN_MCELOG
+ "Failed to convert this error log, "
+ "continue acking it anyway\n");
+
+ mc_op.u.mc_fetch.flags = flags | XEN_MC_ACK;
+ ret = HYPERVISOR_mca(&mc_op);
+ if (ret) {
+ pr_err(XEN_MCELOG
+ "Failed to ack previous error log\n");
+ break;
+ }
+ }
+ } while (1);
+
+ return ret;
+}
+
+/* Work function: fetch and log machine check error info (scheduled from the virq handler) */
+static void xen_mce_work_fn(struct work_struct *work)
+{
+ int err;
+
+ mutex_lock(&mcelog_lock);
+
+ /* urgent mc_info */
+ err = mc_queue_handle(XEN_MC_URGENT);
+ if (err)
+ pr_err(XEN_MCELOG
+ "Failed to handle urgent mc_info queue, "
+ "continue handling nonurgent mc_info queue anyway.\n");
+
+ /* nonurgent mc_info */
+ err = mc_queue_handle(XEN_MC_NONURGENT);
+ if (err)
+ pr_err(XEN_MCELOG
+ "Failed to handle nonurgent mc_info queue.\n");
+
+ /* wake processes polling /dev/mcelog */
+ wake_up_interruptible(&xen_mce_chrdev_wait);
+
+ mutex_unlock(&mcelog_lock);
+}
+static DECLARE_WORK(xen_mce_work, xen_mce_work_fn);
+
+static irqreturn_t xen_mce_interrupt(int irq, void *dev_id)
+{
+ schedule_work(&xen_mce_work);
+ return IRQ_HANDLED;
+}
+
+static int bind_virq_for_mce(void)
+{
+ int ret;
+ struct xen_mc mc_op;
+
+ memset(&mc_op, 0, sizeof(struct xen_mc));
+
+ /* Fetch the number of physical CPUs */
+ mc_op.cmd = XEN_MC_physcpuinfo;
+ mc_op.interface_version = XEN_MCA_INTERFACE_VERSION;
+ set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, g_physinfo);
+ ret = HYPERVISOR_mca(&mc_op);
+ if (ret) {
+ pr_err(XEN_MCELOG "Failed to get CPU numbers\n");
+ return ret;
+ }
+
+ /* Fetch each CPU's physical info for later reference */
+ ncpus = mc_op.u.mc_physcpuinfo.ncpus;
+ g_physinfo = kcalloc(ncpus, sizeof(struct mcinfo_logical_cpu),
+ GFP_KERNEL);
+ if (!g_physinfo)
+ return -ENOMEM;
+ set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, g_physinfo);
+ ret = HYPERVISOR_mca(&mc_op);
+ if (ret) {
+ pr_err(XEN_MCELOG "Failed to get CPU info\n");
+ kfree(g_physinfo);
+ return ret;
+ }
+
+ ret = bind_virq_to_irqhandler(VIRQ_MCA, 0,
+ xen_mce_interrupt, 0, "mce", NULL);
+ if (ret < 0) {
+ pr_err(XEN_MCELOG "Failed to bind virq\n");
+ kfree(g_physinfo);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init xen_late_init_mcelog(void)
+{
+ /* Only DOM0 is responsible for MCE logging */
+ if (xen_initial_domain()) {
+ /* register character device /dev/mcelog for xen mcelog */
+ if (misc_register(&xen_mce_chrdev_device))
+ return -ENODEV;
+ return bind_virq_for_mce();
+ }
+
+ return -ENODEV;
+}
+device_initcall(xen_late_init_mcelog);
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index b84bf0b..18fff88 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -59,7 +59,7 @@ static int xen_add_device(struct device *dev)
#ifdef CONFIG_ACPI
handle = DEVICE_ACPI_HANDLE(&pci_dev->dev);
- if (!handle)
+ if (!handle && pci_dev->bus->bridge)
handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge);
#ifdef CONFIG_PCI_IOV
if (!handle && pci_dev->is_virtfn)
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
new file mode 100644
index 0000000..067fcfa
--- /dev/null
+++ b/drivers/xen/pcpu.c
@@ -0,0 +1,371 @@
+/******************************************************************************
+ * pcpu.c
+ * Manage physical CPUs in dom0: get pcpu info and provide a sysfs interface
+ *
+ * Copyright (c) 2012 Intel Corporation
+ * Author: Liu, Jinsong <jinsong.liu@intel.com>
+ * Author: Jiang, Yunhong <yunhong.jiang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/cpu.h>
+#include <linux/stat.h>
+#include <linux/capability.h>
+
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <xen/interface/platform.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+
+#define XEN_PCPU "xen_cpu: "
+
+/*
+ * @cpu_id: Xen physical cpu logical number
+ * @flags: Xen physical cpu status flag
+ * - XEN_PCPU_FLAGS_ONLINE: cpu is online
+ * - XEN_PCPU_FLAGS_INVALID: cpu is not present
+ */
+struct pcpu {
+ struct list_head list;
+ struct device dev;
+ uint32_t cpu_id;
+ uint32_t flags;
+};
+
+static struct bus_type xen_pcpu_subsys = {
+ .name = "xen_cpu",
+ .dev_name = "xen_cpu",
+};
+
+static DEFINE_MUTEX(xen_pcpu_lock);
+
+static LIST_HEAD(xen_pcpus);
+
+static int xen_pcpu_down(uint32_t cpu_id)
+{
+ struct xen_platform_op op = {
+ .cmd = XENPF_cpu_offline,
+ .interface_version = XENPF_INTERFACE_VERSION,
+ .u.cpu_ol.cpuid = cpu_id,
+ };
+
+ return HYPERVISOR_dom0_op(&op);
+}
+
+static int xen_pcpu_up(uint32_t cpu_id)
+{
+ struct xen_platform_op op = {
+ .cmd = XENPF_cpu_online,
+ .interface_version = XENPF_INTERFACE_VERSION,
+ .u.cpu_ol.cpuid = cpu_id,
+ };
+
+ return HYPERVISOR_dom0_op(&op);
+}
+
+static ssize_t show_online(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pcpu *cpu = container_of(dev, struct pcpu, dev);
+
+ return sprintf(buf, "%u\n", !!(cpu->flags & XEN_PCPU_FLAGS_ONLINE));
+}
+
+static ssize_t __ref store_online(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pcpu *pcpu = container_of(dev, struct pcpu, dev);
+ unsigned long long val;
+ ssize_t ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (kstrtoull(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ switch (val) {
+ case 0:
+ ret = xen_pcpu_down(pcpu->cpu_id);
+ break;
+ case 1:
+ ret = xen_pcpu_up(pcpu->cpu_id);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret >= 0)
+ ret = count;
+ return ret;
+}
+static DEVICE_ATTR(online, S_IRUGO | S_IWUSR, show_online, store_online);
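
Given the subsys_system_register() call later in this file and dev_name = "xen_cpu", these attributes should surface under a path such as /sys/devices/system/xen_cpu/xen_cpuN/online (the exact path is inferred, not stated in the patch), and xen_cpu0 never gets the attribute. A hedged user-space sketch for offlining and re-onlining a physical CPU:

/* Sketch: toggle a Xen physical CPU via the sysfs 'online' attribute. */
#include <stdio.h>

static int set_xen_pcpu_online(unsigned int cpu, int online)
{
	char path[128];
	FILE *f;

	/* Path inferred from the xen_cpu bus/dev_name in this file. */
	snprintf(path, sizeof(path),
		 "/sys/devices/system/xen_cpu/xen_cpu%u/online", cpu);

	f = fopen(path, "w");
	if (!f)
		return -1;

	fprintf(f, "%d\n", online);
	return fclose(f);
}

int main(void)
{
	/* Offline physical cpu 2, then bring it back. */
	if (set_xen_pcpu_online(2, 0))
		perror("offline xen_cpu2");
	if (set_xen_pcpu_online(2, 1))
		perror("online xen_cpu2");
	return 0;
}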
+
+static bool xen_pcpu_online(uint32_t flags)
+{
+ return !!(flags & XEN_PCPU_FLAGS_ONLINE);
+}
+
+static void pcpu_online_status(struct xenpf_pcpuinfo *info,
+ struct pcpu *pcpu)
+{
+ if (xen_pcpu_online(info->flags) &&
+ !xen_pcpu_online(pcpu->flags)) {
+ /* the pcpu is onlined */
+ pcpu->flags |= XEN_PCPU_FLAGS_ONLINE;
+ kobject_uevent(&pcpu->dev.kobj, KOBJ_ONLINE);
+ } else if (!xen_pcpu_online(info->flags) &&
+ xen_pcpu_online(pcpu->flags)) {
+ /* The pcpu is offlined */
+ pcpu->flags &= ~XEN_PCPU_FLAGS_ONLINE;
+ kobject_uevent(&pcpu->dev.kobj, KOBJ_OFFLINE);
+ }
+}
+
+static struct pcpu *get_pcpu(uint32_t cpu_id)
+{
+ struct pcpu *pcpu;
+
+ list_for_each_entry(pcpu, &xen_pcpus, list) {
+ if (pcpu->cpu_id == cpu_id)
+ return pcpu;
+ }
+
+ return NULL;
+}
+
+static void pcpu_release(struct device *dev)
+{
+ struct pcpu *pcpu = container_of(dev, struct pcpu, dev);
+
+ list_del(&pcpu->list);
+ kfree(pcpu);
+}
+
+static void unregister_and_remove_pcpu(struct pcpu *pcpu)
+{
+ struct device *dev;
+
+ if (!pcpu)
+ return;
+
+ dev = &pcpu->dev;
+ if (dev->id)
+ device_remove_file(dev, &dev_attr_online);
+
+ /* pcpu removal is done implicitly by the release callback */
+ device_unregister(dev);
+}
+
+static int register_pcpu(struct pcpu *pcpu)
+{
+ struct device *dev;
+ int err = -EINVAL;
+
+ if (!pcpu)
+ return err;
+
+ dev = &pcpu->dev;
+ dev->bus = &xen_pcpu_subsys;
+ dev->id = pcpu->cpu_id;
+ dev->release = pcpu_release;
+
+ err = device_register(dev);
+ if (err) {
+ pcpu_release(dev);
+ return err;
+ }
+
+ /*
+ * Xen never offlines cpu0 due to several restrictions
+ * and assumptions, so no 'online' sysfs control is exposed
+ * for it: one cannot attempt to offline the BSP.
+ */
+ if (dev->id) {
+ err = device_create_file(dev, &dev_attr_online);
+ if (err) {
+ device_unregister(dev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static struct pcpu *create_and_register_pcpu(struct xenpf_pcpuinfo *info)
+{
+ struct pcpu *pcpu;
+ int err;
+
+ if (info->flags & XEN_PCPU_FLAGS_INVALID)
+ return ERR_PTR(-ENODEV);
+
+ pcpu = kzalloc(sizeof(struct pcpu), GFP_KERNEL);
+ if (!pcpu)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&pcpu->list);
+ pcpu->cpu_id = info->xen_cpuid;
+ pcpu->flags = info->flags;
+
+ /* Caller must hold xen_pcpu_lock before manipulating the pcpu list */
+ list_add_tail(&pcpu->list, &xen_pcpus);
+
+ err = register_pcpu(pcpu);
+ if (err) {
+ pr_warning(XEN_PCPU "Failed to register pcpu%u\n",
+ info->xen_cpuid);
+ return ERR_PTR(-ENOENT);
+ }
+
+ return pcpu;
+}
+
+/*
+ * Caller should hold the xen_pcpu_lock
+ */
+static int sync_pcpu(uint32_t cpu, uint32_t *max_cpu)
+{
+ int ret;
+ struct pcpu *pcpu = NULL;
+ struct xenpf_pcpuinfo *info;
+ struct xen_platform_op op = {
+ .cmd = XENPF_get_cpuinfo,
+ .interface_version = XENPF_INTERFACE_VERSION,
+ .u.pcpu_info.xen_cpuid = cpu,
+ };
+
+ ret = HYPERVISOR_dom0_op(&op);
+ if (ret)
+ return ret;
+
+ info = &op.u.pcpu_info;
+ if (max_cpu)
+ *max_cpu = info->max_present;
+
+ pcpu = get_pcpu(cpu);
+
+ /*
+ * Only CPUs in the present map get a sysfs interface.
+ */
+ if (info->flags & XEN_PCPU_FLAGS_INVALID) {
+ if (pcpu)
+ unregister_and_remove_pcpu(pcpu);
+ return 0;
+ }
+
+ if (!pcpu) {
+ pcpu = create_and_register_pcpu(info);
+ if (IS_ERR_OR_NULL(pcpu))
+ return -ENODEV;
+ } else
+ pcpu_online_status(info, pcpu);
+
+ return 0;
+}
+
+/*
+ * Sync dom0's pcpu information with xen hypervisor's
+ */
+static int xen_sync_pcpus(void)
+{
+ /*
+ * The boot cpu always has cpu_id 0 in Xen
+ */
+ uint32_t cpu = 0, max_cpu = 0;
+ int err = 0;
+ struct pcpu *pcpu, *tmp;
+
+ mutex_lock(&xen_pcpu_lock);
+
+ while (!err && (cpu <= max_cpu)) {
+ err = sync_pcpu(cpu, &max_cpu);
+ cpu++;
+ }
+
+ if (err)
+ list_for_each_entry_safe(pcpu, tmp, &xen_pcpus, list)
+ unregister_and_remove_pcpu(pcpu);
+
+ mutex_unlock(&xen_pcpu_lock);
+
+ return err;
+}
+
+static void xen_pcpu_work_fn(struct work_struct *work)
+{
+ xen_sync_pcpus();
+}
+static DECLARE_WORK(xen_pcpu_work, xen_pcpu_work_fn);
+
+static irqreturn_t xen_pcpu_interrupt(int irq, void *dev_id)
+{
+ schedule_work(&xen_pcpu_work);
+ return IRQ_HANDLED;
+}
+
+static int __init xen_pcpu_init(void)
+{
+ int irq, ret;
+
+ if (!xen_initial_domain())
+ return -ENODEV;
+
+ irq = bind_virq_to_irqhandler(VIRQ_PCPU_STATE, 0,
+ xen_pcpu_interrupt, 0,
+ "xen-pcpu", NULL);
+ if (irq < 0) {
+ pr_warning(XEN_PCPU "Failed to bind pcpu virq\n");
+ return irq;
+ }
+
+ ret = subsys_system_register(&xen_pcpu_subsys, NULL);
+ if (ret) {
+ pr_warning(XEN_PCPU "Failed to register pcpu subsys\n");
+ goto err1;
+ }
+
+ ret = xen_sync_pcpus();
+ if (ret) {
+ pr_warning(XEN_PCPU "Failed to sync pcpu info\n");
+ goto err2;
+ }
+
+ return 0;
+
+err2:
+ bus_unregister(&xen_pcpu_subsys);
+err1:
+ unbind_from_irqhandler(irq, NULL);
+ return ret;
+}
+arch_initcall(xen_pcpu_init);
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 2389e58..d4c50d6 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -101,6 +101,19 @@ static int platform_pci_resume(struct pci_dev *pdev)
return 0;
}
+static void __devinit prepare_shared_info(void)
+{
+#ifdef CONFIG_KEXEC
+ unsigned long addr;
+ struct shared_info *hvm_shared_info;
+
+ addr = alloc_xen_mmio(PAGE_SIZE);
+ hvm_shared_info = ioremap(addr, PAGE_SIZE);
+ memset(hvm_shared_info, 0, PAGE_SIZE);
+ xen_hvm_prepare_kexec(hvm_shared_info, addr >> PAGE_SHIFT);
+#endif
+}
+
static int __devinit platform_pci_init(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -109,6 +122,9 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
long mmio_addr, mmio_len;
unsigned int max_nr_gframes;
+ if (!xen_domain())
+ return -ENODEV;
+
i = pci_enable_device(pdev);
if (i)
return i;
@@ -135,6 +151,8 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
platform_mmio = mmio_addr;
platform_mmiolen = mmio_len;
+ prepare_shared_info();
+
if (!xen_have_vector_callback) {
ret = xen_allocate_irq(pdev);
if (ret) {
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index dcb79521..89f264c 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -269,7 +269,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
}
/* returns 0 if the page was successfully put into frontswap, -1 if not */
-static int tmem_frontswap_put_page(unsigned type, pgoff_t offset,
+static int tmem_frontswap_store(unsigned type, pgoff_t offset,
struct page *page)
{
u64 ind64 = (u64)offset;
@@ -295,7 +295,7 @@ static int tmem_frontswap_put_page(unsigned type, pgoff_t offset,
* returns 0 if the page was successfully gotten from frontswap, -1 if
* it was not present (should never happen!)
*/
-static int tmem_frontswap_get_page(unsigned type, pgoff_t offset,
+static int tmem_frontswap_load(unsigned type, pgoff_t offset,
struct page *page)
{
u64 ind64 = (u64)offset;
@@ -362,8 +362,8 @@ static int __init no_frontswap(char *s)
__setup("nofrontswap", no_frontswap);
static struct frontswap_ops __initdata tmem_frontswap_ops = {
- .put_page = tmem_frontswap_put_page,
- .get_page = tmem_frontswap_get_page,
+ .store = tmem_frontswap_store,
+ .load = tmem_frontswap_load,
.invalidate_page = tmem_frontswap_flush_page,
.invalidate_area = tmem_frontswap_flush_area,
.init = tmem_frontswap_init
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 0b48579..b590ee0 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -29,6 +29,7 @@
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>
+#include <xen/xen.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>
@@ -519,15 +520,18 @@ static int __init xen_acpi_processor_init(void)
if (!pr_backup) {
pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
- memcpy(pr_backup, _pr, sizeof(struct acpi_processor));
+ if (pr_backup)
+ memcpy(pr_backup, _pr, sizeof(struct acpi_processor));
}
(void)upload_pm_data(_pr);
}
rc = check_acpi_ids(pr_backup);
- if (rc)
- goto err_unregister;
kfree(pr_backup);
+ pr_backup = NULL;
+
+ if (rc)
+ goto err_unregister;
return 0;
err_unregister:
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index d1c217b..bce15cf 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -618,6 +618,23 @@ static struct xenbus_watch *find_watch(const char *token)
return NULL;
}
+static void xs_reset_watches(void)
+{
+ int err, supported = 0;
+
+ if (!xen_hvm_domain())
+ return;
+
+ err = xenbus_scanf(XBT_NIL, "control",
+ "platform-feature-xs_reset_watches", "%d", &supported);
+ if (err != 1 || !supported)
+ return;
+
+ err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
+ if (err && err != -EEXIST)
+ printk(KERN_WARNING "xs_reset_watches failed: %d\n", err);
+}
+
/* Register callback to watch this node. */
int register_xenbus_watch(struct xenbus_watch *watch)
{
@@ -900,5 +917,8 @@ int xs_init(void)
if (IS_ERR(task))
return PTR_ERR(task);
+ /* shutdown watches for kexec boot */
+ xs_reset_watches();
+
return 0;
}